text
stringlengths
56
7.94M
\begin{document} \title{\large\bf Nabla Euler-Lagrange equations in discrete fractional variational calculus within Riemann and Caputo} \author{\small \bf Thabet Abdeljawad $^a$ \\ {\footnotesize $^a$ Department of Mathematics and Physical Sciences}\\ {\footnotesize Prince Sultan University, P. O. Box 66833, Riyadh 11586, Saudi Arabia}} \date{} \maketitle {\footnotesize {\noindent\bf Abstract.} Different fractional difference types of Euler-Lagrange equations are obtained within Riemann and Caputo by making use of different versions of integration by parts formulas in fractional difference calculus. An example is presented to illustrate part of the results. \\ {\bf Keywords:} right (left) delta and nabla fractional sums, right (left) delta and nabla Riemann, dual identity, Euler equation, integration by parts. \section{Introduction} Fractional calculus, which deals with integration and differentiation of arbitrary orders, attracted the attention of many researchers in the last two decades or so for its widespread applications in different fields of mathematics, physics, engineering, economics and biology. For detailed and sufficient material about this calculus we refer to the books \cite{podlubny, Samko, Kilbas}. However, the discrete fractional calculus, which is not as old as fractional calculus, was initiated in the eighties of the last century in \cite{Miller, Gray}. Then in the last few years many authors started to investigate the theory and applications of the discrete fractional calculus \cite{Th Caputo,Ferd, Feri, Nabla, Atmodel, Gronwall, TDbyparts, Gdelta, Gnabla, Gfound}. Very recently, the authors in \cite{THFer, Th DDNS, Th ADE} have discussed different definitions for fractional differences, especially in the right case, under which suitable integration by parts formulae have been initiated. 
Benefitting from those formulae we continue in this work and apply them to discrete fractional variational calculus to obtain different results from those obtained in \cite{Nuno, Atmodel}. In the usual fractional variational case we refer to \cite{FTD1, FTD2, FTD3,tr, FTD4}. The article is organized as follows: In the rest of this section we give basic definitions and preliminary results about nabla fractional sums and differences. In Section 2 we discuss different integration by parts formulae in discrete fractional calculus. In Section 3 we set some discrete variational problems benefitting from the integration by parts formulae obtained in Section 2. Finally, in Section 4 an example of physical interest is given to illustrate our main results. For the sake of the nabla fractional calculus we have the following definition. \begin{defn} \label{rising}(\cite{Adv,Boros,Grah,Spanier}) (i) For a natural number $m$, the $m$ rising (ascending) factorial of $t$ is defined by \begin{equation}\label{rising 1} t^{\overline{m}}= \prod_{k=0}^{m-1}(t+k),~~~t^{\overline{0}}=1. \end{equation} (ii) For any real number $\alpha$, the $\alpha$ rising function is defined by \begin{equation}\label{alpharising} t^{\overline{\alpha}}=\frac{\Gamma(t+\alpha)}{\Gamma(t)},~~~t \in \mathbb{R}-~\{...,-2,-1,0\},~~0^{\overline{\alpha}}=0 \end{equation} \end{defn} Regarding the rising factorial function we observe for example that \begin{equation}\label{oper} \nabla (t^{\overline{\alpha}})=\alpha t^{\overline{\alpha-1}} \end{equation} \textbf{Notation}: \begin{enumerate} \item[$(i)$] For a real $\alpha>0$, we set $n=[\alpha]+1$, where $[\alpha]$ is the greatest integer less than or equal to $\alpha$. \item[$(ii)$] For real numbers $a$ and $b$, we denote $\mathbb{N}_a=\{a,a+1,...\}$ and $~_{b}\mathbb{N}=\{b,b-1,...\}$. \item[$(iii)$]For $n \in \mathbb{N}$ and real $a$, we denote $$ _{\circleddash}\Delta^n f(t)\triangleq (-1)^n\Delta^n f(t),$$ where $\Delta^n f$ is the $n$th iterate of $\Delta f(t)=f(t+1)-f(t)$. 
\end{enumerate} \begin{defn} \label{fractional sums} \cite{Th ADE} Let $\sigma(t)=t+1$ and $\rho(t)=t-1$ be the forward and backward jumping operators, respectively. Then (i) The (nabla) left fractional sum of order $\alpha>0$ (starting from $a$) is defined by: \begin{equation}\label{nlf} \nabla_a^{-\alpha} f(t)=\frac{1}{\Gamma(\alpha)} \sum_{s=a+1}^t(t-\rho(s))^{\overline{\alpha-1}}f(s),~~t \in \mathbb{N}_{a+1}. \end{equation} (ii)The (nabla) right fractional sum of order $\alpha>0$ (ending at $b$) is defined by: \begin{equation}\label{nrs} ~_{b}\nabla^{-\alpha} f(t)=\frac{1}{\Gamma(\alpha)} \sum_{s=t}^{b-1}(s-\rho(t))^{\overline{\alpha-1}}f(s)=\frac{1}{\Gamma(\alpha)} \sum_{s=t}^{b-1}(\sigma(s)-t)^{\overline{\alpha-1}}f(s),~~t \in ~_{b-1}\mathbb{N}. \end{equation} \end{defn} \begin{defn} \label{fractional differences} \cite{Th ADE} (i) The (nabla) left fractional difference of order $\alpha>0$ (starting from $a$ ) is defined by: \begin{equation}\label{nld} \nabla_a^{\alpha} f(t)=\nabla^n \nabla_a^{-(n-\alpha)}f(t)= \frac{\nabla^n}{\Gamma(n-\alpha)} \sum_{s=a+1}^t(t-\rho(s))^{\overline{n-\alpha-1}}f(s),~~t \in \mathbb{N}_{a+1} \end{equation} (ii) The (nabla) right fractional difference of order $\alpha>0$ (ending at $b$ ) is defined by: \begin{equation}\label{nrd} ~_{b}\nabla^{\alpha} f(t)= ~_{\circleddash}\Delta^n ~_{b}\nabla^{-(n-\alpha)}f(t) =\frac{(-1)^n\Delta^n}{\Gamma(n-\alpha)} \sum_{s=t}^{b-1}(s-\rho(t))^{\overline{n-\alpha-1}}f(s),~~t \in ~ _{b-1}\mathbb{N} \end{equation} \end{defn} \begin{defn} \cite{Th DDNS} Let $\alpha >0$ be noninteger, $~ n=[\alpha]+1,~a(\alpha)=a+n-1$ and $b(\alpha)=b-n+1$. 
Then the (dual) nabla left and right Caputo fractional differences are defined by \begin{equation}\label{Cdual left} ~^{C}\nabla_{a(\alpha)}^\alpha f(t)=\nabla_{a(\alpha)}^{-(n-\alpha)} \nabla^n f(t),~~t \in \mathbb{N}_{a+n} \end{equation} and \begin{equation}\label{Cdual right} _{b(\alpha)}~ ^{C}\nabla^\alpha f(t)=~_{b(\alpha)}\nabla^{-(n-\alpha)} ~_{\circleddash}\Delta^n f(t), ~~t \in ~_{b-n}\mathbb{N}, \end{equation} respectively. \end{defn} Notice that when $0<\alpha < 1$ we have $$~^{C}\nabla_{a(\alpha)}^\alpha f(t)=~^{C}\nabla_a^\alpha f(t)~~\mbox{and}~~ _{b(\alpha)} ^{C}\nabla^\alpha f(t)= ~_{b} ^{C}\nabla^\alpha f(t).$$ It is important to remark that the two quantities $(\nabla_a^{-\alpha}f^\rho)(t)$ and $(\nabla_a^{-\alpha}f)(\rho(t))$ are different, where $\rho(t)=t-1$. In connection, we state the following properties without proofs. \begin{prop}\label{properties} Let $\rho(t)=t-1$, $\sigma(t)=t+1$, $\alpha >0$ and $f$ be a function defined on $\mathbb{N}_a \cap ~_{b}\mathbb{N}$ where $a\equiv b ~(mod ~1)$. Then \begin{itemize} \item 1) $(\nabla_a^ {-\alpha}f^\rho)(t)=(\nabla_{a-1}^ {-\alpha}f)(\rho(t))$. \item 2) $(\nabla_a^ {\alpha}f^\rho)(t)=(\nabla_{a-1}^ {\alpha}f)(\rho(t))$. \item 3) $(~^{C}\nabla_a^\alpha f^\rho)(t)=(~^{C}\nabla_{a-1}^\alpha f)(\rho(t))$. \item 4)$(~_{b}\nabla^{-\alpha}f^\sigma)(t)=(~_{b+1}\nabla^{-\alpha}f)(\sigma(t))$. \item 5)$(~_{b}\nabla^{\alpha}f^\sigma)(t)=(~_{b+1}\nabla^{\alpha}f)(\sigma(t))$. \item 6)$(~^{C}_{b}\nabla^\alpha f^\sigma)(t)=(~^{C}_{b+1}\nabla^\alpha f)(\sigma(t))$. \end{itemize} \end{prop} \section{Integration by parts for fractional sums and differences} In this section we state the integration by parts formulas for nabla fractional sums and differences obtained in \cite{THFer}, whereafter in \cite{Th ADE}, delta by parts formulas are obtained by using certain dual identities. Then, we proceed to obtain one more integration by parts formula where both Riemann and Caputo fractional differences can appear. 
\begin{prop} \label{summation by parts}\cite{THFer} For $\alpha>0$, $a,b \in \mathbb{R}$, $f$ defined on $\mathbb{N}_a$ and $g$ defined on $~_{b}\mathbb{N}$, we have \begin{equation}\label{sum1} \sum_{s=a+1}^{b-1}g(s) \nabla_a^{-\alpha} f(s)=\sum_{s=a+1}^{b-1}f(s)~_{b}\nabla^{-\alpha}g(s). \end{equation} \end{prop} \begin{prop} \label{nabla bydiff} \cite{THFer} Let $\alpha>0$ be non-integer and $a,b\in \mathbb{R}$ such that $a< b$ and $b\equiv a~(mod~1)$. If $f$ is defined on $ _{b}\mathbb{N}$ and $g$ is defined on $\mathbb{N}_a$, then \begin{equation}\label{nabla bydiff1} \sum_{s=a+1}^{b-1} f(s)\nabla_a^\alpha g(s) =\sum_{s=a+1}^{b-1}g(s)~_{b}\nabla^\alpha f(s). \end{equation} \end{prop} Now by the above nabla integration by parts formulas and using dual identities in \cite{Th ADE}, the following delta integration by parts formulae were obtained. \begin{prop} \label{delta by parts semmation} Let $\alpha>0$, $a,b\in \mathbb{R}$ such that $a< b$ and $b\equiv a~(mod~1)$. If $f$ is defined on $\mathbb{N}_a$ and $g$ is defined on $_{b}\mathbb{N}$, then we have \begin{equation}\label{byse} \sum_{s=a+1}^{b -1}g(s)(\Delta_{a+1} ^{-\alpha}f)(s+\alpha)=\sum_{s=a+1}^{b-1}f(s) ~_{b-1}\Delta ^{-\alpha}g(s-\alpha). \end{equation} \end{prop} \begin{prop} \label{delta by parts difference} Let $\alpha>0$ be non-integer and assume that $b\equiv a~(mod~1)$. If $f$ is defined on $ _{b}\mathbb{N}$ and $g$ is defined on $\mathbb{N}_a$, then \begin{equation}\label{bydiff1} \sum_{s=a+1}^{b-1} f(s)\Delta_{a+1}^\alpha g(s-\alpha)=\sum_{s=a+1}^{b-1}g(s)~_{b-1}\Delta^\alpha f(s+\alpha). \end{equation} \end{prop} The following version of integration by parts contains boundary conditions. \begin{thm} \label{Caputo by parts} \cite{Th DDNS} Let $0<\alpha<1$ and $f,g$ be functions defined on $\mathbb{N}_a \cap ~_{b}\mathbb{N}$ where $a\equiv b ~(mod ~1)$. 
Then \begin{equation} \label{cbp1} \sum_{s=a+1}^{b-1} g(s) ~^{C}\nabla_a^\alpha f(s)=f(s) ~_{b}\nabla^{-(1-\alpha)}g(s)\mid_a^{b-1}+ \sum_{s=a+1}^{b-1} f(s-1) (~_{b}\nabla^\alpha g)(s-1), \end{equation} where clearly $_{b}\nabla^{-(1-\alpha)}g(b-1)= g(b-1)$. \end{thm} Similarly, if we interchange the roles of Caputo and Riemann we obtain the following version of integration by parts for fractional differences. \begin{thm} \label{Riemann2 by parts} Let $0<\alpha<1$ and $f,g$ be functions defined on $\mathbb{N}_a \cap ~_{b}\mathbb{N}$ where $a\equiv b ~(mod ~1)$. Then \begin{eqnarray}\label{cbp2}\nonumber \sum_{s=a+1}^{b-1} f(s-1) \nabla_a^\alpha g(s) &=& f(s) \nabla_a^{-(1-\alpha)}g(s)\mid_a^{b-1}+ \sum_{s=a}^{b-2} g(s+1) ~~^{C}_{b}\nabla^\alpha f(s) \\ &=& f(s) \nabla_a^{-(1-\alpha)}g(s)\mid_a^{b-1}+ \sum_{s=a+1}^{b-1} g(s) ~~(^{C}_{b}\nabla^\alpha f)(s-1) \end{eqnarray} where clearly $\nabla_a^{-(1-\alpha)}g(a)= 0$. \end{thm} \begin{proof} From the definition of the left Riemann fractional difference, the integration by parts in $\nabla-$difference calculus, Proposition \ref{summation by parts}, noting that $\nabla f(s)=\Delta f(s-1)$, and the definition of the right Caputo fractional difference we can write \begin{eqnarray} \nonumber \sum_{s=a+1}^{b-1} f(s-1) \nabla_a^\alpha g(s) &=& \sum_{s=a+1}^{b-1} f(s-1)\nabla \nabla_a^{-(1-\alpha)} g(s) \\ \nonumber &=& f(s) \nabla_a^{-(1-\alpha)}g(s)\mid_a^{b-1}-\sum_{s=a+1}^{b-1} \nabla f(s) (\nabla_a^{-(1-\alpha)}g)(s) \\ \nonumber &=& f(s) \nabla_a^{-(1-\alpha)}g(s)\mid_a^{b-1}-\sum_{s=a+1}^{b-1}g(s)~_{b}\nabla^{-(1-\alpha)} \nabla f(s)\\ \nonumber &=&f(s) \nabla_a^{-(1-\alpha)}g(s)\mid_a^{b-1}+ \sum_{s=a}^{b-2} g(s+1) ~~^{C}_{b}\nabla^\alpha f(s)\\ \nonumber &=&f(s) \nabla_a^{-(1-\alpha)}g(s)\mid_a^{b-1}+ \sum_{s=a+1}^{b-1} g(s) ~~(^{C}_{b}\nabla^\alpha f)(s-1). \end{eqnarray} Hence, the proof is completed. 
\end{proof} \section{Fractional difference Euler-Lagrange Equations} \begin{thm}\label{m1} Let $\alpha>0$ be non-integer, $a,b \in \mathbb{R}$, and $f$ is defined on $\mathbb{N}_a \cap ~_{b}\mathbb{N}$, where $a\equiv b ~(mod ~1)$. Assume that the functional $$J(f)=\sum_{t=a+1}^{b-1} L(t,f(t),\nabla_{a-1}^\alpha f(t) )$$ has a local extremum in $S=\{y:\mathbb{N}_a \cap ~_{b}\mathbb{N}\rightarrow \mathbb{R}~\texttt{is bounded}, ~~y(a)=A\}$ at some $f \in S$, where $L:(\mathbb{N}_a \cap ~_{b}\mathbb{N})\times \mathbb{R}\times \mathbb{R}\rightarrow \mathbb{R}$. Then, \begin{equation}\label{E1} [L_1(s) + ~_b\nabla^\alpha L_2(s)] =0,~\texttt{for all}~ s \in (\mathbb{N}_{a+1} \cap ~_{b-1}\mathbb{N}), \end{equation} where $L_1(s)= \frac{\partial L}{\partial f}(s)$ and $L_2(s)=\frac{\partial L}{\partial \nabla_a^\alpha f}(s)$. \end{thm} \begin{proof} Without loss of generality, assume that $J$ has a local maximum in $S$ at $f$. Hence, there exists an $\epsilon>0$ such that $J(\widehat{f})-J(f)\leq 0$ for all $\widehat{f}\in S$ with $\|\widehat{f}-f\|=\sup_{t \in \mathbb{N}_a \cap ~_{b}\mathbb{N}} |\widehat{f}(t)-f(t)|< \epsilon$. For any $\widehat{f} \in S$ there is an $\eta \in H=\{y:\mathbb{N}_a \cap ~_{b}\mathbb{N}\rightarrow \mathbb{R}~\texttt{is bounded}, ~~y(a)=0\}$ such that $\widehat{f}=f+\epsilon \eta$. Then, the $\epsilon-$Taylor's theorem implies that $$L(t,\widehat{f},\nabla_{a-1}^\alpha \widehat{f})=L(t,f+\epsilon \eta,\nabla_{a-1}^\alpha f+\epsilon \nabla_{a-1}^\alpha \eta)=L(t,f,\nabla_{a-1}^\alpha f)+ \epsilon [\eta L_1+\nabla_{a-1}^\alpha\eta L_2]+O(\epsilon^2).$$ Then, \begin{eqnarray}\nonumber J(\widehat{f})-J(f) &=& \sum_{t=a+1}^{b-1}L(t,\widehat{f}(t),\nabla_{a-1}^\alpha \widehat{f}(t))-\sum_{t=a+1}^{b-1}L(t,f(t),\nabla_{a-1}^\alpha f(t)) \\ &=& \epsilon \sum_{t=a+1}^{b-1}[\eta(t) L_1(t)+ \nabla_{a-1}^\alpha \eta(t) L_2(t)]+ O(\epsilon^2). 
\end{eqnarray} Let the quantity $\delta J(\eta,y)=\sum_{t=a+1}^{b-1}[\eta(t) L_1(t)+ \nabla_{a-1}^\alpha \eta(t) L_2(t)]$ denote the first variation of $J$. Evidently, if $\eta \in H$ then $-\eta \in H$, and $\delta J(\eta,y)=-\delta J(-\eta,y)$. For $\epsilon$ small, the sign of $J(\widehat{f})-J(f)$ is determined by the sign of the first variation, unless $\delta J(\eta,y)=0$ for all $\eta \in H$. To make the parameter $\eta$ free, we use the integration by parts formula in Proposition \ref{nabla bydiff} together with the fact that $\nabla_{a-1}^\alpha \eta(t)=\nabla_{a}^\alpha \eta(t)+\frac{\eta(a)}{\Gamma(\alpha)}(t-a+1)^{\overline{\alpha-1}}$, to reach $$\delta J(\eta,y)=\sum_{s=a+1}^{b-1} \eta (s)[L_1(s) + ~_b\nabla^\alpha L_2(s)]+\frac{\eta(a)}{\Gamma(\alpha)}\sum_{t=a+1}^{b-1}(t-a+1)^{\overline{\alpha-1}}L_2(t) =0,$$ for all $\eta \in H$, and hence the result follows by taking the special $\eta$'s in $\{e_t=(0,...,1,...,0), ~1~\texttt{in t-th place}: t \in \mathbb{N}_{a+1} \cap ~_{b-1}\mathbb{N} \}$ with $\eta(a)=0$. \end{proof} Note that in the above theorem the Riemann fractional variational difference problem will not require any boundary conditions at the points $a$ and $b-1$ if we consider $\nabla_a^\alpha$ instead of $\nabla_{a-1}^\alpha$ in the Lagrangian $L$ and hence the functions $\eta$ can be taken from $S$ again without any restrictions. This is due to the fact that the used integration by parts formula does not contain any boundary conditions. Different boundary conditions can be generated at $b$ as well, if we terminate the sum at $b$ instead of $b-1$. Next, we develop a discrete Riemann fractional variational problem of order $0<\alpha <1$ with different boundary conditions by making use of the integration by parts formula in Theorem \ref{Riemann2 by parts}. \begin{thm} \label{mm} Let $0< \alpha <1$ be non-integer, $a,b \in \mathbb{R}$, and $f$ is defined on $\mathbb{N}_a \cap ~_{b}\mathbb{N}$, where $a\equiv b ~(mod ~1)$. 
Assume that the functional $$J(f)=\sum_{t=a+1}^{b-1} L(t,f(t),\nabla_a^\alpha f(t) )$$ has a local extremum in $S=\{y:\mathbb{N}_a \cap ~_{b}\mathbb{N}\rightarrow \mathbb{R}~\texttt{is bounded}\}$ at some $f \in S$, where $L:(\mathbb{N}_a \cap ~_{b}\mathbb{N})\times \mathbb{R}\times \mathbb{R}\rightarrow \mathbb{R}$. Further, assume either $\nabla_a^{-(1-\alpha)}f (b-1)=A$ or $L_2 (b)=0$. Then, \begin{equation}\label{E2} [L_1(s) + (~^{C}_b\nabla^\alpha L_2^\sigma)(s-1)]=[L_1(s) + (~^{C}_{b+1}\nabla^\alpha L_2)(s)] =0,~\texttt{for all}~ s \in (\mathbb{N}_{a+1} \cap ~_{b-1}\mathbb{N}), \end{equation} where $L_1(s)= \frac{\partial L}{\partial f}(s)$ and $L_2(s)=\frac{\partial L}{\partial \nabla_a^\alpha f}(s)$. \end{thm} \begin{proof} We proceed as in Theorem \ref{m1}, except when $\nabla_a^{-(1-\alpha)}f(b-1)$ is preassigned the function $\eta$ is taken from $H=\{y:\mathbb{N}_a \cap ~_{b}\mathbb{N}\rightarrow \mathbb{R}~\texttt{is bounded},~\nabla_a^{-(1-\alpha)}y(b-1)=0\}$. Then, $$\delta J(\eta,f)=\sum_{t=a+1}^{b-1}[\eta(t) L_1(t)+ \nabla_a^\alpha \eta(t) L_2^\sigma(t-1)]=0,$$ for every $\eta \in H$. Then, the integration by parts formula in Theorem \ref{Riemann2 by parts} implies that $$\delta J(\eta,f)=\sum_{t=a+1}^{b-1}\eta(t)[ L_1(t)+ ~_{b}^{C}\nabla^\alpha L_2^\sigma(t-1)]+ L_2^\sigma(t) \nabla_a ^{-(1-\alpha)}\eta(t)|_a^{b-1}=0,$$ for every $\eta \in H$. Finally, the assumption and Proposition \ref{properties} 6) imply (\ref{E2}) and the proof is finished. \end{proof} Similar to what was applied in Theorem \ref{m1}, we can generate boundary conditions at $a$ as well in Theorem \ref{mm} above, if we consider $\nabla_{a-1}^\alpha$ instead of $\nabla_{a}^\alpha$ in the Lagrangian $L$. Finally, we obtain the Euler-Lagrange equations for a Lagrangian including the Caputo left fractional difference by making use of the integration by parts formula in Theorem \ref{Caputo by parts}. 
\begin{thm} \label{mmm} Let $0< \alpha <1$ be non-integer, $a,b \in \mathbb{R}$, and $f$ is defined on $\mathbb{N}_a \cap ~_{b}\mathbb{N}$, where $a\equiv b ~(mod ~1)$. Assume that the functional $$J(f)=\sum_{t=a+1}^{b-1} L(t,f^\rho(t),~^{C}\nabla_a^\alpha f(t) )$$ has a local extremum in $S=\{y:\mathbb{N}_a \cap ~_{b}\mathbb{N}\rightarrow \mathbb{R}~\texttt{is bounded}\}$ at some $f \in S$, where $L:(\mathbb{N}_a \cap ~_{b}\mathbb{N})\times \mathbb{R}\times \mathbb{R}\rightarrow \mathbb{R}$. Further, assume either $f(a)=A$ and $f(b-1)=B$ or the natural boundary conditions $~_{b}\nabla^{-(1-\alpha)}L_2 (a)=~_{b}\nabla^{-(1-\alpha)}L_2 (b-1)=0$. Then, \begin{equation}\label{E3} [L_1^\sigma(s) + (~_b\nabla^\alpha L_2)(s)] =0,~\texttt{for all}~ s \in (\mathbb{N}_{a+1} \cap ~_{b-2}\mathbb{N}). \end{equation} \end{thm} \begin{proof} If $f$ is preassigned at $a$ and $b-1$ then the function $\eta$ is taken from $H=\{y:\mathbb{N}_a \cap ~_{b}\mathbb{N}\rightarrow \mathbb{R}~\texttt{is bounded},~y(a)=y(b-1)=0\}$. Then, we proceed to reach $$\delta J(\eta,f)=\sum_{t=a+1}^{b-1}[\eta(t-1) L_1(t)+ ~^{C}\nabla_a^\alpha \eta(t) L_2(t)]=0,$$ for every $\eta \in H$. The integration by parts formula in Theorem \ref{Caputo by parts} then implies that $$\delta J(\eta,f)=\sum_{t=a+1}^{b-1}\eta(t-1)[ L_1(t)+ (~_{b}\nabla^\alpha L_2)(t-1)]+\eta(t) ~_{b}\nabla^{-(1-\alpha)}L_2(t)|_a^{b-1}=0,$$ for every $\eta \in H$. Hence, (\ref{E3}) follows. \end{proof} We finish this section by remarking that we can obtain a delta analogue of the discussed nabla discrete variational problems in this section by making use of the dual identities studied in \cite{Th DDNS, Th ADE}. \section{Example} In order to exemplify our results we analyze an example of physical interest under Theorem \ref{mm} and Theorem \ref{mmm}. Namely, let us consider the following fractional discrete actions, \begin{itemize} \item 1) $J(y)=\sum_{t=a+1}^{b-1}[\frac{1}{2}(\nabla_a^\alpha y(t))^2-V(y(t))],$ where $0<\alpha <1$. 
Assume either $\nabla_a^{-(1-\alpha)}y(b-1)=A$ or $\nabla_a^\alpha y(b)=0$. Then the Euler-Lagrange equation by applying Theorem \ref{mm} is $$~^{C}_{b+1}\nabla^\alpha \nabla_a^\alpha y(s)-\frac{dV}{dy}(s)=0~\texttt{for all}~ s \in (\mathbb{N}_{a+1} \cap ~_{b-1}\mathbb{N}).$$ \item 2)$J(y)=\sum_{t=a+1}^{b-1}[\frac{1}{2}(~^{C}\nabla_a^\alpha y(t))^2-V(y(\rho(t)))],$ where $0<\alpha <1$. Assume either $y(a)=A$ and $y(b-1)=B$ or the natural boundary conditions $~_{b}\nabla^{-(1-\alpha)}~^{C}\nabla_a^\alpha y(a)=~_{b}\nabla^{-(1-\alpha)} ~^{C}\nabla_a^\alpha y(b-1)=0$. Then the Euler-Lagrange equation by applying Theorem \ref{mmm} is $$ (~_b\nabla^\alpha ~^{C}\nabla_a^\alpha y)(s)-\frac{dV}{d y^\rho}(\sigma(s)) =0,~\texttt{for all}~ s \in (\mathbb{N}_{a+1} \cap ~_{b-2}\mathbb{N}).$$ Finally, we remark that it is of interest to deal with the Euler-Lagrange equations obtained in the above example, where we have a composition of left and right fractional differences. In the usual fractional case for such left-right fractional dynamical systems we mention the work done in \cite{Thabet}. \end{itemize} \section{Acknowledgments} The author would like to thank Prince Salman Research and Translation Center in Prince Sultan University for the financial support. \end{document}
\begin{document} \begin{frontmatter} \title{Smoothness of the density for solutions to Gaussian rough differential equations} \runtitle{Smoothness of Gaussian RDEs} \begin{aug} \author[A]{\fnms{Thomas} \snm{Cass}\corref{}\ead[label=e1]{[email protected]}}, \author[B]{\fnms{Martin} \snm{Hairer}\ead[label=e2]{[email protected]}\thanksref{T1}}, \author[C]{\fnms{Christian} \snm{Litterer}\thanksref{T2}\ead[label=e3]{[email protected]}}\break \and \author[D]{\fnms{Samy} \snm{Tindel}\ead[label=e4]{[email protected]}\thanksref{T3}} \runauthor{Cass, Hairer, Litterer and Tindel} \affiliation{Imperial College London, University of Warwick, Imperial College London\break and Universit\'e de Lorraine} \address[A]{T. Cass\\ C. Litterer\\ Department of Mathematics\\ Imperial College London\\ The Huxley Building\\ 180 Queensgate, London\\ United Kingdom\\ \printead{e1}} \address[B]{M. Hairer\\ Mathematics Institute\\ University of Warwick\\ Coventry, CV4 7AL\\ United Kingdom\\ \printead{e2}\hspace*{10pt}} \address[C]{C. Litterer\\ Centre de Math\'ematiques Appliqu\'ees\\ \'Ecole Polytechnique\\ Route de Saclay\\ 91128 Palaiseau\\ France\\ \printead{e3}} \address[D]{S. Tindel\\ Institut {\'E}lie Cartan Nancy\\ Universit\'e de Lorraine\\ B.P. 239\\ 54506 Vand{\oe}uvre-l{\`e}s-Nancy\\ France\\ \printead{e4}} \thankstext{T1}{Supported by EPSRC Grant EP/D071593/1, a Wolfson Research Merit award of the Royal Society and a Philip Leverhulme Prize.} \thankstext{T2}{Supported by EPSRC Grant EP/H000100/1 and supported in part by a grant of the European Research Council (ERC Grant nr. 258237).} \thankstext{T3}{S. Tindel is member of the BIGS (Biology, Genetics and Statistics) team at INRIA.} \end{aug} \received{\smonth{9} \syear{2012}} \revised{\smonth{10} \syear{2013}} \begin{abstract} We consider stochastic differential equations of the form $dY_{t}= V ( Y_{t} ) \,dX_{t}+V_{0} ( Y_{t} ) \,dt$ driven by a multi-dimensional Gaussian process. 
Under the assumption that the vector fields $V_{0}$ and $V= ( V_{1},\ldots,V_{d} ) $ satisfy H\"{o}rmander's bracket condition, we demonstrate that $Y_{t}$ admits a smooth density for any $t\in(0,T]$, provided the driving noise satisfies certain nondegeneracy assumptions. Our analysis relies on an interplay of rough path theory, Malliavin calculus and the theory of Gaussian processes. Our result applies to a broad range of examples including fractional Brownian motion with Hurst parameter $H>1/4$, the Ornstein--Uhlenbeck process and the Brownian bridge returning after time $T$. \end{abstract} \begin{keyword}[class=AMS] \kwd{60H07} \kwd{60G15} \kwd{60H10} \end{keyword} \begin{keyword} \kwd{Rough path analysis} \kwd{Gaussian processes} \kwd{Malliavin calculus} \end{keyword} \end{frontmatter} \section{Introduction}\label{sec1} Over the past decade, our understanding of stochastic differential equations (SDEs) driven by Gaussian processes has evolved considerably. As a natural counterpart to this development, there is now much interest in investigating the probabilistic properties of solutions to these equations. Consider an SDE of the form \begin{equation} dY_{t}=V(Y_{t})\,dX_{t}+V_{0} ( Y_{t} ) \,dt,\qquad Y ( 0 ) =y_{0}\in \mathbb{R} ^{e}, \label{Int-Eq} \end{equation} driven by an $ \mathbb{R} ^{d}$-valued continuous Gaussian process $X$ along $C_{b}^{\infty}$ vector fields $V_{0}$ and $V= ( V_{1},\ldots,V_{d} ) $ on $ \mathbb{R} ^{e}$. Once the existence and uniqueness of $Y$ has been settled, it is natural to ask about the existence of a smooth density of $Y_{t}$ for \mbox{$t>0$}. In the context of diffusion processes, the theory is classical and goes back to H\"{o}rmander \cite{horm} for an analytical approach, and Malliavin \cite{mall} for a probabilistic one. 
For the case where $X$ is fractional Brownian motion, this question was first addressed by Nualart and Hu \cite{NH}, where the authors show the existence and smoothness of the density when the vector fields are elliptic, and the driving Gaussian noise is fractional Brownian motion (fBm) for $H>1/2$. Further progress was achieved in \cite{BH} where, again for the regime $H>1/2$, the density was shown to be smooth under H\"{o}rmander's celebrated bracket condition. Rougher noises are not directly amenable to the analysis put forward in these two papers. Additional ingredients have since gradually become available with the development of a broader theory of (Gaussian) rough paths (see \cite{L,CQ,FV}). The papers \cite{CFV} and \cite{CF} used this technology to establish the existence of a density under fairly general assumptions on the Gaussian driving noises. These papers, however, fall short of proving the smoothness of the density, because the proof demands far more quantitative estimates than were available at the time. More recently, decisive progress was made on two aspects which obstructed the extension of this earlier work. First, the paper \cite{CLL} established sharp tail estimates on the Jacobian of the flow $J_{t\leftarrow0}^{\mathbf{X} }(y_{0})$ driven by a wide class of (rough) Gaussian processes. The tail turns out to decay quickly enough to allow to conclude the finiteness of all moments for $J_{t\leftarrow0}^{\mathbf{X}}(y_{0})$. Second, \cite{H3} obtained a general, deterministic version of the key Norris lemma (see also \cite {HT} for some recent work in the context of fractional Brownian motion). The lemma of Norris first appeared in~\cite{N} and has been interpreted as a quantitative version of the Doob--Meyer decomposition. Roughly speaking, it ensures that there cannot be too many cancellations between martingale and bounded variation parts of the decomposition. 
The work~\cite{H3}, however, shows that the same phenomenon arises in a purely deterministic setting, provided that the one-dimensional projections of the driving process are sufficiently and uniformly rough. This intuition is made precise through a notion of ``modulus of H\"{o}lder roughness;'' see Definition~\ref{def:rough} below. Together with an analysis of the higher order Malliavin derivatives of the flow of (\ref{Int-Eq}), also carried out in \cite{H3}, these two results yield a H\"{o}rmander-type theorem for fractional Brownian motion if $H>1/3$. In this paper, we aim to realise the broader potential of these developments by generalising the analysis to a wide class of Gaussian processes. This class includes fractional Brownian motion with Hurst parameter $H \in({\frac{1}{4}}, {\frac{1}{2}}]$, the Ornstein--Uhlenbeck process, and the Brownian bridge. Instead of focusing on particular examples of processes, our approach aims to develop a general set of conditions on $X$ under which Malliavin--H\"{o}rmander theory still works. The probabilistic proof of H\"ormander's theorem is intricate, and hard to summarise in a few lines; see \cite{MallHor} for a relatively short exposition. However, let us highlight some basic features of the method in order to see where our main contributions lie: \begin{longlist}[(iii)] \item[(i)] At the centre of the proof of H\"{o}rmander's theorem is a quantitative estimate on the nondegeneracy of the Malliavin covariance matrix $C_{T} ( \omega ) $. Our effort in this direction consists in a direct and instructive approach, which reveals an additional structure of the problem. In particular, the conditional variance of the process plays an important role, which does not appear to have been noticed so far. More specifically, following \cite{CFV} we study the Malliavin covariance matrix as a 2D Young integral against the covariance function $R ( s,t ) $. 
This provides the convenient representation \[ v^{T}C_{t}(\omega) v=\int_{ [ 0,t ] \times [ 0,t ] }f_{s} ( v;\omega ) f_{r} ( v;\omega ) \,dR ( s,r ) \] for some $\gamma$-H\"{o}lder continuous $f ( v;\omega ) $, which avoids any detours via the fractional calculus that are specific to fBm. Compared to the setting of \cite{CF}, we have to impose some additional assumptions on $R ( s,t ) $, but our more quantitative approach allows us in return to relax the zero--one law condition required in this paper. \item[(ii)] An essential step in the proof is achieved when one obtains some lower bounds on $v^{T}C_{t} v$ in terms of the supremum norm of $f$. Toward this aim, we prove a novel interpolation inequality, which lies at the heart of this paper. It is explicit and also sharp in the sense that it collapses to a well-known inequality for the space $L^{2}([ 0,T]) $ in the case of Brownian motion. Furthermore, this result should be important in other applications in the area, for example, in establishing bounds on the density function (see \cite{BOS} for a first step in this direction) or studying small-time asymptotics. \item[(iii)] H\"{o}rmander's theorem also relies on an accurate analysis and control of the higher order Malliavin derivatives of the flow $J_{t\leftarrow 0}^{\mathbf{X}}(y_{0})$. This turns out to be notationally cumbersome, but structurally quite similar to the technology already developed for fBm. For this step, we therefore rely as much as possible on the analysis performed in \cite{H3}. The integrability results in \cite{CLL} then play the first of two important roles in showing that the flow belongs to the Shigekawa--Sobolev space $\mathbb{D}^{\infty}(\mathbb{R}^{e})$. \item[(iv)] Finally, an induction argument that allows us to transfer the bounds from the interpolation inequality to the higher order Lie brackets of the vector fields has to be set up. 
This induction requires another integrability estimate for $J_{t\leftarrow0}^{\mathbf{X}}(y_{0})$, plus a Norris type lemma allowing to bound a generic integrand $A$ in terms of the resulting noisy integral $\int A \,dX$ in the rough path context. This is the content of our second main contribution, which can be seen as a generalisation of the Norris lemma from \cite{H3} to a much wider range of regularities and Gaussian structures for the driving process $X$. Namely, we extend the result of \cite{H3} from $p$-rough paths with $p<3$ to general $p$ under the same ``modulus of H\"{o}lder roughness'' assumption. It is interesting to note that the argument still only requires information about the roughness of the path itself and not its lift. \end{longlist} Let us further comment on the Gaussian assumptions allowing the derivation of the interpolation inequality briefly described in step (ii) above. First, we need a standing assumption that regards the regularity of $R(s,t)$ (expressed in terms of its so called 2D $\rho$-variation, see \cite{FV}) and complementary Young regularity of $X$ and its Cameron--Martin space. This is a standard assumption in the theory of Gaussian rough paths. The first part of the condition guarantees the existence of a natural lift of the process to a rough path. The complementary Young regularity in turn is necessary to perform Malliavin calculus, and allows us to obtain the integrability estimates for $J_{t\leftarrow0}^{\mathbf{X}}(y_{0})$ in \cite{CLL}. In order to understand the assumptions on which our central interpolation inequality hinges, let us mention that it emerges from the need to prove lower bounds of the type \begin{equation} \int_{ [ 0,T ] \times [ 0,T ] }f_{s}f_{t} \,dR ( s,t ) \geq C\vert f\vert _{\gamma; [ 0,T ] } ^{a}\vert f\vert _{\infty; [ 0,T ] }^{2-a} \label {opt} \end{equation} for some exponents $\gamma$ and $a$, and all $\gamma$-H\"{o}lder continuous functions $f$. 
After viewing the integral in (\ref{opt}) along a sequence of discrete-time approximations to the integral, relation \eqref{opt} relies on solving a sequence of finite dimensional partially constrained quadratic programming (QP) problems. These (QP) problems involve some matrices $Q$ whose entries can be written as $Q^{ij}=E[X_{t_{i},t_{i+1}}^{1} X_{t_{j},t_{j+1}}^{1}]$, where $X_{t_{i},t_{i+1}}^{1}$ denotes the increment $X_{t_{i+1}}^{1}-X_{t_{i}}^{1}$ of the first component of $X$. Interestingly enough, some positivity properties of Schur complements computed within the matrix $Q$ play a prominent role in the resolution of the aforementioned (QP) problems. In order to guarantee these positivity properties, we shall make two nondegeneracy type assumptions on the conditional variance and covariance structure of our underlying process $X^{1}$ (see Conditions \ref{nondeterm} and \ref{cond dom} below). This is quite natural since Schur complements are classically related to conditional variances in elementary Gaussian analysis. We also believe that our conditions essentially characterise the class of processes for which we can quantify the nondegeneracy of $C_{T}(\omega)$ in terms of the conditional variance of the process $X$. The outline of the article is as follows. In Section~\ref{rough paths}, we give a short overview of the elements of the theory of rough paths required for our analysis. Section~\ref{main thm} then states our main result. In Section~\ref{examples section}, we demonstrate how to verify the nondegeneracy assumptions required for the driving process in a number of concrete examples. The remainder of the article is devoted to the proofs. First, in Section~\ref{Norris lemma section}, we state and prove our general version of Norris's lemma and we apply it to the class of Gaussian processes we have in mind.\vadjust{\goodbreak} In Section~\ref{interpol}, we then provide the proof of an interpolation inequality of the type (\ref{opt}). 
In Section~\ref{differentiability section}, we obtain bounds on the derivatives of the solution with respect to its initial condition, as well as on its Malliavin derivative. Finally, we combine all of these ingredients in Section~\ref{section proof main theorem} to complete the proof of our main theorem. \section{Rough paths and Gaussian processes} \label{rough paths} In this section, we introduce some basic notation concerning rough paths, following the exposition in \cite{CLL}. In particular, we recall the conditions needed to ensure that a given Gaussian process has a natural rough path lift. For $N\in\mathbb{N}$, recall that the truncated tensor algebra $T^{N}(\mathbb{R} ^{d})$ is defined by $T^{N}(\mathbb{R}^{d})=\bigoplus_{n=0}^{N}(\mathbb {R} ^{d})^{\otimes n}$, with the convention $(\mathbb{R}^{d})^{\otimes 0}=\mathbb{R}$. The space $T^{N}(\mathbb{R}^{d})$ is equipped with a straightforward vector space structure, plus an operation $\otimes$ defined by \[ \pi_{n}(g\otimes h)=\sum_{k=0}^{n} \pi_{n-k}(g)\otimes\pi_{k}(h),\qquad g,h\in T^{N}\bigl( \mathbb{R}^{d}\bigr), \] where $\pi_{n}$ denotes the projection on the $n$th tensor level. Then $(T^{N}(\mathbb{R}^{d}),+,\otimes)$ is an associative algebra with unit element $\mathbf{1} \in(\mathbb{R}^{d})^{\otimes0}$. At its most fundamental, we will study continuous $ \mathbb{R} ^{d}$-valued paths parameterised by time on a compact interval $ [ 0,T ] $; we denote the set of such functions by $C([0,T],\mathbb{R} ^{d})$. We write $x_{s,t}:=x_{t}-x_{s}$ as a shorthand for the increments of a path. 
Using this notation, we define the uniform norm and the $p$-variation semi-norm of a path $x$ by \begin{equation} \label{e:defNorms} \qquad\Vert x\Vert _{\infty}:=\sup_{t\in [ 0,T ] } \vert x_{t}\vert,\qquad \Vert x\Vert _{p\mbox{-}\mathrm{var}; [ 0,T ] }:= \biggl( \sup _{\mathcal{D}} \sum_{[s,t] \in\mathcal{D}}\vert x_{s,t}\vert ^{p} \biggr) ^{1/p}, \end{equation} where the supremum in the second term runs over all partitions $\mathcal{D}$ of $[0,T]$. We will use the notation $C^{p\mbox{-}\mathrm{var}}( [ 0,T], \mathbb {R}^{d}) $ for the linear subspace of $C([0,T],\break \mathbb{R}^{d})$ consisting of the continuous paths that have finite $p$-varia\-tion. Of interest will also be the set of $\gamma $-H\"{o}lder continuous functions, denoted by $C^{\gamma}([0,T], \mathbb {R} ^{d})$, which consists of functions satisfying \[ \Vert x\Vert _{\gamma; [ 0,T ] }:=\sup_{0\leq s<t\leq T} \frac{\vert x_{s,t}\vert }{\vert t-s\vert ^{\gamma} }<\infty. \] For $s<t$ and $n\geq2$, consider the simplex $\Delta_{st}^{n}=\{(u_{1} ,\ldots,u_{n})\in [ s,t]^{n}; u_{1}<\cdots<u_{n}\} $, while the simplices over $[0,1]$ will simply be denoted by $\Delta^{n}$. A continuous map $\mathbf{x}\dvtx \Delta^{2}\rightarrow T^{N}(\mathbb{R}^{d})$ is called a multiplicative functional if for $s<u<t$ one has $\mathbf{x}_{s,t} =\mathbf{x}_{s,u}\otimes\mathbf{x}_{u,t}$. An important example arises from considering paths $x$ with finite variation: for $0<s<t$, we set \begin{equation} \mathbf{x}_{s,t}^{n}=\sum_{1\leq i_{1},\ldots,i_{n}\leq d} \biggl( \int _{\Delta_{st}^{n}}\,dx^{i_{1}}\cdots dx^{i_{n}} \biggr) e_{i_{1}}\otimes \cdots\otimes e_{i_{n}}, \label{eq:def-iterated-intg} \end{equation} where $\{e_{1},\ldots,e_{d}\}$ denotes the canonical basis of $\mathbb {R}^{d} $, and then define the \textit{signature} of $x$ as \[ S_{N}(x)\dvtx \Delta^{2}\rightarrow T^{N}\bigl( \mathbb{R}^{d}\bigr),\qquad (s,t)\mapsto S_{N}(x)_{s,t}:=1+ \sum_{n=1}^{N}\mathbf{x}_{s,t}^{n}. 
\] $S_{N}(x)$ will be our typical example of multiplicative functional. Let us also add the following two remarks: \begin{longlist}[(ii)] \item[(i)] A geometric rough path (see Definition~\ref{def:RP} below), as well as the signature of any smooth function, takes values in the strict subset $G^{N}(\mathbb{R}^{d})\subset T^{N}(\mathbb{R}^{d})$ given by the ``group-like elements'' \[ G^{N}\bigl(\mathbb{R}^{d}\bigr) = \exp^{\otimes} \bigl(L^{N}\bigl(\mathbb {R}^{d}\bigr) \bigr), \] where $L^{N}(\mathbb{R}^{d})$ is the linear span of all elements that can be written as a commutator of the type $a \otimes b - b\otimes a$ for two elements in $T^{N}(\mathbb{R}^{d})$. \item[(ii)] It is sometimes convenient to think of the indices $w=(i_{1} ,\ldots,i_{n})$ in \eqref{eq:def-iterated-intg} as words based on the alphabet $\{1,\ldots,d\}$. We shall then write $\mathbf{x}^{w}$ for the iterated integral $\int_{\Delta_{st}^{n}}\,dx^{i_{1}}\cdots dx^{i_{n}}$. \end{longlist} More generally, if $N\geq1$ we can consider the set of such group-valued paths \[ \mathbf{x}_{t}= \bigl( 1,\mathbf{x}_{t}^{1}, \ldots,\mathbf {x}_{t}^{N} \bigr) \in G^{N} \bigl( \mathbb{R}^{d} \bigr). \] Note that the group structure provides a natural notion of increment, namely $\mathbf{x}_{s,t}:=\mathbf{x}_{s}^{-1}\otimes\mathbf{x}_{t}$, and we can describe the set of ``norms'' on $G^{N} ( \mathbb{R} ^{d} ) $ which are homogeneous with respect to the natural scaling operation on the tensor algebra (see~\cite{FV} for definitions and details). One such example is the Carnot--Caratheodory (CC) norm (see \cite {FV}), which we denote by $\Vert \cdot\Vert _{\mathrm{CC}}$. The precise norm used is mostly irrelevant in finite dimensions because they are all equivalent. The subset of these so-called homogeneous norms which are symmetric and sub-additive (again, see \cite{FV}) gives rise to genuine metrics on $G^{N} ( \mathbb{R} ^{d} ) $, for example, $d_{\mathrm{CC}}$ in the case of the CC norm. 
In turn, these metrics give rise to a notion of homogenous $p$-variation metrics $d_{p\mbox{-}\mathrm{var}}$ on the set of $G^{N} ( \mathbb{R} ^{d} )$-valued paths. Using the CC norm for definiteness, we will use the following homogenous $p$-variation and $\gamma$-H\"{o}lder variation semi-norms: \begin{eqnarray}\label{homogeneous norm} \Vert \mathbf{x}\Vert _{p\mbox{-}\mathrm{var}; [ s,t ] } &=&\max_{i=1,\ldots, \lfloor p \rfloor } \biggl( \sup_{\mathcal{D}}\sum_{[s,t] \in\mathcal{D}} \Vert \mathbf{x}_{s,t}\Vert _{\mathrm{CC}}^{p} \biggr) ^{1/p}, \nonumber \\[-8pt] \\[-8pt] \nonumber \Vert \mathbf{x}\Vert _{\gamma, [ s,t ] } &=&\sup_{(u,v)\in\Delta_{st}^{2}} \frac{\Vert \mathbf{x}_{u,v}\Vert _{\mathrm{CC}}}{\vert v-u\vert ^{\gamma}}, \nonumber \end{eqnarray} where the supremum over $\mathcal{D}$ is as in \eqref{e:defNorms}. We will also use some metrics on path spaces which are not homogenous. The most important will be the following: \begin{equation} \mathcal{N}_{\mathbf{x,}\gamma;[s,t]}:=\sum_{k=1}^{N} \sup_{(u,v)\in \Delta _{st}^{2}}\frac{|\mathbf{x}_{u,v}^{k}|_{ ( \mathbb{R} ^{d} ) ^{\otimes k}}}{|v-u|^{k\gamma}}, \label{inhomogeneous} \end{equation} which will be written simply as $\mathcal{N}_{\mathbf{x,}\gamma}$ when the interval $ [ s,t ] $ is clear from the context. \begin{definition} \label{def:RP} The space of weakly geometric $p$-rough paths [denoted $WG\Omega_{p}(\mathbb{R}^{d})$] is the set of paths $\mathbf {x}\dvtx \Delta ^{2}\rightarrow G^{\lfloor p\rfloor}( \mathbb{R} ^{d}) $ such that (\ref{homogeneous norm}) is finite. \end{definition} We will also work with the space of geometric $p$-rough paths, which we denote by $G\Omega_{p}( \mathbb{R}^{d}) $, defined as the $d_{p\mbox{-}\mathrm{var}} $-closure of \[ \bigl\{ S_{ \lfloor p \rfloor} ( x ) \dvtx x\in C^{1\mbox{-}\mathrm{var}} \bigl( [ 0,T ] , \mathbb{R} ^{d} \bigr) \bigr\}. 
\] Analogously, if $\gamma>0$ and $N=[1/\gamma]$ we define $C^{0,\gamma }([0,T];G^{N}(\mathbb{R}^{d}))$ to be the linear subspace of $G\Omega_{N}( \mathbb{R}^{d}) $ consisting of paths $\mathbf{x}\dvtx \Delta ^{2}\rightarrow G^{N}(\mathbb{R}^{d})$ such that \[ \lim_{n\rightarrow\infty}\bigl\Vert\mathbf{x}-S_{N}(x_{n}) \bigr\Vert _{\gamma ;[0,T]}=0 \] for some sequence $ ( x_{n} ) _{n=1}^{\infty}\subset C^{\infty }([0,T];\mathbb{R}^{d})$. In the following, we will consider RDEs driven by paths $\mathbf{x}$ in $WG\Omega_{p}(\mathbb{R}^{d}) $, along a collection of vector fields $V= ( V_{1},\ldots,V_{d} ) $ on $\mathbb{R}^{e}$, as well as a deterministic drift along $V_{0}$. From the point of view of existence and uniqueness results, the appropriate way to measure the regularity of the $V_{i}$'s turns out to be the notion of Lipschitz-$\gamma$ (short: Lip-$\gamma$) in the sense of Stein \cite{FV,LCL}. This notion provides a norm on the space of such vector fields (the Lip-$\gamma$ norm), which we denote $\vert \cdot\vert _{\mathrm{Lip}\mbox{-}\gamma}$. For the collection $V$ of vector fields, we will often make use of the shorthand \[ \vert V\vert _{\mathrm{Lip}\mbox{-}\gamma}=\max_{i=1,\ldots ,d}\vert V_{i}\vert _{\mathrm{Lip}\mbox{-}\gamma}, \] and refer to the quantity $\vert V\vert _{\mathrm{Lip}\mbox{-}\gamma}$ as the Lip-$\gamma$ norm of $V$. A theory of such Gaussian rough paths has been developed by a succession of authors \cite{CQ,FV07,CFV,FO10} and we will mostly work within their framework. To be more precise, we will assume that $X_{t}= ( X_{t} ^{1},\ldots,X_{t}^{d} ) $ is a continuous, centred (i.e., mean zero) Gaussian process with i.i.d. components on a complete probability space $ ( \Omega,\mathcal{F},P ) $. Let $\mathcal{W}=C([ 0,T], \mathbb{R}^{d})$ and suppose that $ ( \mathcal{W},\mathcal {H},\mu ) $ is the abstract Wiener space associated with $X$. 
The function $R\dvtx [ 0,T ] \times [ 0,T ] \rightarrow \mathbb{R} $ will denote the covariance function of any component of $X$, that is, \[ R(s,t) =E \bigl[ X_{s}^{1}X_{t}^{1} \bigr]. \] Following \cite{FV07}, we recall some basic assumptions on the covariance function of a Gaussian process which are sufficient to guarantee the existence of a natural lift of a Gaussian rough process to a rough path. We recall the notion of \textit{rectangular increments} of $R$ from \cite {FVupdate}; these are defined by \[ R\pmatrix{ s,t \vspace*{2pt}\cr u,v }:=E \bigl[ \bigl( X_{t}^{1}-X_{s}^{1} \bigr) \bigl( X_{v} ^{1}-X_{u}^{1} \bigr) \bigr]. \] The existence of a lift for $X$ is ensured by insisting on a sufficient rate of decay for the correlation of the increments. This is captured, in a very general way, by the following two-dimensional $\rho$-variation constraint on the covariance function. \begin{definition} \label{rho var}Given $1\leq\rho<2$, we say that $R$ has \textit{finite (two-dimen\-sional) }$\rho$\textit{-variation} if \begin{equation} V_{\rho} \bigl( R; [ 0,T ] \times [ 0,T ] \bigr) ^{\rho}:=\sup _{\mathcal{D},\mathcal{D}'}\mathop{\sum_{[s,t] \in\mathcal{D}}}_{[s',t'] \in\mathcal{D} '}\biggl\vert R \pmatrix{ s,t \vspace*{2pt}\cr s',t'} \biggr\vert ^{\rho}<\infty. \label{2dvariation} \end{equation} \end{definition} If a process has a covariance function with finite $\rho$-variation for $\rho\in [1,2)$ in the sense of Definition~\ref{rho var}, \cite{FV07}, Theorem~35, asserts that $ ( X_{t} ) _{t\in [ 0,T ] } $ lifts to a geometric $p$-rough path provided $p>2\rho$. Moreover, there is a unique \textit{natural lift} which is the limit, in the $d_{p\mbox{-}\mathrm{var}}$-induced topology, of the canonical lift of piecewise linear approximations to $X$. A related take on this notion is obtained by enlarging the set of partitions of $ [ 0,T ] ^{2}$ over which the supremum is taken in (\ref{2dvariation}). 
Recall from \cite{FVupdate} that a \textit{rectangular partition }of the square $ [ 0,T ] ^{2}$ is a collection $ \{ A_{i}\dvtx i\in I \} $ of rectangles of the form $A_{i}= [ s_{i} ,t_{i} ] \times [ u_{i},v_{i} ] $, whose union equals $ [ 0,T ] ^{2}$ and which have pairwise disjoint interiors. The collection of rectangular partitions is denoted $\mathcal{P}_{\mathrm{rec}} ( [ 0,T ] ^{2} ) $, and $R$ is said to have \textit{controlled }$\rho$\textit{-variation} if \begin{equation} \vert R\vert _{\rho\mbox{-}\mathrm{var}; [ 0,T ] ^{2} }^{\rho}:=\mathop{\sup _{ \{ A_{i}\dvtx i\in I \} \in\mathcal {P} _{\mathrm{rec}} ( [ 0,T ] ^{2} ) }}_{A_{i}= [ s_{i},t_{i} ] \times [ u_{i},v_{i} ] }\sum_{i,j} \biggl\vert R\pmatrix{ s_{i},t_{i} \vspace*{2pt}\cr u_{i},v_{i}} \biggr\vert ^{\rho}<\infty. \label{controlled} \end{equation} We obviously have $V_{\rho}( R; [ 0,T ] ^{2}) \leq\vert R\vert _{\rho\mbox{-}\mathrm{var}; [ 0,T ] ^{2}}$, and it is shown in \cite{FVupdate} that for every $\varepsilon>0$ there exists $c_{p,\varepsilon}$ such that $\vert R\vert _{\rho\mbox{-}\mathrm{var}; [ 0,T ] ^{2}} \leq c_{p,\varepsilon} V_{\rho+\varepsilon}( R; [ 0,T ] ^{2})$. The main advantage of the quantity~(\ref{controlled}) compared to (\ref{2dvariation}) is that the map \[ [ s,t ] \times [ u,v ] \mapsto\vert R\vert _{\rho\mbox{-}\mathrm{var}; [ s,t ] \times [ u,v ] }^{\rho} \] is a 2D control in the sense of \cite{FVupdate}. \begin{definition} \label{hol rho var}Given $1\leq\rho<2$, we say that $R$ has \textit{finite (two-dimen\-sional) H\"{o}lder-controlled }$\rho$\textit{-variation} if $V_{\rho} ( R; [ 0,T ] \times [ 0,T ] ) <\infty$, and if there exists $C>0$ such that for all $0\leq s\leq t \leq T$ we have \begin{equation} V_{\rho} \bigl( R; [ s,t ] \times [ s,t ] \bigr) \leq C ( t-s ) ^{1/\rho}. \end{equation} \end{definition} \begin{remark} \label{remark f1} This is (essentially) without loss of generality compared to Definition \ref{rho var}. 
To see this, we note that if $R$ also has controlled $\rho$-variation in the sense of (\ref{controlled}), then we can introduce the deterministic time-change $\tau\dvtx [0,T]\rightarrow[0,T]$ given by $\tau =\sigma^{-1}$, where $\sigma\dvtx [ 0,T ] \rightarrow [ 0,T ] $ is the strictly increasing function defined by \begin{equation} \sigma ( t ):=\frac{T\vert R\vert _{\rho \mbox{-}\mathrm{var}; [ 0,t ] ^{2}}^{\rho}}{\vert R\vert _{\rho\mbox{-}\mathrm{var}; [ 0,T ] ^{2}}^{\rho}}. \label{parametrisation} \end{equation} It is then easy to see that $\tilde{R}$, the covariance function of $\tilde {X}=X\circ\tau$, is H\"{o}lder-controlled in the sense of Definition \ref{hol rho var}. \end{remark} Two important consequences of assuming that $R$ has finite H\"{o}lder-cont\-rolled $\rho$-variation are: (i) $\mathbf{X}$ has $1/p$-H\"{o}lder sample paths for every $p>2\rho$, and (ii) by using \cite{FV}, Theorem~15.33, we can deduce that \begin{equation} E \bigl[ \exp \bigl( \eta\| \mathbf{X}\| _{1/p; [ 0,T ] }^{2} \bigr) \bigr] <\infty\qquad\mbox{for some } \eta>0, \label{gauss} \end{equation} that is, $\mathbf{\|X\|}_{1/p; [ 0,T ] }^{2}$ has a Gaussian tail. The mere existence of this lift is unfortunately not sufficient to apply the usual concepts of Malliavin calculus. In addition, it will be important to require a complementary (Young) regularity of the sample paths of $X$ and the elements of its Cameron--Martin space. The following assumption captures both of these requirements. \begin{condition} \label{standing assumption} Let $ ( X_{t} ) _{t\in [ 0,T ] }= ( X_{t}^{1},\ldots,X_{t}^{d} ) _{t\in [ 0,T ] } $ be a Gaussian process with i.i.d. components. Suppose that the covariance function has finite H\"{o}lder-controlled $\rho $-variation for some $\rho\in [1,2)$. 
We will assume that $X$ has a natural lift to a geometric p-rough path and that $\mathcal{H}$, the Cameron--Martin space associated with $X$, has Young-complementary regularity to $X$ in the following sense: for some $q\geq1$ satisfying $1/p+1/q>1$, we have the continuous embedding \[ \mathcal{H}\hookrightarrow C^{q\mbox{-}\mathrm{var}} \bigl( [ 0,T ], \mathbb{R} ^{d} \bigr). \] \end{condition} The following theorem appears in \cite{FV07} as Proposition 17 (cf. also the recent note~\cite{FVupdate}); it shows how the assumption $V_{\rho } ( R; [ 0,T ] ^{2} ) <\infty$ allows us to embed $\mathcal{H}$ in the space of continuous paths with finite $\rho$ variation. The result is stated in~\cite{FV07} for one-dimensional Gaussian processes, but the generalisation to arbitrary finite dimensions is straightforward. \begin{theorem}[({\cite{FV07}})]\label{CM_pVar_embedding}Let $ ( X_{t} ) _{t\in [ 0,T ] }= ( X_{t}^{1},\ldots,X_{t}^{d} ) _{t\in [ 0,T ] }$ be a mean-zero Gaussian process with independent and identically distributed components. Let $R$ denote the covariance function of (any) one of the components. Then if $R$ is of finite $\rho$-variation for some $\rho\in [1,2)$ we can embed $\mathcal{H}$ in the space $C^{\rho\mbox{-}\mathrm{var}} ( [ 0,T ], \mathbb{R} ^{d} ) $, in fact, \begin{equation} \vert h\vert _{\mathcal{H}}\geq\frac{\vert h\vert _{\rho\mbox{-}\mathrm{var}; [ 0,T ] }}{\sqrt{V_{\rho} ( R; [ 0,T ] \times [ 0,T ] ) }}. \label {embedding} \end{equation} \end{theorem} \begin{remark}[{(\cite{FV3})}]\label{fBM embedding} Writing $\mathcal{H}^{H}$ for the Cameron--Martin space of fBm for $H$ in $ ( 1/4,1/2 ) $, the variation embedding in \cite{FV3} gives the stronger result that \[ \mathcal{H}^{H}\hookrightarrow C^{q\mbox{-}\mathrm{var}} \bigl( [ 0,T ] , \mathbb{R} ^{d} \bigr) \qquad\mbox{for any } q> ( H+1/2 ) ^{-1}. 
\] \end{remark} Theorem $\ref{CM_pVar_embedding}$ and Remark \ref{fBM embedding} provide sufficient conditions for a process to satisfy the fundamental Condition \ref{standing assumption}, which we summarise in the following remark. \begin{remark} \label{remark f2} As already observed, the requirement that $R$ has finite 2D $\rho$-variation, for some $\rho\in [1,2)$, implies both that $X$ lifts to a geometric $p$-rough path for all $p>2\rho$ and also that $\mathcal{H} \hookrightarrow C^{\rho\mbox{-}\mathrm{var}} ( [ 0,T ], \mathbb{R} ^{d} ) $ (Theorem \ref{CM_pVar_embedding}). Complementary regularity of $\mathcal{H}$ in the above condition thus can be obtained by $\rho\in [1,3/2)$, which covers for example BM, the OU process and the Brownian bridge (in each case with $\rho=1$). When $X$ is fBm, we know that $X$ admits a lift to $G\Omega_{p} ( \mathbb{R} ^{d} ) $ if $p>1/H$, and Remark \ref{fBM embedding} therefore ensures the complementary regularity of $X$ and $\mathcal{H}$ if $H>1/4$. \end{remark} \section{Statement of the main theorem} \label{main thm} We will begin the section by laying out and providing motivation for the assumptions we impose on the driving Gaussian signal $X$. We will then end the section with a statement of the central theorem of this paper, which is a version of H\"{o}rmander's theorem for Gaussian RDEs. First, we give some notation which will feature repeatedly. \begin{notation} We define \[ \mathcal{F}_{a,b}:=\sigma \bigl( X_{v,v^{\prime}}\dvtx a\leq v\leq v^{\prime }\leq b \bigr) \] to be the $\sigma$-algebra generated by the increments of $X$ between times $a$ and~$b$. \end{notation} The following condition aims to capture the nondegeneracy of $X$; it will feature prominently in the sequel. \begin{condition}[(Nondeterminism-type condition)]\label{nondeterm}Let $ ( X_{t} ) _{t\in [ 0,T ] }$ be a continuous Gaussian process. 
Suppose that the covariance function $R$ of $X$ has finite H\"{o}lder-controlled $\rho $-variation for some $\rho$ in $[1,2)$. We assume that there exists $\alpha>0$ such that \begin{equation} \inf_{0\leq s<t\leq T}\frac{1}{ ( t-s ) ^{\alpha}} \operatorname{Var} ( X_{s,t}|\mathcal{F}_{0,s}\vee\mathcal {F} _{t,T} ) >0. \label{index} \end{equation} Whenever this condition is satisfied, we will call $\alpha$ the \emph{index of nondeterminism} if it is the smallest value of $\alpha$ for which (\ref{index}) is true. \end{condition} \begin{remark} It is worthwhile making a number of comments. First, notice that the conditional variance \[ \operatorname{Var} ( X_{s,t}|\mathcal{F}_{0,s}\vee \mathcal {F} _{t,T} ) \] is actually deterministic by Gaussian considerations. Then for any $ [ s,t ] \subseteq [ 0,S ] \subseteq [ 0,T ] $, the law of total variance can be used to show that \[ \operatorname{Var} ( X_{s,t}|\mathcal{F}_{0,s}\vee \mathcal {F} _{t,S} ) \geq\operatorname{Var} ( X_{s,t}|\mathcal {F}_{0,s} \vee \mathcal{F}_{t,T} ). \] It follows that if (\ref{index}) holds on $ [ 0,T ] $, then it will also hold on any interval $ [ 0,S ] \subseteq [ 0,T ] $ provided $S>0$. \end{remark} Note that Condition \ref{nondeterm} implies the existence of $c>0$ such that \[ \operatorname{Var} ( X_{s,t}|\mathcal{F}_{0,s}\vee \mathcal {F} _{t,T} ) \geq c ( t-s ) ^{\alpha}. \] This is reminiscent of (but not equivalent to) other notions of nondeterminism which have been studied in the literature. For example, it should be compared to the similar notion introduced in \cite{berman}, where it was exploited to show the existence of a smooth local time function (see also the subsequent work of Cuzick et al.~\cite{cuzick2} and~\cite{cuz}). 
In the present context, Condition \ref{nondeterm} is also related to the following condition: for any $f$ of finite $p$-variation over $ [ 0,T ] $ \begin{equation} \int_{0}^{T}f_{s}\,dh_{s}=0\qquad \forall h\in\mathcal{H } \quad\Rightarrow\quad f=0\qquad \mbox{a.e. on $ [ 0,T ] $.} \label{CFV nondeg} \end{equation} This has been used in \cite{CF} to prove the existence of the density for Gaussian RDEs. In some sense, our Condition \ref{nondeterm} is the quantitative version of (\ref{CFV nondeg}). In this paper, when we speak of a nondegenerate Gaussian process $ ( X_{t} ) _{t\in [ 0,T ] }$ we will mean the following. \begin{definition} \label{definition nondegeneracy}Let $ ( X_{t} ) _{t\in [ 0,T ] }$ be a continuous, real-valued Gaussian process. For any partition $D= \{ t_{i}\dvtx i=0,1,\ldots,n \}$ of $ [ 0,T ]$, let $(Q_{ij}^{D})_{1\leq i,j\leq n}$ denote the $n\times n$ matrix given by the covariance matrix of the increments of $X$ along $D$, that is, \begin{equation} Q_{ij}^{D}=R\pmatrix{ t_{i-1},t_{i} \vspace*{2pt}\cr t_{j-1},t_{j}}. \label{increment matrix} \end{equation} We say that $X$ is nondegenerate if $Q^{D}$ is positive definite for every partition $D$ of $ [ 0,T ] $. \end{definition} \begin{remark} An obvious example of a ``degenerate'' Gaussian process is a bridge process which returns to zero in $ [ 0,T ] $. This is plainly ruled out by an assumption of nondegeneracy. \end{remark} It is shown in \cite{CFV} that nondegeneracy is implied by (\ref{CFV nondeg} ). Thus, nondegeneracy is a weaker condition than (\ref{CFV nondeg}). It also has the advantage of being formulated more tangibly in terms of the covariance matrix. The next lemma shows that Condition~\ref{nondeterm} also implies that the process is nondegenerate. \begin{lemma} Let $ ( X_{t} ) _{t\in [ 0,T ] }$ be a continuous Gaussian process which satisfies Condition \ref{nondeterm} then $X$ is nondegenerate. 
\end{lemma} \begin{pf} Fix a partition $D$ of $ [ 0,T ] $, and denote the covariance matrix along this partition by $Q$ with entries as in (\ref{increment matrix} ). If $Q$ is not positive definite, then for some nonzero vector $\lambda= ( \lambda_{1},\ldots,\lambda_{n} ) \in \mathbb{R} ^{n}$ we have \begin{equation} 0=\lambda^{T}Q\lambda=E \Biggl[ \Biggl( \sum _{i=1}^{n}\lambda_{i} X_{t_{i-1},t_{i}} \Biggr) ^{2} \Biggr]. \label{an} \end{equation} Suppose, without loss of generality, that $\lambda_{j}\neq0$. Then from (\ref{an}), we can deduce that \[ X_{t_{j-1},t_{j}}=-\sum_{i\neq j}^{n} \frac{\lambda_{i}}{\lambda_{j}} X_{t_{i-1},t_{i}} \] with probability one. This immediately implies that \[ \operatorname{Var} ( X_{t_{j-1},t_{j}}|\mathcal {F}_{0,t_{j-1}} \vee\mathcal{F}_{t_{j},T} ) =0, \] which contradicts (\ref{index}). \end{pf} A crucial step in the proof of the main theorem is to establish lower bounds on the eigenvalues of the Malliavin covariance matrix in order to obtain moment estimates for its inverse. In the setting we have adopted, it transpires that these eigenvalues can be bounded from below by some power of the 2D Young integral: \begin{equation} \int_{ [ 0,T ] ^{2}}f_{s}f_{t}\,dR ( s,t ) \label{2d} \end{equation} for some suitable (random) function $f\in C^{p\mbox{-}\mathrm{var}} ( [ 0,T ], \mathbb{R} ^{d} ) $. By considering the Riemann sum approximations to (\ref{2d}), the problem of finding a lower bound can be re-expressed in terms of solving a sequence of finite-dimensional constrained quadratic programming problems. By considering the dual of these problems, we can simplify the constraints which appear considerably; they become nonnegativity constraints, which are much easier to handle. Thus, the dual problem has an explicit solution subject to a dual feasibility condition. The following condition is what emerges as the limit of the dual feasibility conditions for the discrete approximations. 
\begin{condition} \label{cond dom}Let $ ( X_{t} ) _{t\in [ 0,T ] }$ be a continuous, real-valued Gaussian process. We will assume that $X$ has nonnegative conditional covariance in that for every $ [ u,v ] \subseteq [ s,t ] \subseteq [ 0,S ] \subseteq [ 0,T ] $ we have \begin{equation} \operatorname{Cov} ( X_{s,t},X_{u,v}| \mathcal{F}_{0,s}\vee \mathcal{F}_{t,S} ) \geq0. \label{con dom eq} \end{equation} \end{condition} In Section~\ref{interpol}, we will prove a novel interpolation inequality. The significance of Condition \ref{cond dom} will become clearer when we work through the details of that section. For the moment, we content ourselves with an outline. First, for a finite partition $D$ of the interval $ [ 0,T ]$, one can consider the discretisation of the process $X_{t}$ conditioned on the increments in $D \cap ( [ 0,s ] \cup [ t,T ] ) $. Let $Q^{D}$ be the corresponding covariance matrix of the increments [see $ ( \ref{increment matrix} )$]. Then the conditional covariance $\operatorname{Cov} ( X_{s,t}^{D},X_{u,v}^{D}|\mathcal{F}_{0,s}^{D}\vee\mathcal {F}_{t,T}^{D} ) $ of the discretised process can be characterised in terms of a Schur complement $\Sigma$ of the matrix $Q^{D}$. Using this relation, the condition \[ \operatorname{Cov} \bigl( X_{s,t}^{D},X_{u,v}^{D}| \mathcal {F}_{0,s}^{D} \vee\mathcal{F}_{t,T}^{D} \bigr) \geq0 \] is precisely what ensures that the row sums for $\Sigma$ are nonnegative. Conversely, if for any finite partition $D$ all Schur complements of the matrix $Q^{D}$ have nonnegative row sums, then Condition~\ref{cond dom} is satisfied. This relation motivates an alternative sufficient condition that implies Condition~\ref{cond dom}, which has the advantage that it may be more readily verified for a given Gaussian process. 
In order to state the condition, recall that an $n\times n$ real matrix $Q$ is diagonally dominant if \begin{equation} Q_{ii}\geq\sum_{j\neq i}\vert Q_{ij}\vert \qquad\mbox{for every $i\in \{ 1,2,\ldots,n \} $.} \label{diag} \end{equation} \begin{condition} \label{diagonal dominance}Let $ ( X_{t} ) _{t\in [ 0,T ] } $ be a continuous real-valued Gaussian process. For every $ [ 0,S ] \subseteq [ 0,T ]$, we assume that $X$ has diagonally dominant increments on $ [ 0,S ] $. By this, we mean that for every partition $D= \{ t_{i}\dvtx i=0,1,\ldots,n \} $ of $ [ 0,S ] $, the $n\times n$ matrix $ ( Q_{ij}^{D} ) _{1\leq i,j\leq n}$ with entries \[ Q_{ij}^{D}=E [ X_{t_{i-1},t_{i}}X_{t_{j-1},t_{j}} ] =R \pmatrix{ t_{i-1},t_{i} \vspace*{2pt}\cr t_{j-1},t_{j}} \] is diagonally dominant. \end{condition} Diagonal dominance is obviously in general a stronger assumption than requiring that a covariance matrix has positive row sums. Consequently, Condition \ref{diagonal dominance} is particularly useful for negatively correlated processes, when diagonal dominance of the increments and positivity of row sums are the same. The condition can then be expressed succinctly as \[ E [ X_{0,S}X_{s,t} ] \geq0\qquad \forall [ s,t ] \subseteq [ 0,S ] \subseteq [ 0,T ]. \] In fact, it turns out that Condition \ref{diagonal dominance} implies Condition \ref{cond dom}. This is not obvious a priori, and ultimately depends on two nice structural features. The first is the observation from linear algebra that the property of diagonal dominance is preserved under taking Schur complements (see \cite{zhang} for a proof of this). The second results from the interpretation of the Schur complement (in the setting of Gaussian vectors) as the covariance matrix of a certain conditional distribution. We will postpone the proof of this until Section~\ref{interpol} when these properties will be used extensively. 
The final condition we will impose is classical, namely H\"{o}rmander's condition on the vector fields defining the RDE. \begin{condition}[(H\"{o}rmander)]\label{horm}We assume that \begin{eqnarray}\label{Lie} &&\operatorname{span} \bigl\{ V_{1},\ldots,V_{d}, [ V_{i},V_{j} ], \bigl[ V_{i}, [ V_{j},V_{k} ] \bigr],\ldots\dvtx \nonumber \\[-8pt] \\[-8pt] \nonumber &&\hspace*{76pt}\qquad i,j,k,\ldots =0,1,\ldots,d \bigr\} |_{y_{0}}=\mathcal{T}_{y_{0}} \mathbb{R} ^{e} \cong \mathbb{R} ^{e}. \end{eqnarray} \end{condition} We are ready to formulate our main theorem. \begin{theorem} \label{main theorem}Let $ ( X_{t} ) _{t\in [ 0,T ] }= ( X_{t}^{1},\ldots,X_{t}^{d} ) _{t\in [ 0,T ] }$ be a continuous Gaussian process, with i.i.d. components associated to the abstract Wiener space $ ( \mathcal{W},\mathcal{H},\mu ) $. Assume that some (and hence every) component of $X$ satisfies: \begin{longlist}[(1)] \item[(1)] Condition \ref{standing assumption}, for some $\rho\in [1,2)$; \item[(2)] Condition \ref{nondeterm}, with index of nondeterminacy $\alpha<2/\rho$; \item[(3)] Condition \ref{cond dom}, that is, it has nonnegative conditional covariance. \end{longlist} Fix $p>2\rho$, and let $\mathbf{X\in}G\Omega_{p} ( \mathbb{R} ^{d} ) $ denote the canonical lift of $X$ to a Gaussian rough path. Suppose $V= ( V_{1},\ldots,V_{d} ) $ is a collection of $C^{\infty }$-bounded vector fields on $ \mathbb{R} ^{e}$, and let $ ( Y_{t} ) _{t\in [ 0,T ] }$ be the solution to the RDE \[ dY_{t}=V ( Y_{t} ) \,d\mathbf{X}_{t}+V_{0} ( Y_{t} ) \,dt,\qquad Y ( 0 ) =y_{0}. \] Assume that the collection $ ( V_{0},V_{1},\ldots,V_{d} ) $ satisfies H\"{o}rmander's condition, Condition \ref{horm}, at the starting point $y_{0}$. Then the random variable $Y_{t}$ has a smooth density with respect to Lebesgue measure on $ \mathbb{R} ^{e}$ for every $t\in(0,T]$. 
\end{theorem} \section{Examples} \label{examples section} In this section, we demonstrate how the conditions on $X$ we introduced in the last section can be checked for a number of well-known processes. We choose to focus on three particular examples: fractional Brownian motion (fBm) with Hurst parameter $H>1/4$, the Ornstein--Uhlenbeck (OU) process and the fractional Brownian bridge (fBb) with Hurst parameter $1/3<H\leq1/2$. Together, these encompass a broad range of Gaussian processes that one encounters in practice. Of course, there are many more examples, but these should be checked on a case-by-case basis by analogy with our presentation for these core examples. We first remark that Condition~\ref{standing assumption} is straightforward to check in all these cases (see, e.g., \cite{FV} and \cite{CFV}). Proving that the fBb (returning at $T^{\prime}>0$) with $H>1/3$ satisfies Condition \ref{standing assumption} is a simple calculation in a similar style. We will now commence with a verification of the nondeterminism condition, that is, Condition~\ref{nondeterm}. \subsection{Nondeterminism-type condition} Recall that the Cameron--Martin space~$\mathcal{H}$ is defined to be the completion of the linear space of functions of the form \[ \sum_{i=1}^{n}a_{i}R ( t_{i},\cdot ),\qquad a_{i}\in \mathbb{R} \mbox{ and }t_{i}\in [ 0,T ], \] with respect to the inner product \[ \Biggl\langle\sum_{i=1}^{n}a_{i}R ( t_{i},\cdot ),\sum_{j=1} ^{m}b_{j}R ( s_{j},\cdot ) \Biggr \rangle_{\mathcal {H}}=\sum_{i=1}^{n}\sum _{j=1}^{m}a_{i}b_{j}R ( t_{i},s_{j} ). \] Some authors prefer instead to work with the set of step functions $\mathcal{E}$ \[ \mathcal{E=} \Biggl\{ \sum_{i=1}^{n}a_{i}1_{ [ 0,t_{i} ] }\dvtx a_{i} \in \mathbb{R} ,t_{i}\in [ 0,T ] \Biggr\}, \] equipped with the inner product \[ \langle1_{ [ 0,t ] },1_{ [ 0,s ] } \rangle _{\mathcal{\tilde{H}}}=R ( s,t ). \] If $\mathcal{\tilde{H}}$ denote the completion of $\mathcal{E}$ w.r.t. 
$ \langle \cdot,\cdot \rangle_{\mathcal{\tilde{H}}}$, then it is obvious that the linear map $\phi\dvtx \mathcal{E}\rightarrow\mathcal{H}$ defined by \begin{equation} \phi ( 1_{ [ 0,t ] } ) =R ( t,\cdot ) \label{isom} \end{equation} extends to an isometry between $\mathcal{\tilde{H}}$ and $\mathcal {H}$. We also recall that $\mathcal{\tilde{H}}$ is isometric to the Hilbert space $H^{1} ( Z ) \subseteq L^{2} ( \Omega,\mathcal {F},P ) $ which is defined to be the $\vert \cdot\vert _{L^{2} ( \Omega ) }$-closure of the set \[ \Biggl\{ \sum_{i=1}^{n}a_{i}Z_{t_{i}}\dvtx a_{i} \in \mathbb{R} ,t_{i}\in [ 0,T ], n\in \mathbb{N} \Biggr\}. \] In particular, we have that $\vert 1_{ [ 0,t ] }\vert _{\mathcal{\tilde{H}}}=\vert Z_{t}\vert _{L^{2} ( \Omega ) }$. We will now prove that Condition~\ref{nondeterm} holds whenever it is the case that $\mathcal{\tilde{H}}$ embeds continuously in $L^{q} ( [ 0,T ] ) $ for some $q\geq1$. Hence, Condition \ref{nondeterm} will simplify in many cases to showing that \[ |\tilde{h}|_{L^{q} [ 0,T ] }\leq C|\tilde{h}|_{\mathcal {\tilde {H}} } \] for some $C>0$ and all $\tilde{h}\in\mathcal{\tilde{H}}$. \begin{lemma} \label{CM embed general}Suppose $ ( Z_{t} ) _{t\in [ 0,T ] }$ is a continuous real-valued Gaussian processes. Assume that for some $q\geq1$ we have $\mathcal{\tilde{H}\hookrightarrow}L^{q} ( [ 0,T ] ) $. Then $Z$ satisfies Condition \ref{nondeterm} with index of nondeterminacy less than or equal to $2/q$, that is, \[ \inf_{0\leq s<t\leq T}\frac{1}{ ( t-s ) ^{2/q}} \operatorname{Var} ( Z_{s,t}|\mathcal{F}_{0,s}\vee\mathcal {F} _{t,T} ) >0. \] \end{lemma} \begin{pf} Fix $ [ s,t ] \subseteq [ 0,T ] $ and for brevity let $\mathcal{G}$ denote the $\sigma$-algebra $\mathcal{F}_{0,s}\vee \mathcal{F}_{t,T}$. 
Then, using the fact that $\operatorname{Var} ( Z_{s,t}|\mathcal{G} ) $ is deterministic and positive, we have \begin{eqnarray*} \operatorname{Var} ( Z_{s,t}|\mathcal{G} ) &=& \bigl\Vert \operatorname{Var} ( Z_{s,t}| \mathcal{G} ) \bigr\Vert _{L^{2} ( \Omega ) } = E \bigl[ E \bigl[ \bigl( Z_{s,t}-E [ Z_{s,t}|\mathcal{G} ] \bigr) ^{2}| \mathcal{G} \bigr] ^{2} \bigr] ^{1/2} \\ &= &E \bigl[ \bigl( Z_{s,t}-E [ Z_{s,t}|\mathcal{G} ] \bigr) ^{2} \bigr] = \bigl\Vert Z_{s,t}-E [ Z_{s,t}|\mathcal{G} ] \bigr\Vert _{L^{2} ( \Omega ) }^{2} \\ &= &\inf_{Y\in L^{2} ( \Omega,\mathcal{G},P ) } \Vert Z_{s,t}-Y\Vert _{L^{2} ( \Omega ) }^{2}. \end{eqnarray*} We can therefore find a sequence of random variables $ ( Y_{n} ) _{n=1}^{\infty}\subset L^{2} ( \Omega,\mathcal {G},P ) $ such that \begin{equation} \Vert Z_{s,t}-Y_{n}\Vert _{L^{2} ( \Omega ) }^{2}=E \bigl[ ( Z_{s,t}-Y_{n} ) ^{2} \bigr] \downarrow \operatorname{Var} ( Z_{s,t}|\mathcal{G} ). \label {approx} \end{equation} Moreover, because $E [ Z_{s,t}|\mathcal{G} ] $ belongs to the closed subspace $H^{1} ( Z ) $, we can assume that $Y_{n}$ has the form \[ Y_{n}=\sum_{i=1}^{k_{n}}a_{i}^{n}Z_{t_{i}^{n},t_{i+1}^{n}} \] for some sequence of real numbers $ \{ a_{i}^{n}\dvtx i=1,\ldots,k_{n} \}$ and a collection of subintervals \[ \bigl\{ \bigl[ t_{i}^{n},t_{i+1}^{n} \bigr] \dvtx i=1,\ldots,k_{n} \bigr\}, \] which satisfy $ [ t_{i}^{n},t_{i+1}^{n} ] \subseteq [ 0,s ] \cup [ t,T]$ for every $n\in \mathbb{N} $. We now exhibit a lower bound for $\Vert Z_{s,t}-Y_{n}\Vert _{L^{2} ( \Omega ) }^{2}$ which is independent of $n$ [and hence from (\ref{approx}) will apply also to $\operatorname{Var} ( Z_{s,t}|\mathcal{G} ) $]. 
Let us note that the isometry between $H^{1} ( Z ) $ and $\mathcal{\tilde{H}}$ yields \begin{equation} \Vert Z_{s,t}-Y_{n}\Vert _{L^{2} ( \Omega ) }^{2}=| \tilde {h}_{n}|_{\mathcal{\tilde{H}}}^{2}, \label{isom1} \end{equation} where \[ \tilde{h}_{n} ( \cdot ):=\sum_{i=1}^{k_{n}}a_{i}^{n}1_{ [ t_{i}^{n},t_{i+1}^{n} ] } ( \cdot ) +1_{ [ s,t ] } ( \cdot ) \in\mathcal{\tilde H}. \] The embedding $\mathcal{\tilde{H}\hookrightarrow}L^{q} ( [ 0,T ] ) $ then shows that \[ |\tilde{h}_{n}|_{\mathcal{\tilde{H}}}^{2}\geq c|\tilde {h}_{n}|_{L^{q} [ 0,T ] }^{2}\geq c ( t-s ) ^{2/q}. \] The result follows immediately from this together with (\ref{approx}) and (\ref{isom1}). \end{pf} Checking that $\mathcal{\tilde{H}}$ embeds continuously in a suitable $L^{q} ( [ 0,T ] ) $ space is something which is readily done for our three examples. This is what the next lemma shows. \begin{lemma} \label{CM embed}If $ ( Z_{t} ) _{t\in [ 0,T ] }$ is fBm with Hurst index $H\in ( 0,1/2 ) $ and $q\in [1,2)$, then $\mathcal{\tilde{H}} \hookrightarrow L^{q} ( [ 0,T ] ) $. If $ ( Z_{t} ) _{t\in [ 0,T ] }$ is the (centred) Ornstein--Uhlenbeck process or the Brownian bridge (returning to zero after time $T$) then $\mathcal{\tilde{H}}$ $\hookrightarrow L^{2} ( [ 0,T ] ) $. \end{lemma} \begin{pf} The proof for each of the three examples has the same structure. We first identify an isometry $K^{\ast}$ which maps $\mathcal{\tilde{H}}$ surjectively onto $L^{2} [ 0,T ] $. (The operator $K^{\ast}$ is of course different for the three examples.) We then prove that the inverse $ ( K^{\ast} ) ^{-1}$ is a bounded linear operator when viewed as a map from $L^{2} [ 0,T ] $ into $L^{q} [ 0,T ] $. For fBm this is shown via the Hardy--Littlewood lemma (see \cite{nualart}). For the OU process and the Brownian bridge, it follows from a direct calculation on the operator $K^{\ast}$. 
Equipped with this fact, we can deduce that \begin{eqnarray*} |\tilde{h}|_{L^{q} [ 0,T ] } &=&\bigl\vert \bigl( K^{\ast } \bigr) ^{-1}K^{\ast}\tilde{h}\bigr\vert _{L^{q} [ 0,T ] } \leq\bigl \vert \bigl( K^{\ast} \bigr) ^{-1}\bigr\vert _{L^{2}\rightarrow L^{q}}\bigl\vert K^{\ast}\tilde{h}\bigr\vert _{L^{2} [ 0,T ] } \\ & =&\bigl\vert \bigl( K^{\ast} \bigr) ^{-1} \bigr\vert _{L^{2}\rightarrow L^{q}}|\tilde{h}|_{\mathcal{\tilde{H}}}, \end{eqnarray*} which completes the proof. \end{pf} \begin{remark} We can verify the condition in the case of the fBb by more direct means. One representation of the fBb is of the form \begin{equation} X_{t}=B_{t}-a_{t}B_{T}\qquad\mbox{with } a_{t}=\frac{t^{2H}+T^{2H} -(T-t)^{2H}}{2T^{2H}}. \label{eq:anticip-rep-fbb} \end{equation} Then \begin{eqnarray*} \operatorname{Var} ( X_{s,t}|\mathcal{F}_{0,s}\vee \mathcal {F} _{t,T} ) &\geq&\operatorname{Var} ( X_{s,t}|\mathcal {F}_{0,s} \vee \mathcal{F}_{t,T}, B_{T} ) =\operatorname{Var} \bigl( B_{s,t}|\mathcal{F}_{0,s}^{B}\vee \mathcal{F}_{t,T}^{B} \bigr)\\ & \asymp& c r^{2H}. \end{eqnarray*} \end{remark} As an immediate corollary of the last two lemmas, we can conclude that the (centred) Ornstein--Uhlenbeck process and the Brownian bridge (returning to zero after time $T$) both satisfy Condition~\ref{nondeterm} with index of nondeterminism no greater than unity. In the case of fBm $(Z_{t}^{H} )_{t\in [ 0,T ] }$, the scaling properties of $Z^{H}$ enable us to say more about the nondeterminism index than can be obtained by an immediate application of Lemmas \ref{CM embed general} and \ref{CM embed}. To see this, note that for fixed $ [ s,t ] \subseteq [ 0,T ] $ we can introduce a new process \[ \tilde{Z}_{u}^{H}:= ( t-s ) ^{-H}Z_{u ( t-s ) }^{H}. \] $\tilde{Z}$ defines another fBm, this time on the interval $[0,T ( t-s ) ^{-1}]=:[0,\tilde{T}]$. 
Let $u=s ( t-s ) ^{-1} ,v=t ( t-s ) ^{-1}$ and denote by $\mathcal{\tilde {F}}_{a,b}$ the $\sigma$-algebra generated by the increments of $\tilde{Z}$ in $ [ a,b ] $. Scaling then allows us to deduce that \begin{equation} \operatorname{Var} ( Z_{s,t}|\mathcal{F}_{0,s}\vee \mathcal {F} _{t,T} ) = ( t-s ) ^{2H}\mathop{ \operatorname{Var}}(\tilde {Z} _{u,v}|\mathcal{ \tilde{F}}_{0,u}\vee\mathcal{\tilde{F}}_{v,\tilde{T}}). \label{scaling} \end{equation} By construction $v-u=1$. And since $\tilde{Z}$ is fBm it follows from Lemmas \ref{CM embed general} and~\ref{CM embed} that \begin{equation} \mathop{\inf_{[ u,v]\subseteq [0,\tilde{T}], }}_{|u-v|=1} \mathop{ \operatorname{Var}}(\tilde{Z}_{u,v}|\mathcal{\tilde{F}}_{0,u} \vee\mathcal{\tilde{F}}_{v,\tilde{T}})>0. \label{em} \end{equation} It follows from (\ref{scaling}) and (\ref{em}) that $Z^{H}$ satisfies Condition \ref{nondeterm} with index of nondeterminacy no greater than $2H$. \subsection{Nonnegativity of the conditional covariance} We finally verify that our example processes also satisfy Condition \ref{cond dom}. We first consider the special case of processes with negatively correlated increments. \subsubsection{Negatively correlated increments} From our earlier discussion, it suffices to check that Condition \ref{diagonal dominance} holds. In other words, that $Q^{D}$ is diagonally dominant for every partition $D$. This amounts to showing that \[ E [ Z_{t_{i-1},t_{i}} Z_{0,T} ] \geq0 \] for every $0\leq t_{i-1}<t_{i}\leq T$. It is useful to have two general conditions on $R$ which will guarantee that (i) the increments of $Z$ are negatively correlated, and (ii) diagonal dominance is satisfied. 
Here is a simple characterisation of these properties: \textit{Negatively correlated increments}: If $i<j$, write \[ Q_{ij}=E [ Z_{t_{i-1},t_{i}} Z_{t_{j-1},t_{j}} ] =\int _{t_{i-1} }^{t_{i}}\int_{t_{j-1}}^{t_{j}} \partial_{ab}^{2}R(a,b) \,da \,db, \] so that a sufficient condition for $Q_{ij}\leq0$ is $\partial_{ab}^{2} R(a,b)\leq0$ for $a<b$. This is trivially verified for fBm with $H<1/2$. Note that the distributional derivative $\partial_{ab}^{2}R(a,b)$ might be singular on the diagonal, but the diagonal is avoided here. \textit{Diagonal dominance}: If we assume negatively correlated increments, then diagonal dominance is equivalent to $\sum_{j}Q_{ij}\geq0$. Moreover, if we assume $Z_{0}$ is deterministic and $Z$ is centred we get \[ \sum_{j}Q_{ij}=E [ Z_{t_{i-1},t_{i}} Z_{T} ] =\int_{t_{i-1} }^{t_{i}} \partial_{a}R(a,T) \,da, \] so that a sufficient condition for $\sum_{j}Q_{ij}\geq0$ is $\partial _{a}R(a,b)\geq0$ for $a<b$. This is again trivially verified for fBm with $H<1/2$. \begin{example} In the case where $ ( Z_{t} ) _{t\in [ 0,T ] }$ is the Brownian bridge, which returns to zero at time $T^{\prime}>T$, we have \[ R ( a,b ) =\frac{a}{T^{\prime}} \bigl( T^{\prime}-b \bigr) \qquad\mbox{for $a<b$.} \] It is then immediate that $\partial_{ab}^{2}R(a,b)=-1/T^{\prime}<0$ and $\partial_{a}R(a,b)=1-b/T^{\prime}>0$. Similarly, for the centred Ornstein--Uhlenbeck process, we have \[ R ( a,b ) =2e^{-b}\sinh ( a ) \qquad\mbox{for $a<b$.} \] From which it follows that $\partial_{ab}^{2}R(a,b)=-2e^{-b}\cosh ( a ) <0$ and $\partial_{a}R(a,b)=2e^{-b}\cosh ( a ) >0$. In the more general case of the fractional Brownian bridge returning to zero at time $T^{\prime}>T$, the covariance function is given for $a<b$ by \[ R(a,b)=\frac{1}{2}R_H(a,b) -\frac {1}{2(T^{\prime})^{2H}}R_H \bigl(a,T'\bigr) R_H\bigl(b,T'\bigr), \] where we used the shorthand $R_H(a,b) = a^{2H}+b^{2H}-(b-a)^{2H}$. 
Thus, \begin{eqnarray*} \partial_{a}R(a,b)&=&H \bigl[ a^{2H-1}+(b-a)^{2H-1} \bigr]\\ &&{} -\frac{H} {(T^{\prime})^{2H}} \bigl[ a^{2H-1}+\bigl(T^{\prime}-a \bigr)^{2H-1} \bigr] R_H\bigl(b,T'\bigr). \end{eqnarray*} This is positive, since $R_H(b,T') \leq(T^{\prime})^{2H}$ and $(T^{\prime}-a)^{2H-1}\leq(b-a)^{2H-1}$ whenever $H<1/2$. The fact that $\partial_{ab}^{2}R(a,b)\leq0$ is also easily seen. \end{example} \subsubsection{Without negatively correlated increments} In the three examples, we were able to check Condition \ref{cond dom} by using the negative correlation of the increments and showing explicitly the diagonal dominance. In the case where the increments have positive or mixed correlation, we may have to check the weaker condition, Condition \ref{cond dom}, directly. An observation that might be useful in this regard is the following geometrical interpretation. Recall that we want to check that \[ \operatorname{Cov} ( Z_{s,t},Z_{u,v}| \mathcal{F}_{0,s}\vee \mathcal{F}_{t,T} ) \geq0. \] For simplicity, let $X=Z_{s,t}$, $Y=Z_{u,v}$ and $\mathcal{G=F}_{0,s} \vee\mathcal{F}_{t,T}$. The map $P_{\mathcal{G}}\dvtx Z\mapsto E [ Z|\mathcal{G} ] $ then defines a projection from the Hilbert space $L^{2} ( \Omega,\mathcal{F},P ) $ onto the closed subspace $L^{2} ( \Omega,\mathcal{G},P ) $, which gives the orthogonal decomposition \[ L^{2} ( \Omega,\mathcal{F},P ) =L^{2} ( \Omega, \mathcal{G} ,P ) \oplus L^{2} ( \Omega,\mathcal{G},P ) ^{\perp}. \] A simple calculation then yields \begin{eqnarray*} \operatorname{Cov} ( X,Y|\mathcal{G} )& =&E \bigl[ \operatorname{Cov} ( X,Y|\mathcal {G} ) \bigr] =E \bigl[ ( I-P_{\mathcal{G}} ) X ( I-P_{\mathcal {G}} ) Y \bigr] \\ &=& \bigl\langle P_{\mathcal{G}}^{\perp}X,P_{\mathcal{G}}^{\perp }Y \bigr\rangle_{L^{2} ( \Omega ) }, \end{eqnarray*} where $P_{\mathcal{G}}^{\perp}$ is the projection onto $L^{2} ( \Omega,\mathcal{G},P ) ^{\perp}$. 
In other words, $\operatorname{Cov} ( X,Y|\mathcal{G} ) \geq0$ if and only if $\cos\theta\geq0$, where $\theta$ is the angle between the projections $P_{\mathcal{G}}^{\perp}X$ and $P_{\mathcal{G}}^{\perp}Y$ of, respectively, $X$ and $Y$ onto the orthogonal complement of $L^{2} ( \Omega ,\mathcal{G},P ) $. \section{A Norris-type lemma} \label{Norris lemma section} In this section, we generalise a deterministic version of the Norris lemma, obtained in \cite{H3} for $p$ rough paths with $1<p<3$, to general $p>1$. It is interesting to note that the assumption on the driving noise we make is consistent with \cite{H3}. In particular, it still only depends on the roughness of the basic path and not the rough path lift. \subsection{Norris' lemma} To simplify the notation, we will assume that $T=1$ in this subsection; all the work will therefore be done on the interval $ [ 0,1 ] $. Our Norris-type lemma relies on the notion of controlled process, which we proceed to define now. Recall first the definition contained in \cite{Gu} for second-order rough paths: whenever $\mathbf{x}\in C^{0,\gamma }([0,1];G^{N} (\mathbb{R}^{d}))$ with $\gamma>1/3$, the space $\mathcal {Q}_{\mathbf{x} }(\mathbb{R})$ of controlled processes is the set of functions $y\in C^{\gamma}([0,1];\mathbb{R})$ such that the increment $y_{st}$ can be decomposed as \[ y_{st}=y_{s}^{i}x_{s,t}^{i}+r_{s,t}, \] where the remainder term $r$ satisfies $|r_{s,t}|\leq c_{y}|t-s|^{2\gamma}$ and where we have used the summation over repeated indices convention. Notice that $y$ has to be considered in fact as a vector $(y,y^{1},\ldots,y^{d})$. In order to generalise this notion to lower values of $\gamma$, we shall index our controlled processes by words based on the alphabet $\{1,\ldots,d\} $. To this end, we need the following additional notation. \begin{notation} Let $w=(i_{1},\ldots,i_{n})$ and $\bar w=(j_{1},\ldots,j_{m})$ be two words based on the alphabet $\{1,\ldots,d\}$. 
Then $|w|=n$ denotes the length of $w$, and $w\bar w$ stands for the concatenation $(i_{1},\ldots,i_{n}$, $j_{1},\ldots,j_{m})$ of $w$ and $\bar w$. For $L\ge1$, $\mathcal{W}_{L}$ denotes the set of words of length at most $L$. \end{notation} Let us now turn to the definition of controlled process based on a rough path. \begin{definition} \label{def:controlled-paths} Let $\mathbf{x}\in C^{0,\gamma}([0,1];G^{N} (\mathbb{R}^{d}))$, with $\gamma>0$, $N=[1/\gamma]$. A~controlled path based on $\mathbf{x}$ is a family $(y^{w})_{w\in\mathcal{W}_{N-1}}$ indexed by words of length at most $N-1$, such that for any word $w\in\mathcal {W}_{N-2}$ we have \begin{equation} \qquad y_{s,t}^{w}=\sum_{\bar{w}\in\mathcal{W}_{N-1-|w|}}y_{s}^{w\bar{w}} \mathbf{x}_{s,t}^{\bar{w}}+r_{s,t}^{w}\qquad \mbox{where } \bigl|r_{s,t}^{w}\bigr|\leq c_{y}|t-s|^{(N-|w|)\gamma}. \label{eq:dcp-controlled-path} \end{equation} In order to take the drift term of \eqref{Int-Eq} into account, we also assume that for $w=\varnothing$ we get a decomposition for the increment $y_{s,t}$ of the form \begin{equation}\qquad y_{s,t}=\sum_{\bar{w}\in\mathcal{W}_{N-1}}y_{s}^{\bar{w}} \mathbf {x} _{s,t}^{\bar{w}}+ y_{s}^{0} (t-s) + r_{st}^{y} \qquad\mbox{where }\bigl |r_{s,t}^{y}\bigr| \leq c_{y}|t-s|^{N\gamma}. \label{eq:dcp-increment-y} \end{equation} The set of controlled processes is denoted by $\mathcal{Q}_{\mathbf{x} }^{\gamma}$, and the norm on $\mathcal{Q}_{\mathbf{x}}^{\gamma}$ is given by \[ \Vert y\Vert_{\mathcal{Q}_{\mathbf{x}}^{\gamma}}= \bigl\Vert y^{0}\bigr\Vert _{\gamma} + \sum _{w\in\mathcal{W}_{N-1}}\bigl\Vert y^{w}\bigr\Vert_{\gamma}. \] \end{definition} We next recall the definition of $\theta$-H\"{o}lder-roughness introduced in \cite{H3}. \begin{definition}\label{def:rough} Let $\theta\in ( 0,1 ) $. 
A path $x\dvtx [ 0,T ] \rightarrow\mathbb{R}^{d}$ is called $\theta$-H\"{o}lder rough if there exists a constant $c>0$ such that for every $s$ in $ [ 0,T ] $, every $\varepsilon$ in $(0,T/2]$, and every $\phi$ in $\mathbb{R}^{d}$ with $\vert \phi\vert =1$, there exists $t$ in $ [ 0,T ] $ such that $\varepsilon/2<\vert t-s\vert <\varepsilon$ and \[ \bigl\vert \langle\phi,x_{s,t} \rangle\bigr\vert >c\varepsilon ^{\theta}. \] The largest such constant is called the modulus of $\theta$-H\"{o}lder roughness, and is denoted $L_{\theta} ( x ) $. \end{definition} A first rather straightforward consequence of this definition is that if a rough path $\mathbf{x}$ happens to be H\"{o}lder rough, then the \emph{derivative processes} $y^{w}$ in the decomposition \eqref{eq:dcp-controlled-path} of a controlled path $y$ are uniquely determined by $y$. This can be made quantitative in the following way. \begin{proposition} \label{prop:Nlemma1} Let $\mathbf{x}\in C^{0,\gamma}([0,1];G^{N} (\mathbb{R}^{d}))$, with $\gamma>0$ and $N=[1/\gamma]$. We also assume that $x$ is $\theta$-H\"{o}lder rough for some $\theta<2\gamma$. Let $y$ be a real-valued controlled path defined as in Definition~\ref{def:controlled-paths}, and set $\mathcal {Y}_{n}(y)=\sup_{|w|=n}\Vert y^{w}\Vert_{\infty}$ for $n\le N-1$. Then there exists a constant $M$ depending only on $d $ such that the bound \begin{equation} \mathcal{Y}_{n}(y)\leq \frac{M (\|y\|_{\mathcal{Q}_{\mathbf{x}}^{\gamma}} \mathcal{N}_{\mathbf{x}} )^{{\theta}/{(2\gamma)}} }{L_{\theta}(x)} \mathcal{Y}_{n-1}^{1-{\theta}/{(2\gamma)}}(y) \label{eq:bnd-Yny} \end{equation} holds for every controlled rough path in $\mathcal{Q}_{\mathbf {x}}^{\gamma }$ and every $1\le n \le N-1$. \end{proposition} \begin{pf} For sake of clarity, we shall assume that $y^{0}=0$, leaving to the patient reader the straightforward adaptation to a nonvanishing drift coefficient. 
Now start from the decomposition \eqref{eq:dcp-controlled-path} and recast it as \[ y_{s,t}^{w}=\sum_{j=1}^{d}y_{s}^{wj} \mathbf{x}_{s,t}^{(j)}+\sum_{2\leq |\bar {w}|\leq N-1-|w|}y_{s}^{w\bar{w}} \mathbf{x}_{s,t}^{\bar{w}}+r_{s,t}^{w}, \] where we have set $wj$ for the concatenation of the word $w$ and the word $(j)$ for notational sake. This identity easily yields \begin{eqnarray}\label{eq:dcp-controlled-path2} \sup_{|t-s|\leq\varepsilon}\Biggl\vert \sum_{j=1}^{d}y_{s}^{wj} \mathbf{x} _{s,t}^{(j)}\Biggr\vert &\leq&2\bigl\Vert y^{w}\bigr\Vert_{\infty}\nonumber\\ &&{}+\sum_{2\leq |\bar {w}|\leq N-1-|w|}\bigl\Vert y^{w\bar{w}}\bigr\Vert_{\infty}\bigl\Vert\mathbf {x}^{\bar {w} } \bigr\Vert_{\gamma|\bar{w}|} \varepsilon^{|\bar{w}| \gamma}\\ &&\hspace*{76pt}{}+\bigl\Vert r^{w} \bigr\Vert_{\gamma(N-|w|)} \varepsilon^{(N-|w|) \gamma}. \nonumber \end{eqnarray} Since $x$ is $\theta$-H\"{o}lder rough by assumption, there exists $v$, which is independent of j, with $\varepsilon/2\leq |v-s|\leq\varepsilon$ such that \begin{equation} \Biggl\vert \sum_{j=1}^{d}y_{s}^{wj} \mathbf{x}_{s,v}^{(j)}\Biggr\vert >L_{\theta}(x) \varepsilon^{\theta}\bigl| \bigl( y_{s}^{w1}, \ldots,y_{s} ^{wd} \bigr) \bigr|. \label{eqn:low-bnd-ywj} \end{equation} Combining both \eqref{eq:dcp-controlled-path2} and \eqref {eqn:low-bnd-ywj} for all words $w$ of length $n-1$, we thus obtain that \begin{eqnarray*} \mathcal{Y}_{n}(y)&\leq&\frac{c}{L_{\theta}(x)} \biggl[ \mathcal{Y} _{n-1}(y) \varepsilon^{-\theta} \\ &&\hspace*{35pt}{} +\sup_{|w|=n-1} \biggl(\sum_{2\leq|\bar{w}|\leq N-1-|w|} \bigl\Vert y^{w\bar {w} }\bigr\Vert_{\infty}\bigl\Vert\mathbf{x}^{\bar{w}} \bigr\Vert_{\gamma|\bar{w} |} \varepsilon^{|\bar{w}| \gamma-\theta}\\ &&\hspace*{151pt}{}+\bigl\Vert r^{w} \bigr\Vert_{\gamma (N-|w|)} \varepsilon^{(N-|w|) \gamma-\theta} \biggr) \biggr]. 
\end{eqnarray*} Let us further simplify this relation by recalling that we take supremums over words $w$ such that $|w|=n-1\le N-2$, so that $N-|w|\ge 2$, and we also consider words $\bar{w}$ whose length is at least $2$. This yields \[ \mathcal{Y}_{n}(y)\leq\frac{c}{L_{\theta}(x)} \bigl( \mathcal{Y}_{n-1}(y) \varepsilon^{-\theta} + \|y\|_{\mathcal{Q}_{\mathbf{x}}^{\gamma}} \mathcal{N}_{\mathbf{x},\gamma} \varepsilon^{2\gamma-\theta} \bigr). \] One can optimise the right-hand side of the previous inequality over $\varepsilon$, by choosing $\varepsilon$ such that the term $\mathcal{Y} _{n-1}(y) \varepsilon^{-\theta}$ is of the same order as $\mathcal {N}_{\mathbf{x},\gamma} \varepsilon^{2\gamma-\theta}$. One then verifies that our claim \eqref{eq:bnd-Yny} follows from this elementary computation. \end{pf} \begin{remark} Definition \ref{def:controlled-paths} and Proposition \ref {prop:Nlemma1} can be generalised to $d$-dimensional controlled processes. In particular, if $y$ is a $d$-dimensional path, the decomposition \eqref{eq:dcp-increment-y} becomes \begin{equation} y_{s,t}^{i}=\sum_{\bar{w}\in\mathcal{W}_{N-1}}y_{s}^{i,\bar {w}} \mathbf {x} _{s,t}^{\bar{w}}+r_{s,t}^{i,y}\qquad \mbox{where }\bigl |r_{s,t}^{i,y}\bigr|\leq c_{y}|t-s|^{N\gamma} \label{eq:dcp-increment-y-d-dim} \end{equation} for all $i=1,\ldots,d$. \end{remark} We now show how the integration of controlled processes fits into the general rough paths theory. For this, we will use the nonhomogeneous norm $\mathcal{N}_{\mathbf{x},\gamma}=\mathcal{N}_{\mathbf{x},\gamma , [ 0,1 ] }$ introduced in (\ref{inhomogeneous}). \begin{proposition} \label{prop:integral-ctrld-path} Let $y$ be a $d$-dimensional controlled process, given as in Definition \ref{def:controlled-paths} and whose increments can be written as in \eqref{eq:dcp-increment-y-d-dim}. Then $(\mathbf{x},\mathbf{y})$ is a geometrical rough path in $G^{N}(\mathbb {R} ^{2d})$. 
In particular, for $(s,t)\in\Delta^{2}$, the integral $I_{st} \equiv\int_{s}^{t}y_{s}^{i} \,dx_{s}^{i}$ is well defined and admits the decomposition \begin{equation} I_{s,t}=\sum_{j=1}^{d} \biggl( y_{s}^{j}x_{s,t}^{j}+\sum _{\bar{w}\in \mathcal{W}_{N-1}}y_{s}^{\bar{w}}\mathbf{x}_{s,t}^{\bar{w}j} \biggr) +r_{s,t} ^{I}, \label{eq:dcp-Ist} \end{equation} where $|r_{s,t}^{I}|\leq\mathcal{N}_{\mathbf{x}}\Vert\mathbf {y}\Vert _{\gamma }|t-s|^{(N+1)\gamma}$. \end{proposition} \begin{pf} Approximate $x$ and $y$ by smooth functions $x^{m},y^{m}$, while preserving the controlled process structure (namely $y^{m}\in\mathcal {Q}_{\mathbf {x}^{m} }$). Then one can easily check that $(x^{m},y^{m})$ admits a signature, and that $I_{s,t}^{m}\equiv\int_{s}^{t}y_{s}^{m,i} \,dx_{s}^{m,i}$ can be decomposed as \eqref{eq:dcp-Ist}. Limits can then be taken thanks to \cite{Gu10}, which completes the proof. \end{pf} The following theorem is a version of Norris' lemma, and constitutes the main result of this section. \begin{theorem} \label{thm:NlemmaRP} Let $\mathbf{x}$ be a geometric rough path of order $N\geq1$ based on the $\mathbb{R}^{d}$-valued function $x$. We also assume that $x$ is $\theta$-H\"{o}lder rough with $2\gamma>\theta$. Let $y $ be a $\mathbb{R}^{d}$-valued controlled path of the form given in Definition \ref{def:controlled-paths}, $b\in C^{\gamma}([0,1])$, and set \[ z_{t}=\sum_{i=1}^{d}\int _{0}^{t}y_{s}^{i} \,dx_{s}^{i}+\int_{0}^{t} b_{s} \,ds=I_{0,t}+\int_{0}^{t}b_{s} \,ds. \] Then there exist constants $r>0$ and $q>0$ such that, setting \begin{equation} \mathcal{R}=1+{L_{\theta}(x)}^{-1}+\mathcal{N}_{\mathbf{x},\gamma} +\Vert\mathbf{y}\Vert_{\mathcal{Q}_{\mathbf{x}}^{\gamma}}+\Vert b\Vert _{C^{\gamma}}, \label{eq:def-R} \end{equation} one has the bound \[ \Vert y\Vert_{\infty}+\Vert b\Vert_{\infty}\leq M\mathcal{R}^{q} \Vert z\Vert_{\infty}^{r} \] for a constant $M$ depending only on $T$, $d$ and $y$. 
\end{theorem} \begin{pf} We shall divide this proof in several steps. In the following computations, the symbol $\kappa$ will stand for an exponent for $\mathcal{R}$ and $M$ will stand for an arbitrary multiplicative constant. The exact values of these two constants are irrelevant and can change from line to line without warning. \textit{Step \textup{1:} Bounds on $y$}. Combining \eqref{eq:dcp-Ist}, the bound on $r^{I}$ given in Proposition~\ref{prop:integral-ctrld-path} and the definition of $\mathcal{R} $, we easily get the relation \begin{equation} \label{e:boundz} \|z\|_{\infty} \le M \mathcal{R}^{\kappa}. \end{equation} We now resort to relation \eqref{eq:bnd-Yny} applied to the controlled path $z$ and for $n=1$, which means that $\mathcal{Y}_{n}(z)\asymp\|y\| _{\infty}$ and $\mathcal{Y}_{n-1}(z)\asymp\|z\|_{\infty}$. With the definition of $\mathcal{R}$ in mind, this yields the bound \begin{equation} \label{eq:bnd-norris1}\|y\|_{\infty} \le M \|z\|_{\infty}^{1-{\theta }/{(2\gamma)}} \mathcal{R}^{\kappa}, \end{equation} which corresponds to our claim for $y$. Along the same lines and thanks to relation \eqref{eq:bnd-Yny} for $n>1$, we iteratively get the bounds \begin{equation} \label{eq:bnd-Yny-z}\mathcal{Y}_{n}(y) \le M \|z\|_{\infty }^{(1-{\theta}/{(2\gamma)})^{n}} \mathcal{R}^{\kappa}, \end{equation} which will be useful in order to complete the bound we have announced for~$b$. \textit{Step \textup{2:} Bounds on $r^{I}$ and $I$.} In order to get an appropriate bound on $r$, it is convenient to consider $\mathbf{x}$ as a rough path with H\"older regularity $\beta<\gamma$, still satisfying the inequality $2\beta>\theta$. Notice furthermore that $\mathcal{N} _{\mathbf{x},\beta}\le\mathcal{N}_{\mathbf{x},\gamma}$. Consider now $w\in\mathcal{W}_{n}$. According to \eqref{eq:bnd-Yny-z}, we have \[ \bigl\|y^{w}\bigr\|_{\infty} \le M \|z\|_{\infty}^{(1-{\theta}/{(2\gamma) })^{n} } \mathcal{R}^{\kappa}, \] while $\|y^{w}\|_{\gamma}\le M \mathcal{R}$ by definition. 
Hence, invoking the inequality \[ \bigl\|y^{w}\bigr\|_{\beta} \le2 \bigl\|y^{w} \bigr\|^{{\beta}/{\gamma}}_{\gamma} \bigl\| y^{w}\bigr\|^{1 - {\beta}/{\gamma}}_{\infty} , \] which follows immediately from the definition of the H\"older norm, we obtain the bound \[ \bigl\|y^{w}\bigr\|_{\beta} \le M \|z\|_{\infty}^{(1-{\theta}/{(2\gamma)} )^{n}(1-{\beta}/{\gamma})} \mathcal{R}^{\kappa}, \] which is valid for all $w\in\mathcal{W}_{n}$ and all $n\le N-1$. Summing up, we end up with the relation \[ \|\mathbf{y}\|_{\beta} \le M \|z\|_{\infty}^{(1-{\theta }/{(2\gamma) })^{N-1}(1-{\beta}/{\gamma})} \mathcal{R}^{\kappa}. \] Now according to Proposition \ref{prop:integral-ctrld-path}, we get $r_{s,t}^{I}\leq\mathcal{N}_{\mathbf{x},\beta}\Vert\mathbf {y}\Vert _{\beta }|t-s|^{(N+1)\beta}$ and the above estimate yields \[ \bigl\Vert r^{I}\bigr\Vert_{(N+1)\beta}\leq M \Vert z\Vert_{\infty}^{(1-{\theta }/{(2\gamma)})^{N-1}(1-{\beta}/{\gamma})} \mathcal{R}^{\kappa}. \] Plugging this estimate into the decomposition \eqref{eq:dcp-Ist} of $I_{st}$ we end up with \begin{equation} \Vert I\Vert_{\infty}\leq M \Vert z\Vert_{\infty}^{(1-{\theta }/{(2\gamma) })^{N-1}(1-{\beta}/{\gamma})} \mathcal{R}^{\kappa}. \label {eq:bnd-I-infty} \end{equation} \textit{Step \textup{3:} Bound on $b$.} Combining the bound \eqref{eq:bnd-I-infty} with \eqref{e:boundz} and the fact that the exponent of $\|z\|_\infty$ appearing in \eqref{eq:bnd-I-infty} is less than $1$, we have \[ \biggl\|\int_{0}^{\cdot}b_{s} \,ds \biggr\|_{\infty}\leq M \Vert z\Vert _{\infty }^{(1-{\theta}/{(2\gamma)})^{N-1}(1-{\beta}/{\gamma })} \mathcal{R} ^{\kappa}. \] Once again, we use an interpolation inequality to strengthen this bound. 
Indeed, we have (see \cite{HM11}, Lemma 6.14, for further details) \[ \Vert\partial_{t}f\Vert_{\infty}\leq M\Vert f\Vert_{\infty} \max \biggl({\frac{1}{T}},\Vert f\Vert_{\infty}^{-{{1}/{(\gamma+1)}}}\Vert \partial_{t}f\Vert_{\gamma}^{{1}/{(\gamma+1)}} \biggr), \] and applying this inequality to $f_{t}=\int_{0}^{t}b_{s} \,ds$, it follows that \begin{equation} \Vert b\Vert_{\infty}\leq M \Vert z\Vert_{\infty}^{(1-{\theta }/{(2\gamma) })^{N-1}(1-{\beta}/{\gamma})({\gamma}/{(\gamma +1)})} \mathcal {R}^{\kappa }. \label{eq:bnd-norris2} \end{equation} Gathering the bounds \eqref{eq:bnd-norris1} and \eqref{eq:bnd-norris2}, our proof is now complete. \end{pf} \begin{remark} One might be motivated to consider situations in which the drift and the noise have different natural parameterisations (see, e.g., the recent work \cite{FS}). More precisely suppose $\mathbf{X}$ is a Gaussian rough path in $WG\Omega_{p} ( \mathbb{R} ^{d} ) $ (with general $p$-variation regularity) and let $Y$ be the solution to \[ dY_{t}=V ( Y_{t} ) \,d\mathbf{X} + V_{0} ( Y_{t} ) \,dt, \qquad Y ( 0 ) =y_{0}. \] Then, as we have already observed in Remark \ref{remark f1}, we can use the parameterisation $\tau\dvtx [ 0,T ] \rightarrow [ 0,T ]$, the inverse of $\sigma$ in (\ref{parametrisation}), to force $\tilde {X} _{t}:=X_{\tau ( t ) }$ to have a H\"{o}lder-controlled covariance function. This leads us to consider the solution to \begin{equation} d\tilde{Y}_{t}=V ( \tilde{Y}_{t} ) \,d\mathbf{\tilde{X}} + V_{0} ( \tilde{Y}_{t} ) \,d\tau(t), \qquad\tilde{Y} ( 0 ) =y_{0},\label{repar} \end{equation} whereupon $\tilde{Y}_{t}=Y_{\tau ( t ) }$. In particular, for proving smoothness of the density of $Y_{T}(=\tilde{Y}_{T})$, one needs never to consider any parameterisation in which the noise is not of H\"{o}lder-type regularity. This is a useful remark because Condition \ref{nondeterm} explicitly involves the H\"{o}lder-parameterisation. 
To deal with the situation presented by (\ref{repar}), one should adapt the previous theorem to accommodate RDEs of the form \[ z_{t}=\sum_{i=1}^{d}\int _{0}^{t}y_{s}^{i} \,dx_{s}^{i}+\int_{0}^{t}b_{s} \,d\tau ( s ). \] \end{remark} \subsection{Small-ball estimates for \texorpdfstring{${L}_{\theta}(X)$}{L_{theta}(X)}} We now take $X$ to be a Gaussian process satisfying Condition \ref {nondeterm} . As the reader might have noticed, equation \eqref{eq:def-R} above involves the random variable $L_{\theta} ( X ) ^{-1}$, for which we will need some tail estimates. The nondeterminism condition naturally gives rise to such estimates as the following lemma makes clear. \begin{lemma} \label{sbp}Suppose $ ( X_{t} ) _{t\in [ 0,T ] }$ is a zero-mean, $ \mathbb{R} ^{d}$-valued, continuous Gaussian process with i.i.d. components, with each component having a continuous covariance function $R$. Suppose that one (and hence every) component of $X$ satisfies Condition \ref{nondeterm}. Let $\alpha_{0}>0$ be the index of nondeterminism for $X$ and suppose $\alpha \geq\alpha_{0}$. Then there exist positive and finite constants $C_{1}$ and $C_{2}$ such that for any interval $I_{\delta}\subseteq [ 0,T ] $ of length $\delta$ and $0<x<1$ we have \begin{equation} P \Bigl( \inf_{\vert \phi\vert =1}\sup_{s,t\in I_{\delta} }\bigl\vert \langle\phi,X_{s,t} \rangle\bigr\vert \leq x \Bigr) \leq C_{1}\exp \bigl( -C_{2}\delta x^{-2/\alpha} \bigr). \label {sbpbound} \end{equation} \end{lemma} \begin{pf} The proof is similar to Theorem 2.1 of Monrad and Rootzen \cite {monrad}; we need to adapt it because our nondeterminism condition is different. We start by introducing two simplifications. 
First, for any $\phi$ in $ \mathbb{R} ^{d}$ with $\vert \phi\vert =1$, we have \begin{equation} \bigl( \langle\phi,X_{t} \rangle \bigr) _{t\in [ 0,T ] }\stackrel{D} {=} \bigl( X_{t}^{1} \bigr) _{t\in [ 0,T ] }, \label{distr} \end{equation} which implies that \begin{equation} P \Bigl( \sup_{s,t\in I_{\delta}}\bigl\vert \langle\phi ,X_{s,t} \rangle\bigr\vert \leq x \Bigr) =P \Bigl( \sup _{s,t\in I_{\delta} }\bigl\vert X_{s,t}^{1}\bigr\vert \leq x \Bigr). \label{cmp} \end{equation} We will prove that this probability is bounded above by \[ \exp \bigl( -c\delta x^{-2/\alpha} \bigr) \] for a positive real constant $c$, which will not depend on $T$, $\delta $ or $x$. The inequality~(\ref{sbpbound}) will then follow by a well-known compactness argument (see \cite{H3} and~\cite{N}). The second simplification is to assume that $\delta=1$. We can justify this by working with the scaled process \[ \tilde{X}_{t}=\delta^{-\alpha/2}X_{t\delta}, \] which is still a Gaussian process only now defined on the interval $[0,\tilde{T}]:= [ 0,T/\delta ]$. Furthermore, the scaled process also satisfies Condition \ref{nondeterm} since \begin{eqnarray*} \operatorname{Var} ( \tilde{X}_{s,t}|\mathcal{\tilde{F}}_{0,s}\vee\mathcal {\tilde {F}} _{t,\tilde{T}} ) &=&\delta^{-\alpha}\operatorname{Var} ( X_{s\delta ,t\delta}| \mathcal{F}_{0,s\delta}\vee \mathcal{F}_{t\delta,T} ) \\ &\geq& c\delta^{-\alpha} \bigl( \delta ( t-s ) \bigr) ^{\alpha} =c ( t-s ) ^{\alpha}. \end{eqnarray*} Thus, if we can prove the result for intervals of length $1$, we can deduce the bound on (\ref{cmp}) we want from the identity \[ P \Bigl( \sup_{s,t\in I_{\delta}}\bigl\vert X_{s,t}^{1} \bigr\vert \leq x \Bigr) =P \biggl( \sup_{s,t\in I_{1}} \bigl| \tilde{X}_{s,t}^{1} \bigr| \leq\frac{x}{\delta^{\alpha/2}} \biggr). 
\] To complete the proof, we begin by defining the natural number $n:=\lfloor x^{-2/\alpha}\rfloor\geq1$ and the dissection $D ( I ) = \{ t_{i}\dvtx i=0,1,\ldots,n+1 \} $ of $I=I_{1}$, given by \begin{eqnarray*} t_{i} &=&\inf I+ix^{2/\alpha},\qquad i=0,1,\ldots,n, \\ t_{n+1} &=&\inf I+1=\sup I. \end{eqnarray*} Then it is trivial to see that \begin{equation} P \Bigl( \sup_{s,t\in I}\bigl\vert X_{s,t}^{1} \bigr\vert \leq x \Bigr) \leq P \Bigl( \max_{i=1,2,\ldots,n} |X_{t_{i-1},t_{i}}^{1} |\leq x \Bigr). \label{discrete small ball} \end{equation} To estimate (\ref{discrete small ball}), we successively condition on the components of \[ \bigl(X_{t_{0},t_{1}}^{1},\ldots,X_{t_{n-1},t_{n}}^{1} \bigr). \] More precisely, the distribution of $X_{t_{n-1},t_{n}}^{1}$ conditional on $(X_{t_{0},t_{1}}^{1},\ldots,\break X_{t_{n-2},t_{n-1}}^{1})$ is Gaussian with a variance $\sigma^{2}$. Condition \ref{nondeterm} ensures that $\sigma ^{2}$ is bounded below by~$cx^{2}$. When $Z$ is a Gaussian random variable with fixed variance, $P ( \vert Z\vert \leq x ) $ will be maximised when the mean is zero. We therefore obtain the following upper bound: \[ P \Bigl( \sup_{s,t\in I}\bigl\vert X_{s,t}^{1} \bigr\vert \leq x \Bigr) \leq \biggl(\int_{-x/\sigma}^{x/\sigma} \frac{1}{\sqrt{2\pi}}\exp \biggl( -\frac{1}{2}y^{2} \biggr) \,dy \biggr)^{n}. \] Using $x/\sigma\leq c^{-1/2}$, we can finally deduce that \[ P \Bigl( \sup_{s,t\in I}\bigl\vert X_{s,t}^{1} \bigr\vert \leq x \Bigr) \leq\exp ( -Cn ) \leq\exp \biggl( -\frac{Cx^{-2/\alpha }}{2} \biggr), \] where $C:=\log [ 2\Phi ( c^{-1/2} ) -1 ] ^{-1}\in ( 0,\infty ) $. \end{pf} \begin{remark} As well as \cite{monrad}, these small-ball estimates should be compared to the estimates obtained by Li and Linde in \cite{Li} and Molchan \cite{M} in the case of fractional Brownian motion. 
\end{remark} \begin{corollary} \label{l theta integrability}Suppose $ ( X_{t} ) _{t\in [ 0,T ] }$ is a zero-mean, $ \mathbb{R} ^{d}$-valued, continuous Gaussian process with i.i.d. components satisfying the conditions of Lemma~\ref{sbp}. Then for every $\theta>\alpha/2$, the path $ ( X_{t} ) _{t\in [ 0,T ] }$ is almost surely $\theta $-H\"{o}lder rough. Furthermore, for $0<x<1$, there exist positive finite constants $C_{1}$ and $C_{2}$ such that the modulus of $\theta$-H\"{o}lder roughness, $L_{\theta} ( X ) $, satisfies \[ P \bigl( L_{\theta} ( X ) <x \bigr) \leq C_{1}\exp \bigl( -C_{2}x^{-2/\alpha} \bigr). \] In particular, under these assumptions we have that $L_{\theta} ( X ) ^{-1}$ is in $\bigcap_{p>0}L^{p} ( \Omega ) $. \end{corollary} \begin{pf} The argument of \cite{H3} applies in exactly the same way to show that $L_{\theta} ( X ) $ is bounded below by \[ \frac{1}{2\cdot8^{\theta}}D_{\theta} ( X ), \] where \[ D_{\theta} ( X ):=\inf_{\Vert \phi\Vert =1}\inf_{n\geq1} \inf_{k\leq2^{n}}\sup_{s,t\in I_{k,n}}\frac{\vert \langle \phi,X_{s,t} \rangle\vert }{ ( 2^{-n}T ) ^{\theta}} \] and $I_{k,n}:= [ ( k-1 ) 2^{-n}T,k2^{-n}T ] $. We can deduce that for any $x\in ( 0,1 ) $ \[ P \bigl( D_{\theta} ( X ) <x \bigr) \leq\sum_{n=1}^{\infty} \sum_{k=1}^{2^{n}}P \biggl(\inf _{\Vert \phi\Vert =1}\sup_{s,t\in I_{k,n}}\frac{\vert \langle\phi,X_{s,t} \rangle \vert }{ ( 2^{-n}T ) ^{\theta}}<x \biggr), \] whereupon we can apply Lemma \ref{sbp} to yield \[ P \bigl( D_{\theta} ( X ) <x \bigr) \leq c_{1}\sum _{n=1}^{\infty }2^{n}\exp \bigl( -c_{2}2^{-n ( 1-2\theta/\alpha ) }T^{-2\theta /\alpha}x^{-2/\alpha} \bigr). 
\] By exploiting the fact that $\theta>\alpha/2$, we can then find positive constants $c_{3}$ and $c_{4}$ such that \begin{eqnarray*} P \bigl( D_{\theta} ( X ) <x \bigr) &\leq &c_{3}\sum _{n=1}^{\infty }\exp \bigl( -c_{4}nx^{-2/\alpha} \bigr) =c_{3}\frac{\exp ( -c_{4}x^{-2/\alpha} ) }{1-\exp ( -c_{4}x^{-2/\alpha} ) } \\ &\leq& c_{5}\exp \bigl( -c_{4}x^{-2/\alpha} \bigr), \end{eqnarray*} which completes the proof. \end{pf} \section{An interpolation inequality} \label{interpol} Under the standing assumptions on the Gaussian process, the \textit{Malliavin covariance matrix} of the random variable $U_{t\leftarrow0}^{\mathbf {X} } ( y_{0} ) \equiv Y_{t}$ can be represented as a 2D Young integral (see \cite{CFV}) \begin{equation} C_{t}=\sum_{i=1}^{d}\int _{ [ 0,t ] ^{2}}J_{t\leftarrow s}^{\mathbf{X}} ( y_{0} ) V_{i} ( Y_{s} ) \otimes J_{t\leftarrow s^{\prime}}^{\mathbf{X}} ( y_{0} ) V_{i} ( Y_{s^{\prime}} ) \,dR \bigl( s,s^{\prime} \bigr). \label{eq:def-malliavin-matrix} \end{equation} In practice, showing the smoothness of the density boils down to getting integrability estimates on the inverse of $\inf_{\Vert v\Vert =1}v^{T}C_{T}v$, the smallest eigenvalue of~$C_{T}$. For this reason, we will be interested in \[ v^{T}C_{T}v=\sum_{i=1}^{d} \int_{ [ 0,T ] ^{2}} \bigl\langle v,J_{t\leftarrow s}^{\mathbf{X}} ( y_{0} ) V_{i} ( Y_{s} ) \bigr\rangle \bigl \langle v,J_{t\leftarrow s^{\prime} }^{\mathbf{X}} ( y_{0} ) V_{i} ( Y_{s^{\prime}} ) \bigr\rangle \,dR \bigl( s,s^{\prime} \bigr). \] We will return to study the properties of $C_{T}$ more extensively in Section~\ref{section proof main theorem}. For the moment, we look to generalise this perspective somewhat. Suppose $f\dvtx [ 0,T ] \rightarrow \mathbb{R} $ is some (deterministic) real-valued H\"{o}lder-continuous function, where $\gamma$ is Young-complementary to $\rho$, 2D-variation regularity of $R$. 
Our aim in this section is to elaborate on the nondegeneracy of the 2D Young integral
Then the Riemann sum approximation to (\ref{1st2D}) along $D$ can be written as \[ f ( D ) ^{T}Qf ( D ), \] where $Q$ is the matrix (\ref{increment matrix}) and $f ( D ) $ the vector with entries given by the values of $f$ at the points in the partition. The next sequence of results is aimed at addressing the following question. \begin{problem} Suppose $\vert f\vert _{\infty; [ s,t ] }\geq 1$ for some interval $ [ s,t ] \subseteq [ 0,T ] $. Can we find a positive lower bound $f ( D ) ^{T}Qf ( D ) $ which holds uniformly over some sequence of partitions whose mesh tends to zero? \end{problem} To take a first step toward securing an answer, let $D= \{ t_{i}\dvtx i=0,1,\ldots,n \} $ and define \[ Z:= ( Z_{1},\ldots,Z_{n} ):= ( X_{t_{0},t_{1}},\ldots,X_{t_{n-1} ,t_{n}} ) \sim N ( 0,Q ). \] Suppose that $Q$ has the block decomposition \[ Q=\pmatrix{ Q_{11} & Q_{12} \vspace*{2pt}\cr Q_{12}^{T} & Q_{22}}\qquad\mbox{with } Q_{11}\in\mathbb{R}^{k,k},Q_{12} \in \mathbb{R}^{k,n-k},Q_{22}\in\mathbb{R}^{n-k,n-k}. \] In other words, $Q_{11}$ is the covariance matrix of $ ( Z_{1} ,\ldots,Z_{k} ) $ and $Q_{22}$ is the covariance matrix of $ ( Z_{k+1},\ldots,Z_{n} ) $. We are interested in finding the infimum of the quadratic form $x^{T}Qx$ over the subset \[ \bigl\{ ( x_{1},\ldots,x_{n} ) \in \mathbb{R} ^{n}\dvtx x_{j}\geq b,\forall j=k+1,\ldots,n \bigr\}, \] where $b>0$. To simplify the problem, we recall that the description of the condition distribution \[ ( Z_{k+1},\ldots,Z_{n} ) |\sigma ( Z_{1},\ldots,Z_{k} ) \sim N ( \bar{\mu},\bar{Q} ), \] where the mean and covariance are given by \[ \bar{\mu}=Q_{12}^{T}Q_{11}^{-1} ( Z_{1},\ldots,Z_{k} ) ^{T},\qquad \bar{Q}=Q_{22}-Q_{12}^{T}Q_{11}^{-1}Q_{12}. \] $\bar{Q}$ is the so-called \textit{Schur complement} of $Q_{11}$ in $Q$. 
It follows that $x_{1}Z_{1}+x_{2}Z_{2}+\cdots+x_{n}Z_{n}| ( Z_{k+1} ,\ldots,Z_{n} ) \sim N ( \sum_{i=1}^{k}x_{i}Z_{i}+\sum_{i=k+1} ^{n}x_{i}\bar{\mu}_{i}$, $\sum_{i,j=1}^{k}x_{i}\bar {Q}_{i,j}x_{j} )$, and hence \begin{eqnarray*} E \bigl[ ( x_{1}Z_{1}+\cdots+x_{n}Z_{n} ) ^{2} \bigr] & =&E \bigl[ E \bigl[ ( x_{1}Z_{1}+x_{2}Z_{2}+\cdots+x_{n}Z_{n} ) ^{2} |\sigma ( Z_{1},\ldots,Z_{k} ) \bigr] \bigr] \\ & =&\sum_{i,j=1}^{k}x_{i} \bar{Q}_{i,j}x_{j}+E \Biggl[ \Biggl( \sum _{i=1} ^{k}x_{i}\bar{ \mu}_{i}+\sum_{i=k+1}^{n}x_{i}Z_{i} \Biggr) ^{2} \Biggr]. \end{eqnarray*} We may always choose the unconstrained variables $x_{1},\ldots,x_{k}$ in order that the second term is zero, therefore, \begin{equation} \qquad\inf_{x_{k+1}\geq b,\ldots,x_{n}\geq b}E \bigl[ ( x_{1}Z_{1}+\cdots+x_{n} Z_{n} ) ^{2} \bigr] =\inf_{x_{k+1}\geq b,\ldots,x_{n}\geq b}\sum _{i,j=1}^{k}x_{i} \bar{Q}_{i,j}x_{j}. \label{qp} \end{equation} At first glance, it may appear that the minimiser in the right-hand side is $ ( x_{k+1},\ldots,x_{n} ) = ( b,\ldots,b )$, but this is not always true.\setcounter{footnote}{3}\footnote{For example, suppose $b=1$ and $\bar{Q}$ is the $2\times2$ positive definite, symmetric matrix given by $\bar{Q}=\bigl( {5 \atop -2}\enskip {-2 \atop 1} \bigr) $. Then $ ( 1,1 ) \bar{Q} ( 1,1 ) ^{T}=2$, but $ ( 1,1.1 ) \bar{Q} ( 1,1.1 ) ^{T}=1.8$.} The following lemma, however, gives a simple condition on $\bar{Q}$ which ensures that it is. \begin{lemma} \label{QP lemma}Let $b>0$ and $\mathbf{b}$ in $ \mathbb{R} ^{n}$ denote the vector $ ( b,\ldots,b ) $. Suppose $ ( \bar{Q}_{ij} ) _{i,j\in \{ 1,2,\ldots,n \} }$ is a real $n\times n$ positive definite matrix and assume $\bar{Q}$ has nonnegative row sums, that is, \begin{equation} \sum_{j=1}^{n}\bar{Q}_{ij}\geq0\qquad \mbox{for all }i\in \{ 1,\ldots,n \}. 
\label{feas} \end{equation} Then the infimum of the quadratic form $x^{T}\bar{Q}x$ over the subset \[ \mathcal{C}= \bigl\{ ( x_{1},\ldots,x_{n} ) \in \mathbb{R} ^{n}\dvtx x_{j} \geq b\mathbf{,\forall}j=1,\ldots,n \bigr\} \] is attained at $x=\mathbf{b}$, and hence \[ \inf_{x\in\mathcal{C}}x^{T}\bar{Q}x=\mathbf{b}^{T} \bar{Q}\mathbf {b=}b^{2} \sum_{i,j=1}^{n} \bar{Q}_{ij}. \] \end{lemma} \begin{pf} Without loss of generality, we may assume that $b=1$. We can then reformulate the statement as describing the smallest value for the following constrained quadratic programming problem: \[ \min x^{T}\bar{Q}x\qquad \mbox{subject to } x\geq\mathbf{1}, \] where $\mathbf{1}:= ( 1,\ldots,1 ) \in \mathbb{R} ^{n}$ and $x\geq\mathbf{1}$ means $x_{i}\geq\mathbf {1}_{i}=1,\mathbf {\forall }i=1,\ldots,n$. The Lagrangian function of this quadratic programming problem (see, e.g., \cite{boyd}, page 215) is given by \[ L(x,\lambda)=x^{T}\bar{Q}x+\lambda^{T} ( -x+\mathbf{1} ). \] Solving for \[ \nabla_{x}L(x,\lambda)=2\bar{Q}x-\lambda=0 \] and using the strict convexity of the function we deduce that $x^{\ast} =\frac{1}{2}Q^{-1}\lambda$ is the minimiser of $L$. Hence, the (Lagrangian) dual function $g ( \lambda ):=\inf_{x}L ( x,\lambda ) $ is given by \[ g ( \lambda ) =-\tfrac{1}{4}\lambda^{T}\bar {Q}^{-1} \lambda +\lambda^{T}\mathbf{1} \] and the dual problem consists of \[ \max g ( \lambda )\qquad \mbox{subject to } \lambda\geq0. \] As $Q^{-1}$ is positive definite the function $g$ is strictly concave and the local maximum $\lambda^{\ast}=2\bar{Q}\mathbf{1}$ that is obtained by solving $\nabla_{\lambda}g ( \lambda ) =0$ with \begin{equation} \nabla_{\lambda}g ( \lambda ) =-\tfrac{1}{2}\bar{Q}^{-1} \lambda+\mathbf{1} \label{dual1} \end{equation} is also the unique global maximum. In order to prove that $\lambda ^{\ast}$ solves the dual problem, we therefore need only check that it is feasible for the dual problem, that is, we must show that $\lambda^{\ast}\geq \mathbf {0}$. 
But since the components of $\lambda^{\ast}$ are just twice the sum of the respective rows of $\bar{Q}$, this feasibility condition follows at once from assumption \ref{feas}. \end{pf} When $Q$ arises as the covariance matrix of the increments of a Gaussian process, we need to know when the Schur complement of some sub-block of $Q$ will satisfy condition (\ref{feas}). In the context of Gaussian vectors, these Schur complements have a convenient interpretation; they are the covariance matrices which result from partially conditioning on some of the components. It is this identification which motivates the positive conditional covariance condition (Condition \ref{cond dom}). In order to present the proof of the interpolation inequality as transparently as possible, we first gather together some relevant technical comments. To start with, suppose we have two sets of real numbers \[ D= \{ t_{i}\dvtx i=0,1,\ldots,n \} \subset\tilde{D}= \{ \tilde {t}_{i}\dvtx i=0,1,\ldots,\tilde{n} \} \subseteq [ 0,T ] \] ordered in such a way that $0\leq t_{0}<t_{1}<\cdots<t_{n}\leq T$, and likewise for $\tilde{D}$. Suppose $s$ and $t$ be real numbers with $s<t$ and let $Z$ be a continuous Gaussian process. We need to consider how the variance of the increment $Z_{s,t}$ changes when we condition on \[ \mathcal{F}^{D}:=\sigma ( Z_{t_{i-1},t_{i}}\dvtx i=1,\ldots,n ), \] compared to conditioning the larger $\sigma$-algebra \[ \mathcal{F}^{\tilde{D}}:=\sigma ( Z_{\tilde{t}_{i-1},\tilde {t}_{i} }\dvtx i=1,\ldots,\tilde{n} ). \] To simplify the notation a little, we introduce \[ \mathcal{G}=\sigma \bigl( Z_{\tilde{t}_{i-1},\tilde{t}_{i}}\dvtx \{ \tilde {t}_{i-1}, \tilde{t}_{i} \} \cap\tilde{D}\setminus D \neq \varnothing \bigr) , \] so that \[ \mathcal{F}^{\tilde{D}}=\mathcal{F}^{D}\vee\mathcal{G}. 
\] Because \begin{equation} ( Z_{s,t},Z_{t_{0},t_{1}},\ldots,Z_{t_{\tilde{n}-1},t_{\tilde {n}} } ) \in \mathbb{R} ^{\tilde{n}+1} \label{info} \end{equation} is Gaussian, the joint distribution of $Z_{s,t}$ and the vector (\ref{info}) conditional on $\mathcal{F}^{D}$ (or indeed $\mathcal{F}^{\tilde{D}}$) is once again Gaussian, with a random mean but a deterministic covariance matrix. A simple calculation together with the \textit{law of total variance} gives that \begin{eqnarray*} \operatorname{Var} \bigl( Z_{s,t}|\mathcal{F}^{D} \bigr) &=&E \bigl[ \operatorname{Var} \bigl( Z_{s,t}| \mathcal{F}^{D}\vee\mathcal{G} \bigr) \bigr] +\operatorname{Var} \bigl( E \bigl[ Z_{s,t}| \mathcal{F}^{D}\vee\mathcal{G} \bigr] \bigr) \\ &\geq &E \bigl[ \operatorname{Var} \bigl( Z_{s,t}|\mathcal{F}^{D}\vee \mathcal {G} \bigr) \bigr] =\operatorname{Var} \bigl( Z_{s,t}| \mathcal{F}^{\tilde{D}} \bigr), \end{eqnarray*} which is the comparison we sought. We condense these observations into the following lemma. \begin{lemma} \label{monotone}Let $ ( Z_{t} ) _{t\in [ 0,T ] }$ be a Gaussian process, and suppose that $D$ and $\tilde{D}$ are two partitions of $ [ 0,T ] $ with $D\subseteq\tilde{D}$. Then for any $ [ s,t ] \subseteq [ 0,T ] $ we have \[ \operatorname{Var} \bigl( Z_{s,t}|\mathcal{F}^{D} \bigr) \geq \operatorname{Var} \bigl( Z_{s,t}|\mathcal{F}^{\tilde{D}} \bigr). \] \end{lemma} Our aim is to show how the optimisation problem of Lemma \ref{QP lemma} can be used to exhibit lower bounds on 2D Young integrals with respect to $R$. In order to do this, we need to take a detour via two technical lemmas. The first is the following continuity result for the conditional covariance, which we need approximate when passing to a limit from a discrete partition. The situation we will often have is two subintervals $ [ s,t ] \subseteq [ 0,S ] $ of $ [ 0,T ] $, and a sequence of sets $ ( D_{n} ) _{n=1}^{\infty}$of the form \[ D_{n}=D_{n}^{1}\cup D_{n}^{2}. 
\] $ ( D_{n}^{1} ) _{n=1}^{\infty}$ and $ ( D_{n}^{2} ) _{n=1}^{\infty}$ here will be nested sequences of partitions of $ [ 0,s ] $ and $[t,S]$, respectively, with $\operatorname{mesh} ( D_{n}^{i} ) \rightarrow0$ as $n\rightarrow\infty$ for $i=1,2$. If \[ \mathcal{F}^{D}:=\sigma \bigl( Z_{u,v}\dvtx \{ u,v \} \subseteq D \bigr), \] then we can define a filtration $ ( \mathcal{G}_{n} ) _{n=1}^{\infty}$ by $\mathcal{G}_{n}:=\mathcal{F}^{D_{n}^{1}}\vee \mathcal{F}^{D_{n}^{2}}$ and ask about the convergence of \[ \operatorname{Cov} ( Z_{p,q}Z_{u,v}|\mathcal{G}_{n} ) \] as $n\rightarrow\infty$ for subintervals $ [ p,q ] $ and $ [ u,v ] $ are subintervals of $ [ 0,S ] $. The following lemma records the relevant continuity statement. \begin{lemma} \label{continuity} For any $p,q,u,v$ such that $ [ p,q ] $ and $ [ u,v ] $ are subintervals of $ [ 0,S ] \subseteq [ 0,T ] $ we have \[ \operatorname{Cov} ( Z_{p,q}Z_{u,v}|\mathcal{G}_{n} ) \rightarrow\operatorname{Cov} ( Z_{p,q}Z_{u,v}|\mathcal {F}_{0,s} \vee\mathcal{F}_{t,S} ) \] as $n\rightarrow\infty$. \end{lemma} \begin{pf} The martingale convergence theorem gives \[ \operatorname{Cov} ( Z_{p,q}Z_{u,v}|\mathcal{G}_{n} ) \rightarrow\operatorname{Cov} \Biggl( Z_{p,q}Z_{u,v}\bigg|\bigvee _{n=1}^{\infty }\mathcal{G}_{n} \Biggr),\qquad \mbox{a.s. and in }L^{p}\mbox{ for all }p\geq1. \] The continuity of $Z$ and the fact that $\operatorname{mesh} ( D_{n} ) \rightarrow0$ easily implies that, modulo null sets, one has $\bigvee _{n=1}^{\infty}\mathcal{G}_{n}=\mathcal{F}_{0,s}\vee\mathcal{F}_{t,T}$. \end{pf} We now introduce another condition on $Z$, which we will later discard. This condition is virtually the same as Condition~\ref{cond dom}, the only difference being that we insist on the strict positivity of the conditional variance. \begin{condition} \label{prime} Let $ ( Z_{t} ) _{t\in [ 0,T ] }$ be a real-valued continuous Gaussian process. 
We will assume that for every $ [ u,v ] \subseteq [ s,t ] \subseteq [ 0,S ] \subseteq [ 0,T ] $ we have \begin{equation} \operatorname{Cov} ( Z_{s,t},Z_{u,v}| \mathcal{F}_{0,s}\vee \mathcal{F}_{t,S} ) >0. \end{equation} \end{condition} The second technical lemma we need will apply whenever we work with a Gaussian process that satisfies Condition \ref{prime}. It delivers a nested sequence of partitions, with mesh tending to zero, and such that the discretisation of $Z$ along each partition will satisfy the dual feasibility condition [i.e., (\ref{feas}) in Lemma \ref{QP lemma}]. \begin{lemma} \label{technical}Let $ ( Z_{t} ) _{t\in [ 0,T ] }$ be a continuous Gaussian process that satisfies Condition \ref{prime}. Then for every $0\leq s<t\leq S\leq T$ there exists a nested sequence of partitions \[ ( D_{m} ) _{m=1}^{\infty}= \bigl( \bigl\{ t_{i}^{m} \dvtx i=0,1,\ldots,n_{m} \bigr\} \bigr) _{m=1}^{\infty} \] of $ [ 0,S ] $ with the following properties: \begin{longlist}[(1)] \item[(1)] The mesh of $D_{m}$ converges to $0$ as $m\rightarrow\infty$. \item[(2)] One has $ \{ s,t \} \subseteq D_{m}$ for all $m$. \item[(3)] If $Z_{1}^{m}$ and $Z_{2}^{m}$ are the jointly Gaussian vectors, \begin{eqnarray*} Z_{1}^{m} &= &\bigl( Z_{t_{i}^{m},t_{i+1}^{m}}\dvtx t_{i}^{m} \in D_{m}\cap \bigl( [0,s ) \cup [ t,S)\bigr) \bigr), \\ Z_{2}^{m} &= &\bigl( Z_{t_{i}^{m},t_{i+1}^{m}}\dvtx t_{i}^{m} \in D_{m}\cap [ s,t) \bigr), \end{eqnarray*} with respective covariance matrices $Q_{11}^{m}$ and $Q_{22}^{m}$, then the Gaussian vector $ ( Z_{1}^{m},Z_{2}^{m} ) $ has a covariance matrix of the form \[ Q^{m}=\pmatrix{ Q_{11}^{m} & Q_{12}^{m} \vspace*{2pt}\cr \bigl( Q_{12}^{m} \bigr) ^{T} & Q_{22}^{m}}, \] and the Schur complement of $Q_{11}^{m}$ in $Q^{m}$ has nonnegative row sums. \end{longlist} \end{lemma} \begin{pf} See the \hyperref[app]{Appendix}. \end{pf} The next result shows how we can bound from below the 2D Young integral of a H\"{o}lder-continuous $f$ against $R$. 
The lower bound thus obtained is expressed in terms of the minimum of $f$, and the conditional variance of the Gaussian process. \begin{proposition} \label{comparison}Suppose that $R\dvtx [ 0,T ] ^{2}\rightarrow \mathbb{R} $ is the covariance function of some continuous Gaussian process $ ( Z_{t} ) _{t\in [ 0,T ] }$. Suppose that $R$ has finite 2D $\rho $-variation for some $\rho$ in $[1,2)$ and that $Z$ is nondegenerate and has a nonnegative conditional covariance (i.e., satisfies Condition \ref {cond dom} ). Let $\gamma\in ( 0,1 ) $ be such that $1/\rho+\gamma >1$ and assume $f\in C^{\gamma} ( [ 0,T ], \mathbb{R} ) $. Then for every $ [ s,t ] \subseteq [ 0,T ] $ we have the following lower bound on the 2D-Young integral of $f$ against $R$: \[ \int_{ [ 0,T ] ^{2}}f_{u}f_{v}\,dR ( u,v ) \geq \Bigl( \inf_{u\in [ s,t ] }\bigl\vert f ( u ) \bigr\vert ^{2} \Bigr) \operatorname{Var} ( Z_{s,t}| \mathcal{F}_{0,s} \vee\mathcal{F}_{t,T} ). \] \end{proposition} \begin{remark} We emphasise again that $\mathcal{F}_{a,b}$ is the $\sigma$-algebra generated by the increments of the form $Z_{u,v}$ for $u,v\in [ a,b ] $. \end{remark} \begin{pf*}{Proof of Proposition \ref{comparison}} Fix $ [ s,t ] \subseteq [ 0,T ] $, and take $b:=\break \inf_{u\in [ s,t ] }\vert f ( u ) \vert $. \textit{Step} 1: We first note that there is no loss of generality in assuming the stronger Condition \ref{prime} instead of Condition \ref{cond dom}. To see this, let $ ( B_{t} ) _{t\in [ 0,T ] }$ be a Brownian motion, which is independent of $ ( Z_{t} ) _{t\in [ 0,T ] }$, and for every $\varepsilon >0$ define the perturbed process \[ Z_{t}^{\varepsilon}:=Z_{t}+\varepsilon B_{t}. \] It is easy to check that $Z^{\varepsilon}$ satisfies the conditions in the statement. 
Let $\mathcal{F}_{p,q}^{\varepsilon}$ be the $\sigma$-algebra generated by the increments $Z_{u,v}^{\varepsilon}$ between times $p$ and $q$ [note that $\mathcal{F}_{p,q}^{\varepsilon}$ actually equals $\mathcal {F} _{p,q}\vee\sigma ( B_{l,m}\dvtx u\leq l<m\leq q ) $], and note that we have \[ \operatorname{Cov} \bigl( Z_{s,t}^{\varepsilon},Z_{u,v}^{\varepsilon} |\mathcal{F}_{0,s}^{\varepsilon}\vee\mathcal{F}_{t,T}^{\varepsilon } \bigr) =\operatorname{Cov} ( Z_{s,t},Z_{u,v}| \mathcal{F}_{0,s}\vee \mathcal{F}_{t,T} ) + \varepsilon^{2} ( u-v ) >0 \] for every $0\leq s<u<v\leq t\leq T$. It follows that $Z^{\varepsilon}$ satisfies Condition~\ref{prime}. Let $R^{\varepsilon}$ denote the covariance function of $Z^{\varepsilon}$. If we could prove the result with the additional hypothesis of Condition~\ref{prime}, then it would follow that \begin{eqnarray}\label{x prime} \int_{ [ 0,T ] ^{2}}f_{u}f_{v}\,dR^{\varepsilon} ( u,v ) &\geq& b^{2}\operatorname{Var} \bigl( Z_{s,t}^{\varepsilon}| \mathcal{F}_{0,s}^{\varepsilon }\vee \mathcal{F}_{t,T}^{\varepsilon} \bigr) \nonumber \\[-8pt] \\[-8pt] \nonumber &=& b^{2}\operatorname{Var} ( Z_{s,t}|\mathcal{F}_{0,s}\vee \mathcal {F}_{t,T} ) +b^{2}\varepsilon^{2} ( t-s ). \end{eqnarray} Because \[ \int_{ [ 0,T ] ^{2}}f_{u}f_{v}\,dR^{\varepsilon} ( u,v ) =\int_{ [ 0,T ] ^{2}}f_{u}f_{v}\,dR ( u,v ) +\varepsilon ^{2}\vert f\vert _{L^{2} [ 0,T ] }^{2}, \] the result for $Z$ will then follow from (\ref{x prime}) by letting $\varepsilon$ tend to zero. \textit{Step} 2: We now prove the result under the additional assumption of Condition~\ref{prime}. By considering $-f$ if necessary, we may assume that $f$ is bounded from below by $b$ on $ [ s,t ] $. 
Since we now assume Condition \ref{prime} we can use Lemma~\ref{technical} to obtain a nested sequence of partitions $ ( D_{r} ) _{r=1}^{\infty}$ such that $ \{ s,t \} \subset D_{r}$ for all~$r$, $\operatorname{mesh} ( D_{r} ) \rightarrow0$ as $r\rightarrow \infty$, and such that the dual feasibility condition (property 3 in the Lemma~\ref{technical}) holds. Suppose $D= \{ t_{i}\dvtx i=0,1,\ldots ,n \} $ is any partition of $ [ 0,T ] $ in this sequence (i.e., $D=D_{r}$ for some $r$). Then for some $l<m\in \{ 0,1,\ldots,n-1 \} $ we have $t_{l}=s$ and $t_{m}=t$. Denote by $f ( D ) $ the column vector \[ f ( D ) = \bigl( f ( t_{0} ),\ldots,f ( t_{n-1} ) \bigr) ^{T}\in \mathbb{R} ^{n}, \] and $Q= ( Q_{i,j} ) _{1\leq i,j<n}$ the symmetric $n\times n$ matrix with entries \[ Q_{ij}=R\pmatrix{ t_{i-1},t_{i} \vspace*{2pt}\cr t_{j-1},t_{j}} =E [ Z_{t_{i-1},t_{i}}Z_{t_{j-1},t_{j}} ]. \] From the nondegeneracy of $Z$, it follows that $Q$ is positive definite. The Riemann sum approximation to the 2D integral of $f$ against $R$ along the partition $D$ can be written as \begin{eqnarray}\label{Riemann} \sum_{i=1}^{n}\sum _{j=1}^{n}f_{t_{i-1}}f_{t_{j-1}}R\pmatrix{ t_{i-1},t_{i} \vspace*{2pt}\cr t_{j-1},t_{j}} &=&\sum_{i=1}^{n}\sum _{j=1}^{n}f_{t_{i-1}}f_{t_{j-1}}Q_{i,j} \nonumber \\[-8pt] \\[-8pt] \nonumber &=&f ( D ) ^{T}Qf ( D ). \end{eqnarray} If necessary, we can ensure that last $m-l$ components of $f ( D ) $ are bounded below by $b$. To see this, we simply permute its coordinates using any bijective map $\tau\dvtx \{ 1,\ldots,n \} \rightarrow \{ 1,\ldots,n \} $ which has the property that \[ \tau ( l+j ) =n-m+l+j\qquad\mbox{for }j=0,1,\ldots,m-l. \] Fix one such map $\tau$, and let $f_{\tau} ( D ) $ denote the vector resulting from applying $\tau$ to the coordinates of $f ( D ) $. 
Similarly, let $Q_{\tau}= ( Q_{i,j}^{\tau} ) _{1\leq i,j<n}$ be the $n\times n$ matrix \[ Q_{ij}^{\tau}=Q_{\tau ( i ) \tau ( j ) }, \] and note that $Q^{\tau}$ is the covariance matrix of the Gaussian vector \[ Z= ( Z_{t_{\tau ( 1 ) -1,}t_{\tau ( 1 ) }} ,\ldots,Z_{t_{\tau ( n ) -1},t_{\tau ( n ) }} ). \] A simple calculation shows that \[ f ( D ) ^{T}Qf ( D ) =f_{\tau} ( D ) ^{T}Q_{\tau}f_{\tau} ( D ). \] We can apply Lemma \ref{QP lemma} because condition (\ref{feas}) is guaranteed to hold by the properties of the sequence $ ( D_{r} ) _{r=1}^{\infty}$. We deduce that \begin{equation} f ( D ) ^{T}Qf ( D ) =f_{\tau} ( D ) ^{T}Q_{\tau}f_{\tau} ( D ) \geq b^{2}\sum_{i,j=1}^{m-l}S_{ij}, \label{permute} \end{equation} where $S$ is the $ ( m-l ) \times ( m-l ) $ matrix obtained by taking the Schur complement of the leading principal $ ( n-m+l ) \times ( n-m+l ) $ minor of $\tilde{Q}$. As already mentioned, the distribution of a Gaussian vector conditional on some of its components remains Gaussian and the conditional covariance is described by a suitable Schur complement. In this case, this means we have that \begin{eqnarray}\label{Schur cov} &&S=\operatorname{Cov} \bigl[ ( Z_{t_{l},t_{l+1}},\ldots,Z_{t_{m-1} ,t_{m}} ) |Z_{t_{j-1},t_{j}}, \nonumber \\[-8pt] \\[-8pt] \nonumber &&\hspace*{44pt} j\in \{ 1,\ldots,l \} \cup \{ m+1,\ldots,n \} \bigr] . 
\end{eqnarray} If we define \[ \mathcal{F}^{D}:=\sigma \bigl( Z_{t_{j-1},t_{j}}\dvtx j\in \{ 1, \ldots,l \} \cup \{ m+1,\ldots,n \} \bigr), \] to be the $\sigma$-algebra generated by the increments of $Z$ in $D\setminus [ s,t ] $, then using (\ref{Schur cov}) we arrive at \begin{eqnarray}\label{cond var increment} \sum_{i,j=1}^{m-l}S_{ij} &=&\sum _{i,j=1}^{m-l-1}E \bigl[ ( Z_{t_{l+i-1},t_{l+i}} ) ( Z_{t_{l+j-1},t_{l+j}} ) |\mathcal{F} ^{D} \bigr] \nonumber \\ & &{}-\sum_{i,j=1}^{m-l-1}E \bigl[ ( Z_{t_{l+i-1},t_{l+i}} ) |\mathcal{ F}^{D} \bigr] E \bigl[ ( Z_{t_{l+j-1},t_{l+j}} ) |\mathcal {F}^{D} \bigr] \\ &=&E \bigl[ ( Z_{s,t} ) ^{2}|\mathcal{F}^{D} \bigr] -E \bigl[ Z_{s,t}|\mathcal{F}^{D} \bigr] ^{2} =\operatorname{Var} \bigl( Z_{s,t}|\mathcal{F}^{D} \bigr). \nonumber \end{eqnarray} To complete the proof, we note that $\mathcal{F}^{D}\subseteq\mathcal{F} _{0,s}\vee\mathcal{F}_{t,T}$, and exploit the monotonicity of the conditional variance described by Lemma \ref{monotone} to give \begin{equation} \operatorname{Var} \bigl( Z_{s,t}|\mathcal{F}^{D} \bigr) \geq \operatorname{Var} ( Z_{s,t}|\mathcal{F}_{0,s}\vee \mathcal {F} _{t,T} ). \label{projection} \end{equation} Then by combining (\ref{projection}), (\ref{cond var increment}) and (\ref{permute}) in (\ref{Riemann}), we obtain \[ \sum_{i=1}^{n}\sum _{j=1}^{n}f_{t_{i-1}}f_{t_{j-1}}Q_{i,j} \geq b^{2} \operatorname{Var} ( Z_{s,t}| \mathcal{F}_{0,s}\vee\mathcal {F} _{t,T} ). \] Because this inequality holds for any $D\in ( D_{r} ) _{r=1}^{\infty}$, we can apply it for $D=D_{r}$ and let $r\rightarrow \infty$, which yields \[ \int_{ [ 0,T ] ^{2}}f_{u}f_{v}\,dR ( u,v ) \geq b^{2}\operatorname{Var} ( Z_{s,t}|\mathcal{F}_{0,s}\vee\mathcal {F}_{t,T} ), \] whereupon the proof is complete. \end{pf*} We now deliver on a promise we made in Section~\ref{main thm} by proving that the diagonal dominance of the increments implies the positivity of the conditional covariance. 
\begin{corollary} Let $ ( Z_{t} ) _{t\in [ 0,T ] }$ be a real-valued continuous Gaussian process. If $Z$ satisfies Condition \ref{diagonal dominance} then it also satisfies Condition~\ref{cond dom}. \end{corollary} \begin{pf} Fix $s<t$ in $ [ 0,T ] $, let $(D_{n})_{n=1}^{\infty}$ be a sequence of partitions having the properties described in the statement of Lemma \ref{continuity} and suppose $ [ u,v ] \subseteq$ $ [ s,t ] $. From the conclusion of Lemma \ref{continuity}, we have that \begin{equation} \operatorname{Cov} ( Z_{s,t}Z_{u,v}|\mathcal{G}_{n} ) \rightarrow\operatorname{Cov} ( Z_{s,t}Z_{u,v}|\mathcal {F}_{0,s} \vee\mathcal{F}_{t,T} ) \label{ap} \end{equation} as $n\rightarrow\infty$. Let $Z_{n}$ be the Gaussian vector whose components consist of the increments of $Z$ over all the consecutive points in the partition $D_{n}\cup \{ s,u,v,t \} $. Let $Q$ denote the covariance matrix of $Z_{n}$. The left-hand side of (\ref{ap}) is the sum of all the entries in some row of a particular Schur complement of $Q$. $Z$ is assumed to have diagonally dominant increments. Any such Schur complement of $Q$ will therefore be diagonally dominant, since diagonal dominance is preserved under Schur-complementation (see \cite{zhang}). As diagonally dominant matrices have nonnegative row sums, it follows that $\operatorname{Cov} ( Z_{s,t}Z_{u,v}|\mathcal{G}_{n} ) $ is nonnegative, and hence the limit in (\ref{ap}) is also. \end{pf} We are now in a position to generalise the $L^{2}$-interpolation inequality (\ref{itl}) stated earlier. \begin{theorem}[(Interpolation)]\label{interpolation}Let $ ( Z_{t} ) _{t\in [ 0,T ] }$ be a continuous Gaussian process with covariance function $R\dvtx [ 0,T ] ^{2}\rightarrow \mathbb{R} $. Suppose $R$ has finite two-dimensional $\rho$-variation for some $\rho$ in $[1,2)$. Assume that $Z$ is nondegenerate in the sense of Definition \ref{definition nondegeneracy}, and has positive conditional covariance (i.e., satisfies Condition~\ref{cond dom}). 
Suppose $f\in C^{\gamma} ( [ 0,T ], \mathbb{R} ) $ for some $\gamma\in ( 0,1 ) $ with $\gamma+1/\rho>1$.
The H\"{o}lder continuity of $f$ gives a lower bound on the length of this interval in an elementary way \[ \tfrac{1}{2}\Vert f\Vert _{\infty; [ 0,T ] }=\bigl\vert f ( t ) -f ( s ) \bigr\vert \leq\Vert f\Vert _{\gamma; [ 0,T ] }\vert t-s\vert ^{\gamma}, \] which yields \[ \vert t-s\vert \geq \biggl( \frac{\Vert f\Vert _{\infty ; [ 0,T ] }}{2\Vert f\Vert _{\gamma; [ 0,T ] } } \biggr) ^{1/\gamma}. \] Another application of Proposition \ref{comparison} then gives (\ref{inf}). \end{pf} \begin{corollary} \label{interpolation2}Assume Condition \ref{nondeterm} so that the $\rho $-variation of $R$ is H\"{o}lder-controlled, and for some $c>0 $ and some $\alpha\in ( 0,1 ) $ we have the lower bound on the conditional variance \[ \operatorname{Var} ( Z_{s,t}|\mathcal{F}_{0,s}\vee \mathcal {F} _{t,T} ) \geq c ( t-s ) ^{\alpha}. \] Theorem \ref{interpolation} then allows us to bound $\Vert f\Vert _{\infty; [ 0,T ] }$ above by the maximum of \[ 2E \bigl[ Z_{T}^{2} \bigr] ^{-1/2} \biggl( \int _{ [ 0,T ] ^{2} }f_{s}f_{t}\,dR ( s,t ) \biggr) ^{1/2} \] and \[ \frac{2}{\sqrt{c}} \biggl( \int_{ [ 0,T ] ^{2}}f_{s}f_{t}\,dR ( s,t ) \biggr) ^{\gamma/ ( 2\gamma+\alpha ) } \Vert f\Vert _{\gamma; [ 0,T ] }^{\alpha/ ( 2\gamma +\alpha ) }. \] \end{corollary} \begin{pf} This is immediate from Theorem \ref{interpolation}. \end{pf} In particular, if $Z$ is a Brownian motion we have $\operatorname{Var} ( Z_{s,t}|\mathcal{F}_{0,s}\vee\mathcal {F} _{t,T} ) = ( t-s ) $, hence Corollary \ref{interpolation2} shows that \[ \Vert f\Vert _{\infty; [ 0,T ] }\leq2\max \bigl( T^{-1/2}\vert f \vert _{L^{2} [ 0,T ] },\vert f\vert _{L^{2} [ 0,T ] }^{2\gamma/ ( 2\gamma +1 ) }\Vert f\Vert _{\gamma; [ 0,T ] }^{1/ ( 2\gamma+1 ) } \bigr), \] which is exactly (\ref{itl}). We have therefore achieved out goal of generalising this inequality. 
\begin{remark} Another application where we anticipate estimates of this kind being useful is when estimating short-time density asymptotics (see, e.g., the recent works \cite{BauO,I}). Here, frequent use is made of the asymptotic behaviour of the Malliavin covariance matrix as $t\downarrow0$. \end{remark} \section{Malliavin differentiability of the flow} \label{differentiability section} \subsection{High-order directional derivatives} Let $\mathbf{x}$ be in $WG\Omega_{p} ( \mathbb{R} ^{d} ) $ and suppose that the vector fields $V= ( V_{1} ,\ldots,V_{d} ) $ and $V_{0}$ are smooth and bounded. For $t\in [ 0,T ] $ we let $U_{t\leftarrow0}^{\mathbf{x}} ( \cdot ) $ denote the map defined by \[ U_{t\leftarrow0}^{\mathbf{x}} ( \cdot ) \dvtx y_{0}\mapsto y_{t}, \] where $y$ is the solution to the RDE \begin{equation} dy_{t}=V ( y_{t} ) \,d\mathbf{x}_{t}+V_{0} ( y_{t} ) \,dt,\qquad y ( 0 ) =y_{0}. \label{flow} \end{equation} It is well known (see \cite{FV}) that the flow [i.e., the map $y_{0}\mapsto U_{t\leftarrow0}^{\mathbf{x}} ( y_{0} ) $] is differentiable; its derivative (or Jacobian) is the linear map \[ J_{t\leftarrow0}^{\mathbf{x}} ( y_{0} ) ( \cdot ) \equiv \frac{d}{d\varepsilon}U_{t\leftarrow0}^{\mathbf {x}} ( y_{0}+\varepsilon \cdot ) \bigg\vert _{\varepsilon=0}\in L \bigl( \mathbb{R} ^{e}, \mathbb{R} ^{e} \bigr). 
\] If we let $\Phi_{t\leftarrow0}^{\mathbf{x}} ( y_{0} )$, denote the pair \[ \Phi_{t\leftarrow0}^{\mathbf{x}} ( y_{0} ) = \bigl( U_{t\leftarrow 0}^{\mathbf{x}} ( y_{0} ),J_{t\leftarrow0}^{\mathbf {x}} ( y_{0} ) \bigr) \in \mathbb{R} ^{e}\oplus L \bigl( \mathbb{R} ^{e}, \mathbb{R} ^{e} \bigr), \] and if $W= ( W_{1},\ldots,W_{d} ) $ is the collection vector fields given by \[ W_{i} ( y,J ) = \bigl( V_{i} ( y ),\nabla V_{i} ( y ) \cdot J \bigr),\qquad i=1,\ldots,d \] and \[ W_{0} ( y,J ) = \bigl( V_{0} ( y ),\nabla V_{0} ( y ) \cdot J \bigr) \] then $\Phi_{t\leftarrow0}^{\mathbf{x}} ( y_{0} ) $ is the solution\footnote{A little care is needed because the vector fields have linear growth (and hence are not Lip-$\gamma$). But one can exploit the ``triangular'' dependence structure in the vector fields to rule out the possibility of explosion. See \cite{FV} for details.} to the RDE \[ d\Phi_{t\leftarrow0}^{\mathbf{x}}=W \bigl( \Phi_{t\leftarrow 0}^{\mathbf {x} } \bigr) \,d\mathbf{x}_{t}+W_{0} \bigl( \Phi_{t\leftarrow0}^{\mathbf {x} } \bigr) \,dt,\Phi_{t\leftarrow0}^{\mathbf{x}}|_{t=0}= ( y_{0},I ). \] In fact, the Jacobian is invertible as a linear map and the inverse, which we will denote $J_{0\leftarrow t}^{\mathbf{x}} ( y_{0} ) $, is also a solution to an RDE [again jointly with the base flow $U_{t\leftarrow 0}^{\mathbf{x}} ( y_{0} ) $]. We also recall the relation \[ J_{t\leftarrow s}^{\mathbf{x}} ( y ):= \frac {d}{d\varepsilon }U_{t\leftarrow s}^{\mathbf{x}} ( y+\varepsilon\cdot ) \bigg\vert _{\varepsilon=0}=J_{t\leftarrow0}^{\mathbf{x}} ( y ) \cdot J_{0\leftarrow s}^{\mathbf{x}} ( y ). \] \begin{notation} In what follows, we will let \begin{equation} \qquad M_{\cdot\leftarrow0}^{\mathbf{x}} ( y_{0} ) \equiv \bigl( U_{t\leftarrow0}^{\mathbf{x}} ( y_{0} ),J_{t\leftarrow 0}^{\mathbf{x}} ( y_{0} ),J_{0\leftarrow t}^{\mathbf {x}} ( y_{0} ) \bigr) \in \mathbb{R} ^{e}\oplus \mathbb{R} ^{e\times e}\oplus \mathbb{R} ^{e\times e}. 
\label{M} \end{equation} \end{notation} For any path $h$ in $C^{q\mbox{-}\mathrm{var}} ( [ 0,T ] , \mathbb{R} ^{d} ) $ with $1/q+1/p>1$, we can canonically define the translated rough path $T_{h}\mathbf{x}$ (see \cite{FV}). Hence, we have the directional derivative \[ D_{h}U_{t\leftarrow0}^{\mathbf{x}} ( y_{0} ) \equiv \frac {d}{d\varepsilon}U_{t\leftarrow0}^{T_{\varepsilon h}\mathbf{x}} ( y_{0} ) \bigg\vert _{\varepsilon=0}. \] It is not difficult to show that \[ D_{h}U_{t\leftarrow0}^{\mathbf{x}} ( y_{0} ) =\sum _{i=1}^{d} \int _{0}^{t}J_{t\leftarrow s}^{\mathbf{x}} ( y_{0} ) V_{i} \bigl( U_{s\leftarrow0}^{\mathbf{x}} ( y_{0} ) \bigr) \,dh_{s}^{i}, \] which implies by Young's inequality that \begin{equation} \bigl\vert D_{h}U_{t\leftarrow0}^{\mathbf{x}} ( y_{0} ) \bigr\vert \leq C\bigl\Vert M_{\cdot\leftarrow0}^{\mathbf{x}} ( y_{0} ) \bigr\Vert _{p\mbox{-}\mathrm{var}; [ 0,t ] }\vert h\vert _{q\mbox{-}\mathrm{var}; [ 0,t ] }. \label{linear bound} \end{equation} In this section, we will be interested in the form of the higher order directional derivatives \[ D_{h_{1}}\cdots D_{h_{n}}U_{t\leftarrow0}^{\mathbf{x}} ( y_{0} ):= \frac{\partial^{n}}{\partial\varepsilon_{1},\ldots,\partial \varepsilon _{n} }U_{t\leftarrow0}^{T_{\varepsilon_{n}h_{n}}\cdots T_{\varepsilon_{1}h_{1}} \mathbf{x}} ( y_{0} ) \bigg\vert _{\varepsilon_{1}=\cdots =\varepsilon_{n}=0}. \] Our aim will be to obtain bounds of the form (\ref{linear bound}); to do this in a systematic way is a challenging exercise. We rely on the treatment presented in \cite{H3}. For the reader's convenience when comparing the two accounts, we note that \cite{H3} uses the notation \[ \bigl( D_{s}U_{t\leftarrow0}^{\mathbf{x}} ( y_{0} ) \bigr) _{s\in [ 0,T ] }= \bigl( D_{s}^{1}U_{t\leftarrow 0}^{\mathbf{x} } ( y_{0} ),\ldots,D_{s}^{d}U_{t\leftarrow0}^{\mathbf {x}} ( y_{0} ) \bigr) _{s\in [ 0,T ] }\in \mathbb{R} ^{d} \] to identify the derivative. 
The relationship between $D_{s}U_{t\leftarrow 0}^{\mathbf{x}} ( y_{0} ) $ and\break $D_{h}U_{t\leftarrow 0}^{\mathbf {x} } ( y_{0} ) $ is simply that \[ D_{h}U_{t\leftarrow0}^{\mathbf{x}} ( y_{0} ) =\sum _{i=1}^{d} \int _{0}^{t}D_{s}^{i}U_{t\leftarrow0}^{\mathbf{x}} ( y_{0} ) \,dh_{s}^{i}. \] Note, in particular, $D_{s}U_{t\leftarrow0}^{\mathbf{x}} ( y_{0} ) =0$ if $t<s$. \begin{proposition} \label{induction}Assume $\mathbf{x}$ is in $WG\Omega_{p} ( \mathbb{R} ^{d} ) $ and let $V= ( V_{1},\ldots,V_{d} ) $ be a collection of smooth and bounded vector fields. Denote the solution flow to the RDE (\ref{flow}) by \[ U_{t\leftarrow0}^{\mathbf{x}} ( y_{0} ) = \bigl( U_{t\leftarrow 0}^{\mathbf{x}} ( y_{0} ) _{1}, \ldots,U_{t\leftarrow 0}^{\mathbf {x} } ( y_{0} ) _{e} \bigr) \in \mathbb{R} ^{e}. \] Suppose $q\geq1$ and $n\in \mathbb{N} $ and let $ \{ h_{1},\ldots,h_{n} \} $ be any subset of $C^{q\mbox{-}\mathrm{var}} ( [ 0,T ], \mathbb{R} ^{d} ) $. Then the directional derivative $D_{h_{1}}\cdots D_{h_{n} }U_{t\leftarrow0}^{\mathbf{x}} ( y_{0} ) $ exists for any $t\in [ 0,T ] $. 
Moreover, there exists a collection of finite indexing sets \[ \bigl\{ \mathbf{K}_{ ( i_{1},\ldots,i_{n} ) }\dvtx ( i_{1} , \ldots,i_{n} ) \in \{ 1,\ldots,d \} ^{n} \bigr\}, \] such that for every $j\in \{ 1,\ldots,e \} $ we have the identity \begin{eqnarray}\label{high order rep} &&D_{h_{1}}\cdots D_{h_{n}}U_{t\leftarrow0}^{\mathbf{x}} ( y_{0} ) _{j}\nonumber\\ &&\qquad =\sum _{i_{1},\ldots,i_{n}=1}^{d}\sum_{k\in\mathbf{K}_{ ( i_{1},\ldots,i_{n} ) }}\int _{0<t_{1}<\cdots <t_{n}<t}f_{1}^{k} ( t_{1} ) \cdots\\ &&\hspace*{177pt}{} f_{n}^{k} ( t_{n} ) f_{n+1}^{k} ( t ) \,dh_{t_{1}}^{i_{1}}\cdots dh_{t_{n}}^{i_{n}}\nonumber \end{eqnarray} for some functions $f_{l}^{k}$ which are in $C^{p\mbox{-}\mathrm{var}} ( [ 0,T ], \mathbb{R} ) $ for every $l$ and $k$, that is, \[ \bigcup_{ ( i_{1},\ldots,i_{n} ) \in \{ 1,\ldots ,d \} ^{n} }\bigcup_{k\in\mathbf{K}_{ ( i_{1},\ldots,i_{n} ) }} \bigl\{ f_{l} ^{k}\dvtx l=1,\ldots,n+1 \bigr\} \subset C^{p\mbox{-}\mathrm{var}} \bigl( [ 0,T ] , \mathbb{R} \bigr). \] Furthermore, there exists a constant $C$, which depends only on $n$ and $T$ such that \begin{equation} \bigl\vert f_{l}^{k}\bigr\vert _{p\mbox{-}\mathrm{var}; [ 0,T ] }\leq C \bigl( 1+\bigl\Vert M_{\cdot\leftarrow0}^{\mathbf{x}} ( y_{0} ) \bigr\Vert _{p\mbox{-}\mathrm{var}; [ 0,T ] } \bigr) ^{p} \label{high order bound} \end{equation} for every $l=1,\ldots,n+1$, every $k\in\mathbf{K}_{ ( i_{1},\ldots ,i_{n} ) }$ and every $ ( i_{1},\ldots,i_{n} ) \in \{ 1,\ldots,d \} ^{n}$. \end{proposition} \begin{pf} We observe that $D_{h_{1}}\cdots D_{h_{n}}U_{t\leftarrow0}^{\mathbf {x}} ( y_{0} ) _{j}$ equals \begin{equation} \sum_{i_{1},\ldots,i_{n}=1}^{d}\int_{0<t_{1}<\cdots <t_{n}<t}D_{t_{1}\cdots t_{n}}^{i_{1}\cdots i_{n}}U_{t\leftarrow0}^{\mathbf{x}} ( y_{0} ) _{j}\,dh_{t_{1}}^{i_{1}}\cdots dh_{t_{n}}^{i_{n}}. 
\label{high order} \end{equation} The representation for the integrand in (\ref{high order}) derived in Proposition 4.4 in \cite{H3} then allows us to deduce (\ref{high order rep}) and (\ref{high order bound}). \end{pf} \subsubsection{Malliavin differentiability} We now switch back to the context of a continuous Gaussian process $ ( X_{t} ) _{t\in [ 0,T ] }= ( X_{t}^{1},\ldots ,X_{t} ^{d} ) _{t\in [ 0,T ] }$ with i.i.d. components associated to the abstract Wiener space $ ( \mathcal{W},\mathcal{H},\mu ) $. Under the assumption of finite 2D $\rho$-variation, we have already remarked that, for any $p>2\rho$, $X$ has a unique natural lift to a geometric $p$-rough path $\mathbf{X}$. But the assumption of finite $\rho $-variation on the covariance also gives rise to the embedding \begin{equation} \mathcal{H\hookrightarrow}C^{q\mbox{-}\mathrm{var}} \bigl( [ 0,T ], \mathbb{R} ^{d} \bigr) \label{CM embedding} \end{equation} for the Cameron--Martin space, for any $1/p+1/q>1$, \cite{CFV}, Proposition~2. The significance of this result is twofold. First, it is proved in \cite{CFV}, Proposition~3, that it implies the existence of a (measurable) subset $\mathcal{V\subset W}$ with $\mu ( \mathcal{V} ) =1$ on which \[ T_{h}\mathbf{X} ( \omega ) \equiv\mathbf{X} ( \omega +h ) \] for all $h\in\mathcal{H}$ simultaneously. 
It follows that the Malliavin derivative\break $\mathcal{D}U_{t\leftarrow0}^{\mathbf{X} ( \omega ) } ( y_{0} ) \dvtx \mathcal{H\rightarrow} \mathbb{R} ^{e}$ \begin{equation} \mathcal{D}U_{t\leftarrow0}^{\mathbf{X} ( \omega ) } ( y_{0} ) \dvtx h\mathcal{ \mapsto D}_{h}U_{t\leftarrow0}^{\mathbf {X} ( \omega ) } ( y_{0} ) := \frac{d}{d\varepsilon }U_{t\leftarrow0}^{\mathbf{X} ( \omega+\varepsilon h ) } ( y_{0} ) \bigg\vert _{\varepsilon=0}, \label{SGD} \end{equation} coincides with the directional derivative of the previous section, that is, \begin{equation} \frac{d}{d\varepsilon}U_{t\leftarrow0}^{\mathbf{X} ( \omega +\varepsilon h ) } ( y_{0} ) \bigg\vert _{\varepsilon=0}= \frac {d}{d\varepsilon}U_{t\leftarrow0}^{T_{\varepsilon h}\mathbf{x}} ( y_{0} ) \bigg\vert _{\varepsilon=0}. \label{coincide} \end{equation} The second important consequence results from combining (\ref{CM embedding}), (\ref{coincide}) and (\ref{linear bound}), namely that \begin{equation} \bigl\Vert \mathcal{D}U_{t\leftarrow0}^{\mathbf{X} ( \omega ) } ( y_{0} ) \bigr\Vert _{\mathrm{op}}\leq C\bigl\Vert M_{\cdot \leftarrow 0}^{\mathbf{X} ( \omega ) } ( y_{0} ) \bigr\Vert _{p\mbox{-}\mathrm{var}; [ 0,t ] }. \label{op norm bound} \end{equation} If we can show that the right-hand side of (\ref{op norm bound}) has finite positive moments of all order, then these observations lead to the conclusion that \[ Y_{t}=U_{t\leftarrow0}^{\mathbf{X}}(y_{0})\in\bigcap _{p>1}\mathbb {D}^{1,p} \bigl( \mathbb{R} ^{e} \bigr), \] where $\mathbb{D}^{k,p}$ is the Shigekawa--Sobolev space (see Nualart \cite{nualart}). The purpose of Proposition~\ref{induction} is to extend this argument to the higher order derivatives. 
We will make this more precise shortly, but first we remark that the outline just given is what motivates the assumption \[ \mathcal{H\hookrightarrow}C^{q\mbox{-}\mathrm{var}} \bigl( [ 0,T ], \mathbb{R} ^{d} \bigr) \] detailed in Condition \ref{standing assumption}.\hskip.2pt\footnote{The requirement of complementary regularity in the Condition \ref{standing assumption} then amounts to $\rho\in [1,3/2)$. This covers BM, the OU process and the Brownian bridge (all with $\rho=1$) and fBm for $H>1/3$ (taking $\rho=1/2H$). For the special case of fBm, one can actually improve on this general embedding statement via Remark \ref{fBM embedding}. The requirement of complementary regularity then leads to the looser restriction $H>1/4$.} The following theorem follows from the recent paper \cite{CLL}. It asserts the sufficiency of Condition \ref{standing assumption} to show the existence of finite moments for the $p$-variation of the Jacobian of the flow (and its inverse). \begin{theorem}[{[Cass--Litterer--Lyons (CLL)]}] \label{CLL}Let $ ( X_{t} ) _{t\in [ 0,T ] }$ be a continuous, centred Gaussian process in $ \mathbb{R} ^{d}$ with i.i.d. components. Let $X$ satisfy Condition \ref{standing assumption}, so that for some $p\geq1$, $X$ admits a natural lift to a geometric $p$-rough path $\mathbf{X}$. Assume $V= ( V_{0},V_{1},\ldots,V_{d} ) $ is any collection of smooth bounded vector fields on $ \mathbb{R} ^{e}$ and let $U_{t\leftarrow0}^{\mathbf{X}} ( \cdot ) $ denote the solution flow to the RDE \begin{eqnarray*} dU_{t\leftarrow0}^{\mathbf{X}} ( y_{0} ) &=&V \bigl( U_{t\leftarrow 0}^{\mathbf{X}} ( y_{0} ) \bigr) \,d\mathbf {X}_{t}+V_{0} \bigl( U_{t\leftarrow0}^{\mathbf{X}} ( y_{0} ) \bigr) \,dt, \\ U_{0\leftarrow0}^{\mathbf{X}} ( y_{0} ) &=&y_{0}. 
\end{eqnarray*} Then the map $U_{t\leftarrow0}^{\mathbf{X}} ( \cdot ) $ is differentiable with derivative $J_{t\leftarrow0}^{\mathbf{X}} ( y_{0} ) \in \mathbb{R} ^{e\times e}$;\break $J_{t\leftarrow0}^{\mathbf{X}} ( y_{0} ) $ is invertible as a linear map with inverse denoted by $J_{0\leftarrow t}^{\mathbf{X}} ( y_{0} ) $. Furthermore, if we define \[ M_{\cdot\leftarrow0}^{\mathbf{X}} ( y_{0} ) \equiv \bigl( U_{t\leftarrow0}^{\mathbf{X}} ( y_{0} ),J_{t\leftarrow 0}^{\mathbf{X}} ( y_{0} ),J_{0\leftarrow t}^{\mathbf {X}} ( y_{0} ) \bigr) \in \mathbb{R} ^{e}\oplus \mathbb{R} ^{e\times e}\oplus \mathbb{R} ^{e\times e}, \] and assume $X$ satisfies Condition \ref{standing assumption}, we have that \[ \bigl\Vert M_{\cdot\leftarrow0}^{\mathbf{X}} ( y_{0} ) \bigr\Vert _{p\mbox{-}\mathrm{var}; [ 0,T ] }\in\bigcap_{q\geq 1}L^{q} ( \mu ). \] \end{theorem} \begin{pf} This follows by repeating the steps of \cite{CLL} generalised to incorporate a drift term. \end{pf} \begin{remark} Under the additional assumption that the covariance $R$ has finite H\"older-controlled $\rho$-variation, it is possible to prove a version of this theorem showing that \[ \bigl\Vert M_{\cdot\leftarrow0}^{\mathbf{X}} ( y_{0} ) \bigr\Vert _{1/p}\in\bigcap_{q\geq1}L^{q} ( \mu ). \] \end{remark} \subsection{Proof that \texorpdfstring{$U_{t\leftarrow0}^{\mathbf{X}(\cdot)}(y_{0})\in\mathbb{D}^{\infty}(\mathbb{R}^{e})$}{$U_{t leftarrow 0}^{X(cdot)}(y_{0})in D^{infty}(R^{e})$}} We have already seen that appropriate assumptions on the covariance lead to the observation that for all $h\in\mathcal{H}$, \[ D_{h}U_{t\leftarrow0}^{\mathbf{X} ( \omega ) } ( y_{0} ) \equiv \frac{d}{d\varepsilon}U_{t\leftarrow0}^{T_{h}\mathbf {X} ( \omega ) } ( y_{0} ) \bigg\vert _{\varepsilon=0} \] for all $\omega$ in a set of $\mu$-full measure. We will show that the Wiener functional $\omega\mapsto U_{t\leftarrow0}^{\mathbf{X} ( \omega ) } ( y_{0} ) $ belongs to the Sobolev space $\mathbb {D}^{\infty } ( \mathbb{R} ^{e} ) $. 
Recall that \[ \mathbb{D}^{\infty} \bigl( \mathbb{R} ^{e} \bigr):=\bigcap_{p>1} \bigcap_{k=1}^{\infty}\mathbb {D}^{k,p} \bigl( \mathbb{R} ^{e} \bigr), \] where $\mathbb{D}^{k,p}$ is the usual Shigekawa--Sobolev space, which is defined as the completion of the smooth random variables with respect to a Sobolev-type norm (see Nualart \cite{nualart}). There is an equivalent characterisation of the spaces $\mathbb{D}^{k,p}$ (originally due to Kusuoka and Stroock), which is easier to use in the present context. We briefly recall the main features of this characterisation starting with the following definitions. Suppose $E$ is a given Banach space and $F\dvtx \mathcal {W\rightarrow }E$ is a measurable function. Recall (see Sugita \cite{sugita}) that $F$ is called ray absolutely continuous (RAC) if for every $h\in\mathcal {H}$, there exists a measurable map $\tilde{F}_{h}\dvtx \mathcal{W\rightarrow}E$ satisfying \[ F ( \cdot ) =\tilde{F}_{h} ( \cdot ), \qquad \mu \mbox{-a.e.,} \] and for every $\omega\in\mathcal{W}$ \[ t\mapsto\tilde{F}_{h} ( \omega+th ) \qquad\mbox{is absolutely continuous in }t\in \mathbb{R} . \] And furthermore, $F$ is called stochastically G\^ateaux differentiable (SGD) if there exists a measurable $G\dvtx \mathcal{W\rightarrow}L ( \mathcal {H} ,E ) $, such that for any $h\in\mathcal{H}$ \[ \frac{1}{t} \bigl[ F ( \cdot+th ) -F ( \cdot ) \bigr] \stackrel{\mu} { \rightarrow}G ( \omega ) ( h ) \qquad\mbox{as }t\rightarrow0, \] where $\stackrel{\mu}{\rightarrow}$ indicates convergence in $\mu$-measure. If $F$ is SGD, then its derivative $G$ is unique $\mu$-a.s. and we denote it by $\mathcal{D}F$. Higher order derivatives are defined inductively in the obvious way. Hence, $\mathcal{D}^{n}F$ $ ( \omega ) $ (if it exists) is a multi-linear map (in $n$ variables) from $\mathcal{H}$ to $E$. 
We now define the spaces $\tilde{\mathbb{D}}^{k,p} ( \mathbb{R} ^{e} ) $ for $1<p<\infty$ by \[ \tilde{\mathbb{D}}^{1,p} \bigl( \mathbb{R} ^{e} \bigr):= \bigl\{ F\in L^{p} \bigl( \mathbb{R} ^{e} \bigr) \dvtx F\mbox{ is RAC and SGD, } \mathcal{D}F\in L^{p} \bigl( L \bigl( \mathcal{H}, \mathbb{R} ^{e} \bigr) \bigr) \bigr\}, \] and for $k=2,3,\ldots.$ \[ \tilde{\mathbb{D}}^{k,p} \bigl( \mathbb{R} ^{e} \bigr):= \bigl\{ F\in\tilde{\mathbb{D}}^{k-1,p} \bigl( \mathbb{R} ^{e} \bigr) \dvtx \mathcal{D}F\in\tilde{\mathbb{D}}^{k-1,p} \bigl( L \bigl( \mathcal{H}, \mathbb{R} ^{e} \bigr) \bigr) \bigr\}. \] \begin{theorem}[{(Sugita \cite{sugita})}]\label{sugita}For $1<p<\infty$ and $k\in \mathbb{N,} $ we have $\tilde{\mathbb{D}}^{k,p} ( \mathbb{R} ^{e} ) =\mathbb{D}^{k,p} ( \mathbb{R} ^{e} ) $. \end{theorem} It follows immediately from this result that we have \[ \mathbb{D}^{\infty} \bigl( \mathbb{R} ^{e} \bigr) =\bigcap_{p>1} \bigcap_{k=1}^{\infty}\tilde{\mathbb{D}}^{k,p} \bigl( \mathbb{R} ^{e} \bigr). \] With these preliminaries out the way, we can prove the following. \begin{proposition} \label{sobolev}Suppose $ ( X_{t} ) _{t\in [ 0,T ] }$ is an $ \mathbb{R} ^{d}$-valued, zero-mean Gaussian process with i.i.d. components associated with the abstract Wiener space $ ( \mathcal{W},\mathcal{H},\mu ) $. Assume that for some $p\geq1$, $X$ lifts to a geometric $p$-rough path $\mathbf{X}$. Let $V= ( V_{0},V_{1},\ldots,V_{d} ) $ be a collection of $C^{\infty}$-bounded vector fields on $ \mathbb{R} ^{e}$, and let $U_{t\leftarrow0}^{\mathbf{X} ( \omega ) } ( y_{0} ) $ denote the solution flow of the RDE \[ dY_{t}=V ( Y_{t} ) \,d\mathbf{X}_{t} ( \omega ) +V_{0} ( Y_{t} ) \,dt,\qquad Y ( 0 ) =y_{0}. 
\] Then, under the assumption that $X$ satisfies Condition \ref{standing assumption}, we have that the Wiener functional \[ U_{t\leftarrow0}^{\mathbf{X} ( \cdot ) } ( y_{0} ) \dvtx \omega\mapsto U_{t\leftarrow0}^{\mathbf{X} ( \omega ) } ( y_{0} ) \] is in $\mathbb{D}^{\infty} ( \mathbb{R} ^{e} ) $ for every $t\in [ 0,T ] $. \end{proposition} \begin{pf} We have already remarked that Condition \ref{standing assumption} implies that on a set of $\mu$-full measure \begin{equation} T_{h}\mathbf{X} ( \omega ) \equiv\mathbf{X} ( \omega +h ) \label{trans} \end{equation} for all $h\in\mathcal{H}$. It easily follows that $U_{t\leftarrow 0}^{\mathbf{X} ( \cdot ) } ( y_{0} ) $ is RAC. Furthermore, its stochastic G\^ateaux derivative is precisely the map $\mathcal{D}U_{t\leftarrow0}^{\mathbf{X} ( \omega ) } ( y_{0} ) $ defined in (\ref{SGD}). The relation~(\ref{trans}) implies that the directional and Malliavin derivatives coincide (on a set of $\mu $-full measure), hence $\mathcal{D}U_{t\leftarrow0}^{\mathbf{X} ( \omega ) } ( y_{0} ) \in L ( \mathcal{H}, \mathbb{R} ^{e} ) $ is the map \[ \mathcal{D}U_{t\leftarrow0}^{\mathbf{X} ( \omega ) } ( y_{0} ) \dvtx h\mapsto D_{h}U_{t\leftarrow0}^{\mathbf{X} ( \omega ) } ( y_{0} ). \] We have shown in (\ref{op norm bound}) that \begin{equation} \bigl\Vert \mathcal{D}U_{t\leftarrow0}^{\mathbf{X} ( \omega ) } ( y_{0} ) \bigr\Vert _{\mathrm{op}}\leq C\bigl\Vert M_{\cdot \leftarrow 0}^{\mathbf{X}} ( y_{0} ) \bigr\Vert _{p\mbox{-}\mathrm{var}; [ 0,T ] }, \label{op norm bound 2} \end{equation} where \begin{equation} M_{\cdot\leftarrow0}^{\mathbf{X}} ( y_{0} ) \equiv \bigl( U_{t\leftarrow0}^{\mathbf{X}} ( y_{0} ),J_{t\leftarrow 0}^{\mathbf{X}} ( y_{0} ),J_{0\leftarrow t}^{\mathbf {X}} ( y_{0} ) \bigr). \label{eq:def-M} \end{equation} It follows from Theorem \ref{CLL} that \[ \bigl\Vert M_{\cdot\leftarrow0}^{\mathbf{X}} ( y_{0} ) \bigr\Vert _{p\mbox{-}\mathrm{var}; [ 0,T ] }\in\bigcap_{p\geq 1}L^{p} ( \mu ). 
\] Using this together with (\ref{op norm bound 2}) proves that $U_{t\leftarrow 0}^{\mathbf{X} ( \cdot ) } ( y_{0} ) $ is in $\bigcap_{p>1}\tilde{\mathbb{D}}^{1,p} ( \mathbb{R} ^{e} ) $ which equals $\bigcap_{p>1}\mathbb{D}^{1,p} ( \mathbb{R} ^{e} ) $ by Theorem \ref{sugita}. We prove that $U_{t\leftarrow0}^{\mathbf{X} ( \cdot ) } ( y_{0} ) $ is in $\bigcap_{p>1}\tilde{\mathbb{D}}^{k,p} ( \mathbb{R} ^{e} ) $ for all $k\in \mathbb{N} $ by induction. If $U_{t\leftarrow0}^{\mathbf{X} ( \cdot ) } ( y_{0} ) \in\tilde{\mathbb{D}}^{k-1,p} ( \mathbb{R} ^{e} ) $ then, by the uniqueness of the stochastic G\^ateaux derivative, we must have \[ \mathcal{D}^{k-1}U_{t\leftarrow0}^{\mathbf{X} ( \omega ) } ( y_{0} ) ( h_{1},\ldots,h_{k-1} ) =D_{h_{1}}\cdots D_{h_{k} }U_{t\leftarrow0}^{\mathbf{X} ( \omega ) } ( y_{0} ). \] It is then easy to see that $\mathcal{D}^{k-1}U_{t\leftarrow 0}^{\mathbf {X} ( \omega ) } ( y_{0} ) $ is RAC and SGD. Moreover, the stochastic G\^ateaux derivative is \[ \mathcal{D}^{k}U_{t\leftarrow0}^{\mathbf{X} ( \omega ) } ( y_{0} ) \dvtx ( h_{1},\ldots,h_{k} ) =D_{h_{1}}\cdots D_{h_{k} }U_{t\leftarrow0}^{\mathbf{X} ( \omega ) } ( y_{0} ). \] It follows from Proposition \ref{induction} together with Condition \ref{standing assumption} that we can bound the operator norm of $\mathcal{D}^{k}U_{t\leftarrow0}^{\mathbf{X} ( \omega ) } ( y_{0} ) $ in the following way: \[ \bigl\|\mathcal{D}^{k}U_{t\leftarrow0}^{\mathbf{X} ( \omega ) } ( y_{0} ) \bigr\|_{\mathrm{op}}\leq C \bigl( 1+\bigl\Vert M_{\cdot\leftarrow0} ^{\mathbf{X} ( \omega ) } ( y_{0} ) \bigr\Vert _{p\mbox{-}\mathrm{var}; [ 0,T ] } \bigr) ^{ ( k+1 ) p} \] for some nonrandom constants $C>0$. The conclusion that $U_{t\leftarrow 0}^{\mathbf{X} ( \cdot ) } ( y_{0} ) \in\break \bigcap_{p>1}\mathbb{D}^{k,p} ( \mathbb{R} ^{e} ) $ follows at once from Theorems \ref{CLL} and \ref{sugita}. 
\end{pf} \subsection{Note added in proof} Shortly before the article went to press, the authors were made aware of a mistake in the proof of Proposition \ref{sobolev}: control of the operator norms of $\mathcal{D}^{k}U_{t\leftarrow0}^{\mathbf{X}( \omega) } ( y_{0})$ does not imply Malliavin smoothness; instead control of the stronger Hilbert--Schmidt norms is required. In a more restrictive setting, this stronger control has been obtained in \cite{H3}. At the level of generality considered in this article, this result has recently been obtained in \cite{Ina13}, so that the statement of Proposition \ref{sobolev} does hold and none of our results are affected. \section{Smoothness of the density: The proof of the main theorem} \label{section proof main theorem} \label{sec:proof-main-result} This section is devoted to the proof of our H\"ormander-type Theorem \ref{main theorem}. As mentioned in the \hyperref[sec1]{Introduction}, apart from rather standard considerations concerning probabilistic proofs of H\"ormander's theorem (see, e.g., \cite{H3}), this boils down to the following steps: \begin{longlist}[(1)] \item[(1)] Let $W$ be a smooth and bounded vector field in $\mathbb{R}^{e}$. Following \cite{H3}, denote by $ ( Z_{t}^{W} ) _{t\in [ 0,T ] }$ the process \begin{equation} Z_{t}^{W}=J_{0\leftarrow t}^{\mathbf{X}}W \bigl( U_{t\leftarrow 0}^{\mathbf{X} } ( y_{0} ) \bigr). \label{defn Z} \end{equation} Then assuming Conditions \ref{nondeterm} and \ref{cond dom} we get a bound on $|Z^{W}|_{\infty}$ in terms of the Malliavin matrix $C_{T}$ defined at \eqref{eq:def-malliavin-matrix}. This will be the content of Proposition \ref{CMatrix}. \item[(2)] We invoke iteratively our Norris lemma (Theorem \ref{thm:NlemmaRP}) to processes like $Z^{W}$ in order to generate enough upper bounds on Lie brackets of our driving vector fields at the origin. 
\end{longlist} In order to perform this second step, we first have to verify the assumptions of Theorem \ref{thm:NlemmaRP} for the process $M_{\cdot\leftarrow 0}^{\mathbf{x}} ( y_{0} ) $ defined by \eqref{eq:def-M}. Namely, we shall see that $M_{\cdot\leftarrow0}^{\mathbf{x}} ( y_{0} ) $ is a process controlled by $\mathbf{X}$ in the sense of Definition \ref{def:controlled-paths} and relation~\eqref{eq:dcp-increment-y-d-dim}. \begin{proposition} \label{CLL mod}Suppose $ ( X_{t} ) _{t\in [ 0,T ] }$ satisfies the condition of Theorem~\ref{CLL}. In particular, $X$ has a lift to $\mathbf{X,}$ a geometric-p rough path for some $p>1$ which is in $C^{0,\gamma}([0,T];G^{\lfloor p\rfloor}(\mathbb{R}^{d}))$ for $\gamma=1/p$. Then $M_{\cdot\leftarrow0}^{\mathbf{x}} ( y_{0} ) $ is a process controlled by $\mathbf{X}$ in the sense of Definition \ref{def:controlled-paths} and \[ \bigl\Vert M_{\cdot\leftarrow0}^{\mathbf{X}} ( y_{0} ) \bigr\Vert _{\mathcal{Q}_{\mathbf{X}}^{\gamma}}\in\bigcap_{p\geq1}L^{p} ( \Omega ). \] \end{proposition} \begin{pf} For notational sake, the process $M_{\cdot\leftarrow0}^{\mathbf {X}} ( y_{0} ) $ will be denoted by $M$ only. It is readily checked that $M$ is solution to a rough differential equation driven by~$\mathbf{X}$, associated to the vector fields given by \begin{equation} \label{eq:coefficients-M}F_{i} ( y,J,K ) = \bigl( V_{i} ( y ), \nabla V_{i} ( y ) \cdot J, -K \cdot\nabla V_{i} ( y ) \bigr), \qquad i=0,\ldots,d. \end{equation} This equation can be solved either by genuine rough paths methods or within the landmark of algebraic integration. As mentioned in Proposition \ref{prop:integral-ctrld-path}, both notions of solution coincide thanks to approximation procedures. This finishes the proof of our claim $M\in \mathcal{Q}_{\mathbf{X}}^{\gamma}$. 
In order to prove integrability of $M$ as an element of $\mathcal{Q} _{\mathbf{X}}^{\gamma}$, let us write the equation governing the dynamics of $M$ under the form \[ dM_{t}= F_{0}(M_{t}) \,dt + \sum _{i=1}^{d}F_{i}(M_{t}) \,d \mathbf{X}_{t}^{i}, \] where $\mathbf{X}$ is our Gaussian rough path of order at most $N=3$. The expansion of $M$ as a controlled process is simply given by the Euler scheme introduced in \cite{FV}, Proposition 10.3. More specifically, $M$ admits a decomposition \eqref{eq:dcp-increment-y} of the form: \[ M_{s,t}^{j}=M_{s}^{j,0} (t-s) + M_{s}^{j,i_{1}} \mathbf{X}_{s,t}^{1,i_{1}} +M_{s}^{j,i_{1},i_{2}} \mathbf{X}_{s,t}^{2,i_{1},i_{2}}+R_{s,t}^{j,M}, \] with \begin{eqnarray*} M_{s}^{j,0} &=& F_{0}^{j}(M_{s}),\qquad M_{s}^{j,i_{1}}=F_{i_{1}}^{j} (M_{s}), \qquad M_{s}^{j,i_{1},i_{2}}=F_{i_{2}}F_{i_{1}}^{j}(M_{s} ),\\ \bigl|R_{s,t}^{j,M}\bigr|&\leq& c_{M}|t-s|^{3\gamma}. \end{eqnarray*} With the particular form \eqref{eq:coefficients-M} of the coefficient $F$ and our assumptions on the vector fields $V$, it is thus readily checked that \[ \Vert M\Vert_{\mathcal{Q}_{\mathbf{X}}^{\gamma}}\leq c_{V} \bigl( 1+\Vert J \Vert_{\infty}^{2}+\Vert J^{-1}\Vert_{\infty}^{2}+ \Vert J\Vert _{\gamma }+\Vert U\Vert_{\gamma} \bigr), \] and the right-hand side of the latter relation admits moments of all order thanks to Theorem~\ref{CLL} and the remark which follows it. \end{pf} Define $\mathcal{L}_{\mathbf{x}} ( y_{0},\theta,T ) $ to be the quantity \[ \mathcal{L}_{\mathbf{x}} ( y_{0},\theta,T ):=1+L_{\theta } ( x ) ^{-1}+\vert y_{0}\vert +\bigl\Vert M_{\cdot \leftarrow 0}^{\mathbf{x}} ( y_{0} ) \bigr\Vert _{\mathcal{Q}_{\mathbf {x} }^{\gamma}}+\mathcal{N}_{\mathbf{x},\gamma}. \] \begin{corollary} \label{holder roughness integrability}Under the assumptions of Proposition \ref{CLL mod}, we have \[ \mathcal{L}_{\mathbf{x}} ( y_{0},\theta,T ) \in\bigcap _{p\geq 1}L^{p} ( \Omega ). 
\] \end{corollary} \begin{pf} We recall that the standing assumptions imply that $\mathbf{\|X\|} _{\gamma;[ 0,T ] }$ has a Gaussian tail [see (\ref{gauss}) from Section~\ref{rough paths}]. It is easily deduce from this that \[ \mathcal{N}_{\mathbf{X},\gamma}\in\bigcap_{p\geq1}L^{p} ( \Omega ). \] Similarly, we see from Corollary \ref{l theta integrability} and Proposition \ref{CLL mod} that $L_{\theta} ( x ) ^{-1}$ and $\Vert M_{\cdot\leftarrow0}^{\mathbf{x}} ( y_{0} ) \Vert _{\mathcal{Q}_{\mathbf{x}}^{\gamma}}$ have moments of all orders and the claim follows. \end{pf} \begin{definition} We define the sets of vector fields $\mathcal{V}_{k}$ for $k\in \mathbb{N} $ inductively by \[ \mathcal{V}_{1}= \{ V_{i}\dvtx i=1,\ldots,d \}, \] and then \[ \mathcal{V}_{n+1}= \bigl\{ [ V_{i},W ] \dvtx i=0,1,\ldots,d,W \in\mathcal{V}_{n} \bigr\}. \] \end{definition} \begin{proposition} \label{CMatrix} Let $ ( X_{t} ) _{t\in [ 0,T ] }= ( X_{t}^{1},\ldots,X_{t}^{d} ) _{t\in [ 0,T ] }$ be a continuous Gaussian process, with i.i.d. components associated to the abstract Wiener space $ ( \mathcal{W},\mathcal{H},\mu ) $. Assume that $X$ satisfies the assumptions of Theorem \ref{main theorem}. Then there exist real numbers $p$ and $\theta$ satisfying $2/p>$ $\theta>\alpha/2$ such that: \textup{(i)} $X$ is $\theta$-H\"{o}lder rough and \textup{(ii)} $X$ has a natural lift to a geometric $p$ rough path $\mathbf{X}$ in $C^{0,1/p}([0,T];G^{\lfloor p\rfloor }(\mathbb{R}^{d}))$. For $t\in(0,T]$, let \[ C_{t}=\sum_{i=1}^{d}\int _{ [ 0,t ] ^{2}}J_{t\leftarrow s}^{\mathbf{X}} ( y_{0} ) V_{i} ( Y_{s} ) \otimes J_{t\leftarrow s^{\prime}}^{\mathbf{X}} ( y_{0} ) V_{i} ( Y_{s^{\prime}} ) \,dR \bigl( s,s^{\prime} \bigr), \] and suppose $k\in \mathbb{N} \cup \{ 0 \} $. 
Then there exist constants $\mu=\mu ( k ) >0$ and $C=C ( t,k ) >0$ such that for all $W\in \mathcal{V}_{k}$ and all $v\in \mathbb{R} ^{e}$ with $\vert v\vert =1$, we have \begin{equation} \bigl\vert \bigl\langle v,Z_{\cdot}^{W} \bigr\rangle\bigr \vert _{\infty; [ 0,t ] }\leq C\mathcal{L}_{\mathbf{X}} ( y_{0} ,\theta,t ) ^{\mu} \bigl( v^{T}C_{t}v \bigr) ^{\mu}. \label {conc} \end{equation} \end{proposition} \begin{pf} Let us prove the first assertion. To do this, we note that the constraint on $\rho$ implies that $X$ lifts to a geometric $p$-rough path for any $p>2\rho$. Because the $\rho$-variation is assumed to be H\"{o}lder-controlled, it follows from \cite{FV} that $\mathbf{X}$ is in $C^{0,1/p}([0,T];G^{\lfloor p\rfloor}(\mathbb{R}^{d}))$. By assumption $\alpha<2/\rho$, therefore we may always choose $p$ close enough to $2\rho$ in order that \[ \frac{2}{p}>\frac{\alpha}{2}. \] On the other hand, $X$ is $\theta$-H\"{o}lder rough for any $\theta >\alpha/2$ by Corollary \ref{l theta integrability}. Hence, there always exist $p$ and $\theta$ with the stated properties. We have that \begin{equation} v^{T}C_{t}v=\sum_{i=1}^{d} \Lambda_{t}^{i} \qquad\mbox{with } \Lambda_{t}^{i} \equiv\int_{ [ 0,t ] ^{2}}f^{i} ( s ) f^{i} \bigl( s^{\prime} \bigr) \,dR \bigl( s,s^{\prime} \bigr), \label{covariance matrix} \end{equation} where we have set $f^{i} ( s ):= \langle v,J_{t\leftarrow s}^{\mathbf{X}} ( y_{0} ) V_{i} ( y_{s} ) \rangle = \langle v,Z_{s}^{V_{i}} \rangle$. Furthermore, because the hypotheses of Theorem \ref{interpolation} and Corollary \ref{interpolation2} are satisfied, we can deduce that \begin{equation} \bigl\vert f^{i}\bigr\vert _{\infty; [ 0,t ] }\leq2\max \biggl[ \frac{\vert \Lambda_{t}^{i} \vert ^{1/2}}{E [ X_{t}^{2} ] ^{1/2}},\frac{1}{\sqrt{c}}\bigl\vert \Lambda_{t}^{i} \bigr\vert ^{\gamma / ( 2\gamma+\alpha ) }\bigl\vert f^{i}\bigr\vert _{\gamma; [ 0,t ] }^{\alpha/ ( 2\gamma+\alpha ) } \biggr] \label{in} \end{equation} for $i=1,\ldots,d$. 
On the other hand, Young's inequality for 2D integrals (see \cite{FV07}) gives \begin{equation} \bigl\vert \Lambda_{t}^{i} \bigr\vert \lesssim \bigl[ \bigl\vert f^{i}\bigr\vert _{\gamma; [ 0,t ] }+\bigl\vert f^{i} ( 0 ) \bigr\vert \bigr] ^{2}V_{\rho} \bigl( R; [ 0,t ] ^{2} \bigr). \label{2D Young} \end{equation} From (\ref{2D Young}), (\ref{in}) and the relation $v^{T}C_{t}v=\sum_{i=1}^{d} \Lambda_{t}^{i}$, it follows that there exists some $C_{1}>0$, depending on $t$ and $c$, such that we have \[ \bigl\vert f^{i}\bigr\vert _{\infty; [ 0,t ] }\leq C_{1} \bigl( v^{T}C_{t}v \bigr) ^{\gamma/ ( 2\gamma+\alpha ) }\max _{i=1,\ldots,d} \bigl[ \bigl\vert f^{i} ( 0 ) \bigr\vert +\bigl\vert f^{i}\bigr\vert _{\gamma; [ 0,t ] } \bigr] ^{\alpha/ ( 2\gamma+\alpha ) }. \] Using the fact that for some $\nu>0$, \[ \bigl\vert f^{i} ( 0 ) \bigr\vert +\bigl\vert f^{i} \bigr\vert _{\gamma; [ 0,t ] }\leq C_{2}\mathcal{L}_{\mathbf {X}} ( y_{0},\theta,t ) ^{\nu}\qquad\mbox{for }i=1,\ldots,d, \] it is easy to deduce that (\ref{conc}) holds whenever $W\in\mathcal{V}_{1}$. The proof of (\ref{conc}) for arbitrary $k\in \mathbb{N} $ now follows by induction. The key relation comes from observing that \[ \bigl\langle v,Z_{u}^{W} \bigr\rangle= \bigl\langle v,W ( y_{0} ) \bigr\rangle+\sum_{i=1}^{d} \int_{0}^{u} \bigl\langle v,Z_{r}^{ [ V_{i},W ] } \bigr\rangle \,dX_{r}^{i}, \] in the sense of Proposition \ref{prop:integral-ctrld-path}. Hence, assuming the induction hypothesis, we can use Theorem \ref{thm:NlemmaRP} to obtain a bound of the form (\ref{conc}) on \[ \bigl\vert \bigl\langle v,Z_{\cdot}^{ [ V_{i},W ] } \bigr\rangle \bigr \vert _{\infty; [ 0,t ] } \] for all $W\in\mathcal{V}_{k}$. Since $\mathcal{V}_{k+1}= \{ [ V_{i},W ] \dvtx i=0,1,\ldots,d,W\in\mathcal{V}_{k} \} $, the result is then established.\vadjust{\goodbreak} \end{pf} We are now in a position to prove our main theorem. 
Since the structure of the argument is the classical one, we will minimise the amount of detail where possible. \begin{pf*}{Proof of Theorem \ref{main theorem}} This involves assembling together the pieces we have developed in the paper. First, let $2/p>\theta>\alpha/2$ be chosen such that $X$ is $\theta$-H\"{o}lder rough and $X$ has a natural lift to a geometric $p$-rough path $\mathbf{X}$ in $C^{0,1/p}([0,1];G^{\lfloor p\rfloor}(\mathbb{R}^{d}))$. This is always possible by the first part of Proposition~\ref{CMatrix}. Let $0<t\leq T$ and note that we have shown in Proposition \ref{sobolev} that $U_{t\leftarrow0}^{\mathbf{X} ( \omega ) } ( y_{0} ) $ is in $\mathbb{D}^{\infty } ( \mathbb{R} ^{e} ) $. The result will therefore follow by showing that for every $q>0$, there exists $c_{1}=c_{1} ( q ) $ such that \[ P \Bigl( \inf_{\vert v\vert =1} \langle v,C_{t}v \rangle < \varepsilon \Bigr) \leq c_{1}\varepsilon^{q} \] for all $\varepsilon\in ( 0,1 ) $. It is classical that proving $ ( \det C_{t} ) ^{-1}$ has finite moments of all orders is sufficient for $U_{t\leftarrow0}^{\mathbf{X} ( \omega ) } ( y_{0} ) $ to have a smooth density (see, e.g., \cite{nualart}). \textit{Step} 1: From H\"{o}rmander's condition, there exists $N\in \mathbb{N} $ with the property that \[ \operatorname{span} \Biggl\{ W ( y_{0} ) \dvtx W\in\bigcup_{i=1}^{N} \mathcal{V} _{i} \Biggr\} = \mathbb{R} ^{e}. \] Consequently, we can deduce that \begin{equation} a:=\inf_{\vert v\vert =1}\sum_{W\in\bigcup _{i=1}^{N}\mathcal {V}_{i} }\bigl \vert \bigl\langle v,W ( y_{0} ) \bigr\rangle \bigr\vert >0.
\label{lb} \end{equation} For every $W\in\bigcup_{i=1}^{N}\mathcal{V}_{i}$, we have \begin{equation} \bigl\vert \bigl\langle v,Z_{\cdot}^{W} \bigr\rangle\bigr \vert _{\infty; [ 0,t ] }\geq\bigl\vert \bigl\langle v,W ( y_{0} ) \bigr\rangle\bigr\vert, \label{lb2} \end{equation} and hence using (\ref{lb}), (\ref{lb2}) and Proposition \ref{CMatrix} we end up with \begin{equation} a\leq\inf_{\vert v\vert =1}\sup_{W\in\bigcup _{i=1}^{N}\mathcal {V}_{i} }\bigl\vert \bigl\langle v,Z_{\cdot}^{W} \bigr\rangle\bigr\vert _{\infty; [ 0,t ] }\leq c_{1}\mathcal{L}_{\mathbf {X}} ( y_{0},\theta,t ) ^{\mu}\inf_{\vert v\vert =1}\bigl \vert v^{T}C_{t}v\bigr\vert ^{\pi} \label{key bound} \end{equation} for some positive constants $c_{1}$, $\mu=\mu_{N}$ and $\pi=\pi_{N}$. \textit{Step} 2: From (\ref{key bound}) we can deduce that for any $\varepsilon\in ( 0,1 ) $ \[ P \Bigl( \inf_{\vert v\vert =1}\bigl\vert v^{T}C_{t}v \bigr\vert <\varepsilon \Bigr) \leq P \bigl( \mathcal{L}_{\mathbf{X}} ( y_{0} ,\theta,t ) ^{\mu}>c_{2} \varepsilon^{-k} \bigr) \] for some constants $c_{2}>0$ and $k>0$ which do not depend on $\varepsilon $. It follows from Corollary \ref{holder roughness integrability} that for every $q>0$ we have \[ P \Bigl( \inf_{\vert v\vert =1}\bigl\vert v^{T}C_{t}v \bigr\vert <\varepsilon \Bigr) \leq c_{3}\varepsilon^{kq}, \] where $c_{3}=c_{3} ( q ) >0$ does not depend on $\varepsilon$. \end{pf*} \begin{appendix}\label{app} \section*{Appendix} \begin{pf*}{Proof of Lemma \ref{technical}} We prove the result for $S=T$; the modifications for $S<T$ are straightforward. Consider three nested sequences $(A_{m})_{m=1}^{\infty}$, $ ( B_{m} ) _{m=1}^{\infty}$ and $ ( C_{m} ) _{m=1}^{\infty}$ consisting of partitions of $ [ 0,s ] $, $ [ s,t ] $ and $ [ t,T ]$, respectively, and suppose that the mesh of each sequence tends to zero as $m$ tends to infinity.
For each $m_{1} $ and $m_{2}$ in $ \mathbb{N} $ let $D^{m_{1},m_{2}}$ denote the partition of $ [ 0,T ] $ defined by \[ D^{m_{1},m_{2}}=A_{m_{1}}\cup B_{m_{2}}\cup C_{m_{1}}. \] We now construct an increasing function $r\dvtx \mathbb{N} \rightarrow \mathbb{N} $ such that \[ ( D_{m} ) _{m=1}^{\infty}= \bigl( D^{r ( m ) ,m} \bigr) _{m=1}^{\infty} \] together form a nested sequence of partitions of $ [ 0,T ] $ having the needed properties. We do this inductively. First, let $m=1$, then for every two consecutive points $u<v$ in the partition $B_{m}$ Lemma \ref{continuity} implies that \[ \operatorname{Cov} \bigl( Z_{s,t}Z_{u,v}| \mathcal{F}^{A_{n}}\vee \mathcal{F}^{C_{n}} \bigr) \rightarrow \operatorname{Cov} ( Z_{s,t}Z_{u,v}|\mathcal{F}_{s,t} \vee\mathcal{F}_{t,T} ) \] as $n\rightarrow\infty$. $Z$ has positive conditional covariance, therefore the right-hand side of the last expression is positive. This means we can choose $r ( 1 ) $ to ensure that \setcounter{equation}{0} \begin{equation} \operatorname{Cov} \bigl( Z_{s,t}Z_{u,v}| \mathcal{F}^{A_{r ( 1 ) }}\vee\mathcal{F}^{C_{r ( 1 ) }} \bigr) \geq0 \label {cov part} \end{equation} for every two consecutive points $u$ and $v$ in $B_{m}$ [the total number of such pairs does not depend on $r(1)$]. We then let $D_{1}=D^{r ( 1 ),1}$, both properties 2 and 3 in the statement are easy to check; the latter follows from (\ref{cov part}), when we interpret~the Schur complement as the covariance matrix of $Z_{2}^{1}$ conditional on $Z_{1}^{1}$ (see also the proof of Proposition \ref{comparison}). Having specified $r ( 1 ) <\cdots<r ( k-1 )$, we need only repeat the treatment outlined above by choosing some natural number $r ( k ) >r ( k-1 ) $ to ensure that \[ \operatorname{Cov} \bigl( Z_{s,t}Z_{u,v}| \mathcal{F}^{A_{r ( k ) }}\vee\mathcal{F}^{C_{r ( k ) }} \bigr) \geq0 \] for each pair of consecutive points $u<v$ in $B_{k}$. 
It is easy to verify that $ ( D_{m} ) _{m=1}^{\infty}$ constructed in this way has the properties we need. \end{pf*} \end{appendix} \printaddresses \end{document}
\begin{document} \title{Big polynomial rings and Stillman's Conjecture} \section{Introduction} The purpose of this paper is to prove that certain limits of polynomial rings are themselves polynomial rings, and show how this observation can be used to deduce some interesting results in commutative algebra. In particular, we give two new proofs of Stillman's conjecture \cite[Problem 3.14]{stillman}. The first is similar to that of Ananyan--Hochster \cite{ananyan-hochster}, though more streamlined; in particular, it establishes the existence of small subalgebras. The second proof is completely different, and relies on a recent noetherianity result of Draisma \cite{draisma}. \subsection{Polynomiality results} For a commutative ring $A$, let $A \invl x_1,x_2,\dots\invr$ be the inverse limit of the standard-graded polynomial rings $A[x_1,\ldots,x_n]$ in the category of graded rings. A degree $d$ element of this ring is a (possibly infinite) formal $A$-linear combination of degree $d$ monomials in the variables $\{x_i\}_{i \ge 1}$. Fix a field $\bk$, and let $\bR=\bk\invl x_1,x_2,\dots\invr$. Our first polynomiality theorem is: \begin{theorem} \label{introthm1} Assume $\bk$ is perfect. Then $\bR$ is (isomorphic to) a polynomial ring. \end{theorem} The set of variables in the polynomial ring is uncountable; hence the phrase ``big polynomial rings'' in the title of the paper. We deduce Theorem~\ref{introthm1} from the following general criterion. For a graded ring\footnote{In this paper, all graded rings are supported in non-negative degrees.} $R$, we write $R_+$ for the ideal of positive degree elements, and we write $R_+^2$ for the square of this ideal. \begin{theorem} \label{polycrit} Let $R$ be a graded ring with $R_0=\bk$ a perfect field. Assume: \begin{itemize} \item Characteristic zero: $R$ has enough derivations (Definition~\ref{defn:enough1}), that is, for every non-zero $x \in R_+$ there is a derivation $\partial$ of negative degree such that $\partial(x) \ne 0$. 
\item Positive characteristic: $R$ has enough Hasse derivations (see Definition~\ref{defn:enough2}). \end{itemize} Then $R$ is isomorphic to a polynomial ring. Precisely, for any set of positive degree homogeneous elements $\{f_i\}_{i \in I}$ whose images in $R_+/R_+^2$ form a $\bk$-basis, the $\bk$-algebra homomorphism $\bk[X_i]_{i \in I} \to R$ taking $X_i$ to $f_i$ is an isomorphism. \end{theorem} The proof of Theorem~\ref{polycrit} is elementary. Surjectivity follows immediately by induction on degree: any homogeneous element in $R_+^2$ is, by definition, generated by elements of lower degree. For injectivity, if one had an algebraic relation among some of the $f_i$, then one could apply an appropriate (Hasse) derivation to get a lower degree relation, and eventually reach a contradiction. To then obtain Theorem~\ref{introthm1} from Theorem~\ref{polycrit}, we simply observe that (Hasse) derivatives with respect to the variables $x_i$ extend continuously to $\bR$ and furnish it with enough (Hasse) derivations. The inverse limit $\bR$ is one way to make sense of a limit of finite polynomial rings. A different way is through the use of ultrapowers, or, more generally, ultraproducts (see \S \ref{subsec:ultra} for background). Let $\bS$ be the graded ultrapower of the standard-graded polynomial ring $\bk[x_1,x_2,\ldots]$. We also prove: \begin{theorem} \label{introthm2} Assume $\bk$ is perfect. Then $\bS$ is a polynomial ring. \end{theorem} This also follows quickly from Theorem~\ref{polycrit}. The perfectness hypotheses in this section can be relaxed: for instance, Theorems~\ref{introthm1} and~\ref{introthm2} hold if $[\bk:\bk^p]$ is finite, see Remarks~\ref{rmk:relax} and~\ref{rmk:relax2}. In fact, some time after completing this paper, we succeeded in removing these hypotheses entirely; see \cite{imperfect}. 
\subsection{Connection to the work of Ananyan--Hochster} We recall (and slightly extend) the notion of strength from \cite{ananyan-hochster}: \begin{definition} Let $R$ be a graded ring with $R_0=\bk$ a field, and let $f$ be a homogeneous element of $R$. The \textbf{strength} of $f$ is the minimal integer $k \ge -1$ for which there is a decomposition $f=\sum_{i=1}^{k+1} g_i h_i$ with $g_i$ and $h_i$ homogeneous elements of $R$ of positive degree, or $\infty$ if no such decomposition exists. The \textbf{collective strength} of a set of homogeneous elements $\{f_i\}_{i \in I}$ of $R$ is the minimal strength of a non-trivial homogeneous $\bk$-linear combination. \end{definition} \begin{example} \label{ex:strength} \begin{enumerate}[topsep=0pt,label=(\alph*)] \item In $\bk[x_1, \ldots, x_n]$, non-zero elements of degree $1$ have infinite strength, while non-zero elements of degree $>1$ have strength $<n$. \item In $\bR$, there is a wealth of interesting elements of infinite strength, such as $\sum_{i \ge 1} x_i^d$ (if $d$ is invertible in $\bk$).\footnote{To see this, let $F=\sum_i x_i^d$. If $F$ decomposes as $F=f_0g_0+ \cdots +f_sg_s$ then the singular locus of $V(F)$ would contain $V(f_0,\dots,f_s,g_0,\dots,g_s)$ and hence would have codimension at most $2s+2$. However, if $d$ is invertible, then the singular locus of $V(F)$ has infinite codimension.} \item In any graded ring $R$, the ideal $R_+^2$ is exactly the ideal of finite strength elements. \qedhere \end{enumerate} \end{example} Many of the results of Ananyan--Hochster are instances of the following general principle: elements in a polynomial ring of sufficiently large collective strength behave approximately like independent variables. Theorem~\ref{introthm1} shows that this approximation becomes exact in the limiting ring $\bR$. Indeed, suppose $\{f_i\}_{i \in I}$ are elements of $\bR_+$ that form a basis modulo $\bR_+^2$.
Thus no linear combination of the $f_i$ belongs to $\bR_+^2$, i.e., has finite strength (Example~\ref{ex:strength}(c)), and so $\{f_i\}$ has infinite collective strength. The Ananyan--Hochster principle thus suggests that the $\{f_i\}$ should be independent variables, and this is exactly the content of Theorem~\ref{introthm1}. \subsection{Stillman's conjecture via ultraproducts} \label{ss:stillmanS} While ultraproducts may be less familiar to some readers than inverse limits, Theorem~\ref{introthm2} leads to our most efficient proof of Stillman's conjecture. As in \cite{ananyan-hochster} (see \S \ref{ss:stillman}), both the existence of small subalgebras and Stillman's conjecture can be reduced to the following statement: \begin{theorem}\label{thm:reg high strength} Fix integers $d_1, \ldots, d_r$. Then there exists an integer $N$ with the following property. If $\bk$ is a perfect field, and $f_1, \ldots, f_r\in \bk[x_1,\dots,x_n]$ are polynomials of degrees $d_1, \ldots, d_r$ with collective strength at least $N$, then $f_1, \ldots, f_r$ is a regular sequence. \end{theorem} Ananyan--Hochster prove this theorem via a multi-tiered induction, where elements of increasingly high strength obtain an array of increasingly nice properties. Our proof using Theorem~\ref{introthm2} is more direct. Here is the idea. Suppose that $f_{1,i}, \ldots, f_{r,i} \in \bk[x_1,x_2,\dots]$, for $i \in \bN$, are polynomials of the given degrees with collective strength tending to infinity. It suffices to show that $f_{1,i}, \ldots, f_{r,i}$ eventually form a regular sequence. For each $j$, the sequence $f_{j,\bullet}$ defines an element $f_j$ in the ultraproduct ring $\bS$. It is easy to see that $f_1, \ldots, f_r$ have infinite collective strength (Proposition~\ref{prop:str}). Thus, by Theorem~\ref{introthm2}, $f_1, \ldots, f_r$ are independent variables in $\bS$, and hence form a regular sequence. 
We then apply a result (Corollary~\ref{cor:reg seq ultra}) comparing codimension in $\bS$ to codimension in $\bk[x_1,x_2,\ldots]$ to conclude that $f_{1,i}, \ldots, f_{r,i}$ is eventually a regular sequence. As in \cite{ananyan-hochster}, we show that the bound in Theorem~\ref{thm:reg high strength} (and Stillman's conjecture as well) is independent of the field $\bk$. To do so, we prove a generalization of Theorem~\ref{introthm2} (see \S\ref{ss:ultramain}) where $\bS$ is replaced with an ultraproduct of polynomial rings with variable coefficient fields. \subsection{Stillman's conjecture via inverse limits} \label{ss:stillmanR} Returning to the inverse limit, Theorem~\ref{introthm1} enables a proof of Stillman's conjecture that follows the very general rubric in algebraic geometry of proving a result generically, spreading out to an open set, and then inductively treating proper closed subsets. The basic idea in characteristic zero is as follows. Suppose that $A$ is a characteristic~0 domain with fraction field $K$, and $M$ is a finitely presented $A \invl x_1,x_2,\dots\invr$-module. Then $K \otimes_A M$ is a finitely presented module over the ring $K \otimes_A A\invl x_1,x_2,\dots\invr$. While $K\otimes_A A\invl x_1,x_2,\dots\invr$ is generally not isomorphic to $K\invl x_1,x_2,\dots\invr$, Theorem~\ref{polycrit} shows it is also an abstract polynomial ring. It then follows from simple homological properties of infinite polynomial rings that $K \otimes_A M$ has a finite length resolution by finite free modules. A flatness argument produces an open dense subset $U$ of $\Spec(A)$ such that $M_y$ has the same Betti table as $K \otimes_A M$ for all $y \in U$. We can then restrict our attention to $\Spec(A) \setminus U$, and apply the same argument. This shows that there is some (perhaps infinite) stratification of $\Spec(A)$ such that on each stratum the fibers of $M$ have the same Betti table. We apply this as follows. 
Fix positive integers $d_1, \ldots, d_r$, and let $A$ be the symmetric algebra on the vector space $\Sym^{d_1}(\bk^{\infty}) \oplus \cdots \oplus \Sym^{d_r}(\bk^{\infty})$. Then $\Spec(A)$ is the space of forms $f_1, \ldots, f_r \in \bk\invl x_1,x_2,\dots\invr$ of degrees $d_1, \ldots, d_r$. We let $M$ be the universal module $A\invl x_1,x_2,\dots\invr/(f_1,\ldots,f_r)$. The stratification constructed in the previous paragraph can be made compatible with the $\GL_{\infty}$ action on $\Spec(A)$. A recent theorem of Draisma \cite{draisma} asserts that $\Spec(A)$ is $\GL_{\infty}$-noetherian, and hence this stratification is finite. We conclude that there are only finitely many resolution types for ideals generated by $f_1, \ldots, f_r$ of the given degrees. This, in particular, implies Stillman's conjecture in characteristic zero. The same idea works in positive characteristic, but when $K$ fails to be perfect, we need to bootstrap from the perfect case to produce the open subset with constant Betti numbers. \subsection{Connections to other work} The Milnor--Moore theorem \cite{MM}, and generalizations \cite{sjodin}, establish that certain commutative graded rings are polynomial rings via properties of a comultiplication. While this, and its extensions to non-commutative rings, can be applied to examples in commutative algebra, it is of a fairly distinct nature from the criteria in the present paper. Theorem~\ref{introthm1} is an example of the meta-principle that inverse limits of free objects tend to be free themselves. See \cite[\S I.4.2, Corollary 4]{serre} for an example of this principle with pro-$p$-groups. Alexandru Chirvasitu informed us that he can prove a non-commutative version of Theorem~\ref{introthm1} where polynomial rings are replaced by non-commutative polynomial rings. The use of ultraproducts in commutative algebra was famously employed in \cite{van-den-Dries-schmidt} to establish a variety of bounds (with the number of variables fixed). 
See \cite{schoutens} for more discussion and examples. The Gr\"obner theory of the inverse limit ring $\bk\invl x_1,x_2,\dots\invr$ was studied by Snellman in~\cite{snellman,snellman-article}. Shortly after a draft of this article was posted, ~\cite{draisma-lason-leykin} applied Theorem~\ref{introthm1} to obtain finiteness results for grevlex Gr\"obner bases over $\bR$, and then used this to answer some questions raised by Snellman and to give a generic initial ideal proof of Stillman's Conjecture. The use of $\GL_\infty$-noetherianity of spaces to prove the existence of uniform bounds in algebraic geometry has been used in several papers. See \cite{draisma-survey} for a survey. \subsection{Outline} In \S \ref{s:poly}, we establish our polynomiality criteria (summarized in Theorem~\ref{polycrit}). In \S \ref{s:codim}, we prove some easy results concerning dimension theory in polynomial rings with an infinite number of variables. In \S \ref{s:ultra}, we prove that the ultraproduct ring is a polynomial ring (Theorem~\ref{introthm2}), and use this to deduce our first proof of Stillman's conjecture. Finally, in \S \ref{s:limit}, we prove that the inverse limit ring is a polynomial ring (Theorem~\ref{introthm1}), and use this to deduce our second proof of Stillman's conjecture. \section{Criteria for polynomiality} \label{s:poly} Let $R$ be a graded ring with $R_0=\bk$ a field. We say that $R$ {\bf is a polynomial ring} if there are elements $\{x_i\}_{i \in I}$ of $R$, each homogeneous of positive degree, such that the natural map $\bk[X_i]\to R$ sending $X_i$ to $x_i$ is an isomorphism. The $x_i$'s need not have degree~1, and the set $I$ need not be finite. The purpose of this section is to characterize polynomial rings via derivations. \subsection{Characteristic~0} We first treat the case where $\bk$ has characteristic~0, for which the following definition and theorem constitute our criterion for polynomiality. 
We say that a derivation $\partial$ of a graded ring $R$ is {\bf homogeneous of degree $d$} if $\deg \partial(x)=\deg(x)+d$ for all homogeneous $x \in R$. \begin{definition} \label{defn:enough1} Let $R$ be a graded ring with $R_0=\bk$ a field. We say that $R$ {\bf has enough derivations} if for every non-zero homogeneous element $x$ of positive degree there is a homogeneous derivation $\partial$ of negative degree such that $\partial(x) \ne 0$. \end{definition} \begin{theorem} \label{thm:poly} Let $R$ be a graded $\bk$-algebra with $R_0=\bk$ a field of characteristic~$0$. Then $R$ is a polynomial ring if and only if $R$ has enough derivations. \end{theorem} \begin{proof} In this proof, ``derivation'' will mean ``homogeneous derivation of negative degree.'' It is clear that a polynomial ring has enough derivations. We prove the converse. Let $\cE$ be a set of homogeneous elements of $R_+$ that gives a basis of $R_+/R_+^2$. By graded Nakayama's lemma, $\cE$ generates $R$ as a $\bk$-algebra, so it suffices to show that $\cE$ is algebraically independent. Let $\cE_{\le d}$ (resp.\ $\cE_d$) be the set of elements in $\cE$ of degree $\le d$ (resp.\ $d$). We prove that $\cE_{\le d}$ is algebraically independent for all $d$ by induction on $d$. Suppose that we have shown $\cE_{\le d-1}$ is algebraically independent. To prove that $\cE_{\le d}$ is algebraically independent, it suffices to prove the following statement: if $\cE_{\le d-1} \subset E \subset \cE_{\le d}$ is algebraically independent and $x \in \cE_d \setminus E$, then $E'=E \cup \{x\}$ is algebraically independent. Indeed, this statement implies that all sets of the form $\cE_{\le d-1} \cup E''$ with $E''$ a finite subset of $\cE_d$ are algebraically independent, which implies that $\cE_{\le d}$ is algebraically independent. Thus let $E$, $E'$, and $x$ as above be given. Let $A \subset R$ be the $\bk$-subalgebra generated by $E$. 
To prove that $E'$ is algebraically independent, it suffices to show that if $0=\sum_{i=0}^n a_i x^i$ with $a_i \in A$ then $a_i=0$ for all $i$. Before proceeding, we note that if $\partial$ is any derivation of $R$ then $\partial(\cE_{\le d}) \subset A$ since $\partial$ decreases degrees, and so $\partial(A) \subset A$ and $\partial(x) \in A$. Suppose that $0=\sum_{i=0}^n a_i x^i$ with $a_i \in A$ and $a_n \ne 0$. Of all such relations, choose a homogeneous one of minimal degree (i.e., with $\deg(a_nx^n)$ minimal). Suppose that $a_n$ has positive degree. By assumption, there exists a derivation $\partial$ such that $\partial(a_n) \ne 0$. Applying $\partial$ to our given relation yields $0=\partial(a_n) x^n + \sum_{i=0}^{n-1} b_i x^i$ where the $b_i$ are elements of $A$. This is a contradiction, since $\partial(a_n)$ has smaller degree than $a_n$. Thus $\deg(a_n)=0$, and so we may assume $a_n=1$. Since $\cE$ is linearly independent modulo $R_+^2$, we see that $x\notin A$, and so $n \ge 2$ and $nx+a_{n-1}$ is non-zero. It follows that there exists a derivation $\partial$ such that $\partial(nx+a_{n-1}) \ne 0$. Applying $\partial$ to our original relation gives $0=\partial(nx+a_{n-1}) x^{n-1} + \sum_{i=0}^{n-2} b_i x^i$ for some $b_i \in A$. This is a smaller degree relation, which is a contradiction. We thus see that no relation $0=\sum_{i=0}^n a_i x^i$ exists with $a_n$ non-zero, which completes the proof. \end{proof} \subsection{Positive characteristic} Theorem~\ref{thm:poly} obviously fails in characteristic $p$: since $p$th powers are killed by every derivation, no reduced ring has enough derivations. The most obvious adjustment would be to ask that if $x$ is a homogeneous element of $R$ that is not a $p$th power then there is a derivation $\partial$ such that $\partial(x) \ne 0$. The following two examples show that this condition is insufficient to conclude that $R$ is a polynomial ring. 
\begin{example} Let $R=\bk[x]/(x^p)$ where $\bk$ is perfect of characteristic~$p$ and $x$ has degree~1. Then $\frac{d}{dx}$ is a well-defined derivation on $R$, and thus $R$ has enough derivations. \end{example} \begin{example} Let $R=\bk[x,y,\tfrac{y}{x^p}]$ where $\bk$ is perfect of characteristic $p$, $x$ has degree~1, and $y$ has degree $p+1$. Then $\frac{\partial}{\partial x}$ and $x^p \frac{\partial}{\partial y}$ are well-defined derivations on $R$, and every homogeneous element of $R$ that is not a $p$th power is not annihilated by one of them. \end{example} To extend our criterion to the positive characteristic case, we employ the following extension of the notion of a derivation (see \cite[pp.\ 27--29]{goldschmidt} for additional discussion). \begin{definition} Let $R$ be a $\bk$-algebra. A {\bf Hasse derivation} on $R$ is a sequence $\partial^{\bullet}=(\partial^n)_{n \ge 0}$ where each $\partial^n$ is a $\bk$-linear endomorphism of $R$ such that $\partial^0$ is the identity and \begin{displaymath} \partial^n(xy)=\sum_{i+j=n} \partial^i(x) \partial^j(y) \end{displaymath} holds for all $x,y \in R$. If $R$ is graded then we say $\partial^{\bullet}$ is {\bf homogeneous of degree $d$} if $\partial^n(x)$ has degree $\deg(x)+nd$ for all homogeneous $x \in R$. \end{definition} \begin{remark} Giving a Hasse derivation on $R$ is equivalent to giving a ring homomorphism $\varphi \colon R \to R \lbb t \rbb$ such that the constant term of $\varphi(x)$ is $x$. If $\partial^{\bullet}$ is a Hasse derivation, then the associated ring homomorphism is defined by $\varphi(x) = \sum_{i \ge 0} \partial^i(x) t^i$. \end{remark} \begin{example} \label{ex:hasse} Suppose $R=\bk[x]$, with $\bk$ any field. Define $\partial^n(x^k) = \binom{k}{n} x^{k-n}$. (Note that $\partial^n = \frac{1}{n!} \frac{d^n}{dx^n}$ if $n!$ is invertible in $\bk$.) Then $\partial^{\bullet}$ is a Hasse derivation, called the {\bf Hasse derivative}. 
If $R$ is graded with $x$ of degree $d$ then $\partial^{\bullet}$ is homogeneous of degree $-d$. The homomorphism $\varphi \colon R \to R\lbb t \rbb$ associated to the Hasse derivative is given by $x \mapsto x+t$. \end{example} \begin{remark}\label{rmk:draisma hasse} Curiously, Hasse derivatives also play a key role in Draisma's~\cite{draisma}, where they are closely related to his directional derivatives. \end{remark} \begin{lemma} \label{lem:der-powers} Let $R$ be a $\bk$-algebra, where $\bk$ is a field of characteristic~$p$, and let $\partial^{\bullet}$ be a Hasse derivation on $R$. Let $q$ be a power of $p$. Then for $x \in R$ and $n \in \bN$ we have \begin{displaymath} \partial^n(x^q) = \begin{cases} (\partial^{n/q}{x})^q & \text{if $q \mid n$} \\ 0 & \text{if $q \nmid n$} \end{cases}. \end{displaymath} \end{lemma} \begin{proof} We have \begin{displaymath} \partial^n(x^q) = \sum_{\substack{(i_1,\dots,i_q)\\i_1+\cdots+i_q=n}} \partial^{i_1}(x) \cdots \partial^{i_q}(x). \end{displaymath} If $i_1, \ldots, i_q$ are not all equal then the orbit of $(i_1, \ldots, i_q)$ under the symmetric group $S_q$ has cardinality divisible by $p$. All elements of this orbit contribute equally to the sum, and thus they all cancel. We thus see that the only surviving term occurs when $n$ is a multiple of $q$ and $i_1=\cdots=i_q=n/q$; this term is $(\partial^{n/q}{x})^q$. \end{proof} The following definition and theorem constitute our criterion for polynomiality in positive characteristic. \begin{definition} \label{defn:enough2} Let $R$ be a graded ring with $R_0=\bk$ a field of characteristic~$p>0$. Let $R^p = \{f^p \mid f \in R\}$ denote the subring of $p$th powers in $R$. 
We say that $R$ {\bf has enough Hasse derivations} if the following condition holds: if $x$ is a positive degree homogeneous element of $R$ such that $x \not\in \bk R^p$ (the $\bk$-span of the set $R^p$) then there exists a homogeneous Hasse derivation $\partial^{\bullet}$ of $R$ of negative degree such that $\partial^1(x) \ne 0$. \end{definition} \begin{theorem} \label{thm:poly2} Let $R$ be a graded ring with $R_0=\bk$ a perfect field of characteristic~$p>0$. Then $R$ is a polynomial ring if and only if it has enough Hasse derivations. \end{theorem} \begin{proof} In this proof, ``Hasse derivation'' will mean ``homogeneous Hasse derivation of negative degree.'' We note that since $\bk$ is perfect, $\bk R^p=R^p$. If $R$ is a polynomial ring then it has enough Hasse derivations; one can see this using Hasse derivatives (Example~\ref{ex:hasse}). We now prove the converse. We first show that $R$ is reduced. Suppose not, and let $x \in R$ be a non-zero homogeneous nilpotent element of minimal degree. Note that $x \not\in R^p$, for if $x=y^p$ then $y$ would be a lower degree nilpotent element. Let $r$ be such that $x^{p^r}=0$ and let $\partial^{\bullet}$ be a Hasse derivation such that $\partial^1(x) \ne 0$. Then $0=\partial^{p^r}(x^{p^r})=(\partial^1{x})^{p^r}$ (Lemma~\ref{lem:der-powers}), and so $\partial^1(x)$ is nilpotent, contradicting the minimality of $x$. Thus $R$ is reduced. Let $\cE$ be a set of homogeneous elements of $R_+$ that forms a basis for $R_+/R_+^2$. It suffices to prove that $\cE$ is algebraically independent. For $E \subset \cE$, consider the following statement: \begin{itemize}[leftmargin=3.5em] \item[$\sA_E$:] Given distinct elements $x_1, \ldots ,x_r \in E$ and a polynomial $F \in \bk[X_1, \ldots, X_r]$ such that $F(x_1, \ldots, x_r) \in R^p$, we have $F \in \bk[X_1, \ldots, X_r]^p$. \end{itemize} Observe that if $\sA_E$ holds then $E$ is algebraically independent. 
Indeed, suppose that $F(x_1, \ldots, x_r)=0$ is a minimal degree algebraic relation among distinct elements of $E$. Since $0 \in R^p$, we see that $F(X_1, \ldots, X_r)=G(X_1, \ldots, X_r)^p$ for some $G$ by $\sA_E$, and so $G(x_1, \ldots, x_r)^p=0$. Since $R$ is reduced, it follows that $G(x_1, \ldots, x_r)=0$, contradicting the minimality of $F$. Thus to prove the theorem it suffices to prove $\sA_{\cE}$. We prove that $\sA_E$ holds for all $E$ by induction on $E$ in the following manner. Let $\cE_{\le d}$ (resp.\ $\cE_d$) be the set of elements of $\cE$ of degree $\le d$ (resp.\ $d$). Suppose that $\cE_{\le d-1} \subset E \subset \cE_{\le d}$ and let $E'=E \cup \{x\}$ for some $x \in \cE_d \setminus E$. Assuming $\sA_E$, we prove $\sA_{E'}$. This will establish $\sA_E$ for all $E$ by the same logic used in the proof of Theorem~\ref{thm:poly}. Fix $E$, $E'$, and $x$ as above. Let $A$ be the $\bk$-subalgebra of $R$ generated by $E$. We claim that $\sA_{E'}$ can be reduced to the following statement, for all $n$ and $m$: \begin{itemize}[leftmargin=4.3em] \item[$\sB_{n,m}$:] If $\sum_{i=0}^n a_i x^i \in R^p$ with $a_i \in A$ and $\deg(a_n) \le m$, then $a_i \in R^p$ and $ia_i=0$ for all $i$. \end{itemize} Indeed, suppose $\sB_{n,m}$ holds for all $n$ and $m$, and suppose $F(x_1,\ldots,x_r) \in R^p$ for distinct elements $x_1, \ldots, x_r \in E'$. We may as well suppose $x_r=x$ and $x_1,\ldots,x_{r-1} \in E$. Write $F(X_1,\ldots,X_r)=\sum_{i=0}^n G_i(X_1,\ldots,X_{r-1}) X_r^i$ for polynomials $G_i$. By $\sB_{n,m}$, we see that $G_i(x_1,\ldots,x_{r-1}) \in R^p$ for all $i$ and $G_i(x_1,\ldots,x_{r-1})=0$ if $p \nmid i$. By $\sA_E$, it follows that $G_i(X_1,\ldots,X_{r-1})=G'_i(X_1,\ldots,X_{r-1})^p$ for some polynomial $G'_i$ and that $G_i(X_1,\ldots,X_r)=0$ if $p \nmid i$. We thus find \begin{displaymath} F(X_1,\ldots,X_r)=\bigg( \sum_{\substack{0 \le i \le n\\ p \mid i}} G_{i}'(X_1,\ldots,X_{r-1}) X_r^{i/p} \bigg)^p, \end{displaymath} which establishes $\sA_{E'}$. 
We now prove $\sB_{n,m}$ by induction on $n$ and $m$. Clearly, $\sB_{0,m}$ holds for all $m$. We note that if $\sB_{n,m}$ holds and $\sum_{i=0}^n a_i x^i=0$ with $\deg(a_n) \le m$ then $a_i=0$ for all $i$; the proof is the same as the proof given above that $\sA_E$ implies algebraic independence of $E$. We also note that if $\partial^{\bullet}$ is any Hasse derivation then $\partial^n(\cE_{\le d}) \subset A$ for all $n>0$, and so $\partial^n(A) \subset A$ and $\partial^n(x) \in A$. We now prove $\sB_{1,m}$ for all $m$ by induction on $m$. First suppose $m=0$, and suppose that $ax+b \in R^p$ with $a \in \bk$ and $b \in A$. Write $b = b_0 + b'$ where $b_0$ is the degree $0$ piece in the homogeneous decomposition of $b$. Then $b_0 \in \bk^p$ and $ax+b'=0$ in $R_+/R_+^2$. Since $\cE$ is linearly independent in $R_+/R_+^2$, it follows that $a=0$, and so $\sB_{1,0}$ holds. Now suppose $\sB_{1,m-1}$ holds, and let us prove $\sB_{1,m}$. Thus suppose that $ax+b=y^p$ for some $y \in R$ with $a,b \in A$ and $\deg(a)\leq m$. If $\partial^{\bullet}$ is any Hasse derivation of $R$ then $\partial^1(a)x+(a\partial^1(x)+\partial^1(b))=0$ (Lemma~\ref{lem:der-powers}). Since $\deg(\partial^1(a))<m$, we see that $\partial^1(a)=0$ by $\sB_{1,m-1}$. Since this holds for all $\partial^{\bullet}$, we find $a \in R^p$. Suppose $a \ne 0$, and let $q$ be the maximal power of $p$ such that $a \in R^q$ (this exists since $\deg(a)>0$). Write $a=c^q$, and note $c \not\in R^p$. Let $\partial^{\bullet}$ be a Hasse derivation such that $\partial^1(c) \ne 0$; note then that $\partial^q(a)=(\partial^1{c})^q \ne 0$ (Lemma~\ref{lem:der-powers}). Again by Lemma~\ref{lem:der-powers}, we have \begin{displaymath} \partial^q(a)x+(a\partial^q(x)+\partial^q(b))=\partial^q(y^p)=(\partial^{q/p}{y})^p \in R^p. \end{displaymath} By $\sB_{1,m-1}$, we have $\partial^q(a)=0$, a contradiction. Thus $a=0$ and $\sB_{1,m}$ holds.
We now prove $\sB_{n,m}$ for $n \ge 2$, assuming $\sB_{k,m}$ for all $1\leq k\leq n-1$ and assuming $\sB_{n,m-1}$. Thus suppose that $\sum_{i=0}^n a_i x^i \in R^p$ with $a_i \in A$ and $\deg(a_n) \le m$. Let $\partial^{\bullet}$ be a Hasse derivation of $R$. Applying $\partial^1$, we find \begin{displaymath} 0=\partial^1(a_n) x^n + (n a_n \partial^1(x)+\partial^1(a_{n-1})) x^{n-1} + \cdots, \end{displaymath} where the remaining terms have degree $\le n-2$ in $x$. By $\sB_{n,m-1}$, all the above coefficients vanish. Thus $\partial^1(a_n)=0$ for all $\partial^{\bullet}$, and so $a_n\in R^p$. We now see that the coefficient of $x^{n-1}$ is $\partial^1(n a_n x + a_{n-1})$. Since this vanishes for all $\partial^{\bullet}$, we find $na_nx+a_{n-1} \in R^p$, and so $na_n=0$ by $\sB_{1,m}$. In particular, $p \mid n$ if $a_n \ne 0$, so $a_n x^n \in R^p$, and hence $\sum_{i=0}^{n-1} a_i x^i \in R^p$. Thus by $\sB_{n-1,m}$ we have $a_i \in R^p$ and $i a_i=0$ for all $0 \le i \le n-1$. This proves $\sB_{n,m}$. \end{proof} \begin{remark} \label{rmk:relax} The perfectness hypothesis in Theorem~\ref{thm:poly2} can be omitted. Indeed, letting $\bK$ be the perfection of $\bk$, the theorem shows that $\bK \otimes_{\bk} R$ is a polynomial ring, which implies that $R$ is a polynomial ring. \end{remark} \section{Dimension theory in polynomial rings} \label{s:codim} Fix a field $\bk$. For a ring $A$ and a (possibly infinite) set $\cU$, we let $A[\cU]$ be the polynomial algebra over $A$ in variables $\cU$. We aim to prove a number of basic results on codimension in rings of the form $A[\cU]$ where $A$ is a finitely generated $\bk$-algebra. All of these results are standard when $\cU$ is finite. We do not impose any gradings in this section. 
For a prime ideal $\fp$ in a commutative ring $R$, the {\bf codimension} (or {\bf height}) of $\fp$ is the maximum integer $c$ for which there exists a chain of prime ideals $\fp_0 \subsetneq \cdots \subsetneq \fp_c=\fp$, or $\infty$ if such chains exist with $c$ arbitrarily large. All ideals considered in this section are assumed to be non-unital. The {\bf codimension} of an arbitrary non-unital ideal $I$ of $R$ is the minimum of the codimensions of primes containing $I$, or $\infty$ if $I$ is not contained in any prime of finite codimension. This will be denoted $\codim_R(I)$. We start with a basic fact that we will cite often. \begin{proposition} \label{prop:finflat} Let $A \subset B$ be a flat integral extension of rings. For any ideal $I \subset B$, we have $\codim_B(I)=\codim_A(A \cap I)$. \end{proposition} \begin{proof} We first prove the statement assuming that $I=\fp$ is prime. Suppose that $\codim_B(\fp) \ge c$. Let $\fp_0 \subsetneq \fp_1\subsetneq \cdots \subsetneq \fp_c = \fp$ be a chain of distinct prime ideals. Let $\fq_i= A \cap \fp_i$. By incomparability \cite[Theorem~14.3(2)]{altman-kleiman}, the $\fq_i$ are distinct and thus $\codim_A(\fq_c) \geq c$. In particular, if $\codim_B(\fp)=\infty$, this shows that $\codim_A(A \cap \fp)=\infty$. Now suppose that $\codim_B(\fp)$ is finite and equal to $c$. If there were some longer chain of primes leading up to $\fq_c$, then by going down for flat extensions \cite[Theorem 14.11]{altman-kleiman}, we would have $\codim_B(\fp_c) > c$, which is a contradiction. Thus $\codim_A(\fq_c)=c$ which finishes the special case when $I$ is prime. Now consider the general case. Given a prime $\fp$ containing $I$, we have just shown that $\codim_A(A \cap \fp) = \codim_B(\fp)$. On the other hand, given a prime $\fq$ containing $A \cap I$, using \cite[Theorem 14.3(4)]{altman-kleiman}, there is a prime $\fp \supset I$ such that $A \cap \fp = \fq$. In particular, we deduce that $\codim_B(I) = \codim_A(A \cap I)$. 
\end{proof} \begin{proposition} \label{prop:fg} Let $A$ be a finitely generated $\bk$-algebra and let $\fp$ be a prime ideal of $A[\cU]$ of finite codimension. Then $\fp$ is finitely generated. \end{proposition} \begin{proof} Let $c$ be the codimension of $\fp$. We prove the statement by induction on $c$. First suppose that $c=0$. If $\fp=0$, then we are done. Otherwise, choose a nonzero element $g \in \fp$. Let $\cV \subset \cU$ be a finite subset such that $g$ belongs to $A[\cV]$. Let $\fp' = A[\cU](A[\cV] \cap \fp)$. Then we have $\fp' \subseteq \fp$. Since $\fp$ is prime, so is $A[\cV] \cap \fp$, and hence so is $\fp'$ since $A[\cU]$ is obtained from $A[\cV]$ by adjoining variables. In particular, we have $\fp'=\fp$, and so $\fp$ is finitely generated. Now suppose $c>0$. Choose a prime ideal $\fq \subset \fp$ of codimension $c-1$. By induction, we know that $\fq$ is finitely generated; let $f_1, \ldots, f_r$ be generators. Let $g \in \fp \setminus \fq$ and let $\cV \subset \cU$ be a finite subset such that the $f_i$'s and $g$ belong to $A[\cV]$. Let $\fp' = A[\cU](A[\cV] \cap \fp)$. Note that $\fq = A[\cU](A[\cV] \cap \fq)$. We thus have $\fq \subset \fp' \subset \fp$. Since $\fp$ is prime, so is its contraction to $A[\cV]$, and so is the extension of this back to $A[\cU]$, since $A[\cU]$ is obtained from $A[\cV]$ by adjoining variables. Thus $\fp'$ is either $\fq$ or $\fp$; however, it is not $\fq$ since it contains $g$. Thus $\fp=\fp'$, which shows that $\fp$ is finitely generated. \end{proof} \begin{proposition} \label{prop:codimex} Let $A$ be a finitely generated $\bk$-algebra and let $\cV \subset \cU$ be sets. If $I$ is a finitely generated ideal of $A[\cV]$, and $J$ is its extension to $A[\cU]$, then $\codim_{A[\cV]}(I)=\codim_{A[\cU]}(J)$. \end{proposition} \begin{proof} We note that the result is classical if $\cU$ is finite (as can be seen, for example, using Hilbert polynomials). We will use this twice in the proof of the general case. 
First suppose that $\cV$ is finite and $I=\fp$ is prime. Note then that $J=\fq$ is prime as well. If $\fp_0 \subsetneq \cdots \subsetneq \fp_c=\fp$ is a chain of primes in $A[\cV]$, then letting $\fq_i$ be the extension of $\fp_i$, we get a chain of primes $\fq_0 \subsetneq \cdots \subsetneq \fq_c=\fq$ in $A[\cU]$, and so $\codim_{A[\cU]}(\fq) \ge c$, which shows $\codim_{A[\cU]}(\fq) \ge \codim_{A[\cV]}(\fp)$. Next suppose that $\fq_0 \subsetneq \cdots \subsetneq \fq_c=\fq$ is a chain of primes in $A[\cU]$. For each $0<i\le c$, pick $f_i \in \fq_i \setminus \fq_{i-1}$. Let $\cV'$ be a finite subset of $\cU$ containing $\cV$ and such that each $f_i$ belongs to $A[\cV']$. Then $\fq_{\bullet} \cap A[\cV']$ is a strict chain of prime ideals in $A[\cV']$, and so we see that $\codim_{A[\cV']}(\fp A[\cV']) \ge c$ (note that the contraction of $\fq$ to $A[\cV']$ is equal to the extension of $\fp$ to $A[\cV']$). However, $\codim_{A[\cV']}(\fp A[\cV'])=\codim_{A[\cV]}(\fp)$ by classical theory. We thus see that $\codim_{A[\cV]}(\fp) \ge c$, and so $\codim_{A[\cV]}(\fp) \ge \codim_{A[\cU]}(\fq)$. In particular, we have equality and this case has been proven. Next, suppose still that $\cV$ is finite, but let $I$ be an arbitrary ideal. If $\fp$ is a codimension $c$ prime of $A[\cV]$ containing $I$ then $\fp A[\cU]$ is a codimension $c$ prime of $A[\cU]$, by the previous paragraph, containing $J$. We thus see that $\codim_{A[\cU]}(J) \le \codim_{A[\cV]}(I)$. Next, suppose that $\fq$ is a codimension $c$ prime of $A[\cU]$ containing $J$. By Proposition~\ref{prop:fg}, there is a finite subset $\cV'$ of $\cU$ (which we can assume contains $\cV$) such that $\fq$ is the extension of an ideal (necessarily prime) $\fq'$ of $A[\cV']$. By the previous paragraph, $\fq'$ has codimension $c$ in $A[\cV']$. Since $\fq'$ clearly contains the extension of $I$ to $A[\cV']$, we see that $\codim_{A[\cV']}(I A[\cV']) \le c$. 
But $\codim_{A[\cV']}(I A[\cV'])=\codim_{A[\cV]}(I)$ by classical theory, and so $\codim_{A[\cV]}(I) \le c$. We thus see that $\codim_{A[\cV]}(I) \le \codim_{A[\cU]}(J)$. Finally, we treat the case where $\cV$ is arbitrary. Since $I$ is finitely generated, there is a finite subset $\cV_0$ of $\cV$ such that $I$ is the extension of an ideal $I_0$ of $A[\cV_0]$. Thus \begin{displaymath} \codim_{A[\cV]}(I)=\codim_{A[\cV_0]}(I_0)=\codim_{A[\cU]}(J), \end{displaymath} by two applications of the case where $\cV$ is finite. \end{proof} \begin{corollary} \label{cor:fincodim} Let $A$ be a finitely generated $\bk$-algebra. Every finitely generated ideal of $A[\cU]$ has finite codimension. \end{corollary} \begin{proof} Let $J$ be a finitely generated ideal of $A[\cU]$. Then $J$ is the extension of an ideal $I$ of some $A[\cV]$ with $\cV \subset \cU$ finite. Since $\codim_{A[\cV]}(I)\leq \dim A[\cV]<\infty$, Proposition~\ref{prop:codimex} implies that $\codim_{A[\cU]}(J)$ is finite as well. \end{proof} \begin{corollary} \label{cor:rs} Let $\cU$ be a set, and let $f_1, \ldots, f_r \in \bk[\cU]$. Then $f_1, \ldots, f_r$ form a regular sequence if and only if the ideal $(f_1, \ldots, f_r)$ has codimension $r$. \end{corollary} \begin{proof} Let $\cV$ be a finite subset of $\cU$ such that $f_1, \ldots, f_r \in \bk[\cV]$. Let $I$ (resp.\ $J$) be the ideal of $\bk[\cV]$ (resp.\ $\bk[\cU]$) generated by the $f_i$. The $f_i$ form a regular sequence in $\bk[\cV]$ (or in $\bk[\cU]$) if and only if the Koszul complex on the $f_i$ is exact; however, since $\bk[\cV]\subseteq \bk[\cU]$ is faithfully flat, the Koszul complex on the $f_i$ is exact over $\bk[\cU]$ if and only if it is exact over $\bk[\cV]$. Since, by classical theory, the $f_i$ form a regular sequence in $\bk[\cV]$ if and only if $\codim_{\bk[\cV]}(I)=r$, and $\codim_{\bk[\cV]}(I)=\codim_{\bk[\cU]}(J)$ by Proposition~\ref{prop:codimex}, the result follows. \end{proof} \begin{corollary} \label{cor:krull} Let $A$ be a finitely generated $\bk$-algebra, let $\cU$ be a set, and let $J$ be a finitely generated ideal of $A[\cU]$ containing a nonzerodivisor $f$. Let $\ol{J}$ be the image of $J$ in $A[\cU]/(f)$. 
Then $\codim_{A[\cU]/(f)}(\ol{J})=\codim_{A[\cU]}(J)-1$. \end{corollary} \begin{proof} Let $\cV$ be a sufficiently large finite subset of $\cU$ such that $A[\cV]$ contains $f$ and some finite generating set of $J$; thus $J$ is the extension of some ideal $I$ of $A[\cV]$. Let $B=A[\cV]$, which is a finitely generated $\bk$-algebra, and note that $A[\cU] = B[\cU']$, where $\cU'=\cU \setminus \cV$. Let $\ol{I}$ be the image of $I$ in $\ol{B}=B/(f)$. Since $\ol{J}$ is the extension of $\ol{I}$ to $\ol{B}[\cU']=A[\cU]/(f)$, we obtain $\codim_{\ol{B}}(\ol{I}) = \codim_{A[\cU]/(f)}(\ol{J})$ and $\codim_B(I) = \codim_{A[\cU]}(J)$ by two applications of Proposition~\ref{prop:codimex}. Finally since $\codim_{\ol{B}}(\ol{I})=\codim_B(I)-1$ by classical theory (for example, the principal ideal theorem and ~\cite[Corollary~13.4]{eisenbud}), the result follows. \end{proof} \begin{proposition} \label{prop:fgc} Let $A$ be a finitely generated $\bk$-algebra and let $\cU$ be a set. Let $J$ be a finitely generated ideal of $A[\cU][y]$. Then $A[\cU] \cap J$ is also a finitely generated ideal. \end{proposition} \begin{proof} Let $\cV$ be a finite subset of $\cU$ such that $J$ is the extension of an ideal $I$ of $A[\cV][y]$. Then $A[\cV] \cap I$ is finitely generated since $A[\cV]$ is noetherian. One easily sees that $A[\cU] \cap J$ is the extension of $A[\cV] \cap I$, which proves the result. \end{proof} \begin{corollary} \label{cor:contract} Let $A$ be a finitely generated $\bk$-algebra, let $\cU$ be a set, let $R=A[\cU]$, and let $S=R[y]$. Let $I$ be a finitely generated ideal of $S$. Suppose that $I$ contains a positive degree monic polynomial, that is, an element of the form $y^n + \sum_{i=0}^{n-1} a_i y^i$ with $a_i \in R$ and $n>0$. Then $\codim_R(R \cap I) = \codim_S(I)-1$. \end{corollary} \begin{proof} Let $f \in I$ be a monic polynomial. Let $\ol{I}$ be the image of $I$ in $S/(f)$. 
Then $R \to S/(f)$ is a finite flat extension of rings and $R \cap I$ is the contraction of $\ol{I}$ along this map. We thus see that $\codim_R(R \cap I)=\codim_{S/(f)}(\ol{I})$ by Proposition~\ref{prop:finflat}. But $\codim_{S/(f)}(\ol{I})=\codim_S(I)-1$ by Corollary~\ref{cor:krull}. \end{proof} \section{Stillman's conjecture via the ultraproduct ring} \label{s:ultra} \subsection{Background on ultraproducts} \label{subsec:ultra} For more details and references on ultraproducts, see \cite[\S 2.1]{schoutens}. Let $\cI$ be an infinite set. We fix a non-principal ultrafilter $\cF$ on $\cI$, which is a collection of subsets of $\cI$ satisfying the following properties: \begin{enumerate} \item $\cF$ contains no finite sets, \item if $A \in \cF$ and $B \in \cF$, then $A \cap B \in \cF$, \item if $A \in \cF$ and $A \subseteq B$, then $B \in \cF$, \item for all $A \subseteq \cI$, either $A \in \cF$ or $\cI \setminus A \in \cF$ (but not both). \end{enumerate} We think of the sets in $\cF$ as neighborhoods of some hypothetical (and non-existent) point $\ast$ of $\cI$, and refer to them as such. We say that some condition holds near $\ast$ if it holds in some neighborhood of $\ast$. Given a family of sets $\{X_i\}_{i \in \cI}$, their ultraproduct is the quotient of the usual product $\prod_{i \in \cI} X_i$ in which two sequences $(x_i)$ and $(y_i)$ are identified if the equality $x_i=y_i$ holds near $\ast$. If $x$ is an element of the ultraproduct, we will write $x_i$ for the $i$th coordinate of $x$, keeping in mind that this is only well-defined in sufficiently small neighborhoods of $\ast$; in other words, we can think of $x$ as a germ of a function around $\ast$. Suppose that each $X_i$ is a graded abelian group. We define the {\bf graded ultraproduct} of the $X_i$'s to be the subgroup of the usual ultraproduct consisting of elements $x$ such that $\deg(x_i)$ is bounded near $\ast$. 
The graded ultraproduct is a graded abelian group; in fact, it is the ultraproduct of the $X_i$'s in the category of graded abelian groups. The degree $d$ piece of the graded ultraproduct is the usual ultraproduct of the degree $d$ pieces of the $X_i$'s. We apply this construction in particular to the case where the $X_i$'s are graded rings; the graded ultraproduct is then again a graded ring. \begin{example} If $\bK$ is the ultraproduct of $\{\bk_i\}_{i\in \cI}$, then the graded ultraproduct of $\bk_i[x_1,\dots,x_n]$ (with standard grading) is $\bK[x_1,\dots,x_n]$ (also with standard grading). \end{example} In this subsection, we develop a few basic properties of graded ultraproduct rings. We begin with a simple observation on adjoining variables to ultraproducts. \begin{proposition} \label{prop:ultravar} Let $\{R_i\}_{i \in \cI}$ be a family of graded rings with graded ultraproduct $\bS$. Let $y$ be a variable of degree $1$, and let $\wt{\bS}$ be the graded ultraproduct of the rings $R_i[y]$. Then the natural map $\bS[y] \to \wt{\bS}$ is an isomorphism. \end{proposition} \begin{proof} Suppose that $f=\sum_{k=0}^d a_k y^k$ is an element of $\bS[y]$, and let $g$ be its image in $\wt{\bS}$. Then $g_i=\sum_{k=0}^d a_{k,i} y^k$. If $g=0$ then, passing to some neighborhood of $\ast$, we can assume $g_i=0$ for all $i$, which implies that $a_{k,i}=0$ for all $i$ and $k$, which implies that $a_k=0$ for all $k$, which shows that $f=0$. Thus the map is injective. Next, suppose that $g$ is an element of $\wt{\bS}$ of degree $d$. Then we can write $g_i=\sum_{k=0}^d a_{k,i} y^k$ for each $i$, where the $a_{k,i}$'s are elements of $R_i$. Let $a_k$ be the element of $\bS$ defined by the sequence $(a_{k,i})$. Then $g$ is the image of $f=\sum_{k=0}^d a_k y^k$, and so the map is surjective. \end{proof} We next prove a simple result on base change: \begin{proposition} \label{prop:basechange} Let $\bk'/\bk$ be a finite field extension. 
Let $\{R_i\}_{i \in \cI}$ be a family of graded $\bk$-algebras with graded ultraproduct $\bS$. Let $R'_i=\bk' \otimes_{\bk} R_i$, and let $\bS'$ be the graded ultraproduct of $\{R'_i\}_{i \in \cI}$. Then the natural map $\bk' \otimes_{\bk} \bS \to \bS'$ is an isomorphism. \end{proposition} \begin{proof} Let $\epsilon_1, \dots, \epsilon_d$ be a basis for $\bk'$ over $\bk$. We note that $R'_i$ is free as an $R_i$-module with basis $\epsilon_1 \otimes 1, \ldots, \epsilon_d \otimes 1$; similarly for $\bk' \otimes_{\bk} \bS$ over $\bS$. We claim that the $\epsilon$'s are also a basis for $\bS'$ over $\bS$. Given $f = (f_i) \in \bS'$, we can decompose $f_i$ uniquely as $\epsilon_1 \otimes f_{i,1} + \epsilon_2 \otimes f_{i,2} +\cdots + \epsilon_d \otimes f_{i,d}$ where $f_{i,j}\in R_i$ for all $i,j$. We define $g_j = (f_{i,j})\in \bS$ for $1\leq j \leq d$, and we have the unique decomposition $f = g_1\epsilon_1+\cdots +g_d\epsilon_d$ in $\bS'$. \end{proof} We now examine how ideals in an ultraproduct relate to ideals in the original rings. Given a family of graded rings $\{R_i\}_{i \in \cI}$ and a family of ideals $\{I_i\}_{i \in \cI}$, we say that the $I_i$ are {\bf uniformly finitely generated} if there exists an integer $n$ such that $I_i$ is generated by at most $n$ elements for all $i$ in some neighborhood of $*$. The graded ultraproduct of $\{I_i\}_{i \in \cI}$ is the subset of elements $(r_i)_{i \in \cI}$ of the graded ultraproduct of $\{R_i\}_{i \in \cI}$ such that $r_i \in I_i$ for all $i$ in some neighborhood of $*$. It is an ideal of the graded ultraproduct of $\{R_i\}_{i \in \cI}$. From now on, we will generally drop the subscript $i \in \cI$. \begin{proposition} \label{prop:ultraideal} Let $\{R_i\}$ be a family of graded rings with graded ultraproduct $\bS$. \begin{enumerate}[\indent \rm (a)] \item Suppose that $\{I_i\}$ is a uniformly finitely generated family of homogeneous ideals. Then their graded ultraproduct $I$ is a finitely generated ideal of $\bS$. 
\item Suppose that $\{I_i\}$ and $\{J_i\}$ are two uniformly finitely generated families of homogeneous ideals whose graded ultraproducts are equal. Then $I_i=J_i$ for all $i$ in some neighborhood of $*$. \item Suppose that $I$ is a finitely generated homogeneous ideal of $\bS$. Then there exists a uniformly finitely generated family of homogeneous ideals $\{I_i\}$ with ultraproduct $I$. \end{enumerate} \end{proposition} \begin{proof} (a) Suppose that each $I_i$ is generated by $\le n$ elements; pick homogeneous generators $f_{1,i}, \ldots, f_{n,i}$ of each $I_i$. Let $f_1, \ldots, f_n$ be the elements of $\bS$ defined by these sequences. We claim that $I$ is generated by $f_1, \ldots, f_n$. Indeed, suppose that $g$ is a homogeneous element of $I$; thus, passing to a small enough neighborhood of $*$, we see that each $g_i$ is an element of $I_i$, and can thus be written as $\sum_{k=1}^n a_{k,i} f_{k,i}$ for some homogeneous elements $a_{k,i} \in R_i$. Let $a_k$ be the element of $\bS$ defined by the sequence $a_{k,i}$. Then $g=\sum_{k=1}^n a_k f_k$, proving the claim. (Note that for $k$ fixed, each $a_{k,i}$ is homogeneous of some degree, but that the degree may depend on $i$. However, the degree is bounded by the degree of $g$, and so in any small enough neighborhood of $*$, the degree of $a_{k,i}$ will be independent of $i$.) (b) Suppose that $I_i$ and $J_i$ are each generated by at most $n$ elements for all $i$, and pick generators $f_{1,i}, \ldots, f_{n,i}$ and $g_{1,i}, \ldots, g_{n,i}$. Let $f_1, \ldots, f_n$ and $g_1, \ldots, g_n$ be the elements of $\bS$ these sequences define. By (a), the $f_k$'s and $g_k$'s generate the same ideal of $\bS$. Thus we have an expression $g_k=\sum_{j=1}^n a_j f_j$ for some $a_j \in \bS$, and so $g_{k,i} = \sum_{j=1}^n a_{j,i} f_{j,i}$ holds for all $i$ in some neighborhood of $\ast$, and so $g_{k,i}$ belongs to the ideal $I_i$ for all such $i$. 
Since there are only finitely many $f$'s and $g$'s, we can pass to some common neighborhood of $\ast$ so that $g_{k,i} \in I_i$ and $f_{k,i} \in J_i$ for all $i$ and $k$, and so $I_i=J_i$. (c) Let $I$ be generated by $f_1, \ldots, f_n$. Let $f_k$ be represented by some sequence $(f_{k,i})$, and let $I_i$ be the ideal of $R_i$ generated by $f_{1,i}, \ldots, f_{n,i}$. Then the argument in (a) shows that $I$ is the ultraproduct of the $I_i$'s. \end{proof} Due to this proposition, we can unambiguously speak of the germ of a finitely generated homogeneous ideal $I$ of $\bS$. We denote these ideals by $I_i$, keeping in mind that they are only well-defined for $i$ sufficiently close to $\ast$. We next show that this construction interacts well with contraction. \begin{proposition} \label{prop:ultracontract} Let $\{R_i\}$ be a family of graded rings with graded ultraproduct $\bS$, and let $\{R'_i\}$ be a family of graded subrings of $\{R_i\}$ with graded ultraproduct $\bS'$. Let $I$ be a finitely generated homogeneous ideal of $\bS$, and suppose that $\bS' \cap I$ is a finitely generated ideal of $\bS'$. Then $(\bS' \cap I)_i = R'_i \cap I_i$ in a neighborhood of $\ast$. \end{proposition} \begin{proof} Let $g_1, \ldots, g_m$ be generators for $\bS' \cap I$. Then $g_{k,i}$ belongs to $R'_i \cap I_i$ (in some neighborhood of $\ast$), and so $(\bS' \cap I)_i$ is contained in $R'_i \cap I_i$ (in some neighborhood of $\ast$), since the former is generated by $g_{1,i}, \ldots, g_{m,i}$. We now claim that the inclusion $(\bS' \cap I)_i \subset R'_i \cap I_i$ is an equality in some neighborhood of $\ast$. Assume not. Then we can find a sequence $(h_i)$ such that $h_i \in R'_i \cap I_i$ for all $i$, but in any neighborhood of $\ast$ there exists $i$ such that $h_i \not\in (\bS' \cap I)_i$. Let $h \in \bS$ be the element defined by $(h_i)$. Then $h \in \bS'$, since $h_i \in R'_i$ for all $i$, and $h \in I$, since $h_i \in I_i$ for all $i$. 
Thus $h \in \bS' \cap I$, and so $h=\sum_{k=1}^m a_k g_k$ for some $a_k \in \bS'$. But then $h_i=\sum_{k=1}^m a_{k,i} g_{k,i}$ holds in some neighborhood of $\ast$, which shows that $h_i \in (\bS' \cap I)_i$ in some neighborhood of $\ast$, a contradiction. \end{proof} We close this subsection with a result on strength in ultraproducts: \begin{proposition} \label{prop:str} Let $\{R_i\}$ be a family of graded rings with graded ultraproduct $\bS$. Suppose that the degree~$0$ piece of $R_i$ is a field $\bk_i$, so that the degree~$0$ piece of $\bS$ is the ultraproduct $\bK$ of these fields. Choose homogeneous elements $f_1, \ldots, f_r \in \bS$. Suppose that the collective strength of $f_{1,i}, \ldots, f_{r,i}$ is unbounded in all sufficiently small neighborhoods of $\ast$. Then $f_1, \ldots, f_r$ have infinite collective strength. \end{proposition} \begin{proof} Suppose we have a relation $\sum_{j=1}^r a_j f_j = \sum_{k=1}^s g_k h_k$ where $a_j \in \bK$ are not all zero and $g_k$ and $h_k$ are elements of positive degree. Represent everything by sequences: $a_j=(a_{j,i})$, $g_k=(g_{k,i})$, and $h_k=(h_{k,i})$. Then, by definition of the ultraproduct, in all sufficiently small neighborhoods of $*$, we have $\sum_{j=1}^r a_{j,i} f_{j,i} = \sum_{k=1}^s g_{k,i} h_{k,i}$ and $a_{j,i} \ne 0$ for at least one $j$. But this shows that $f_{1,i},\ldots,f_{r,i}$ have collective strength $<s$ in this neighborhood of $\ast$. \end{proof} \subsection{The main theorems on ultraproduct rings} \label{ss:ultramain} Let $\{\bk_i\}_{i \in \cI}$ be a family of perfect fields with ultraproduct $\bK$. The field $\bK$ is also perfect, as if $\bK$ has characteristic $p>0$, then $\bk_i$ is perfect of characteristic $p$ for all $i$ sufficiently close to $\ast$, and so one can take $p$th roots in $\bK$. Let $R_i=\bk_i[x_1,x_2,\ldots]$ with standard grading, and let $\bS$ be the graded ultraproduct of the family $\{R_i\}_{i \in \cI}$. 
\begin{theorem} \label{thm:S-poly-ring} The ring $\bS$ is (isomorphic to) a polynomial ring. \end{theorem} \begin{proof} We use the criteria of \S \ref{s:poly}. First suppose that $\bK$ has characteristic~0, and let us prove that $\bS$ has enough derivations (Definition~\ref{defn:enough1}). Let $f \in \bS$ be a non-zero homogeneous element of degree $d>0$. Passing to a neighborhood of $\ast$, we can assume that each $\bk_i$ has characteristic~0 or characteristic~$p$ with $p>d$, and that $f_i \ne 0$. For each $i$, let $a(i)$ be an index such that $x_{a(i)}$ appears in some monomial in $f_i$, and let $\partial_i$ be the derivation $\frac{\partial}{ \partial x_{a(i)}}$ of $R_i$. The derivations $(\partial_i)$ define a derivation $\partial$ on $\bS$. Since $\partial_i(f_i) \ne 0$ near $*$, we see that $\partial(f) \ne 0$, and so $\bS$ has enough derivations. Thus $\bS$ is a polynomial ring (Theorem~\ref{thm:poly}). Now suppose that $\bK$ has characteristic~$p$, and let us prove that $\bS$ has enough Hasse derivations (Definition~\ref{defn:enough2}). Let $f \in \bS$ be a homogeneous element of positive degree that is not a $p$th power. Passing to a neighborhood of $\ast$, we can assume that each $f_i$ is not a $p$th power. For each $i$, let $a(i)$ be an index such that $x_{a(i)}$ appears in some monomial in $f_i$ with exponent not divisible by $p$, and let $\partial_i$ be the Hasse derivative on $R_i$ with respect to $x_{a(i)}$ (Example~\ref{ex:hasse}). The Hasse derivations $\partial_i$ on the $R_i$ induce a Hasse derivation $\partial$ on $\bS$. Since $\partial_i(f_i) \ne 0$ near $*$, we see that $\partial(f) \ne 0$, and so $\bS$ has enough Hasse derivations. Thus $\bS$ is a polynomial ring (Theorem~\ref{thm:poly2}). \end{proof} \begin{theorem} \label{thm:codim in nhood} If $I \subset \bS$ is a finitely generated homogeneous ideal, then $\codim_{\bS}(I) = \codim_{R_i}(I_i)$ for all $i$ sufficiently close to $\ast$. 
\end{theorem} \begin{proof} Let $c=\codim_{\bS}(I)$, which is finite by Corollary~\ref{cor:fincodim}. If $c=0$ then $I=0$, and so $I_i=0$ for all $i$ sufficiently close to $\ast$, and so the formula holds. We now proceed by induction on $c$. Suppose the result holds for $c-1$, and let $I$ be an ideal of $\bS$ of codimension $c>0$. Let $f \in I$ be a non-zero homogeneous element. We would like for $\#\bk_i>\deg f$ to hold sufficiently close to $\ast$. Suppose this is not the case. Then, since the size of the $\bk_i$ is bounded near $\ast$, there must exist a single $q$ such that $\bk_i=\bF_q$ for all $i$ sufficiently close to $\ast$. It follows that $\bK=\bF_q$. Indeed, an element of $\bK$ is a sequence $(x_i)_{i \in \cI}$, and as each $x_i$ can only take finitely many values the sequence must be constant in a neighborhood of $\ast$. Let $e>0$ be an integer so that $q^e>\deg{f}$, let $\bk'_i=\bF_{q^e}$, let $R'_i=\bk'_i[x_1,x_2,\ldots]$, and let $\bS'$ be the ultraproduct of the $R'_i$. By Proposition~\ref{prop:basechange}, the natural map $\bF_{q^e} \otimes_{\bF_q} \bS \to \bS'$ is an isomorphism. Write $I'$ and $I'_i$ for the extension of the ideals $I$ and $I_i$ to $\bS'$ and $R_i'$, respectively. We have that $\codim_{R_i}(I_i) = \codim_{R_i'}(I_i')$ and $\codim_{\bS'}(I')=\codim_{\bS}(I)$. It thus suffices to prove that $\codim_{\bS'}(I')=\codim_{R_i'}(I_i')$ for all $i$ sufficiently close to $\ast$. Relabeling, we have reduced to the case where $\#\bk_i>\deg f$ holds in a neighborhood of $\ast$. For each $i$, let $\gamma_i$ be an automorphism of $R_i$ such that $\gamma_i(f_i)$ is monic in $x_1$, at least for $i$ sufficiently close to $\ast$ (see Lemma~\ref{lem:monic}). The family $\{\gamma_i\}$ induces an automorphism $\gamma$ of $\bS$. Since codimension is invariant under automorphisms, we may replace $I$ with $\gamma(I)$, and so we can assume that $f_i$ is monic in $x_1$ for all $i$ sufficiently close to $\ast$. 
Let $R'_i=\bk_i[x_2,\ldots]$ and let $\bS'$ be the ultraproduct of $\{R'_i\}$. We have $R_i=R'_i[x_1]$ for each $i$, and so $\bS \cong \bS'[x_1]$ by Proposition~\ref{prop:ultravar}. Under this identification, $f$ corresponds to a monic polynomial in $\bS'[x_1]$. Let $I'$ be the contraction of $I$ to $\bS'$, which is finitely generated by Proposition~\ref{prop:fgc}. We note that $I'_i$ is the contraction of $I_i$ to $R'_i$, for all $i$ sufficiently close to $\ast$, by Proposition~\ref{prop:ultracontract}. Corollary~\ref{cor:contract} implies that $\codim_{\bS'}(I')=\codim_{\bS}(I)-1=c-1$. Thus, by the inductive hypothesis, we have $\codim_{R'_i}(I'_i)=c-1$ for all $i$ sufficiently close to $\ast$. By Corollary~\ref{cor:contract} again, $\codim_{R_i}(I_i)=\codim_{R'_i}(I'_i)+1=c$. The result follows. \end{proof} \begin{lemma} \label{lem:monic} Let $\bk$ be a field, let $R=\bk[x_1,x_2,\ldots]$, and let $f \in R$ be a non-zero homogeneous element. If $\#\bk>\deg f$ then there exists an automorphism $\gamma$ of $R$ (as a graded $\bk$-algebra) such that $\gamma(f)$ is monic in $x_1$. \end{lemma} \begin{proof} (Compare with \cite[Lemma~13.2(c)]{eisenbud}.) We may assume that $f$ lies in $\bk[x_1,\dots,x_n]$ for some $n$. Let $d=\deg f$. We consider an automorphism of the form $\gamma(x_i)=x_i$ for $i=1$ or $i>n$ and $\gamma(x_i)=x_i-a_ix_1$ for $2\leq i \leq n$, where $a_i\in \bk$. The coefficient of $x_1^d$ in $\gamma(f)$ can be viewed as an inhomogeneous polynomial $g(a_2,\dots,a_n)$, with $\deg(g)\leq d$. Thus, as long as $\#\bk>d$, we can find some choice of $a_2,\dots,a_n$ where $g(a_2,\dots,a_n)\ne 0$. \end{proof} \begin{corollary}\label{cor:reg seq ultra} Let $f_1, \ldots, f_r$ be homogeneous elements of $\bS$. Then $f_1, \ldots, f_r$ form a regular sequence in $\bS$ if and only if $f_{1,i}, \ldots, f_{r,i}$ form a regular sequence in $R_i$ for all $i$ sufficiently close to $\ast$. 
\end{corollary} \begin{proof} This follows from Theorem~\ref{thm:codim in nhood} and Corollary~\ref{cor:rs}. \end{proof} \subsection{Stillman's conjecture}\label{ss:stillman} \begin{theorem} \label{thm:reg} Given positive integers $d_1, \ldots, d_r$ there exists an integer $N=N(d_1,\ldots,d_r)$ with the following property. If $f_1, \ldots, f_r$ are homogeneous elements of $\bk[x_1, \ldots, x_n]$, for any perfect field $\bk$ and any $n$, of degrees $d_1, \ldots, d_r$ and collective strength at least $N$ then $f_1, \ldots, f_r$ form a regular sequence. \end{theorem} \begin{proof} Suppose that the theorem is false. Then for each $i\in \bN$, we can find $f_{1,i}, \dots,f_{r,i} \in \bk_i[x_1,x_2,\dots]$, with $\bk_i$ perfect, which fail to form a regular sequence and where the collective strength goes to $\infty$ as $i\to \infty$. Choosing $\cI=\bN$, we let $f_1=(f_{1,i}), \dots, f_r=(f_{r,i})$ be the corresponding collection in $\bS$. By Proposition~\ref{prop:str}, $f_1,\dots,f_r$ have infinite collective strength. However, by Corollary~\ref{cor:reg seq ultra}, $f_1,\dots,f_r$ fail to form a regular sequence. This contradicts Theorem~\ref{polycrit}. \end{proof} For completeness, we now illustrate how Theorem~\ref{thm:reg} implies the existence of small subalgebras and Stillman's conjecture. This implication is essentially the same as in \cite{ananyan-hochster}. \begin{theorem} \label{thm:smallalg} Given positive integers $d_1, \ldots, d_r$ there exists an integer $s=s(d_1,\ldots,d_r)$ with the following property. If $f_1, \ldots, f_r$ are homogeneous elements of $\bk[x_1, \ldots, x_n]$, for any perfect field $\bk$ and any $n$, with $\deg(f_i)=d_i$, then: \begin{enumerate}[\rm \indent (a)] \item There exists a regular sequence $g_1, \ldots, g_s$ in $\bk[x_1, \ldots, x_n]$, where each $g_i$ is homogeneous of degree at most $\max(d_1, \ldots, d_r)$, such that $f_1, \ldots, f_r$ are contained in the subalgebra $\bk[g_1, \ldots, g_s]$. 
\item The ideal $( f_1,\ldots,f_r) $ has projective dimension at most $s$. \end{enumerate} \end{theorem} \begin{proof} (a) To each sequence $\bd=(d_1,\dots,d_r)$ we attach a monomial $y(\bd)=y_1^{b_1}y_2^{b_2}\cdots$ where $b_j$ is the number of times $j$ appears in $\bd$. If there is an ideal $(f_1,\dots,f_r)$ of type $\bd$ that fails to be a regular sequence, then by Theorem~\ref{thm:reg} there is some $N$, depending only on $\bd$, such that some $\bk$-linear homogeneous combination of the $f_i$ has strength $\leq N$. Without loss of generality, we may replace one of our elements with this linear combination, and call it $f_i$. Taking $f_i=\sum_{j=1}^N a_jg_j$, and replacing $f_i$ by the $a_j$ and the $g_j$, we get an ideal of type $\bd'$ and where $y(\bd)<y(\bd')$ in the lexicographic order (but where the variables are checked in reverse order), and where the difference in total degree is at most $2N-1$. In particular, given $y(\bd)$ there are only a finite number of possible monomials $y(\bd')$ that could arise in this way. The descendants of $y(\bd)$ thus form a tree with finitely many branches at each node and with no infinite chains, and there are thus only finitely many descendants of $y(\bd)$. Letting $s$ be the max total degree of a descendant of $y(\bd)$, it follows that $f_1,\dots,f_r$ can be embedded in a subalgebra $\bk[g_1,\dots,g_s]$ where the $g_i$ form a regular sequence. (b) Choose $g_1,\dots,g_s$ as in (a). Since $g_1,\dots,g_s$ form a regular sequence, the extension $\bk[g_1,\dots,g_s]\subseteq \bk[x_1,\dots,x_n]$ is flat. Thus, if $G$ is the minimal free resolution of $(f_1,\dots,f_r)$ over $\bk[g_1,\dots,g_s]$, then the extension of $G$ to $\bk[x_1,\dots,x_n]$ is the minimal free resolution of this ideal over $\bk[x_1,\dots,x_n]$. In particular, the projective dimension of $(f_1,\dots,f_r)$ is $\leq s$. 
\end{proof} \section{The inverse limit ring} \label{s:limit} \subsection{Inverse limit polynomial ring}\label{subsec:inv} Recall that $A\invl x_1,x_2,\dots\invr$ denotes the inverse limit of the standard-graded polynomial rings $A[x_1,\ldots,x_n]$ in the category of graded rings. We let $K$ denote a ring containing $A$, and we write $\alpha_n \colon K \otimes_A A\invl x_1,x_2,\dots\invr \to K[x_1,\dots,x_n]$ for the natural surjection. We set $\bR = K \otimes_A A\invl x_1,x_2,\dots\invr$. The following hypothesis will be used repeatedly. \begin{hypothesis}\label{defn:bR} $A$ is an integral domain with fraction field $K$. If the characteristic $p$ of $K$ is positive, we assume furthermore that the Frobenius map on $A$ is surjective (so that $K$ is perfect) and that $K$ is infinite. \end{hypothesis} \begin{remark}\label{rmk:replace} If $A$ is normal and its fraction field $K$ is perfect, then because $a^{1/p}$ satisfies the integral equation $x^p-a$, it lies in $A$. Thus, we can often arrange to satisfy Hypothesis~\ref{defn:bR} by replacing $A$ with its integral closure in an algebraic closure of $K$. \end{remark} The following is an analogue of Theorem~\ref{thm:S-poly-ring} and Corollary~\ref{cor:reg seq ultra}. It implies Theorem~\ref{introthm1}. \begin{theorem}\label{thm:inverse is poly} Suppose Hypothesis~\ref{defn:bR} holds (except we do not require $K$ to be infinite). Then $\bR$ is a polynomial ring. \end{theorem} \begin{proof} We use the criteria of \S \ref{s:poly}. If $p=0$, then the partial derivatives $\frac{d}{dx_i}$ show that $\bR$ has enough derivations. Now suppose that $p>0$. We claim that the Hasse derivatives corresponding to $\frac{d}{dx_i}$ (Example~\ref{ex:hasse}) provide $\bR$ with enough Hasse derivations. Let $f \in \bR$ be such that $\frac{d}{dx_i} f = 0$ for all $i$. This implies that $f \in K\otimes_A A\llbracket\hspace{-.12cm} \llbracket x_1^p,x_2^p,\dots \rrbracket\hspace{-.12cm} \rrbracket$. 
In particular, we can write $f = g/a$ where $a \in A$ and $g \in A\llbracket\hspace{-.12cm} \llbracket x_1^p, x_2^p, \dots \rrbracket\hspace{-.12cm} \rrbracket$. Since the Frobenius map is surjective on $A$ and $K$, both $g$ and $a$ are $p$th powers, which implies that $f$ is also a $p$th power. \end{proof} \begin{remark} \label{rmk:relax2} The perfectness hypothesis in Theorem~\ref{thm:inverse is poly} can be relaxed. For example, suppose $\bk$ is a field of characteristic~$p$ such that $\bk$ is a finite extension of the subfield $\bk^p$, and let $R=\bk \invl x_1,x_2,\dots\invr$. Then $\bk R^p$ consists exactly of all (possibly infinite) $\bk$-linear combinations of $p$th powers of monomials; this uses the hypothesis on $\bk$. Thus if $f \not\in \bk R^p$ then some Hasse derivative will not kill $f$, and so $R$ has enough derivations, and is thus a polynomial ring by Remark~\ref{rmk:relax}. \end{remark} \begin{theorem} \label{thm:codim in truncations} Suppose Hypothesis~\ref{defn:bR} holds. Let $f_1,\dots,f_s\in \bR$ and let $I=(f_1,\dots,f_s)$. \begin{enumerate}[\rm \indent (a)] \item For any $n\gg 0$, we have that $\codim_\bR(I) = \codim_{K[x_1,\dots,x_n]}(\alpha_n(I))$. \item The sequence $f_1,\dots,f_s$ forms a regular sequence if and only if $\alpha_n(f_1),\dots,\alpha_n(f_s)$ forms a regular sequence for all $n\gg 0$. \item If $\alpha_n(f_1),\dots,\alpha_n(f_s)$ forms a regular sequence for some $n$, then $\alpha_m(f_1),\dots,\alpha_m(f_s)$ forms a regular sequence for all $m\geq n$. \end{enumerate} \end{theorem} \begin{proof} (a) We prove this by induction on $c=\codim_\bR(I)$, which is finite by Corollary~\ref{cor:fincodim}. If $c=0$ then $I=0$ and the statement is immediate. Now let $c>0$ and pick $f \in I$ nonzero. Let $n$ be large enough so that $\alpha_n(f)\ne 0$. 
Since $K$ is infinite, there is a graded $K$-algebra automorphism $\gamma$ of $K[x_1,\dots,x_n]$ such that $\gamma \alpha_n(f)$ is monic over $K[x_2,\dots,x_n]$ (see \cite[Lemma~13.2(c)]{eisenbud} and its proof). If $\gamma'$ is the automorphism of $\bR$ which acts by $\gamma$ on $x_1,\dots,x_n$ and which acts trivially on the other $x_i$, then $\alpha_n(\gamma' f)=\gamma \alpha_n f$. We may thus assume that $f$ is monic over $K\otimes_A A\llbracket\hspace{-.12cm} \llbracket x_2,x_3,\dots \rrbracket\hspace{-.12cm} \rrbracket$. The rest of the proof is essentially identical to the proof of Theorem~\ref{thm:codim in nhood}. (b) This is an immediate consequence of (a) and Corollary~\ref{cor:rs}. (c) By Corollary~\ref{cor:rs}, we have $\codim \alpha_n(I)=s$ and it suffices to show that $\codim \alpha_{n+1}(I)=s$. Since $K[x_1,\dots,x_{n+1}]/(\alpha_{n+1}(I)+(x_{n+1}))$ is isomorphic to $K[x_1,\dots,x_n]/\alpha_n(I)$, the principal ideal theorem implies that $\codim \alpha_{n+1}(I)$ is either $s$ or $s+1$. However, $\alpha_{n+1}(I)$ is generated by $s$ elements, so its codimension is at most $s$. Thus $\codim \alpha_{n+1}(I)=s$. \end{proof} \begin{definition}\label{defn:restriction to point} Fix a ring $A$, a field $\bk$, and a point $y\in\Spec(A)(\bk)$. For $f\in A\invl x_1,x_2,\dots\invr$, we let $f_y$ denote the image of $f$ in $\bk\invl x_1,x_2,\dots\invr$. Similarly, for an $A\invl x_1,x_2,\dots\invr$-module $M$, we let $M_y=\bk\invl x_1,x_2,\dots\invr\otimes_{A\invl x_1,x_2,\dots\invr} M$. If instead $f\in A[x_1,\dots,x_n]$, then we let $f_y$ denote its image in $\bk[x_1,\dots,x_n]$. If $M$ is an $A[x_1,\dots,x_n]$-module, then we let $M_y=\bk[x_1,\dots,x_n]\otimes_{A[x_1,\dots,x_n]}M$. 
\end{definition} \begin{remark} We note that $\bk\otimes_A A\invl x_1,x_2,\dots\invr$ is not generally isomorphic to $\bk\invl x_1,x_2,\dots\invr$: $\bk \otimes_A A \invl x_1,x_2,\dots\invr$ consists of those infinite series in $x_1,x_2,\dots$ whose coefficients have a common denominator. For example, if $A = \bZ$ and $\bk=\bQ$, then $\sum_{i \ge 1} x_i/i$ is an element of $\bQ\invl x_1,x_2,\dots\invr$ but is not an element of $\bQ \otimes \bZ\invl x_1,x_2,\dots\invr$. \end{remark} \begin{corollary}\label{cor:zariski condition} Suppose Hypothesis~\ref{defn:bR} holds. Let $f_1,\dots,f_s \in A\invl x_1,x_2,\dots\invr$ be elements whose images in $\bR$ form a regular sequence. There exists a dense open set $U\subseteq \Spec(A)$ such that for any algebraically closed field $\bk$ and any $y\in U(\bk)$, the elements $f_{1,y},\dots,f_{s,y}$ form a regular sequence in $\bk\invl x_1,x_2,\dots\invr$. \end{corollary} \begin{proof} By Theorem~\ref{thm:codim in truncations}, there is some $n$ so that $\alpha_n(f_1),\dots,\alpha_n(f_s)\in K[x_1,\dots,x_n]$ forms a regular sequence. Let $g_i=\alpha_n(f_i)$, considered as an element of $A[x_1,\dots,x_n]$. Let $Q=A[x_1,\dots,x_n]/(g_1,\dots,g_s)$ and let $\pi\colon \Spec(Q)\to \Spec(A)$. Since the generic fiber of $\pi$ has dimension $n-s$, it follows that the locus $U\subseteq \Spec(A)$ of points whose fibers have dimension $n-s$ is dense and Zariski open by semicontinuity of fiber dimension \stacks{05F6}. Let $\bk$ be an algebraically closed field and let $y\in U(\bk)$. Since $\dim(Q\otimes_A \bk)=n-s$, it follows that $g_{1,y},\dots,g_{s,y}$ forms a regular sequence. But $g_{i,y}$ equals $\alpha_n(f_{i,y})$, and thus by Theorem~\ref{thm:codim in truncations}(c) and (b), we have that $f_{1,y},\ldots,f_{s,y} $ forms a regular sequence. 
\end{proof} \begin{lemma}\label{lem:flat extension} If $\bk$ is a perfect field and $f_1,\dots,f_s\in \bk\invl x_1,x_2,\dots\invr$ is a regular sequence, then $i' \colon \bk[f_1,\dots,f_s]\to \bk\invl x_1,x_2,\dots\invr$ is faithfully flat. \end{lemma} \begin{proof} By Theorem~\ref{thm:inverse is poly}, we can write $\bk\invl x_1,x_2,\dots\invr \cong \bk[{\cV}]$. There is a finite subset ${\cH}\subseteq {\cV}$ such that ${f_1},\dots,{f_s}\in \bk[{\cH}]$. Since the ${f_i}$ form a regular sequence, we can extend this to a maximal regular sequence, ${f_1},\dots,{f_s},g_1,\dots,g_{r}$ on $\bk[{\cH}]$. The map $i'$ factors as \[ \bk[{f_1},\dots,{f_s}]\overset{i_1}{\longrightarrow} \bk[{f_1},\dots,{f_s},g_1,\dots,g_{r}] \overset{i_2}{\longrightarrow} \bk[{\cH}] \overset{i_3}{\longrightarrow} \bk\invl x_1,x_2,\dots\invr. \] For each extension, the larger ring is free over the smaller ring. Both $i_1$ and $i_3$ are extensions of polynomial rings. For $i_2$, freeness follows from ~\cite[Proposition~2.2.11]{bruns-herzog} (the statement there is for a local ring, but the proof also works for a graded ring). \end{proof} \subsection{Constant Betti tables over an open subset}\label{sec:inverse limit proof} For a graded ring $R$ with $R_0=\bk$ a field, we set $\beta_{i,j}(M)=\dim_{\bk} \Tor^R_i(M,\bk)_j$. The {\bf Betti table} of $M$ is the collection of all $\beta_{i,j}$. \begin{definition}\label{defn:constant betti} Let $A$ be a commutative ring and let $U\subseteq \Spec(A)$ be a locally closed subset. Let $M$ be a finitely presented, graded module over either $A\invl x_1,x_2,\dots\invr$ or over a polynomial ring over $A$. We say that $M$ {\bf has a constant Betti table over $U$} if for every algebraically closed field $\bk$ and every $y\in U(\bk)$, the Betti table of $M_y$ is the same. (Recall that $M_y$ is defined in Definition~\ref{defn:restriction to point}). 
\end{definition} The following lemma, which is likely known to experts, shows that a finitely presented module over a finite polynomial ring has a constant Betti table over an open subset. \begin{lemma}\label{lem:finite betti} Let $A$ be an integral domain and let $R=A[y_1,\dots,y_r]$ be a graded polynomial ring over $A$, with $\deg(y_i)\geq 1$ for $1\leq i \leq r$. If $M'$ is a finitely presented, graded $R$-module, then $M'$ has a constant Betti table over some dense, open subset $U\subseteq \Spec(A)$. \end{lemma} \begin{proof} Let $K$ be the fraction field of $A$ and let $G'_K=[0\to G'_{K,p} \overset{\partial_p}{\to}\cdots \overset{\partial_1}{\to} G'_{K,0}]$ be the minimal free resolution of $K\otimes_A M'$ over $K[y_1,\dots,y_r]$. Represent each $\partial_i$ by a matrix $\phi_i$. The entries of each $\phi_i$ have positive degree and, by multiplying by an element in $A$ if needed, we may assume that the entries of each $\phi_i$ also lie in $R$. These matrices can then be used to define a bounded, graded complex $G'$ of free $R$-modules. By construction, $K\otimes_A \coker(G'_1\to G'_0)$ is isomorphic to $K\otimes_AM'$. Since both $M'$ and $\coker(G'_1\to G'_0)$ are finitely presented, this isomorphism extends to an isomorphism over $A_g$ for some $g\in A$. Let $B$ be the subring of $A$ generated by all of the coefficients of the polynomials that appear as entries in the differentials $\partial_i$ and let $S = B[y_1,\dots,y_r]$. Then we can also use the $\partial_i$ to define a complex $H$ over $S$ with the property that $H \otimes_B A \cong G'$. Define $N$ to be the direct sum of the homology modules $\rH_i(H)$ for $0\leq i \leq p$ along with the images of the differentials $\partial_i$ for $i=1,\dots,p$. Since $N$ is a finitely generated module over the finitely presented extension $B\to B[y_1,\dots,y_r]$,~\stacks{051S} implies that there is some $h\in B$ such that $N_h$ is free over $B_h$. 
Working over $B_h$ and $A_h$, we see that base extension of $H_h$ to $A_h$ and also $K$ commutes with taking homology (due to flatness of the images of $\partial_i$ over $B_h$). But since they are free over $B_h$, this implies that the homology of both $H_h$ and $G'_h$ vanish in positive degrees. In sum, if $f=gh$, then $G'_f$ is a free resolution of $M'_f$, and $M'_f$ is a flat $A_f$-module. Let $U=\Spec(A_f)$. Let $\bk$ be a field and $y\in U(\bk)$. Since $M'_f$ is flat over $A_f$, $\bk\otimes_{A_f} G'_f$ is a free resolution of $M'_y$. The resolution is minimal since each entry of $\phi_i$ has positive degree, and this remains true under localization at $f$ and specialization to $\bk$. The Betti table of $M'_y$ is thus determined by the free modules in $G'_f$, and so it does not depend on $y$. \end{proof} \begin{lemma}\label{lem:first step} Suppose Hypothesis~\ref{defn:bR} holds. Let $M$ be a finitely presented, graded $A\invl x_1,x_2,\dots\invr$-module. There exist: \begin{enumerate}[\rm \indent (a)] \item elements $f_1,\dots,f_s\in A\invl x_1,x_2,\dots\invr$ whose images in $\bR$ form a regular sequence, and \item an element $g\in A$ and a finitely presented $A_g[f_1,\dots,f_s]$-module $M'$ such that the extension of $M'$ to $A_g\otimes_A A\invl x_1,x_2,\dots\invr$ is isomorphic to $A_g\otimes_A M$. \end{enumerate} \end{lemma} \begin{proof} Let $\cU$ be a set of homogeneous elements of $\bR_+$ such that $\bR=K[\cU]$. Since any $f\in \bR$ can be written as a fraction with numerator in $A\invl x_1,x_2,\dots\invr$ and denominator in $A$, we may rescale each element of $\cU$ so that it lies in $A\invl x_1,x_2,\dots\invr$. Since $\bR=K[\cU]$ is a polynomial ring, for any element $f\in \bR$, there is a finite subset $\cU' \subseteq \cU$, and an element $\gamma \in A$ such that $f$ lies in $A_\gamma[\cU']$. The same holds for any finite collection of elements in $\bR$. Let $\Phi$ be a finite presentation matrix of $M$. 
By the above discussion, we can find distinct elements $f_1,\dots,f_s\in \cU$ and $g\in A$ such that each entry of $\Phi$ lies in $A_g[f_1,\dots,f_s]$. Let $\Phi'$ be the same matrix as $\Phi$, but considered as a map of graded, free $A_g[f_1,\dots,f_s]$-modules and let $M'$ be the cokernel of $\Phi'$. By construction, the extension of $M'$ to $A_g\otimes_A A\invl x_1,x_2,\dots\invr$ is isomorphic to $A_g\otimes_A M$. The elements $f_1, \ldots, f_s \in \bR$ form a regular sequence as they are ``variables'' (elements of $\cU$). \end{proof} \begin{theorem}\label{thm:open subset} Suppose Hypothesis~\ref{defn:bR} holds. If $M$ is a finitely presented, graded $A\invl x_1,x_2,\dots\invr$-module, then $M$ has a constant Betti table over some dense open subset $U\subseteq \Spec(A)$. \end{theorem} \begin{proof} We apply Lemma~\ref{lem:first step}, and let $M'$ be the $A_g[f_1,\dots,f_s]$-module satisfying the conclusion of that lemma. Applying Lemma~\ref{lem:finite betti} to $M'$, we can assume that $M'$ has constant Betti table over a dense open subset $U_1\subseteq \Spec(A_g)$. By Corollary~\ref{cor:zariski condition}, we can find a dense open subset $U_2\subseteq \Spec(A_g)$ where for all algebraically closed fields $\bk$ and all $y\in U_2(\bk)$, the sequence $f_{1,y}, \dots, f_{s,y}$ forms a regular sequence. We let $U=U_1\cap U_2$. Let $\bk$ be an algebraically closed field and let $y\in U(\bk)$. We have a commutative diagram \[ \xymatrix{ &A_g[f_1,\dots,f_s]\ar[r]\ar[d]&\bk[f_{1,y},\dots,f_{s,y}]\ar[d]_{i'}\\ A\invl x_1,x_2,\dots\invr \ar[r]&A_g\otimes_A A\invl x_1,x_2,\dots\invr\ar[r] &\bk\invl x_1,x_2,\dots\invr } \] where the extension of the $\bk[f_{1,y}, \dots, f_{s,y}]$-module $M'_y$ by $i'$ is $M_y$. By Lemma~\ref{lem:flat extension}, $i'$ is faithfully flat, and thus the Betti table of $M'_y$ is the same as the Betti table of $M_y$. Since $M'$ has a constant Betti table over $U$, the module $M$ also has a constant Betti table over $U$. 
\end{proof} \begin{corollary}\label{cor:constant Betti} (We do not assume Hypothesis~\ref{defn:bR}.) Let $A$ be an integral domain. If $M$ is a finitely presented, graded $A\invl x_1,x_2,\dots\invr$-module, then $M$ has a constant Betti table over some dense open subset $U\subseteq \Spec(A)$. \end{corollary} \begin{proof} Let $\ol{A}$ be the integral closure of $A$ in an algebraic closure of $K$, and let $\ol{K}$ be the fraction field of $\ol{A}$. Let $\overline{M}$ be the extension of $M$ to $\overline{A}\invl x_1,x_2,\dots\invr$. Since $\overline{A}$ and $\overline{K}$ satisfy Hypothesis~\ref{defn:bR} (see Remark~\ref{rmk:replace}), Theorem~\ref{thm:open subset} implies that $\ol{M}$ has a constant Betti table over some dense open subset $\overline{U}\subseteq \Spec(\ol{A})$. Since the integral morphism $\Spec(\ol{A})\to \Spec(A)$ is closed~\stacks{01WM}, the image of $\ol{U}$ in $\Spec(A)$ contains a dense open set $U$. Let $\bk$ be an algebraically closed field and let $y\in U(\bk)$. By integrality, there is a $\bk$-point $y'$ lying over $y$, and by definition of $U$, $y'\in \ol{U}(\bk)$. The map $y'\to y$ induces an isomorphism of $\overline{M}_{y'}$ and $M_y$ as $\bk\invl x_1,x_2,\dots\invr$-modules, and they therefore have the same Betti table. Thus $M$ has a constant Betti table over $U$. \end{proof} \begin{example} Consider the case when $M$ is a quotient of $A\invl x_1,x_2,\dots\invr$ by $r$ linear forms $f_i = \sum_j a_{i,j} x_j$. Generically, these forms are linearly independent. More precisely, this is true over the complement of the vanishing locus of the $r \times r$ minors of the matrix $(a_{i,j})$. This gives a dense open set where $M_y$ is resolved by a Koszul complex, and hence the Betti numbers are given by $\beta_{i,i} = \binom{r}{i}$ for $0 \le i \le r$ and $0$ otherwise. As a second example, consider when $M$ is defined by determinantal conditions. 
Let $f_i= \sum_j a_{i,j} x_j$ for $i=1,\dots,6$ and consider the matrix $\begin{bmatrix} f_1 & f_2 & f_3 \\ f_4 & f_5 & f_6 \end{bmatrix}$. Let $M$ be the quotient of $R$ by the $2 \times 2$ determinants of this matrix. General facts about determinantal loci tell us that this ideal has codimension $\le 2$. Having codimension 2 is an open condition, and in that case, the nonzero Betti numbers are given by $\beta_{0,0} = 1$, $\beta_{1,2} = 3$, $\beta_{2,3} = 2$ (for example, by the Hilbert--Burch theorem). \end{example} \subsection{Connection with $\GL$-noetherianity and Stillman's conjecture} We now combine Corollary~\ref{cor:constant Betti} with~\cite{draisma} to prove Stillman's conjecture. Throughout this section we fix a ground field $\bk$. Fix degrees $d_1,\dots,d_r$. Let $S$ be the set of pairs $(\alpha,i)$ where $1 \le i \le r$ and $\alpha$ ranges over all exponent vectors of degree $d_i$ in the variables $x_1,x_2,\dots$. Let $\bA = \bk[c_{\alpha,i} \mid (\alpha,i) \in S]$. For $1\leq i \leq r$, let $\widetilde{f}_i=\sum c_{\alpha,i}x^\alpha \in \bA\invl x_1,x_2,\dots\invr$ be a universal polynomial of degree $d_i$. We let $Q=\bA\invl x_1,x_2,\dots\invr/(\widetilde{f}_1,\dots,\widetilde{f}_r)$. If $\bk'$ is a field over $\bk$, then there is a bijection between $\Spec(\bA)(\bk')$ and tuples $f_1,\dots,f_r\in \bk'\invl x_1,x_2,\dots\invr$ where $\deg(f_i)=d_i$; with notation from Definition~\ref{defn:restriction to point}, this bijection is given by $y\in \Spec(\bA)(\bk') \leftrightarrow \widetilde{f}_{1,y},\dots,\widetilde{f}_{r,y}$. There is a natural change of basis action by the group scheme $\GL_\infty$ on $\Spec(\bA)$. The $\bA\invl x_1,x_2,\dots\invr$-module $Q$ is equivariant with respect to this action. \begin{theorem}\label{thm:finite Betti} The space $\Spec(\bA)$ decomposes into a finite disjoint union of locally closed subsets $\{U_j\}$ such that $Q$ has a constant Betti table over $U_j$ for each $j$. 
In particular, there are only finitely many distinct Betti tables among all ideals $(f_1,\dots,f_r)\subseteq \bk'[x_1,\dots,x_n]$ generated in degrees $d_1,\dots,d_r$, for all $n$ and all fields $\bk'$ over $\bk$. \end{theorem} \begin{proof} Applying Corollary~\ref{cor:constant Betti}, we have that $Q$ has a constant Betti table over a dense, open subset $U'\subseteq \Spec(\bA)$. Let $U$ be the union of all $\GL_\infty$ translates of $U'$. Since Betti tables are $\GL_\infty$-invariant, $Q$ has a constant Betti table over $U$. By~\cite[Theorem 1]{draisma}, $\Spec(\bA)\setminus U$ consists of finitely many irreducible components, each of which is $\GL_\infty$-invariant. Passing to a component, we can apply the same argument. Continuing in this way, we obtain the desired stratification of $\Spec(\bA)$, and it is finite by~\cite[Theorem 1]{draisma}. For any field $\bk'$, the canonical map $\bk'[x_1,\ldots,x_n] \otimes_{\bk'} \bk' \llbracket\hspace{-.12cm} \llbracket x_{n+1}, \ldots \rrbracket\hspace{-.12cm} \rrbracket \to \bk'\invl x_1,x_2,\dots\invr$ is an isomorphism. It follows that for $f_1, \ldots, f_r \in \bk'[x_1,\ldots,x_n]$ the Betti table of the quotient ring $\bk'[x_1,\ldots,x_n]/(f_1, \ldots, f_r)$ is the same as that of $\bk' \invl x_1,x_2,\dots\invr/(f_1, \ldots, f_r)$, and this implies the final statement of the theorem for algebraically closed fields $\bk'$. To get the statement for arbitrary $\bk'$, we let $\ol{\bk'}$ be an algebraic closure of $\bk'$ and note that the extension $\bk'[x_1,\dots,x_n] \to \ol{\bk'}[x_1,\dots,x_n]$ is faithfully flat, and hence Betti tables are unchanged under this extension. \end{proof} \begin{remark} It would be interesting to extend~\cite[Theorem 1]{draisma} to spaces over $\bZ$, as this would yield characteristic free bounds in the above result. 
\end{remark} \begin{remark} Theorem~\ref{thm:finite Betti} slightly generalizes Stillman's conjecture, as it also applies to ideals $(f_1,\dots,f_r)$ in $\bk\invl x_1,x_2,\dots\invr$ that use an infinite number of variables. \end{remark} \begin{remark} The proof of Theorem~\ref{thm:finite Betti} is much less self-contained than our ultraproduct proof, however it is distinctly different in character: it does not rely on the notion of strength, but rather on a generalized noetherianity principle. This is pursued in more detail in~\cite{erman-sam-snowden} to obtain generalizations of Stillman's conjecture. \end{remark} \begin{bibdiv} \begin{biblist} \bib{altman-kleiman}{book}{ author ={Altman, Allen}, author ={Kleiman, Steven}, title = {A term of commutative algebra}, date={2013}, note = {\url{http://web.mit.edu/18.705/www/13Ed.pdf}}, } \bib{ananyan-hochster}{article}{ author={Ananyan, Tigran}, author={Hochster, Melvin}, title={Small subalgebras of polynomial rings and Stillman's conjecture}, journal={J. Amer. Math. Soc.}, volume={33}, date={2020}, pages={291--309}, note={\arxiv{1610.09268v3}}, } \bib{bruns-herzog}{book}{ author={Bruns, Winfried}, author={Herzog, J\"urgen}, title={Cohen-Macaulay rings}, series={Cambridge Studies in Advanced Mathematics}, volume={39}, publisher={Cambridge University Press, Cambridge}, date={1993}, pages={xii+403}, } \bib{draisma-survey}{article}{ author={Draisma, Jan}, title={Noetherianity up to symmetry}, conference={ title={Combinatorial algebraic geometry}, }, book={ series={Lecture Notes in Math.}, volume={2108}, publisher={Springer, Cham}, }, date={2014}, pages={33--61}, } \bib{draisma}{article}{ author = {Draisma, Jan}, title = {Topological noetherianity for polynomial functors}, journal = {J. Amer. Math. 
Soc.}, volume = {32}, pages = {691--707}, date={2019}, note = {\arxiv{1705.01419v4}}, } \bib{draisma-lason-leykin}{article}{ author = {Draisma, Jan}, author = {Laso\'{n}, Micha\l}, author = {Leykin, Anton}, title = {Stillman's Conjecture via generic initial ideals}, journal = {Comm. Algebra}, volume = {47}, pages = {2384--2395}, date={2019}, note = {\arxiv{1802.10139v2}}, } \bib{eisenbud}{book}{ author={Eisenbud, David}, title={Commutative algebra with a view toward algebraic geometry}, series={Graduate Texts in Mathematics}, volume={150}, publisher={Springer-Verlag, New York}, date={1995}, pages={xvi+785}, } \bib{erman-sam-snowden}{article}{ author={Erman, Daniel}, author={Sam, Steven~V}, author={Snowden, Andrew}, title={Generalizations of Stillman's conjecture via twisted commutative algebras}, journal={Int. Math. Res. Not. IMRN}, date={2021}, issue={16}, pages={12281--12304}, note={\arxiv{1804.09807v1}} } \bib{imperfect}{article}{ author={Erman, Daniel}, author={Sam, Steven~V}, author={Snowden, Andrew}, title={Big polynomial rings with imperfect coefficient fields}, journal={Michigan Math. J.}, volume={70}, pages={649--672}, date={2021}, note={\arxiv{1806.04208v2}} } \bib{goldschmidt}{book}{ author={Goldschmidt, David M.}, title={Algebraic functions and projective curves}, series={Graduate Texts in Mathematics}, volume={215}, publisher={Springer-Verlag, New York}, date={2003}, pages={xvi+179}, } \bib{MM}{article}{ author={Milnor, John W.}, author={Moore, John C.}, title={On the structure of Hopf algebras}, journal={Ann. of Math. (2)}, volume={81}, date={1965}, pages={211--264}, } \bib{stillman}{article}{ author={Peeva, Irena}, author={Stillman, Mike}, title={Open problems on syzygies and Hilbert functions}, journal={J. Commut. 
Algebra}, volume={1}, date={2009}, number={1}, pages={159--195}, } \bib{schoutens}{book}{ author={Schoutens, Hans}, title={The use of ultraproducts in commutative algebra}, series={Lecture Notes in Mathematics}, volume={1999}, publisher={Springer-Verlag, Berlin}, date={2010}, pages={x+204}, } \bib{serre}{book}{ author={Serre, Jean-Pierre}, title={Galois cohomology}, note={Translated from the French by Patrick Ion and revised by the author}, publisher={Springer-Verlag, Berlin}, date={1997}, pages={x+210}, isbn={3-540-61990-9}, } \bib{sjodin}{article}{ author={Sj\"odin, Gunnar}, title={Hopf algebras and derivations}, journal={J. Algebra}, volume={64}, date={1980}, number={1}, pages={218--229}, } \bib{snellman-article}{article}{ author={Snellman, Jan}, title={Gr\"obner bases and normal forms in a subring of the power series ring on countably many variables}, journal={J. Symbolic Comput.}, volume={25}, date={1998}, number={3}, pages={315--328}, } \bib{snellman}{thesis}{ author = {Snellman, Jan}, title = {A graded subring of an inverse limit of polynomial rings}, year={1998}, note = {\url{http://www.diva-portal.org/smash/get/diva2:195258/FULLTEXT01.pdf}}, } \bib{stacks-project}{misc}{ label={Stacks}, author = {The {Stacks Project Authors}}, title = {Stacks Project}, year = {2017}, note = {\url{http://stacks.math.columbia.edu}}, } \bib{van-den-Dries-schmidt}{article}{ author={van den Dries, L.}, author={Schmidt, K.}, title={Bounds in the theory of polynomial rings over fields. A nonstandard approach}, journal={Invent. Math.}, volume={76}, date={1984}, number={1}, pages={77--91}, } \end{biblist} \end{bibdiv} \end{document}
\begin{document} \begin{abstract}We give minimal presentations for the $RO(C_2)$-graded Bredon cohomology of the equivariant classifying spaces $B_{C_2}U(n), B_{C_2}SO(n)$ and $B_{C_2}Sp(n)$ with coefficients in the rational Burnside Green functor $A_{\Q}$. This results in an efficient description of rational $C_2$ equivariant Chern, Pontryagin and symplectic characteristic classes. These classes are then related to each other using the inclusions of maximal tori. \end{abstract} \title{$C_2$ equivariant characteristic classes over the rational Burnside ring}{} \tableofcontents \section{Introduction}\label{Intro} Characteristic classes are classical and invaluable tools for understanding and distinguishing bundles over spaces. If we have a compact Lie group $G$ acting on a space $X$, there is a corresponding theory of $G$-equivariant bundles and $G$-equivariant characteristic classes. May proves in \cite{May87} that when Borel cohomology $$H^*_{G,Borel}(X)=H^*(X\times_GEG)$$ is used, the theory of Borel equivariant characteristic classes reduces to the non\-equivariant one, in the sense that $H^*_{G,Borel}(B_GL)=H^*(BG)\otimes H^*(BL)$ for any compact Lie group $L$ (which can be $L=U(n), SO(n), Sp(n)$ and so on). Equivariant characteristic classes in genuine (Bredon) equivariant cohomology are much less understood, owing to the significant complexity involved in computing it. Recall that for a $G$-space $X$, unreduced $G$-equivariant (Bredon) cohomology $H^{\bigstar}_G(X)$ is not just a ring, but a Green functor: for every orbit $G/H$ we have a ring $H^{\bigstar}_G(X)(G/H)$ with an action from the Weyl group $W_GH=N_GH/H$ (where $N_GH$ is the normalizer of $H$ in $G$) and these rings are related to each other via restriction and transfer maps satisfying certain axioms. 
In more detail, for any subgroup inclusion $K\subseteq H$ we have corresponding restriction and transfer maps: \begin{gather} \Res^H_K\colon H^{\bigstar}_G(X)(G/H)\to H^{\bigstar}_G(X)(G/K)\\ \Tr^H_K\colon H^{\bigstar}_G(X)(G/K)\to H^{\bigstar}_G(X)(G/H) \end{gather} Moreover, the index $\bigstar$ is not just an integer, but an element of the real representation ring $RO(G)$. The coefficients used in $RO(G)$-graded cohomology are also Green functors and the initial ring $\Z$ is supplanted by the initial Burnside Green functor $A_{\Z}$. So $H^{\bigstar}_G(X)$ is by definition $H^{\bigstar}_G(X;A_{\Z})$ and we can more generally consider $H^{\bigstar}_G(X;R)$ for a $G$-Green functor $R$. Computing the coefficients of $RO(G)$-graded cohomology, namely the Green functor $H^{\bigstar}_G(*;A_{\Z})$, is a non-trivial undertaking on its own. The reader can consult \cite{Lew88} for the rather complicated answer when $G=C_p$ is the cyclic group of prime order $p$. Even when we replace the coefficients $A_{\Z}$ by the constant Green functors corresponding to trivial $G$-modules $\Z$ and $\F_2$, the computations remain quite involved (see \cite{Geo19} and \cite{BC4S2} for the case of $G=C_4$). For characteristic classes, we further need to compute the $RO(G)$-graded cohomology of equivariant classifying spaces such as $B_GU(n), B_GSO(n)$ and $B_GSp(n)$. Such calculations for $n\le 3$, $G=C_2$ and using $A_{\Z}$ coefficients are performed in \cite{Shu14}, \cite{Cho18}. For $n=1$, $G=C_2$ and using constant $\F_2$ coefficients, the cohomology of $B_{C_2}O(1)=B_{C_2}\Sigma_2$ is the test module used in the determination of the dual Steenrod algebra (\cite{HK96}) and equivariant Dyer-Lashof operations (\cite{Wil19}). The same calculation for $G=C_4$ is significantly more complicated (\cite{BC4S2}). A way to simplify the algebra involved is to use coefficients in the rational Burnside Green functor $A_{\Q}$. 
Indeed, a result by Greenlees-May reduces the computation of the $RO(G)$-graded cohomology of a space $X$ in $A_{\Q}$ coefficients to nonequivariant rational cohomology of the fixed points $X^H$ where $H$ ranges over the subgroups of $G$ (\cite{GM95}). This allows us to compute explicit descriptions of the Green functors $H^{\bigstar}_G(B_GU(n);A_{\Q})$ , $ H^{\bigstar}_G(B_GSO(n);A_{\Q})$, $H^{\bigstar}_G(B_GSp(n);A_{\Q})$ and so on. However, those explicit descriptions are rather inefficient: For $G=C_2$, the ring $H^{\bigstar}_G(B_GU(n))(G/G)$, according to the Greenlees-May decomposition, has $n^2+2n$ many algebra generators over the homology of a point, which is just under double the minimal amount $\frac{n^2+2n}2+1$ of generators that we can obtain (see the remarks after Proposition \ref{C2Chern2}). Part of the goal of this paper is to systematically obtain such minimal explicit descriptions; said another way, we are producing only the essential characteristic classes upon which all the others are built. Our method rests on equivariant generalizations of the following nonequivariant arguments: By a classical Theorem of Borel (\cite{BCM}), if $L$ is a connected compact Lie group, $T\subseteq L$ a maximal torus and $W_LT=N_LT/T$ is the Weyl group then, at least rationally, \begin{equation} H^*(BL)=H^*(BT)^{W_LT} \end{equation} Through this result, the characteristic classes in $H^*(BL)$ can be computed from $H^*(BS^1)$, as long as the Weyl group action is understood. For example, if we take $L=U(n)$ then $T=(S^1)^n$ and $W_LT=\Sigma_n$ acts on $H^*(BT;\Q)=\Q[a_1,...,a_n]$ by permuting the generators $a_i$. The fixed points under this permutation action are minimally generated by the elementary symmetric polynomials on the $a_i$, which are by definition the Chern classes $c_i$. In this way, $H^*(BU(n);\Q)=\Q[c_1,...,c_n]$. The same method can be performed equivariantly for $G=C_2$ and coefficients in $A_{\Q}$. 
There is an extra degree of complexity owing to the fact that $H^{\bigstar}_G(B_GS^1;A_{\Q})$ is not polynomial on one generator over $H^{\bigstar}_G(*;A_{\Q})$, but rather on two generators, one of which is idempotent (Proposition \ref{C2Chern1Class}). As such, in the $L=U(n)$ example, the elementary symmetric polynomials $c_i$ must be replaced by a family of more complicated polynomials $\alpha,c_i,\gamma_{s,j}$ (Proposition \ref{C2Chern3}). Moreover, while this family of generators is minimal, it is not algebraically independent i.e. there are relations within this family. It is true however that $H^{\bigstar}_G(B_GU(n);A_{\Q})$ is a finite module over $H^{\bigstar}_G(*;A_{\Q})[c_1,...,c_n]$ where the $c_i$ are $C_2$-equivariant refinements of the classical Chern classes. We use this method to obtain explicit minimal descriptions of $H^{\bigstar}_{C_2}(B_{C_2}L;A_{\Q})$ where $L=U(n), SO(n), Sp(n)$. We also examine the cases of $L=O(n), SU(n)$ and of the non-compact Lie groups $L=U, SO, Sp, O, SU$. The resulting equivariant Chern, Pontryagin and symplectic classes are compared using the complexification, quaternionization and forgetful maps between the aforementioned Lie groups. Finally, we compute the effect of these characteristic classes on the direct sum of bundles and on the tensor product of line bundles. As for the organization of this paper, sections \ref{Conv} and \ref{C2RationalStems} set up the notation used throughout and contain the computation of the $C_2$ rational stable stems. Section \ref{Summary} contains a summary of all our results on $C_2$ characteristic classes. The proofs are then found in sections \ref{C2ChernSection}-\ref{SU} for the interested reader. Finally, appendix \ref{appen} contains the results on symmetric polynomials with relations that are critical for our presentation of $H^{\bigstar}_{C_2}(B_{C_2}U(n);A_{\Q})$. 
In particular, it contains an algorithm for writing every ``symmetric polynomial'' in terms of the ``elementary symmetric polynomials'' $\gamma_{s,j}$; this also leads to an algorithm for explicitly obtaining the relations between the $\gamma_{s,j}$. We have implemented these algorithms in a computer program available \href{https://github.com/NickG-Math/Symmetric_Polynomials}{here} (executable files are available \href{https://github.com/NickG-Math/Symmetric_Polynomials/releases}{here} for a quick demonstration). The appendix is completely self-contained and independent of the rest of the paper. \subsection{Acknowledgment} We would like to thank Peter May for reading several earlier drafts of this paper. Through his numerous editing suggestions, readability was vastly improved. \section{Conventions and Notations}\label{Conv} Throughout this paper, the ambient group is $G=C_2$ and all our $G$-Mackey functors are modules over the rational Burnside Green functor $A_{\Q}$: \begin{equation} A_{\Q}=\begin{tikzcd} \frac{\Q[x]}{x^2=2x}\ar[d, "x\mapsto 2" left, bend right]\\ \Q\ar[u, "1\mapsto x" right,bend right] \end{tikzcd}=\begin{tikzcd} \Q x\ar[d, "x\mapsto 2" left, bend right]\\ \Q\ar[u, "1\mapsto x" right,bend right] \end{tikzcd} \oplus \begin{tikzcd} \Q y\ar[d, bend right]\\ 0\ar[u, bend right] \end{tikzcd} \end{equation} where $x=\Tr(1)$ and $y=1-x/2$.\medbreak The \emph{unreduced} cohomology of a $G$-space $X$ in $A_{\Q}$ coefficients is the $G$-Green functor defined on orbits as \begin{equation} H^{\bigstar}_G(X)(G/H)= [X_+, \Sigma^{\bigstar}HA_{\Q}]^H \end{equation} where $HA_{\Q}$ is the Eilenberg-MacLane spectrum associated to $A_{\Q}$ and the index $\bigstar$ is an element of the real representation ring $RO(G)=RO(C_2)$. This ring is spanned by the trivial representation $1$ and the sign representation $\sigma$ so $\bigstar=n+m\sigma$ for $n,m\in \Z$. 
Moreover, $H^{\bigstar}_G(X)$ is a Green functor algebra over the cohomology of a point $H^{\bigstar}_G(*)=H^{\bigstar}_G$.\medbreak The same conventions apply to homology $H^G_{\bigstar}(X)$ (with the exception of the ring structure, which exists only when $X$ is an equivariant $H$-space).\medbreak The advantage of using $A_{\Q}$ coefficients is twofold: \cite{GM95} prove that \begin{itemize} \item All rational Mackey functors (i.e. $A_{\Q}$ modules) are projective and injective, so we have the Kunneth formula: \begin{equation} H_{\bigstar}^G(X\times Y)=H_{\bigstar}^G(X)\boxtimes_{H_{\bigstar}^G}H_{\bigstar}^G(Y) \end{equation} and duality formula: \begin{equation} H_G^{\bigstar}(X)=\Hom_{H_{\bigstar}^G}(H_{\bigstar}^G(X),H_{\bigstar}^G) \end{equation} \item We have the isomorphism of graded Green functors: \begin{equation} H_G^*(X)=\begin{tikzcd} H^*(X)^G\ar[d, bend right]\\ H^*(X)\ar[u,bend right] \end{tikzcd}\oplus \begin{tikzcd} H^*(X^G)\ar[d, bend right]\\ 0\ar[u, bend right] \end{tikzcd} \end{equation} \end{itemize} The second bullet allows us to reduce equivariant computations to nonequivariant ones, as long as we use integer grading $*\in \Z$. Using the first bullet, integer graded cohomology together with the homology of a point recover the $RO(G)$-graded cohomology: \begin{equation} H_G^{\bigstar}(X)=H_G^*(X)\boxtimes_{A_{\Q}}H_G^{\bigstar} \end{equation} As such, once $H_G^{\bigstar}$ is computed, we need only worry about integer grading. \section{\texorpdfstring{The $C_2$ rational stable stems}{The C2 rational stable stems}}\label{C2RationalStems} The Green functor $H_{\bigstar}^G=H^{-\bigstar}_G$ agrees with the $G$-equivariant rational stable stems: \begin{equation} \pi_{\bigstar}^G(S)\otimes \Q=\pi_{\bigstar}^G(HA_{\Q})=H_{\bigstar}^G \end{equation} The generating classes for $H_{\bigstar}^G$ are the Euler and orientation classes. 
The Euler class $a_{\sigma}$ is the inclusion of north-south poles $S^0\hookrightarrow S^{\sigma}$ and its image in $H^{\bigstar}_G$ under the Hurewicz map generates a Mackey functor that we denote by $M_1$: \begin{equation} M_1\{a_{\sigma}\}=\begin{tikzcd} \Q a_{\sigma}\ar[d, bend right]\\ 0\ar[u, bend right] \end{tikzcd} \end{equation} The orientation class $u_{\sigma}$ is the generator of the reduced nonequivariant homology group $\tilde H_1(S^{\sigma};\Z)=\Z$ (determined uniquely once we fix an orientation for $S^{\sigma}$) and generates a Mackey functor that we denote by $M_0^-$: \begin{equation} M_0^-\{u_{\sigma}\}=\begin{tikzcd} 0\ar[d, bend right]\\ \Q u_{\sigma}\ar[u, bend right]\ar[loop right, "C_2"] \end{tikzcd} \end{equation} The Weyl group action by the generator $g\in C_2$ is $gu_{\sigma}=-u_\sigma$. The square of $u_{\sigma}$, $u_{\sigma}^2$, is the restriction of the orientation class $u_{2\sigma}$ generating a Mackey functor that we denote by $M_0$: \begin{equation} M_0\{u_{2\sigma}\}=\begin{tikzcd} \Q u_{2\sigma}\ar[d, "1" left, bend right]\\ \Q u^2_{\sigma}\ar[u, "2" right, bend right] \end{tikzcd} \end{equation} This follows from the fact $M_0^-\boxtimes_{A_{\Q}} M_0^-=M_0$ and by the Kunneth formula for $S^{2\sigma}=S^{\sigma}\wedge S^{\sigma}$. Note that $a_{\sigma}u_{2\sigma}=0$ since $M_1\boxtimes_{A_{\Q}}M_0=0$. Using the duality formula: \begin{equation} \tilde H_{-*}^G(S^{-\sigma})=\tilde H_G^{*}(S^{\sigma})=\Hom_{A_{\Q}}(\tilde H_*^G(S^{\sigma}),A_{\Q}) \end{equation} we see that there is a class generating $M_1$ which when multiplied with $a_{\sigma}$ returns $y\in A_{\Q}$; we denote this class by $y/a_{\sigma}$. We similarly have classes $u_{\sigma}^{-1}$ and $x/u_{2\sigma}$ spanning $M_0^-$ and $M_0$ respectively. 
We have proven the following Proposition: \begin{prop}The $C_2$ equivariant rational stable stems are: \begin{equation} H^G_{k+n\sigma}=\begin{cases} M_0&\text{if }k=n\text{ even and }\neq 0\\ M_0^{-}&\text{if }k=n\text{ odd} \\ M_1&\text{if }k=0\text{ , }n\neq 0\\ A_{\Q}&\text{if }k=n=0\\ 0&\text{otherwise} \end{cases} \end{equation} and: \begin{itemize} \item $u_{2\sigma}^{j}, x/u_{2\sigma}^j$ generate a copy of $M_0$ for each $j=1,2,...$. \item $u_{\sigma}^{2j+1}$ generate a copy of $M_0^-$ for each $j\in \Z$. \item $a_{\sigma}^{j}, y/a_{\sigma}^j$ generate a copy of $M_1$ for each $j=1,2,...$. \item $1$ generates $A_{\Q}$. \end{itemize} \end{prop} To spell things out, as a ring, the $C_2/C_2$ level of $H^G_{\bigstar}$ is $\Q[x, u_{2\sigma}, x/u_{2\sigma}, a_{\sigma}, y/a_{\sigma}]$ modulo the relations: \begin{gather} x^2=2x\\ xu_{2\sigma}=2u_{2\sigma}\\ ya_{\sigma}=a_{\sigma}\\ u_{2\sigma}(x/u_{2\sigma}^i)=x/u_{2\sigma}^{i-1}\\ a_{\sigma}(y/a_{\sigma}^i)=y/a_{\sigma}^{i-1}\\ a_{\sigma}u_{2\sigma}=0\\ a_{\sigma}(x/u_{2\sigma}^i)=0\\ u_{2\sigma}(y/a_{\sigma}^i)=0\\ (x/u_{2\sigma}^i)(y/a_{\sigma}^j)=0 \end{gather} \section{\texorpdfstring{Summary of the $C_2$ characteristic classes}{Summary of the C2 characteristic classes}}\label{Summary} We summarize our results on $C_2$ characteristic classes in $A_{\Q}$ coefficients that we shall prove in sections \ref{C2ChernSection}-\ref{SU}. Slightly abusing the notation, we shall use $H^*_G(X)$ to simultaneously denote both the $G$-Green functor and its top level $H^*_G(X)(G/G)$. We can do that because knowledge of the top and bottom levels and of the restriction map can be used to recover the Mackey functor, as long as the restriction is surjective (the transfer is computed from $\Tr(a)=xb$ where $\Res(b)=a$). In all cases we encounter, restriction is indeed surjective so it suffices to describe the top level and how generators restrict to the bottom (nonequivariant) level. 
\subsection{Chern classes} We start with the results on Chern classes. We view $H^*_G(B_GU(n))$ as an augmented algebra over $H^*(BU(n))$ with the augmentation being restriction. \begin{prop}\label{C2Chern1}The augmentation \begin{equation} \Res:H^*_G(B_GU(n))\to H^*(BU(n)) \end{equation} is a split surjection, so the nonequivariant Chern classes have $C_2$ equivariant refinements. \end{prop} We fix a section of the augmentation, i.e. equivariant refinements $c_1,...,c_n$ of the Chern classes, according to Proposition \ref{C2ChernIso}. \begin{prop}\label{C2Chern2}There exist elements $\alpha\in H^0_G(B_GU(n))$ and $\gamma_{s,j}\in H^{2s}_G(B_GU(n))$ for $1\le s<n$ and $1\le j\le n-s$, generating $H_G^*(B_GU(n))$ as an augmented algebra over $H^*(BU(n))\otimes A_{\Q}$: \begin{equation} H_G^*(B_GU(n))=\frac{(H^*(BU(n))\otimes A_{\Q})[\alpha,\gamma_{s,j}]}{\Res(\alpha), \Res(\gamma_{s,j}), S} \end{equation} where the finite set of relations $S\subseteq \Q[\alpha,c_i,\gamma_{s,j}]$ is described in Proposition \ref{AlgebraCorollaryQ}. \\ The $c_i$ are algebraically independent and for each degree $*$, $H_G^*(B_GU(n))$ is a finitely generated module over $\Q[c_1,...,c_n]$.\\ The generating family $\{\alpha, c_i,\gamma_{s,j}\}$ has cardinality $\frac{n^2+2n}2+1$ and is a minimal generating set of $H_G^*(B_GU(n))$ as an $A_{\Q}$ algebra, in the sense that any other generating set has at least $\frac{n^2+2n}2+1$ many elements. \end{prop} Substituting $H^*(BU(n))=\Q[c_1,...,c_n]$ in the formula for $H^*_G(B_GU(n))$ gives: \begin{prop}\label{C2Chern3}As an algebra over $A_{\Q}$, \begin{equation} H_G^*(B_GU(n))=\frac{A_{\Q}[\alpha, c_i,\gamma_{s,j}]}{x\alpha,x\gamma_{s,j},S} \end{equation} \end{prop} Two observations: \begin{itemize} \item The relations $x\alpha=0, x\gamma_{s,j}=0$ are equivalent to $\alpha,\gamma_{s,j}$ having trivial restrictions (i.e. augmentations) respectively. This completes the description of the Mackey functor structure of $H^*_G(B_GU(n))$. 
\item The $\frac{n^2+2n}2+1$ many generators of the generating set $\{\alpha, c_i,\gamma_{s,j}\}$ are just over half of the $n^2+2n$ many generators given by the idempotent decomposition (\cite{GM95}) of the Mackey functor $H_G^*(B_GU(n))$.\medbreak \end{itemize} For $n=1$ the computation takes a simpler form: \begin{equation} H_G^*(B_GU(1))=A_{\Q}[\alpha,c_1]/(\alpha^2=\alpha, x\alpha) \end{equation} To simplify the notation in the next Proposition, we set $u=c_1\in H^2_G(B_GU(1))$. \begin{prop}\label{C2ChernIso} The maximal torus inclusion $U(1)^n\hookrightarrow U(n)$ induces an isomorphism \begin{gather} H^*_G(B_GU(n))= (H^*_G(B_GU(1))^{\otimes n})^{\Sigma_n} \end{gather} Explicitly: \begin{equation} A_{\Q}[\alpha,c_i,\gamma_{s,j}]/(x\alpha, x\gamma_{s,j}, S)=(A_{\Q}[\alpha_i,u_i]/(x\alpha_i))^{\Sigma_n} \end{equation} under the identifications: \begin{gather} \alpha=\sigma_1(\alpha_1,...,\alpha_n)=\sum_{1\le m\le n}\alpha_m\\ c_i=\sigma_i(u_1,...,u_n)=\sum_{m_*\in K_i}u_{m_1}\cdots u_{m_i}\\ \gamma_{s,j}=\sum_{(m_*,l_*)\in K_{s,j}}u_{m_1}\cdots u_{m_s}\alpha_{l_1}\cdots \alpha_{l_j} \end{gather} where $K_i$ consists of all partitions $1\le m_1<\cdots<m_i\le n$ and $K_{s,j}\subseteq K_s\times K_j$ consists of all pairs of disjoint partitions. The polynomial $\sigma_i$ is the $i$-th elementary symmetric polynomial. \end{prop} The family of generators $\alpha,c_i,\gamma_{s,j}$ is determined upon choosing $\alpha,u=c_1$ in $H^0_G(B_GU(1)), H^2_G(B_GU(1))$ respectively, with: \begin{equation} H_G^*(B_GU(1))=A_{\Q}[\alpha,u]/(\alpha^2=\alpha, x\alpha) \end{equation} The choice of $u$ is unique under the additional requirement that its restriction is the nonequivariant Chern class $c_1$ (in this way, the equivariant $c_i$ are all canonically determined). There are two equally good candidates for $\alpha$ however: $\alpha$ and $y-\alpha$. They can only be distinguished upon fixing a model for $B_GU(1)$, as we do in subsection \ref{n=1Comp}. 
As such, there is no canonical choice of $\alpha\in H^0_G(B_GU(1))$. \begin{prop}\label{AddTriv}The map $B_GU(n)\to B_GU(n+1)$ given by direct sum with a trivial complex representation induces on cohomology: \begin{gather} \alpha\mapsto y+\alpha\\ c_i\mapsto c_i\\ \gamma_{s,j}\mapsto \gamma_{s,j}+\gamma_{s,j-1}\end{gather} using the convention $\gamma_{s,0}=yc_s$. \\ The map $B_GU(n)\to B_GU(n+1)$ given by direct sum with a $\sigma$ representation induces on cohomology: \begin{gather} \alpha\mapsto \alpha\\ c_i\mapsto c_i\\ \gamma_{s,j}\mapsto \gamma_{s,j} \end{gather} For both maps we use the conventions that $c_{n+1}=0$ and $\gamma_{s,n+1-s}=0$ in every RHS. \end{prop} \begin{prop}\label{AddBun} The direct sum of bundles map $B_GU(n)\times B_GU(m)\to B_GU(n+m)$ induces on cohomology: \begin{gather} \alpha\mapsto \alpha\otimes 1+1\otimes \alpha\\ c_i\mapsto \sum_{j+k=i}c_j\otimes c_k\\ \gamma_{s,j}\mapsto \sum_{s'+s''=s\atop j'+j''=j}\gamma_{s',j'}\otimes \gamma_{s'',j''} \end{gather} using the conventions $c_0=1,\gamma_{s,0}=yc_s,\gamma_{0,j}=(j!)^{-1}\alpha(\alpha-1)\cdots (\alpha-j+1)$ in every RHS. \end{prop} \begin{prop}\label{TensorBun} The tensor product of line bundles map $B_GU(1)\times B_GU(1)\to B_GU(1)$ induces on cohomology: \begin{gather} \alpha\mapsto y-\alpha\otimes 1-1\otimes \alpha+2\alpha\otimes \alpha\\ c_1\mapsto c_1\otimes 1+1\otimes c_1\end{gather} \end{prop} \subsection{Symplectic classes}The theory of $C_2$ symplectic characteristic classes is entirely analogous to Chern classes, by replacing $B_GU(n)$ with $B_GSp(n)$ and the generators $c_i,\gamma_{s,j}$ with generators $k_i,\kappa_{s,j}$ of double degree. 
Propositions \ref{C2Chern1}-\ref{C2Chern3} become: \begin{prop}There exist classes $\alpha,k_i,\kappa_{s,j}\in H_G^*(B_GSp(n))$ of degrees $0,4i,4s$ respectively, where $1\le i,s\le n$ and $1\le j\le n-s$, such that \begin{equation} H_G^*(B_GSp(n))=\frac{A_{\Q}[\alpha,k_i,\kappa_{s,j}]}{x\alpha, x\kappa_{s,j}, S} \end{equation} where the relation set $S$ is the same as that for $H_G^*(B_GU(n))$ with $c_i,\gamma_{s,i}$ replaced by $k_i,\kappa_{s,i}$. The generators $k_i$ restrict to the nonequivariant symplectic classes $k_i$, so the restriction map $H^*_G(B_GSp(n))\to H^*(BSp(n))$ is a split surjection. The maximal torus inclusion $U(1)^n\hookrightarrow Sp(n)$ induces an isomorphism \begin{gather} H^*_G(B_GSp(n))= (H^*_G(B_GU(1))^{\otimes n})^{C_2\wr \Sigma_n}\end{gather} Explicitly: \begin{gather} A_{\Q}[\alpha,k_i,\kappa_{s,j}]/(x\alpha, x\kappa_{s,j}, S)=(A_{\Q}[\alpha_i,u_i]/(x\alpha_i))^{C_2\wr\Sigma_n} \end{gather} under the identifications: \begin{gather} \alpha=\sum_{1\le m\le n}\alpha_m\\ k_i=\sum_{m_*\in K_i}u_{m_1}^2\cdots u_{m_i}^2\\ \kappa_{s,j}=\sum_{(m_*,l_*)\in K_{s,j}}u_{m_1}^2\cdots u_{m_s}^2\alpha_{l_1}\cdots \alpha_{l_j} \end{gather} where $K_i$ and $K_{s,j}$ are as in Proposition \ref{C2ChernIso}. \end{prop} Propositions \ref{AddTriv}-\ref{TensorBun} have analogous statements in the symplectic case, replacing $B_GU(n)$ by $B_GSp(n)$ and $c_i,\gamma_{s,j}$ with $k_i,\kappa_{s,j}$ respectively; we shall not repeat them here. \begin{prop}The forgetful map $B_GSp(n)\to B_GU(2n)$ induces on cohomology: \begin{gather} \alpha\mapsto \alpha\\ c_{2i+1}, \gamma_{2s+1,j}\mapsto 0\\ c_{2i}\mapsto (-1)^ik_i\\ \gamma_{2s,j}\mapsto (-1)^s\kappa_{s,j} \end{gather} The quaternionization map $B_GU(n)\to B_GSp(n)$ induces: \begin{gather} \alpha\mapsto \alpha\\ k_i\mapsto \sum_{a+b=2i}(-1)^{a+i}c_ac_b \end{gather} The effect of quaternionization on the $\kappa_{s,j}$ is explained in Proposition \ref{QuaterExplained}. 
\end{prop} \subsection{Pontryagin and Euler classes} The results are analogous to the symplectic classes, but we need to distinguish between $B_GSO(2n)$ and $B_GSO(2n+1)$. The following Proposition contains the shared aspects of both cases: \begin{prop} The restriction map $H^*_G(B_GSO(n))\to H^*(BSO(n))$ is a split surjection. The maximal torus inclusion $T\hookrightarrow SO(n)$ induces an isomorphism \begin{gather} H^*_G(B_GSO(n))=(H^*_G(B_GT))^W \end{gather} where $W$ is the corresponding Weyl group. \end{prop} This gives us $C_2$ equivariant refinements $p_i,\chi$ of the Pontryagin and Euler classes respectively. Recall that for $BSO(2n)$ the characteristic classes are $p_1,...$,$p_{n-1}$, $\chi$ (and $p_n=\chi^2$) while for $BSO(2n+1)$ they are $p_1,...,p_n$. \begin{prop}There exist classes $\alpha,\pi_{s,j}$ of degrees $0,4s$ respectively in $H^*_G(B_GSO(2n))$, where $1\le s< n$ and $1\le j\le n-s$ such that \begin{equation} H_G^*(B_GSO(2n))=\frac{A_{\Q}[\alpha,p_i,\pi_{s,j},\chi]}{x\alpha, x\pi_{s,j}, S} \end{equation} where the relation set $S$ is the same as that for $H_G^*(B_GU(n))$ with $c_i,\gamma_{s,i}$ replaced by $p_i,\pi_{s,i}$ and using that $p_n=\chi^2$. Under the maximal torus isomorphism: \begin{gather} \alpha=\sum_{1\le m\le n}\alpha_m\\ p_i=\sum_{m_*\in K_i}u_{m_1}^2\cdots u_{m_i}^2\\ \pi_{s,j}=\sum_{(m_*,l_*)\in K_{s,j}}u_{m_1}^2\cdots u_{m_s}^2\alpha_{l_1}\cdots \alpha_{l_j}\\ \chi=u_1\cdots u_n \end{gather} where $K_i$ and $K_{s,j}$ are as in Proposition \ref{C2ChernIso}. \end{prop} \begin{prop} The map $B_GSO(2n)\to B_GSO(2n+1)$ induces an injection in cohomology and: \begin{equation} H_G^*(B_GSO(2n+1))=\frac{A_{\Q}[\alpha,p_i,\pi_{s,j}]}{x\alpha, x\pi_{s,j}, S} \end{equation} where $i=1,...,n$. \end{prop} Propositions \ref{AddTriv}-\ref{TensorBun} have analogous statements in this context. 
The action on the Euler class $\chi$ is the same as in the nonequivariant case; for example, under $B_GSO(n)\times B_GSO(m)\to B_GSO(n+m)$ we get: \begin{equation} \chi\mapsto \chi\otimes\chi \end{equation} \begin{prop}The complexification map $B_GSO(2n)\to B_GU(2n)$ induces on cohomology: \begin{gather} \alpha\mapsto \alpha\\ c_{2i+1}, \gamma_{2s+1,j}\mapsto 0\\ c_{2i}\mapsto (-1)^ip_i\\ \gamma_{2s,j}\mapsto (-1)^s\pi_{s,j} \end{gather} The forgetful map $B_GU(n)\to B_GSO(2n)$ induces on cohomology: \begin{gather} \alpha\mapsto \alpha\\ p_i\mapsto \sum_{a+b=2i}(-1)^{a+i}c_ac_b\\ \chi\mapsto c_n \end{gather} and the action on $\pi_{s,j}$ is explained in Proposition \ref{ForgetExplained}. \end{prop} \subsection{Stable characteristic classes}\label{StableSummary} In the $C_2$-equivariant case, there are different notions of stability for complex bundles, represented by the following spaces: \begin{itemize} \item $B_G^+U=\colimit (B_GU(1)\xrightarrow{\oplus 1}B_GU(2)\xrightarrow{\oplus 1}\cdots)$. This is the usual equivariant classifying space $B_GU=E_GU/U$ and is a $G$-equivariant $H$-space using the direct sum of bundles maps $B_GU(n)\times B_GU(m)\to B_GU(n+m)$. \item $B_G^-U=\colimit (B_GU(1)\xrightarrow{\oplus \sigma }B_GU(2)\xrightarrow{\oplus \sigma}\cdots)$. This is equivalent to $B_G^+U$. \item $B_G^{\pm}U=\colimit (B_G^+U\xrightarrow{\oplus \sigma}B_G^+U\xrightarrow{\oplus \sigma}\cdots)=\colimit (B_G^-U\xrightarrow{\oplus 1}B_G^-U\xrightarrow{\oplus 1}\cdots)$. This becomes a $G$-equivariant $H$-space using the direct sum of bundles, and is the group completion of $B_G^+U$ (and $B_G^-U$). Moreover, $B_G^{\pm}U\times \Z$ represents equivariant $K$-theory. 
\end{itemize} Computing $H^*_G(B_G^-U)$ in terms of the generators $\alpha,c_i,\gamma_{s,j}$ is more complicated compared to the nonequivariant case because for fixed degree $*$, the $\Q$-dimension of $H^*_G(B_GU(n))$ does not stabilize as $n\to +\infty$ and as a result, $H^*_G(B_G^-U)$ is infinite dimensional (dimension is $2^{\aleph_0}$). In degree $*=0$, $H^0_G(B_G^-U)$ is linearly spanned over $A_{\Q}$ by series of the form \begin{equation} a_{-1}+ \sum_{i\ge 0}a_i\alpha(\alpha-1)\cdots (\alpha-i) \end{equation} Generally, the graded algebra $H^*_G(B_G^-U)$ is generated over $H^0_G(B_G^-U)[c_1,c_2,...]$ by series of the form \begin{equation} \sum_{j=1}^{\infty}a_j\gamma_{s,j}\in H^{2s}_G(B_G^-U) \end{equation} for $a_j\in \Q$ and $s=1,2,...$. See section \ref{C2ChernStable} for more details. For the ring $H^*_G(B_G^{\pm}U)$ we also have to compute the effect of the $\oplus 1$ map on the series in $H^*_G(B_G^-U)$. If we restrict our attention to finite series, we are in essence dealing with characteristic classes that are stable under addition of both the $\oplus 1 $ and $\oplus \sigma$ representations. Since the $\oplus 1$ map takes the form $\gamma_{s,j}\mapsto \gamma_{s,j}+\gamma_{s,j-1}$ (and $\gamma_{s,0}=yc_s$, $\gamma_{0,1}=\alpha$) we can immediately see that for $i\ge 1$, the elements \begin{equation} c_i\text{ , }\gamma_i:=c_i\alpha-\gamma_{i,1} \end{equation} are stable under both $\oplus 1$ and $\oplus \sigma$. We conjecture that all classes with this property are polynomially generated by $c_i,\gamma_i$; this is equivalent to the elements $\gamma_1,\gamma_2,...$ being algebraically independent over $\Q[c_1,c_2,...]$. 
In any case, the elements $c_i,\gamma_i$ span sub-Hopf-algebras of $H^*_G(B_G^-U)$ and $H^*_G(B_G^{\pm}U)$ with \begin{gather} \gamma_s\mapsto \sum_{i+j=s}(c_i\otimes \gamma_j+\gamma_i\otimes c_j) \end{gather} using the conventions $c_0=1$ and $\gamma_0=0$.\medbreak \iffalse The equivariant cohomology of $B_G^+U$ is the limit of $H^*_G(B_GU(n))$ i.e. an element of $H^*_G(B_G^+U)$ is a sequence $s_n\in H^*_G(B_GU(n))$ satisfying a compatibility condition (namely $s_n\mapsto s_{n-1}$ under $H^*_G(B_GU(n))\xrightarrow{\oplus 1} H^*_G(B_GU(n-1))$); similarly an element of $H^*_G(B_G^{\pm}U)$ is a compatible double indexed sequence $s_{n,m}\in H^*_G(B_GU(n))$. Describing these sequences in terms of the generators $\alpha,c_i,\gamma_{s,j}$ is rather complicated (see section \ref{C2ChernStable}). The constant sequences are simple enough however: \begin{prop}\label{SuperStableChern}The elements $c_i$ and $\gamma_i:=c_i\alpha-\gamma_{i,1}$ for $i\ge 1$, are invariant under both $H^*_G(B_GU(n))\xrightarrow{\oplus 1} H^*_G(BU(n-1))$ and $H^*_G(B_GU(n))\xrightarrow{\oplus \sigma} H^*_G(BU(n-1))$. They span the sub-Hopf-algebra of $H^*_G(B_G^{\pm}U)$ consisting of constant sequences, given by: \begin{equation} \frac{A_{\Q}[c_i,\gamma_i]}{x\gamma_i} \end{equation} with coalgebra structure: \begin{gather} c_s\mapsto \sum_{i+j=s}c_i\otimes c_j\\ \gamma_s\mapsto \sum_{i+j=s}(c_i\otimes \gamma_j+\gamma_i\otimes c_j) \end{gather} using the conventions $c_0=1$ and $\gamma_0=0$. \end{prop} \fi The spaces $B_G^+U, B_G^-U, B_G^{\pm}U$ are equivariant $H$-spaces, hence their equivariant homology is a Green functor dual to their equivariant cohomology. This homology can be expressed in terms of the classes $a_i,b_i,d\in H_*^G(B_GU(1))$ dual to $\alpha c_1^i,c_1^i, x/2+\alpha\in H^*_G(B_GU(1))$ respectively, where $i\ge 1$. 
Note that the $\gamma_1^i\in H^*_G(B_GU(2))$ map to $\alpha c_1^i$ under $H^*_G(B_GU(2))\xrightarrow{\oplus 1}H^*_G(B_GU(1))$ so the $a_i$ can be thought of as duals to the $\gamma_1^i$. \begin{prop}\label{StableChernHomology}We have: \begin{gather} H_*^G(B_G^-U)=\frac{A_{\Q}[d,a_i,b_i]}{xa_i, xd=x}\\ H_*^G(B_G^{\pm}U)=\frac{A_{\Q}[d^{\pm},a_i,b_i]}{xa_i, xd=x} \end{gather} and for the coalgebra structure: \begin{gather} d\mapsto d\otimes d\\ a_i\mapsto \sum_{j+k=i}a_j\otimes a_k\\ b_i\mapsto \sum_{j+k=i}(b_j\otimes b_k-b_j\otimes a_k-b_k\otimes a_j+2a_j\otimes a_k) \end{gather} using the conventions $a_0=d-x/2$ and $b_0=1$. \end{prop} The case of stable symplectic classes is entirely analogous: We can distinguish between $B_G^+Sp, B_G^-Sp$ and $B_G^{\pm}Sp$ and we have classes $k_i, \kappa_i=k_i\alpha-\kappa_{i,1}$ that are stable under both $\oplus 1, \oplus \sigma$ maps. Moreover, \begin{prop}\label{UvsSp} The forgetful map $Sp\to U$ induces \begin{gather} c_{2s+1}, \gamma_{2s+1}\mapsto 0\\ c_{2s}\mapsto (-1)^sk_{s}\\ \gamma_{2s}\mapsto (-1)^s\kappa_{s} \end{gather} while quaternionization $U\to Sp$ induces \begin{gather} k_i\mapsto \sum_{a+b=2i}(-1)^{a+i}c_ac_b\\ \kappa_i\mapsto \sum_{a+b=2i}(-1)^{a+i}c_a\gamma_b \end{gather} \end{prop} The dual homology result can be expressed in terms of the classes $a_i^{sp},b_i^{sp},d\in H_*^G(B_GSp(1))$ dual to $\alpha k_1^i,k_1^i, x/2+\alpha\in H^*_G(B_GSp(1))$ respectively, for $i\ge 1$ (the $a_i^{sp}$ are dual to $\kappa_1^i$). 
The analogue of Proposition \ref{StableChernHomology} holds, and: \begin{prop}\label{UvsSpHomology} The forgetful map $Sp\to U$ induces \begin{gather} d\mapsto d\\ a_i^{sp}\mapsto \sum_{2i=j+k}(-1)^ka_ja_k\\ b_i^{sp}\mapsto \sum_{2i=j+k}(-1)^k(b_jb_k-a_jb_k-a_kb_j+2a_ja_k) \end{gather} while quaternionization $U\to Sp$ induces \begin{gather} d\mapsto d\\ a_{2i+1}\mapsto 0\text{ , }a_{2i}\mapsto a_i^{sp}\\ b_{2i+1}\mapsto 0\text{ , }b_{2i}\mapsto b_i^{sp} \end{gather} \end{prop} The case of stable Pontryagin classes is entirely analogous, replacing $Sp$ by $SO$ (the forgetful map $Sp\to U$ is replaced by complexification $SO\to U$ and the quaternionization map $U\to Sp$ is replaced by the forgetful map $U\to SO$). In brief, setting $\pi_i=p_i\alpha-\pi_{i,1}$ gives the analogue of \ref{UvsSp}. Moreover, we have classes $a_i^{so},b_i^{so},d\in H_*^G(B_GSO(2))$ dual to $\alpha p_1^i,p_1^i, x/2+\alpha\in H^*_G(B_GSO(2))$ respectively, for $i\ge 1$, and the analogues of Propositions \ref{StableChernHomology} and \ref{UvsSpHomology} also hold. \subsection{Orthogonal groups} Unlike their nonequivariant counterparts, the $C_2$ classifying spaces of the orthogonal groups $O(n)$ don't generally satisfy the maximal torus isomorphism, i.e. $H^*_G(B_GO(n))\to H_G^*(B_GT)^W$ is not generally an isomorphism, where $T$ is the maximal torus in $O(n)$ and $W$ the Weyl group. Moreover, $H^*_G(B_GO(2n))$ is not isomorphic to $H^*_G(B_GO(2n+1))$, but rather, the inclusion-induced map $$H^*_G(B_GO(2n+1))\to H^*_G(B_GO(2n))$$ is always a surjection with nontrivial kernel. The spaces $B_GO(2n+1)$ can be put into our framework using the splitting $O(2n+1)=SO(2n+1)\times O(1)$: \begin{prop}There is a generator $\beta\in H^0_G(B_GO(1))$ such that \begin{equation} H^*_G(B_GO(2n+1))=\frac{A_{\Q}[\alpha,\beta,p_i,\pi_{s,j}]}{x\alpha, x\beta, x\pi_{s,j}, S} \end{equation} \end{prop} The $H^*_G(B_GO(2n))$ can then be understood as quotients of $H_G^*(B_GO(2n+1))$ (see section \ref{Orthogonal}). 
The stable case similarly reduces to $B_GSO$ by use of the fact that $B_GO=B_GSO\times B_GO(1)$. \subsection{Special unitary groups} For $SU(n)$ we have the maximal torus isomorphism equivariantly: \begin{prop} The maximal torus inclusion $U(1)^{n-1}\to SU(n)$ induces an isomorphism \begin{equation} H^*_G(B_GSU(n))\to H^*_G(B_GU(1)^{n-1})^{\Sigma_n} \end{equation} \end{prop} We prove that for any $n$, the inclusion induced map \begin{equation} H^*_G(B_GU(n))\to H^*_G(B_GSU(n)) \end{equation} is a surjection, and $c_1=\gamma_{1,n-1}=0$ in $H^*_G(SU(n))$. There are more relations however; for example, if $n=2$ there is an additional relation $\alpha^2=2\alpha$ since $SU(2)=Sp(1)$. In the stable case, we can distinguish between $B_G^+SU, B_G^-SU$ and $B_G^{\pm}SU$ and we have $c_1=\gamma_1=0$. \section{\texorpdfstring{$C_2$ Chern classes}{C2 Chern classes}}\label{C2ChernSection} The goal of this section is to prove our results on Chern classes. In effect, we need to prove the isomorphism: \begin{gather} H^*_G(B_GU(n))= (H^*_G(B_GU(1))^{\otimes n})^{\Sigma_n} \end{gather} and then use the computation of $H^*_G(B_GU(1))$ to obtain the algebraic description in Proposition \ref{C2Chern2} and Proposition \ref{C2ChernIso}. \subsection{\texorpdfstring{The $n=1$ computation}{The n=1 computation}}\label{n=1Comp}For any $C_2$-space $X$, by \cite{GM95}, \begin{equation} H^*_{C_2}(X)=H^*(X)^{C_2}\oplus H^*(X^{C_2}) \end{equation} Now take $X=B_GS^1$ which is $\C P^{\infty}$ with the $C_2$ action given on complex homogeneous coordinates by: \begin{equation} g(z_0:z_1:z_2:z_3:\cdots)=(z_0:-z_1:z_2:-z_3:\cdots) \end{equation} We have $H^*(\C P^{\infty})=\Q[r]$ for a generator $r$ of degree $2$. The $C_2$ action is trivial as can be verified on the $2$-skeleton $S^2=\C P^1\subseteq \C P^{\infty}$: the $C_2$ action on $S^2$ is a rotation hence has degree $1$. 
We also have $(B_GS^1)^{C_2}=\C P^{\infty}\coprod \C P^{\infty}$ spanned by $v^+=(z_0:0:z_2:0:\cdots)$ and $v^-=(0:z_1:0:z_3:\cdots)$ respectively. Thus we get \begin{equation} H^*_{C_2}(B_GS^1)=H^*(BS^1)\oplus H^*((B_GS^1)^{C_2})=\Q[cidem_1]\oplus \Q[cidem_2]\oplus \Q[cidem_3] \end{equation} where $cidem_1$ is the nonequivariant generator and $cidem_2,cidem_3$ correspond to $v^+,v^-$ respectively. Define $\alpha=cidem_2^0$ and $u=cidem_1+cidem_2+cidem_3$; then \begin{gather} cidem_1^0=x/2\text{ , }cidem_2^0=\alpha\text{ , }cidem_3^0=y-\alpha\\ cidem_1=xu/2\text{ , } cidem_2=u\alpha\text{ , }cidem_3=(y-\alpha)u \end{gather} We have proven: \begin{prop}\label{C2Chern1Class}As an algebra over $A_{\Q}$: \begin{equation} H_G^*(B_{C_2}S^1)=\frac{A_{\Q}[u,\alpha]}{\alpha^2=\alpha, x\alpha=0} \end{equation} for $|u|=2$ and $|\alpha|=0$. \end{prop} We should compare this with the description \[H_G^{\bigstar}(B_GS^1)=H^{\bigstar}_G[c,b]/(c^2=a_{\sigma}^2c+u_{2\sigma}b)\] obtained in \cite{Shu14}. The correspondence of generators is: \begin{gather} \alpha=c\frac{y}{a_{\sigma}^2}\\ u=c\frac{x}{2u_{2\sigma}}+b\frac{y}{a_{\sigma}^2}\\ c=u u_{2\sigma}+\alpha a_{\sigma}^2\\ b=-u^2u_{2\sigma}+u a_{\sigma}^2 \end{gather} \subsection{Maximal tori and Weyl groups} If $L$ is a compact connected Lie group and $T$ a maximal torus in $L$, we have the inclusion-induced map \begin{equation} H_G^*(B_GL)\to H_G^*(B_GT) \end{equation} The Weyl group $W=W_LT$ acts on $L, T$ by conjugation hence on $H^*(B_GT), H^*(B_GL)$ and the inclusion-induced map is $W$-equivariant. Actually, $W$ acts trivially on $B_GL$, which is a special case of the fact that an inner automorphism of $H$ induces the identity map on $B_GH$ up to homotopy (see \cite{BCM} for the classical nonequivariant case; the equivariant generalization is straightforward). 
Thus our map factors through the $W$-fixed points: \begin{equation} H_G^*(B_GL)\to H_G^*(B_GT)^W \end{equation} This breaks into: \begin{gather} H^*(BL)\to H^*(BT)^W\\ H^*((B_GL)^{C_2})\to H^*((B_GT)^{C_2})^W \end{gather} The first map is an isomorphism (\cite{BCM}), so if we can prove that the second map is an isomorphism then \begin{equation} H^*_G(B_GL)\to H^*_G(B_GT)^W \end{equation} will also be an isomorphism. If $T=\prod^nS^1$ then \begin{equation} (B_GT)^{C_2}=\coprod^{2^n}\prod^nBS^1 \end{equation} The coproduct is indexed over $C_2^n$ i.e. sign configurations $(\pm,...,\pm)$ (the next subsection explains why it's natural to use sign configurations). By considering the number $m$ of $+$'s in a configuration, we can further break this into \begin{equation} (B_GT)^{C_2}=\coprod_{m=0}^n\coprod^{\binom nm}\prod^nBS^1 \end{equation} In cohomology: \begin{equation} H^*((B_GT)^{C_2})=\oplus_{m=0}^n\oplus^{\binom nm}H^*(BS^1)^{\otimes n} \end{equation} The $\Sigma_n$ action permuting the $S^1$ factors in $T$ has the effect of preserving the $m$, permuting the $\binom nm$ many sign configurations and permuting the factors in the tensor product. \subsection{The maximal torus isomorphism} We use the Grassmannian model $Gr(n,\C^{\infty\rho})$ for $B_GU(n)$, that consists of $n$-dimensional (complex) subspaces of $\C^{\infty\rho}$ where $\rho=1+\sigma$ is the complex regular representation of $G$. We have: \begin{equation} B_GU(n)^{C_2}=\coprod_{m=0}^nBU(m)\times BU(n-m) \end{equation} Indeed, a fixed point $V$ is a $C_2$ subspace and thus admits a unique decomposition $V=V^+\oplus V^-$ where $gv=v$ for any $v\in V^+$ and $gv=-v$ for any $v\in V^-$. So a subspace $V$ in the LHS corresponds to the pair $(V^+,V^-)$ in the RHS. We use the maximal torus $T=\prod^nS^1$ in $U(n)$. The Weyl group is $\Sigma_n$ and the action is by permuting the $S^1$ factors. 
\begin{prop}The maximal torus inclusion induces an isomorphism: \begin{equation} H_G^*(B_GU(n))= H_G^*(B_GT)^{\Sigma_n} \end{equation} \end{prop} \begin{proof}The map $B_GT\to B_GU(n)$ is on the $C_2$-fixed points: \begin{equation} \coprod^{2^n}BU(1)^n\to \coprod_{m=0}^nBU(m)\times BU(n-m) \end{equation} sending $(v_1^{\pm},...,v_n^{\pm})$ to the direct sum $v_1^{\pm}\oplus ...\oplus v_n^{\pm}$ (we are implicitly using the identification $Gr_n(\C^{\infty \rho}\oplus \C^{\infty \rho})=Gr_n(\C^{\infty \rho})$ through a fixed linear equivariant isomorphism $\C^{\infty \rho}\oplus \C^{\infty \rho}=\C^{\infty\rho}$). Here, $v_i^+$ denotes a $1$-dimensional subspace with trivial $C_2$ action, while $v_i^-$ is a $1$-dimensional subspace with antipodal $C_2$ action. The signs in $(v_1^{\pm},...,v_n^{\pm})$ correspond to the sign configuration and the index $m$ on the RHS corresponds to the amount of $+$ signs in a configuration. Thus, the map above breaks into \begin{equation} \coprod^{\binom nm}BU(1)^n\to BU(m)\times BU(n-m) \end{equation} for every $m=0,...,n$. Fixing the $m$, the induced map on cohomology is \begin{equation} H^*(BU(m))\otimes H^*(BU(n-m))\to \oplus^{\binom nm} H^*(BU(1))^{\otimes n} \end{equation} The action of $\Sigma_n$ on the right permutes the factors in the tensor product and the sign configuration. Taking $\Sigma_n$ fixed points is equivalent to fixing our favorite configuration, say $(+,...,+,-,...,-)$, and then taking $\Sigma_m\times \Sigma_{n-m}$ fixed points, where $\Sigma_m$ permutes only the $+$'s and $\Sigma_{n-m}$ permutes only the $-$'s. With that in mind, our map factors through $\Sigma_n$ fixed points and we get \begin{gather} H^*(BU(m))\otimes H^*(BU(n-m))\to (H^*(BU(1))^{\otimes m})^{\Sigma_m}\otimes (H^*(BU(1))^{\otimes (n-m)})^{\Sigma_{n-m}} \end{gather} This is the tensor product of maps $H^*(BU(i))\to (H^*(BU(1))^{\otimes i})^{\Sigma_i}$ where $i=m,n-m$. 
These maps are induced by the maximal torus inclusions $U(1)^i\to U(i)$, hence are isomorphisms by the nonequivariant case. \end{proof} \subsection{The fixed point computation}\label{TheAlgebraPart} The cohomology of the maximal torus is: \begin{equation} H^*_G(B_GT)= H^*_G(\prod_nB_GS^1)=\frac{A_{\Q}[u_i,\alpha_i]_{1\le i\le n}}{x\alpha_i=0, \alpha_i^2=\alpha_i} \end{equation} with the $\Sigma_n$ action permuting the $u_i$ and $\alpha_i$ separately (namely $\sigma u_i=u_{\sigma(i)}$ and $\sigma \alpha_i=\alpha_{\sigma(i)}$ for $\sigma\in \Sigma_n$). In this subsection, we summarize the computation of the $\Sigma_n$-fixed points: \begin{equation}H^*_G(B_GU(n))=(H_G^*(B_GS^1)^{\otimes n})^{\Sigma_n}=\Big(\frac{A_{\Q}[u_i,\alpha_i]}{x\alpha_i=0, \alpha_i^2=\alpha_i}\Big)^{\Sigma_n} \end{equation} that is proven in a more general form in Appendix \ref{appen} (see Propositions \ref{AlgebraPropositionGeneral} and \ref{AlgebraProposition2General}). We have: \begin{cor}\label{AlgebraCorollaryQ}For $1\le i,s\le n$ and $1\le j\le n-s$ consider the elements of the $A_{\Q}$-algebra: \begin{equation}H_G^*(B_GS^1)^{\otimes n}=\frac{A_{\Q}[u_i,\alpha_i]}{x\alpha_i=0, \alpha_i^2=\alpha_i} \end{equation} given by: \begin{gather} \alpha=\sigma_1(\alpha_1,...,\alpha_n)=\sum_{1\le m\le n}\alpha_m\\ c_i=\sigma_i(u_1,...,u_n)=\sum_{m_*\in K_i}u_{m_1}\cdots u_{m_i}\\ \gamma_{s,j}=\sum_{(m_*,l_*)\in K_{s,j}}u_{m_1}\cdots u_{m_s}\alpha_{l_1}\cdots \alpha_{l_j} \end{gather} where $K_i$ consists of partitions $1\le m_1<\cdots<m_i\le n$ , $K_{s,j}\subseteq K_s\times K_j$ consists of pairs of disjoint partitions and $\sigma_i$ is the $i$-th elementary symmetric polynomial.\\ Then: \begin{equation} H^*_G(B_GU(n))=\frac{A_{\Q}[\alpha,c_i,\gamma_{s,j}]}{x\alpha=0, x\gamma_{s,j}=0, S} \end{equation} where the finite set $S$ of relations consists of three types of relations: \begin{itemize} \item Type I: $$\alpha^{n+1}=\sum_{m=1}^nr_m\alpha^{m}$$ where $r_1=(-1)^nn!$ and \begin{equation} 
r_{m+1}=(-1)^{n+m}n!\sum_{1\le i_1<\cdots<i_m\le n}\frac1{i_1\cdots i_m} \end{equation} We can also write this relation as \begin{equation} \alpha(\alpha-1)\cdots (\alpha-n)=0 \end{equation} \item Type II: \begin{equation} \alpha^s\gamma_{s,i}=\frac{s!}{(s+i)!}c_s\alpha^{s+i}+\cdots \end{equation} where $\cdots$ denotes a homogeneous polynomial smaller than $c_s\alpha^{s+i}$ (see Appendix \ref{appen} for a definition of this order). \item Type III: If $s\le t\le s+i$, $$\gamma_{s,i}\gamma_{t,j}=\binom{\min(i+j+s,n)-t}{j}c_t\gamma_{s,\min(i+j,n-s)}+\cdots$$ where $\cdots$ denotes a homogeneous polynomial smaller than $c_t\gamma_{s,\min(i+j,n-s)}$. \end{itemize} The polynomials $\cdots$ can be algorithmically computed in terms of $\alpha,c_i,\gamma_{s,j}$; the algorithm is described in Appendix \ref{appen} and has been implemented in the computer program found \href{https://github.com/NickG-Math/Symmetric_Polynomials}{here}. The elements $c_i$ are algebraically independent and $H^*_G(B_GU(n))$ is finite over $A_{\Q}[c_1,...,c_n]$ hence has Krull dimension $n$. A basis of $H^*_G(B_GU(n))$ over $A_{\Q}$ consists of the elements \begin{equation} \alpha^a\prod_{i=1}^n c_i^{k_i}\prod_{s=1}^n\prod_{i=1}^{n-s} \gamma_{s,i}^{\epsilon_{s,i}} \end{equation} where $0\le a\le n$ and $\epsilon_{s,i}=0,1$ are such that for any $\gamma_{s,i}$ appearing in the product we must have $a<s$ and for any two factors $\gamma_{s,i},\gamma_{t,j}$ with $s\le t$ we must have $s+i<t$.\end{cor} \begin{cor}\label{AlgebraCorollary2Q}Any set of homogeneous algebra generators of $H^*_G(B_GU(n))$ over $A_{\Q}$ has cardinality at least $1+n+\binom n2$, which is the cardinality of the generating set $\{\alpha,c_s,\gamma_{s,j}\}$. 
\end{cor} We can extend $\gamma_{s,i}$ to $s=0$ and $i=0$ via: \begin{align} \gamma_{s,0}&=yc_s=y\sigma_i(u_1,...,u_n)\\ \gamma_{0,i}&=\sigma_i(\alpha_1,...,\alpha_n)=(i!)^{-1}\alpha(\alpha-1)\cdots (\alpha-i+1)\\ \gamma_{0,0}&=y \end{align} Whenever we write $\gamma_{s,i}$ it is implicit that $s,i>0$, unless we explicitly state that we are using the convention above. \subsection{Dimension count}Consider the modified partition function $p(n,m)$ counting sequences $a_1\ge \cdots\ge a_n\ge 0$ with $m=\sum_ia_i$ (the usual partition function requires $a_n\ge 1$). We have the recursion \begin{equation} p(n,m)=p(n,m-n)+p(n-1,m) \end{equation} Then $p(n,m)$ is the dimension of the vector space of symmetric polynomials in $\Q[x_1,...,x_n]$ of degree $m$, which we grade as $|x_1|=\cdots=|x_n|=1$. If $R$ is as in subsection \ref{TheAlgebraPart} with $k=\Q$, we have: \begin{equation} \dim (R^{\Sigma_n}_m)=\sum_{i=0}^n\sum_{j=0}^mp(i,j)p(n-i,m-j) \end{equation} We can equivalently express these facts as: \begin{equation} \dim H^{2m}(BU(n))=p(n,m) \end{equation} and \begin{equation} \dim H^{2m}_G(B_GU(n))=p(n,m)+\sum_{i=0}^n\sum_{j=0}^mp(i,j)p(n-i,m-j) \end{equation} Note that for fixed $m$ the dimensions $\dim H^{2m}(BU(n))$ stabilize for large enough $n$. That is not the case for $\dim H^{2m}_GB_GU(n)$. 
\section{\texorpdfstring{$C_2$ Chern classes of sums and tensor products}{C2 Chern classes of sums and tensor products}}\label{C2ChernSumTensor} \begin{prop}The map $B_GU(n)\to B_GU(n+1)$ given by direct sum with a trivial complex representation induces on cohomology: \begin{gather} \alpha\mapsto y+\alpha\\ c_i\mapsto c_i\\ \gamma_{s,i}\mapsto \gamma_{s,i}+\gamma_{s,i-1} \end{gather} using the convention $\gamma_{s,0}=yc_s$.\\ The map $B_GU(n)\to B_GU(n+1)$ given by direct sum with a complex $\sigma$ representation induces on cohomology: \begin{gather} \alpha\mapsto \alpha\\ c_i\mapsto c_i\\ \gamma_{s,i}\mapsto \gamma_{s,i} \end{gather} For both maps we use the conventions that $c_{n+1}=0$ and $\gamma_{s,n+1-s}=0$ in every RHS. \end{prop} \begin{proof}We have a commutative diagram \begin{center}\begin{tikzcd} B_GU(n)\ar[r,"\oplus 1"]&B_GU(n+1)\\ B_GT^n=B_GT^n\times *\ar[u]\ar[r]&B_GT^n\times B_GS^1=B_GT^{n+1}\ar[u] \end{tikzcd}\end{center} where the bottom map is the product of the identity map $B_GT^n\to B_GT^n$ and the inclusion map $*\to B_GS^1$ given by $*\mapsto v^+$ where $v^+=(1:0:1:0:\cdots)$ in the homogeneous coordinates of $B_GS^1$. This inclusion map induces on cohomology: $$A_{\Q}[\alpha,u]/(\alpha^2=\alpha, x\alpha) \to A_{\Q}$$ given by $y-\alpha\mapsto 0$ and $u\mapsto 0$ (this is verified by looking at the $C_2$ fixed points). Similarly, adding a $\sigma$ representation induces $$A_{\Q}[\alpha,u]/(\alpha^2=\alpha, x\alpha) \to A_{\Q}$$ given by $\alpha\mapsto 0$ and $u\mapsto 0$. 
Thus the $\oplus 1$ induced map is determined by: \begin{gather} u_i\mapsto u_i\text{ , }\alpha_i\mapsto \alpha_i\text{ , }i<n+1\\ u_{n+1}\mapsto 0\text{ , }\alpha_{n+1}\mapsto y \end{gather} The $\oplus \sigma$ induced map is determined by: \begin{gather} u_i\mapsto u_i\text{ , }\alpha_i\mapsto \alpha_i\text{ , }i<n+1\\ u_{n+1}\mapsto 0\text{ , }\alpha_{n+1}\mapsto 0 \end{gather} These descriptions imply the ones on the generators $\alpha,c_i,\gamma_{s,j}$.\end{proof} \begin{prop} The direct sum of bundles map $B_GU(n)\times B_GU(m)\to B_GU(n+m)$ induces on cohomology: \begin{gather} \alpha\mapsto \alpha\otimes 1+1\otimes \alpha\\ c_i\mapsto \sum_{j+k=i}c_j\otimes c_k\\ \gamma_{s,i}\mapsto \sum_{s'+s''=s\atop i'+i''=i}\gamma_{s',i'}\otimes \gamma_{s'',i''} \end{gather} using the convention for defining $c_0,\gamma_{s,0},\gamma_{0,i}$ in the RHS. \end{prop} \begin{proof}The corresponding map on maximal tori $$\prod^nB_GS^1\times \prod^mB_GS^1\to \prod^{n+m}B_GS^1$$ induces on cohomology: \begin{equation} u_i\mapsto \begin{cases} u_i\otimes 1&\textup{ if }i\le n\\ 1\otimes u_{i-n}&\textup{ if }i>n \end{cases}\text{ , } \alpha_i\mapsto \begin{cases} \alpha_i\otimes 1&\textup{ if }i\le n\\ 1\otimes \alpha_{i-n}&\textup{ if }i>n \end{cases} \end{equation} This implies the formulas on $\alpha,c_i,\gamma_{s,j}$. 
\end{proof} \begin{prop}\label{TensorProof} The tensor product of bundles map $B_GU(1)\times B_GU(1)\to B_GU(1)$ induces on cohomology: \begin{gather} \alpha\mapsto y-\alpha\otimes 1-1\otimes \alpha+2\alpha\otimes \alpha\\ u\mapsto u\otimes 1+1\otimes u\end{gather} \end{prop} \begin{proof}The map in question, induced from multiplication $S^1\times S^1\to S^1$, is given on the homogeneous coordinates by multiplication of polynomials: \begin{equation} (x_0:x_1:\cdots)\otimes (y_0:y_1:\cdots)=(x_0y_0:x_0y_1+x_1y_0:\cdots) \end{equation} Write \begin{equation} H^*_{C_2}(B_GU(1))=H^*(BU(1))\oplus H^*(B_GU(1)^{C_2})=\Q[cidem_1]\oplus \Q[cidem_2]\oplus \Q[cidem_3] \end{equation} as in subsection \ref{n=1Comp}. Then $B_GU(1)\times B_GU(1)\to B_GU(1)$ induces \begin{equation} H^*_G(B_GU(1))\to H^*_G(B_GU(1))\boxtimes_{A_{\Q}} H^*_G(B_GU(1)) \end{equation} which breaks into \begin{gather} H^*(BU(1))\to H^*(B_GU(1))\otimes H^*(B_GU(1))\\ H^*(B_GU(1)^{C_2})\to H^*(B_GU(1)^{C_2})\otimes H^*(B_GU(1)^{C_2}) \end{gather} By the nonequivariant case, the first map is \begin{gather} cidem_1^0\mapsto cidem_1^0\otimes cidem_1^0\\ cidem_1\mapsto cidem_1\otimes cidem_1^0+cidem_1^0\otimes cidem_1 \end{gather} For the second map, note that in the $H$-space structure, $v^+,v^-$ multiply according to: $v^{\alpha}\cdot v^{\beta}=v^{\alpha\beta}$ for $\alpha,\beta=\pm 1$. 
This means that the $+$ part of $H^*(B_GU(1)^{C_2})$ maps to the $+\otimes +$ and $-\otimes -$ parts of $H^*(B_GU(1)^{C_2})\otimes H^*(B_GU(1)^{C_2})$ to give: \begin{gather} cidem_2^0\mapsto cidem_2^0\otimes cidem_2^0+cidem_3^0\otimes cidem_3^0\\ cidem_2\mapsto cidem_2\otimes cidem_2^0+cidem_2^0\otimes cidem_2+cidem_3\otimes cidem_3^0+cidem_3^0\otimes cidem_3 \end{gather} Similarly, the $-$ part of $H^*(B_GU(1)^{C_2})$ maps to the $+\otimes -$ and $-\otimes +$ parts of $H^*(B_GU(1)^{C_2})\otimes H^*(B_GU(1)^{C_2})$ to give: \begin{gather} cidem_3^0\mapsto cidem_2^0\otimes cidem_3^0+cidem_3^0\otimes cidem_2^0\\ cidem_3\mapsto cidem_2\otimes cidem_3^0+cidem_2^0\otimes cidem_3+cidem_3\otimes cidem_2^0+cidem_3^0\otimes cidem_2 \end{gather} In terms of the $u,\alpha$ generators, recall $cidem_2^0=\alpha, cidem_3^0=y-\alpha, cidem_1=ucidem_1^0, cidem_2=ucidem_2^0$ and $cidem_3=ucidem_3^0$. Substituting these gives the desired formulas. \end{proof} Iterating $B_GS^1\times B_GS^1\to B_GS^1$ gives $(B_GS^1)^{\times n}\to B_GS^1$ which induces on cohomology: \begin{gather} \alpha\mapsto \frac{(-1)^n+1}2y+(-1)^{n+1}\sum_{i=1}^n(-2)^{i-1}\sigma_i(\alpha_1,...,\alpha_n)\\ u\mapsto u_1+\cdots+u_n \end{gather} We will also need that the map induced from conjugation $B_GU(1)\to B_GU(1)$ is: \begin{gather} \alpha\mapsto \alpha\\ u\mapsto -u \end{gather} This is verified similarly to Proposition \ref{TensorProof}. \section{\texorpdfstring{$C_2$ stable Chern classes}{C2 stable Chern classes}}\label{C2ChernStable} Since there are two maps $B_GU(n)\to B_GU(n+1)$ (given by direct sum with the trivial or the $\sigma$ representation) one can try to stabilize against, there are a few distinct notions of stable characteristic classes. 
\subsection{Stabilizing against one representation} First, we can stabilize with respect to direct sum with the trivial representation and get: \begin{equation} B_G^+U=\colimit(B_GU(1)\xrightarrow{\oplus 1}B_GU(2)\xrightarrow{\oplus 1}\cdots) \end{equation} Then \begin{equation} K_G^+(X)=[X,B_G^+U\times \Z]^{G} \end{equation} is the semi-ring of virtual bundles $V-n$ on $X$ modulo the equivalence relation $V\sim W\iff V+n=W+n$ for some $n$. For example, $K_G^+(*)=\Z\times \mathbb N$. Group completing $K_G^+$ gives equivariant $K$-theory $K_G$. The fixed points of $B_G^+U$ are: \begin{equation} (B_G^+U)^{C_2}=BU\times \coprod_{n=0}^{\infty}BU(n) \end{equation} We note that $B_G^+U$ is the usual equivariant classifying space $B_GU=E_GU/U$. An equivalent way of getting $B_GU$ is to stabilize with respect to direct sum with the $\sigma$ representation: \begin{equation} B_G^-U=\colimit(B_GU(1)\xrightarrow{\oplus \sigma}B_GU(2)\xrightarrow{\oplus \sigma}\cdots) \end{equation} Note that $B_G^+U\simeq B_G^-U$. \subsection{Stabilizing with respect to all representations} Group completing $B_G^+U$ or equivalently $B_G^-U$, gives: \begin{equation} B_G^{\pm}U=\colimit(B_G^+U\xrightarrow{\oplus \sigma}B_G^+U\xrightarrow{\oplus \sigma}\cdots)=\colimit(B_G^-U\xrightarrow{\oplus 1}B_G^-U\xrightarrow{\oplus 1}\cdots) \end{equation} Then \begin{equation} K_G(X)=[X,B_G^{\pm}U\times \Z]^{G} \end{equation} is the semi-ring of virtual bundles $V-n-m\sigma$ modulo the equivalence relation $V\sim W\iff V+n+m\sigma=W+n+m\sigma$ for some $n,m$. Thus $K_G(X)$ is exactly the equivariant $K$-theory of $X$. 
We finally have the fixed point computation: \begin{equation} (B_G^{\pm}U)^{C_2}=BU\times BU\times \Z \end{equation} \subsection{Cohomology computations} We prefer to work with $B_G^-U$ since the map\\ $H^*_G(B_GU(n+1))\xrightarrow{\oplus \sigma} H^*_G(B_GU(n))$ has a simpler expression on the generators $\alpha,\gamma_{s,j}$ compared to the map $H^*_G(B_GU(n+1))\xrightarrow{\oplus 1} H^*_G(B_GU(n))$. The $\Q$-subalgebras spanned by $\alpha$ in each $H_G^*(B_GU(n))$ have limit: \begin{gather} \limit \frac{\Q[\alpha]}{\alpha(\alpha-1)(\alpha-2)\cdots (\alpha-n)}= \prod_{n\ge 0}\Q\end{gather} under the correspondence: \begin{gather} a_0+\sum_{i\ge 0}a_{i+1}\alpha(\alpha-1)\cdots (\alpha-i)\mapsto (a_0,a_0+a_1,a_0+2a_1+2a_2,...) \end{gather} given by evaluating the series in the LHS at $\alpha=0,1,2,...$. Under this correspondence we have: \begin{equation} H^0_G(B_G^-U)=A_{\Q}\times \prod_{n\ge 1}A_{\Q}/x \end{equation} As a graded $H^0_G(B_G^-U)[c_1,c_2,...]$-algebra, $H^*_G(B_G^-U)$ is generated by the series $\sum_{i=1}^{\infty}r_i\gamma_{s,i}\in H^{2s}_G(B_G^-U)$ for $r_i\in \Q$ and $s=1,2,...$.\medbreak Describing $H^*_G(B_G^{\pm}U)$ in terms of the generators $\alpha,c_i,\gamma_{s,j}$ is even more complicated, as we need to take the limit of $H^*_G(B_G^-U)$ with respect to the $\oplus 1$ maps. We can alternatively view $H^*_G(B_G^{\pm}U)$ as the limit of a diagram indexed on $\mathbb N\times \mathbb N$ with $(n,m)\mapsto B_GU(n)$, horizontal maps being $\oplus 1$ and vertical maps being $\oplus \sigma$. An element of $H^*_G(B_G^{\pm}U)$ will then be a compatible doubly indexed sequence $s_{n,m}\in H^*_G(B_GU(n))$. The constant sequences (in both variables) consist of elements that are invariant under both the $\oplus 1$ and $\oplus \sigma$ maps. We can see that \begin{equation} c_i, \gamma_i:=c_i\alpha-\gamma_{i,1} \end{equation} have this property, and we conjecture that the subalgebra of constant sequences is generated by them. 
This is equivalent to the $\gamma_1,\gamma_2,...$ being algebraically independent over $\Q[c_1,c_2,...]$ and further equivalent to the subalgebra of constant sequences being \begin{equation} \frac{A_{\Q}[c_i,\gamma_i]}{x\gamma_i} \end{equation} The coalgebra structure is \begin{gather} \gamma_s\mapsto \sum_{i+j=s}(c_i\otimes \gamma_j+\gamma_i\otimes c_j) \end{gather} using the conventions $c_0=1$ and $\gamma_0=0$. \iffalse \begin{prop}The elements $c_i$ and $\gamma_i:=c_i\alpha-\gamma_{i,1}$ for $i\ge 1$, are invariant under both $H^*_G(B_GU(n))\xrightarrow{\oplus 1} H^*_G(BU(n-1))$ and $H^*_G(B_GU(n))\xrightarrow{\oplus \sigma} H^*_G(BU(n-1))$. They span the sub-Hopf-algebra of $H^*_G(B_G^{\pm}U)$ consisting of constant sequences, given by: \begin{equation} \frac{A_{\Q}[c_i,\gamma_i]}{x\gamma_i} \end{equation} with coalgebra structure: \begin{gather} c_s\mapsto \sum_{i+j=s}c_i\otimes c_j\\ \gamma_s\mapsto \sum_{i+j=s}(c_i\otimes \gamma_j+\gamma_i\otimes c_j) \end{gather} using the conventions $c_0=1$ and $\gamma_0=0$. \end{prop} \begin{proof}If $X$ is the quotient of $B_G^{\pm}U$ formed by identifying $1\sim \sigma$ then $X$ is a $G$-space with $X^G=BU\times BU$; the quotient map $B^{\pm}_GU\to X$ on the fixed points is $BU\times BU\times \Z\to BU\times BU$ collapsing $\Z$ to a point. Thus in cohomology, $H^*_G(X)\subseteq H^*_G(B_G^{\pm}U)$ consists of the constant sequences. It is clear that $H^*_G(X)$ is generated by the $c_i,\gamma_i$, and by To see that there are no relations in the (graded) $\Q$-algebra generated by $c_s,\gamma_s$, we use induction on the claim that the elements $c_1,...,c_n,\gamma_1,...,\gamma_{n-1},c_n\alpha$ are algebraically independent; this is obvious for $n=1$. For the induction step $n\implies n+1$, assume we have a nontrivial relation on $c_1,...,c_{n+1},\gamma_1,...,\gamma_n,c_{n+1}\alpha$ in $H^d_G(B_GU(m))$, $m>n$. 
Then under $H^d_G(B_GU(m))\xrightarrow{\oplus (m-n)\sigma} H^d_G(B_GU(n))$ we have $c_{n+1}=0, \gamma_n=c_n\alpha$ hence we get a relation on $c_1,...,c_n,\gamma_1,...,\gamma_{n-1},c_{n}\alpha$; by the induction hypothesis this relation must be trivial. we can see that the relation must be of the form $c_{n+1}P=0$ in $H^d_G(B_GU(n+1))$; this implies that $P=0$ is a nontrivial relation in degree $d-n$, contradicting the minimality of $d$. \end{proof} \fi \subsection{Homology computations} For homology, the idempotent decomposition gives \begin{equation} H_*^{C_2}(X)=H_*(X)^{C_2}\oplus H_*(X^{C_2}) \end{equation} as Mackey functors. For an $H$-space $X$ where $X\times X\to X$ and $S^0\to X$ are $C_2$ equivariant, this becomes an isomorphism of Green functors. Setting $X=B_G^-U$ and using \begin{equation} (B_G^-U)^{C_2}=\coprod_{n=0}^{\infty}BU(n)\times BU \end{equation} we get that $H_*^G(X)$ is the sum of \begin{gather} H_*(BU)=\Q[b_i^e]_{i\ge 1}\text{ , }\\ H_*(B_G^-U^{C_2})= \oplus_{n\ge 1}H_*(BU(n)))\otimes H_*(BU)=\Q[b_0^+,b_i^+,b_i^-]_{i\ge 1} \end{gather} where for $i\ge 1$, the $b_i^e,b_i^+,b_i^-$ are duals of $c_1^i$ using the first Chern class in the respective nonequivariant cohomology rings ($b_0^+$ indexes the components). If we let $b_i=b_i^e+b_i^++b_i^-$, $a_i=b_i^+$ for $i\ge 1$ and $d=x/2+b_0^+$ we get \begin{equation} H_*^G(B_G^-U)=A_{\Q}[d,a_i,b_i]_{i\ge 1}/(xd=x, xa_i=0) \end{equation} Group completing is localization at $d$: \begin{equation} H_*^G(B_G^{\pm}U)=A_{\Q}[d^{\pm},a_i,b_i]_{i\ge 1}/(xd=x, xa_i=0) \end{equation} The $a_i,b_i$ are dual to $\gamma_1^i,c_1^i$ respectively, while $d$ is dual to the finite series \\$x/2+\alpha\in H^0(B_G^-U)$ i.e. the sequence $(x/2,1,2,...)\in A_{\Q}\times \prod_{n\ge 1}A_{\Q}/x$. 
The coalgebra structure is: \begin{gather} d\mapsto d\otimes d\\ a_i\mapsto \sum_{j+k=i}a_j\otimes a_k\\ b_i\mapsto \sum_{j+k=i}(b_j\otimes b_k-b_j\otimes a_k-b_k\otimes a_j+2a_j\otimes a_k) \end{gather} using the conventions $a_0=d-x/2$ and $b_0=1$. \section{\texorpdfstring{$C_2$ symplectic classes}{C2 symplectic classes}}\label{C2Symplectic} Analogously to the Chern classes, we have: \begin{prop}There exist classes $\alpha,k_i,\kappa_{s,j}\in H_G^*(B_GSp(n))$ of degrees $0,4i,4s$ respectively, where $1\le i,s\le n$ and $1\le j\le n-s$, such that \begin{equation} H_G^*(B_GSp(n))=A_{\Q}[\alpha,k_i,\kappa_{s,j}]/(x\alpha, x\kappa_{s,j}, S) \end{equation} The relation set $S$ is the same as that for $H_G^*(B_GU(n))$ with $c_i,\gamma_{s,i}$ replaced by $k_i,\kappa_{s,i}$.\smallbreak The generators $\alpha, \kappa_{s,j}$ restrict to $0$ while the $k_i$ restrict to the nonequivariant symplectic classes.\smallbreak The maximal torus inclusion $U(1)^n\hookrightarrow Sp(n)$ induces an isomorphism \begin{gather} H^*_G(B_GSp(n))= (H^*_G(B_GU(1))^{\otimes n})^{C_2\wr \Sigma_n} \end{gather} Explicitly: \begin{gather} A_{\Q}[\alpha,k_i,\kappa_{s,j}]/(x\alpha, x\kappa_{s,j}, S)=(A_{\Q}[\alpha_i,u_i]/(x\alpha_i))^{C_2\wr\Sigma_n} \end{gather} Under this identification: \begin{gather} \alpha=\sum_{1\le i\le n}\alpha_i\\ k_i=\sum_{m_*\in K_i}u_{m_1}^2\cdots u_{m_i}^2\\ \kappa_{s,j}=\sum_{(m_*,l_*)\in K_{s,j}}u_{m_1}^2\cdots u_{m_s}^2\alpha_{l_1}\cdots \alpha_{l_j} \end{gather} where $K_i$ and $K_{s,j}$ are as in Corollary \ref{AlgebraCorollaryQ}. \end{prop} \begin{proof}First, we have the fixed point computation: \begin{equation} B_GSp(n)^{C_2}=\coprod_{m+k=n}BSp(m)\times BSp(k) \end{equation} The maximal torus in $Sp(n)$ is $T=\prod^nS^1$, the same as in $U(n)$, but the Weyl group now is $C_2\wr \Sigma_n$ with $\Sigma_n$ permuting the $S^1$ factors and the $i$-th $C_2$ in $C_2\wr \Sigma_n=C_2^n\rtimes \Sigma_n$ acting as conjugation on the $i$-th $S^1$ factor in $T$. 
Following the $B_GU(n)$ case, the maximal torus inclusion on the fixed points breaks into \begin{equation} \coprod^{\binom nm}BU(1)^n\to BSp(m)\times BSp(n-m) \end{equation} where the coproduct of the left is indexed on sign configurations with $m$ many $+$'s. The induced map on cohomology is \begin{equation} H^*(BSp(m))\otimes H^*(BSp(n-m))\to \oplus^{\binom nm} H^*(BU(1))^{\otimes n} \end{equation} The action of $\Sigma_n$ on the right permutes the factors in the tensor product and the sign configuration, while the $i$-th $C_2$ in $C_2^n\rtimes \Sigma_n$ acts by as the conjugation-induced map $H^*(BS^1)\to H^*(BS^1)$ on the $i$-th factor in the tensor product (fixing the sign configuration). Thus analogously to the $B_GU(n)$ case, we have an isomorphism into the $C_2\wr \Sigma_n$ fixed points of the right hand side. In conclusion: \begin{equation} H_G^*(B_GSp(n))=(\otimes^n H_G^*(B_GS^1))^{C_2\wr \Sigma_n} \end{equation} To compute the $C_2$ action on $H^*_G(B_GS^1)$ recall that conjugation $S^1\to S^1$ induces $\alpha\mapsto \alpha$ and $u\mapsto -u$ on cohomology. Fixing under the $C_2^n$ action, we get that $H_G^*(B_GSp(n))=R'^{\Sigma_n}$ where $R'=R(u_1^2,...,u_n^2,\alpha_1,...,\alpha_n)$ is the ring $R$ we used for $B_GU(n)$ but now with $u_i$ replaced by $u_i^2$. So we get equivariant symplectic classes $\alpha, k_i, \kappa_{s,i}$ of degrees $0,4i,4s$ with the desired expressions in terms of $\alpha_i,u_i^2$. \end{proof} The maps $B_GSp(n)\to B_GSp(n+1)$ and $B_GSp(n)\times B_GSp(m)\to B_GSp(n+m)$ have the same formulas as the analogous maps for $B_GU(n)$, with $c_i,\gamma_{s,i}$ replaced by $k_i,\kappa_{s,i}$ respectively. 
\begin{prop}\label{QuaterExplained}The forgetful map $B_GSp(n)\to B_GU(2n)$ induces on cohomology: \begin{gather} \alpha\mapsto \alpha\\ c_{2i+1}, \gamma_{2s+1,j}\mapsto 0\\ c_{2i}\mapsto (-1)^ik_i\\ \gamma_{2s,j}\mapsto (-1)^s\kappa_{s,j} \end{gather} The quaternionization map $B_GU(n)\to B_GSp(n)$ induces: \begin{gather} \alpha\mapsto \alpha\\ k_i\mapsto \sum_{a+b=2i}(-1)^{a+i}c_ac_b\\ \kappa_{1,j}\mapsto c_1\gamma_{1,j}-\alpha\gamma_{2,j-1}+(j-2)\gamma_{2,j}+(j-1)\gamma_{2,j-1}\\ \kappa_{s,j}\mapsto c_s\gamma_{s,j}+\cdots \end{gather} where $\cdots$ denotes a homogeneous polynomial in $R^{\Sigma_n}$ smaller than $c_s\gamma_{s,j}$ (according to the order defined in appendix \ref{appen}). This polynomial can be computed algorithmically according to the algorithm in appendix \ref{appen} which has been implemented in the computer program found \href{https://github.com/NickG-Math/Symmetric_Polynomials}{here}. \end{prop} \begin{proof} To compute the effect of the forgetful map $B_GSp(n)\to B_GU(2n)$ note that we have the commutative diagram \begin{center}\begin{tikzcd} T^n\ar[r,"\text{$z_i\mapsto (z_i,\bar z_i)$}"]\ar[d]&T^{2n}\ar[d]\\ Sp(n)\ar[r]&U(2n) \end{tikzcd}\end{center} (here $T=S^1$). The top map induces $H^*_G(B_GT^{2n})\to H^*_G(B_GT^n)$ given by $\alpha_i\mapsto \alpha_i, \alpha_{i+n}\mapsto \alpha_i, u_i\mapsto u_i, u_{i+n}\mapsto -u_i$ for $1\le i\le n$. \end{proof} The stable symplectic classes work analogously to the stable Chern classes; see subsection \ref{StableSummary} for a summary. 
\section{\texorpdfstring{$C_2$ Euler and Pontryagin classes}{C2 Euler and Pontryagin classes}}\label{C2EulerPontryagin} Analogously to the symplectic classes, we have: \begin{prop}There exist classes $\alpha,p_i,\pi_{s,j},\chi$ of degrees $0,4i,4s,n$ respectively in $H^*_G(B_GSO(2n))$, where $1\le i,s< n$ and $1\le j\le n-s$ such that \begin{equation} H_G^*(B_GSO(2n))=A_{\Q}[\alpha,p_i,\pi_{s,j},\chi]/(x\alpha, x\pi_{s,j}, S) \end{equation} where the relation set $S$ is the same as that for $H_G^*(B_GU(n))$ with $c_i,\gamma_{s,i}$ replaced by $p_i,\pi_{s,i}$ and using the convention $p_n=\chi^2$.\smallbreak The generators $\alpha,\pi_{s,j}$ restrict to $0$ while the $p_i,\chi$ restrict to the nonequivariant Pontryagin and Euler classes respectively.\smallbreak The map $B_GSO(2n)\to B_GSO(2n+1)$ induces an injection in cohomology with \begin{equation} H_G^*(B_GSO(2n+1))=A_{\Q}[\alpha,p_i,\pi_{s,j}]/(x\alpha, x\pi_{s,j}, S) \end{equation} where $i$ is allowed to be $n$ (i.e. $p_n=\chi^2$ is included). \smallbreak The maximal torus inclusion $T\hookrightarrow SO(n)$ induces an isomorphism \begin{gather} H^*_G(B_GSO(n))=(H^*_G(B_GT))^W \end{gather} where $W$ is the corresponding Weyl group. Under this isomorphism, \begin{gather} \alpha=\sum_{1\le m\le n}\alpha_m\\ p_i=\sum_{m_*\in K_i}u_{m_1}^2\cdots u_{m_i}^2\\ \pi_{s,j}=\sum_{(m_*,l_*)\in K_{s,j}}u_{m_1}^2\cdots u_{m_s}^2\alpha_{l_1}\cdots \alpha_{l_j}\\ \chi=u_1\cdots u_n \end{gather} where $K_i, K_{s,j}$ are as in Corollary \ref{AlgebraCorollaryQ}. \end{prop} \begin{proof}We use the oriented Grassmannian model for $B_GSO(n)$, consisting of $n$-dimensional oriented subspaces of $\R^{\infty\rho}$ where $\rho=1+\sigma$ is the real regular representation of $G=C_2$. The $G$ action sends an oriented subspace with basis $v_1,...,v_n$ to one with basis $gv_1,...,gv_n$. 
If $V\in B_GSO(n)$ is fixed by the $G$ action, then $V=V^+\oplus V^-$ where $V^+$ is an oriented subspace with $gv=v$ for every $v\in V^+$ and $V^-$ is an oriented subspace with $gv=-v$ for every $v\in V^-$. Moreover, since $g$ must act by an $SO(n)$ action on $V$, the dimension of $V^-$ must be even. In particular, if a $2$-dimensional subspace $V$ is fixed then $V=V^+$ or $V=V^-$. As for the uniqueness of the decomposition, note that if $W^+\oplus W^-= V^+\oplus V^-$ through an $SO(n)$ matrix $A$, then $A$ is block diagonal with blocks $B\in O(n-2k), C\in O(2k)$ and $\det(B)\det(C)=1$; $B$ gives $W^+= V^+$ while $C$ gives $W^-= V^-$. Thus \begin{equation} B_GSO(n)^{C_2}=\coprod_{k=0}^{n/2}BZ_{2k,n} \end{equation} where $Z_{k,n}$ is the subgroup of $O(k)\times O(n-k)$ consisting of pairs $(A,B)$ with $\det(A)\det(B)=1$. Note $Z_{k,n}=Z_{n-k,n}$ and $Z_{0,n}=Z_{n,0}=SO(n)$; if $0<k<n$ we have \begin{equation} Z_{k,n}=(SO(k)\times SO(n-k))\rtimes C_2 \end{equation} with $C_2$ acting diagonally by conjugation.\smallbreak The maximal torus of $Z_{2k,2n}$ is $SO(2)^n$ with Weyl group $H\rtimes (\Sigma_k\times \Sigma_{n-k})\subseteq C_2^n\rtimes (\Sigma_k\times \Sigma_{n-k})$ where $H\subseteq C_2^n$ consists of elements with even number of coordinates equal to $-1$.\smallbreak The maximal torus of $Z_{2k,2n+1}$ is $SO(2)^n$ with Weyl group $C_2^n\rtimes (\Sigma_k\times \Sigma_{n-k})$.\smallbreak Following the $B_GU(n)$ case, the maximal torus inclusion $SO(2)^n\to SO(2n+1)$ induces $B_GSO(2)^n\to B_GSO(2n+1)$ which on the fixed points becomes: \begin{equation} \coprod^{2^{n}}BSO(2)^{n}\to \coprod_{k=0}^{n}BZ_{2k,2n+1} \end{equation} with the LHS indexed over sign configurations as usual. 
Fixing the total amount $k$ of $-$ signs, the coproduct breaks into \begin{equation} \coprod^{\binom nk}BSO(2)^n\to BZ_{2k,2n+1} \end{equation} which induces \begin{equation} H^*(BZ_{2k,2n+1})\to \oplus^{\binom nk}H^*(BSO(2))^{\otimes n} \end{equation} Taking $C_2\wr \Sigma_n=C_2^n\rtimes \Sigma_n$ fixed points on the right hand side is equivalent to fixing a sign configuration and then taking $C_2^n\rtimes (\Sigma_k\times \Sigma_{n-k})$ fixed points i.e. fixing under the action of the Weyl group of $BZ_{2k,2n+1}$. Therefore we are reduced to proving $H^*(BZ_{2k,2n+1})=H^*(BSO(2)^n)^{C_2\wr (\Sigma_k\times \Sigma_{n-k})}$. Although Borel's Theorem is stated for connected Lie groups, it nonetheless works for $Z_{2k,2n+1}$ as it does for $O(n)$. This can be seen from the covering space \begin{equation} C_2\to BSO(2k)\times BSO(2n+1-2k)\to BZ_{2k,2n+1} \end{equation} for which the associated transfer map $$H^*(BZ_{2k,2n+1})\to H^*(BSO(2k)\times BSO(2n+1-2k))^{C_2}$$ is an isomorphism. Computing the right hand side shows that it's isomorphic to $H^*(BSO(2)^n)^{C_2\wr (\Sigma_k\times \Sigma_{n-k})}$ as desired. Similar arguments work for $H^*_G(B_GSO(2n))$ proving the maximal torus isomorphism in that case as well. Given the maximal torus isomorphism, we compute $H_G^*(B_GSO(2n+1))$ identically to $H_G^*(B_GSp(n))$ so we get the desired equivariant Pontryagin classes $p_i, \pi_{s,j}$. For $H_G^*(B_GSO(2n))$ we only want to fix under an even number of sign changes on the $u_i$ hence we get the equivariant Pontryagin classes plus the Euler class $\chi=\sigma_n(u_1,...,u_n)$. Note that $p_n=\chi^2$ and after removing $p_n$ there are no relations involving $\chi$ and the other generators. 
\end{proof} In the identification $B_GSO(2)=B_GU(1)$ we have \begin{gather} \chi=c_1 \end{gather} The maps $B_GSO(n)\times B_GSO(m)\to B_GSO(n+m)$ work analogously to the symplectic case, replacing the $k_i,\kappa_{s,j}$ with $p_i,\pi_{s,j}$; the action on the Euler class is \begin{equation} \chi\mapsto \chi\otimes\chi \end{equation} \begin{prop}\label{ForgetExplained}The complexification map $B_GSO(2n)\to B_GU(2n)$ induces on cohomology: \begin{gather} \alpha\mapsto \alpha\\ c_{2i+1}, \gamma_{2s+1,j}\mapsto 0\\ c_{2i}\mapsto (-1)^ip_i\\ \gamma_{2s,j}\mapsto (-1)^s\pi_{s,j} \end{gather} The forgetful map $B_GU(n)\to B_GSO(2n)$ induces on cohomology: \begin{gather} \alpha\mapsto \alpha\\ p_i\mapsto \sum_{a+b=2i}(-1)^{a+i}c_ac_b\\ \chi\mapsto c_n\\ \pi_{1,j}\mapsto c_1\gamma_{1,j}-u\gamma_{2,j-1}+(j-2)\gamma_{2,j}+(j-1)\gamma_{2,j-1}\\ \pi_{s,j}\mapsto c_s\gamma_{s,j}+\cdots \end{gather} where $\cdots$ denotes the same homogeneous polynomial as the $\cdots$ in Proposition \ref{QuaterExplained}. \end{prop} \begin{proof}To understand the effect of complexification $SO(2n)\to U(2n)$ we use the nonstandard maximal torus $T^2\to U(2)$ making the following diagram commute: \begin{center} \begin{tikzcd} S^1\ar[d,equals]\ar[r,"\text{$a\mapsto (a,\bar a)$}"]&T^2\ar[d]\\ SO(2)\ar[r]&U(2) \end{tikzcd} \end{center} where the map in the bottom row is complexification. 
More generally we have the commutative diagram \begin{center} \begin{tikzcd}[column sep=10em] T^n\ar[d]\ar[r,"\text{$(a_1,...,a_n)\mapsto (a_1,\bar a_1,...,a_n,\bar a_n)$}"]&T^{2n}\ar[d]\\ SO(2n)\ar[r]&U(2n) \end{tikzcd} \end{center} Any two maximal tori are conjugate hence induce the same map in $H^*_G(B_GH)$ so we get \begin{center} \begin{tikzcd} A_{\Q}[u_1,...,u_n,\alpha_1,...,\alpha_n]/x\alpha_i&A_{\Q}[u_1,...,u_{2n},\alpha_1,...,\alpha_{2n}]/x\alpha_i\ar[l]\\ H^*_GSO(2n)\ar[u,hook]&H^*_GU(2n)\ar[u,hook]\ar[l] \end{tikzcd} \end{center} In the top row, $u_i\mapsto u_i, u_{n+i}\mapsto -u_i, \alpha_i\mapsto \alpha_i, \alpha_{n+i}\mapsto \alpha_i$ for $i\le n$. This implies the formulas for the effect of the complexification map. \end{proof} We only have one map $B_GSO(n)\to B_GSO(n+1)$, given by direct sum with the trivial representation; direct sum with the $\sigma$ representation does not result in a $C_2$ equivariant map. There is however a $C_2$-equivariant map $B_GSO(n)\to B_GSO(n+2)$ given by adding $2\sigma$. The map $B_GSO(n)\xrightarrow{\oplus 1} B_GSO(n+1)$ induces \begin{gather} \alpha\mapsto y+\alpha\\ p_i\mapsto p_i\\ \pi_{s,j}\mapsto \pi_{s,j}+\pi_{s,j-1}\\ \chi\mapsto 0 \end{gather} (with the usual conventions on the RHS, including that $\pi_{s,0}=yp_s$). The map $B_GSO(n)\xrightarrow{\oplus 2\sigma} B_GSO(n+2)$ induces: \begin{gather} \alpha\mapsto \alpha\\ p_i\mapsto p_i\\ \pi_{s,i}\mapsto \pi_{s,i}\\ \chi\mapsto 0 \end{gather} (with the usual conventions on the RHS). So we can distinguish between $B_G^+SO, B_G^-SO$ and $B_G^{\pm}SO$ as usual. The results here are then parallel to the $B_G^+U, B_G^-U$ and $B_G^{\pm}U$ cases respectively, by replacing $\alpha,c_i,\gamma_{s,i}$ by $\alpha,p_i,\pi_{s,i}$; the Euler class $\chi$ is not stable. See subsection \ref{StableSummary} for a summary. \section{Orthogonal groups}\label{Orthogonal} The groups $O(2n), SO(2n+1), O(2n+1)$ have the same maximal torus and Weyl group. 
The resulting nonequivariant classifying spaces have isomorphic cohomology rings and the maximal torus isomorphism works in all cases (even though the orthogonal groups are disconnected). In this subsection, we shall see that this observation doesn't generalize to the $C_2$ equivariant case. For $B_GO(n)$ we compute the fixed points: \begin{equation} B_GO(n)^{C_2}=\coprod_{m+k=n}BO(m)\times BO(k) \end{equation} We consider the map \begin{equation} H^*_G(B_GO(n))\to H^*_G(B_GT)^W \end{equation} where $T\subseteq O(n)$ is a maximal torus and $W$ the Weyl group. We can see directly that for $n\le 3$ the RHS has smaller dimension compared to the LHS even before taking fixed points. So this map cannot be an isomorphism for $n\le 3$. We can also see from the fixed point computation that $H^*_G(B_GO(n))$ is never isomorphic to $H^*_G(B_GO(n+1))$ for any $n$.\medbreak There is however a natural description of the characteristic classes for the group $O(2n+1)$: since $O(2n+1)=SO(2n+1)\times O(1)$ we have that \begin{equation} B_GO(2n+1)=B_GSO(2n+1)\times B_GO(1) \end{equation} hence \begin{equation} H^*_G(B_GO(2n+1))=H^*_G(B_GSO(2n+1))\boxtimes_{A_{\Q}} H^*_G(B_GO(1)) \end{equation} and \begin{equation} H^*_G(B_GO(1))=A_{\Q}[\beta]/(\beta^2=\beta, x\beta) \end{equation} for $\beta$ in degree $0$. So the $C_2$-characteristic classes for $O(2n+1)$ are $\alpha, p_i, \pi_{s,j}, \beta$ The map $B_GO(2n)\to B_GO(2n+1)$ is always a surjection in cohomology (this follows by looking at the fixed points) so $H^*_G(B_GO(2n))$ is a quotient of $H^*_G(B_GO(2n+1))$. For $n=1$ we get: \begin{gather} H^*_G(B_GO(2))=\frac{A_{\Q}[\alpha,\beta,p_1]}{x\beta, \alpha^2=\alpha, \beta^2=\beta, \alpha\beta=\alpha, yp_1=\beta p_1} \end{gather} More generally, in degree $0$: \begin{equation} H^0_G(B_GO(2n))=H^0_G(B_GO(2n+1))/\alpha(\alpha-1)\cdots (\alpha-n+1)(1-\beta) \end{equation} For $n=2$ the other relations in higher degrees are $p_1\alpha^2(1-\beta)=0$ and $p_2(y-\beta-\alpha+\alpha\beta)=0$. 
Finally, stability for orthogonal groups is understood from $B_GO=B_GSO\times B_GO(1)$. \section{Special unitary groups}\label{SU} \begin{prop} The maximal torus inclusion $U(1)^{n-1}\to SU(n)$ induces an isomorphism \begin{equation} H^*_G(B_GSU(n))\to H^*_G(B_GU(1)^{n-1})^{\Sigma_n} \end{equation} \end{prop} \begin{proof} Analogously to $B_GSO(n)$, \begin{gather} B_GSU(n)^{C_2}=\coprod_{k=0}^{n/2}BZ'_{2k,n} \end{gather} where $Z'_{k,n}\subseteq U(k)\times U(n-k)$ consists of $(A,B)$ with $\det(A)\det(B)=1$. So $Z'_{k,n}=Z'_{n-k,n}$ and $Z'_{0,n}=SU(n)$. If $0<k<n$, \begin{equation} Z'_{k,n}=(SU(k)\times SU(n-k))\rtimes S^1 \end{equation} where $S^1$ acts diagonally by conjugation. The maximal torus in $Z'_{2k,n}$ is $U(1)^{n-1}$ and the Weyl group is $\Sigma_{2k}\times \Sigma_{n-2k}$. Taking fixed points in $B_GU(1)^{n-1}\to B_GSU(n)$ gives: \begin{equation} \coprod^{2^{n-1}}BU(1)^{n-1}\to \coprod_{k=0}^{n/2}BZ'_{2k,n} \end{equation} and as in $B_GSO(n)$, the coproduct breaks into \begin{equation} \coprod^{\binom{n}{2k}}BU(1)^{n-1}\to BZ'_{2k,n} \end{equation} In cohomology: \begin{equation} H^*(BZ'_{2k,n})\to \oplus^{\binom{n}{2k}}H^*(BU(1))^{\otimes (n-1)} \end{equation} The group $\Sigma_n$ acts on the right by permuting the sign configuration and the tensor factors $H^*(BU(1))=\Q[a_i]$ where $a_n=-(a_1+\cdots+a_{n-1})$. Thus, taking $\Sigma_n$ fixed points is equivalent to fixing a sign configuration, say $(-,...,-,+,...,+)$, and then taking $\Sigma_{2k}\times \Sigma_{n-2k}$ fixed points, which is exactly the Weyl group of $Z'_{2k,n}$. This establishes the maximal torus isomorphism for $B_GSU(n)$. 
\end{proof} In conclusion, $H^*_G(B_GSU(n))=R'^{\Sigma_n}$ where $R'=A_{\Q}[u_i,\alpha_i]_{1\le i\le n}$ modulo the relations \begin{gather} \alpha_n= \frac{(-1)^{n+1}+1}2y+(-1)^n\sum_{i=1}^{n-1}(-2)^{i-1}\sigma_i(\alpha_1,...,\alpha_{n-1})\\ u_n=-u_1-\cdots-u_{n-1} \end{gather} Using the same definitions for $\alpha,c_i,\gamma_{s,j}$ in terms of $\alpha_i,u_i$ as in $B_GU(n)$, we can see that \begin{equation} c_1=\gamma_{1,n-1}=0 \end{equation} There are more relations however: for example, $SU(2)=Sp(1)$ so we need: \begin{equation} \alpha^2=2\alpha \end{equation} as an additional relation. The identification $SU(2)=Sp(1)$ then becomes: \begin{gather*} \frac{A_{\Q}[c_2,\alpha]}{\alpha^2=2\alpha, x\alpha}\to \frac{A_{\Q}[k_1,\alpha]}{\alpha^2=\alpha}\\ c_2\mapsto k_1\\ \alpha\mapsto 2\alpha \end{gather*} The rest of the relations for each $B_GSU(n)$ can be computed algorithmically. As for stability, we have the map $B_GSU(n)\to B_GSU(n+1)$ given by direct sum with a trivial representation. The map $B_GSU(n)\to B_GSU(n+1)$ given by direct sum with a $\sigma$ representation is not $C_2$ equivariant, so we instead use the map $B_GSU(n)\to B_GSU(n+2)$ adding $2\sigma$. Analogously to the $B_GU$ case, we can distinguish between spaces $B_G^+SU, B_G^-SU$ and $B_G^{\pm}SU$. Under the inclusion $B_G^{\pm}SU\to B_G^{\pm}U$, $c_1=\gamma_1=0$. \appendix \section{Symmetric Polynomials with Relations}\label{appen} For a fixed commutative ring $k$ consider the graded $k$-algebra $R$: \begin{equation} R=R(u_1,...,u_n,\alpha_1,...,\alpha_n):=k[u_i,\alpha_i]/(\alpha_i^2=\alpha_i) \end{equation} whose generators have degrees $|u_i|=1$ and $|\alpha_i|=0$. The group $\Sigma_n$ acts on $R$ by permuting the $u_i$ and $\alpha_i$ separately.\medbreak Any monic monomial in $R$ takes the unique form \begin{equation} u_1^{a_1}\cdots u_n^{a_n}\alpha_1^{\epsilon_1}\cdots \alpha_n^{\epsilon_n} \end{equation} for $a_i\ge 0$ and $\epsilon_i=0,1$. 
We order the monic monomials of the same degree lexicographically by the powers $a_1,...,a_n,\epsilon_1,...,\epsilon_n$. For a homogeneous element $p\in R$, we consider the (nonzero) monomials $p_i$ in $p$ and let $p'_i$ be the corresponding monic monomials; the greatest of the $p'_i$ is the dominant term $\dom(p)$ of $p$. We compare homogeneous polynomials using their dominant terms (ignoring their coefficients). \begin{prop}\label{AlgebraPropositionGeneral}We have the $k$-algebra presentation: \begin{equation} R^{\Sigma_n}=\frac{k[\gamma_{s,i}]}{\gamma_{s,i}\gamma_{t,j}=\binom{\min(i+j+s,n)-t}{j}\gamma_{t,0}\gamma_{s,\min(i+j,n-s)}+p_{s,i,t,j,n}} \end{equation} where: \begin{itemize} \item For each pair of nonnegative indices $s,i$ such that $s+i\le n$ we have one generator $\gamma_{s,i}$ of degree $s$ given by: \begin{gather} \gamma_{s,i}=\sum_{(m_*,l_*)}u_{m_1}\cdots u_{m_s}\alpha_{l_1}\cdots \alpha_{l_i} \end{gather} where $(m_*,l_*)$ ranges over pairs of disjoint partitions $1\le m_1<\cdots<m_s\le n$ and $1\le l_1<\cdots<l_i\le n$. \item For each quadruple of indices $s,i,t,j$ such that $0\le s\le t\le s+i$, $0<i\le n-s$ and $0<j\le n-t$, we have a relation: \begin{gather} \gamma_{s,i}\gamma_{t,j}=\binom{\min(i+j+s,n)-t}{j}\gamma_{t,0}\gamma_{s,\min(i+j,n-s)}+p_{s,i,t,j,n} \end{gather} where $p_{s,i,t,j,n}$ is a homogeneous polynomial in $R^{\Sigma_n}$ smaller than $\gamma_{t,0}\gamma_{s,\min(i+j,n-s)}$. \end{itemize} The elements $c_i=\gamma_{i,0}$ are algebraically independent over $k$ and $R^{\Sigma_n}$ is finite over $k[c_1,...,c_n]$ so we have the equality of Krull dimensions: \begin{equation} \dim(R)=\dim(k)+n \end{equation} A $k$-module basis of $R^{\Sigma_n}$ consists of the elements \begin{equation} \prod_{i=1}^nc_i^{r_i}\prod_{s=0}^n\prod_{i=1}^{n-s} \gamma_{s,i}^{\epsilon_{s,i}} \end{equation} where the $\epsilon_{s,i}=0,1$ are such that whenever $\epsilon_{s,i}=\epsilon_{t,j}=1$ for $1\le i,j$ and $s\le t$ then we must also have $s+i<t$. 
\end{prop} Three remarks about the polynomials $p_{s,i,t,j,n}$ appearing in the relations: \begin{itemize} \item The proof of Proposition \ref{AlgebraPropositionGeneral} provides an algorithm for computing these polynomials. This algorithm has been implemented in a computer program available \href{https://github.com/NickG-Math/Symmetric_Polynomials}{here} (executable files are available \href{https://github.com/NickG-Math/Symmetric_Polynomials/releases}{here} for a quick demonstration). \item The coefficients of the polynomials $p_{s,i,t,j,n}$ are in the image of the initial homomorphism $\Z\to k$ (i.e. they are independent of $k$). \item If $n\ge i+j+s$ then $p_{s,i,t,j,n}$ is independent of $n$ and moreover the relation on $\gamma_{s,i}\gamma_{t,j}$ is independent of $n$, taking the simpler form: \begin{gather} \gamma_{s,i}\gamma_{t,j}=\binom{i+j+s-t}{j}c_t\gamma_{s,i+j}+p_{s,i,t,j} \end{gather} \end{itemize} The elements $c_i=\gamma_{i,0}$ generating $k[c_1,...,c_n]$ are the elementary symmetric polynomials on the variables $u_i$: \begin{gather} c_i=\sigma_i(u_1,...,u_n)=\sum_{1\le m_1<\cdots<m_i\le n}u_{m_1}\cdots u_{m_i} \end{gather} Similarly, the elements $\gamma_{0,i}$ generating $R^{\Sigma_n}_0$ are the elementary symmetric polynomials on the variables $\alpha_i$: \begin{gather} \gamma_{0,i}=\sigma_i(\alpha_1,...,\alpha_n)=\sum_{1\le l_1<\cdots<l_i\le n}\alpha_{l_1}\cdots \alpha_{l_i} \end{gather} \begin{prop}\label{AlgebraProposition2General}Any set of homogeneous algebra generators of $R^{\Sigma_n}$ over $R^{\Sigma_n}_0$ has cardinality at least $n+\binom n2$, which is the cardinality of the generating set $\{c_s,\gamma_{s,j}\}_{s,j>0}$. \end{prop} A minimal generating set for the $k$-algebra $R^{\Sigma_n}_0$ depends on which primes are invertible in $k$. 
For example, if $k$ is a $\Q$-algebra, which is our case of interest, we can generate all $\gamma_{0,i}$ from the single element \begin{equation} \alpha=\gamma_{0,1}=\alpha_1+\cdots+\alpha_n \end{equation} via the formula: \begin{equation} \gamma_{0,i}=\frac{\alpha(\alpha-1)\cdots (\alpha-i+1)}{i!} \end{equation} The disadvantage of supplanting $\gamma_{0,i}$ with $\alpha$ is that the relations between the generators now require additional rational coefficients, as in Corollary \ref{AlgebraCorollaryQ}. For the purposes of the algorithm implemented in our \href{https://github.com/NickG-Math/Symmetric_Polynomials}{computer program}, it is better (in terms of speed and numerical stability) to use all the $\gamma_{0,i}$ and $\Z$-coefficients.\medbreak The rest of this appendix is dedicated to proving Propositions \ref{AlgebraPropositionGeneral} and \ref{AlgebraProposition2General}. \begin{proof}\label{ProofAlgorithmGeneral}(Of Proposition \ref{AlgebraPropositionGeneral}) We will need that the dominant term of $\gamma_{s,i}$ is: \begin{gather} \dom(\gamma_{s,i})=u_1\cdots u_s\alpha_{s+1}\cdots \alpha_{s+i} \end{gather} Let us note a subtlety about dominant terms and multiplication: $\dom(pq)$ is $\dom(p)\dom(q)$ if $p$ or $q$ are polynomials solely on the $u_i$, but if both $p,q$ contain $\alpha_i$'s that may not be the case: for example $\gamma_{1,1}$ has dominant term $u_1\alpha_2$ but $\gamma_{1,1}^2$ has dominant term $u_1^2\alpha_2\alpha_3$. Every $\Sigma_n$ orbit of a monic monomial in $R$ has a greatest term $M$ that can be written as either: \begin{equation} M=u_1^{a_1}\cdots u_s^{a_s}\alpha_1^{\epsilon_1}\cdots \alpha_s^{\epsilon_s}\alpha_{s+1}\cdots \alpha_{s+i} \end{equation} with $a_1\ge \cdots\ge a_s> 0$ and $\epsilon_i=0,1$, or as: \begin{equation} M=\alpha_1\cdots\alpha_k \end{equation} It suffices to prove that any such $M$ is the dominant term of a polynomial on $\gamma_{s,j}$. 
Note that $M=\alpha_1\cdots\alpha_k$ is the dominant term of $\gamma_{0,k}$, so we may restrict our attention exclusively to $M$'s of the first form. It should also be noted that we can't assume that the $\epsilon_i$ are in decreasing order, since applying a permutation to fix such an order would affect the decreasing order on the $a_i$. So let \begin{equation} M=u_1^{a_1}\cdots u_s^{a_s}\alpha_1^{\epsilon_1}\cdots \alpha_s^{\epsilon_s}\alpha_{s+1}\cdots \alpha_{s+i} \end{equation} be greatest in its $\Sigma_n$ orbit, where $a_1\ge \cdots\ge a_s> 0$ and $\epsilon_i=0,1$. We shall prove that $M=\dom(P)$ where $P$ is a product of $\gamma_{s,j}$. To ensure that $P$ is unique per $M$, we insist that if $\gamma_{t,j}, \gamma_{t',j'}$ are factors of $P$ with $0<j,j'$ and $t\le t'$ then $t+j<t'$. With this extra requirement, no two distinct products $P$ can have the same dominant term. Further simplifying matters, note that it suffices to write \begin{equation} M=u_1^{k_1}\cdots u_s^{k_s}\dom(P') \end{equation} where $k_1\ge\cdots\ge k_s\ge 0$ and $P'$ is a product of $\gamma_{s,i}$ with $i>0$ satisfying the condition above. This is because $u_1^{k_1}\cdots u_s^{k_s}$ is the dominant term of a product $P_{c}$ of $c_i$ by the fundamental result on symmetric polynomials. Then we can take $P=P_{c}P'$ and $M=\dom(P)$. We distinguish cases on the number of $\epsilon_i$'s in $M$ that are nonzero, i.e. the number of $\alpha_i$'s in $M$ with $i\le s$. 
If there are none then \begin{equation} M= u_1^{a_1}\cdots u_s^{a_s}\alpha_{s+1}\cdots \alpha_{s+i} \end{equation} can be written as \begin{equation} (u_1^{a_1-1}\cdots u_s^{a_s-1})(u_1\cdots u_s\alpha_{s+1}\cdots \alpha_{s+i}) \end{equation} so we use $P'=\gamma_{s,i}$.\\ Now assume there's only one $\alpha_j$ with $j\le s$: \begin{equation} M= u_1^{a_1}\cdots u_s^{a_s}\alpha_j\alpha_{s+1}\cdots \alpha_{s+i} \end{equation} If $j>1$ notice that $a_{j-1}>a_j$ for otherwise we can exchange $j-1,j$ and get a greater term in our order, contradicting that $M$ is greatest in its $\Sigma_n$ orbit. If further $j<s$, we can write $M$ as \begin{equation} (u_1^{a_1-2}\cdots u_{j-1}^{a_{j-1}-2}u_j^{a_j-1}\cdots u_s^{a_s-1})(u_1\cdots u_{j-1}\alpha_j)(u_1\cdots u_s\alpha_{s+1}\cdots \alpha_{s+i}) \end{equation} and use $P_{\alpha,\gamma}=\gamma_{j-1,1}\gamma_{s,i}$. If $j=s$ we instead write $M$ as \begin{equation} (u_1^{a_1-1}\cdots u_{s-1}^{a_{s-1}-1}u_s^{a_s})(u_1\cdots u_{s-1}\alpha_s\alpha_{s+1}\cdots \alpha_{s+i}) \end{equation} and use $P'=\gamma_{s-1,i+1}$. If $j=1$ then \begin{equation} M= u_1^{a_1}\cdots u_s^{a_s}\alpha_1\alpha_{s+1}\cdots \alpha_{s+i} \end{equation} is \begin{equation} (u_1^{a_1-1}\cdots u_s^{a_s-1})\alpha_1(u_1\cdots u_s\alpha_{s+1}\cdots \alpha_{s+i}) \end{equation} and we use $P'=\gamma_{0,1}\gamma_{s+1,i}$. Now assume there are two $\alpha_j$'s with $j\le s$ in $M$, say $\alpha_j,\alpha_k$ with $j<k\le s$: \begin{equation} M=u_1^{a_1}\cdots u_s^{a_s}\alpha_{j}\alpha_k\alpha_{s+1}\cdots \alpha_{s+i} \end{equation} If $j>1$ then as before we must have $a_{j-1}>a_j$ and $a_{k-1}>a_k$. If furthermore $j<k-1$ and $k<s$ we can write $M$ as \begin{align} \left(\prod_{l=1}^{j-1}u_l^{a_l-3}\prod_{l=j}^{k-1}u_l^{a_l-2}\prod_{l=k}^{s}u_l^{a_l-1}\right)(u_1\cdots u_{j-1}\alpha_j)(u_1\cdots u_{k-1}\alpha_k)(u_1\cdots u_s\alpha_{s+1}\cdots \alpha_{s+i}) \end{align} and use $P'=\gamma_{j-1,1}\gamma_{k-1,1}\gamma_{s,i}$. 
If $j=k-1$ and $k<s$ then write $M$ as \begin{align} (u_1^{a_1-2}\cdots u_{j-1}^{a_{j-1}-2}u_j^{a_j-1}\cdots u_s^{a_s-1})(u_1\cdots u_{j-1}\alpha_j\alpha_{j+1})(u_1\cdots u_s\alpha_{s+1}\cdots \alpha_{s+i}) \end{align} and use $P'=\gamma_{j-1,2}\gamma_{s,i}$. The other cases are all handled similarly. Note that for $j=1,k=2$ we get \begin{equation} (u_1^{a_1-1}\cdots u_s^{a_s-1})(\alpha_1\alpha_2)(u_1\cdots u_s\alpha_{s+1}\cdots \alpha_{s+i}) \end{equation} and use $P'=\gamma_{0,2}\gamma_{s,i}$. We proceed in this fashion to treat the case where $r$ many $\alpha_j$'s with $j\le s$ appear in $M$, for any $r=0,...,s$. \end{proof} We now prove Proposition \ref{AlgebraProposition2General} regarding the minimality of generators. \begin{proof}(Of Proposition \ref{AlgebraProposition2General}) For every $s$ with $1\le s\le n$, consider the equivalence relation on $R^{\Sigma_n}_s$ whereby two elements are equivalent if they have equal dominant terms. The orbit set of the equivalence relation, when ordered from least to greatest, starts with $\gamma_{s,0},\gamma_{s,1},...,\gamma_{s,n-s}$ and continues with products of elements in $R^{\Sigma_n}_t$, $t<s$. Let $X$ be a homogeneous generating set of $R^{\Sigma_n}$ over $R^{\Sigma_n}_0$ of minimum cardinality, ordered first by degree and then by dominant term. The smallest element $x\in X$ must then satisfy $\dom(x)=\dom(\gamma_{1,0})$ hence $x-r\gamma_{1,0}\in R^{\Sigma_n}_0$ for some $r\in k$; this means that we can replace $x$ by $\gamma_{1,0}$ resulting in a new generating set $X$ with minimum cardinality. 
Applying the same argument repeatedly shows that all elements of $X$ can be replaced by $\gamma_{1,0},\gamma_{1,1},...,\gamma_{n,0}$ while preserving cardinality and polynomial span.\end{proof} \phantom{1}\smallbreak \begin{small} \noindent \textsc{Department of Mathematics, University of Chicago}\\ \textit{E-mail:} \verb|[email protected]|\\ \textit{Website:} \href{http://math.uchicago.edu/~nickg}{math.uchicago.edu/$\sim$nickg} \end{small} \end{document}
\begin{document} \title{Fully Dynamic Bin Packing Revisited\footnote{Supported by DFG Project, Entwicklung und Analyse von effizienten polynomiellen Approximationsschemata f\"ur Scheduling- und verwandte Optimierungsprobleme, Ja 612/14-1.}} \maketitle \begin{abstract} We consider the \emph{fully dynamic bin packing} problem, where items arrive and depart in an online fashion and repacking of previously packed items is allowed. The goal is, of course, to minimize both the number of bins used as well as the amount of repacking. A recently introduced way of measuring the repacking costs at each timestep is the \emph{migration factor,} defined as the total size of repacked items divided by the size of an arriving or departing item. Concerning the trade-off between number of bins and migration factor, if we wish to achieve an asymptotic competitive ratio of $1 + \epsilon$ for the number of bins, a relatively simple argument proves a lower bound of $\Omega(\nicefrac{1}{\epsilon})$ for the migration factor. We establish a nearly matching upper bound of $O(\nicefrac{1}{\epsilon}^4 \log \nicefrac{1}{\epsilon})$ using a new dynamic rounding technique and new ideas to handle small items in a dynamic setting such that no amortization is needed. The running time of our algorithm is polynomial in the number of items $n$ \emph{and} in $\nicefrac{1}{\epsilon}$. The previous best trade-off was for an asymptotic competitive ratio of $\nicefrac{5}{4}$ for the bins (rather than $1+\epsilon$) and needed an amortized number of $O(\log n)$ repackings (while in our scheme the number of repackings is independent of~$n$ and non-amortized). \end{abstract} \section{Introduction} For the classical bin packing\xspace problem, we are given a set $I$ of items with a size function $s\colon I\to (0,1]$ and need to pack them into as few unit sized bins as possible. 
In practice, the complete instance is often not known in advance, which has led to the definition of a variety of \emph{online} versions of the bin packing problem. First, in the classical \emph{online bin packing} \cite{ullman1971}, items arrive over time and have to be packed on arrival. Second, in \emph{dynamic bin packing} \cite{coffman1983}, items may also depart over time. This dynamic bin packing model is often used for instance in \begin{compactitem} \item the placement and movement of virtual machines onto different servers for cloud computing \cite{beloglazov2010energy, bobroff2007dynamic,srikantaiah2008energy,verma2008pmapper,jung2008generating,jung2009cost}, \item the development of guaranteed quality of service channels over certain multi-frequency time division multiple access systems \cite{park2000efficient}, \item the placement of processes, which require different resources, onto physical host machines \cite{stolyar2013infinite, stolyar2013large}, \item the resource allocation in a cloud network where the cost depends upon different parameters \cite{daudjee2014fault,li2014dynamic}. \end{compactitem} Third and fourth, we may allow already packed items to be slightly rearranged, leading to online bin packing with repacking (known as \emph{relaxed online bin packing}) \cite{gambosi2000} and dynamic bin packing with repacking (known as \emph{fully dynamic bin packing}) \cite{ivkovic1998}. See Figure~\ref{fig:overview} for a short overview on the different models. \begin{figure} \caption{Overview of online models} \label{fig:overview} \end{figure} The amount of repacking can be measured in different ways. We can either count the total number of moved items at each timestep or the sum of the sizes of the moved items at each timestep. If one wants to count the number of moved items, one typically counts a group of tiny items as a single move. 
A \emph{shifting move} \cite{gambosi2000} thus involves either a single large item or a bundle of small items in the same bin of total size $s$ with $\nicefrac{1}{10}\leq s\leq \nicefrac{1}{5}$. Such a bundle may consist of up to $\Omega(n)$ (very small) items. If an algorithm measures the repacking by shifting moves, a new tiny item may lead to a large amount of repacking. In order to guarantee that a tiny item $i$ with size $s(i)$ only leads to a small amount of repacking, one may allow to repack items whose size adds up to at most $\beta\cdot s(i)$. The term $\beta$ is called the \emph{migration factor} \cite{sanders2009}. Note that shifting moves and migration factor are incomparable in the sense that a small migration factor does not imply a small number of shifting moves and vice versa. In order to measure the quality of an online algorithm, we compare the costs incurred by an online algorithm with the costs incurred by an optimal offline algorithm. An \emph{online algorithm} receives as input a \emph{sequence} of items $I=(i_{1},i_{2},i_{3},\ldots)$ and decides at each timestep $t$, where to place the item $i_t$ without knowing future items $i_{t+1},i_{t+2},\ldots$. We denote by $I(t)=(i_{1},i_{2},\ldots,i_{t})$ the instance containing the first $t$ items of the instance $I$ and by $\operatorname{\text{\textsc{opt}}}(I(t))$ the minimal number of bins needed to pack all items in $I(t)$. Note that the packings corresponding to $\operatorname{\text{\textsc{opt}}}(I(t))$ and $\operatorname{\text{\textsc{opt}}}(I(t+1))$ may differ significantly, as those packings do not need to be consistent. For an online algorithm $A$, we denote by $A(I(t))$ the number of bins generated by the algorithm on the input sequence $I(t)$. Note that $A$ must make its decision online, while $\operatorname{\text{\textsc{opt}}}(I(t))$ is the optimal value of the offline instance. 
The quality of an algorithm for the online bin packing\xspace problem is typically measured by its \emph{asymptotic competitive ratio}. An online algorithm $A$ is called an \emph{asymptotic $\alpha$-competitive algorithm}, if there is a function $f\in o(\operatorname{\text{\textsc{opt}}})$ such that $A(I(t))\leq \alpha\operatorname{\text{\textsc{opt}}}(I(t))+f(I(t))$ for all instances $I$ and all $t\leq |I|$. The minimum $\alpha$ such that $A$ is an asymptotic $\alpha$-competitive algorithm is called the \emph{asymptotic competitive ratio of $A$}, denoted by $r_{\infty}^{\online}(A)$, i.\,e., the ratio is defined as $r_{\infty}^{\online}(A)=\min\{\alpha \mid A$ is an asymptotic $\alpha$-competitive algorithm$\}$. The online algorithm $A$ thus has a double disadvantage: It does not know future items and we compare its quality to the optimal offline algorithm which may produce arbitrarily different packings at time $t$ and time $t+1$. In order to remedy this situation, one may also compare the solution generated by $A$ to a non-repacking optimal offline algorithm. This non-repacking optimal offline algorithm knows the complete instance, but is not allowed to repack. In this work, we present new results in fully dynamic bin packing where we measure the quality of an algorithm against a repacking optimal offline algorithm and achieve an asymptotic competitive ratio of $1 + \epsilon$. The amount of repacking is bounded by $\mathcal{O}(\nicefrac{1}{\epsilon}^4\log (\nicefrac{1}{\epsilon}))$. While we measure the amount of repacking in terms of the migration factor, we also prove that our algorithm uses at most $\mathcal{O}(\nicefrac{1}{\epsilon}^4\log (\nicefrac{1}{\epsilon}))$ shifting moves. Our algorithm runs in time polynomial in the instance size and in $\nicefrac{1}{\epsilon}$. 
\subsection{Previous Results on Online Variants of Bin Packing} \subsubsection*{Online Bin Packing} \label{sec:online-bp} The classical version of the online bin packing\xspace problem was introduced by Ullman \cite{ullman1971}. In this classical model items arrive over time and have to be packed at their arrival, while \emph{one is not allowed to repack already packed items}. Ullman gave the very first online algorithm \textsc{FirstFit} for the problem and proved that its absolute competitive ratio is at most $2$. The next algorithm \textsc{NextFit} was given by Johnson \cite{johnson1974fast}, who proved that its absolute competitive ratio is also at most $2$. The analysis of the \textsc{FirstFit} algorithm was refined by Johnson, Demers, Ullman, Garey and Graham \cite{johnson1974worst}, who proved that its asymptotic competitive ratio is at most $\nicefrac{17}{10}$. A revised version of \textsc{FirstFit}, called \textsc{Revised FirstFit} was shown to have asymptotic competitive ratio of at most $\nicefrac{5}{3}$ by Yao \cite{yao1980new}. A series of developments of so called \emph{harmonic algorithms} for this problem was started by Lee and Lee \cite{lee1985simple} and the best known algorithm of this class which has asymptotic competitive ratio at most $1{.}58889$ was given by Seiden \cite{seiden2002}. The lower bound on the absolute approximation ratio of $\nicefrac{3}{2}$ also holds for the asymptotic competitive ratio as shown by Yao \cite{yao1980new}. This lower bound was first improved independently by Brown \cite{brown1979lower} and Liang \cite{liang1980lower} to $1{.}53635$ and subsequently to $1{.}54014$ by van Vliet \cite{vliet1992} and finally to $1{.}54037$ by Balogh, B{\'e}k{\'e}si and Galambos \cite{balogh2010}. \subsubsection*{Relaxed Online Bin Packing Model} In contrast to the classical online bin packing\xspace problem, Gambosi, Postiglione and Talamo \cite{gambosi2000} considered the online case where one is \emph{allowed to repack items}. 
They called this model the \emph{relaxed online bin packing\xspace model} and proved that the lower bound on the competitive ratio in the classical online bin packing\xspace model can be beaten. They presented an algorithm that uses $3$ \emph{shifting moves} and has an asymptotic competitive ratio of at most $\nicefrac{3}{2}$, and an algorithm that uses at most $7$ shifting moves and has an asymptotic competitive ratio of $\nicefrac{4}{3}$. In another work, Ivkovi\'{c} and Lloyd \cite{ivkovic1997} gave an algorithm that uses $\mathcal{O}(\log n)$ \emph{amortized} shifting moves and achieves an asymptotic competitive ratio of $1+\epsilon$. In this amortized setting, shifting moves can be saved up for later use and the algorithm may repack the whole instance sometimes. Epstein and Levin \cite{epstein2006robust} used the measure of the migration factor to give an algorithm that has an asymptotic competitive ratio of $1+\epsilon$ and a migration factor of $2^{\mathcal{O}((1/\epsilon)\log^{2}(1/\epsilon))}$. This result was improved by Jansen and Klein \cite{jansen2013binpacking} who achieved polynomial migration. Their algorithm uses a migration factor of $\mathcal{O}(\nicefrac{1}{\epsilon}^{4})$ to achieve an asymptotic competitive ratio of $1+\epsilon$. Concerning lower bounds on the migration factor, Epstein and Levin \cite{epstein2006robust} showed that no optimal solution can be maintained while having a constant migration factor (independent of $\nicefrac{1}{\epsilon}$). Furthermore, Balogh, B{\'e}k{\'e}si, Galambos and Reinelt \cite{balogh2008lower} proved that a lower bound on the asymptotic competitive ratio of $1{.}3877$ holds, if the amount of repacking is measured by the number of items and one is only allowed to repack a \emph{constant number of items}. \subsubsection*{Dynamic Bin Packing} An extension to the classical online bin packing\xspace model was given by Coffman, Garey and Johnson \cite{coffman1983}, called the \emph{dynamic bin packing} model. 
In addition to the insertion of items, \emph{items also depart} over time. \emph{No repacking is allowed} in this model. It is easily seen that no algorithm can achieve a constant asymptotic competitive ratio in this setting. In order to measure the performance of an online algorithm $A$ in this case, they compared the \emph{maximum number of bins used by $A$} with the \emph{maximum number of bins used by an optimal offline algorithm}, i.\,e., an algorithm $A$ in this dynamic model is called an \emph{asymptotic $\alpha$-competitive algorithm}, if there is a function $f\in o(\text{max-\textsc{opt}})$, where $\text{max-\textsc{opt}}(I)=\max_{t} \operatorname{\text{\textsc{opt}}}(I(t))$ such that $\max_{t} A(I(t))\leq \alpha\cdot \max_{t}\operatorname{\text{\textsc{opt}}}(I(t))+f(I)$ for all instances $I$. The minimum of all such $\alpha$ is called the \emph{asymptotic competitive ratio of $A$}. Coffman, Garey and Johnson modified the \textsc{FirstFit} algorithm and proved that its asymptotic competitive ratio is at most $2{.}897$. Furthermore, they showed a lower bound of $2{.}5$ on the asymptotic competitive ratio when the performance of the algorithm is compared to a repacking optimal offline algorithm, i.\,e., $\max_{t} \operatorname{\text{\textsc{opt}}}(I(t))$. In the case that the performance of the algorithm is compared to an optimal non-repacking offline algorithm, Coffman, Garey and Johnson showed a lower bound of $2{.}388$. This lower bound on the non-repacking optimum was later improved by Chan, Lam and Wong \cite{chan2008dynamic} to $2{.}428$ and even further in a later work by Chan, Wong and Yung \cite{chan2009dynamic} to $2{.}5$. \subsubsection*{Fully Dynamic Bin Packing} We consider the dynamic bin packing\xspace when repacking of already packed items is allowed. This model was first investigated by Ivkovi\'{c} and Lloyd \cite{ivkovic1998} and is called \emph{fully dynamic bin packing\xspace}. 
In this model, items arrive and depart in an online fashion and limited repacking is allowed. The quality of an algorithm is measured by the asymptotic competitive ratio as defined in the classical online model (no maximum is taken as in the dynamic bin packing model). Ivkovi\'{c} and Lloyd developed an algorithm that uses amortized $\mathcal{O}(\log n)$ many shifting moves (see definition above) to achieve an asymptotic competitive ratio of $\nicefrac{5}{4}$. \subsubsection*{Related Results on the Migration Factor} Since the introduction of the migration factor, several problems were considered in this model and different robust algorithms for these problems have been developed. Following the terminology of Sanders, Sivadasan and Skutella \cite{sanders2009} we sometimes use the term \emph{(online) approximation ratio} instead of competitive ratio. Hence, we also use the terms \ac{aptas} and \ac{afptas} in the context of online algorithms. If the migration factor of an algorithm $A$ only depends upon the approximation ratio $\epsilon$ and not on the size of the instance, we say that \emph{$A$ is a robust algorithm}. In the case of online bin packing, Epstein and Levin \cite{epstein2006robust} developed the first robust \ac{aptas} for the problem using a migration factor of $2^{\mathcal{O}((1/\epsilon^{2}) \log (1/\epsilon))}$. They also proved that there is no online algorithm for this problem that has a constant migration factor and that maintains an optimal solution. The \ac{aptas} by Epstein and Levin was later improved by Jansen and Klein \cite{jansen2013binpacking}, who developed a robust \ac{afptas} for the problem with migration factor $\mathcal{O}(\nicefrac{1}{\epsilon^4})$. In their paper, they developed new \ac{lp}/\ac{ilp} techniques, which we make use of to obtain polynomial migration. It was shown by Epstein and Levin \cite{epsteinu} that their \ac{aptas} for bin packing can be generalized to packing $d$-dimensional cubes into unit cubes. 
Sanders, Sivadasan and Skutella \cite{sanders2009} developed a robust \ac{ptas} for the scheduling problem on identical machines with a migration factor of $2^{\mathcal{O}((1/\epsilon) \log^2(1/\epsilon))}$. Skutella and Verschae \cite{skutella2010} studied the problem of maximizing the minimum load given $n$ jobs and $m$ identical machines. They also considered a dynamic setting, where jobs may also depart. They showed that there is no robust \ac{ptas} for this machine covering problem with constant migration. The main reason for the nonexistence is due to very small jobs. By using an amortized migration factor, they developed a \ac{ptas} for the problem with amortized migration of $2^{\mathcal{O}((1/\epsilon) \log^2(1/\epsilon))}$. \subsection{Our Contributions} \subsubsection*{Main Result} In this work, we investigate the \emph{fully dynamic bin packing} model. We measure the amount of repacking by the \emph{migration factor}; but our algorithm uses a bounded number of shifting moves as well. Since the work of Ivkovi\'{c} and Lloyd from 1998 \cite{ivkovic1998}, no progress was made on the fully dynamic bin packing\xspace problem concerning the asymptotic competitive ratio of $\nicefrac{5}{4}$. It was also unclear whether the number of shifting moves (respectively migration factor) must depend on the number of packed items $n$. In this paper we give positive answers for both of these concerns. We develop an algorithm that provides at each time step $t$ an approximation guarantee of $(1+\epsilon)\operatorname{\text{\textsc{opt}}}(I(t)) + \mathcal{O}(\nicefrac{1}{\epsilon} \log (\nicefrac{1}{\epsilon}))$. The algorithm uses a migration factor of $\mathcal{O}(\nicefrac{1}{\epsilon^4}\cdot \log(\nicefrac{1}{\epsilon}))$ by repacking at most $\mathcal{O}(\nicefrac{1}{\epsilon^3}\cdot \log(\nicefrac{1}{\epsilon}))$ bins. 
Hence, the generated solution can be arbitrarily close to the optimum solution, and for every fixed $\epsilon$ the provided migration factor is constant (it does not depend on the number of packed items). The running time is polynomial in $n$ and $\nicefrac{1}{\epsilon}$. In the case that no deletions are used, the algorithm has a migration factor of $\mathcal{O}(\nicefrac{1}{\epsilon^3}\cdot \log(\nicefrac{1}{\epsilon}))$, which beats the best known migration factor of $\mathcal{O}(\nicefrac{1}{\epsilon^4})$ by Jansen and Klein \cite{jansen2013binpacking}. Since the number of repacked bins is bounded, so is the number of shifting moves as it requires at most $\mathcal{O}(\nicefrac{1}{\epsilon})$ shifting moves to repack a single bin. Furthermore, we prove that there is no asymptotic approximation scheme for the online bin packing\xspace problem with a migration factor of $o(\nicefrac{1}{\epsilon})$ even in the case that no items depart (and even if $\mathcal{P}=\mathcal{NP}$).
This way, we can make use of the \acs{lp}-techniques developed in Jansen and Klein \cite{jansen2013binpacking}. \item In Section \ref{sec:small}, we explain how to deal with small items in a dynamic setting. In contrast to the setting where departure of items is not allowed, the fully dynamic setting provides major challenges in the treatment of small items. An approach is thus developed where small items of similar size are packed near each other. We describe how this structure can be maintained as new items arrive or depart. Note that the algorithm of Ivkovi\'{c} and Lloyd \cite{ivkovic1998} relies on the ability to manipulate up to $\Omega(n)$ very small items in constant time. See also their updated work for a thorough discussion of this issue \cite{ivkovic2009fully}. \item In order to unify the different approaches for small and large items, in Section \ref{sec:general}, we develop an advanced structure for the packing. We give novel techniques and ideas to manage this mixed setting of small and large items. The advanced structure makes use of a potential function, which bounds the number of bins that need to be reserved for incoming items. \end{compactitem} \section{Lower Bound} \label{sec:bound} We start by showing that there is no robust (asymptotic) approximation scheme for bin packing\xspace with migration factor of $o(\nicefrac{1}{\epsilon})$, even if $\mathcal{P}=\mathcal{NP}$. This improves the lower bound given by Epstein and Levin \cite{epstein2006robust}, which states that no algorithm for bin packing\xspace, that maintains an optimal solution can have a constant migration factor. Previously it was not clear whether there exists a robust approximation algorithm for bin packing with sublinear migration factor or even a constant migration factor. 
\begin{theorem} \label{thm:bound} For a fixed migration factor $\gamma > 0$, there is no robust approximation algorithm for bin packing\xspace with asymptotic approximation ratio better than $1 + \frac{1}{6\lceil\gamma\rceil+5}$. \end{theorem} \begin{proof} Let $\mathcal{A}$ be an approximation algorithm with migration factor $\gamma > 0$ and $c=\lceil \gamma\rceil$. We will now construct an instance such that the asymptotic approximation ratio of $\mathcal{A}$ with migration factor $c$ is at least $1+ \frac{1}{6c+5}$. The instance contains only two types of items: An $A$-item has size $a=\frac{\nicefrac{3}{2}}{3c+2}$ and a $B$-item has size $b=\nicefrac{1}{2}-\nicefrac{a}{3}$. For an $M\in \mathbb{N}$, let \begin{align*} I_M=[\underbrace{(b,\text{Insert}),(b,\text{Insert}),\ldots,(b,\text{Insert})}_{2M},\underbrace{(a,\text{Insert}),(a,\text{Insert}),\ldots,(a,\text{Insert})}_{2M(c+1)}] \end{align*} be the instance consisting of $2M$ insertions of $B$-items, followed by $2M(c+1)$ insertions of $A$-items. Denote by $r(t)$ the approximation ratio of the algorithm at time $t\in \mathbb{N}$. The approximation ratio of the algorithm is thus $r=\max_{t}\{r(t)\}$. The insertion of the $B$-items produces a packing with $\beta_1$ bins containing a single $B$-item and $\beta_2$ bins containing two $B$-items. These are the only possible packings and hence $\beta_1+2 \beta_2=2M$. The optimal solution is reached if $\beta_1=0,\beta_2=M$. We thus have an approximation ratio of \begin{align*} r(2M)=:r_1=\frac{\beta_1+\beta_2}{M}=\frac{2M-\beta_2}{M}, \end{align*} which is strictly monotonically decreasing in $\beta_2$. The $A$-items, which are inserted afterwards, may either be put into bins which only contain $A$-items or into bins which contain only one $B$-item. The choice of $a,b$ implies $2\cdot b+a>1$ which shows that no $A$-item can be put into a bin containing two $B$-items. Denote by $\alpha$ the number of bins containing only $A$-items.
The existing $B$-items may not be moved as the choice of $a,b$ implies $b>c\cdot a>\gamma\cdot a$. At most $\frac{\nicefrac{1}{2}+\nicefrac{a}{3}}{a}=c+1$ items of type $A$ may be put into the bins containing only one $B$-item. Note that this also implies that a bin which contains one $B$-item and $c+1$ items of type $A$ is filled completely. The optimal packing thus consists of $2M$ of those bins and the approximation ratio of the solution is given by \begin{align*} r(2M(c+2))=:r_2=\frac{\beta_1+\beta_2+\alpha}{2M}=\frac{2M-2\beta_2+\beta_2+\alpha}{2M}=\frac{2M-\beta_2+\alpha}{2M}. \end{align*} There are at most $\beta_1\cdot (c+1)$ items of type $A$ which can be put into bins containing only one $B$-item. The remaining $(2M-\beta_1)(c+1)$ items of type $A$ therefore need to be put into bins containing only $A$-items. We can thus conclude $\alpha\geq (2M-\beta_1)(c+1) a=(2M-2M+2\beta_2)(c+1)a=2\beta_2(c+1)a$. As noted above, $\frac{\nicefrac{1}{2}+\nicefrac{a}{3}}{a}=c+1$ and thus $(c+1)a=\nicefrac{1}{2}+\nicefrac{a}{3}$. Hence the approximation ratio is at least \begin{align*} &r_2=\frac{\beta_1+\beta_2+\alpha}{2M}\geq \frac{2M-\beta_2+2\beta_2(\nicefrac{1}{2}+\nicefrac{a}{3})}{2M}=\\ &\frac{2M+\beta_2(-1+1+\nicefrac{2a}{3})}{2M}=\frac{2M+\beta_2\cdot \nicefrac{2a}{3}}{2M}, \end{align*} which is strictly monotonically increasing in $\beta_2$. As $r\geq \max\{r_1,r_2\}$, a lower bound on the approximation ratio is thus given if $r_1=r_2$ by $\frac{2M-\beta}{M}=\frac{2M+\beta\cdot \nicefrac{2a}{3}}{2M}$ for a certain $\beta$. Solving this equation leads to $\beta=\frac{M}{\nicefrac{a}{3}+1}$. The lower bound is thus given as \begin{align*} r\geq \frac{2M-\beta}{M}=2-\frac{1}{\nicefrac{a}{3}+1}=1+ \frac{1}{6c+5} \end{align*} by the choice of $a$. Note that this lower bound is independent from $M$. Hence, $r$ is also a lower bound on the asymptotic approximation ratio of any algorithm as the instance size grows with $M$. 
\end{proof} We obtain the following corollary: \begin{corollary} There is no robust/dynamic (asymptotic) approximation scheme for bin packing\xspace with a migration factor $\gamma \leq \nicefrac{1}{6}(\nicefrac{1}{\epsilon}-11) = \Theta(\nicefrac{1}{\epsilon})$. \end{corollary} \section{Dynamic Rounding} \label{sec:rounding} The goal of this section is to give a robust \ac{afptas} for the case that only large items arrive and depart. In the first subsection we present a general rounding structure. In the second subsection we give operations on how the rounding can be modified such that the general structure is preserved. We give the final algorithm in Section \ref{sec:dynamicbinpacking}, which is performed, when large items arrive or depart. Finally, the correctness is proved by using the \ac{lp}/\ac{ilp} techniques developed in \cite{jansen2013binpacking}. In \cite{jansen2013binpacking}, the last two authors developed a dynamic rounding technique based on an offline rounding technique from Fernandez de la Vega and Lueker \cite{de1981bin}. However, a simple adaption of these techniques does not work in the dynamic case where items may also depart. In the case of the offline rounding by Fernandez de la Vega and Lueker, items are sorted and then collected in groups of the same cardinality. As a new item arrives in an online fashion, this structure can be maintained by inserting the new item to its corresponding group. By shifting the largest item of each group to the left, the cardinality of each group (except for the first one) can be maintained. However, shifting items to the right whenever an item departs leads to difficulties in the \ac{lp}/\ac{ilp} techniques. As the rounding for a group may increase, patterns of the existing \ac{lp}/\ac{ilp} solution might become infeasible. We overcome these difficulties by developing a new dynamic rounding structure and operations based on the offline rounding technique by Karmarkar and Karp \cite{karmarkar1982}. 
We felt that the dynamic rounding technique based on Karmarkar and Karp is easier to analyze since the structure can essentially be maintained by shifting items. A bin packing\xspace instance consists of a set of \emph{items} $I=\{i_{1},i_{2},\ldots,i_{n}\}$ with \emph{size function} $s:I\to [0,1]\cap \mathbb{Q}$. A feasible solution is a partition $B^{1},\ldots,B^{k}$ of $I$ such that $\sum_{i\in B^{j}}s(i)\leq 1$ for $j=1,\ldots,k$. We call a partition $B^{1},\ldots,B^{k}$ a \emph{packing} and a single set $B^{j}$ is called a \emph{bin}. The goal is to find a solution with a minimal number of bins. If the item $i$ is packed into the bin $B^{j}$, we write $B(i)=j$. The smallest value of $k\in \mathbb{N}$ such that a packing with $k$ bins exists is denoted by $\operatorname{\text{\textsc{opt}}}(I,s)$ or if the size function is clear by $\operatorname{\text{\textsc{opt}}}(I)$. A trivial lower bound is given by the value $\operatorname{\text{\textsc{size}}}(I,s)=\sum_{i\in I}s(i)$. \subsection{Rounding} \label{subsec:rounding} To obtain an \ac{lp} formulation of fixed (independent of $|I|$) dimension, we use a rounding technique based on the offline \ac{afptas} by Karmarkar and Karp \cite{karmarkar1982}. In order to use the technique for our dynamic setting, we give a more general rounding. This generalized rounding has a certain structure that is maintained throughout the algorithm and guarantees an approximate solution for the original instance. First, we divide the set of items into \emph{small} ones and \emph{large} ones. An item $i$ is called \emph{small} if $s(i) <\nicefrac{\epsilon}{14}$, otherwise it is called \emph{large}. Instance $I$ is partitioned accordingly into a set of large items $I_{L}$ and a set of small items $I_{S}$. We treat small items and large items differently. Small items can be packed using an algorithm presented in Section \ref{sec:smallitems} while large items will be assigned using an \ac{ilp}. 
In this section we discuss how to handle large items. First, we characterize the set of large items more precisely by their sizes. We say that two large items $i,i'$ are in the same size category if there is an $\ell\in \mathbb{N}$ such that $s(i)\in (2^{-(\ell+1)},2^{-\ell}]$ and $s(i')\in (2^{-(\ell+1)},2^{-\ell}]$. Denote the set of all size categories by $W$. As every large item has size at least $\nicefrac{\epsilon}{14}$, the number of size categories is bounded by $\log(\nicefrac{1}{\epsilon})+5$. Next, items of the same size category are characterized by their \emph{block}, which is either $A$ or $B$ and their \emph{position} $r\in \mathbb{N}$ in this block. Therefore, we partition the set of large items into a set of groups $G \subseteq W \times \{A,B\}\times \mathbb{N}$. A group $g \in G$ consists of a triple $(\ell,X,r)$ with size category $\ell \in W$, block $X \in \{A,B \}$ and position $r \in \mathbb{N}$. The \emph{rounding function} is defined as a function $R\colon I_L \to G$ that maps each large item $i\in I_L$ to a \emph{group} $g\in G$. By $g[R]$ we denote the set of items being mapped to the group $g$, i.\,e.,\ $g[R]=\mengest{i\in I_L}{R(i)=g}$. Let $q(\ell,X)$ be the maximal $r\in \mathbb{N}$ such that $|(\ell,X,r)[R]|> 0$. If $(\ell,X_1,r_1)$ and $(\ell,X_2,r_2)$ are two different groups, we say that $(\ell,X_1,r_1)$ is \emph{left} of $(\ell,X_2,r_2)$, if $X_1=A$ and $X_2=B$ or $X_1=X_2$ and $r_1<r_2$. We say that $(\ell,X_1,r_1)$ is \emph{right} of $(\ell,X_2,r_2)$ if it is not left of it. \begin{figure} \caption{Grouping in $(\ell,A,\cdot)$ and $(\ell,B,\cdot)$} \end{figure} Given an instance $(I,s)$ and a rounding function $R$, we define the rounded size function $s^R$ by rounding the size of every large item $i \in g[R]$ up to the size of the largest item in its group, hence $s^R(i)=\max\mengest{s(i')}{R(i')=R(i)}$. We denote by $\operatorname{\text{\textsc{opt}}}(I,s^R)$ the value of an optimal solution of the rounded instance $(I,s^R)$.
Depending on a parameter $k$, we define the following properties for a rounding function~$R$. \begin{compactdesc} \item[(a)\label{prop:a}] For each $i\in (\ell,X,r)[R]$ we have $2^{-(\ell+1)}<s(i)\leq 2^{-\ell}$. \item[(b)\label{prop:b}] For each $i\in (\ell,X,r)[R]$ and each $i'\in (\ell,X,r')[R]$ and $r<r'$, we have $s(i)\geq s(i')$. \item[(c)\label{prop:c}] For each $\ell\in W$ and $1 \leq r\leq q(\ell,A) $ we have $|(\ell,A,r)[R]| = 2^{\ell} k$ and $|(\ell,A,0)[R]|\leq 2^{\ell}k$. \item[(d)\label{prop:d}] For each $\ell\in W$ and each $0 \leq r\leq q(\ell,B)-1 $ we have $|(\ell,B,r)[R]| = 2^{\ell} (k-1)$ and furthermore $|(\ell,B,q(\ell,B))[R]|\leq 2^{\ell}(k-1)$. \end{compactdesc} Property \nameref{prop:a} guarantees that the items are categorized correctly according to their sizes. Property \nameref{prop:b} guarantees that items of the same size category are sorted by their size and properties \nameref{prop:c} and \nameref{prop:d} define the number of items in each group. \begin{lemma}\label{lem1} For $k = \left\lfloor \frac{\operatorname{\text{\textsc{size}}}(I_{L})\cdot \epsilon}{2(\lfloor \log(\nicefrac{1}{\epsilon})\rfloor +5)}\right\rfloor$ the number of non-empty groups in $G$ is bounded from above by $\mathcal{O}(\nicefrac{1}{\epsilon}\log(\nicefrac{1}{\epsilon}))$ assuming that $\operatorname{\text{\textsc{size}}}(I_L) > \nicefrac{8}{\epsilon}\cdot (\lceil \log (\nicefrac{1}{\epsilon})\rceil+5)$. \end{lemma} \begin{proof} Using the definition of $k$ and the assumption, we show $\frac{2\operatorname{\text{\textsc{size}}}(I_{L})}{k-1}\leq \nicefrac{8}{\epsilon}(\lfloor \log(\nicefrac{1}{\epsilon})\rfloor +5)$. 
We have \begin{align*} &\frac{2\operatorname{\text{\textsc{size}}}(I_L)}{k-1} = \frac{2\operatorname{\text{\textsc{size}}}(I_L)}{\left\lfloor \frac{\operatorname{\text{\textsc{size}}}(I_{L})\cdot \epsilon}{2(\lfloor \log(\nicefrac{1}{\epsilon})\rfloor +5)}\right\rfloor-1}\leq \frac{2\operatorname{\text{\textsc{size}}}(I_L)}{\frac{\operatorname{\text{\textsc{size}}}(I_L)\cdot \epsilon}{2(\lfloor \log( \nicefrac{1}{\epsilon})\rfloor +5)}-2}=\\ &\frac{2\operatorname{\text{\textsc{size}}}(I_L)}{\frac{\operatorname{\text{\textsc{size}}}(I_L)\cdot \epsilon- 4(\lfloor\log (\nicefrac{1}{\epsilon})\rfloor +5)}{2(\lfloor\log( \nicefrac{1}{\epsilon})\rfloor +5)}}= \frac{2\operatorname{\text{\textsc{size}}}(I_L)\cdot 2(\lfloor \log(\nicefrac{1}{\epsilon})\rfloor +5)}{\operatorname{\text{\textsc{size}}}(I_L)\cdot \epsilon - 4(\lfloor\log(\nicefrac{1}{\epsilon})\rfloor +5)} \end{align*} As $\operatorname{\text{\textsc{size}}}(I_L)> \nicefrac{8}{\epsilon}\cdot (\lceil \log (\nicefrac{1}{\epsilon})\rceil+5)$, we have $\nicefrac{\epsilon}{2}\operatorname{\text{\textsc{size}}}(I_L) > 4(\lfloor \log(\nicefrac{1}{\epsilon})\rfloor +5)$. 
We can thus bound: \begin{align*} &\frac{2\operatorname{\text{\textsc{size}}}(I_L)\cdot 2(\lfloor \log(\nicefrac{1}{\epsilon})\rfloor +5)}{\operatorname{\text{\textsc{size}}}(I_L)\cdot \epsilon - 4(\lfloor\log(\nicefrac{1}{\epsilon})\rfloor +5)}\leq \frac{2\operatorname{\text{\textsc{size}}}(I_L)\cdot 2(\lfloor \log(\nicefrac{1}{\epsilon})\rfloor +5)}{\operatorname{\text{\textsc{size}}}(I_L)\cdot \epsilon - \nicefrac{\epsilon}{2}\operatorname{\text{\textsc{size}}}(I_L)}=\\ &\frac{2\operatorname{\text{\textsc{size}}}(I_L)\cdot 2(\lfloor \log(\nicefrac{1}{\epsilon})\rfloor +5)}{\operatorname{\text{\textsc{size}}}(I_L)\cdot \nicefrac{\epsilon}{2}}= \frac{4(\lfloor \log(\nicefrac{1}{\epsilon})\rfloor +5)}{\nicefrac{\epsilon}{2}}= \frac{8(\lfloor \log(\nicefrac{1}{\epsilon})\rfloor +5)}{\epsilon} \end{align*} Note that property \nameref{prop:c} and property \nameref{prop:d} imply $|I(\ell)|\geq (q(\ell,A)+q(\ell,B)-2)2^{\ell}(k-1)$. Hence property \nameref{prop:a} implies that $\operatorname{\text{\textsc{size}}}(I(\ell),s)\geq |I(\ell)|2^{-(\ell+1)}\geq (q(\ell,A)+q(\ell,B)-2)(k-1)/2$ and therefore $q(\ell,A)+q(\ell,B)\leq 2\operatorname{\text{\textsc{size}}}(I(\ell))/(k-1) +2$. We can now bound the total number of used groups by \begin{align*} &\sum_{\ell\in W}q(\ell,A)+q(\ell,B)\leq \sum_{\ell\in W} \left (\frac{2\operatorname{\text{\textsc{size}}}(I(\ell))}{k-1}+2\right )\\ &=2|W|+\frac{2}{k-1}\sum_{\ell\in W}\operatorname{\text{\textsc{size}}}(I(\ell))= 2|W|+\frac{2}{k-1}\operatorname{\text{\textsc{size}}}(I_{L})\\ &\leq 2|W|+\frac{8}{\epsilon}(\lfloor \log(\nicefrac{1}{\epsilon})\rfloor +5) \leq\\ & 2\cdot (\log(\nicefrac{1}{\epsilon})+5)+\frac{8}{\epsilon}(\log (\nicefrac{1}{\epsilon})+5)=\\ &(\nicefrac{8}{\epsilon}+2)(\log (\nicefrac{1}{\epsilon})+5)\in \mathcal{O}(\nicefrac{1}{\epsilon}\log(\nicefrac{1}{\epsilon})) \end{align*} The total number of used groups is therefore bounded by $\mathcal{O}(\nicefrac{1}{\epsilon}\log(\nicefrac{1}{\epsilon}))$.
\end{proof} The following lemma shows that the rounding function does in fact yield a $(1+\epsilon)$-approximation. \begin{lemma}\label{lem2} Given an instance $(I,s)$ with items of size at least $\nicefrac{\epsilon}{14}$ and a rounding function~$R$ fulfilling properties \nameref{prop:a} to \nameref{prop:d}, then $\operatorname{\text{\textsc{opt}}}(I,s^R) \leq (1+\epsilon)\operatorname{\text{\textsc{opt}}}(I,s)$. \end{lemma} \begin{proof} As $(I,s)$ only contains large items, $I_L=I$. Define for every $\ell$ the instances $J_{\ell}=\bigcup_{r=2}^{q(\ell,A)}(\ell,A,r)[R]\cup \bigcup_{r=0}^{q(\ell,B)}(\ell,B,r)[R]$, $J=\bigcup_{\ell\in W}J_{\ell}$ and $K=\bigcup_{\ell\in W}(\ell,A,0)[R]\cup (\ell,A,1)[R]$. We will now prove that the error generated by this rounding is bounded by $\epsilon$. As each solution to $J\cup K$ yields a solution to $J$ and a solution to $K$, we get $\operatorname{\text{\textsc{opt}}}(J\cup K,s^R)\leq \operatorname{\text{\textsc{opt}}}(J,s^{R})+\operatorname{\text{\textsc{opt}}}(K,s^{R})$. For $i\in (\ell,A,0)[R]\cup (\ell,A,1)[R]$, we have $s(i)\leq \max\mengest{s(i')}{i'\in (\ell,A,0)[R]}\leq 2^{-\ell}$ because of property \nameref{prop:a}. We can therefore pack at least $2^{\ell}$ items from $(\ell,A,0)[R]\cup (\ell,A,1)[R]$ into a single bin.
Hence, we get with property \nameref{prop:c}: \begin{align*} &\operatorname{\text{\textsc{opt}}}((\ell,A,0)[R]\cup (\ell,A,1)[R],s^{R})\\ &\leq (|(\ell,A,0)[R]|+|(\ell,A,1)[R]|)\cdot 2^{-\ell}\\ &=2k \end{align*} We can therefore bound $\operatorname{\text{\textsc{opt}}}(K,s^{R})$ as follows: \begin{align*} \operatorname{\text{\textsc{opt}}}(K,s^{R})&\leq \sum_{\ell\in W}\operatorname{\text{\textsc{opt}}}((\ell,A,0)[R]\cup (\ell,A,1)[R],s^{R})\\ &\leq \sum_{\ell\in W}2k\\ &\leq 2(\lfloor\log(\nicefrac{1}{\epsilon})\rfloor+5)k\\ &=2\lfloor\frac{\operatorname{\text{\textsc{size}}}(I)\epsilon}{2(\lfloor\log(\nicefrac{1}{\epsilon})\rfloor+5)}\rfloor\cdot (\lfloor\log(\nicefrac{1}{\epsilon})\rfloor+5)\\ &\leq 2\frac{\operatorname{\text{\textsc{size}}}(I)\epsilon}{2(\lfloor\log(\nicefrac{1}{\epsilon})\rfloor+5)}\cdot (\lfloor\log(\nicefrac{1}{\epsilon})\rfloor+5)\\ &=\epsilon \operatorname{\text{\textsc{size}}}(I)\\ &\leq \epsilon \operatorname{\text{\textsc{opt}}}(I,s) \end{align*} Using property \nameref{prop:b} for each item in $((\ell,X,r+1)[R],s^R)$ we find a unique larger item in $(\ell,X,r)[R]$. Therefore we have for every item in the rounded instance $(J,s^R)$ an item with larger size in instance $(I,s)$ and hence \begin{align*} \operatorname{\text{\textsc{opt}}}(J,s^{R})\leq \operatorname{\text{\textsc{opt}}}(I,s). \end{align*} The optimal value of the rounded solution can be bounded by \begin{align*} \operatorname{\text{\textsc{opt}}}(I,s^{R})\leq \operatorname{\text{\textsc{opt}}}(J,s^{R})+\operatorname{\text{\textsc{opt}}}(K,s^{R})\leq (1+\epsilon)\operatorname{\text{\textsc{opt}}}(I,s). \end{align*} \end{proof} We therefore have a rounding function, which generates only $\mathcal{O}(\nicefrac{1}{\epsilon}\log(\nicefrac{1}{\epsilon}))$ different item sizes and the generated error is bounded by $\epsilon$. \subsection{Rounding Operations} \label{subsec:operations} Let us consider the case where large items arrive and depart in an online fashion.
Formally this is described by a sequence of pairs $(i_{1},A_{1}),\ldots,(i_{n},A_{n})$ where $A_{t}\in \{\operatorname{Insert},\operatorname{Delete}\}$. At each time $t\in \{1,\ldots,n\}$ we need to pack the item $i_{t}$ into the corresponding packing of $i_{1},\ldots,i_{t-1}$ if $A_{t}=\operatorname{Insert}$ or remove the item $i_{t}$ from the corresponding packing of $i_{1},\ldots,i_{t-1}$ if $A_{t}=\operatorname{Delete}$. We will denote the instance $i_{1},\ldots,i_{t}$ at time $t$ by $I(t)$ and the corresponding packing by $B_t$. We will also round our items and denote the rounding function at time $t$ by $R_t$. The large items of $I(t)$ are denoted by $I_{L}(t)$. At time $t$ we are allowed to repack several items with a total size of $\beta\cdot s(i_{t})$ but we intend to keep the migration factor $\beta$ as small as possible. The term $\operatorname{repack}(t)=\sum_{i, B_{t-1}(i)\neq B_t(i)}s(i)$ denotes the total size of the items which are moved at time $t$; the \emph{migration factor} $\beta$ of an algorithm is then defined as $\max_t\menge{\nicefrac{\operatorname{repack}(t)}{s(i_t)}}$. As the value of $\operatorname{\text{\textsc{size}}}$ will also change over time, we define the value $\kappa(t)$ as \begin{align*} \kappa(t)=\frac{\operatorname{\text{\textsc{size}}}(I_{L}(t))\cdot \epsilon}{2(\lfloor \log (\nicefrac{1}{\epsilon})\rfloor+5)}. \end{align*} As shown in Lemma \ref{lem1}, we will make use of the value $k(t):= \lfloor \kappa(t)\rfloor$. We present operations that modify the current rounding $R_t$ and packing $B_t$ with its corresponding \ac{lp}/\ac{ilp} solutions to give a solution for the new instance $I(t+1)$. At every time $t$ the rounding $R_t$ maintains properties $\nameref{prop:a}$ to $\nameref{prop:d}$. Therefore the rounding provides an asymptotic approximation ratio of $1+\epsilon$ (Lemma \ref{lem2}) while maintaining only $\mathcal{O}(\nicefrac{1}{\epsilon}\log(\nicefrac{1}{\epsilon}))$ many groups (Lemma \ref{lem1}).
We will now present a way how to adapt this rounding to a dynamic setting, where items arrive or depart online. Our rounding $R_{t}$ is manipulated by different \emph{operations}, called the \emph{insert, delete, shiftA} and \emph{shiftB} operation. Some ideas behind the operations are inspired by Epstein and Levin \cite{epstein2006robust}. The insert operation is performed whenever a large item arrives and the delete operation is performed whenever a large item departs. The shiftA/shiftB operations are used to modify the number of groups that are contained in the $A$ and $B$ block. As we often need to filter the largest item of a group $g$ belonging to a rounding $R$, we denote this item by $\lambda(g,R)$. \begin{itemize} \item shift: A shift operation takes two groups $(\ell,X_1,r_1)$ and $(\ell,X_2,r_2)$, where $(\ell,X_1,r_1)$ is left of $(\ell,X_2,r_2)$, and a rounding function $R$ and produces a new rounding function $R'$ and packing $B'$ by shifting the largest item from $(\ell,X_2,r_2)$ to $(\ell,X_2,r_2-1)$ and so on until $(\ell,X_1,r_1)$ is reached. \begin{itemize} \item For all groups $g$ left of $(\ell,X_1,r_1)$ or right of $(\ell,X_2,r_2)$ set $g[R']=g[R]$. \item As we move an item out of $(\ell,X_2,r_2)$, set \begin{align*} (\ell,X_2,r_2)[R']=(\ell,X_2,r_2)[R]\setminus \lambda((\ell,X_2,r_2),R). \end{align*} \item As we move an item into $(\ell,X_1,r_1)$, set \begin{align*} (\ell,X_1,r_1)[R']=(\ell,X_1,r_1)[R]\cup \lambda(\operatorname{\text{\textsc{right}}}(\ell,X_1,r_1),R). \end{align*} \end{itemize} Whenever a shift-operation on $(\ell,X_{1},r_{1})$ and $(\ell,X_{2},r_{2})$ is performed, the \ac{lp} solution $x$ and the corresponding \ac{ilp} solution $y$ is updated to $x'$ and $y'$. Let $C_i$ be a configuration containing $\lambda((\ell,X_2,r_2),R)$ with $x_i\geq 1$. Let $C_j = C_i \setminus s(\lambda((\ell,X_2,r_2),R))$ be the configuration without $\lambda((\ell,X_2,r_2),R)$.
Set $x'_j = x_j +1$, $y'_j = y_j +1$ and $x'_i = x_i -1$, $y'_i = y_i -1$. In order to add the new item in $(\ell,X_1,r_1)$, set $x_h' = x_h +1$ and $y'_h = y_h +1$ for the index $h$ with $C_h = \{1:s(\lambda((\ell,X_1,r_1),R)) \}$. The remaining configurations do not change. \begin{figure} \caption{shift with parameters $(\ell,X_1,r_1)$ and $(\ell,X_2,r_2)$} \end{figure} \item Insert: To insert item $i_t$, find the corresponding group $(\ell,X,r)$ with \begin{compactitem} \item $s(i_t)\in (2^{-(\ell+1)},2^{-\ell}]$, \item $\min\mengest{s(i)}{i\in (\ell,X,r-1)}> s(i_t)$ and \item $s(\lambda((\ell,X,r+1),R))\leq s(i_t)$. \end{compactitem} We will then insert $i_t$ into $(\ell,X,r)$ and get the rounding $R'$ by shifting the largest element of $(\ell,X,r)$ to $(\ell,X,r-1)$ and the largest item of $(\ell,X,r-1)$ to $(\ell,X,r-2)$ and so on until $(\ell,A,0)$ is reached. Formally, set $R^*(i_{t})=(\ell,X,r)$ and $R^*(i_j)=R(i_j)$ for $j\neq t$. The rounding function $R'$ is then obtained by applying the shift operation on $R^*$, i.\,e.,\ the new rounding is $R'=\operatorname{shift}((\ell,A,0),(\ell,X,r),R^*)$. In order to pack the new item, let $i$ be the index with $C_i=\{1:s(\lambda((\ell,X,r),R'))\}$, as $i_t$ is rounded to the largest size in $(\ell,X,r)[R]$ after the shift. Place item $i_t$ into a new bin by setting $B'(i_t)=\max_j B(i_j)+1$ and $x_i' = x_i +1$ and $y_i'=y_i +1$. If $|(\ell, A ,0)[R']|=2^{\ell}\cdot k+1$, we have to create a new rounding group $(\ell, A ,-1)$. Additionally we shift the largest item in $(\ell,A,0)[R']$ to the new group $(\ell,A,-1)[R']$. The final rounding $R''$ is then obtained by setting $(\ell,A,r)[R'']=(\ell,A,r-1)[R']$, i.\,e.,\ incrementing the number of each rounding group by $1$. Note that the largest item in $(\ell,A,0)[R']$ is already packed into a bin of its own due to the shift operation. Hence, no change in the packing or the \ac{lp}/\ac{ilp} solution is needed.
The insert operation thus yields a new packing $B'$ (or $B''$) which uses two more bins than the packing $B$. \begin{figure} \caption{Insert $i$ into $(\ell,X,\cdot)$} \end{figure} \item Delete: To delete item $i_t$ from the group $(\ell,X,r)$ with $R(i_t)=(\ell,X,r)$, we remove $i_t$ from this group and move the largest item from $(\ell,X,r+1)$ into $(\ell,X,r)$ and the largest item from $(\ell,X,r+2)$ into $(\ell,X,r+1)$ and so on until $(\ell,B,q(\ell,B))$. Formally the rounding $R'$ is described by the expression $\operatorname{shift}((\ell,X,r),(\ell,B,q(\ell,B)),R^*)$ where \begin{align*} g[R^*]= \begin{cases} (\ell,X,r)[R]\setminus \{i_t\} & g=(\ell,X,r)\\ g[R] & \text{else} \end{cases}. \end{align*} As a single shift operation is used, the delete operation yields a new packing $B'$ which uses one more bin than the packing $B$. For the \ac{lp}/\ac{ilp} solution let $C_i$ be a configuration containing $\lambda((\ell,B,q(\ell,B)),R)$ with $x_i\geq 1$. Let $C_j = C_i \setminus s(\lambda((\ell,B,q(\ell,B)),R))$ be the configuration without the item $\lambda((\ell,B,q(\ell,B)),R)$. Set $x'_j = x_j +1$, $y'_j = y_j +1$ and $x'_i = x_i -1$, $y'_i = y_i -1$. Set $B'(i_j) = B(i_j)$ for all $j \neq t$ in order to remove the item $i_t$ from the packing. \begin{figure} \caption{Delete $i$ from $(\ell,X,\cdot)$} \end{figure} \end{itemize} To control the number of groups in $A$ and $B$ we introduce operations shiftA and shiftB that increase or decrease the number of groups in $A$ respectively $B$. An operation shiftA increases the number of groups in $A$ by $1$ and decreases the number of groups in $B$ by $1$. Operation shiftB does the inverse of shiftA. \begin{itemize} \item shiftA: In order to move a group from $B$ to $A$ we will perform exactly $2^\ell$ times the operation $\operatorname{shift}((\ell,B,0),(\ell,B,q(\ell,B)),R)$ to receive the rounding $R^*$.
Instead of opening a new bin for each of those $2^\ell$ items in every shift operation, we rather open one bin containing all items. Since every item in the corresponding size category has size $\leq 2^{-\ell}$, the items fit into a single bin. The group $(\ell,B,0)$ has now the same size as the groups in $(\ell,A,\cdot)$. We transfer $(\ell,B,0)$ to block $A$. Hence we define for the final rounding $R'$ that $(\ell,A,r)[R']=(\ell,A,r)[R^*]$ for $r=0,\ldots,q(\ell,A)$ and $(\ell,A,q(\ell,A)+1)[R']=(\ell,B,0)[R^*]$ as well as $(\ell,B,r)[R']=(\ell,B,r+1)[R^*]$ for $r=0,\ldots,q(\ell,B)-1$. The resulting packing $B'$ hence uses one more bin than the packing $B$. \begin{figure} \caption{shiftA} \end{figure} \item shiftB: In order to move a group from $A$ to $B$ we will perform exactly $2^\ell$ times the operation $\operatorname{shift}((\ell,A,0),(\ell,A,q(\ell,A)),R)$ to receive the rounding $R^*$. As before in shiftA, we open a single bin containing all of the $2^\ell$ items. The group $(\ell,A,q(\ell,A))$ has now the same size as the groups in $(\ell,B,\cdot)$. We transfer $(\ell,A,q(\ell,A))$ to block $B$. Similar to shiftA we define for the final rounding $R'$ that $(\ell,A,r)[R']=(\ell,A,r)[R^*]$ for $r=0,\ldots,q(\ell,A)-1$ and $(\ell,B,0)[R']=(\ell,A,q(\ell,A))[R^*]$ as well as $(\ell,B,r+1)[R']=(\ell,B,r)[R^*]$. The resulting packing $B'$ hence uses one more bin than the packing $B$. \end{itemize} \begin{lemma} \label{lem4} Let $R$ be a rounding function fulfilling properties $\nameref{prop:a}$ to $\nameref{prop:d}$. Applying one of the operations insert, delete, shiftA or shiftB on $R$ results in a rounding function $R'$ fulfilling properties $\nameref{prop:a}$ to $\nameref{prop:d}$. \end{lemma} \begin{proof} Property $\nameref{prop:a}$ is always fulfilled as no item is moved between different size categories and the insert operation inserts an item into its appropriate size category. 
As the order of items never changes and the insert operation inserts an item into the appropriate place, property $\nameref{prop:b}$ also holds. For properties $\nameref{prop:c}$ and $\nameref{prop:d}$ we first note that the operation $\operatorname{shift}(g,g',R)$ increases the number of items in $g$ by $1$ and decreases the number of items in $g'$ by $1$. The insert operation consists of adding a new item to a group $g$ followed by a $\operatorname{shift}((\ell,A,0),g,R)$ operation. Hence the number of items in every group except for $(\ell,A,0)$ (which is increased by $1$) remains the same. The delete operation consists of removing an item from a group $g$ followed by a $\operatorname{shift}(g,(\ell,B,q(\ell,B)),R)$ operation. Therefore the number of items in all groups except for $(\ell,B,q(\ell,B))$ (which is decreased by $1$) remains the same. As the numbers of items in $(\ell,A,0)$ and $(\ell,B,q(\ell,B))$ are treated separately and may be smaller than $2^{\ell}\cdot k$ respectively $2^{\ell}\cdot (k-1)$, the properties $\nameref{prop:c}$ and $\nameref{prop:d}$ are always fulfilled for the insert and the delete operation. Concerning the shiftA operation we increase the number of items in a group $(\ell,B,0)$ by $2^\ell$. Therefore it now contains $2^{\ell}(k-1)+2^\ell= 2^{\ell}\cdot k$ items, which equals the number of items in groups of block $A$. As this group is now moved to block $A$, the properties $\nameref{prop:c}$ and $\nameref{prop:d}$ are fulfilled. Symmetrically the shiftB operation decreases the number of items in a group $(\ell,A,q(\ell,A))$ by $2^{\ell}$. Therefore the number of items in the group is now $2^{\ell}\cdot k - 2^\ell=2^{\ell}\cdot (k-1)$, which equals the number of items in the groups of block $B$. As this group is now moved to block $B$, the properties $\nameref{prop:c}$ and $\nameref{prop:d}$ are fulfilled.
\end{proof} According to Lemma \ref{lem1} the rounded instance $(I,s^R)$ has $\mathcal{O}(\nicefrac{1}{\epsilon}\log (\nicefrac{1}{\epsilon}))$ different item sizes (given a suitable $k$). Using the \ac{lp} formulation of Eisemann \cite{eisemann1957trim}, the resulting \ac{lp} called $LP(I,s^R)$ has $m = \mathcal{O}(\nicefrac{1}{\epsilon}\log(\nicefrac{1}{\epsilon}))$ constraints. We say a packing $B$ \emph{corresponds} to a rounding $R$ and an integral solution $y$ of the \ac{ilp} if all items in $(I,s^R)$ are packed by $B$ according to $y$. \begin{lemma} \label{lem5} Applying any of the operations insert, delete, shiftA or shiftB on a rounding function $R$ and \ac{ilp} solution $y$ with corresponding packing $B$ defines a new rounding function $R'$ and a new integral solution $y'$. Solution $y'$ is a feasible solution of $LP(I,s^{R'})$. \end{lemma} \begin{proof} We have to analyze how the \ac{lp} for instance $(I,s^{R'})$ changes in comparison to the \ac{lp} for instance $(I,s^R)$. \\ {\bf Shift Operation:} A single $\operatorname{shift}(g_1,g_2,R)$ operation moves one item from each group $g$ between $g_1$ and $g_2$ into $g$ and one item out of $g$. As no item is moved out of $g_1$ and no item is moved into $g_2$, the number of items in $g_1$ is increased by $1$ and the number of items in $g_2$ is decreased by $1$. The right hand side of the $LP(I,s^R)$ is defined by the cardinalities $|g[R]|$ of the rounding groups $g$ in $R$. As only the cardinalities of $g_1$ and $g_2$ change by $\pm 1$ the right hand side changes accordingly to $\pm 1$ in the corresponding components of $y$. The moved item from $g_2$ is removed from the configuration and a new configuration containing the new item of $g_1$ is added. The \ac{lp} and \ac{ilp} solutions $x$ and $y$ are being modified such that $\lambda(g_2,R)$ is removed from its configuration and a new configuration is added such that the enhanced right hand side of $g_1$ is covered. 
Since the largest item $\lambda(g,R)$ of every group $g$ between $g_1$ and $g_2$ is shifted to its left group, the size $s^{R'}(i)$ of item $i\in g[R]$ is defined by $s^{R'}(i)=s(\iota(g,R))$, where $\iota(g,R)$ is the second largest item of $g[R]$. Therefore each item in $(I,s^{R'})$ is rounded to a smaller or equal value as $s(\iota(g,R))\leq s(\lambda(g,R))$. All configurations of $(I,s^R)$ can thus be transformed into feasible configurations of $(I,s^{R'})$.\\ {\bf Insert Operation:} The insert operation consists of inserting the new item into its corresponding group $g$ followed by a shift operation. Inserting the new item into $g$ increases the right hand side of the \ac{lp} by $1$. To cover the increased right hand side, we add a new configuration $\{1:s^{R'}(i)\}$ containing only the new item. In order to reflect the change in the \ac{lp} solution, the new item is added into an additional bin. The remaining changes are due to the shift operation already treated above.\\ {\bf Delete Operation:} The delete operation consists of removing an item $i$ from its corresponding group $g$ followed by a shift operation. Removing the new item from $g$ decreases the right hand side of the \ac{lp} by $1$. The current \ac{lp} and \ac{ilp} solutions $x$ and $y$ do not need to be changed to cover the new right hand side. The remaining changes are due to the shift operation already treated above.\\ {\bf shiftA/shiftB Operation:} As the shiftA and shiftB operations consist only of repeated use of the shift operation, the correspondence between the packing and the \ac{lp}/\ac{ilp} solution follows simply by induction. \end{proof} \subsection{Algorithm for Dynamic Bin Packing} \label{sec:dynamicbinpacking} We will use the operations from the previous section to obtain a dynamic algorithm for bin packing\xspace with respect to large items. The operations insert and delete are designed to process the input depending on whether an item is to be inserted or removed.
Keep in mind that the parameter $k = \lfloor \kappa\rfloor = \left\lfloor\frac{\operatorname{\text{\textsc{size}}}(I_L)\cdot \epsilon}{2(\lfloor \log(\nicefrac{1}{\epsilon})\rfloor +5)} \right\rfloor$ changes over time as $\operatorname{\text{\textsc{size}}}(I_L)$ may increase or decrease. In order to fulfill the properties $\nameref{prop:c}$ and $\nameref{prop:d}$, we need to adapt the number of items per group whenever $k$ changes. The shiftA and shiftB operations are thus designed to manage the dynamic number of items in the groups as $k$ changes. Note that a group in the $A$-block with parameter $k$ has by definition the same number of items as a group in the $B$-block with parameter $k-1$ assuming they are in the same size category. If $k$ increases, the former $A$ block is treated as the new $B$ block in order to fulfill the properties $\nameref{prop:c}$ and $\nameref{prop:d}$ while a new empty $A$ block is introduced. To be able to rename the blocks, the $B$ block needs to be empty. Accordingly the $A$ block needs to be empty if $k$ decreases in order to treat the old $B$ block as new $A$ block. Hence we need to make sure that there are no groups in the $B$-block if $k$ increases and vice versa, that there are no groups in the $A$-block if $k$ decreases. We denote the number of all groups in the $A$-blocks at time t by $A(t)$ and the number of groups in $B$-blocks at time $t$ by $B(t)$. To make sure that the $B$-block (respectively the $A$-block) is empty when $k$ increases (decreases) the ratio $\frac{A(t)}{A(t)+B(t)}$ needs to correlate to the fractional digits of $\kappa(t)$ at time $t$ denoted by $\Delta(t)$. Hence we partition the interval $[0,1)$ into exactly $A(t)+B(t)$ smaller intervals $J_i=\left[\frac{i}{A(t)+B(t)},\frac{i+1}{A(t)+B(t)}\right)$. We will make sure that $\Delta(t)\in J_i$ iff $\frac{A(t)}{A(t)+B(t)}\in J_i$. Note that the term $\frac{A(t)}{A(t)+B(t)}$ is $0$ if the $A$-block is empty and the term is $1$ if the $B$-block is empty. 
This way, we can make sure that as soon as $k(t)$ increases, the number of $B$-blocks is close to $0$ and as soon as $k(t)$ decreases, the number of $A$-blocks is close to $0$. Therefore, the $A,B$-block can be renamed whenever $k(t)$ changes. The algorithm uses shiftA and shiftB operations to adjust the number of $A$- and $B$-blocks. Recall that a shiftA operation reduces the number of groups in the $B$-block by $1$ and increases the number of groups in the $A$-block by $1$ (shiftB works vice versa). Let $d$ be the number of shiftA/shiftB operations that need to be performed to adjust $\frac{A(t)}{A(t)+B(t)}$. \begin{figure} \caption{Before Insert} \caption{After Insert} \caption{Comparison of the situation before and after an Insert Operation} \end{figure} In the following algorithm we make use of an algorithm called \textsc{improve}, which was developed in \cite{jansen2013binpacking} to reduce the number of used bins. Using \textsc{improve}(x) on a packing $B$ with approximation guarantee $\max_i B(i) \leq (1+\bar{\epsilon})\operatorname{\text{\textsc{opt}}} + C$ for some $\bar{\epsilon} = \mathcal{O}(\epsilon)$ and some additive term $C$ yields a new packing $B'$ with approximation guarantee $\max_i B(i) \leq (1+\bar{\epsilon})\operatorname{\text{\textsc{opt}}} + C-x$. We use the operations in combination with the improve algorithm to obtain a fixed approximation guarantee. 
\begin{algo}[\ac{afptas} for large items] \label{alg-afptas} \ \begin{small} \begin{algorithm}[H] \TitleOfAlgo{Insertion} \If{SIZE($I(t)) < (m+2)(\nicefrac{1}{\delta} +2)$ or SIZE$(I(t)) < 8 (\nicefrac{1}{\delta} +1)$}{use offline Bin Packing} \Else{ \textsc{improve}(2); insert($i$)\; \tcp{Shifting to the correct interval} Let $J_i$ be the interval containing $\Delta(t)$\; Let $J_j$ be the interval containing $\frac{A(t)}{A(t)+B(t)}$\; Set $d=i-j$\; \If(\tcp*[h]{Modulo $A(t)+B(t)$ when $k$ increases}){$k(t) > k(t-1)$}{ $d$ = $d$ + $(A(t)+B(t))$\; } \tcp{Shifting $d$ groups from $B$ to $A$} \For{$p :=0$ to $|d|-1$}{ \If{i+p = A(t) + B(t)}{Rename($A,B$);} \textsc{improve}(1); shiftA\; } } \end{algorithm} \end{small} \begin{small} \begin{algorithm}[H] \TitleOfAlgo{Deletion} \If{SIZE($I(t)) < (m+2)(\nicefrac{1}{\delta} +2)$ or SIZE$(I(t)) < 8 (\nicefrac{1}{\delta} +1)$}{use offline Bin Packing} \Else{ \tcp{Departing item $i$} \textsc{improve}(4); delete($i$)\; \textsc{ReduceComponents}\; \tcp{} \tcp{Shifting to the correct interval} Let $J_i$ be the interval containing $\Delta(t)$\; Let $J_j$ be the interval containing $\frac{A(t)}{A(t)+B(t)}$\; Set $d=i-j$\; \If(\tcp*[h]{Modulo $A(t)+B(t)$ when $k$ decreases}){$k(t) < k(t-1)$}{ d = d - (A(t)+B(t))\; } \tcp{Shifting $d$ groups from A to B} \For{$p :=0$ to $|d|-1$}{ \If{i-p = 0}{Rename(A,B);} \textsc{improve}(3); shiftB\; } } \end{algorithm} \end{small} \end{algo} Note that as exactly $d$ groups are shifted from $A$ to $B$ (or $B$ to $A$) we have by definition that $\Delta(t) \in \left[ \frac{A(t)}{A(t)+B(t)}, \frac{A(t)+1}{A(t)+B(t)}\right)$ at the end of the algorithm. Note that $d$ can be bounded by $11$. \begin{lemma} \label{lem:disbounded} At most $11$ groups are shifted from $A$ to $B$ (or $B$ to $A$) in Algorithm \ref{alg-afptas}. 
\end{lemma} \begin{proof} Since the value $|\operatorname{\text{\textsc{size}}}(I(t-1))-\operatorname{\text{\textsc{size}}}(I(t))|$ changes at most by $1$ we can bound $|\kappa(t-1) - \kappa(t)|$ by $D = \frac{\epsilon}{2(\lfloor \log(1/\epsilon)\rfloor +5)}\leq \frac{\epsilon}{\log(\nicefrac{1}{\epsilon})+5}$ to obtain the change in the fractional part. By Lemma \ref{lem1} the number of intervals (=the number of groups) is bounded by $(\frac{8}{\epsilon}+2)(\log (\nicefrac{1}{\epsilon})+5)$. Using $\Delta(t-1) \in [ \frac{A(t-1)}{A(t-1)+B(t-1)}, \frac{A(t-1)+1}{A(t-1)+B(t-1)})$ and the fact that the number of groups $A(t-1)+B(t-1)$ increases or decreases at most by $1$, we can give a bound for the parameter $d$ in both cases by \begin{align*} &d \leq \frac{D}{\text{interval length}} +1 = D \cdot \text{\#intervals} +1 \leq \\ &\left((\frac{\epsilon}{\log (\nicefrac{1}{\epsilon})+5})\cdot (\frac{8}{\epsilon}+2)\cdot (\log(\nicefrac{1}{\epsilon})+5)\right)+1=\\ &8+2\epsilon +1 < 11 \end{align*} Hence, the number of shiftA/shiftB operations is bounded by $11$. \end{proof} \begin{lemma} \label{lem:binpackingalg} Every rounding function $R_t$ produced by Algorithm \ref{alg-afptas} fulfills properties $\nameref{prop:a}$ to $\nameref{prop:d}$ with parameter $k(t)= \left\lfloor \frac{\operatorname{\text{\textsc{size}}}(I_{L})\cdot \epsilon}{2(\lfloor \log(1/\epsilon)\rfloor +5)}\right\rfloor$. \end{lemma} \begin{proof} Since Algorithm \ref{alg-afptas} uses only the operations insert, delete, shiftA and shiftB, the properties $\nameref{prop:a}$ to $\nameref{prop:d}$ are always fulfilled by Lemma $\ref{lem4}$ and the \ac{lp}/\ac{ilp} solutions $x,y$ correspond to the rounding function by Lemma $\ref{lem5}$. Furthermore, the algorithm is designed such that whenever $k$ increases the $B$-block is empty and the $A$-block is renamed to be the new $B$-block. Whenever $k$ decreases the $A$-block is empty and the $B$-block is renamed to be the new $A$-block.
Therefore the number of items in the groups is dynamically adapted to match with the parameter $k$. \end{proof} \subsection{Large items} In this section we prove that Algorithm \ref{alg-afptas} is a dynamic robust \ac{afptas} for the bin packing\xspace problem if all items have size at least $\nicefrac{\epsilon}{14}$. The treatment of small items is described in Section \ref{sec:small} and the general case is described in Section \ref{sec:general}. We will prove that the migration between packings $B_t$ and $B_{t+1}$ is bounded by $\mathcal{O}(\nicefrac{1}{\epsilon^3}\log(\nicefrac{1}{\epsilon}))$ and that we can guarantee an asymptotic approximation ratio such that $\max B_{t}(i) \leq (1+2\Delta) \operatorname{\text{\textsc{opt}}}(I(t),s) + \text{poly}(\nicefrac{1}{\Delta})$ for a parameter $\Delta = \mathcal{O}(\epsilon)$ and for every $t \in \mathbb{N}$. The Algorithm \textsc{improve} was developed in \cite{jansen2013binpacking} to improve the objective value of an \ac{lp} with integral solution $y$ and corresponding fractional solution $x$. For a vector $z \in \mathbb{R}^n$ let $V(z)$ be the set of all integral vectors $v = (v_1, \ldots v_n)^T$ such that $0 \leq v_i \leq z_i$. Let $x$ be an approximate solution of the \ac{lp} $\min \mengest{\nor{x}_1}{Ax \geq b, x \geq 0 }$ with $m$ inequalities and let $\nor{x}_1 \leq (1+ \delta) \operatorname{\text{\textsc{lin}}}$ and $\nor{x}_1 \geq 2 \alpha (1/ \delta +1)$, where $\operatorname{\text{\textsc{lin}}}$ denotes the fractional optimum of the \ac{lp} and $\alpha\in \mathbb{N}$ is part of the input of the algorithm (see Jansen and Klein \cite{jansen2013binpacking}). Let $y$ be an approximate integer solution of the \ac{lp} with $\nor{y}_1 \leq \operatorname{\text{\textsc{lin}}} +2C$ for some value $C \geq \delta \operatorname{\text{\textsc{lin}}}$ and with $\nor{y}_1 \geq (m+2)(1/\delta +2)$. Suppose that both $x$ and $y$ have only $\leq C$ non-zero components. 
For every component $i$ we suppose that $y_i \geq x_i$. Furthermore we are given indices $a_1, \ldots ,a_K$, such that the non-zero components $y_{a_j}$ are sorted in non-decreasing order, i.\,e., $y_{a_1} \leq \ldots \leq y_{a_K}$. \begin{algo}[\textsc{improve}]\label{improve} \ \begin{enumerate} \item Set $x^{var} := 2 \frac{ \alpha(1 / \delta +1)}{\nor{x}}x$, $x^{fix} := x - x^{var}$ and $b^{var} = b - A(x^{fix})$ \item Compute an approximate solution $\hat{x}$ of the \ac{lp} $\min \mengest{\nor{x}_1}{Ax \geq b^{var}, x\geq 0 }$ with ratio $(1+ \delta/2)$ \item If $\nor{x^{fix} + \hat{x}}_1 \geq \nor{x}_1$ then set $x' = x$, $\hat{y} = y$ and goto step 9 \item Choose the largest $\ell$ such that the sum of the smallest components $y_1, \ldots , y_{\ell}$ is bounded by $\sum_{1\leq i \leq \ell} y_{a_i} \leq (m+2)(1/ \delta +2)$ \item For all $i $ set $\bar{x}^{fix}_{i} = \begin{cases} 0 & \text{if }i= a_j, j \leq \ell \\ x^{fix}_i & \text{else} \end{cases}$ and $\bar{y}_i = \begin{cases} 0 & \text{if }i= a_j, j \leq \ell \\ y_i & \text{else} \end{cases}$ \item Set $\bar{x} = \hat{x} + x_{\ell}$ where $x_{\ell}$ is a vector consisting of the components $x_{a_1}, \ldots ,x_{a_{\ell}}$. Reduce the number of non-zero components to at most $m+1$. \item $x' = \bar{x}^{fix} + \bar{x}$ \item For all non-zero components $i$ set $\hat{y}_i = \max \{\lceil x'_i \rceil , \bar{y}_i \}$ \item If possible choose $d \in V(\hat{y}-x')$ such that $\nor{d}_1 = \alpha (1/ \delta +1)$ otherwise choose $d \in V(\hat{y}-x')$ such that $\nor{d}_1 < \alpha (1/ \delta +1)$ is maximal. \item Return $y' = \hat{y} -d$ \end{enumerate} \end{algo} In the following we prove that the algorithm \textsc{improve} applied to the bin packing\xspace \ac{ilp} actually generates a new improved packing $B'$ from the packing $B$ with corresponding \ac{lp} and \ac{ilp} solutions $x'$ and $y'$. 
We therefore use Theorem \ref{thm-improve} and Corollary \ref{cor-improve} that were proven in \cite{jansen2013binpacking}. \begin{theorem}\label{thm-improve} Let $x$ be a solution of the \ac{lp} with $\nor{x}_1 \leq (1+\delta) \operatorname{\text{\textsc{lin}}}$ and furthermore $\nor{x}_1 \geq 2 \alpha (1/ \delta +1)$. Let $y$ be an integral solution of the \ac{lp} with $\nor{y}_1 \leq \operatorname{\text{\textsc{lin}}} +2C$ for some value $C \geq \delta \operatorname{\text{\textsc{lin}}}$ and with $\nor{y}_1 \geq (m+2)(1/\delta +2)$. Solutions $x$ and $y$ have the same number of non-zero components and for each component we have $x_i \leq y_i$. The Algorithm $\textsc{improve}(\alpha)$ then returns a fractional solution $x'$ with $\nor{x'}_1 \leq (1+ \delta)\operatorname{\text{\textsc{lin}}} -\alpha$ and an integral solution $y'$ where one of the two properties holds: $\nor{y'}_1 = \nor{y}_1 - \alpha$ or $\nor{y'}_1 = \nor{x'}_1 + C$. Both $x'$ and $y'$ have at most $C$ non-zero components and the distance between $y'$ and $y$ is bounded by $\nor{y'-y}_1 = \mathcal{O}(\frac{m + \alpha}{\delta})$. \end{theorem} \begin{corollary}\label{cor-improve} Let $\nor{x}_1 = (1+ \delta')\operatorname{\text{\textsc{lin}}}$ for some $\delta' \geq \delta$ and $\nor{x}_1 \geq 2 \alpha (1/ \delta +1)$ and let $\nor{y}_1 \leq \operatorname{\text{\textsc{lin}}} + 2C$ for some $C \geq \delta'\operatorname{\text{\textsc{lin}}}$ and $\nor{y}_1 \geq (m+2)(1/\delta +2)$. Solutions $x$ and $y$ have the same number of non-zero components and for each component we have $x_i \leq y_i$. Then Algorithm $\textsc{improve}(\alpha)$ returns a fractional solution $x'$ with $\nor{x'}_1 \leq \nor{x}_1 - \alpha = (1+ \delta')\operatorname{\text{\textsc{lin}}} - \alpha$ and integral solution $y'$ where one of the two properties holds: $\nor{y'}_1 = \nor{y}_1 - \alpha$ or $\nor{y'}_1 = \nor{x}_1 - \alpha + C$.
Both, $x'$ and $y'$ have at most $C$ non-zero components and the distance between $y'$ and $y$ is bounded by $\nor{y'-y}_1 \in \mathcal{O}(\frac{m + \alpha}{\delta})$. \end{corollary} Let $\Delta = \epsilon + \delta + \epsilon \delta$ and $C = \Delta \operatorname{\text{\textsc{opt}}}(I,s) + m$. \begin{theorem}\label{thm-packing} Given a rounding function $R$ and an \ac{lp} defined for $(I,s^{R})$, let $x$ be a fractional solution of the \ac{lp} with $\nor{x}_1 \leq (1+ \Delta) \operatorname{\text{\textsc{opt}}}(I,s)$, $\nor{x}_1 \geq 2\alpha(1/\delta +1)$ and $\nor{x}_1 = (1+\delta')\operatorname{\text{\textsc{lin}}}(I,s^{R})$ for some $\delta'>0$. Let $y$ be an integral solution of the \ac{lp} with $\nor{y}_1 \geq (m+2)(1/\delta +2)$ and corresponding packing $B$ such that $\max_i B (i) = \nor{y}_1 \leq (1+ 2\Delta) \operatorname{\text{\textsc{opt}}}(I,s)+m$. Suppose $x$ and $y$ have the same number $\leq C$ of non-zero components and for all components $i$ we have $y_i \geq x_i$. Then Algorithm $\textsc{improve}(\alpha)$ on $x$ and $y$ returns a new fractional solution $x'$ with $\nor{x'}_1 \leq (1+ \Delta) \operatorname{\text{\textsc{opt}}}(I,s) - \alpha$ and also a new integral solution $y'$ with corresponding packing $B'$ such that \begin{align*} \max_i B' (i) =\nor{y'}_1 \leq (1+ 2 \Delta) \operatorname{\text{\textsc{opt}}}(I,s) +m- \alpha. \end{align*} Further, both solutions $x'$ and $y'$ have the same number $\leq C$ of non-zero components and for each component we have $x'_i \leq y'_i$. The number of changed bins from the packing $B$ to the packing $B'$ is bounded by $\mathcal{O}(\frac{m}{\delta})$. \end{theorem} \begin{proof} To use Theorem \ref{thm-improve} and Corollary \ref{cor-improve} we have to prove that certain conditions follow from the requisites of Theorem \ref{thm-packing}. We have $\max_i B (i) = \nor{y}_1 \leq (1+ 2\Delta) \operatorname{\text{\textsc{opt}}}(I,s)+m$ by condition. 
Since $ \operatorname{\text{\textsc{opt}}}(I,s) \leq \operatorname{\text{\textsc{opt}}}(I,s^{R})$ we obtain for the integral solution $y$ that $\nor{y}_1 \leq 2\Delta \operatorname{\text{\textsc{opt}}}(I,s)+m + \operatorname{\text{\textsc{opt}}}(I,s^{R}) \leq 2 \Delta \operatorname{\text{\textsc{opt}}}(I,s)+ m + \operatorname{\text{\textsc{lin}}}(I,s^{R}) +m$. Hence by definition of $C$ we get $\nor{y}_1 \leq \operatorname{\text{\textsc{lin}}}(I,s^{R}) + 2C$. This is one requirement to use Theorem \ref{thm-improve} or Corollary \ref{cor-improve}. We distinguish the cases where $\delta' \leq \delta$ and $\delta' > \delta$ and look at them separately. Case 1: $\delta' \leq \delta$. For the parameter $C$ we give a lower bound by the inequality $C > \Delta \operatorname{\text{\textsc{opt}}}(I,s) = (\delta + \epsilon + \delta \epsilon)\operatorname{\text{\textsc{opt}}}(I,s)$. Lemma \ref{lem2} shows that $\operatorname{\text{\textsc{opt}}}(I,s^R) \leq (1+\epsilon)\operatorname{\text{\textsc{opt}}}(I,s)$ and therefore yields \begin{align*} &\frac{\delta + \epsilon + \delta \epsilon}{1+ \epsilon} \operatorname{\text{\textsc{opt}}}(I,s^R) = \frac{(1+\delta)(1+\epsilon)-1}{1+\epsilon} \operatorname{\text{\textsc{opt}}}(I,s^R)\\ &= (1+\delta)\operatorname{\text{\textsc{opt}}}(I,s^R) - \frac{1}{1+\epsilon}\operatorname{\text{\textsc{opt}}}(I,s^R)\\ &\geq \delta \operatorname{\text{\textsc{opt}}}(I,s^R) \geq \delta \operatorname{\text{\textsc{lin}}}(I,s^R) \end{align*} and hence $C > \delta \operatorname{\text{\textsc{lin}}}(I,s^R)$. We can therefore use Theorem \ref{thm-improve}. Algorithm \textsc{improve} returns by Theorem \ref{thm-improve} a fractional solution $x'$ with $\nor{x'}_1 \leq (1+\delta)\operatorname{\text{\textsc{lin}}}(I,s^{R})-\alpha \leq (1+\delta)\operatorname{\text{\textsc{opt}}}(I,s^{R})-\alpha$ and an integral solution $y'$ with $\nor{y'}_1 \leq \nor{x'}_1 + C$ or $\nor{y'}_1 \leq \nor{y}_1 - \alpha$.
Using that $\operatorname{\text{\textsc{opt}}}(I,s^R) \leq (1+\epsilon)\operatorname{\text{\textsc{opt}}}(I,s)$ we can conclude $\nor{x'}_1 \leq (1+ \delta)(1+ \epsilon)\operatorname{\text{\textsc{opt}}}(I,s) - \alpha = (1+\Delta)\operatorname{\text{\textsc{opt}}}(I,s) - \alpha$. In the case where $\nor{y'}_1 \leq \nor{x'}_1 + C$ we can bound the number of bins of the new packing $B'$ by $\max_i B' (i) = \nor{y'}_1 \leq \nor{x'}_1 + C \leq (1 + 2 \Delta) \operatorname{\text{\textsc{opt}}}(I,s)+ m - \alpha$. In the case that $\nor{y'}_1 \leq \nor{y}_1 - \alpha$ we obtain $\max_i B' (i) = \nor{y'}_1 \leq \nor{y}_1 - \alpha \leq (1+ 2\Delta) \operatorname{\text{\textsc{opt}}}(I,s) +m- \alpha$. Furthermore we know by Theorem \ref{thm-improve} that $x'$ and $y'$ have at most $C$ non-zero components. Case 2: $\delta' > \delta$. First we prove that $C$ is bounded from below. Since $\nor{x}_1 = (1+\delta') \operatorname{\text{\textsc{lin}}}(I,s^R) \leq (1+ \Delta) \operatorname{\text{\textsc{opt}}}(I,s)\leq (1+ \Delta) \operatorname{\text{\textsc{opt}}}(I,s^R) \leq (1+ \Delta) (\operatorname{\text{\textsc{lin}}}(I,s^R) + \frac{m}{2}) \leq \operatorname{\text{\textsc{lin}}}(I,s^R) +C$ we obtain that $C\geq \delta' \operatorname{\text{\textsc{lin}}}(I,s^R)$, which is a requirement to use Corollary \ref{cor-improve}. By using Algorithm \textsc{improve} on solutions $x$ with $\nor{x}_1 = (1+\delta')\operatorname{\text{\textsc{lin}}}(I,s^{R})$ and $y$ with $\nor{y}_1 \leq \operatorname{\text{\textsc{lin}}}(I,s^{R}) + 2C$ we obtain by Corollary \ref{cor-improve} a fractional solution $x'$ with $\nor{x'}_1 \leq \nor{x}_1 - \alpha \leq (1+\Delta)\operatorname{\text{\textsc{opt}}}(I,s) - \alpha$ and an integral solution $y'$ with either $\nor{y'}_1 \leq \nor{y}_1 - \alpha$ or $\nor{y'}_1 \leq \nor{x}_1 + C - \alpha$.
So for the new packing $B'$ we can guarantee that $\max_i B' (i) = \nor{y'}_1 \leq \nor{y}_1 - \alpha = \max_i B (i) - \alpha \leq (1+ 2\Delta) \operatorname{\text{\textsc{opt}}}(I,s) +m - \alpha$ if $\nor{y'}_1 \leq \nor{y}_1 - \alpha$. In the case that $\nor{y'}_1 \leq \nor{x}_1 + C - \alpha$, we can guarantee that $\max_i B' (i) = \nor{y'}_1 \leq \nor{x}_1 + C - \alpha \leq (1+ \Delta) \operatorname{\text{\textsc{opt}}}(I,s)+ C - \alpha \leq (1+ 2\Delta) \operatorname{\text{\textsc{opt}}}(I,s) +m - \alpha$. Furthermore we know by Corollary \ref{cor-improve} that $x'$ and $y'$ have at most $C$ non-zero components. Theorem \ref{thm-improve} as well as Corollary \ref{cor-improve} state that the distance $\nor{y'-y}_1$ is bounded by $\mathcal{O}(\nicefrac{m}{\delta})$. Since $y$ corresponds directly to the packing $B$ and the new integral solution $y'$ corresponds to the new packing $B'$, we know that only $\mathcal{O}(\nicefrac{m}{\delta})$ bins of $B$ need to be changed to obtain packing $B'$. \end{proof} In order to prove correctness of Algorithm \ref{alg-afptas}, we will make use of the auxiliary Algorithm \ref{reducecomponents} (\textsc{ReduceComponents}). Due to a delete-operation, the value of the optimal solution $\operatorname{\text{\textsc{opt}}}(I,s)$ might decrease. Since the number of non-zero components has to be bounded by $C = \Delta \operatorname{\text{\textsc{opt}}}(I,s) + m$, the number of non-zero components might have to be adjusted down. The following algorithm describes how a fractional solution $x'$ and an integral solution $y'$ with reduced number of non-zero components can be computed such that $\nor{y-y'}_1$ is bounded. The idea behind the algorithm is also used in the \textsc{Improve} algorithm. The smallest $m+2$ components are reduced to $m+1$ components using a standard technique presented for example in \cite{beling1998}.
Arbitrary many components of $x'$ can thus be reduced to $m+1$ components without making the approximation guarantee worse. \begin{algo}[\textsc{ReduceComponents}]\label{reducecomponents} \ \begin{enumerate} \item Choose the smallest non-zero components $y_{a_1}, \ldots , y_{a_{m+2}}$. \item If $\sum_{1\leq i \leq m+2} y_{a_i} \geq (1/ \Delta +2)(m+2)$ then return $x=x'$ and $y=y'$ \item Reduce the components $x_{a_1}, \ldots , x_{a_{m+2}}$ to $m+1$ components $\hat{x}_{b_1}, \ldots , \hat{x}_{b_{m+1}}$ with $\sum_{j=1}^{m+2}x_{a_{j}}=\sum_{j=1}^{m+1}\hat{x}_{b_{j}}$. \item For all $i $ set $x'_i = \begin{cases} \hat{x}_i +x_i & \text{if $i= b_j$ for some } j \leq m \\ 0 & \text{if $i= a_j$ for some } j \leq m+1 \\ x_i & \text{else} \end{cases}$ and $\hat{y}_i = \begin{cases} \lceil \hat{x}_i + x'_i \rceil & \text{if $i= b_j$ for some } j \leq m \\ 0 & \text{if $i= a_j$ for some } j \leq m+1 \\ y_i & \text{else} \end{cases}$ \item If possible choose $d \in V(\hat{y}-x')$ such that $\nor{d}_1 = m+1$ otherwise choose $d \in V(\hat{y}-x')$ such that $\nor{d}_1 < m+1$ is maximal. \item Return $y' = \hat{y} -d$ \end{enumerate} \end{algo} The following theorem shows that the algorithm above yields a new fractional solution $x'$ and a new integral solution $y'$ with a reduced number of non-zero components. \begin{theorem} \label{thm:reduce} Let $x$ be a fractional solution of the \ac{lp} with $\nor{x}_1 \leq (1+ \Delta) \operatorname{\text{\textsc{opt}}}(I,s)$. Let $y$ be an integral solution of the \ac{lp} with $\nor{y}_1 \leq (1+ 2\Delta) \operatorname{\text{\textsc{opt}}}(I,s)+m$. Suppose $x$ and $y$ have the same number $\leq C+1$ of non-zero components and for all components $i$ we have $y_i \geq x_i$. 
Using the Algorithm $\textsc{ReduceComponents}$ on $x$ and $y$ returns a new fractional solution $x'$ with $\nor{x'}_1 \leq (1+ \Delta) \operatorname{\text{\textsc{opt}}}(I,s)$ and a new integral solution $y'$ with $\nor{y'}_1 \leq (1+ 2 \Delta) \operatorname{\text{\textsc{opt}}}(I,s) +m$. Further, both solutions $x'$ and $y'$ have the same number of non-zero components and for each component we have $x'_i \leq y'_i$. The number of non-zero components can now be bounded by $\leq C$. Furthermore, we have that $\nor{y-y'}_1 \leq 2\cdot (1/\Delta +3) (m+2)$. \end{theorem} \begin{proof} Case 1: $\sum_{1\leq i \leq m+2} y_{a_i} \geq (1/ \Delta +2)(m+2)$. We will show that in this case, $x$ and $y$ already have $\leq C$ non-zero components. In this case the algorithm returns $x' = x$ and $y' =y$. Since $\sum_{1\leq i \leq m+2} y_{a_i} \geq (1/ \Delta +2)(m+2)$ the components $y_{a_1}, \ldots , y_{a_{m+2}}$ have an average size of at least $(1/ \Delta +2)$ and since $y_{a_1}, \ldots , y_{a_{m+2}}$ are the smallest components, all components of $y$ have average size at least $(1/ \Delta +2)$. The size $\nor{y}_1$ is bounded by $(1+ 2\Delta) \operatorname{\text{\textsc{opt}}}(I,s)+m$. Hence the number of non-zero components can be bounded by $\frac{(1+ 2\Delta) \operatorname{\text{\textsc{opt}}}(I,s)+m}{\nicefrac{1}{\Delta}+2} \leq \Delta \operatorname{\text{\textsc{opt}}}(I,s)+ \Delta m \leq C$. Case 2: $\sum_{1\leq i \leq m+2} y_{a_i} < (1/ \Delta +2)(m+2)$. We have to prove different properties for the new fractional solution $x'$ and the new integral solution $y'$. \textbf{Number of non-zero components}: The only change in the number of non-zero components is in step 3 of the algorithm, where the number of non-zero components is reduced by $1$. As $x,y$ have at most $C+1$ non-zero components, $x',y'$ have at most $C$ non-zero components. In step 4 of the algorithm, $\hat{y}$ is defined such that $\hat{y}_i \geq x'_i$.
In step 5 of the algorithm $d$ is chosen such that $\hat{y}_i -d_i \geq x'_i$. Hence we obtain that $y'_i = \hat{y}_i -d_i \geq x'_i$. \textbf{Distance between $y$ and $y'$}: The only steps where components of $y$ change are steps 4 and 5. The distance between $y$ and $\hat{y}$ is bounded by the sum of the components that are set to $0$, i.\,e., $\sum_{j=1}^{m+2}y_{a_{j}}$ and the sum of the increase of the increased components $\sum_{j=1}^{m+1}\lceil \hat{x}_{b_{j}}\rceil \leq \sum_{j=1}^{m+1}\hat{x}_{b_{j}} +m+1 = \sum_{j=1}^{m+2}x_{a_{j}} +m+1$. As $\sum_{j=1}^{m+2}x_{a_{j}}\leq \sum_{j=1}^{m+2}y_{a_{j}} < (1/ \Delta +2)(m+2)$, we obtain that the distance between $y$ and $\hat{y}$ is bounded by $2\cdot (1/\Delta +2)(m+2)+m+1$. Using that $\nor{d}_1 \leq m+1$, the distance between $y$ and $y'$ is bounded by $\nor{y'-y}_1 < 2\cdot (1/\Delta +3) (m+2)$. \textbf{Approximation guarantee}: The fractional solution $x$ is modified by condition of step 3 such that the sum of the components does not change. Hence $\nor{x'}_1=\nor{x}_1\leq (1+\Delta)\operatorname{\text{\textsc{opt}}}(I,s)$.\\ Case 2a: $\nor{d}_1 < m+1$. Since $d$ is chosen maximally we have for every non-zero component that $y'_i-x'_i <1$. Since there are at most $C=\Delta\operatorname{\text{\textsc{opt}}}(I,s)+m$ non-zero components we obtain that $\nor{y'}_1 \leq \nor{x'}_1 + C \leq (1+ 2\Delta)\operatorname{\text{\textsc{opt}}}(I,s)+m$. Case 2b: $\nor{d}_1 = m+1$. By definition of $\hat{y}$ we have $\nor{\hat{y}}_1 \leq \nor{y}_1 + \sum_{j=1}^{m+1} \lceil \hat{x}_{b_j}+x_{b_j}\rceil - \sum_{j=1}^{m+2}x_{a_j} \leq \nor{y}_1 + m+1$. We obtain for $y'$ that $\nor{y'}_1 = \nor{\hat{y}}_1 - \nor{d}_1 \leq \nor{y}_1 + m+1 - (m+1) =\nor{y}_1 \leq (1+ 2\Delta) \operatorname{\text{\textsc{opt}}}(I,s)+m$.
\end{proof} \begin{theorem} \label{thm-main} Algorithm \ref{alg-afptas} is an \ac{afptas} with migration factor at most $\mathcal{O}(\frac{1}{\epsilon^3}\cdot \log(\nicefrac{1}{\epsilon}))$ for the fully dynamic bin packing\xspace problem with respect to large items. \end{theorem} \begin{proof} Set $\delta = \epsilon$. Then $\Delta = 2 \epsilon + \epsilon^2 = \mathcal{O}(\epsilon)$. We assume in the following that $\Delta \leq 1$ (which holds for $\epsilon\leq \sqrt{2}-1$). We prove by induction that four properties hold for any packing $B_t$ and the corresponding \ac{lp} solutions. Let $x$ be a fractional solution of the \ac{lp} defined by the instance $(I_t,s^{R_{t}})$ and $y$ be an integral solution of this \ac{lp}. The properties $(2)$ to $(4)$ are necessary to apply Theorem \ref{thm-packing} and property $(1)$ provides the wished approximation ratio for the bin packing\xspace problem. \begin{enumerate} \item[(1)\label{prop:1}] $\max_i B_t(i) = \nor{y}_1 \leq (1+ 2\Delta)\operatorname{\text{\textsc{opt}}}(I(t),s) +m$ (the number of bins is bounded) \item[(2)\label{prop:2}] $\nor{x}_1 \leq (1+ \Delta) \operatorname{\text{\textsc{opt}}}(I(t),s)$ \item[(3)\label{prop:3}] for every configuration $i$ we have $x_i \leq y_i$ \item[(4)\label{prop:4}] $x$ and $y$ have the same number of non-zero components and that number is bounded by $\Delta \operatorname{\text{\textsc{opt}}}(I(t),s) +m$ \end{enumerate} To apply Theorem \ref{thm-packing} we furthermore need a guaranteed minimal size for $\nor{x}_1$ and $\nor{y}_1$. According to Theorem \ref{thm-packing} the integral solution $y$ needs $\nor{y}_1 \geq (m+2)(\nicefrac{1}{\delta} +2)$ and $\nor{x}_1 \geq 8 (\nicefrac{1}{\delta} +1)$ as we set $\alpha \leq 4$. By condition of the while-loop the call of \textsc{improve} is made iff $SIZE(I_t,s) \geq 8 (\nicefrac{1}{\delta} +1)$ and $SIZE(I_t,s) \geq (m+2)(\nicefrac{1}{\delta} +2)$. 
Since $\nor{y}_1 \geq \nor{x}_1 \geq SIZE(I_t,s)$ the requirements for the minimum size are fulfilled. As long as the instance is smaller than $8 (\nicefrac{1}{\delta} +1)$ or $(m+2)(\nicefrac{1}{\delta} +2)$ an offline algorithm for bin packing\xspace is used. Note that there is an offline algorithm which fulfills properties $(1)$ to $(4)$ as shown by Jansen and Klein \cite{jansen2013binpacking}. Now let $B_t$ be a packing with $SIZE(I_t,s) \geq 8 (\nicefrac{1}{\delta} +1)$ and $SIZE(I_t,s) \geq (m+2)(\nicefrac{1}{\delta} +2)$ for instance $I_t$ with solutions $x$ and $y$ of the \ac{lp} defined by $(I(t),s^{R_t})$. Suppose by induction that the properties $(1)$ to $(4)$ hold for the instance $I_t$. We have to prove that these properties also hold for the instance $I(t+1)$ and the corresponding solutions $x''$ and $y''$. The packing $B_{t+1}$ is created by the repeated use of a call of \textsc{improve} for $x$ and $y$ followed by an operation (insert, delete, shiftA or shiftB). We will prove that the properties $(1)$ to $(4)$ hold after a call of \textsc{improve} followed by an operation. \\{\bf improve:} Let $x'$ be the resulting fractional solution of Theorem \ref{thm-packing}, let $y'$ be the resulting integral solution of Theorem \ref{thm-packing} and let $B'_t$ be the corresponding packing. Properties $(1)$ to $(4)$ are fulfilled for $x$, $y$ and $B_t$ by induction hypothesis. Hence all conditions are fulfilled to use Theorem \ref{thm-packing}. By Theorem \ref{thm-packing} the properties $(1)$ to $(4)$ are still fulfilled for $x'$, $y'$ and $B'_t$ and moreover we get $\nor{x'}_1 \leq (1+ \Delta) \operatorname{\text{\textsc{opt}}}(I(t),s)- \alpha$ and $\nor{y'}_1 = \max_i B'_t (i) \leq (1+ 2 \Delta) \operatorname{\text{\textsc{opt}}}(I(t),s) + m - \alpha$ for chosen parameter $\alpha$. Let $x''$ and $y''$ be the fractional and integral solution after an operation is applied to $x'$ and $y'$. 
We have to prove that the properties $(1)$ to $(4)$ are also fulfilled for $x''$ and $y''$. \\{\bf operations:} First we take a look at how the operations modify $\nor{x'}_1$ and $\nor{y'}_1 =\max_i B'_t (i)$. By construction of the insertion operation, $\nor{x'}_1$ and $\nor{y'}_1$ are increased at most by $2$. By construction of the delete operation, $\nor{x'}_1$ and $\nor{y'}_1$ are increased by $1$. By construction of the shiftA and shiftB operation, $\nor{x'}_1$ and $\nor{y'}_1$ are increased by $1$. An \textsc{improve}(2) call followed by an insertion operation therefore yields $\nor{y''}_1 = \nor{y'}_1 +2 \leq (1+ 2\Delta)\operatorname{\text{\textsc{opt}}}(I(t),s) +m -2 +2 \leq (1+ 2\Delta)\operatorname{\text{\textsc{opt}}}(I(t+1),s) +m$ since $\operatorname{\text{\textsc{opt}}}(I(t),s) \leq \operatorname{\text{\textsc{opt}}}(I(t+1),s)$. An \textsc{improve}(4) call followed by a delete operation yields $\nor{y''}_1 = \nor{y'}_1 + 1 \leq (1+ 2\Delta)\operatorname{\text{\textsc{opt}}}(I(t),s) + m -3 \leq (1+ 2\Delta)\operatorname{\text{\textsc{opt}}}(I(t+1),s) + (1+2\Delta) +m - 3 \leq (1+ 2\Delta)\operatorname{\text{\textsc{opt}}}(I(t+1),s) +m$ since $\operatorname{\text{\textsc{opt}}}(I(t),s) \leq \operatorname{\text{\textsc{opt}}}(I(t+1),s) +1$ (an item is removed) and $\Delta \leq 1$. In the same way we obtain that $\nor{y''}_1 \leq \nor{y'}_1 +1 \leq (1+ 2\Delta)\operatorname{\text{\textsc{opt}}}(I(t+1),s) +m$ for an \textsc{improve}(1)/\textsc{improve}(3) call followed by a shiftA/shiftB operation. This concludes the proof that property $(1)$ is fulfilled for $I(t+1)$. The proof that property $(2)$ holds is analogous since $\nor{x'}_1$ increases in the same way as $\nor{y'}_1$ and $\nor{x'}_1 \leq (1+ \Delta) \operatorname{\text{\textsc{opt}}}(I(t),s) - \alpha$. For property $(3)$ note that in the operations a configuration $x_i$ of the fractional solution is increased by $1$ if and only if a configuration $y_i$ is increased by $1$. 
Therefore the property that $x''_i \leq y''_i$ for all configurations carries over from $x'$ and $y'$. By Theorem \ref{thm-packing} the number of non-zero components of $x'$ and $y'$ is bounded by $\Delta \operatorname{\text{\textsc{opt}}}(I(t),s) +m \leq \Delta \operatorname{\text{\textsc{opt}}}(I(t+1),s) +m$ in case of an insert operation. If an item is removed, the number of non-zero components of $x'$ and $y'$ is bounded by $\Delta \operatorname{\text{\textsc{opt}}}(I(t),s) +m \leq \Delta \operatorname{\text{\textsc{opt}}}(I(t+1),s) +m +1 = C+1$. By Theorem \ref{thm:reduce} the algorithm \textsc{ReduceComponents} guarantees that there are at most $C=\Delta \operatorname{\text{\textsc{opt}}}(I(t+1),s) +m$ non-zero components. By construction of the shift-operation, $x''$ and $y''$ might have two additional non-zero components. But since these are being reduced by Algorithm \ref{alg-afptas} (note that we increased the number of components being reduced in step 6 by $2$ --- see \cite{jansen2013binpacking} for details), the \ac{lp} solutions $x''$ and $y''$ have at most $\Delta \operatorname{\text{\textsc{opt}}}(I(t+1),s) +m$ non-zero components, which proves property $(4)$. Algorithm \ref{alg-afptas} therefore has an asymptotic approximation ratio of $1+\epsilon$. We still need to examine the migration factor of Algorithm \ref{alg-afptas}. In the case that the offline algorithm is used, the size of the instance is smaller than $8 (\nicefrac{1}{\delta} +1) = \mathcal{O}(\nicefrac{1}{\epsilon})$ or smaller than $(m+2)(\nicefrac{1}{\delta} +2) = \mathcal{O}(\frac{1}{\epsilon^2} \log(\nicefrac{1}{\epsilon}))$. Hence the migration factor in that case is bounded by $\mathcal{O}(\frac{1}{\epsilon^3} \log(\nicefrac{1}{\epsilon}))$. If the instance is bigger, the call of \textsc{improve} repacks at most $\mathcal{O}(\nicefrac{m}{\epsilon})$ bins by Theorem \ref{thm-packing}. 
Since every large arriving item has size $> \nicefrac{\epsilon}{14}$ and $m = \mathcal{O}(\frac{1}{\epsilon} \log (\nicefrac{1}{\epsilon}))$ we obtain a migration factor of $\mathcal{O}(\frac{1}{\epsilon^3} \log (\nicefrac{1}{\epsilon}))$ for the Algorithm \textsc{improve}. Since the migration factor of each operation is also bounded by $\mathcal{O}(\frac{1}{\epsilon^2} \log (\nicefrac{1}{\epsilon}))$, we obtain an overall migration factor of $\mathcal{O}(\frac{1}{\epsilon^3} \log (\nicefrac{1}{\epsilon}))$. The main complexity of Algorithm \ref{alg-afptas} lies in the use of Algorithm \textsc{improve}. As described by Jansen and Klein \cite{jansen2013binpacking} the running time of \textsc{improve} is bounded by $\mathcal{O}(M(\nicefrac{1}{\epsilon} \log(\nicefrac{1}{\epsilon}))\cdot \nicefrac{1}{\epsilon^3} \log(\nicefrac{1}{\epsilon}))$, where $M(n)$ is the time needed to solve a system of $n$ linear equations. By using heap structures to store the items, each operation can be performed in time $\mathcal{O}(\nicefrac{1}{\epsilon} \log(\nicefrac{1}{\epsilon})\cdot \log(\epsilon^2\cdot n(t)))$ at time $t$, where $n(t)$ denotes the number of items in the instance at time $t$. As the number of non-zero components is bounded by $\mathcal{O}(\epsilon\cdot n(t))$, the total running time of the algorithm is bounded by $\mathcal{O}(M(\nicefrac{1}{\epsilon} \log(\nicefrac{1}{\epsilon}))\cdot \nicefrac{1}{\epsilon^3} \log(\nicefrac{1}{\epsilon})+\nicefrac{1}{\epsilon} \log(\nicefrac{1}{\epsilon}) \log(\epsilon^2\cdot n(t))+\epsilon n(t))$. The best known running time for the dynamic bin packing\xspace problem \emph{without} removals was $\mathcal{O}(M(\nicefrac{1}{\epsilon^2})\cdot \nicefrac{1}{\epsilon^4}+\epsilon n(t)+\frac{1}{\epsilon^2}\log(\epsilon^2 n(t)))$ and is due to Jansen and Klein \cite{jansen2013binpacking}. As this is polynomial in $n(t)$ and in $\nicefrac{1}{\epsilon}$ we can conclude that Algorithm \ref{alg-afptas} is an \ac{afptas}. 
\end{proof} If no deletions are present, we can use a simple FirstFit algorithm (as described by Jansen and Klein \cite{jansen2013binpacking}) to pack the small items into the bins. This does not change the migration factor or the running time of the algorithm and we obtain a robust \ac{afptas} with $\mathcal{O}(\frac{1}{\epsilon^3}\cdot \log(\nicefrac{1}{\epsilon}))$ migration for the case that no item is removed. This improves the best known migration factor of $\mathcal{O}(\frac{1}{\epsilon^4})$ \cite{jansen2013binpacking}. \section{Handling Small Items} \label{sec:small} In this section we present methods for dealing with arbitrary small items in a dynamic online setting. First, we present a robust \ac{afptas} with migration factor of $\mathcal{O}(\nicefrac{1}{\epsilon})$ for the case that only small items arrive and depart. In Section \ref{sec:final} we generalize these techniques to a setting where small items arrive into a packing where large items are already packed and cannot be rearranged. Finally we state the \ac{afptas} for the general fully dynamic bin packing\xspace problem. In a robust setting without departing items, small items can easily be treated by packing them greedily via the classical FirstFit algorithm of Johnson et al. \cite{johnson1974packing} (see Epstein and Levin \cite{epstein2006robust} or Jansen and Klein \cite{jansen2013binpacking}). However, in a setting where items may also depart, small items need to be treated much more carefully. We show that the FirstFit algorithm does not work in this dynamic setting. \begin{lemma} \label{lem:firstfitdoesnotwork} Using the FirstFit algorithm to pack small items may lead to an arbitrarily bad approximation. \end{lemma} \begin{proof} Suppose that there is an algorithm $\mathcal{A}$ with migration factor $c$ which uses FirstFit on items with size $< \nicefrac{\epsilon}{14}$. We will now construct an instance where $\mathcal{A}$ yields an arbitrarily bad approximation ratio. 
Let $b=\nicefrac{\epsilon}{14}-\delta$ and $a=\nicefrac{\epsilon}{14c}-(\nicefrac{(\delta+c\delta)}{c})$ for a small $\delta$ such that $\nicefrac{(1-b)}{a}$ is integral. Note that $ac<b$ by definition. Furthermore, let $M\in \mathbb{N}$ be an arbitrary integer and consider the instance \begin{align*} I_{M}=[\underbrace{A,A,\ldots,A}_M,\underbrace{B,B,\ldots,B}_M] \end{align*} with \begin{align*} &A=(b,\text{Insert}),\underbrace{(a,\text{Insert}),(a,\text{Insert}),\ldots,(a,\text{Insert})}_{\nicefrac{(1-b)}{a}}\\ &B=\underbrace{(a,\text{Delete}),(a,\text{Delete}),\ldots,(a,\text{Delete})}_{\nicefrac{(1-b)}{a}}. \end{align*} After the insertion of all items, there are $M$ bins containing an item of size $b$ and $\nicefrac{1-b}{a}$ items of size $a$ (see Figure \ref{fig:counter}). As $ac<b$, the deletion of the items of size $a$ cannot move the items of size $b$. The remaining $M$ bins thus only contain a single item of size $b$ (see Figure \ref{fig:counter1}), while $\lceil M\cdot b\rceil$ bins would be sufficient to pack all of the remaining items. The approximation ratio is thus at least $\nicefrac{M}{M\cdot b}=\nicefrac{1}{b}\approx \frac{1}{\epsilon}$ and thus grows as $\epsilon$ shrinks. In order to avoid this problem, we design an algorithm which groups items of similar size together. Using such a mechanism would therefore put the second item of size $b$ into the first bin by shifting out an appropriate number of items of size $a$ and so on. Our algorithm achieves this grouping of small items by enumerating the bins and maintaining the property that larger small items are always left of smaller small items. 
\begin{figure} \caption{A single bin after the insertion} \caption{A single bin after the deletion} \caption{Construction in the proof of Lemma \ref{lem:firstfitdoesnotwork}} \label{fig:counter} \label{fig:counter1} \end{figure} \end{proof} \subsection{Only Small Items} \label{sec:smallitems} We consider a setting where only small items exist, i.\,e., items with a size less than $ \nicefrac{\epsilon}{14}$. First, we divide the set of small items into different size intervals $S_j$ where $S_j=\left[\frac{\epsilon}{2^{j+1}},\frac{\epsilon}{2^j}\right)$ for $j \geq 1$. Let $b_1,\ldots, b_m$ be the used bins of our packing. We say a size category $S_j$ is bigger than a size category $S_k$ if $j<k$, i.\,e., the item sizes contained in $S_j$ are larger (note that a size category $S_j$ with large index $j$ is called small). We say a bin $b_i$ is filled completely if it has less than $\frac{\epsilon}{2^j}$ remaining space, where $S_j$ is the biggest size category appearing in $b_i$. Furthermore we label bins $b_i$ as \emph{normal} or as \emph{buffer bins} and partition all bins $b_1,\ldots, b_m$ into \emph{queues} $Q_1, \ldots, Q_{d}$ for $d \leq m$. A queue is a subsequence of bins $b_i, b_{i+1} \ldots, b_{i+c}$ where bins $b_i, \ldots, b_{i+c-1}$ are normal bins and bin $b_{i+c}$ is a buffer bin. We denote the $i$-th queue by~$Q_i$ and the number of bins in $Q_i$ by $|Q_i|$. The buffer bin of queue $Q_i$ is denoted by $bb_i$. We will maintain a special form for the packing of small items such that the following properties are always fulfilled. For the sake of simplicity, we assume that $\nicefrac{1}{\epsilon}$ is integral. \begin{compactenum} \item[(1)] For every item $i\in b_d$ with size $s(i)\in S_j $ for some $j,d \in \mathbb{N}$, there is no item $i' \in b_{d'}$ with size $s(i') \in S_{j'}$ such that $d'>d$ and $j' < j$. This means: Items are ordered from left to right by their size intervals. \item[(2)] Every normal bin is filled completely. 
\item[(3)] The length of each queue is at least $\nicefrac{1}{\epsilon}$ and at most $\nicefrac{2}{\epsilon}$ except for the last queue $Q_d$. \end{compactenum} Note that property (1) implies that all items in the same size interval $S_j$ are packed into bins $b_x, b_{x+1}, \ldots , b_{x+c}$ for constants $x$ and $c$. Items in the next smaller size category $S_{j+1}$ are then packed into bins $b_{x+c}, b_{x+c+1}, \ldots$ and so on. We denote by $b_{S(\ell)}$ the last bin in which an item of size interval $S_\ell$ appears. We denote by $S_{>\ell}$ the set of smaller size categories $S_{\ell'}$ with $\ell' > \ell$. Note that items in size category $S_{>\ell}$ are smaller than items in size category $S_\ell$. \tikzset{ brace/.style={ decoration={brace, mirror}, decorate }, position label/.style={ below = 3pt, text height = 1.5ex, text depth = 1ex } } \begin{figure} \caption{Distribution of bins with small items into queues} \end{figure} The following lemma guarantees that a packing that fulfills properties $(1)$ to $(3)$ is close to the optimum solution. \begin{lemma} \label{lem:small_approx} If properties $(1)$ to $(3)$ hold, then at most $(1+\mathcal{O}(\epsilon))\operatorname{\text{\textsc{opt}}}(I,s)+2$ bins are used in the packing for every $\epsilon \leq \nicefrac{1}{3}$. \end{lemma} \begin{proof} Let $C$ be the number of used bins in our packing. By property (2) we know that all normal bins have less than $\nicefrac{\epsilon}{14}$ free space. Property (3) implies that there are at most $\epsilon\cdot C +1$ buffer bins, which are possibly empty. The number of normal bins is thus at least $(1-\epsilon)\cdot C-1$. Therefore we can bound the total size of all items from below by $(1-\nicefrac{\epsilon}{14})\cdot ((1-\epsilon)\cdot C-1)$. 
As $\operatorname{\text{\textsc{opt}}}(I,s)\geq SIZE(I,s) \geq (1-\nicefrac{\epsilon}{14})\cdot ((1-\epsilon)\cdot C-1)$ and $\frac{1}{(1-\nicefrac{\epsilon}{14})(1-\epsilon)} \leq 1+ 2\epsilon$ for $\epsilon \leq \nicefrac{1}{3}$ we get $C \leq (1+2\epsilon) \operatorname{\text{\textsc{opt}}}(I,s) +2.$ \end{proof} We will now describe the operations that are applied whenever a small item has to be inserted or removed from the packing. The operations are designed such that properties $(1)$ to $(3)$ are never violated and hence a good approximation ratio can be guaranteed by Lemma \ref{lem:small_approx} at every step of the algorithm. The operations are applied recursively such that some items from each size interval are shifted from left to right (insert) or right to left (delete). The recursion halts if the first buffer bin is reached. Therefore, the free space in the buffer bins will change over time. Since the recursion always halts at the buffer bin, the algorithm is applied on a single queue $Q_k$. The following Insert/Delete operation is defined for a whole set $J = \{i_1, \ldots , i_n \}$ of items. If an item $i$ of size interval $S_{\ell}$ has to be inserted or deleted, the algorithm is called with Insert$(\{ i \},b_{S(\ell)}, Q_k)$ respectively Delete$(\{i\},b_x, Q_k)$, where $b_x$ is the bin containing item $i$ and $Q_k$ is the queue containing bin $b_{S(\ell)}$ or $b_x$. Recall that $S_{j}=\left[\frac{\epsilon}{2^{j+1}},\frac{\epsilon}{2^{j}}\right)$ is a fixed interval for every $j\geq 1$ and $S_{\leq j}=\bigcup_{i=1}^{j}S_{i}$ and $S_{>j}=\bigcup_{i>j}S_{i}$. \begin{algo}[Insert or Delete for only small items] \label{alg-insertsmall} \ \begin{compactitem} \item {\bf Insert$(J,b_x,Q_k)$:} \begin{compactitem} \item Insert the set of small items $J = \{i_1, \ldots , i_n \}$ with size $s(i_j) \in S_{\leq \ell}$ into bin $b_x$. 
(By Lemma \ref{onlysmallitems} the total size of $J$ is bounded by $\mathcal{O}(\nicefrac{1}{\epsilon})$ times the size of the item which triggered the first Insert operation.) \item Remove just as many items $J' = \{ i'_1, \ldots , i'_m \}$ of the smaller size interval $S_{>\ell}$ appearing in bin $b_x$ (starting with the smallest) such that the items $i_1, \ldots, i_n$ fit into the bin $b_x$. If there are not enough items of smaller size categories to insert all items from $J$, insert the remaining items from $J$ into bin $b_{x+1}$. \item Let $J'_{\ell'}\subseteq J'$ be the items in the respective size interval $S_{\ell'}$ with $\ell'>\ell$. Put the items $J'_{\ell'}$ recursively into bin $b_{S(\ell')}$ (i.\,e., call Insert$(J'_{\ell'},b_{S(\ell')},Q_k)$ for each $\ell'>\ell$). If the buffer bin $bb_k$ is left of $b_{S(\ell')}$ call Insert$(J'_{\ell'},bb_k,Q_k)$ instead. \end{compactitem} \item {\bf Delete$(J,b_x,Q_k)$:} \begin{compactitem} \item Remove the set of items $J= \{ i_1, \ldots , i_n \}$ with size $s(i_j) \in S_{\leq \ell}$ from bin $b_x$ (By Lemma \ref{onlysmallitems} the total size of $J$ is bounded by $\mathcal{O}(\nicefrac{1}{\epsilon})$ times the size of the item which triggered the first Delete operation.) \item Insert as many small items $J'=\{i'_1, \ldots , i'_m\}$ from $b_{S(\ell')}$, where $S_{\ell'}$ is the smallest size interval appearing in $b_x$ such that $b_x$ is filled completely. If there are not enough items from the size category $S_{\ell'}$, choose items from size category $S_{\geq \ell'+1}$ in bin $b_{x+1}$. \item Let $J'_{\ell'}\subseteq J'$ be the items in the respective size interval $S_{\ell'}$ with $\ell'>\ell$. Remove items $J'_{\ell'}$ from bin $b_{S(\ell')}$ recursively (i.\,e., call Delete$(J'_{\ell'},b_{S(\ell')},Q_k)$ for each $\ell'>\ell$). If the buffer bin $bb_k$ is left of $b_{S(\ell')}$, call Delete$(J'_{\ell'},bb_k,Q_k)$ instead. 
\end{compactitem} \end{compactitem} \end{algo} Using the above operations maintains the property of normal bins to be filled completely. However, the size of items in buffer bins changes. In the following we describe how to handle buffer bins that are being emptied or filled completely. \begin{algo}[Handle filled or emptied buffer bins] \label{alg-bb} \ \begin{compactitem} \item {\bf Case 1: The buffer bin of $Q_i$ is filled completely by an insert operation.} \begin{compactitem} \item Label the filled bin as a normal bin and add a new empty buffer bin to the end of $Q_i$. \item If $|Q_i|>\nicefrac{2}{\epsilon}$, split $Q_i$ into two new queues $Q'_i,Q''_i$ with $|Q''_i|=|Q'_i|+1$. The buffer bin of $Q''_i$ is the newly added buffer bin. Add an empty bin labeled as the buffer bin to $Q'_i$ such that $|Q'_i|=|Q''_i|$. \end{compactitem} \item {\bf Case 2: The buffer bin of $Q_i$ is being emptied due to a delete operation.} \begin{compactitem} \item Remove the now empty bin. \item If $|Q_{i}| \geq |Q_{i+1}|$ and $|Q_{i}|> \nicefrac{1}{\epsilon}$, choose the last bin of $Q_{i}$ and label it as new buffer bin of $Q_i$. \item If $|Q_{i+1}| > |Q_i|$ and $|Q_{i+1}|> \nicefrac{1}{\epsilon}$, choose the first bin of $Q_{i+1}$ and move the bin to $Q_i$ and label it as buffer bin. \item If $|Q_{i+1}| = |Q_i| = \nicefrac{1}{\epsilon}$, merge the two queues $Q_i$ and $Q_{i+1}$. As $Q_{i+1}$ already contains a buffer bin, there is no need to label another bin as buffer bin for the merged queue. \end{compactitem} \end{compactitem} \end{algo} Creating and deleting buffer bins this way guarantees that property (3) is never violated since queues never exceed the length of $\nicefrac{2}{\epsilon}$ and never fall below $\nicefrac{1}{\epsilon}$. \begin{figure} \caption{Insert($\{i \} \caption{Delete($\{i \} \caption{Example calls of Insert and Delete.} \label{fig:insert} \label{fig:delete} \end{figure} Figure \ref{fig:insert} shows an example call of Insert($\{ i \}$,$b_x$,$Q_k$). 
Item $i$ with $s(i)\in S_1$ is put into the corresponding bin $b_x$ into the size interval $S_1$. As $b_x$ now contains too many items, some items from the smallest size interval $S_2$ (marked by the dashed lines) are put into the last bin $b_{x+2}$ containing items from $S_2$. Those items in turn push items from the smallest size interval $S_3$ into the last bin containing items of this size and so on. This process terminates if either no items need to be shifted to the next bin or the buffer bin $bb_k$ is reached. It remains to prove that the migration of the operations is bounded and that the properties are invariant under those operations. \begin{lemma} \label{onlysmallitems} \ \begin{enumerate} \item[(i)] Let $I$ be an instance that fulfills properties $(1)$ to $(3)$. Applying operations insert/delete on $I$ yields an instance $I'$ that also fulfills properties $(1)$ to $(3)$. \item[(ii)] The migration factor of a single insert/delete operation is bounded by $\mathcal{O}(\nicefrac{1}{\epsilon})$ for all $\epsilon\leq \nicefrac{2}{7}$. \end{enumerate} \end{lemma} \begin{proof} Proof for (i): Suppose the insert/delete operation is applied to a packing which fulfills properties $(1)$ to $(3)$. By construction of the insert operation, items from a size category $S_\ell$ in bin $b_x$ are shifted to a bin $b_y$. The bin $b_y$ is either $b_{S(\ell)}$ or a buffer bin left of $b_{S(\ell)}$. By definition $b_y$ contains items of size category $S_{\ell}$. Therefore property $(1)$ is not violated. Symmetrically, by construction of the delete operation, items from a size category $S_\ell$ in bin $b_{S(\ell)}$ are shifted to a bin $b_x$. By definition $b_x$ contains items of size category $S_{\ell}$ and property $(1)$ is therefore not violated. For property $(2)$: Let $b_x$ be a normal bin, where items $i_1, \ldots , i_n$ of size category $S_{\leq \ell}$ are inserted. 
We have to prove that the free space in $b_x$ remains smaller than $\nicefrac{\epsilon}{2^j}$, where $S_j$ is the smallest size category appearing in bin $b_x$. By construction of the insert operation, just as many items of size categories $S_{>\ell}$ are shifted out of bin $b_x$ such that $i_1,\ldots,i_n$ fit into $b_x$. Hence the remaining free space is less than $\frac{\epsilon}{2^{\ell}}$ and bin $b_x$ is filled completely. The same argumentation holds for the delete operation. Property $(3)$ is always fulfilled by definition of Algorithm \ref{alg-bb}. Proof for (ii): According to the insert operation, in every recursion step of the algorithm, it tries to insert a set of items into a bin $b_{x'}$, starting with an Insert$(\{ i \},b_{x'},Q_k)$ operation. Let $\operatorname{\text{\textsc{insert}}}(S_{\leq \ell+y}, b_x)$ ($x\geq x'$) be the size of all items in size categories $S_{j}$ with $j \leq \ell+y$ that the algorithm tries to insert into $b_x$ as a result of an Insert$(\{ i \},b_{x'},Q_k)$ call. Let $\operatorname{\text{\textsc{pack}}}(b_x)$ be the size of items that are actually packed into bin $b_x$. We have to distinguish between two cases. In the case that $\operatorname{\text{\textsc{insert}}}(S_{\leq \ell+y}, b_x) = \operatorname{\text{\textsc{pack}}}(b_x)$ there are enough items of smaller size categories $S_{>\ell+y}$ that can be shifted out, such that items $I$ fit into bin $b_x$. In the case that $\operatorname{\text{\textsc{insert}}}(S_{\leq \ell+y}, b_x) > \operatorname{\text{\textsc{pack}}}(b_x)$ there are not enough items of smaller size category that can be shifted out and the remaining size of $\operatorname{\text{\textsc{insert}}}(S_{\leq \ell+y}, b_x) - \operatorname{\text{\textsc{pack}}}(b_x)$ has to be shifted to the following bin $b_{x+1}$. Under the assumption that each $\operatorname{\text{\textsc{insert}}}(S_{\leq \ell}, b_x)\leq 1$ for all $x$ and $\ell$ (which is shown in the following) all items fit into $b_{x+1}$. 
Note that no items from bins left of $b_{x}$ can be shifted into $b_{x+1}$ since $b_x = b_{S(\ell+y)}$ is the last bin where items of size category $S_{\leq \ell+y}$ appear. Hence all items shifted out from bins left of $b_{x}$ are of size categories $S_{\leq \ell +y}$ (property $(1)$) and they are inserted into bins left of $b_{x+1}$. We prove by induction that for each $\operatorname{\text{\textsc{insert}}}(S_{\leq \ell+y}, b_x)$ the total size of moved items is at most \begin{align*} \operatorname{\text{\textsc{insert}}}(S_{\leq \ell+y}, b_x) \leq s(i) + 3 \sum_{j=1}^{y} \frac{\epsilon}{2^{\ell+j}} \end{align*} The claim holds obviously for $\operatorname{\text{\textsc{insert}}}(S_{\leq \ell}, b_{x'})$ since $b_{x'}=b_{S(\ell)}$ is the bin where only item $i$ is inserted. \begin{figure} \caption{Case 1} \caption{Case 2a} \caption{Case 2b} \caption{All cases to consider in Lemma \ref{onlysmallitems} \end{figure} Case 1: $\operatorname{\text{\textsc{insert}}}(S_{\leq \ell + y}, b_x) > \operatorname{\text{\textsc{pack}}}(b_x)$\\ In this case, the size of all items that have to be inserted into $b_{x+1}$ can be bounded by the size of items that did not fit into bin $b_x$ plus the size of items that were removed from bin $b_x$. We can bound $\operatorname{\text{\textsc{insert}}}(S_{\leq \ell+\bar{y}}, b_{x+1})$ where $\bar{y} > y$ is the largest index $S_{\ell+\bar{y}}$ appearing in bin $b_{x}$ by \begin{align*} \operatorname{\text{\textsc{insert}}}(S_{\leq \ell + y}, b_x) + \frac{\epsilon}{2^{\ell+y}} \leq s(i) + 3 \sum_{j=1}^{y} \frac{\epsilon}{2^{\ell+j}} + 2 \frac{\epsilon}{2^{\ell+y +1}} < s(i) + 3 \sum_{j=1}^{y+1} \frac{\epsilon}{2^{\ell+j}} \end{align*} Case 2: $\operatorname{\text{\textsc{insert}}}(S_{\leq \ell+y}, b_x) = \operatorname{\text{\textsc{pack}}}(b_x)$\\ Suppose that the algorithm tries to insert a set of items $I$ of size categories $S_{\leq \ell+\bar{y}}$ into the bin $b_{x+1} = b_{S(\ell+\bar{y})}$. 
The items $I$ can only be shifted from previous bins where items of size category $S_{\leq \ell+\bar{y}}$ appear. There are only two possibilities remaining. Either all items $I$ are shifted from a single bin $b_{\hat{x}}$ ($\hat{x} \leq x$) or from two consecutive bins $b_{\hat{x}},b_{\hat{x}+1}$ with $\operatorname{\text{\textsc{insert}}}(S_{\leq \ell+y}, b_{\hat{x}}) > \operatorname{\text{\textsc{pack}}}(b_{\hat{x}})$. Note that $b_{x+1}$ can only receive items from more than one bin if there are two bins $b_{\hat{x}},b_{\hat{x}+1}$ with $\operatorname{\text{\textsc{insert}}}(S_{\leq \ell+y}, b_{\hat{x}}) > \operatorname{\text{\textsc{pack}}}(b_{\hat{x}})$ such that $b_{x+1}=b_{S(\ell+\bar{y})}$ and all items shifted out of $b_{\hat{x}},b_{\hat{x}+1}$ and into $b_{x+1}$ are of size category $S_{\ell+\bar{y}}$. Hence bins left of $b_{\hat{x}}$ or right of $b_{\hat{x}+1}$ can not shift items into $b_{x+1}$. Case 2a: All items $I$ are shifted from a single bin $b_{\hat{x}}$ with $\hat{x} \leq x$ (note that $\hat{x}<x$ is possible since $\operatorname{\text{\textsc{pack}}}(b_x) = \operatorname{\text{\textsc{insert}}}(S_{\leq \ell+y}, b_x)$ can be zero). The total size of items that are shifted out of $b_{\hat{x}}$ can be bounded by $\operatorname{\text{\textsc{insert}}}(S_{\leq \ell+y},b_{\hat{x}})+\frac{\epsilon}{2^{\ell+y}}$. By induction hypothesis $\operatorname{\text{\textsc{insert}}}(S_{\leq \ell+y}, b_{\hat{x}})$ is bounded by $s(i)+3\sum_{j=1}^{y}\frac{\epsilon}{2^{\ell+j}}$. 
Since all items that are inserted into $b_{x+1}$ come from $b_{\hat{x}}$, the value $\operatorname{\text{\textsc{insert}}}(S_{\leq \ell+\bar{y}},b_{x+1})$ ($\bar{y}>y$) can be bounded by $\operatorname{\text{\textsc{insert}}}(S_{\leq \ell+y}, b_{\hat{x}}) + \frac{\epsilon}{2^{\ell+y}} \leq s(i)+3\sum_{j=1}^{y}\frac{\epsilon}{2^{\ell+j}}+\frac{\epsilon}{2^{\ell+y}}< s(i) + 3 \sum_{j=1}^{\bar{y}} \frac{\epsilon}{2^{\ell+j}}$ where $S_{\ell+\bar{y}}$ is the smallest size category inserted into $b_{x+1}$. Note that the items $I$ belong to only one size category $S_{\ell+\bar{y}}$ if $\hat{x}<x$ since all items that are in size intervals $S_{<\ell+\bar{y}}$ are inserted into bin $b_{\hat{x}+1}$. Case 2b: Items $I$ are shifted from bins $b_{\hat{x}}$ and $b_{\hat{x}+1}$ ($\hat{x}+1 \leq x$) with $\operatorname{\text{\textsc{insert}}}(S_{\leq \ell+y}, b_{\hat{x}}) > \operatorname{\text{\textsc{pack}}}(b_{\hat{x}})$. In this case, all items $I$ belong to the size category $S_{\ell+\bar{y}}$ since $b_{\hat{x}}$ is left of $b_x$. Hence all items which are inserted into $b_{\hat{x}+1}$ are from $I$, i.\,e., $\operatorname{\text{\textsc{insert}}}(S_{\leq \ell+y},b_{\hat{x}}) = \operatorname{\text{\textsc{pack}}}(b_{\hat{x}}) + \operatorname{\text{\textsc{pack}}}(b_{\hat{x}+1})$ as all items in $I$ belong to the same size category $S_{\ell+\bar{y}}$. We can bound $\operatorname{\text{\textsc{insert}}}(S_{\ell+\bar{y}},b_{x+1})$ by the size of items that are shifted out of $b_{\hat{x}}$ plus the size of items that are shifted out of $b_{\hat{x}+1}$. 
We obtain \begin{align*} &\operatorname{\text{\textsc{insert}}}(S_{\leq \ell+\bar{y}},b_{x+1}) \leq \operatorname{\text{\textsc{pack}}}(b_{\hat{x}}) + \frac{\epsilon}{2^{\ell+ y}} + \operatorname{\text{\textsc{pack}}}(b_{\hat{x}+1}) +\frac{\epsilon}{2^{\ell+ \bar{y}}} \\ &= \operatorname{\text{\textsc{insert}}}(S_{\leq \ell+y},b_{\hat{x}})+\frac{\epsilon}{2^{\ell+ y}}+\frac{\epsilon}{2^{\ell+ \bar{y}}}\\ &\leq s(i) + 3 \sum_{j=1}^{y} \frac{\epsilon}{2^{\ell+j}}+\frac{\epsilon}{2^{\ell+ y}}+\frac{\epsilon}{2^{\ell+ \bar{y}}}\\ &\leq s(i) + 3\sum_{j=1}^{y} \frac{\epsilon}{2^{\ell+j}}+3\frac{\epsilon}{2^{\ell+ \bar{y}}} \leq s(i) + 3 \sum_{j=1}^{\bar{y}} \frac{\epsilon}{2^{\ell+j}} \end{align*} This yields that $\operatorname{\text{\textsc{insert}}}(S_{\leq \ell+y},b_x)$ is bounded by $s(i) + 3 \sum_{j=1}^{\bar{y}} \frac{\epsilon}{2^{\ell+j}}$ for all bins $b_x$ in $Q_k$. Now, we can bound the migration factor for every bin $b_x$ of $Q_k$ for any $y\in \mathbb{N}$ by $\operatorname{\text{\textsc{pack}}}(b_x) + \frac{\epsilon}{2^{\ell+y}} \leq \operatorname{\text{\textsc{insert}}}(S_{\leq \ell+y},b_x) + \frac{\epsilon}{2^{\ell+y}}$. Using the above claim, we get: \begin{align*} &\operatorname{\text{\textsc{insert}}}(S_{\leq \ell+y},b_x) + \frac{\epsilon}{2^{\ell+y}} \leq s(i) + 3 \sum_{j=1}^{y} \frac{\epsilon}{2^{\ell+j}} + 2 \frac{\epsilon}{2^{\ell+y+1}}\\ &< s(i) + 3 \sum_{j=1}^{\infty} \frac{\epsilon}{2^{\ell+j}} = s(i) + 3 \frac{\epsilon}{2^\ell}\sum_{j=1}^{\infty} \frac{1}{2^{j}} = s(i) + 3 \cdot \frac{\epsilon}{2^\ell} \leq 7 s(i) \end{align*} Since there are at most $\nicefrac{2}{\epsilon}$ bins per queue, we can bound the total migration of Insert$(\{ i \},b_{S(\ell)}, Q_k)$ by $7 \cdot \nicefrac{2}{\epsilon} \in \mathcal{O}(\nicefrac{1}{\epsilon})$. Note also that $s(i) \leq \nicefrac{\epsilon}{14}$ for every $i$ implies that $\operatorname{\text{\textsc{insert}}}(S_{\leq \ell}, b_x)$ is bounded by $\nicefrac{\epsilon}{2}$ for all $x$ and $\ell$. 
Suppose that items $i_1, \ldots , i_n$ of size interval $S_{\ell+y}$ have to be removed from bin $b_x$. In order to fill the emerging free space, items from the same size category are moved out of $b_{S(\ell+y)}$ into the free space. As the bin $b_x$ may already have additional free space, we need to move at most a size of $\operatorname{\text{\textsc{size}}}(i_1,\ldots,i_n)+\nicefrac{\epsilon}{2^{\ell+y}}$. A proof symmetric to the one above yields a migration factor of $\mathcal{O}(\frac{1}{\epsilon})$. \end{proof} \subsection{Handling small items in the general setting} \label{sec:general} In the scenario that there are mixed item types (small and large items), we need to be more careful in the creation and the deletion of buffer bins. To maintain the approximation guarantee, we have to make sure that as long as there are bins containing only small items, the remaining free space of all bins can be bounded. Packing small items into empty bins and leaving bins with large items untouched does not lead to a good approximation guarantee as the free space of the bins containing only large items is not used. In this section we consider the case where a sequence of small items is inserted or deleted. We assume that the packing of large items does not change. Therefore the number of bins containing large items equals a fixed constant $\Lambda(B)$. In the previous section, the bins $b_1,\ldots, b_{m(B)}$ all had a capacity of $1$. In order to handle a mixed setting, we will treat a bin $b_i$ containing large items as having capacity of $c(b_i) = 1-S$, where $S$ is the total size of the large items in $b_i$. The bins containing small items are enumerated by $b_1, \ldots ,b_{L(B)}, b_{L(B)+1}, \ldots , b_{m(B)}$ for some $L(B)\leq m(B)$ where $c(b_1),\ldots,c(b_{L(B)})< 1$ and $c(b_{L(B)+1})=\ldots = c(b_{m(B)})=1$. Additionally we have a separate set of bins, called the \emph{heap bins}, which contain only large items. This set of bins is enumerated by $h_1, \ldots, h_{h(B)}$. 
Note that $L(B)+h(B)=\Lambda(B)$. In general we may consider only bins $b_i$ and $h_i$ with capacity $c(b_i) \geq \nicefrac{\epsilon}{14}$ and $c(h_i) \geq \nicefrac{\epsilon}{14}$ since bins with less capacity are already packed well enough for our approximation guarantee as shown by Lemma \ref{onlysmallitems}. Therefore, full bins are not considered in the following. \tikzset{ brace/.style={ decoration={brace, mirror}, decorate }, position label/.style={ below = 3pt, text height = 1.5ex, text depth = 1ex } } \begin{figure} \caption{Distribution of bins} \label{figmixedsetting} \end{figure} As before, we partition the bins $b_1,\ldots,b_{L(B)}, b_{L(B)+1}, \ldots ,b_{m(B)}$ into several different queues $Q_1, \ldots , Q_{\ell(B)} , Q_{\ell(B) +1}, \ldots ,Q_{d(B)}$ such that $b_1, \ldots, b_{L(B)} = Q_1, \ldots, Q_{\ell(B)}$ and $b_{L(B)+1}, \ldots, b_{m(B)} = Q_{\ell(B)+1}, \ldots , Q_{d(B)}$. If the corresponding packing $B$ is clear from the context, we will simply write $h,L,\ell,d,m,\Lambda$ instead of $h(B),L(B),\ell(B),d(B),m(B),\Lambda(B)$. We denote the last bin of queue $Q_i$ by $bb_i$ which is a buffer bin. The buffer bin $bb_{\ell}$ is special and will be treated differently in the insert and delete operation. Note that the bins containing large items $b_1,\ldots,b_{L(B)}$ are enumerated first. This guarantees that the free space in the bins containing large items is used before new empty bins are opened to pack the small items. However, enumerating bins containing large items first leads to a problem when, according to Algorithm \ref{alg-bb}, a buffer bin is being filled and a new bin has to be inserted right of the filled bin. Instead of inserting a new empty bin, we insert a heap bin at this position. Since the heap bin contains only large items, we do not violate the order of the small items (see Figure \ref{figmixedsetting}).
As the inserted heap bin has remaining free space (is not filled completely) for small items, it can be used as a buffer bin. In order to get an idea of how many heap bins we have to reserve for Algorithm \ref{alg-bb} where new bins are inserted or deleted, we define a potential function. As a buffer bin is being filled or emptied completely the Algorithm \ref{alg-bb} is executed and inserts or deletes buffer bins. The potential function $\Phi(B)$ thus bounds the number of buffer bins in $Q_1, \ldots , Q_{\ell(B)}$ that are about to get filled or emptied. The potential $\Phi(B)$ is defined by \begin{align*} \Phi(B) = \sum_{i=1}^{\ell-1} r_i + \lceil \epsilon \Lambda \rceil - \ell \end{align*} where the \emph{fill ratio} $r_i$ is defined by $r_i=\frac{s(bb_i)}{c(bb_i)}$ and $s(bb_i)$ is the total size of all small items in $bb_i$ . Note that the potential only depends on the queues $Q_1, \ldots , Q_{\ell(B)}$ and the bins which contain small and large items. The term $r_i$ intends to measure the number of buffer bins that become full. According to Case 1 of the previous section a new buffer bin is opened when $bb_i$ is filled i.\,e., $r_i \approx 1$. Hence the sum $\sum_{i=1}^{\ell-1} r_i$ bounds the number of buffer bins getting filled. The term $\epsilon \Lambda$ in the potential measures the number of bins that need to be inserted due to the length of a queue exceeding $\nicefrac{2}{\epsilon}$, as we need to split the queue $Q_i$ into two queues of length $\nicefrac{1}{\epsilon}$ according to Case 1. Each of those queues needs a buffer bin, hence we need to insert a new buffer bin out of the heap bins. Therefore the potential $\Phi(B)$ bounds the number of bins which will be inserted as new buffer bins according to Case 1. Just like in the previous section we propose the following properties to bound the approximation ratio and the migration factor. 
The first three properties remain the same as in Section \ref{sec:smallitems} and the last property gives the desired connection between the potential function and the heap bins. \begin{enumerate} \item[(1)] For every item $i\in b_d$ with size $s(i)\in S_j $ for some $j,d \in \mathbb{N}$, there is no item $i' \in b_{d'}$ with size $s(i') \in S_{j'}$ such that $d'>d$ and $j' > j$. This means: Items are ordered from left to right by their size intervals. \item[(2)] Every normal bin of $b_1,\ldots,b_m$ is filled completely. \item[(3)] The length of each queue is at least $\nicefrac{1}{\epsilon}$ and at most $\nicefrac{2}{\epsilon}$ except for $Q_{\ell}$ and $Q_{d}$. The length of $Q_{\ell}$ and $Q_{d}$ is only limited by $1\leq |Q_{\ell}|,|Q_d|\leq \nicefrac{1}{\epsilon}$. Furthermore, $|Q_{\ell+1}| = 1$ and $1 \leq |Q_{\ell+2}| \leq \nicefrac{2}{\epsilon}$. \item[(4)] The number of heap bins $H_1, \ldots , H_{h}$ is exactly $h = \lfloor \Phi(B) \rfloor$. \end{enumerate} Since bins containing large items are enumerated first, property $(1)$ implies in this setting that bins with large items are filled before bins that contain no large items. Note also that property (3) implies that $\Phi(B) \geq 0$ for arbitrary packings $B$ since $\epsilon\Lambda \geq \ell-1+\epsilon$ and thus $\lceil \epsilon \Lambda \rceil \geq \ell$. The following lemma proves that a packing which fulfills properties $(1)$ to $(4)$ provides a solution that is close to the optimum. \begin{lemma} \label{lem-smallitems-approximation} Let $M = m + h$ be the number of used bins and $\epsilon \leq \nicefrac{1}{4}$. If properties $(1)$ to $(4)$ hold, then at most $\max \{ \Lambda , (1+\mathcal{O}(\epsilon))\operatorname{\text{\textsc{opt}}}(I,s)+\mathcal{O}(1) \}$ bins are used in the packing. \end{lemma} \begin{proof} Case 1: There is no bin containing only small items, i.\,e., $L=m$. Hence all items are packed into $M=L+h = \Lambda$ bins. Case 2: There are bins containing only small items, i.\,e., $L<m$.
Property (3) implies that the number of queues $d$ is bounded by $d\leq \epsilon m+4$. Hence the number of buffer bins is bounded by $\epsilon m+4$ and the number of heap bins $\Phi(B)$ (property (4)) is bounded by $\Phi(B) = \sum_{i=1}^{\ell-1} r_i + \lceil \epsilon \Lambda \rceil - \ell \leq \ell -1 + \epsilon \Lambda +1 - \ell = \epsilon \Lambda$ as $r_i\leq 1$. Since $\Lambda < M$, we can bound $\Phi (B)$ by $\Phi(B) < \epsilon M$. The number of normal bins is thus at least $M - (\epsilon m + 5) - (\epsilon M - 1) \geq M - 2 \epsilon M - 4 = (1-2\epsilon)M- 4$. By property (2) every normal bin has less than $\nicefrac{\epsilon}{14}$ free space and the total size $S$ of all items is thus at least $S \geq (1-\nicefrac{\epsilon}{14})(1-2\epsilon)M-4$. Since $\operatorname{\text{\textsc{opt}}}(I,s)\geq S$, we have $\operatorname{\text{\textsc{opt}}}(I,s)\geq (1-\nicefrac{\epsilon}{14})(1-2\epsilon)M-4$. A simple calculation shows that $\frac{1}{(1-\nicefrac{\epsilon}{14})(1-2\epsilon)}\leq (1+5\epsilon)$ for $\epsilon\leq \nicefrac{1}{4}$. Therefore we can bound the number of used bins by $(1+5\epsilon)\operatorname{\text{\textsc{opt}}}(I,s)+4$. \end{proof} According to property (4) we have to guarantee that if the rounded potential $\lfloor \Phi(B) \rfloor$ changes, the number of heap bins has to be adjusted accordingly. The potential $\lfloor \Phi(B) \rfloor$ might increase by $1$ due to an insert operation. Therefore the number of heap bins has to be incremented. If the potential $\lfloor \Phi(B) \rfloor$ decreases due to a delete operation, the number of heap bins has to be decremented. In order to maintain property $(4)$ we have to make sure that the number of heap bins can be adjusted whenever $\lfloor \Phi(B) \rfloor$ changes.
Therefore we define the fractional part $\{ \Phi(B) \}=\Phi(B)-\lfloor\Phi(B)\rfloor$ of $\Phi(B)$ and put it in relation to the fill ratio $r_{\ell}$ of $bb_{\ell}$ (the last bin containing large items) through the following equation: \begin{align*} \tag{Heap Equation} | (1 - r_{\ell}) - \{ \Phi(B) \} | \leq \frac{s}{c(bb_\ell)} \end{align*} where $s$ is the biggest size of a small item appearing in $bb_{\ell}$. The Heap Equation ensures that the potential $\Phi(B)$ is correlated to $1-r_{\ell}$. The values may only differ by the small term $\frac{s}{c(bb_{\ell})}$. Note that the Heap Equation can always be fulfilled by shifting items from $bb_{\ell}$ to queue $Q_{\ell+1}$ or vice versa. Assuming the Heap Equation holds and the potential $\lfloor \Phi(B)\rfloor$ increases by $1$, we can guarantee that buffer bin $bb_{\ell}$ is nearly empty. Hence the remaining items can be shifted to $Q_{\ell+1}$ and $bb_\ell$ can be moved to the heap bins. The bin left of $bb_\ell$ becomes the new buffer bin of $Q_\ell$. Vice versa, if $\lfloor \Phi(B)\rfloor$ decreases, we know by the Heap Equation that $bb_{\ell}$ is nearly full, hence we can label $bb_{\ell}$ as a normal bin and open a new buffer bin from the heap at the end of queue $Q_{\ell}$. Our goal is to ensure that the Heap Equation is fulfilled at every step of the algorithm along with properties $(1)$ to $(4)$. Therefore we enhance the delete and insert operations from the previous section. Whenever a small item $i$ is inserted or removed, we will perform the operations described in Algorithm \ref{alg-insertsmall} (which can be applied to bins of different capacities) in the previous section. This will maintain properties $(1)$ to $(3)$. If items are inserted or deleted from queue $Q_{\ell}$ (the last queue containing large and small items) the recursion does not halt at $bb_{\ell}$. Instead the recursion goes further and halts at $bb_{\ell+1}$. 
So, when items are inserted into bin $bb_\ell$ according to Algorithm \ref{alg-insertsmall} the bin $bb_\ell$ is treated as a normal bin. Items are shifted from $bb_{\ell}$ to queue $Q_{\ell+1}$ until the Heap Equation is fulfilled. This way we can make sure that the Heap Equation remains fulfilled whenever an item is inserted or removed from $Q_{\ell}$. \begin{algo}[Insert or Delete small items for the mixed setting] \label{algo-small-items-general} \ {\bf Insert$(i,b_x,Q_j)$:} \begin{compactitem} \item Use Algorithm \ref{alg-insertsmall} to insert item $i$ into $Q_j$ with $j < \ell$. \item Let $i_1, \ldots , i_m$ be the items that are inserted at the last step of Algorithm \ref{alg-insertsmall} into $bb_j$. \item For $k = 1, \ldots , m$ do \begin{compactenum} \item Insert item $i_k$ into bin $bb_j$. \item If $bb_j$ is completely filled use Algorithm \ref{alg-bb}. \item If the potential $\lfloor \Phi(B) \rfloor$ increases use Algorithm \ref{alg-potential} (see below) to adjust the number of heap bins (property (4)). \item Decrease the fill ratio $r_\ell$ of $bb_\ell$ by shifting the smallest items in $bb_\ell$ to $Q_{\ell+1}$ until $(1 - r_{\ell}) \leq \{ \Phi(B) \}$ to fulfill the Heap Equation. \end{compactenum} \end{compactitem} {\bf Delete$(i,b_x,Q_j)$:} \begin{compactitem} \item Use Algorithm \ref{alg-insertsmall} to remove item $i$ from bin $b_x$ in queue $Q_j$ with $j < \ell$. \item Let $i_1, \ldots , i_m$ be the items that are removed at the last step of Algorithm \ref{alg-insertsmall} from $bb_j$. \item For $k = 1, \ldots , m$ do \begin{compactenum} \item If $bb_j$ is empty use Algorithm \ref{alg-bb}. \item Remove item $i_k$ from bin $bb_j$. \item If the potential $\lfloor \Phi(B) \rfloor$ decreases use Algorithm \ref{alg-potential}. \item Increase the fill ratio $r_\ell$ of $bb_\ell$ by shifting the smallest items from $Q_{\ell+1}$ into $bb_\ell$ until $(1 - r_{\ell}) \geq \{ \Phi(B) \}$ to fulfill the Heap Equation.
\end{compactenum} \end{compactitem} \end{algo} For the correctness of step 4 (the adjustment to $r_{\ell}$) note the following: In case of the insert operation, the potential $\Phi(B)$ increases and we have $\Phi(B) \geq 1-r_\ell$. As items are being shifted from $bb_\ell$ to $Q_{\ell+1}$, the first time that $(1 - r_{\ell}) \leq \{ \Phi(B) \}$ is fulfilled, the Heap Equation is also fulfilled. Since the fill ratio of $bb_\ell$ changes at most by $\frac{s}{c(bb_\ell)}$ as an item (which has size at most $s$) is shifted to $Q_{\ell+1}$ we know that $| (1 - r_{\ell}) - \{ \Phi(B) \} | \leq \frac{s}{c(bb_\ell)}$. Correctness of step 4 in the delete operation follows symmetrically. The potential $\Phi(B)$ changes if items are inserted or deleted into queues $Q_1, \ldots , Q_{\ell-1}$. Due to these insert or delete operations it might happen that the potential $\lfloor \Phi(B) \rfloor$ increases or that a buffer bin is being filled or emptied. The following operation is applied as soon as an item is inserted or deleted into a buffer bin and the potential $\lfloor \Phi(B) \rfloor$ increases or decreases. \begin{algo}[Change in the potential] \label{alg-potential} \ \begin{compactitem} \item {\bf Case 1: The potential $\lfloor \Phi(B) \rfloor$ increases by $1$.} \begin{compactitem} \item According to the Heap Equation the remaining size of small items in $bb_{\ell}$ can be bounded. Shift all small items from $bb_{\ell}$ to $Q_{\ell+1}$. \item If $|Q_{\ell}| >1$ then label the now empty buffer bin $bb_{\ell}$ as a heap bin and the last bin in $Q_{\ell}$ is labeled as a buffer bin. \item If $Q_{\ell}$ only consists of the buffer bin (i.\,e., $|Q_{\ell}| = 1$) shift items from $bb_{\ell-1}$ to $Q_{\ell+1}$ until the heap equation is fulfilled. If $bb_{\ell-1}$ becomes empty remove $bb_{\ell-1}$ and $bb_{\ell}$. The bin left to $bb_{\ell-1}$ becomes the new buffer bin of $Q_{\ell-1}$. 
The queue $Q_{\ell}$ is deleted and $Q_{\ell-1}$ becomes the new last queue containing large items. \end{compactitem} \item {\bf Case 2: The potential $\lfloor \Phi(B) \rfloor$ decreases by $1$.} \begin{compactitem} \item According to the Heap Equation the remaining free space in $bb_{\ell}$ can be bounded. Shift items from $bb_{\ell+1}$ to $bb_{\ell}$ such that the buffer bin $bb_{\ell}$ is filled completely. \item Add the new buffer bin from the heap to $Q_{\ell}$. \item If $|Q_{\ell}| = \nicefrac{1}{\epsilon}$ label an additional heap bin as a buffer bin to create a new queue $Q_{\ell+1}$ with $|Q_{\ell+1}| = 1$. \end{compactitem} \end{compactitem} \end{algo} Like in the last section we also have to describe how to handle buffer bins that are being emptied or filled completely. We apply the same algorithm when a buffer bin is being emptied or filled but have to distinguish now between buffer bins of $Q_1, \ldots , Q_{\ell}$ and buffer bins of $Q_{\ell+1}, \ldots, Q_{d}$. Since the buffer bins in $Q_{\ell+1},\ldots,Q_{d}$ all have capacity $1$, we will use the same technique as in the last section. If a buffer bin in $Q_{1},\ldots,Q_{\ell}$ is emptied or filled we will also use a similar technique. But instead of inserting a new empty bin as a new buffer bin, we take an existing bin out of the heap. And if a buffer bin from $Q_1, \ldots, Q_{\ell}$ is being emptied (it still contains large items), it is put into the heap. This way we make sure that there are always sufficiently many bins containing large items which are filled completely. \begin{lemma} \label{lem-algorithm} Let $B$ be a packing which fulfills the properties $(1)$ to $(4)$ and the Heap Equation. Applying Algorithm \ref{alg-potential} or Algorithm \ref{alg-bb} on $B$ during an insert/delete operation yields a packing $B'$ which also fulfills properties $(1)$ to $(4)$. The migration to fulfill the Heap Equation is bounded by $\mathcal{O}(\nicefrac{1}{\epsilon})$.
\end{lemma} \begin{proof} {\bf Analysis of Algorithm \ref{alg-potential}}\\ Properties $(1)$ and $(2)$ are never violated by the algorithm because the items are only moved by shift operations. Property $(3)$ is never violated because no queue (except for $Q_{\ell}$) exceeds $\nicefrac{2}{\epsilon}$ or falls below $\nicefrac{1}{\epsilon}$ by construction. Algorithm \ref{alg-potential} is called during an insert or delete operation. The Algorithm is executed as items are shifted into or out of buffer $bb_j$ such that $\lfloor \Phi(B)\rfloor$ changes. In the following we prove property $(4)$ for the packing $B'$ assuming that $\lfloor \Phi(B) \rfloor = h(B)$ holds by induction. Furthermore we give a bound for the migration to fulfill the heap equation: \begin{itemize} \item Case 1: The potential $\lfloor \Phi(B) \rfloor$ increases during an insert operation, i.\,e., it holds $\lfloor \Phi(B') \rfloor = \lfloor \Phi(B) \rfloor +1$. Let item $i^*$ be the first item that is shifted into a bin $bb_j$ such that $\lfloor \Phi(B) + r^* \rfloor = \lfloor \Phi(B') \rfloor$, where $r^*$ is the fill ratio being added to $bb_j$ by item $i^*$. In this situation, the fractional part changes from $\{\Phi(B)\} \approx 1$ to $\{\Phi(B')\} \approx 0$. \begin{itemize} \item In the case that $|Q_{\ell}|> 1$, the buffer bin $bb_{\ell}$ is being emptied and moved to the heap bins. The bin left of $bb_{\ell}$ becomes the new buffer bin $bb'_\ell$ of $Q_{\ell}$. Hence the number of heap bins increases and we have $h(B') = h(B) +1 = \lfloor \Phi(B) \rfloor +1 = \lfloor \Phi(B') \rfloor$, which implies property (4). To give a bound on the total size of items needed to be shifted out of (or into) bin $bb_\ell$ to fulfill the heap equation, we bound the term $|(1 - r'_{\ell}) - \{ \Phi(B') \}|$ by some term $C\leq \mathcal{O}(\nicefrac{s(i)}{\epsilon})$, where $r'_\ell$ is the fill ratio of $bb'_\ell$ and $s(i)$ is the size of the arriving or departing item. 
If the term $|(1 - r'_{\ell}) - \{ \Phi(B') \}|$ can be bounded by $C$, the fill ratio of $bb'_\ell$ has to be adjusted to fulfill the heap equation according to the insert and delete operation. This can be done be shifting a total size of at most $C$ items out of (or into) $bb'_\ell$. The bin $bb'_\ell$ is completely filled by property (3) and therefore has a fill ratio of $r'_\ell \geq \frac{c(bb_\ell) - s}{c(bb_\ell)} \geq 1- 2 \frac{s}{\epsilon}$, where $s \leq \frac{\epsilon}{2^k}$ is the largest size of a small item appearing in $bb_\ell$ and $S_k$ is the largest size category appearing in $bb'_\ell$. Let $k'$ be the largest size category appearing in bin $bb_j$. As the bin $bb'_\ell$ is right of $bb_j$ we know $k \leq k'$ (property $(1)$) and hence $s \leq 2 s(i^*)$. We get $r'_{\ell} \geq 1 - 4 \frac{s(i^*)}{\epsilon}$. Using that $\{ \Phi(B') \} \leq r^* \leq 2 \nicefrac{s(i^*)}{\epsilon}$, we can bound $|(1 - r'_{\ell}) - \{ \Phi(B') \}|$ by $4 \frac{s(i^*)}{\epsilon} + 2 \nicefrac{s(i^*)}{\epsilon} = \mathcal{O}(\nicefrac{s(i^*)}{\epsilon})$. Hence the Heap Equation can be fulfilled by shifting items of total size $\mathcal{O}(\nicefrac{s(i^*)}{\epsilon})$ at the end of the insert operation. \item If $|Q_{\ell}| = 1$ a set of items in the buffer bin $bb_{\ell-1}$ is shifted to $Q_{\ell+1}$ to fulfill the Heap Equation. Since items are being removed from $bb_{\ell-1}$ the potential decreases. If $r_{\ell-1} > \{ \Phi(B') \}$, there are enough items which can be shifted out of $bb_{\ell-1}$ such that we obtain a new potential $\Phi(B'') < \Phi(B') - \{\Phi(B')\}$. Hence $\lfloor \Phi(B'')\rfloor = \lfloor \Phi(B)\rfloor$ and the Heap Equation is fulfilled. Note that the size of items that are shifted out of $bb_{\ell-1}$ is bounded by $r^*+s = \mathcal{O}(\nicefrac{s(i^*)}{\epsilon})$, where $s$ is the biggest size of an item appearing in $bb_{\ell-1}$. If $r_{\ell-1}\leq \{ \Phi(B') \}$ all items are shifted out of $bb_{\ell-1}$. 
As the number of queues decreases, we obtain the new potential $\Phi(B'') = \Phi(B') - r_{\ell -1} + 1= \lfloor \Phi(B') \rfloor +\{\Phi(B')\}-r_{\ell-1} + 1 \geq \lfloor \Phi(B')\rfloor +1 $. Hence $\lfloor\Phi(B'')\rfloor = \lfloor \Phi(B)\rfloor +2$. The buffer bins $bb_{\ell-1}$ and $bb_{\ell}$ are moved to the heap and thus $h(B'')=h(B)+2=\lfloor \Phi(B)\rfloor + 2=\lfloor \Phi(B'')\rfloor$ (property (4)). Note that if $r_{\ell-1} \leq \{ \Phi(B') \}$, item $i^*$ is not inserted into bin $bb_{\ell-1}$ as $r_{\ell-1} \geq r^* > \{ \Phi(B') \}$. Therefore the bin $bb_j$ is left of $bb_{\ell-1}$ and we can bound the fill ratio of the bin left of $bb_{\ell-1}$ called $r''_{\ell}$ by $1 - 2 \frac{s(i^*)}{\epsilon}$. Using $\{\Phi(B'') \} \leq r^* = \mathcal{O}(\nicefrac{s(i^*)}{\epsilon})$ the heap equation can be fulfilled by shifting items of total size $\mathcal{O}(\nicefrac{s(i)}{\epsilon})$ at the end of the insert operation. \end{itemize} \item Case 2: The potential $\lfloor \Phi(B) \rfloor$ decreases during a delete operation, i.\,e., it holds $\lfloor \Phi(B') \rfloor = \lfloor \Phi(B) \rfloor -1$ = $\lfloor \Phi(B) - r^* \rfloor$, where $r^*$ is the fill ratio being removed from a buffer bin $bb_j$ due to the first shift of an item $i^*$ that decreases the potential.\\ According to Algorithm \ref{alg-potential}, buffer bin $bb_{\ell}$ is being filled completely and a new buffer bin for $Q_{\ell}$ is inserted from the heap. Hence the number of heap bins decreases and we have $\lfloor \Phi(B') \rfloor = h(B) - 1 = h(B')$. As $\lfloor \Phi(B)\rfloor -1=\Phi(B)-\{\Phi(B)\}-1=\lfloor \Phi(B)-r^*\rfloor$, it holds that $\{\Phi(B)\}\leq r^*$ and by the heap equation the fill ratio of $bb_\ell$ is $r_\ell \geq r^*+s$, where $s$ is the largest size of a small item in $bb_{\ell}$. As above, $r^*$ and $s$ can be bounded by $\mathcal{O}(\frac{s(i^*)}{\epsilon})$. 
Hence the total size that is shifted from $Q_{\ell+1}$ into bin $bb_\ell$ can be bounded by $\mathcal{O}(\frac{s(i^*)}{\epsilon})$. Furthermore $\{ \Phi(B') \} \geq 1 - r^*$ (as $\Phi(B')=\Phi(B)-r^*$) and $r'_\ell = 0$, therefore we can bound $|(1 - r'_{\ell}) - \{ \Phi(B') \}|$ by $r^*\leq \mathcal{O}(\nicefrac{s(i^*)}{\epsilon})$ and the Heap Equation can be fulfilled by shifting a total size of at most $\mathcal{O}(\nicefrac{s(i^*)}{\epsilon})$ items. In the case that $|Q_{\ell}|=\nicefrac{1}{\epsilon}$ a new queue $Q_{\ell+1}$ is created which consists of a single buffer bin (inserted from the heap), which does not contain small items, i.\,e., $h(B'') = h(B') -1 = h(B) -2$, where $B''$ is the packing after the insertion of item $i^*$. Let $\Phi(B'')$ be the potential after the queue $Q_{\ell+1}$ is created. Then $\Phi(B'')= \sum_{i=1}^{\ell(B'')-1}r_i +\epsilon\Lambda -\ell(B'') = \sum_{i=1}^{\ell(B')-2}r_i+\epsilon\Lambda-\ell(B')-1=\Phi(B')-1$, as the buffer bin $bb_{\ell}$ is now counted in the potential, but does not contain any small items and thus $r''_\ell=0$. Hence $\Phi(B'') = \Phi(B') -1 = h(B') -1 = h(B'')$. \end{itemize} {\bf Analysis of Algorithm \ref{alg-bb}}\\ Algorithm \ref{alg-bb} is executed as an item $i^*$ is moved into a buffer bin $bb_{j}$ such that $bb_j$ is completely filled or Algorithm \ref{alg-bb} is executed if the buffer bin $bb_j$ is emptied by moving the last item $i^*$ out of the bin. As in the analysis of Algorithm \ref{alg-potential}, properties $(1)$ and $(2)$ are never violated by the algorithm because the items are only moved by shift operations. Property $(3)$ is never violated because no queue (except for $Q_{\ell}$) exceeds $\nicefrac{2}{\epsilon}$ or falls below $\nicefrac{1}{\epsilon}$ by construction. 
It remains to prove property $(4)$ and a bound for the migration to fulfill the heap equation: \begin{itemize} \item Case 1: An item $i^*$ is moved into the buffer bin $bb_j$ such that $bb_j$ is filled completely for some $j<\ell$. According to Algorithm \ref{alg-bb} a bin is taken out of the heap and labeled as the new buffer bin $bb'_j$ with fill ratio $r'_j= 0$ of queue $Q_j$, i.\,e., the number of heap bins decreases by $1$. Let $\Phi(B)$ be the potential before Algorithm \ref{alg-bb} is executed and let $\Phi(B')$ be the potential after Algorithm \ref{alg-bb} is executed. The potential changes as follows: \begin{align*} \Phi(B)-\Phi(B')= (r_j - r'_j) - (\ell(B) - \ell(B')) \end{align*} Since $r'_j = 0$ the new potential is $\Phi(B') = \Phi(B) - r_j \approx \Phi(B) -1$ (assuming $\ell(B) = \ell(B')$, as the splitting of queue is handled later on). \begin{itemize} \item If $\lfloor \Phi(B') \rfloor = \lfloor \Phi(B) \rfloor - 1$ property (4) is fulfilled since the number of heap bins decreases by $h(B') = h(B) - 1 = \lfloor \Phi(B) \rfloor -1 = \lfloor \Phi(B') \rfloor$. As $r_j \geq \frac{c(bb_j)-s}{c(bb_j)}$, where $s$ is the biggest size category appearing in $bb_j$ and $s \leq 2 s(i^*)$, we obtain for the fractional part of the potential that $\{ \Phi(B) \} - \{ \Phi(B') \} \leq 2 \frac{s}{\epsilon} \leq 4 \frac{s(i^*)}{\epsilon}$. Hence the Heap Equation can be fulfilled by shifting items of total size $\mathcal{O}(\nicefrac{s(i^*)}{\epsilon})$ at the end of the insert operation as in the above proof. \item In the case that $\lfloor \Phi(B') \rfloor = \lfloor \Phi(B) \rfloor = \lfloor \Phi(B) - r_j \rfloor$ we know that the fractional part changes by $\{ \Phi(B') \} = \{ \Phi(B) \} - r_j$. Since the bin $bb_j$ is filled completely we know that $r_j \geq \frac{c(bb_j)-s}{c(bb_j)} \approx 1$ and hence $\{ \Phi(B) \} \geq r_j \approx 1$ and $\{ \Phi(B') \} \leq 1-r_j \approx 0$. 
According to the Heap Equation, items have to be shifted out of $bb_{\ell}$ such that the fill ratio $r_{\ell}$ changes from $r_{\ell} \leq 1-r_j$ to $r_{\ell} \approx 1$. Therefore we know that as items are shifted out of $bb_{\ell}$ to fulfill the Heap Equation, the buffer bin $bb_{\ell}$ is being emptied and moved to the heap (see Algorithm \ref{alg-potential}). We obtain for the number of heap bins that $h(B') = h(B) +1 -1 = h(B)$ and hence $h(B') = \lfloor \Phi(B') \rfloor$ (property (4)). As $\{ \Phi(B) \} \geq r_j \geq 1- 4 \frac{s(i^*)}{\epsilon}$, the Heap Equation implies that $r_\ell \leq 4 \frac{s(i^*)}{\epsilon} + \frac{s}{c(bb_\ell)} = \mathcal{O}(\nicefrac{s(i^*)}{\epsilon})$. The buffer bin $bb_\ell$ is thus emptied by moving a size of $\mathcal{O}(\nicefrac{s(i^*)}{\epsilon})$ items out of the bin. Let $bb'_\ell$ be the new buffer bin of $Q_\ell$ that was left of $bb_\ell$. The Heap Equation can be fulfilled by shifting items of total size at most $\mathcal{O}(\nicefrac{s(i^*)}{\epsilon})$ out of $bb'_\ell$ since $\{ \Phi(B') \}$ is bounded by $1 - r_j = \mathcal{O}(\nicefrac{s(i^*)}{\epsilon})$. \item In the case that $|Q_j| > \nicefrac{2}{\epsilon}$ the queue is split into two queues and an additional heap bin is inserted, i.\,e., $h(B'') = h(B') -1$. As the potential changes by $\Phi(B'') = \Phi(B') + (\ell(B') - \ell(B'')) = \Phi(B') - 1$ we obtain again that $h(B'') = \lfloor \Phi(B'')\rfloor $. \end{itemize} \item Case 2: Algorithm \ref{alg-bb} is executed if bin $bb_j$ is emptied due to the removal of an item $i^*$ as a result of a Delete$(i,b_x,Q_j)$ call. According to Algorithm \ref{alg-bb}, the emptied bin is moved to the heap, i.\,e., the number of heap bins increases by $1$. Depending on the length of $Q_j$ and $Q_{j+1}$, the bin right of $bb_j$ or the bin left of $bb_{j}$ is chosen as the new buffer bin $bb'_j$. The potential changes by $\Phi(B') = \Phi(B) + r'_j$, where $r'_j$ is the fill ratio of $bb'_j$ as in case 1.
\begin{itemize} \item If $\lfloor \Phi(B') \rfloor = \lfloor \Phi(B) \rfloor + 1$ property (4) is fulfilled since the number of heap bins increases by $h(B') = h(B) + 1$. As bin $bb'_j$ is completely filled, the fill ratio is bounded by $r'_j \geq 1-2 \frac{s}{\epsilon}$, where $s$ is the largest size appearing in $bb'_j$. Since the bin $b_x$ has to be left of $bb_j$ we know that $s \leq 2s(i)$. We obtain for the fractional part of the potential that $\{ \Phi(B') \} - \{ \Phi(B) \} \leq 2 \frac{s}{\epsilon} \leq 4 \frac{s(i)}{\epsilon}$. Hence the Heap Equation can be fulfilled by shifting items of total size $\mathcal{O}(\nicefrac{s(i)}{\epsilon})$ at the end of the remove operation. \item In the case that $\lfloor \Phi(B') \rfloor = \lfloor \Phi(B) \rfloor = \lfloor \Phi(B) + r'_j \rfloor$ we know that the fractional part changes similarly to case 1 by $\{ \Phi(B') \} = \{ \Phi(B) \} + r'_j$. Since the bin $bb'_j$ is filled completely we know that $r'_j \geq \frac{c(bb'_j)-s}{c(bb'_j)} \approx 1$ and hence $\{ \Phi(B') \} \geq r'_j \approx 1$ and $\{ \Phi(B) \} \leq 1-r'_j \approx 0$. According to the Heap Equation items have to be shifted to $bb_{\ell}$ such that the fill ratio $r_{\ell}$ changes from $r_{\ell} \approx 0$ to $r_{\ell} \approx 1$. Therefore we know that as items are shifted into $bb_{\ell}$ to fulfill the Heap Equation, $bb_{\ell}$ is filled completely and a bin from the heap is labeled as the new buffer bin of $Q_{\ell}$ (see Algorithm \ref{alg-potential}). We obtain for the number of heap bins that $h(B') = h(B) -1 +1 = h(B)$ and hence $h(B') = \lfloor \Phi(B') \rfloor$ (property (4)). The Heap Equation can be fulfilled similarly to case 1 by shifting items of total size $\mathcal{O}(\nicefrac{s(i)}{\epsilon})$. \end{itemize} \end{itemize} \end{proof} Using the above lemma, we can finally prove the following central theorem, which states that the migration of an insert/delete operation is bounded and that properties $(1)$ to $(4)$ are maintained.
\begin{theorem} \label{thm-main-small} \ \begin{enumerate} \item[(i)] Let $B$ be a packing which fulfills properties $(1)$ to $(4)$ and the Heap Equation. Applying operations insert$(i,b_x,Q_j)$ or delete$(i,b_x,Q_j)$ on a packing $B$ yields an instance $B'$ which also fulfills properties $(1)$ to $(4)$ and the Heap Equation. \item[(ii)] The migration factor of an insert/delete operation is bounded by $\mathcal{O}(\nicefrac{1}{\epsilon})$. \end{enumerate} \end{theorem} \begin{proof} Suppose a small item $i$ with size $s(i)$ is inserted or deleted from queue $Q_j$. The insert and delete operation basically consists of application of Algorithm \ref{alg-insertsmall} and iterated use of steps $(1)$ to $(3)$ where Algorithms \ref{alg-bb} and \ref{alg-potential} are used and items in $bb_\ell$ are moved to $Q_{\ell+1}$ and vice versa. Let $B$ be the packing before the insert/delete operation and let $B'$ be the packing after the operation. Proof for (i): Now suppose by induction that properties $(1)$ to $(4)$ and the Heap Equation are fulfilled for packing $B$. We prove that property $(4)$ and the Heap Equation remain fulfilled after applying an insert or delete operation on $B$ resulting in the new packing $B'$. Properties $(1)$ to $(3)$ hold by the conclusions of Lemma \ref{onlysmallitems} and Lemma \ref{lem-algorithm}. Since the potential and the number of heap bins only change as a result of Algorithm \ref{alg-bb} or Algorithm \ref{alg-potential}, property (4) also remains fulfilled. By definition of step 4 in the insert operation, items are shifted from $bb_\ell$ to $Q_{\ell+1}$ until the Heap Equation is fulfilled. By definition of step 4 of the delete operation, the size of small items in $bb_{\ell}$ is adjusted such that the Heap Equation is fulfilled. Hence the Heap Equation is always fulfilled after application of Insert$(i,b_x,Q_j)$ or Delete$(i,b_x,Q_j)$.
Proof for (ii): According to Lemma \ref{onlysmallitems} the migration factor of the usual insert operation is bounded by $\mathcal{O}(\nicefrac{1}{\epsilon})$. By Lemma \ref{lem-algorithm} the migration in Algorithm \ref{alg-bb} and Algorithm \ref{alg-potential} is also bounded by $\mathcal{O}(\nicefrac{1}{\epsilon})$. It remains to bound the migration for step 4 in the insert/delete operation. Therefore we have to analyze the total size of items to be shifted out or into $bb_\ell$ in order to fulfill the Heap Equation. Since the size of all items $i_1, \ldots, i_k$ that are inserted into $bb_j$ is bounded by $7 s(i)$ (see Lemma \ref{onlysmallitems}) and the capacity of $bb_j$ is at least $\nicefrac{\epsilon}{14}$ the potential $\Phi(B)$ changes by at most $\mathcal{O}(\nicefrac{s(i)}{\epsilon})$. By Lemma \ref{lem-algorithm} the size of items that needs to be shifted out or into $bb_\ell$ as a result of Algorithm \ref{alg-bb} or \ref{alg-potential} is also bounded by $\mathcal{O}(\nicefrac{s(i)}{\epsilon})$. Therefore the size of all items that need to be shifted out or into $bb_\ell$ in step (4) of the insert/delete operation is bounded by $\mathcal{O}(\nicefrac{s(i)}{\epsilon})$. Shifting a size of $\mathcal{O}(\nicefrac{s(i)}{\epsilon})$ to $Q_{\ell+1}$ or vice versa leads to a migration factor of $\mathcal{O}(\nicefrac{1}{\epsilon^2})$ (Lemma \ref{onlysmallitems}). Fortunately we can modify the structure of queues $Q_{\ell+1}$ and $Q_{\ell+2}$ such that we obtain a smaller migration factor. Assuming that $Q_{\ell+1}$ consists of a single buffer bin, i.\,e., $|Q_{\ell+1}| = 1$ items can directly be shifted from $bb_\ell$ to $bb_{\ell+1}$ and therefore we obtain a migration factor of $\mathcal{O}(\nicefrac{1}{\epsilon})$. 
A structure with $|Q_{\ell+1}| = 1$ and $1 \leq |Q_{\ell+2}| \leq \nicefrac{2}{\epsilon}$ (see property (3)) can be maintained by changing Algorithm \ref{alg-bb} in the following way: \begin{itemize} \item If $bb_{\ell+1}$ is filled completely, move the filled bin to $Q_{\ell+2}$. \begin{itemize} \item If $|Q_{\ell+2}| > \nicefrac{2}{\epsilon}$, split $Q_{\ell+2}$ into two queues. \end{itemize} \item If $bb_{\ell+1}$ is being emptied, remove the bin and label the first bin of $Q_{\ell+2}$ as $bb_{\ell+1}$. \begin{itemize} \item If $|Q_{\ell+2}| = 0$, remove $Q_{\ell+2}$. \end{itemize} \end{itemize} \end{proof} \subsection{Handling the General Setting} \label{sec:final} In the previous section we described how to handle small items in a mixed setting. It remains to describe how large items are handled in this mixed setting. Algorithm \ref{alg-afptas} describes how to handle large items only. However, in a mixed setting, where there are also small items, we have to make sure that properties $(1)$ to $(4)$ and the Heap Equation remain fulfilled as a large item is inserted or deleted. Algorithm \ref{alg-afptas} changes the configuration of at most $\mathcal{O}(\nicefrac{1}{\epsilon^2} \cdot \log \nicefrac{1}{\epsilon})$ bins (Theorem \ref{thm-main}). Therefore, the size of large items in a bin $b$ ($= 1-c(b)$) changes, as Algorithm \ref{alg-afptas} may increase or decrease the capacity of a bin. Changing the capacity of a bin may violate properties (2) to (4) and the Heap Equation. We describe an algorithm to change the packing of small items such that all properties and the Heap Equation are fulfilled again after Algorithm \ref{alg-afptas} has been applied. The following algorithm describes how the length of a queue $Q_{j}$ is adjusted if the length $|Q_{j}|$ falls below $\nicefrac{1}{\epsilon}$: \begin{algo}[Adjust the queue length] \ \begin{itemize} \item Remove all small items $I_S$ from $bb_{j}$ and add $bb_j$ to the heap. \item Merge $Q_j$ with $Q_{j+1}$. 
The merged queue is called $Q_j$. \item If $|Q_{j}|> \nicefrac{2}{\epsilon}$, split queue $Q_j$ by adding a heap bin in the middle. \item Insert items $I_S$ using Algorithm \ref{algo-small-items-general}. \end{itemize} \end{algo} The following algorithm describes how the number of heap bins can be adjusted. \begin{algo}[Adjust number of heap bins] \label{algo-adjust-heap} \ \begin{itemize} \item Decreasing the number of heap bins by $1$. \begin{itemize} \item Shift small items from $Q_{\ell+1}$ to $bb_{\ell}$ until $bb_{\ell}$ is filled completely \item Label a heap bin as the new buffer bin of $Q_{\ell}$ \end{itemize} \item Increasing the number of heap bins by $1$. \begin{itemize} \item Shift all small items from $bb_{\ell}$ to $Q_{\ell+1}$ \item Label $bb_{\ell}$ as a heap bin \item Label the bin left of $bb_{\ell}$ as the new buffer bin of $Q_{\ell}$ \end{itemize} \end{itemize} \end{algo} Note that the Heap Equation can be fulfilled in the same way, by shifting items from $bb_{\ell}$ to $Q_{\ell+1}$ or vice versa. Using these algorithms, we obtain our final algorithm for the fully dynamic bin packing problem. \begin{algo}[\ac{afptas} for the mixed setting] \label{algo-final-afptas} \ \begin{itemize} \item If $i$ is large, do \begin{enumerate} \item Use Algorithm \ref{alg-afptas}. \item Remove all small items $I_S$ of bins $b$ with changed capacity. \item Adjust queue length. \item Adjust the number of heap bins. \item Adjust the Heap Equation. \item Insert all items $I_S$ using Algorithm \ref{algo-small-items-general}. \end{enumerate} \item If $i$ is small, use Algorithm \ref{algo-small-items-general} \end{itemize} \end{algo} Combining all the results from the current and the previous section, we finally prove the central result that there is a fully dynamic \ac{afptas} for the bin packing problem with polynomial migration. 
\begin{theorem} Algorithm \ref{algo-final-afptas} is a fully dynamic \ac{afptas} for the bin packing problem, which achieves a migration factor of at most $\mathcal{O}(\nicefrac{1}{\epsilon^4} \cdot \log \nicefrac{1}{\epsilon})$ by repacking items from at most $\mathcal{O}(\nicefrac{1}{\epsilon^3} \cdot \log \nicefrac{1}{\epsilon})$ bins. \end{theorem} \begin{proof} {\bf Approximation guarantee:} By definition of the algorithm, it generates at every timestep $t$ a packing $B_t$ of instance $I(t)$ such that properties $(1)$ to $(4)$ are fulfilled. According to Lemma \ref{lem-smallitems-approximation}, at most $\max\{\Lambda,(1+\mathcal{O}(\epsilon))\operatorname{\text{\textsc{opt}}}(I(t),s)+\mathcal{O}(1)\}$ bins are used, where $\Lambda$ is the number of bins containing large items. Since we use Algorithm \ref{alg-afptas} to pack the large items, Theorem \ref{thm-main} implies that $\Lambda\leq (1+\mathcal{O}(\epsilon))\operatorname{\text{\textsc{opt}}}(I(t),s)+\mathcal{O}(\nicefrac{1}{\epsilon}\log \nicefrac{1}{\epsilon})$. Hence the number of used bins can be bounded in any case by $(1+\mathcal{O}(\epsilon))\operatorname{\text{\textsc{opt}}}(I(t),s)+\mathcal{O}(\nicefrac{1}{\epsilon}\log \nicefrac{1}{\epsilon})$. {\bf Migration Factor:} Note that the algorithm uses Algorithm \ref{algo-small-items-general} or Algorithm \ref{alg-afptas} to insert and delete small or large items. The migration factor for Algorithm \ref{algo-small-items-general} is bounded by $\mathcal{O}(\nicefrac{1}{\epsilon})$ due to Theorem \ref{thm-main-small} while the migration factor for Algorithm \ref{alg-afptas} is bounded by $\mathcal{O}(\nicefrac{1}{\epsilon^3}\cdot \log \nicefrac{1}{\epsilon})$ due to Theorem \ref{thm-main}. It remains to bound the migration that is needed to adjust the heap bins, the length of a queue falling below $\nicefrac{1}{\epsilon}$, and the Heap Equation in case a large item arrives and Algorithm \ref{alg-afptas} is applied. 
Suppose the number of heap bins has to be adjusted by $1$. In this case Algorithm \ref{algo-adjust-heap} shifts items from $Q_{\ell+1}$ to $bb_{\ell}$ or vice versa until $bb_{\ell}$ is either filled or emptied. Hence, the size of moved items is bounded by $1$. Since the size of the arriving or departing item is $\geq \nicefrac{\epsilon}{14}$ the migration factor is bounded by $\mathcal{O}(\nicefrac{1}{\epsilon})$. In the same way, a migration of at most $\mathcal{O}(\nicefrac{1}{\epsilon})$ is used to fulfill the Heap Equation which implies that the migration in step 5 is bounded by $\mathcal{O}(\nicefrac{1}{\epsilon})$. If $|Q_j|$ falls below $\nicefrac{1}{\epsilon}$, the two queues $Q_j$ and $Q_{j+1}$ are merged by emptying $bb_j$. The removed items are inserted by Algorithm \ref{algo-small-items-general}. As their total size is bounded by $1$ and the algorithm has a migration factor of $\mathcal{O}(\nicefrac{1}{\epsilon})$, the size of the moved items is bounded by $\mathcal{O}(\nicefrac{1}{\epsilon})$. The migration to merge two queues can thus be bounded by $\mathcal{O}(\nicefrac{1}{\epsilon^2})$. Note that the proof of Theorem \ref{thm-main} implies that at most $\gamma=\mathcal{O}(\nicefrac{1}{\epsilon^2}\log \nicefrac{1}{\epsilon})$ bins are changed by Algorithm \ref{alg-afptas}. The total size of the items $I_S$ which are removed in step 2 is thus bounded by $\gamma$. Similarly, the length of at most $\gamma$ queues can fall below $\nicefrac{1}{\epsilon}$. The migration of step 3 is thus bounded by $\gamma\cdot \nicefrac{1}{\epsilon^2}$. As at most $\gamma$ buffer bins are changed, the change of the potential (and thus the number of heap bins) is also bounded by $\gamma$ and the migration in step 4 can be bounded by $\gamma\cdot \nicefrac{1}{\epsilon}$. The migration in step 6 is bounded by $s(I_S) \cdot \nicefrac{1}{\epsilon}\leq \gamma\cdot \nicefrac{1}{\epsilon}$ as Algorithm \ref{algo-small-items-general} has migration factor $\nicefrac{1}{\epsilon}$. 
The total migration of the adjustments is thus bounded by $\gamma\cdot \nicefrac{1}{\epsilon^2}=\mathcal{O}(\nicefrac{1}{\epsilon^4}\log \nicefrac{1}{\epsilon})$. {\bf Running Time:} The handling of small items can be performed in linear time while the handling of large items requires $\mathcal{O}(M(\nicefrac{1}{\epsilon} \log(\nicefrac{1}{\epsilon}))\cdot \nicefrac{1}{\epsilon^3} \log(\nicefrac{1}{\epsilon})+\nicefrac{1}{\epsilon} \log(\nicefrac{1}{\epsilon}) \log(\epsilon^2\cdot n(t))+\epsilon n(t))$, where $M(n)$ is the time needed to solve a system of $n$ linear equations (see Theorem \ref{thm-main}). The total running time of the algorithm is thus $\mathcal{O}(M(\nicefrac{1}{\epsilon} \log(\nicefrac{1}{\epsilon}))\cdot \nicefrac{1}{\epsilon^3} \log(\nicefrac{1}{\epsilon})+\nicefrac{1}{\epsilon} \log(\nicefrac{1}{\epsilon}) \log(\epsilon^2\cdot n(t))+ n(t))$. \end{proof} \end{document}
\begin{document} \title[{On the slopes of semistable representations of tame quivers}]{On the slopes of semistable representations\\ of tame quivers} \author[X. Wang]{Xintian Wang} \address{School of Mathematical Sciences, Beijing Normal University, Beijing 100875, China.} \email{[email protected] } \thanks{Supported by the Natural Science Foundation of China.} \keywords{stability condition, slope, tame quiver} \subjclass[2010]{16G20, 16G70.} \begin{abstract} Stability conditions play an important role in the study of representations of a quiver. In the present paper, we study semistable representations of quivers. In particular, we describe the slopes of semistable representations of a tame quiver for a fixed stability condition. \end{abstract} \maketitle \section{Introduction and preliminaries} The notion of stability was firstly introduced by Mumford in his work on the geometric invariant theory in 1960s and soon became widely used as a technical tool while constructing moduli varieties. In \cite{ADK} King set up the semistability and stability in the language of the module category over a finite dimensional algebra, more generally for an arbitrary abelian category. Let $Q=(Q_0,Q_1)$ be a finite acyclic quiver (i.e., without oriented cycles) with vertex set $I=Q_0$ and arrow set $Q_1$. Let $k$ be an algebraically closed field and ${\text{\rm mod}} kQ$ denote the category of finite dimensional modules over the path algebra $kQ$ (equivalently, finite dimensional representations of $Q$ over $k$). Following Reineke \cite{MR}, a stability in ${\text{\rm mod}} kQ$ is defined relative to a slope function $\mu$ on ${\mathbb N} I\backslash\{0\}$. More precisely, a $kQ$-module $X$ is called semistable (resp. stable) if $\mu({{\rm\bf dim}\,} U)\leq \mu({{\rm\bf dim}\,} X)$ (resp. 
$\mu(\textbf{dim} U)< \mu(\textbf{dim} X)$) for all proper submodules $0\neq U\subseteq X$, where ${{\rm\bf dim}\,} X$ and ${{\rm\bf dim}\,} U$ denote the dimension vectors of $X$ and $U$, respectively. In this case, $\mu({{\rm\bf dim}\,} X)$ is called the slope of $X$. For each $a\in\mathbb Q$, let ${\text{\rm mod}}^{a}kQ$ denote the full subcategory of ${\text{\rm mod}} kQ$ consisting of semistable $kQ$-modules of slope $a$. It is known that each ${\text{\rm mod}}^{a}kQ$ is an abelian subcategory of ${\text{\rm mod}} kQ$. In case $Q$ is a Dynkin or tame quiver, the subcategory ${\text{\rm mod}}^{a}kQ$ has been characterized in \cite{IPT,IT}. The main purpose of the present paper is to describe the slopes of semistable modules of $kQ$ when $Q$ is a tame quiver. This is based on an investigation of the structure of the subcategories ${\text{\rm mod}}^{a} kQ$. In the following we briefly review some basic facts about finite dimensional algebras and their representations. We also introduce the stability condition for a finite dimensional algebra. We refer to \cite{ASS,ARS,HP,DDPW} for more details and complete treatments. Let $k$ be a field and $A$ be a finite dimensional algebra over $k$. By ${\text{\rm mod}} A$ we denote the category of all finite dimensional left $A$-modules. Let $I$ denote the set of isoclasses of simple objects in ${\text{\rm mod}} A$, and fix a set $\{S_i\mid i\in I\}$ of representatives of the isoclasses in $I$. For any $M\in$ ${\text{\rm mod}} A$, let $[M]$ denote the isoclass of $M$ and ${{\rm\bf dim}\,} M$ the dimension vector of $M$. More precisely, if ${{\rm\bf dim}\,} M=(x_i)_{i\in I}$, then $x_i$ is the number of composition factors isomorphic to $S_i$ in a composition series of $M$. \def\thz{\theta}Further, set $f_i={\rm dim}\,_{k}{\rm End}_{A}(S_i)$. From now onwards, we always assume that $A$ is hereditary. 
\def\lra{\longrightarrow}The Euler form of $A$ is defined by $$\langle{{\rm\bf dim}\,} M,{{\rm\bf dim}\,} N\rangle={\rm dim}\,_{k}{\mathcal H}om_{A}(M,N)-{\rm dim}\,_{k}{{\rm Ext}\,}_{A}^{1}(M,N),$$ where $M,N\in{\text{\rm mod}} A$. Further, let $\Gamma_{A}$ be the Auslander--Reiten quiver of $A$ with the Auslander--Reiten translation $\tau=\tau_A$. A connected component $\mathcal{P}$ in $\Gamma_{A}$ is called preprojective (resp. preinjective) if, for each vertex $[M]$ in $\mathcal{P}$, the supremum (resp. infimum) of the lengths of the paths ending (resp. starting) at $[M]$ is finite. Otherwise, it is called regular. An indecomposable $A$-module is called preprojective (resp. preinjective) if it belongs to a preprojective (resp. preinjective) component of $\Gamma _{A}$ and an arbitrary $A$-module is called preprojective (resp. preinjective) if it is a direct sum of indecomposable preprojective (resp. preinjective) modules. Otherwise, it is a regular module. Suppose now that $A$ is of tame type. Let $\delta$ be the minimal positive imaginary root of $A$. Recall from \cite{DR} that the defect $\partial(M)$ of a module $M$ is defined to be the integer $\langle \delta, {{\rm\bf dim}\,} M \rangle$. Then an indecomposable module $M$ is preprojective (resp. regular, preinjective) if and only if $\partial(M)<0$ (resp. $\partial(M)=0$, $\partial(M)>0$). A translation quiver $(\mathcal{T},\tau)$ is defined to be a stable tube of rank $r\geq1$ if there is an isomorphism of translation quivers $\mathcal{T} \cong \mathbb{Z}\mathbb{A}_{\infty}/(\tau^r)$. A stable tube of rank $r=1$ is defined to be a homogeneous tube; otherwise, it is a non-homogeneous tube. A representation in a stable tube which has only one arrow to and from it is called quasi-simple. The following result is well known; see \cite{DR}. \begin{lem} Let $A$ be a finite dimensional hereditary algebra of tame type. 
\def\az{\alpha}\def\dz{\delta}Then the Auslander--Reiten quiver $\Gamma_{A}$ of $A$ contains a preprojective component $\mathcal{P}$, a preinjective component $\mathcal{I}$, and a $\mathbb{P}^1(k)$-family $\{\mathcal{T}_\lambda\}$ of stable tubes of which only finitely many ones are non-homogeneous. Moreover, for $\lambda, \lambda'\in \mathbb{P}^1(k)$, we have \begin{itemize} \item[(1)] ${\mathcal H}om({\mathcal I},{\mathcal P}\bigcup {\mathcal T}_\lambda)=0$ and ${\mathcal H}om({\mathcal T}_\lambda,{\mathcal P})=0$, \item[(2)] ${\mathcal H}om({\mathcal T}_{\lambda},{\mathcal T}_{\lambda'})=0$ if $\lambda \neq \lambda'$. \end{itemize} \end{lem} Take $\theta=(\theta_i)_{i\in I}\in{\mathbb Z} I$ and define a linear form on $\mathbb{Z} I$ by setting $\theta (d)= \sum_{i\in I}\theta_id_if_i$ (for simplicity, we still denote by $\theta$ the linear form), where $d=(d_i)_{i\in I}\in{\mathbb Z} I$. We call $\theta$ a weight for $A$. The slope function $\mu$ on $\mathbb{N} I\backslash \{0\}$ associated to $\theta$ is defined by $$\mu(d)=\frac{\sum\limits_{i\in I}\theta_id_if_i }{ \sum\limits_{i\in I}d_if_i}.$$ For each $M\in{\text{\rm mod}} A$, write $\mu(M)$ for $\mu({{\rm\bf dim}\,} M)$. A module $M\in{\text{\rm mod}} A$ is called {\it semistable} (resp. {\it stable}) if $\mu(U)\leq \mu(M)$ (resp. $\mu(U)< \mu(M)$) for all proper submodules $0\neq U\subset M$. The following two lemmas are well known; see, for example, \cite{MR}. \begin{lem}\label{ses} Given a short exact sequence $$0\longrightarrow M\longrightarrow X\longrightarrow N\longrightarrow 0$$ in ${\text{\rm mod}} A$, we have $$\mu(M)\leq \mu(X) \Longleftrightarrow\mu(X)\leq \mu(N)\Longleftrightarrow\mu(M)\leq \mu(N)\;\text{ and}$$ $${\rm min}(\mu(M),\mu(N))\leq \mu(X)\leq{\rm max}(\mu(M),\mu(N)).$$ If $\mu(M)=\mu(X)=\mu(N)$, then $X$ is semistable if and only if $M$ and $N$ are semistable. 
\end{lem} By the above lemma, if $M=M_1\oplus M_2$ with both $M_1$ and $M_2$ indecomposable, then $M$ is semistable if and only if $M_1$ and $M_2$ are semistable and $\mu(M_1)=\mu(M_2)=\mu(M)$. Hence, for semistable $A$-modules, it is enough to consider the indecomposable ones. \begin{lem}\label{equivalent definition} Let $X\in$ ${\text{\rm mod}} A$. Then $X$ is semistable {\rm(}resp. stable{\rm)} if and only if $\mu(X)\leq \mu(U)$ {\rm(}resp. $\mu(X) < \mu(U)${\rm)} for all proper quotient modules $U$. \end{lem} For each $a\in \mathbb{Q}$, denote by ${\text{\rm mod}}^{a}A$ the full subcategory of ${\text{\rm mod}} A$ consisting of semistable $A$-modules of slope $a$. By convention, we always assume that ${\text{\rm mod}}^{a}A$ contains the zero module $0$. \begin{lem}\label{prop-of-sub} For each $a\in \mathbb{Q}$, the category ${\text{\rm mod}}^{a}A$ is an abelian subcategory of ${\text{\rm mod}} A$ whose simple objects are the indecomposable stable $A$-modules of slope $a$. Moreover, we have that ${\mathcal H}om({\text{\rm mod}}^aA,{\text{\rm mod}}^bA)=0$ whenever $a> b$. \end{lem} \section{Category of semistable $kQ$-modules of slope $a$} In this section, we recall some results from \cite{IPT,IT} which will be needed in the next section in order to prove our main result. In the following, we assume that $k$ is an algebraically closed field, $Q$ is an acyclic quiver, and $A$ is the path algebra $kQ$. Thus, the set $I$ of isoclasses of simple $A$-modules is identified with the vertex set $Q_0$, and \def\thz{\theta}$f_i={\rm dim}\,_{k}{\rm End}_{A}(S_i)=1$ for all $i\in I$. In case $Q$ is a tame quiver, we denote by $\delta$ the minimal positive imaginary root of $Q$, and let $\mathcal{P}$ and $\mathcal{I}$ be the preprojective and preinjective components, respectively, and let $\mathcal{R}$ be the union of all tubes of the Auslander--Reiten quiver $\Gamma_A$. We first introduce a different stability notion as follows; see \cite{HP}. 
Let $\theta=(\theta_i)_{i\in I}$ be a weight for $Q$. We denote $\theta({\rm\bf dim}\, M)$ by $\theta(M)$. A module $M\in$ ${\text{\rm mod}} A$ is called $\theta$-semistable (resp. $\theta$-stable) if $\theta(M)=0$ and $\theta(U)\leq 0$ (resp. $\theta(U)< 0$) for any proper submodule $0\neq U\subseteq M$. Finally, by ${\text{\rm mod}}_{\theta}A$ we denote the full subcategory of ${\text{\rm mod}} A$ consisting of all the $\theta$-semistable modules. \begin{lem} \label{stab-cond-equiv} Let $\theta=(\theta_i)_{i\in I}$ be a weight. Then for each $a\in \mathbb{Q}$, ${\text{\rm mod}}^{a}A={\text{\rm mod}}_{\theta'}A$, where $\theta'=\theta-a\theta_0$ and $\theta_0= (1)_{i\in I}$. Conversely, for a weight $\omega=(\omega_i)_{i\in I}$, there exists a weight $\theta=(\theta_i)_{i\in I}$ and $a\in \mathbb{Q}$ such that ${\text{\rm mod}}_{\omega}A={\text{\rm mod}}^{a}A$. \end{lem} \begin{proof} By the definition, for a weight $\theta=(\theta_i)_{i\in I}$, if $\mu$ is the slope function associated with $\theta$, then $\mu(M)= a$ if and only if $(\theta - a\theta_0)(M)=0$. Moreover, $\mu(M)\leq a$ if and only if $(\theta - a\theta_0)(M)\leq 0$. This implies the desired statements. \end{proof} By \cite{IPT}, we have the following statement. \begin{thm}\label{subcat-equiv} Let $Q$ be a tame quiver and $\theta=(\theta_i)_{i\in I}$ be a weight for $A=kQ$. Then for each $a\in\mathbb Q$, the subcategory ${\text{\rm mod}}^{a}A$ is equivalent to one of the following two categories: \begin{itemize} \item[(1)] the module category ${\text{\rm mod}} kQ'$ of the path algebra $kQ'$ for a Dynkin or tame quiver $Q'$; \item[(2)] the full subcategory $\mathcal{R'}$ consisting of all the regular objects of ${\text{\rm mod}} kQ'$ with $Q'$ a possibly disconnected tame quiver (i.e., a quiver with one tame component and all other components (if any) Dynkin). \end{itemize} \end{thm} The following statement is an easy consequence of the above theorem. 
\def\lra{\longrightarrow}\begin{cor}\label{Euler-type-equiv} Let $Q$ and $Q'$ be as in Theorem {\rm\ref{subcat-equiv}} with $A=kQ$ and $B=kQ'$. Let $\langle-,-\rangle_{B}$ and $\langle-,-\rangle_{A}$ be the Euler forms associated to $B$ and $A$, respectively. Then for any $M,N \in$ ${\text{\rm mod}} B$, $M,N$ can be viewed as $A$-modules and, moreover, $$\langle M,N\rangle_{B}= \langle M,N\rangle_A.$$ \end{cor} \begin{prop}\label{decr} Let $Q$ be a tame quiver and $\theta=(\theta_i)_{i\in I}$ be a weight for $A=kQ$. If there is an indecomposable $A$-module of dimension vector $m\delta$ in ${\text{\rm mod}}^{a}A$ for some $m\geq 1$, then there is an indecomposable $A$-module of dimension vector $\delta$ in ${\text{\rm mod}}^{a}A$. \end{prop} \begin{proof} Let $M$ be an indecomposable $A$-module in ${\text{\rm mod}}^{a}A$ with ${{\rm\bf dim}\,} M= m\delta$ for some $m\geq 1$. Then $M$ has a submodule $N$ with ${{\rm\bf dim}\,} N=\delta$, which gives an exact sequence $$0 \longrightarrow N \longrightarrow M \longrightarrow M/N \longrightarrow 0.$$ Since $\mu(N)=\mu(\delta)=\mu(M)$, we have by Lemma \ref{prop-of-sub} that $N$ is semistable. Hence, $N$ lies in ${\text{\rm mod}}^{a}A$. \end{proof} \begin{cor}\label{classification of slope} We keep the notations as in the above proposition and take $a\in\mathbb Q$. {\rm (1)} If $a\neq \mu(\delta)$, then ${\text{\rm mod}}^{a}A$ is equivalent to ${\text{\rm mod}} kQ'$, where $Q'$ is a Dynkin quiver. {\rm (2)} If ${\text{\rm mod}}^aA \cap \mathcal{P} \neq \varnothing$ or ${\text{\rm mod}}^aA \cap \mathcal{I} \neq \varnothing$, then ${\text{\rm mod}}^{a}A$ is equivalent to ${\text{\rm mod}} kQ'$, where $Q'$ is a Dynkin or tame quiver. Moreover, if ${\text{\rm mod}}^{a}A$ contains an indecomposable module with dimension vector $\delta$, then $Q'$ is a tame quiver. Otherwise, $Q'$ is a Dynkin quiver. 
\def\lra{\longrightarrow}{\rm (3)} If ${\text{\rm mod}}^aA \subset \mathcal{R}$, and has no indecomposable object with dimension vector $\delta$, then ${\text{\rm mod}}^aA$ is equivalent to ${\text{\rm mod}} kQ'$, where $Q'$ is a Dynkin quiver. {\rm (4)} If ${\text{\rm mod}}^aA \subset \mathcal{R}$, and has an indecomposable object with dimension vector $\delta$, then ${\text{\rm mod}}^{a}A$ is equivalent to the full subcategory consisting of all the regular objects of ${\text{\rm mod}} kQ'$, where $Q'$ is a possibly disconnected tame quiver. \end{cor} \begin{proof} (1) Suppose that ${\text{\rm mod}}^{a}A$ is equivalent to ${\text{\rm mod}} kQ'$, where $Q'$ is a tame quiver. By Corollary \ref{Euler-type-equiv}, there is an indecomposable module $M$ in ${\text{\rm mod}}^{a}A$ with ${{\rm\bf dim}\,} M= m\delta$ for some $m\geq 1$. Then $a= \mu(M)=\mu(m\delta)=\mu(\delta)$, which is a contradiction. Similarly, ${\text{\rm mod}}^{a}A$ is not equivalent to the full subcategory consisting of all the regular objects of ${\text{\rm mod}} kQ'$, where $Q'$ is a possibly disconnected tame quiver. The statement follows from Theorem \ref{subcat-equiv}. (2) For the first statement, suppose that ${\text{\rm mod}}^{a}A$ is equivalent to $\mathcal{R'}$, where $\mathcal{R'}$ denotes the full subcategory consisting of all the regular objects of ${\text{\rm mod}} kQ'$ with $Q'$ a possibly disconnected tame quiver. Let $\delta '$ be the minimal positive imaginary root of $Q'$ and take $M \in \mathcal{R'}$ with ${{\rm\bf dim}\,} M=\delta'$. Then for all $N$ in ${\text{\rm mod}}^{a}A$, $$\langle M,N\rangle_{A}=\langle M,N\rangle_{B}=0.$$ Here $M,N$ are viewed as both $A$-modules and $B$-modules. But since ${\text{\rm mod}}^aA \cap \mathcal{P} \neq \varnothing$ or ${\text{\rm mod}}^aA \cap \mathcal{I} \neq \varnothing$, there exists a module $N_0 \in$ ${\text{\rm mod}}^{a}A$ such that $\langle M,N_0\rangle_{A}\neq 0$, a contradiction. 
The second statement follows from Corollary \ref{Euler-type-equiv}. (3) By Proposition \ref{decr}, there exists no indecomposable module in ${\text{\rm mod}}^{a}A$ with dimension vector $m\delta,m\geq 1$. Then the statement follows from Corollary \ref{Euler-type-equiv} and Theorem \ref{subcat-equiv}. (4) The proof is similar to (2). \end{proof} \section{The slopes of semistable $kQ$-modules} In this section, we describe the slopes of semistable $kQ$-modules in case $Q$ is a tame quiver. The main result is stated in Theorem \ref{main theorem}. We keep all the notations in the previous section. In particular, $Q=(I=Q_0,Q_1)$ denotes an acyclic quiver and $A=kQ$ is the path algebra of $Q$ over an algebraically closed field. We denote by ${\text{\rm mod}} ^{ss}A$ the full subcategory of ${\text{\rm mod}} A$ consisting of semistable $A$-modules. Hence, ${\text{\rm mod}}^{ss}A=\displaystyle\cup_{a\in \mathbb{Q}} {\text{\rm mod}} ^aA$. For a weight $\theta=(\theta_i)_{i\in I}$ for $A$, define $${\scr X}_\theta=\{ a\in {\mathbb{Q}} \mid {\text{\rm mod}}^aA \;\text{ is non-zero} \}.$$ Our main aim in this section is to describe the set ${\scr X}_\theta$ in case $Q$ is a tame quiver. First of all, we have the following facts in some special cases. \begin{prop} {\rm (1)} $|{\scr X}_\theta|=1$ if and only if all $A$-modules are semistable, i.e. ${\text{\rm mod}} ^{ss}A={\text{\rm mod}} A$. In other words, all $\theta_i$, $i\in I$, coincide. {\rm (2)} $|{\scr X}_\theta|=2$ if and only if $Q$ contains two full subquivers $Q'=(Q'_0,Q'_1)$ and $Q''=(Q_0'',Q_1'')$ such that $Q_0=Q_0'\cup Q_0''$ and there are no arrows from $Q_0''$ to $Q_0'$ and $\theta_{i_1}=\theta_{j_1}<\theta_{i_2}=\theta_{j_2}$ for all $i_1,j_1\in Q_0',i_2,j_2\in Q_0''$. \end{prop} \begin{proof} {\rm (1)} Since each simple module $S_i$ is semistable, it follows that $\theta _i=\mu(S_i)\in {\scr X}_\theta$. Thus, if $|{\scr X}_\theta|=1$, then $\theta_i=\theta_j$ for all $i\neq j\in Q_0$. 
This implies that the slopes of all $A$-modules are equal. Therefore, all $A$-modules are semistable. Conversely, assume that all $A$-modules are semistable. Suppose $|{\scr X}_\theta|>1$, i.e., there are $i,j\in Q_0$ such that $\theta _i\neq \theta_j$. Without loss of generality, we assume $\theta _i< \theta_j$. This implies that the semisimple module $S_i\oplus S_j$ is not semistable. This is a contradiction. Hence, $|{\scr X}_\theta|=1$. {\rm (2)} Suppose $|{\scr X}_\theta|=2$, say ${\scr X}_\theta=\{a,b\}$ with $a<b$. Set $$Q_0'=\{i\in I\mid \theta_i=a\}\;\text{ and }\;Q_0''=\{i\in I\mid \theta_i=b\}.$$ Let $Q'$ and $Q''$ be the full subquivers of $Q$ with vertex sets $Q_0'$ and $Q_0''$, respectively. Then $Q_0$ is the disjoint union of $Q'_0$ and $Q''_0$. Suppose there is an arrow $j\longrightarrow i$ with $i\in Q_0'$ and $j\in Q_0''$. Consider the indecomposable $A$-module $M$ with socle $S_i$ and $M/S_i\cong S_j$. Then $M$ is semistable and $\mu({{\rm\bf dim}\,} M)=(a+b)/2\in{\scr X}_\theta$. This contradicts the assumption ${\scr X}_\theta=\{a,b\}$ since $a<(a+b)/2<b$. Therefore, there are no arrows from $Q_0''$ to $Q_0'$. The converse follows from the fact that each semistable $A$-module is either a $kQ'$-module or a $kQ''$-module. \end{proof} From now onwards, we assume that $Q$ is a (connected) tame quiver which is obtained from a (connected) Dynkin quiver $\Gamma$ of type $A,D,E$ by adding a vertex. This gives symmetric Cartan matrices $C_{Q}$ and $C_\Gamma$ of $Q$ and $\Gamma$, respectively. Thus, we have the associated Kac--Moody Lie algebras ${\frak g}(C_{Q})$ and ${\frak g}(C_\Gamma)$. Let $\Delta_0$ and $\Delta_0^+$ be the set of real roots and the set of positive real roots of ${\frak g}(C_\Gamma)$, respectively. 
\def\lra{\longrightarrow}By \cite{K}, the set of positive real roots of ${\frak g}(C_{Q})$ can be described as $$\Delta_+^{\rm re}=\{\alpha+n\delta \mid \alpha\in \Delta_0,n\geq1\}\cup \Delta_0^+,$$ and its set of imaginary roots is $\Delta^{\rm im}={\mathbb Z}\delta\backslash\{0\}$, where $\delta$ denotes the minimal positive imaginary root of $Q$. Let $\mathcal P$ be the preprojective component of the Auslander--Reiten quiver of $A=kQ$. By \cite{R}, the dimension vectors of $P\in\mathcal P$ are positive real roots of ${\frak g}(C_{Q})$. Let $P_1,P_2,\ldots,P_N$ be all the indecomposable preprojective $A$-modules, up to isomorphism, with ${{\rm\bf dim}\,} P_i=\alpha_i<\delta$. For each $P\in \mathcal{P}$, ${{\rm\bf dim}\,} P=\alpha+n\delta$, with $\alpha\in\Delta^{\text{re}}_{+}$ and $\alpha<\delta$. Then $0>\partial(P)=\langle\delta, \alpha+n\delta\rangle=\langle\delta, \alpha\rangle$. Thus, the indecomposable module $X$ with ${{\rm\bf dim}\,} X=\alpha$ is preprojective. Hence, $X\cong P_i$ and $\alpha=\alpha _i$ for some $1\leq i\leq N$, i.e., ${{\rm\bf dim}\,} P=\alpha _i+n\delta$. The following fact is well known. \begin{lem}\label{existence of homomorphism} Let $M\in\mathcal{P}$. Then there exists $m\gg 0$ such that for each projective module $P$, ${\mathcal H}om_A(M,\tau^{-n} P)\neq0$ whenever $n\geq m$. \end{lem} \begin{lem}\label{maximal slope} Suppose $M\in\mathcal{P}$ is semistable satisfying $\mu(P_i)\leq\mu(M)$ for all $1\leq i\leq N$ and $\mu(\delta)<\mu(M)$. Then there are only finitely many semistable modules in $\mathcal{P}$. \end{lem} \begin{proof} By Lemma \ref{existence of homomorphism}, there is $m\gg0$ such that for each projective module $P$ and $n\geq m$, ${\mathcal H}om_A(M,\tau^{-n} P)\neq0$. We can assume that ${{\rm\bf dim}\,} \tau^{-m} P >\delta$. This implies that $\mu(M)>\mu(\tau^{-m} P)$. 
By Lemma \ref{prop-of-sub}, $\tau^{-m} P$ is not semistable. Therefore, there are only finitely many semistable modules in $\mathcal{P}$. \end{proof} Now we recall some facts about the regular $A$-modules from \cite{CB1,CB2,R}. Let $\mathcal T$ be a tube of rank $r$ in the Auslander--Reiten quiver of $A$. Let $E_1,E_2,\ldots,E_r$ be the quasi-simple modules in $\mathcal T$ with $\tau(E_i)=E_{i+1}$, $1\leq i\leq r$, where $E_{r+1}=E_1$. Let $E_{i,j}$ denote the indecomposable module in $\mathcal T$ with quasi-length $j$ and quasi-socle $E_i$. It is known that $E_{i,j}$ is regular uniserial with regular composition factors of the form $E_i,\tau^{-1} E_i,\ldots,\tau^{-(r-1)}E_i$ and ${{\rm\bf dim}\,} E_{i,j}={{\rm\bf dim}\,} E_{i,j_0}+n\delta$, where $j=j_0+nr$ for some $0\leq j_0< r$ and $n\geq 0$. We have the following known fact. \begin{prop}\label{r3} Let $1\leq i\leq r$ and $m\geq 1$. Then for any $1\leq s\leq r$ and $m\delta\leq j< (m+1)\delta$, $${\mathcal H}om_A(E_{i,m\delta},E_{s,j})\neq0\;\text{ and }\;{\mathcal H}om_A(E_{s,j},E_{i,(m+1)\delta})\neq0.$$ \end{prop} \begin{lem}\label{dimension vector comparison} Let $P\in\mathcal{P}$. Assume that ${{\rm\bf dim}\,} P=\alpha_i+n\delta$ and $M$ is a submodule of $P$ with ${{\rm\bf dim}\,} M=\alpha_j+n'\delta$, where $1\leq i,j\leq N$. Then $n'\leq n$. \end{lem} \begin{proof} Suppose $n'> n$. Since ${{\rm\bf dim}\,} M=\alpha_j+n'\delta < {{\rm\bf dim}\,} P=\alpha_i+n\delta$, it follows that $(n'-n)\delta \leq \alpha_i - \alpha_j$. But $(n'-n)\delta$ is positive and sincere. This is impossible. Therefore, $n'\leq n$. \end{proof} Now we state our main theorem. \begin{thm}\label{main theorem} {\rm (1)} If ${\text{\rm mod}}^{\mu(\delta)}A$ is equivalent to ${\text{\rm mod}} kQ'$, where $Q'$ is a Dynkin or tame quiver, then ${\scr X}_\theta$ is a finite set. 
{\rm (2)} If ${\text{\rm mod}}^{\mu(\delta)}A$ is equivalent to $\mathcal{R'}$, where $\mathcal{R'}$ is the regular part of ${\text{\rm mod}} kQ'$ for a (possibly disconnected) tame quiver $Q'$, then ${\scr X}_\theta$ is an infinite set. \end{thm} \begin{proof} (1) Let ${\mathcal X}_1$ (resp. ${\mathcal X}_2$) be the set of isomorphism classes of indecomposable semistable $A$-modules $M$ (resp.\,with $\mu(M)\neq\mu(\delta)$). According to Theorem {\rm \ref{subcat-equiv}(1)}, we need to consider the following two cases. {\bf Case 1}. $Q'$ is a Dynkin quiver. We first show that ${\mathcal X}_1\cap\mathcal{P}$ is finite. Indeed, by Corollary \ref{classification of slope}, each module in a homogeneous tube is not semistable. Hence, there exists $P\in\mathcal{P}$ such that $\mu(P)>\mu(\delta)$. By the discussion right above Lemma \ref{existence of homomorphism}, we get that $\mu(\delta)<\mu(P_i)$ for some $1\leq i\leq N$. Choose $1\leq s\leq N$ satisfying $\mu(P_j)\leq \mu(P_s)$ for any $1\leq j\leq N$. By Lemma \ref{dimension vector comparison}, $P_s$ is semistable. Hence, by Lemma \ref{maximal slope}, there are only finitely many semistable modules in $\mathcal{P}$. Next we show that ${\mathcal X}_1\cap\mathcal{R}$ is finite. By Corollary \ref{classification of slope}, $A$-modules with dimension vector $m\delta$ are not semistable. So we only need to consider the non-homogeneous tubes. Let ${\mathcal T}$ be a non-homogeneous tube of rank $r$. As before, for $1\leq i\leq r$ and $j\geq 1$, let $E_{i,j}$ be the indecomposable module in ${\mathcal T}$ with quasi-length $j$ and quasi-socle $E_i$. Then $${{\rm\bf dim}\,} E_{i,j}={{\rm\bf dim}\,} E_{i,j_0}+n\delta\;\text{ with $j=j_0+nr,\, 0\leq j_0< r$, and $n\geq 1$.}$$ If $\mu(E_{i,j_0})=\mu(\delta)$, then there exists a submodule $N$ of $E_{i,\delta}$ satisfying $\mu(N)>\mu(E_{i,\delta})=\mu(E_{i,j})$ since $E_{i,\delta}$ is not semistable. Hence, $E_{i,j}$ is not semistable because $N$ is also a submodule of $E_{i,j}$. 
If $\mu(E_{i,j_0})\neq \mu(\delta)$, say $\mu(E_{i,j_0}) < \mu(\delta)$, then $\mu(E_{i,j_0}) < \mu(E_{i,j}) < \mu(\delta)=\mu(E_{i,\delta})$, which implies that $E_{i,j}$ is not semistable since $E_{i,\delta}$ is a submodule of $E_{i,j}$. Consequently, each module in ${\mathcal T}$ with quasi-length $\geq r$ is not semistable. Therefore, there are only finitely many semistable modules in $\mathcal{R}$. By Lemma \ref{equivalent definition} and an argument similar to the proof for the case of $\mathcal{P}$, we get that ${\mathcal X}_1\cap\mathcal{I}$ is finite. $\textbf{Case 2}$. $Q'$ is a tame quiver. We first show that ${\mathcal X}_2 \cap\mathcal{P}$ is finite. We will prove that for $m\gg 0$ and each projective module $P$, if $M=\tau^{-m}P\notin {\text{\rm mod}}^{\mu(\delta)}A$, then $M$ is not semistable. In fact, suppose $M$ is semistable. Let $L \in {\text{\rm mod}}^{\mu(\delta)}A\cap\mathcal{P}$ satisfy ${\mathcal H}om(L,M)\neq0$. Since ${\text{\rm mod}} kQ'\cong {\text{\rm mod}}^{\mu(\delta)}A$ has an infinite preprojective component, ${\text{\rm mod}}^{\mu(\delta)}A \cap \mathcal{P}$ is infinite. By Proposition \ref{existence of homomorphism}, there exists $N_1 \in {\text{\rm mod}}^{\mu(\delta)}A\cap \mathcal{P}$ such that ${\mathcal H}om(M,N_1)\neq0$. Thus, $$\mu(\delta)=\mu(L)\leq\mu(M)\leq\mu(N_1)=\mu(\delta),$$ which implies $\mu(M)=\mu(\delta)$ and $M \in {\text{\rm mod}}^{\mu(\delta)}A$, a contradiction. Therefore, ${\mathcal X}_2 \cap\mathcal{P}$ is finite. Next we show that ${\mathcal X}_2\cap \mathcal{R}$ is finite. Since $A$-modules with dimension vector $m\delta$ do not lie in ${\mathcal X}_2$, we only need to consider the non-homogeneous tubes. Let ${\mathcal T}$ be a non-homogeneous tube of rank $r$. As in Case 1, for $1\leq i\leq r$ and $j\geq 1$, $\textbf{dim}E_{i,j}=\textbf{dim}E_{i,j_0}+n\delta$ with $j=j_0+nr,\, 0\leq j_0< r$, and $n\geq 1$. Suppose that $E_{i,j} \notin {\text{\rm mod}}^{\mu(\delta)}A$. 
By Proposition \ref{r3}, ${\mathcal H}om(E_{i,m\delta},E_{i,j})\neq0$ and ${\mathcal H}om(E_{i,j},E_{i,(m+1)\delta})\neq0$. If $E_{i,m\delta}$ is not semistable, then $E_{i,j}$ is not semistable by an argument similar to Case 1. If $E_{i,m\delta}$ is semistable, then $E_{i,(m+1)\delta}$ is semistable and $$\mu(\delta)=\mu(E_{i,m\delta})\leq\mu(E_{i,j})\leq\mu(E_{i,(m+1)\delta})=\mu(\delta),$$ which implies that $\mu(E_{i,j})=\mu(\delta)$. Since $E_{i,j} \notin$ ${\text{\rm mod}}^{\mu(\delta)}A$, $E_{i,j}$ is not semistable. In conclusion, the modules in ${\mathcal T}$ with quasi-length $\geq r$ do not belong to ${\mathcal X}_2$. Hence, there are only finitely many indecomposable modules in ${\mathcal X}_2\cap\mathcal{R}$. Similarly, we get that ${\mathcal X}_2 \cap \mathcal{I}$ is finite. (2) We will construct a family of semistable modules $\{ P_{j_i}^{i} \}_{i\in \mathbb{N}} \in \mathcal{P}$ satisfying $$\mu( P_{j_i}^{i}) <\mu( P_{j_{i+1}}^{i+1}) < \mu(\delta).$$ By Corollary \ref{classification of slope}, there exists an indecomposable semistable module $M$ with ${{\rm\bf dim}\,} M=\delta$. Choose $1\leq j_0\leq N$ satisfying $\mu(P_j)\leq \mu(P_{j_0})$ for all $1\leq j\leq N$. Write $P_{j_0}^0$ for $P_{j_0}.$ By Lemma \ref{dimension vector comparison}, $P_{j_0}^{0}$ is semistable. Since ${\mathcal H}om(P_{j_0}^{0},M)\neq 0$, we have $\mu(P_{j_0}^{0})\leq \mu(M)= \mu(\delta)$. Since $P_{j_0}^{0} \notin {\text{\rm mod}}^{\mu(\delta)}A$, $\mu(P_{j_0}^{0})< \mu(M)= \mu(\delta)$. Let $P_{1}^{1},P_{2}^{1},\ldots,P_{N}^{1}$ be the preprojective modules in $\mathcal{P}$ with $\bf{dim}$$P_{i}^{1}=\alpha_i+\delta$ for $1\leq i\leq N$, where $\alpha_1, \ldots, \alpha_N$ are defined right above Lemma \ref{existence of homomorphism}. Let $1\leq j_1\leq N$ satisfy $\mu(P_{j}^{1})\leq \mu(P_{j_1}^{1})$ for all $1\leq j\leq N$. 
Since $$\mu(P_{j_1}^{1})\geq \mu(P_{j_0}^{1})> \mu(P_{j_0}^{0})\geq \mu(P_i), 1\leq i\leq N,$$ we have by Lemma \ref{dimension vector comparison} that $P_{j_1}^{1}$ is semistable. Repeating the above process, we finally get a family of semistable modules $\{P_{j_i}^{i} \}_{i\in \mathbb{N}} \in \mathcal{P}$ satisfying $\mu(P_{j_i}^{i}) <\mu( P_{j_{i+1}}^{i+1} )< \mu(\delta)$. Hence, $\{\mu(P_{j_i}^{i}) \}_{i\in \mathbb{N}}\subseteq {\scr X}_\theta$ and ${\scr X}_\theta$ is infinite. \end{proof} \begin{example} Let $Q$ be the tame quiver of type $\widetilde A_3$ with $A=kQ$: $$\xymatrix@=0.5cm{ &&2\ar[dr]&\\Q: &1\ar[ur]\ar[dr]&&4\\ &&3\ar[ur]& } $$ It is known that $\delta=(1,1,1,1)$. (1) Take $\theta=(1, 1, 2, 0)$. An easy calculation shows that $${\scr X}_\theta=\{0,1/2,2/3,1,2\}.$$ Moreover, $\mu(\delta)=1$ and ${\text{\rm mod}}^{1}A$ is equivalent to ${\text{\rm mod}} k\Gamma$, where $\Gamma$ is a tame quiver of type $\widetilde A_2$. (2) Take $\theta=(1, 2, 3, 2)$. An easy calculation shows that $${\scr X}_\theta=\{1,2,5/2,3\}.$$ Moreover, $\mu(\delta)=2$ and ${\text{\rm mod}}^{2}A$ is equivalent to ${\text{\rm mod}} k \Gamma$, where $\Gamma$ is a Dynkin quiver of type $A_2$. (3) Take $\theta=(3, 2, 2, 1)$. Then $$\{(8n+5)/(4n+3)\mid n\geq 0\} \subseteq {\scr X}_\theta.$$ Hence, ${\scr X}_\theta$ is infinite. Moreover, $\mu(\delta)=2$ and ${\text{\rm mod}}^{2}A$ consists of all the regular $A$-modules. \end{example} \end{document}
\begin{document} \title{Mosco convergence of nonlocal to local quadratic forms} \author{Guy Fabrice Foghem Gounoue} \address{Universit\"at Bielefeld, Fakult\"at f\"ur Mathematik, Postfach 100131, 33501 Bielefeld, Germany} \email{[email protected]} \author{Moritz Kassmann} \address{Universit\"at Bielefeld, Fakult\"at f\"ur Mathematik, Postfach 100131, 33501 Bielefeld, Germany} \email{[email protected]} \author{Paul Voigt} \address{Universit\"at Bielefeld, Fakult\"at f\"ur Mathematik, Postfach 100131, 33501 Bielefeld, Germany} \email{[email protected]} \thanks{Financial support by the DFG via IRTG 2235: ``Searching for the regular in the irregular: Analysis of singular and random systems'' is gratefully acknowledged.} \begin{abstract} We study sequences of nonlocal quadratic forms and function spaces that are related to Markov jump processes in bounded domains with a Lipschitz boundary. Our aim is to show the convergence of these forms to local quadratic forms of gradient type. Under suitable conditions we establish the convergence in the sense of Mosco. Our framework allows bounded and unbounded nonlocal operators to be studied at the same time. Moreover, we prove that smooth functions with compact support are dense in the nonlocal function spaces under consideration. \end{abstract} \keywords{Dirichlet forms, Mosco-convergence, Sobolev spaces, integro-differential operators} \subjclass[2010]{28A80, 35J20, 35J92, 46B10, 46E35, 47A07, 49J40, 49J45} \maketitle \date{December 29, 2018} \section{Introduction} In the last two decades the study of nonlocal operators and integro-differential operators has attracted much attention. Here, we have in mind linear or nonlinear operators satisfying a maximum principle as the fractional Laplace operator does. In this work we study the convergence of sequences of such nonlocal operators to local differential operators. Let $(\alpha _n)$ be a sequence of numbers $\alpha_n \in (0,2)$ with $\lim \alpha_n = 2$. 
Given a function $u \in C^\infty_c(\R^d)$, the convergence \begin{align}\label{eq:frac-laplace_to_laplace} (-\Delta)^{\alpha_n/2} u \longrightarrow -\Delta u \end{align} clearly holds true. There are many possible ways resp. topologies in which the operators $(-\Delta)^{\alpha_n/2}$ converge to the classical Laplace operator. In this work we do not study the operators directly. We focus on corresponding quadratic forms because they appear naturally when formulating boundary or complement value problems. Note that for functions $u,v \in C^\infty_c(\R^d)$ the equality \begin{align}\label{eq:part-int_frac-laplace} \int\limits_{\R^d} (-\Delta)^{\alpha/2} u (x) v (x) \mathrm{d} x = \frac{C_{d, \alpha}}{2} \iint\limits_{\R^d \R^d} \frac{(u(x)-u(y))(v(x)-v(y))}{|x-y|^{d+\alpha}} \mathrm{d} x \, \mathrm{d} y \end{align} holds true. Here, $ C_{d, \alpha}$ is a constant depending on the dimension $d$ and the value $\alpha \in (0,2)$, for which the relation $\widehat{(-\Delta)^{\alpha/2} u}(\xi) = |\xi|^\alpha \widehat{u}(\xi)$ holds true for $u \in C^\infty_c(\R^d)$. Let us mention that asymptotically $C_{d, \alpha}\asymp \alpha (2-\alpha)$, which is important for our analysis. Interested readers may consult \cite{Hitchhiker} for more details about the fractional Laplacian $(-\Delta)^{\alpha/2}$ and the constant $C_{d,\alpha}$. If $\Omega \subset \R^d$ is open and $u \in C^\infty_c(\Omega)$, then one can easily show \begin{align}\label{eq:frac-forms_to_gradient-form} \frac{C_{d, \alpha}}{2} \iint\limits_{\Omega \Omega} \frac{(u(x)-u(y))^2}{|x-y|^{d+\alpha}} \mathrm{d} x \, \mathrm{d} y \longrightarrow \int\limits_{\Omega} |\nabla u|^2 \quad \text{ as } \alpha \to 2- \,. \end{align} In light of equalities \eqref{eq:frac-laplace_to_laplace} and \eqref{eq:part-int_frac-laplace} this is a natural result. A more interesting version of this result is proved in \cite{BBM01}. 
Therein, it is shown that \eqref{eq:frac-forms_to_gradient-form} holds true if $\Omega \subset \R^d$ is a bounded open set with a Lipschitz boundary and $u \in H^1(\Omega)$. The regularity assumption on $\Omega$ and $u$ ensures that suitable extensions of $u$ to the whole space exist. Analogously to the above, one can easily prove for $\Omega \subset \R^d$ open and $u \in C^\infty_c(\R^d)$ the following result: \begin{align} \frac{C_{d, \alpha}}{2} \iint\limits_{\Omega \R^d} \frac{(u(x)-u(y))^2}{|x-y|^{d+\alpha}} \mathrm{d} x \, \mathrm{d} y &\longrightarrow \int\limits_{\Omega} |\nabla u|^2 \quad \text{ as } \alpha \to 2- \,, \label{eq:new-frac-forms_to_gradient-form} \\ \frac{(2+d)}{\varepsilon^{d+2}\omega_{d-1}} \iint\limits_{\Omega \R^d} \big(u(x)-u(y) \big)^2 \mathbbm{1}_{B_\varepsilon}(x-y) \mathrm{d} x \, \mathrm{d} y &\longrightarrow \int\limits_{\Omega} |\nabla u|^2 \quad \text{ as } \varepsilon \to 0+ \,. \label{eq:bounded-frac-forms_to_gradient-form} \end{align} The expression on the left-hand side of \eqref{eq:new-frac-forms_to_gradient-form} naturally appears when studying nonlocal Dirichlet or Neumann problems with prescribed data on the complement of $\Omega$, see \cite{FKV15,DRV17}. It is important for the study of Dirichlet-to-Neumann maps of certain nonlocal problems involving the fractional Laplacian, see \cite{Calderon-Tuhin}. The expression also appears when studying extension theorems for nonlocal operators, see \cite{DyKa18, BGPR17}. Assertions \eqref{eq:frac-forms_to_gradient-form}, \eqref{eq:new-frac-forms_to_gradient-form} and \eqref{eq:bounded-frac-forms_to_gradient-form} describe the convergence of a sequence of numbers since the function $u$ is fixed. The main aim of this paper is to prove a result in the spirit of \eqref{eq:new-frac-forms_to_gradient-form} but not for a given function. 
We study the convergence of forms in the sense of Mosco, see \autoref{def:mosco}, which is a well-known generalization of the famous concept of $\Gamma$-convergence. The result then applies to variational solutions to boundary data or complement data problems. Note that our main result \autoref{thm:Mosco-convergence} covers sequences of forms with bounded and unbounded kernels at the same time. An important role in our study is played by function spaces. We assume that $\Omega$ is a bounded open subset of $\R^d$. For several results we assume that $\Omega$ has a Lipschitz boundary. Let us introduce generalized Sobolev-Slobodeckij-like spaces with respect to an unimodal L\'{e}vy measure $\nu(h) \mathrm{d} h$. Assume $\nu:\R^d\setminus \{0\} \to [0, \infty)$ is a radial function, which (a) satisfies $\nu \in L^1(\R^d, (1 \wedge |h|^2) \mathrm{d} h)$ and (b) is almost decreasing, i.e., there is $c \geq 0$ such that $|y| \geq |x|$ implies $\nu(y) \leq c \nu(x)$. The function $\nu$ then is the density of an unimodal L\'evy measure. Possible examples are given by $\nu(h) = \mathbbm{1}_{B_1}(h)$ and for $\alpha \in (0,2)$ by $\nu(h) = C_{d,\alpha} |h|^{-d-\alpha}$ for $h \in \R^d, h \ne 0$. With the help of $\nu$ we can now define several function spaces. Set \begin{align*} H_{\nu} (\Omega)= \Big\{u \in L^2(\Omega)| \iint\limits_{\Omega\Omega} \big(u(x)-u(y) \big)^2 \, \nu (x-y)\mathrm{d}x\,\mathrm{d}y<\infty \Big \}\,. \end{align*} We endow this space with the norm \begin{align*} \|u\|^2_{H_{\nu} (\Omega)}= \|u\|^2_{L^{2} (\Omega)}+ \iint\limits_{\Omega\Omega} \big(u(x)-u(y) \big)^2 \, \nu (x-y) \mathrm{d}x\,\mathrm{d}y. \end{align*} Note that for bounded functions $\nu$, e.g., in the case $\nu(h) = \mathbbm{1}_{B_1}(h)$, the space $H_{\nu} (\Omega)$ equals $L^2(\Omega)$. Following \cite{FKV15} we define $ V_{\nu} (\Omega|\mathbb{R}^d)$ as follows: \begin{align*} V_{\nu} (\Omega|\mathbb{R}^d) = \Big\lbrace u: \R^d \to \R \text{ meas. 
} | \, [u]^2_{V_{\nu} (\Omega|\mathbb{R}^d)} = \!\!\iint\limits_{(\Omega^c\times \Omega^c)^c} \!\!\big(u(x)-u(y) \big)^2 \, \nu (x-y) \mathrm{d}x \, \mathrm{d}y <\infty \Big\rbrace \,. \end{align*} We endow this space with two norms as follows: \begin{align*} \|u\|^2_{V_{\nu} (\Omega|\mathbb{R}^d)} &:= \|u\|^2_{L^{2} (\R^d)}+ \iint\limits_{(\Omega^c\times \Omega^c)^c} \big(u(x)-u(y) \big)^2 \, \nu (x-y) \mathrm{d}x \, \mathrm{d}y \,, \\ \vertiii{u}^2_{V_{\nu} (\Omega|\mathbb{R}^d)} &:= \|u\|^2_{L^{2} (\Omega)}+ \iint\limits_{(\Omega^c\times \Omega^c)^c} \big(u(x)-u(y) \big)^2 \, \nu (x-y) \mathrm{d}x \, \mathrm{d}y \,. \end{align*} Note that for $\alpha \in (0,2)$, $\nu(h) = C_{d,\alpha} |h|^{-d-\alpha}$ for $h \in \R^d, h \ne 0$, the space $H_{\nu} (\Omega)$ equals the classical Sobolev-Slobodeckij space $H^{\alpha/2}(\Omega)$. For the same choice of $\nu$ we define $V^{\alpha/2} (\Omega|\mathbb{R}^d)$ as the space $V_{\nu} (\Omega|\mathbb{R}^d)$. Our first main result concerns the density of smooth functions in $V_{\nu}(\Omega|\R^d)$. Its rather technical proof is provided in \autoref{sec:spaces}. \begin{theorem}\label{thm:density} Assume $\Omega\subset \mathbb{R}^d$ is open, bounded and $\partial \Omega$ is Lipschitz continuous. Let $\nu$ be as above. Then $C_c^\infty(\mathbb{R}^d)$ is dense in $V_{\nu}(\Omega|\mathbb{R}^d)$ with respect to the two norms mentioned above, i.e. for $u \in V_{\nu}(\Omega|\mathbb{R}^d)$ there exists a sequence $(u_n) \subset C_c^\infty(\mathbb{R}^d)$ with \begin{align*} \|u_n-u\|_{V_{\nu}(\Omega|\mathbb{R}^d)} \longrightarrow 0 \text{ as } n\to\infty \,. \end{align*} Obviously, the convergence $\vertiii{u_n-u}_{V_{\nu}(\Omega|\mathbb{R}^d)} \to 0$ follows. \end{theorem} \noindent Next, let us explain for which sequences of nonlocal quadratic forms we can prove convergence to a classical local gradient form. 
\begin{definition}\label{def:nu-alpha} Let $(\rho_\varepsilon)_{0<\varepsilon<2}$ be a family of radial functions approximating the Dirac measure at the origin. We assume that for every $\varepsilon,\delta > 0$ \begin{align*} \begin{split} \rho_\varepsilon\geq 0, \quad \int_{\mathbb{R}^d}\rho_\varepsilon (x)\mathrm{d} x=1, \quad \lim_{\varepsilon\to 0^+}\int_{|x|>\delta}\rho_\varepsilon (x)\mathrm{d} x=0\,. \end{split} \end{align*} Moreover, we assume that $h \mapsto |h|^{-2}\rho_\varepsilon(h)$ is almost decreasing, i.e., for some $c \geq 1$ and all $x,y$ with $|x| \leq |y|$ we have $|y|^{-2} \rho_\varepsilon(y) \leq c \, |x|^{-2} \rho_\varepsilon(x)$. Given a sequence $(\rho_\varepsilon)_{0<\varepsilon<2}$ with the aforementioned properties, we define a sequence $(\nu^\alpha)_{0<\alpha<2}$ of functions $\nu^\alpha:\R^d \setminus\{0\} \to \R$ by $\nu^\alpha (h) = |h|^{-2} \rho_{2-\alpha}(h)$. This sequence is used to set up function spaces below. \end{definition} \begin{example}\label{ex:most-important} For $\rho_\varepsilon (h) = \frac{\varepsilon}{\omega_{d-1}} |h|^{-d+\varepsilon} \mathbbm{1}_{B_1}(h)$ we obtain $\nu^\alpha (h) = \frac{2-\alpha}{\omega_{d-1}} |h|^{-d-\alpha} \mathbbm{1}_{B_1}(h)$ and $H_{\nu^\alpha}(\Omega) = H^{\alpha/2}(\Omega)$. Note that there is no sequence $(\rho_\varepsilon)$ satisfying the conditions above, for which $\nu^\alpha (h) = C_{d,\alpha} |h|^{-d-\alpha}$ for all $h$. One would need to relax the integrability condition on $\rho_\varepsilon$. Consequently, the vector spaces $V_{\nu^\alpha}(\Omega|\R^d)$ and $V^{\alpha/2} (\Omega|\mathbb{R}^d)$ do not coincide. 
However, the normed space $(V_{\nu^\alpha}(\Omega|\R^d),\|\cdot\|_{V_{\nu^\alpha} (\Omega|\mathbb{R}^d)})$ is equivalent to the normed space $(V^{\alpha/2} (\Omega|\mathbb{R}^d),\|\cdot\|_{V^{\alpha/2} (\Omega|\mathbb{R}^d)})$, where \[\|u\|^2_{V^{\alpha/2} (\Omega|\mathbb{R}^d)} := \|u\|^2_{L^{2} (\R^d)}+ \iint\limits_{(\Omega^c\times \Omega^c)^c} \frac{\big(u(x)-u(y) \big)^2}{|x-y|^{d+\alpha}} \mathrm{d}x \, \mathrm{d}y \,.\] \end{example} \begin{example}\label{ex:bounded-nu} As the following example shows, $(\nu^\alpha)$ can be a sequence of bounded functions. For $\varepsilon \in (0,2)$ define $\rho_\varepsilon$ by \begin{align}\label{eq:def-rho-bd-nu} \rho_\varepsilon(h) = \frac{d+2}{ \omega_{d-1}\varepsilon^{d+2}} |h|^{2}\mathbbm{1}_{B_{\varepsilon}}(h) \qquad (h \in \R^d)\,. \end{align} Define $\nu^{\alpha}$ for $\alpha \in (0,2)$ as in \autoref{def:nu-alpha}. Then for every $\alpha \in (0,2)$ $H_{\nu^\alpha}(\Omega)$ is equivalent to $L^{2}(\Omega)$ and $(V_{\nu^\alpha}(\Omega|\R^d),\|\cdot\|_{V_{\nu^\alpha} (\Omega|\mathbb{R}^d)})$ is equivalent to $L^{2}(\R^d)$. Note that these equivalences are not uniform in $\alpha$. \end{example} \noindent Note that each function $\nu^\alpha$ determines a symmetric unimodal L\'{e}vy measure, i.e., it is a radially almost decreasing function and $\min(1, |h|^2) \in L^1(\R^d, \nu^\alpha(h) \mathrm{d} h)$. Next, let us introduce the nonlocal bilinear forms under consideration. We recall that $\Omega \subset \mathbb{R}^d $ is an open bounded set. 
Given $\alpha \in (0,2)$, $J^\alpha: \mathbb{R}^d\times \mathbb{R}^d \setminus \operatorname{diag} \to [0, \infty]$ and sufficiently smooth functions $u,v: \mathbb{R}^d\to \mathbb{R}$, we define \begin{align} \mathcal{E}^{\alpha}_{\Omega}(u,v) &= \iint\limits_{\Omega \Omega} \big(u(y)-u(x)\big) \big(v(y)-v(x)\big) J^\alpha(x,y)\mathrm{d} x \, \mathrm{d} y\,, \label{eq-inner-form} \\ \mathcal{E}^{\alpha}(u,v) &= \iint\limits_{(\Omega^c\times \Omega^c)^c} \big(u(y)-u(x)\big) \big(v(y)-v(x)\big) J^\alpha(x,y) \mathrm{d} x \, \mathrm{d} y, \label{eq-ext-form} \end{align} \noindent In the sequel we will not introduce a separate notation for the quadratic forms $u \mapsto \mathcal{E}^{\alpha}_{\Omega}(u,u)$ and $u \mapsto \mathcal{E}^{\alpha}(u,u)$. Note that $(\Omega^c\times \Omega^c)^c$ equals $(\Omega\times \Omega) \cup (\Omega\times \Omega^c) \cup (\Omega^c\times \Omega)$. We assume that $(J^\alpha)_{0<\alpha<2}$ is a sequence of positive symmetric kernels $J^\alpha: \mathbb{R}^d\times \mathbb{R}^d \setminus \operatorname{diag} \to [0, \infty]$ satisfying the following conditions: \begin{itemize} \item[(E)] There exists a constant $\Lambda\geq 1$ such that for every $\alpha\in (0,2)$ and all $x,y \in \mathbb{R}^d$, $x \ne y$, with $|x-y|\leq 1$ \begin{align}\label{eq:elliptic-condition}\tag{$E$} \Lambda^{-1} \nu^\alpha (x-y) \leq J^\alpha(x,y) &\leq \Lambda \nu^\alpha (x-y) \end{align} \item[(L)] For every $\delta >0$ \begin{align} \label{eq:integrability-condition}\tag{$L$} \lim_{\alpha \to 2^-}\sup_{x\in \mathbb{R}^d} \int_{|h| > \delta} J^\alpha(x,x+h)dh=0. \end{align} \end{itemize} \noindent Finally, let us define the limit object, which is a local quadratic form of gradient type. 
Given $x \in \R^d$ and $\delta > 0$, we define the symmetric matrix $A(x) = (a_{ij}(x))_{1\leq i,j\leq d}$ by \begin{align}\label{eq:coef-matrix} a_{ij}(x) = \lim_{\alpha\to 2^{-}} \int_{B_\delta} h_ih_j J^\alpha(x,x+h)dh \end{align} and for $u,v \in H^{1}(\Omega)$ the corresponding bilinear form by \begin{align*} \mathcal{E}^A(u,v):= \int\limits_{\Omega} \big( A(x)\nabla u(x), \nabla v(x) \big) \mathrm{d} x \,. \end{align*} \noindent Conditions \eqref{eq:elliptic-condition} and \eqref{eq:integrability-condition} are sufficient in order to show convergence results similar to \eqref{eq:new-frac-forms_to_gradient-form} and \eqref{eq:bounded-frac-forms_to_gradient-form}, see \autoref{thm:quadratic-convergence-BBM}. As we will see in \autoref{prop:elliptic-matrix}, conditions \eqref{eq:elliptic-condition} and \eqref{eq:integrability-condition} ensure that the symmetric matrices $A(\cdot)$ defined in \eqref{eq:coef-matrix} are uniformly positive definite and bounded. For our main result, \autoref{thm:Mosco-convergence}, we impose translation invariance of the kernels: \begin{itemize} \item[(I)] For each $\alpha \in (0,2)$ the kernel $J^\alpha$ is translation invariant, i.e., for every $h \in \mathbb{R}^d$ \begin{align}\label{eq:translation-invariance}\tag{I} J^\alpha(x+h, y+h) = J^\alpha(x, y) \end{align} \end{itemize} \begin{remark} (i) Under conditions \eqref{eq:elliptic-condition} and \eqref{eq:integrability-condition} the expression $\int_{B_\delta} h_ih_j J^{\alpha_n}(x,x+h)dh$ converges for a suitable subsequence of $(\alpha_n)$. The existence of the limit in \eqref{eq:coef-matrix} poses an implicit condition on the family $(J^\alpha)$. (ii) \eqref{eq:elliptic-condition} and \eqref{eq:integrability-condition} ensure that the quantity $a_{ij}(x)$ does not depend on the choice of $\delta$ and is bounded as a function in $x$. (iii) Under condition \eqref{eq:translation-invariance} the functions $a_{ij}(x)$ are constant in $x$. 
\end{remark} \noindent Let us formulate our second main result. \begin{theorem}\label{thm:Mosco-convergence} Let $\Omega\subset \mathbb{R}^d$ be an open bounded set with a Lipschitz continuous boundary. Assume \eqref{eq:elliptic-condition}, \eqref{eq:integrability-condition} and \eqref{eq:translation-invariance}. Then the two families of quadratic forms $(\mathcal{E}^\alpha_{\Omega}(\cdot, \cdot), H_{\nu^\alpha}( \Omega) )_{\alpha}$ and $( \mathcal{E}^\alpha(\cdot, \cdot),V_{\nu^\alpha}( \Omega|\mathbb{R}^d) )_{\alpha}$ both converge to $( \mathcal{E}^A(\cdot,\cdot), H^{1}( \Omega) )$ in the Mosco sense in $L^2(\Omega)$ as $\alpha\to 2^-$. \end{theorem} A stronger version of \autoref{thm:Mosco-convergence} not assuming condition \eqref{eq:translation-invariance} will be proved elsewhere, see also \cite{Voi17}. We refer the reader to \autoref{def:mosco} for details about the Mosco convergence of bilinear forms. Note that \autoref{thm:quadratic-convergence-BBM}, which is part of the proof of \autoref{thm:Mosco-convergence}, implies the convergence results \eqref{eq:frac-forms_to_gradient-form}, \eqref{eq:new-frac-forms_to_gradient-form} and \eqref{eq:bounded-frac-forms_to_gradient-form} for fixed functions $u$. Let us discuss the assumption on the family $(J^\alpha)_\alpha$ and provide some examples. Condition \eqref{eq:elliptic-condition} is a sufficient condition for what can be seen as nonlocal version of the classical ellipticity condition for second order operators in divergence form. Condition \eqref{eq:integrability-condition} ensures that long-range interactions encoded by $J^\alpha(x,y)$ vanish as $\alpha\to 2^-$. As a result, for some $\alpha_0\in (0,2)$, the quantity \begin{align} \label{eq:consequence-integrability} \kappa_0 = \sup_{\alpha \in(\alpha_0,2)}\sup_{x\in \mathbb{R}^d} \int_{|h|>1} J^\alpha(x,x+h)dh \end{align} is finite. 
One can easily check that conditions \eqref{eq:elliptic-condition} and \eqref{eq:integrability-condition} imply the following uniform L\'evy integrability type property: \begin{align*} \sup_{\alpha \in(\alpha_0,2)}\sup_{x\in \mathbb{R}^d} \int_{\mathbb{R}^d} (1\land |h|^2)J^\alpha(x,x+h) \mathrm{d} h<\infty \,. \end{align*} \begin{example}\label{ex:J-guys-singular} For $\varepsilon > 0$ set $\rho_{\varepsilon}(h) = \frac{\varepsilon}{\omega_{d-1}} |h|^{-d+\varepsilon}\mathbbm{1}_{B_1}(h)$. Define $\nu^{\alpha}$ for $\alpha \in (0,2)$ as in \autoref{def:nu-alpha}. Then conditions \eqref{eq:elliptic-condition}, \eqref{eq:integrability-condition} and \eqref{eq:translation-invariance} are fulfilled for each of the following cases and $\beta>0$: \begin{align*} J_1^{\alpha}(x,y) &= C_{d, \alpha} |x-y|^{-d-\alpha} \,, \\ J_2^{\alpha}(x,y) &= C_{d, \alpha} |x-y|^{-d-\alpha}\mathbbm{1}_{B_1}(x-y)+ (2-\alpha)|x-y|^{-d-\beta} \mathbbm{1}_{\mathbb{R}^d\setminus B_1}(x-y)\,, \\ J_3^{\alpha}(x,y) &= C_{d, \alpha} |x-y|^{-d-\alpha}\mathbbm{1}_{B_1}(x-y)+ (2-\alpha)J(x,y) \mathbbm{1}_{\mathbb{R}^d\setminus B_1}(x-y) \,, \end{align*} where $J$ is a symmetric function satisfying $\sup_{x\in \mathbb{R}^d} \int_{\mathbb{R}^d\setminus B_\delta} J(x,x+h)dh<\infty$ for every $\delta >0$. Regarding \autoref{thm:Mosco-convergence}, in the cases $J_1^\alpha$, $J_2^\alpha$ and $J_3^\alpha$, one obtains $A(x) = (\delta_{ij})_{1\leq i,j\leq d}$, i.e., the matrix $A$ equals the identity matrix. \end{example} \begin{example} In \autoref{ex:J-guys-singular} we provide examples of singular kernels $J$. As we explain above, \autoref{thm:Mosco-convergence} applies to bounded kernels, too. Here is one example. For $\varepsilon \in (0,2)$ define $\rho_\varepsilon$ as in \eqref{eq:def-rho-bd-nu}. Define $\nu^{\alpha}$ for $\alpha \in (0,2)$ as in \autoref{def:nu-alpha}. 
Then conditions \eqref{eq:elliptic-condition}, \eqref{eq:integrability-condition} and \eqref{eq:translation-invariance} are fulfilled for $J_4^{\alpha}(x,y) = \frac{1}{(2-\alpha)^{d+2}}\mathbbm{1}_{B_{2-\alpha}}(x-y)$. As in the cases above, in the case $J_4$ one obtains $A(x) = (\delta_{ij})_{1\leq i,j\leq d}$. We refer the reader to \autoref{sec:examples} for more examples. \end{example} Let us relate our result to other works. We study \autoref{thm:density} as a tool needed for the proof of \autoref{thm:Mosco-convergence}. However, the density result itself is of importance for the study of nonlocal problems in bounded domains. We refer the reader to \cite{DyKa18, BGPR17, KaWa18} for recent results involving function spaces of the type of $V_{\nu}(\Omega|\R^d)$. \autoref{thm:Mosco-convergence} is closely related to the weak convergence of the finite-dimensional distributions of stochastic processes. Since both quadratic forms, $(\mathcal{E}^\alpha_{\Omega}(\cdot, \cdot), H_{\nu^\alpha}( \Omega) )_{\alpha}$ and \linebreak $( \mathcal{E}^\alpha(\cdot, \cdot),V_{\nu^\alpha}( \Omega|\mathbb{R}^d) )_{\alpha}$ turn out to be regular Dirichlet forms, cf. \autoref{cor:regular-DF}, they correspond to L\'evy processes. Depending on the choice of $\nu^\alpha$, the L\'evy measure has finite mass or not. \autoref{thm:Mosco-convergence} implies that the distributions of these processes converge weakly to the distribution of a diffusion process defined by the Dirichlet form $(\mathcal{E}^A(\cdot,\cdot), H^{1}( \Omega) )$. In \cite{Mos94} (see also \cite{KS03}) it is shown that Mosco convergence of a sequence of symmetric closed forms is equivalent to the convergence of the sequence of associated semigroups (or of associated resolvents) and implies the weak convergence of the finite-dimensional distributions of the corresponding processes if any. 
Note that several authors have studied the weak convergence of Markov processes with the help of Dirichlet forms, e.g., in \cite{LyZh96, KuUe97, MRZ98, Sun98, Kol05, Kol06, BBCK09, CKK13}. Most of related results are concerned with situations where the type of the process does not change, i.e., diffusions converge to a diffusion or jump processes converge to a jump process. In the present work, we consider examples where a sequence of jump processes in bounded domains converges to a diffusion. This will appear implicitly as consequence of the Mosco convergence in \autoref{thm:Mosco-convergence}. The Dirichlet form $(\mathcal{E}^\alpha_{\Omega}(\cdot, \cdot), H_{\nu^\alpha}( \Omega) )_{\alpha}$ has appeared in the analysis literature for decades. When $\nu^\alpha$ is singular, then it arises naturally through the norms of Sobolev-Slobodeckij spaces introduced by Aronszajn, Gagliardo and Slobodeckij. The regular Dirichlet form generates a censored jump process, which is introduced and thoroughly studied in \cite{BBC03}. Jumps from $\Omega$ into $\R^d \setminus \Omega$ are erased from the underlying free jump process. The stochastic process is restarted each time such a jump occurs. The situation is very different for the Dirichlet form $( \mathcal{E}^\alpha(\cdot, \cdot),V_{\nu^\alpha}( \Omega|\mathbb{R}^d) )_{\alpha}$. It appears in \cite{DRV17} in connection with the study of nonlocal problems with Neumann-type conditions, see also \cite{LMPPS18}. The function space $V_{\nu}(\Omega|\R^d)$ is central for the Hilbert space approach to complement value problems with Dirichlet data in \cite{FKV15}. The article \cite{DRV17} offers some probabilistic interpretation but a mathematical study of the corresponding stochastic process seems not to be available yet. The authors have been informed that, in an ongoing project Z. 
Vondracek addresses the probabilistic interpretation of quadratic forms including examples like $( \mathcal{E}^\alpha(\cdot, \cdot),V_{\nu^\alpha}( \Omega|\mathbb{R}^d) )_{\alpha}$. Of course, reflections of jump processes have been studied for a long time, e.g. in \cite{MeRo85}. In the case of bounded jump measures $\nu^\alpha$ the works on so-called nonlocal diffusion equations study similar problems, cf. \cite{CERW07, AMRT10}. Bounded kernels also appear in the study of peridynamics. Neumann boundary conditions have recently been studied in this context in \cite{AC17, TTD17}. Last, let us mention that integro-differential operators have been considered by several authors with nonlocal Neumann conditions in the framework of strong solutions or viscosity solutions, cf. \cite{GaMe02, BCGJ14}. The paper is organized as follows. In \autoref{sec:spaces} we study the function spaces $V_{\nu}(\Omega|\R^d)$ in detail. In particular, we prove that the subspace $C^\infty_c(\R^d)$ is dense in $V_{\nu}(\Omega|\mathbb{R}^d)$. \autoref{sec:main-proof} is devoted to the proof of \autoref{thm:Mosco-convergence}. \emph{Acknowledgement:} The authors thank Vanja Wagner (Zagreb) for helpful discussions on the proof of \autoref{thm:density}. \section{Density of smooth functions}\label{sec:spaces} The aim of this section is to prove \autoref{thm:density}. Let us recall the corresponding setup. $\Omega$ is a bounded open subset of $\R^d$ with a Lipschitz boundary. The function $\nu:\R^d\setminus \{0\} \to [0, \infty)$ is radial and satisfies $\nu \in L^1(\R^d, (1 \wedge |h|^2) \mathrm{d} h)$. Moreover, it is almost decreasing, i.e., there is $c \geq 1$ such that $|y| \geq |x|$ implies $\nu(y) \leq c \nu(x)$. The space $V_{\nu}(\Omega|\R^d)$ is defined as above. First, let us explain why, for certain choices of $\nu$, it is natural to consider the norm $\vertiii{\cdot}_{V_{\nu}(\Omega|\R^d)}$ on the space $V_{\nu}(\Omega|\R^d)$. 
\begin{proposition}\label{prop:natural-norm-on-V} Assume $\nu $ is given as above. \noindent (a) If $\Omega\subset B_{|\xi|/2}(0) $ for some $ \xi \in \mathbb{R}^d$ with $\nu(\xi)\neq 0$. Then $V_{\nu}(\Omega|\R^d) \subset L^2(\Omega)$.\\ (b) Assume $\nu$ is positive on sets of positive measure, i.e. $\nu$ has full support. Then there exists another almost decreasing radial measure $\widetilde{\nu}: \mathbb{R}^d \to [0,\infty) $ and a constant $C>0$ both depending only on $\nu, d$ and $\Omega$ such that \begin{enumerate}[(i)] \item $\widetilde{\nu}(\mathbb{R}^d)<\infty$\,, \item $0\leq \widetilde{\nu} \leq C(1\land \nu)$\,, \item $V_{\nu}(\Omega|\R^d) \subset L^2(\R^d,\widetilde{\nu}(h)\mathrm{d} h) \subset L^1(\R^d,\widetilde{\nu}(h)\mathrm{d} h)$\,, \item on $V_{\nu}(\Omega|\R^d)$, the norms $\vertiii{\cdot}_{V_{\nu}(\Omega|\R^d)}$ and $\vertiii{\cdot}^*_{V_{\nu}(\Omega|\R^d)}$ with \begin{align*} \vertiii{u}^{*2}_{V_{\nu}(\Omega|\R^d)}=\int_{\mathbb{R}^d} u^2(x) \widetilde{\nu}(x)\mathrm{d} x+ \iint\limits\limits_{(\Omega^c\times \Omega^c)^c} (u(x)-u(y))^2\nu(x-y)\mathrm{d} x\mathrm{d} y \end{align*} are equivalent. \end{enumerate} \end{proposition} \begin{remark} Regarding property (ii) let us mention that in some cases like $\nu(h) = |h|^{-d-\alpha}$ it is possible to obtain $\widetilde{\nu} \asymp 1\land \nu$. In the aforementioned case one could define $\widetilde{\nu}(h) = (1+|h|)^{-d-\alpha}$ for $h \in \R^d$. \end{remark} \begin{proof} First, if $\Omega\subset B_{|\xi|/2}(0)$, then for all $x,y\in \Omega$ we have $\nu(x-y)\geq c'$ with $c'= c\nu(\xi)>0 $. By Jensen's inequality, we have \begin{align*} \iint\limits_{(\Omega^c\times \Omega^c)^c} \big(u(x)-u(y)\big)^2 \nu(x-y) \mathrm{d} x \, \mathrm{d} y&\geq c' \iint\limits_{\Omega\Omega} (|u(x)|-|u(y)|)^2\mathrm{d} x \, \mathrm{d} y\\ &\geq c'|\Omega| \int\limits_{\Omega} \Big(|u(x)|-\hbox{$\fint_{\Omega}|u|$}\Big)^2\mathrm{d} x. \end{align*} This shows that the mean value $\fint_{\Omega}|u|$ is finite. 
We conclude $u \in L^2(\Omega)$ because of \[\int_{\Omega}u^2(x) \leq 2\int_{\Omega} \Big(|u(x)|-\hbox{$\fint_{\Omega}$}|u|\Big)^2+ 2|\Omega| \Big(\hbox{$\fint_{\Omega}|u|$}\Big)^2\,. \] The proof of part (b) is similar to the proof of \cite[Proposition 13]{DyKa18}. Assume $\nu $ has full support. Since $\Omega$ is bounded, there is $R\geq 1$ large enough such that $\Omega\subset B_R(0)$. Clearly, we have $|x-y|\leq R(1+|y|)$ for all $x\in \Omega$ and all $y\in \R^d$. The monotonicity condition on $\nu$ implies $\nu(R(1+|y|))\leq c\nu(x-y) $. Set $\widetilde{\nu}(h) = \nu(R(1+|h|))$ for $h \in \R^d$, where we abuse the notation and write $\nu(|y|)$ instead of $\nu(y)$ for $y\in \R^d$. Let us show that $\widetilde{\nu}$ satisfies the desired conditions. Note that $(ii)$ is a direct consequence of the fact that $|h|\leq R(1+|h|)$ and $R\leq R(1+|h|)$ for all $h \in \R^d. $ Passing through polar coordinates, we have \begin{align*} \widetilde{\nu}(\R^d)& = \int\limits_{\R^d} \nu(R(1+|h|))\mathrm{d} h =|\mathbb{S}^{d-1}| \int_{0}^{\infty} \nu(R(1+r)) r^{d-1} \mathrm{d} r\\ &= |\mathbb{S}^{d-1}|R^{-1} \int_{R}^{\infty} \nu(r) \Big(\frac{r}{R}-1\Big)^{d-1} \mathrm{d} r\leq |\mathbb{S}^{d-1}|R^{-d} \int_{R}^{\infty} \nu(r) r^{d-1} \mathrm{d} r\\ &= R^{-d} \int_{|h|\geq R} (1\land |h|^2) \nu(h) \mathrm{d} h \leq R^{-d} \int_{\R^d} (1\land |h|^2) \nu(h) \mathrm{d} h< \infty\, . \end{align*} This proves $(i)$ and hence $L^2(\R^d,\widetilde{\nu}(h)\mathrm{d} h) \subset L^1(\R^d,\widetilde{\nu}(h)\mathrm{d} h)$. Let $u\in V_{\nu}(\Omega|\R^d) \subset L^2(\Omega)$. 
Then \begin{align*} \int_{\Omega}u^2(x)\mathrm{d} x+\iint\limits_{\Omega\Omega^c} &(u(x)-u(y))^2\nu(x-y)\mathrm{d} y\mathrm{d} x \\ &= \widetilde{\nu}(\Omega^c)^{-1} \iint\limits_{\Omega\Omega^c} u^2(x)\widetilde{\nu}(y)\mathrm{d} y\mathrm{d} x+\iint\limits_{\Omega\Omega^c} (u(x)-u(y))^2\nu(x-y)\mathrm{d} y\mathrm{d} x\\ &\geq (1\land \widetilde{\nu}(\Omega^c)^{-1})(1\land c^{-1}) \iint\limits_{\Omega\Omega^c}\Big[ u^2(x)+ (u(x)-u(y))^2 \Big] \widetilde{\nu}(y)\mathrm{d} y\mathrm{d} x\\ &\geq (1\land \widetilde{\nu}(\Omega^c)^{-1})(1\land c^{-1})\frac{|\Omega|}{2} \int\limits_{\Omega^c}u^2(y) \widetilde{\nu}(y)\mathrm{d} y\, . \end{align*} Moreover, note that for an appropriate constant $C>0$ we have $C^{-1}\|u\|_{L^2(\Omega)} \leq\|u\|_{L^2(\Omega, \, \widetilde{\nu}(h)\mathrm{d} h)} \leq C \|u\|_{L^2(\Omega)} $ since $R\leq R(1+|h|)\leq R(1+R)$ for all $ h\in \Omega$. This together with the previous estimate shows $u\in L^2(\R^d, \widetilde{\nu})$. Therefore, the proof of $(iii)$ is complete. Obviously, we also have $\vertiii{u}_{V_{\nu}(\Omega|\R^d)}\leq C\vertiii{u}^*_{V_{\nu}(\Omega|\R^d)}$. The reverse inequality is an immediate consequence of the above estimates, thereby proving the equivalence of the two norms under consideration. Part $(iv)$ is proved. \end{proof} \begin{proposition} Let $\alpha_0\in (0,2)$ be as in \eqref{eq:consequence-integrability}. The quadratic forms $(\mathcal{E}^{\alpha}_{\Omega}(\cdot, \cdot), H_{\nu^\alpha} (\Omega)) $ and $\big(\mathcal{E}^{\alpha}(\cdot, \cdot), V_{\nu^\alpha}(\Omega|\mathbb{R}^d)\cap L^2(\mathbb{R}^d)\big)$ are well defined for every $\alpha\in (\alpha_0, 2)$. \end{proposition} \begin{proof} Let $\alpha\in (\alpha_0, 2)$ . Let $u \in H_{\nu_\alpha}(\Omega)$. 
By the assumption \eqref{eq:elliptic-condition} and relation \eqref{eq:consequence-integrability} we have \begin{align*} \mathcal{E}^\alpha_{\Omega}(u,u) &= \hspace*{-2ex}\iint\limits_{\Omega\Omega \cap \{|x-y|\leq 1\}} (u(x)-u(y))^2 J^\alpha(x,y)\mathrm{d} x\mathrm{d} y+ \hspace*{-2ex} \iint\limits_{\Omega\Omega \cap \{|x-y|>1\}} (u(x)-u(y))^2 J^\alpha(x,y)\mathrm{d} x\mathrm{d} y\\ &\leq \Lambda \hspace*{-2ex}\iint\limits_{\Omega\Omega \cap \{|x-y|\leq 1\}} (u(x)-u(y))^2 \nu^\alpha(x-y)\mathrm{d} x\mathrm{d} y+ 4 \int\limits_{\Omega} u^2(x)\mathrm{d} x\int\limits_{|x-y|>1} J^\alpha(x,y)\mathrm{d} y\\ &\leq \Lambda \iint\limits_{\Omega\Omega } (u(x)-u(y))^2 \nu^\alpha(x-y)\mathrm{d} x\mathrm{d} y+ 4 \kappa_0 \int\limits_{\Omega} u^2(x)\mathrm{d} x\\ &\leq (\Lambda+4\kappa_0)\|u\|^2_{H_{\nu^\alpha}(\Omega)}<\infty\, . \end{align*} Now if $u \in V_{\nu^\alpha}(\Omega|\mathbb{R}^d) $ then, from the above we deduce $\mathcal{E}^\alpha_{\Omega}(u,u)<\infty$. By the same argument we obtain \begin{align*} &\iint\limits_{\Omega\Omega^c } (u(x)-u(y))^2 J^\alpha(x,y)\mathrm{d} x\mathrm{d} y\\&\leq \Lambda \hspace*{-2ex}\iint\limits_{\Omega\Omega^c \cap \{|x-y|\leq 1\}} (u(x)-u(y))^2 \nu^\alpha(x-y)\mathrm{d} x\mathrm{d} y+ 2\hspace*{-2ex} \iint\limits_{\Omega\Omega^c \cap \{|x-y|>1\}} \big(u^2(x)+u^2(y)\big)J^\alpha(x,y)\mathrm{d} x\mathrm{d} y\\ &\leq \Lambda \iint\limits_{\Omega\Omega^c\cap \{|x-y|\leq 1\}} (u(x)-u(y))^2 \nu^\alpha(x-y)\mathrm{d} x\mathrm{d} y+ 2 \kappa_0 \int\limits_{\Omega} u^2(x)\mathrm{d} x+ 2 \kappa_0 \int\limits_{\Omega^c} u^2(x)\mathrm{d} x\\ &\leq \Lambda \iint\limits_{\Omega\Omega^c} (u(x)-u(y))^2 \nu^\alpha(x-y)\mathrm{d} x\mathrm{d} y+ 2 \kappa_0 \int\limits_{\mathbb{R}^d} u^2(x)\mathrm{d} x<\infty\, . \end{align*} Finally, we obtain \begin{align*} \mathcal{E}^\alpha(u,u) &= \mathcal{E}^\alpha_{\Omega}(u,u) + 2\iint\limits_{\Omega\Omega^c } (u(x)-u(y))^2 J^\alpha(x,y)\mathrm{d} x\mathrm{d} y<\infty\, . \end{align*} \end{proof} \begin{definition}[cf.
\cite{Adams}] In what follows, a domain $D \subset \mathbb{R}^d$ is called an extension domain if there exists a linear operator $E: H^1(D)\to H^1(\mathbb{R}^d)$ and a constant $C:= C(D, d)$ depending only on the domain $D$ and the dimension $d$ such that for all $u \in H^1(D)$ \begin{align*} Eu|_{D} = u \qquad\hbox{and} \qquad \|Eu\|_{H^1(\mathbb{R}^d)}\leq C \|u\|_{H^1(D)}. \end{align*} \end{definition} The next lemma shows that the nonlocal quadratic forms under consideration are continuous on $H^1(D)$. \begin{lemma}\label{lem-cont-qua-form} Assume $D \subset \mathbb{R}^d$ is an extension domain. Assume $J^\alpha $ satisfies \eqref{eq:elliptic-condition} and \eqref{eq:integrability-condition} and let $\alpha_0\in (0,2)$ be as in \eqref{eq:consequence-integrability}. Then, there exists a constant $C:= C(D, \Lambda, d, \alpha_0)$ such that for every $u \in H^1(D)$ and every $\alpha\in (\alpha_0,2)$ \begin{align*} \mathcal{E}^{\alpha}_D(u,u) \leq C\| u\|^2_{H^1(D)}. \end{align*} \end{lemma} \begin{proof} Firstly, from the symmetry of $J^\alpha(x,y)$ and \eqref{eq:consequence-integrability} we have the following estimates \begin{eqnarray*} \iint\limits_{D\times D \cap \{|x-y|\geq 1\}} (u(x)-u(y))^2 J^\alpha(x,y)\mathrm{d} x \, \mathrm{d} y& \leq & 2\int\limits_{D}u^2(x) \mathrm{d} x \int\limits_{ |x-y|\geq 1} J^\alpha(x,y) \, \mathrm{d} y\\ &\leq& 2\kappa_0\|u\|^2_{L^2(D)}.
\end{eqnarray*} Now, let $\overline{u}\in H^{1}(\mathbb{R}^d)$ be an extension of $u$; then, using the estimate $\|\overline{u}(\cdot+ h)-\overline{u}\|_{L^2(\mathbb{R}^d)} \leq |h| \|\nabla\overline{u}\|_{L^2(\mathbb{R}^d)}$ (which can be established through density of smooth functions with compact support in $H^{1}(\mathbb{R}^d)$) we have \begin{align*} \iint\limits_{D\times D\cap \{|x-y|\leq 1\}} &\frac{(u(x)-u(y))^2}{|x-y|^{2}} \rho_{2-\alpha}(x-y) \mathrm{d} x \, \mathrm{d} y = \iint\limits_{D\times D\cap \{|x-y|\leq 1\}} \frac{(\overline{u}(x)-\overline{u}(y))^2}{|x-y|^{2}} \rho_{2-\alpha}(x-y) \mathrm{d} x \, \mathrm{d} y \\ &\leq \int\limits_{ |h|\leq 1} \rho_{2-\alpha}(h) \frac{ \mathrm{d} h }{|h|^{2}} \int\limits_{\mathbb{R}^d} (\overline{u}(x+h)-\overline{u}(x))^2 \mathrm{d} x \\ &\leq \|\nabla\overline{u}\|^2_{L^2(\mathbb{R}^d)} \int\limits_{|h|\leq 1} \rho_{2-\alpha}(h) \mathrm{d} h \leq C \|u\|^2_{H^{1}(D)}. \end{align*} Precisely, we have \begin{align*} \iint\limits_{D\times D \cap \{|x-y|\leq 1\}} (u(x)-u(y))^2 |x-y|^{-2} \rho_{2-\alpha}(x-y)\mathrm{d} x \, \mathrm{d} y &\leq C\| u\|^2_{H^1(D)}. \end{align*} Combining the above estimates along with the condition \eqref{eq:elliptic-condition} we get \[ \mathcal{E}^{\alpha}_D(u,u) \leq C\| u\|^2_{H^1(D)}. \] \end{proof} \begin{proposition} Let $\nu$ be as above. The function spaces $\big(V_\nu (\Omega|\mathbb{R}^d), \|\cdot\|_{V_\nu (\Omega|\mathbb{R}^d)}\big)$ and $\big(H_\nu (\Omega), \|\cdot\|_{H_{\nu} (\Omega)}\big)$ are separable Hilbert spaces. If $\nu$ has full support in $\R^d$, i.e. if $\nu>0$ a.e.\ on $\R^d$, then the same is true for the space $\big(V_\nu (\Omega|\mathbb{R}^d), \vertiii{\cdot}_{V_\nu (\Omega|\mathbb{R}^d)}\big)$. \end{proposition} \noindent For the proof we follow ideas from \cite{FKV15, DRV17}.
\begin{proof} It is not difficult to check that $ \|\cdot\|_{V_\nu (\Omega|\mathbb{R}^d)}$ and $ \|\cdot\|_{H_{\nu} (\Omega)}$ are norms on $V_\nu (\Omega|\mathbb{R}^d)$ and $H_{\nu} (\Omega)$ respectively. Now, if $\vertiii{u}_{V_\nu (\Omega|\mathbb{R}^d)} = 0,$ then, $u=0$ a.e.\ on $\Omega$ and since $[u]^2_{V_\nu (\Omega|\mathbb{R}^d)}=0$ with $\nu(h)>0$ a.e.\ we have $u(y)=u(x) =0$ for almost all $(x,y)\in \Omega\times \R^d$. That is, $u=0$ a.e.\ on $\R^d$ and this enables $\vertiii{\cdot}_{V_\nu (\Omega|\mathbb{R}^d)}$ to be a norm on $V_\nu (\Omega|\mathbb{R}^d). $ Now, let $(u_n)_n$ be a Cauchy sequence in $\big(V_\nu (\Omega|\mathbb{R}^d), \|\cdot\|_{V_\nu (\Omega|\mathbb{R}^d)}\big)$. It converges to some $u$ in the topology of $L^2(\mathbb{R}^d)$ and pointwise almost everywhere in $\mathbb{R}^d$ up to a subsequence $(u_{k_n})_n$. Fix $n$; Fatou's lemma implies \begin{align*} [u_{k_n}-u]^2_{V_\nu (\Omega|\mathbb{R}^d)} \leq \liminf_{\ell\to \infty} \iint\limits_{(\Omega^c\times \Omega^c)^c} \big([u_{k_n}-u_{k_\ell}](x)-[u_{k_n}-u_{k_\ell}](y) \big)^2 \, \nu (x-y) \mathrm{d}x \, \mathrm{d}y \,. \end{align*} Since $(u_{k_n})_n$ is a Cauchy sequence, the right hand side is finite for any $n$ and tends to $0$ as $n\to \infty$. This implies $u\in V_\nu (\Omega|\mathbb{R}^d)$ and $[u_{k_n}-u]^2_{V_\nu (\Omega|\mathbb{R}^d)} \to 0$ as $n\to \infty$. Finally, $u_n\to u$ in $V_\nu (\Omega|\mathbb{R}^d)$. Furthermore, the map $\mathcal{I}: V_\nu (\Omega|\mathbb{R}^d)\to L^2(\R^d) \times L^2(\Omega\times \R^d)$ with \begin{align*} \mathcal{I}(u) = \Big(u(x), (u(x)-u(y))\sqrt{\nu(x-y)}\Big) \end{align*} is an isometry. Hence from its Hilbert structure, the space $\big(V_\nu (\Omega|\mathbb{R}^d), \|\cdot\|_{V_\nu (\Omega|\mathbb{R}^d)}\big)$, which can be identified with $\mathcal{I}\Big(V_\nu (\Omega|\mathbb{R}^d)\Big)$, is separable as a closed subspace of the separable space $ L^2(\R^d) \times L^2(\Omega\times \R^d)$.
Analogously, one shows that $\big(H_\nu (\Omega), \|\cdot\|_{H_{\nu} (\Omega)}\big)$ is a separable Hilbert space. It remains to prove that $\big(V_\nu (\Omega|\mathbb{R}^d), \vertiii{\cdot}_{V_\nu (\Omega|\mathbb{R}^d)}\big)$ is a separable Hilbert space. Here we assume that $\nu$ has full support on $\R^d$. Without loss of generality we assume $\nu(h)>0$ for every $h\in \R^d$. Assume that $(u_n)_n$ is a Cauchy sequence in $\big(V_\nu (\Omega|\mathbb{R}^d), \vertiii{\cdot}_{V_\nu (\Omega|\mathbb{R}^d)}\big)$. Then there exist a subsequence $(u_{k_n})_n$ of $(u_{n})_n$, a function $u$ in $L^2(\Omega)$, a function $U \in L^{2}(\Omega\times \mathbb{R}^d)$, and null sets $N\subset \mathbb{R}^d$ and $\mathcal{R}\subset \Omega\times \mathbb{R}^d$ such that \begin{itemize} \item[-] $(u_{k_n})_n$ converges to $u$ in $L^2(\Omega)$\,, \item[-] $(u_{k_n})_n$ converges to $u$ pointwise on $\Omega\setminus N$\,, \item[-] $(U_{k_n})_n$ converges to $U$ in $L^2(\Omega \times \R^d)$\,, \item[-] $(U_{k_n})_n$ converges to $U$ pointwise on $(\Omega\times \R^d)\setminus \mathcal{R}$\,, \end{itemize} where $U_n(x,y) = (u_n(x)-u_n(y))\sqrt{\nu(x-y)}$. Let $(x,y)\in (\Omega \times \R^d)\setminus \mathcal{R'} $ with $x\neq y$ where $ \mathcal{R'}= \mathcal{R} \cup (N\times \R^d)$. Then, as $n\to \infty$ we have \begin{align*} u_{k_n}(y)= u_{k_n}(x) - U_{k_n}(x,y)/\sqrt{\nu(x-y)} \to u(x) - U(x,y)/\sqrt{\nu(x-y)}\,. \end{align*} Finally, $U(x,y) = (u(x)-u(y))\sqrt{\nu(x-y)} \in L^2(\Omega \times \R^d)$ so that $u \in V_{\nu}(\Omega|\R^d) $. We easily conclude $\vertiii{u_n-u}_{V_\nu (\Omega|\mathbb{R}^d)}\to 0$ as $n\to \infty$, which proves completeness. Let us mention that, alternatively, one could apply the equivalence of the norms $\vertiii{\cdot}_{V_\nu (\Omega|\mathbb{R}^d)}$ and $\vertiii{\cdot}^*_{V_\nu (\Omega|\mathbb{R}^d)}$, cf. \autoref{prop:natural-norm-on-V} $(iv)$. This would allow us to establish completeness along the lines of the proof of completeness in the first case.
The separability of the space $\big(V_\nu (\Omega|\mathbb{R}^d), \vertiii{\cdot}_{V_\nu (\Omega|\mathbb{R}^d)}\big)$ can be shown as in the case above. \end{proof} \begin{remark} Let us define spaces of functions that vanish on the complement of $\Omega$. Set \begin{align}\label{eq:VnuOm-vanish} V^\Omega_{\nu}(\Omega|\mathbb{R}^d)= \{ u\in V_{\nu}(\Omega|\mathbb{R}^d)~| ~u=0~~\text{a.e. on } \mathbb{R}^d\setminus \Omega\}\,. \end{align} As a direct consequence of \autoref{prop:natural-norm-on-V}, the space $\big(V^\Omega_{\nu}(\Omega|\mathbb{R}^d), \|\cdot \|_{V_{\nu}(\Omega|\mathbb{R}^d)} \big)$ is a separable Hilbert space, too. Both norms $\|\cdot \|_{V_{\nu}(\Omega|\mathbb{R}^d)} $ and $\vertiii{\cdot}_{V_{\nu}(\Omega|\mathbb{R}^d)} $ coincide on $V^\Omega_{\nu}(\Omega|\mathbb{R}^d)$. \end{remark} Finally, we are in the position to prove our first main result, \autoref{thm:density}. \begin{proof}[Proof of \autoref{thm:density}] Assume $u \in V_{\nu} (\Omega|\mathbb{R}^d)$. We prove that there is a sequence $(u_n)$ of functions in $C^\infty_c(\R^d)$ such that $[u-u_n]_{V_{\nu}(\Omega|\R^d)}$ converges to $0$ as $n\to\infty$. This implies \begin{align}\label{eq:density-conv} \|u_n-u\|_{V_{\nu}(\Omega|\mathbb{R}^d)} \longrightarrow 0 \text{ as } n\to\infty \,, \end{align} since the convergence $\|u_n - u\|_{L^2(\R^d)}\to 0$ follows by standard arguments. Obviously, the convergence $\vertiii{u_n-u}_{V_{\nu}(\Omega|\mathbb{R}^d)} \to 0$ follows from \eqref{eq:density-conv}. Note that the sequence $(u_n)$ is constructed by translation and convolution of the function $u$ with a mollifier. \noindent \textbf{Step 1:} Let $x_0\in\partial\Omega$. Since $\partial\Omega$ is Lipschitz, there exists $r>0$ and a Lipschitz function $\gamma:\R^{d-1}\to\R$ with Lipschitz constant $k>0$, such that (upon relabeling the coordinates) \begin{align*} \Omega\cap B_r(x_0) &= \{ x\in B_r(x_0)| x_d > \gamma(x_1,...,x_{d-1})\}. \end{align*} Set $x= (x_1,...,x_{d-1},x_d)=(x',x_d)$.
For the sake of convenience, we choose $r>0$ so small that $|\Omega\cap B^c_r(x_0)|>0$. For $x\in B_{r/2}(x_0)$, $\tau >1+k$ and $0<\varepsilon <\frac{r}{2(1+\tau)}$ we define the shifted point \[ x_\varepsilon = x + \tau \varepsilon e_d\,. \] We define $u_\varepsilon(x) = u(x_\varepsilon)= u(x+\tau \varepsilon e_d)$ and \[ v_\varepsilon = \eta_\varepsilon \ast u_\varepsilon \] where $\eta_\varepsilon$ is a smooth mollifier having support in $B_\varepsilon(0)$. \noindent \textbf{Step 2:} Let us assume $\supp u\Subset B_{r/4}(x_0)$. In this case $v_\varepsilon \in C^\infty_c (B_r(x_0))$. The aim of this step is to prove \[ [v_\varepsilon - u]_{V_{\nu}(\Omega|\R^d)} \longrightarrow 0 \quad \text{ as } \varepsilon \to 0 \,. \] Due to the nonlocal nature of the seminorm, this step turns out to be rather challenging. We begin with a geometric observation. \begin{lemma}\label{lem:guy-geo} Let $z\in B_1(0)$. Let $\Omega^z_\varepsilon = \Omega+ \varepsilon(\tau e_d-z)$. Then $\Omega^z_\varepsilon\cap B_{r/2}(x_0) \subset \Omega\cap B_{r}(x_0)$. \end{lemma} \begin{proof} For $h \in \Omega^z_\varepsilon\cap B_{r/2}(x_0)$, let us write $ h= t+\varepsilon \tau e_d-\varepsilon z $ with $t \in \Omega$. Then $t\in B_{r}(x_0) $, $h' = t'-\varepsilon z'$ and $h_d= t_d+ \varepsilon(\tau -z_d)$. Since $\gamma$ is Lipschitz with Lipschitz constant $k<\tau-1$ and $t\in \Omega\cap B_{r}(x_0) = \{ x\in B_{r}(x_0)| x_d > \gamma(x')\}$ we obtain \begin{align*} \gamma(h')&\leq \gamma(t')+ |\gamma(h')-\gamma(t')| <t_d+ \varepsilon k|z'|\\ & <t_d+ \varepsilon k< t_d+ \varepsilon(\tau -z_d)= h_d. \end{align*} Thus, $h \in B_r(x_0)$ and $h_d>\gamma(h')$. We have shown $h\in \Omega \cap B_r(x_0)$ as desired. \end{proof} The main technical tool of the argument below is the Vitali convergence theorem, see \cite[Chapter 3]{Alt16} or \cite[Corollary 4.5.5.]{bogachev2007volumeI}.
Since $u$ belongs to the space $V_{\nu}(\Omega|\R^d)$, for every $\delta > 0$ there is $\eta > 0$ such that for all sets $E \subset \Omega$, $F \subset \R^d$ with $|E \times F| < \eta$ we know \begin{align}\label{eq:equi-int-u} \iint\limits_{E F} \big(u(x)-u(y)\big)^2 \nu(x-y) \mathrm{d} y \mathrm{d} x &< \delta \text{ and } \iint\limits_{E F} u^2(y) \mathrm{d} y \mathrm{d} x < \delta \,. \end{align} The second estimate uses the fact that $\iint_{\Omega \R^d} u^2(y) \mathrm{d} y \mathrm{d} x$ is finite because $u$ has compact support. As a consequence of \eqref{eq:equi-int-u}, we derive the following lemma. \begin{lemma}\label{lem:equi-int-u-eps} For every $\delta > 0$ there is $\eta > 0$ such that for all sets $E \subset \Omega$, $F \subset \R^d$ with $|E \times F| < \eta$ \begin{align}\label{eq:equi-int-u_eps-z} \sup\limits_{z \in B_1(0)} \sup\limits_{\varepsilon > 0} \iint\limits_{E F} \big(u^z_\varepsilon(x)-u^z_\varepsilon(y)\big)^2 \nu(x-y) \mathrm{d} y \mathrm{d} x < \delta \,, \end{align} where $ u^z_\varepsilon(\xi) = u_\varepsilon(\xi-\varepsilon z) = u(\xi+\varepsilon \tau e_d-\varepsilon z)$. \end{lemma} \begin{proof} Let $\delta > 0$. Choose $\eta > 0$ as in \eqref{eq:equi-int-u}. Let $\varepsilon > 0$, $z \in B_1(0)$. Let $E \subset \Omega$, $F \subset \R^d$ be sets with $|E \times F| < \eta$. Then \begin{align} \iint\limits_{E F} \big(u^z_\varepsilon(x)-u^z_\varepsilon(y)\big)^2 \nu(x-y) \mathrm{d} y \mathrm{d} x = \iint\limits_{E^z_\varepsilon F^z_\varepsilon} \big(u(x)-u(y)\big)^2 \nu(x-y) \mathrm{d} y \mathrm{d} x \,, \end{align} where $E^z_\varepsilon = E + \varepsilon(\tau e_d-z)$ and $F^z_\varepsilon$ is defined analogously. We decompose $E^z_\varepsilon$ as follows: $E^z_\varepsilon = \big(E^z_\varepsilon \cap B_{r/2}(x_0)\big) \cup \big(E^z_\varepsilon \cap B^c_{r/2}(x_0)\big)$.
Note \[ E^z_\varepsilon \cap B_{r/2}(x_0) \subset \Omega^z_\varepsilon \cap B_{r/2}(x_0) \subset \Omega \cap B_{r}(x_0) \,,\] where we apply \autoref{lem:guy-geo}. We directly conclude \begin{align}\label{eq:equi-one} \iint\limits_{E^z_\varepsilon F^z_\varepsilon} \mathbbm{1}_{B_{r/2}(x_0)}(x) \big(u(y)-u(x)\big)^2 \nu(x-y) \mathrm{d} y \mathrm{d} x \leq \delta \,. \end{align} With regard to the remaining term note \begin{align}\label{eq:equi-two} \begin{split} \iint\limits_{E^z_\varepsilon F^z_\varepsilon}& \mathbbm{1}_{B^c_{r/2}(x_0)}(x) \big(u(x)-u(y)\big)^2 \nu(x-y) \mathrm{d} y \mathrm{d} x \\ &= \iint\limits_{E^z_\varepsilon F^z_\varepsilon} \mathbbm{1}_{B^c_{r/2}(x_0)}(x) \mathbbm{1}_{B_{r/4}(x_0)}(y) u^2(y) \nu(x-y) \mathrm{d} y \mathrm{d} x\\ &\leq c(r, \nu) \iint\limits_{E^z_\varepsilon F^z_\varepsilon} \mathbbm{1}_{B^c_{r/2}(x_0)}(x) \mathbbm{1}_{B_{r/4}(x_0)}(y) u^2(y) \mathrm{d} y \mathrm{d} x \leq c \iint\limits_{E^z_\varepsilon F^z_\varepsilon} u^2(y) \mathrm{d} y \mathrm{d} x\\ &= c \iint\limits_{E F^z_\varepsilon} u^2(y) \mathrm{d} y \mathrm{d} x\leq c \delta \,. \end{split} \end{align} The positive constant $c(r,\nu)$ depends on $r$ and on the shape of $\nu$. Summation over \eqref{eq:equi-one} and \eqref{eq:equi-two} completes the proof after redefining $\delta$ accordingly. \end{proof} The next lemma shows the tightness of $u^z_\varepsilon(x)-u^z_\varepsilon(y)$ uniformly for $z\in B_1(0)$ and $\varepsilon >0$. \begin{lemma}\label{lem:tigh-u-eps-z} For every $\delta>0$ there exists $E_\delta \subset \Omega$ and $F_\delta \subset \mathbb{R}^d$ such that $|E_\delta \times F_\delta |<\infty$ and \begin{align}\label{eq:tight-u_eps} \sup\limits_{z \in B_1(0)} \sup\limits_{\varepsilon > 0} \iint\limits_{(\Omega \times \mathbb{R}^d) \setminus ( E_\delta \times F_\delta)} \big(u^z_\varepsilon(x)-u^z_\varepsilon(y)\big)^2 \nu(x-y) \mathrm{d} y \mathrm{d} x < \delta.
\end{align} \end{lemma} \begin{proof} Fix $\varepsilon>0$ and $z\in B_1(0)$. Let $ \bar{R}= \sup\limits_{\xi \in \Omega} |\xi-x_0|$, which is finite since $\Omega $ is bounded. Note that $\supp u^z_\varepsilon \subset B_{r/2}(x_0)$. Choose $R>0$ so large that $[B^c_{R}(x_0)]_{\varepsilon}^z= B^c_{R}(x_0) +\varepsilon (\tau e_d-z)\subset B^c_{R/2}(x_0) $ and $|x-y|\geq R/2-\bar{R}$ for $x\in B^c_{R/2}(x_0) $ and $y \in \Omega$. Thus, \begin{align*} \iint\limits_{(\Omega \times \mathbb{R}^d) \setminus ( \Omega \times B_R(x_0))} & \big(u^z_\varepsilon(x)-u^z_\varepsilon(y)\big)^2 \nu(x-y) \mathrm{d} y \mathrm{d} x = \iint\limits_{\Omega B^c_R(x_0)} \big(u^z_\varepsilon(x)\big)^2 \nu(x-y) \mathrm{d} y \mathrm{d} x\\ &= \int\limits_{\Omega^z_\varepsilon \cap B_{r/2}(x_0) } u^2(x)\mathrm{d} x \int\limits_{ [B^c_{R}(x_0)]_{\varepsilon}^z} \nu(x-y) \mathrm{d} y \leq \int\limits_{\Omega} u^2(x)\mathrm{d} x \int\limits_{ B^c_{R/2-\bar{R}}(x)} \nu(x-y) \mathrm{d} y\\ &= \|u\|^2_{L^2(\Omega)} \int\limits_{ B^c_{R/2-\bar{R}}(0)} \nu(h) \mathrm{d} h. \end{align*} The desired result follows by taking $E_\delta = \Omega$ and $F_\delta =B_R(x_0)$ with $R>0$ large enough such that $ \int\limits_{ B^c_{R/2-\bar{R}}(0)} \nu(h) \mathrm{d} h<\delta \|u\|^{-2}_{L^2(\Omega)}$. \end{proof} \begin{lemma} There exists a constant $C(\Omega,r, \nu)$ depending only on $\Omega,r$ and $\nu$ such that, for all $z\in B_1(0)$ and all $\varepsilon>0$ \begin{align}\label{eq:estimate-seminorm} [u^z_\varepsilon]^2_{V_{\nu}(\Omega|\R^d)}\leq C(\Omega,r, \nu) [ u]^2_{V_{\nu}(\Omega|\R^d)}. \end{align} \end{lemma} \begin{proof} Note that $|x-y|\geq r/4$ for $x\in B^c_{r/2}(x_0) $ and $y \in B_{r/4}(x_0)$ and there is $c_r(\Omega, \nu)>0$ such that $ \nu(x-y) >c_r(\Omega,\nu)$ for all $x\in \Omega$ and all $y \in B_{r/4}(x_0)$ since $\Omega$ is bounded.
Let us choose $ C=C(\Omega,r, \nu)$ not less than \[ 1+c_r^{-1}(\Omega,\nu)|\Omega\cap B_r^c(x_0)|^{-1}\int\limits_{ B^c_{r/4}(0)} \nu(h) \mathrm{d} h.\] Therefore, for each $z\in B_1(0)$ and each $\varepsilon>0 $ we have \begin{align*} \iint\limits_{ \Omega^z_\varepsilon\cap B^c_{r/2}(x_0)\times \R^d} \left( u(x) - u(y) \right)^2 \nu(x-y) \mathrm{d} y \mathrm{d} x &= \int\limits_{ B_{r/4}(x_0) } u^2(y)\mathrm{d} y \int\limits_{ \Omega^z_\varepsilon\cap B^c_{r/2}(x_0)} \nu(x-y) \mathrm{d} x\\ &\leq \int\limits_{ B_{r/4}(x_0) } u^2(y)\mathrm{d} y \int\limits_{ B^c_{r/4}(y)} \nu(x-y) \mathrm{d} x\\ &\leq C\hspace{-3ex} \int\limits_{ B_{r/4}(x_0) } u^2(y)\mathrm{d} y \int\limits_{ \Omega \cap B^c_{r}(x_0)} \nu(x-y) \mathrm{d} x\\ &= C \hspace{-3ex}\iint\limits_{ \Omega \cap B^c_{r}(x_0)\times\R^d }(u(x)-u(y))^2 \nu(x-y) \mathrm{d} y \mathrm{d} x. \end{align*} Using a change of variables, the previous estimate and \autoref{lem:guy-geo}, we have \begin{align*} &[ u^z_\varepsilon]^2_{V_{\nu}(\Omega|\R^d)} = \iint\limits_{ \Omega\R^d} \left( u^z_\varepsilon(x) - u^z_\varepsilon(y) \right)^2 \nu(x-y) \mathrm{d} y \mathrm{d} x = \iint\limits_{ \Omega^z_\varepsilon\R^d} \left( u(x) - u(y) \right)^2\nu (x-y) \mathrm{d} y \mathrm{d} x\\ &= \iint\limits_{ \Omega^z_\varepsilon\cap B_{r/2}(x_0)\times \R^d}\hspace*{-2ex} \left( u(x) - u(y) \right)^2 \nu(x-y) \mathrm{d} y \mathrm{d} x + \hspace{-3ex} \iint\limits_{ \Omega^z_\varepsilon\cap B^c_{r/2}(x_0)\times \R^d} \left( u(x) - u(y) \right)^2 \nu(x-y) \mathrm{d} y \mathrm{d} x\\ &\leq C\hspace{-3ex} \iint\limits_{ \Omega\cap B_{r}(x_0)\times \R^d}\hspace*{-2ex} \left( u(x) - u(y) \right)^2 \nu(x-y) \mathrm{d} y \mathrm{d} x +C\hspace{-3ex} \iint\limits_{ \Omega \cap B^c_{r}(x_0)\times\R^d }(u(x)-u(y))^2 \nu(x-y) \mathrm{d} y \mathrm{d} x\\ &= C [u]^2_{V_{\nu}(\Omega|\R^d)}. \end{align*} \end{proof} We are now in a position to prove the main result of this step.
By Jensen's inequality, we get the following \begin{align*} &[v_\varepsilon - u]^2_{V_{\nu}(\Omega|\R^d)} = \iint\limits_{\Omega\,\R^d} ((v_\varepsilon(x)-v_\varepsilon(y)) -(u(x)-u(y)))^2 \nu(x-y)\mathrm{d} y \mathrm{d} x\\ &= \iint\limits_{\Omega\,\R^d} \Big(\int\limits_{\R^d} ((u_\varepsilon(x-z)-u_\varepsilon(y-z))\eta_\varepsilon(z) \mathrm{d} z -(u(x)-u(y))\Big)^2\nu(x-y) \mathrm{d} y \mathrm{d} x \\ &= \iint\limits_{\Omega\,\R^d} \Big(\int\limits_{B_1(0)} ((u_\varepsilon(x-\varepsilon z) - u_\varepsilon(y-\varepsilon z) ) -(u(x)-u(y))) \eta(z) \mathrm{d} z \Big)^2 \nu(x-y) \mathrm{d} y \mathrm{d} x \\ &\leq \iint\limits_{\Omega \R^d} \int\limits_{B_1(0)} \big( (u_\varepsilon(x-\varepsilon z) - u_\varepsilon(y-\varepsilon z) ) -(u(x)-u(y))\big)^2\nu(x-y) \eta(z) \mathrm{d} z \mathrm{d} y \mathrm{d} x \\ &=\int\limits_{B_1(0)} \eta(z) \iint\limits_{\Omega \R^d} \big( ( u_\varepsilon(x-\varepsilon z) - u_\varepsilon(y-\varepsilon z) ) -(u(x)-u(y))\big)^2\nu(x-y) \mathrm{d} y \mathrm{d} x\, \mathrm{d} z\,\\ &= \int\limits_{B_1(0)} [u^z_\varepsilon-u]^2_{V_{\nu}(\Omega|\R^d)}\eta(z) \mathrm{d} z\,. \end{align*} \noindent For each $z \in B_1(0)$ the family of functions $(x,y)\mapsto \left( ( u^z_\varepsilon(x) - u^z_\varepsilon(y) ) -(u(x)-u(y))\right)^2\nu(x-y) $ with $(x,y) \in \Omega\times\mathbb{R}^d$, $\varepsilon>0$ is equiintegrable (by \autoref{lem:equi-int-u-eps}), is tight (by \autoref{lem:tigh-u-eps-z}) and converges to $0$ a.e on $\Omega\times\mathbb{R}^d$. Thus for fixed $z\in B_1(0)$ the Vitali's convergence theorem gives \begin{align*} \iint\limits_{\Omega\,\R^d} \left(( u_\varepsilon(x-\varepsilon z) - u_\varepsilon(y-\varepsilon z) ) -(u(x)-u(y))\right)^2\nu(x-y) \ \mathrm{d} y \ \mathrm{d} x\overset{\varepsilon \to 0}{\longrightarrow} 0\,. \end{align*} That is, $[u^z_\varepsilon-u]^2_{V_{\nu}(\Omega|\R^d)} \to 0, $ as $\varepsilon \to 0$ for each $ z\in B_1(0)$. 
Further, from estimate \eqref{eq:estimate-seminorm} the function $ z \mapsto \eta(z)[u^z_\varepsilon-u]^2_{V_{\nu}(\Omega|\R^d)} $ is bounded by $2(C+1)\,\|\eta\|_{L^\infty(B_1(0))}\, [u]^2_{V_{\nu}(\Omega|\R^d)}$ for all $\varepsilon>0$ and a.e. $z\in B_1(0)$. Thus, by Lebesgue's dominated convergence theorem \[ \int\limits_{B_1(0)} [u^z_\varepsilon-u]^2_{V_{\nu}(\Omega|\R^d)}\eta(z) \mathrm{d} z \overset{\varepsilon \to 0}{\longrightarrow} 0. \] This implies $ [v_\varepsilon-u]_{V_{\nu}(\Omega|\R^d)}\to 0$ as $\varepsilon \to 0$. \noindent \textbf{Step 3:} Let $u\in V_{\nu}(\Omega|\R^d)$ be arbitrary. Let $R>0$ be such that $\Omega\subset B_{R}(0)$. Let $f_R\in C_c^\infty(B_{3R}(0))$ with $f_R\leq1$ and $f_R(x)=1$ for all $x\in B_{2 R}(0)$. Define $u_R = f_R u$. Then $\supp(u_R)\subset B_{3R}(0)$ and $[u-u_R]_{V_{\nu}(\Omega|\R^d)} \to 0$ as $R \to \infty$. \noindent \textbf{Step 4:} Let $x_i\in\partial\Omega$, $r_i>0$, $i=1,..,N$, such that \[ \partial\Omega \subset \bigcup_{i=1}^N B_{r_i/2}(x_i), \] where the $r_i$ are chosen small enough, such that (up to relabeling the coordinates) we can assume \begin{align*} \Omega\cap B_{4r_i}(x_i) &= \{ x\in B_{4r_i}(x_i)| x_d > \gamma_i(x')\} \end{align*} for some smooth $\gamma_i:\R^{d-1}\to\R$ as in Step 1. Let $\Omega^*= \{x\in \R^d| \operatorname{dist}(x,\Omega)>\frac12 \min_{i\in\{1,..,N\}} r_i\}$ and $\Omega_0 = \{x\in \Omega| \operatorname{dist}(x,\Omega^c)>\frac12 \min_{i\in\{1,..,N\}} r_i\}$. Then \[ \bigcup_{i=1}^N B_{r_i}(x_i) \cup \Omega^*\cup\Omega_0 = \R^d . \] Let $\{\xi_i\}_{i=0}^{N+1}$ be a smooth partition of unity subordinate to the above constructed sets. \\ We define \[ u_i = \xi_i\cdot u_R \text{ for all } i\in\{0,..,N+1\}, \] and thus \begin{align*} &\supp u_i \subset B_{r_i}(x_i) \text{ for }i\in\{1,..N\}, \\ &\supp u_0 \subset \Omega_0, \\ &\supp u_{N+1} \subset \Omega^*. \end{align*} \noindent \textbf{Step 5:} In this step, we use the shorthand notation $\Delta u(x;y) = u(x)-u(y) $. Let $\delta>0$ and $i\in\{1,..,N\}$.
By Step 2 there exists a sequence $v^i_\varepsilon\in C_c^\infty(B_{4r_i}(x_i))$ such that \[ [u_i-v^i_\varepsilon]_{V_{\nu}(\Omega|\R^d)} \longrightarrow 0 \] for $\varepsilon\to 0$. Thus we can choose $\varepsilon_0>0$ such that $[u_i-v^i_\varepsilon]_{V_{\nu}(\Omega|\R^d)}< \frac{\delta}{N+2}$ for all $i\in\{1,..,N\}$ and all $\varepsilon<\varepsilon_0$. For $i=N+1$ define $v^{N+1}_\varepsilon = \eta_\varepsilon \ast u_{N+1}$ and set $r=\frac14 \min_{i\in\{1,..,N\}} r_i$. Choosing $\varepsilon< r$, since $\supp u_{N+1} \subset \Omega^*$, we have for all $x\in\Omega$, $y\in\R^d$ and $z\in B_\varepsilon(0)$ \[ \Delta u_{N+1}(x;y) = \Delta u_{N+1}(x-z;y-z)= 0 \quad \text{ or } \quad \bet{x-y}>r. \] Thus \begin{align*} [v^{N+1}_\varepsilon&-u_{N+1}]^2_{V_{\nu}(\Omega|\R^d)} = \iint\limits_{\Omega\,\R^d} (\Delta v^{N+1}_\varepsilon(x;y)-\Delta u_{N+1}(x;y))^2 \nu(x-y) \mathrm{d} y \mathrm{d} x \\ &= \iint\limits_{\Omega \R^d} \left(\int\limits_{B_\varepsilon(0)} \big(\Delta u_{N+1}(x-z;y-z)-\Delta u_{N+1}(x;y)\big) \eta_\varepsilon(z) \mathrm{d} z\right)^2 \nu(x-y) \mathrm{d} x \mathrm{d} y \\ &\leq C_r \iiint\limits_{B_1(0)\times\Omega\times\R^d} (\Delta u_{N+1}(x-\varepsilon z;y-\varepsilon z)-\Delta u_{N+1}(x;y))^2 \mathrm{d} y \mathrm{d} x \eta(z) \mathrm{d} z . \end{align*} By the continuity of the shift in $L^2(\R^d)$ \[ \iint\limits_{\Omega\,\R^d} (\Delta u_{N+1}(x-\varepsilon z;y-\varepsilon z)-\Delta u_{N+1}(x;y))^2 \mathrm{d} y \mathrm{d} x \longrightarrow 0. \] Further, the map \[ z \mapsto \bet{\eta(z) \iint\limits_{\Omega\,\R^d} (\Delta u_{N+1}(x-\varepsilon z;y-\varepsilon z)-\Delta u_{N+1}(x;y))^2 \nu(x-y) \mathrm{d} y \mathrm{d} x } \] is bounded on $B_1(0)$. Thus $[v^{N+1}_\varepsilon-u_{N+1}]_{V_{\nu}(\Omega|\R^d)}\to 0$ by dominated convergence and we find $\varepsilon_0>0$, such that $[v^{N+1}_\varepsilon-u_{N+1}]_{V_{\nu}(\Omega|\R^d)} < \frac{\delta}{N+2}$ for all $\varepsilon<\varepsilon_0$. We define $v^{0}_\varepsilon = \eta_\varepsilon \ast u_{0}$.
Thus for $\varepsilon<r$ \[\supp v^0_\varepsilon\Subset \Omega.\] The convergence $v^0_\varepsilon \to u_0$ follows by the same arguments as above and we find $\varepsilon_0>0$ such that $[v_\varepsilon^0-u_0]_{V_{\nu}(\Omega|\R^d)} < \frac{\delta}{N+2}$ for all $\varepsilon<\varepsilon_0$. \noindent \textbf{Step 6:} Define $v_\varepsilon = \sum_{i=0}^{N+1} v^i_\varepsilon\in C^\infty_c(\R^d)$. Since $u_R(x) = \sum_{i=0}^{N+1} u_i(x)$, we have \begin{align*} [u_R-v_\varepsilon]_{V_{\nu}(\Omega|\R^d)} &\leq \left[\sum_{i=0}^{N+1} \left(v^i_\varepsilon - u_i\right)\right]_{V_{\nu}(\Omega|\R^d)} \\ & \leq \sum_{i=0}^{N+1} [v^i_\varepsilon -u_i]_{V_{\nu}(\Omega|\R^d)}\\ & \leq (N+2) \frac{\delta}{N+2} = \delta . \end{align*} Choosing $R=\frac{1}{\varepsilon}$ in Step 3, we conclude \[[u-v_\varepsilon]_{V_{\nu}(\Omega|\R^d)}\leq [u-u_{R}]_{V_{\nu}(\Omega|\R^d)}+ [u_R-v_\varepsilon]_{V_{\nu}(\Omega|\R^d)} \overset{\varepsilon\to 0}{\longrightarrow}0.\] The convergence in $L^2(\R^d)$ follows from the continuity of the shift in $L^2(\R^d)$. \end{proof} \pagebreak[3] The density of $C^\infty_c(\R^d)$ has a direct consequence for the nonlocal bilinear form under consideration. Concerning the definition of $\nu^\alpha$, the reader might consult \autoref{def:nu-alpha}. \begin{corollary}\label{cor:regular-DF} Assume $\Omega\subset\mathbb{R}^d$ is a bounded domain with Lipschitz continuous boundary. Assume $J^\alpha$ satisfies \eqref{eq:elliptic-condition} and \eqref{eq:integrability-condition}. Then the bilinear forms $(\mathcal{E}^\alpha, V_{\nu^\alpha}(\Omega|\R^d) \cap L^2(\R^d))$ and $(\mathcal{E}_\Omega^\alpha, H_{\nu^\alpha}(\Omega))$ are regular Dirichlet forms on $L^2(\R^d)$ resp. $L^2(\Omega)$. \end{corollary} Note that the bilinear form $(\mathcal{E}^A,H^1(\Omega))$ is a regular Dirichlet form on $L^2(\Omega)$, which follows from the fact that $\Omega$ is an extension domain.
\begin{corollary}\label{cor:regular-DF-classical} Assume $\Omega\subset\mathbb{R}^d$ is a bounded domain with Lipschitz continuous boundary. Assume that $\nu^\alpha$ has full support. Set $J^\alpha (x,y) = \nu^\alpha(x-y)$ and let $\widetilde{\nu^\alpha}$ be given as in \autoref{prop:natural-norm-on-V}. Then the bilinear form $(\mathcal{E}^\alpha, V_{\nu^\alpha}(\Omega|\R^d))$ is a regular Dirichlet form on $L^2(\R^d, \widetilde{\nu^\alpha})$. In particular, if $J^\alpha$ is given by $J^\alpha_1$ as in \autoref{ex:J-guys-singular}, then the bilinear form $(\mathcal{E}^\alpha, V^{\alpha/2}(\Omega|\R^d))$ is a regular Dirichlet form on $L^2(\R^d, \frac{\mathrm{d} x}{1+|x|^{d+\alpha}})$. \end{corollary} The next density theorem is proved in \cite[Theorem A.4]{BGPR17} and it is adapted from the main result in \cite{FSV15} for fractional Sobolev spaces. A more general result is provided by \cite[Theorem 3.3.9]{CF12}. \begin{theorem}\label{thm:density-omega} Assume $\Omega$ has a continuous boundary. Let $\nu$ be a L\'evy measure. Then $C_c^\infty(\Omega)$ is dense in the space (cf. \eqref{eq:VnuOm-vanish}) $\big(V^\Omega_{\nu}(\Omega|\mathbb{R}^d), \|\cdot \|_{V_{\nu}(\Omega|\mathbb{R}^d)} \big)$ . \end{theorem} \noindent A counterpart of \autoref{cor:regular-DF} is given by the following. \begin{corollary}\label{cor:regular-DF-omega} Assume $\Omega\subset\mathbb{R}^d$ is a bounded domain with continuous boundary. Assume $J^\alpha$ satisfies \eqref{eq:elliptic-condition} and \eqref{eq:integrability-condition}. The bilinear forms $(\mathcal{E}^\alpha, V^\Omega_{\nu^\alpha}(\Omega|\R^d))$ and $\big(\mathcal{E}_\Omega^\alpha, \overline{C_c^\infty(\Omega)}^{ H_{\nu^\alpha}(\Omega)}\big)$ are regular Dirichlet forms on $L^2(\Omega)$. \end{corollary} \noindent Note that $(\mathcal{E}^A,H_0^1(\Omega))$ is a regular Dirichlet forms, too. This result holds true without any assumption on the regularity of $\partial \Omega$. 
\section{Proof of \autoref{thm:Mosco-convergence}}\label{sec:main-proof} The aim of this section is to provide the proof of \autoref{thm:Mosco-convergence}. Let us begin with a simple but important observation. \begin{proposition}\label{prop:elliptic-matrix} Under conditions \eqref{eq:elliptic-condition} and \eqref{eq:integrability-condition}, the symmetric matrix $A$ defined as in \eqref{eq:coef-matrix} has bounded coefficients and satisfies the ellipticity condition. Precisely, we have \begin{align*} d^{-1}\Lambda^{-1}|\xi|^2\leq \langle A(x) \xi, \xi \rangle \leq d^{-1}\Lambda |\xi|^2, \quad\text{ for every } x, \xi \in \R^d \,. \end{align*} \end{proposition} \begin{proof} Let $x, \xi \in \R^d$ and $|h|\leq 1$. Then Condition \eqref{eq:elliptic-condition} implies that \begin{align*} \Lambda^{-1} \nu^\alpha(h)[\xi\cdot h]^2 \leq J^\alpha(x,x+h)[\xi\cdot h]^2 \leq \Lambda \nu^\alpha(h)[\xi\cdot h]^2\quad\text{for every }\quad \xi \in \R^d \,. \end{align*} Note that, by definition of the matrix $A$ \begin{align*}\lim_{\alpha\to 2^-}\int\limits_{|h|\leq 1} J^\alpha(x,x+h)[\xi\cdot h]^2\mathrm{d} h= \langle A(x) \xi, \xi \rangle \,. \end{align*} \noindent On the other hand, by the rotational invariance of the Lebesgue measure, we have \begin{align*} &\lim_{\alpha\to 2^-}\int\limits_{|h|\leq 1}\nu^\alpha(h)[\xi\cdot h]^2\mathrm{d} h = \lim_{\alpha\to 2^-}\int\limits_{|h|\leq 1}\nu^\alpha(h)\sum_{1\leq i,j\leq d}\xi_i\xi_j h_ih_j\mathrm{d} h\\ &=\lim_{\alpha\to 2^-} \sum_{1\leq i\leq d}\xi_i^2 \int\limits_{|h|\leq 1} h_1^2 \nu^\alpha(h)\mathrm{d} h=\lim_{\alpha\to 2^-} |\xi|^2 \int\limits_{|h|\leq 1} h_1^2 \nu^\alpha(h)\mathrm{d} h\\ &=\lim_{\alpha\to 2^-} |\xi|^2 d^{-1}\int\limits_{|h|\leq 1} \sum_{1\leq i\leq d}h_i^2 \nu^\alpha(h)\mathrm{d} h =\lim_{\alpha\to 2^-} |\xi|^2 d^{-1}\int\limits_{|h|\leq 1} \rho_{2-\alpha}(h)\mathrm{d} h\\ &= |\xi|^2 d^{-1}\,, \end{align*} which ends the proof.
\end{proof} Let us recall the notion of Mosco convergence on $L^2$- spaces according to \cite[Definition 2.1.1.]{Mos94}. \begin{definition}[Mosco-convergence]\label{def:mosco} Assume $(\mathcal{E}^n, \mathcal{D}(\mathcal{E}^n))_{n\in \mathbb{N}}$ and $(\mathcal{E}, \mathcal{D}(\mathcal{E}))$ are quadratic forms with dense domains in $L^2(E,\mu)$ where $(E,\mu ) $ is a measure space. One says that the sequence $(\mathcal{E}^n, \mathcal{D}(\mathcal{E}^n))_{n\in \mathbb{N}}$ converges in $L^2(E,\mu)$ in the Mosco sense to $(\mathcal{E}, \mathcal{D}(\mathcal{E}))$ if the following two conditions are satisfied. \noindent \textbf{Limsup:} For every $u\in L^2(E,\mu)$ there exists a sequence $(u_n)_n$ in $ L^2(E,\mu)$ such that $u_n\in \mathcal{D}(\mathcal{E}^n)$, $u_n\to u$ (read $u_n$ strongly converges to $u$) in $ L^2(E,\mu)$ and \[\limsup_{n\to \infty} \mathcal{E}^n(u_n,u_n) \leq \mathcal{E}(u,u). \] \textbf{Liminf:} For every sequence, $(u_n)_n$ with $u_n\in \mathcal{D}(\mathcal{E}^n)$ and every $u\in \mathcal{D}(\mathcal{E})$ such that $u_n \rightharpoonup u$ (read $u_n$ weakly converges to $u$) in $ L^2(E,\mu)$ we have, \[\mathcal{E}(u,u)\leq \liminf_{n\to \infty} \mathcal{E}^n(u_n,u_n).\] \end{definition} \begin{remark} (i) It is worth emphasizing that, combining the $\limsup$ and $\liminf$ conditions, the $\limsup$ condition is equivalent to the existence of a sequence $(u_n)_n$ in $ L^2(E,\mu)$ such that $u_n\in \mathcal{D}(\mathcal{E}^n)$, $u_n\to u$ in $ L^2(E,\mu)$ and \[\lim_{n\to \infty} \mathcal{E}^n(u_n,u_n)=\mathcal{E}(u,u). \] (ii) Also note that, replacing the weak convergence in the $\liminf$ condition by the strong convergence, one recovers the famous concept of Gamma convergence. \end{remark} The following Theorem is reminiscent of \cite[Theorem 2]{BBM01}. \begin{theorem}\label{thm:quadratic-convergence-BBM} Let $D\subset \mathbb{R}^d$ be an open extension domain and bounded. 
Then, under assumptions \eqref{eq:elliptic-condition} and \eqref{eq:integrability-condition} we have \begin{align}\label{eqquadratics-limit} \lim_{\alpha \to 2^-}\iint\limits_{D D} (u(x)-u(y))^2 J^\alpha(x,y)\mathrm{d} x \, \mathrm{d} y = \int\limits_{D} \langle A(x) \nabla u(x), \nabla u(x) \rangle \mathrm{d} x. \end{align} for all $u\in H^{1}(D)$. In particular, if $J^\alpha= 2d|x-y|^{-2}\rho_{2-\alpha}(x-y)$ or $J^\alpha= J_k^\alpha$ with $k=1,2,3$ then \begin{align*}\label{eqquadratics-limit-special} \lim_{\alpha \to 2^-} \frac{1}{2}\iint\limits_{D D} (u(x)-u(y))^2 J^\alpha(x,y)\mathrm{d} x \, \mathrm{d} y = \int\limits_{D} |\nabla u(x) |^2 \mathrm{d} x. \end{align*} \end{theorem} \noindent In the proof we will make use of the following simple observation. \begin{lemma}\label{lem:concentration} Assume $\beta\geq 0$ and $R>0$. Then, obviously, $\int_{|x|\leq R}\rho_{2-\alpha}(x)\mathrm{d} x \leq 1$. Moreover, \begin{align*} \lim\limits_{\alpha\to 2^-} \int_{|x|\leq R}|x|^\beta\rho_{2-\alpha}(x)\mathrm{d} x = \begin{cases} 1 \quad &\text{ if } \beta=0 \,, \\ 0 &\text{ if }\beta > 0 \,. \end{cases} \end{align*} \end{lemma} \begin{proof}[Proof of \autoref{thm:quadratic-convergence-BBM}] \autoref{lem-cont-qua-form} suggests that it suffices to prove \eqref{eqquadratics-limit} for $u$ in a dense subset of $H^{1}(D)$. For instance, let us choose $u\in C^2(\overline{D}) $. \begin{align} &\iint\limits_{D\times D \cap \{|x-y|\geq 1\}} (u(x)-u(y))^2 J^\alpha(x,y)\mathrm{d} x \, \mathrm{d} y \nonumber\\ & \quad \leq 4\int\limits_{D}u^2(x) \mathrm{d} x \int\limits_{ |x-y|\geq 1} J^\alpha(x,y) \, \mathrm{d} y \to 0, ~\hbox{as $\alpha\to 2^-$} \,. \end{align} \noindent Now, we consider the mapping $F:D\times (0,2)\to \mathbb{R}$ with \begin{align*} F(x,\alpha):= \int\limits_{|x-y|\leq 1} (u(x)-u(y))^2 J^\alpha(x,y) \mathrm{d} y. 
\end{align*} By Taylor expansion we obtain \begin{align*} u(y)-u(x) = \nabla u (x)\cdot(y-x)+ r_1(x,y)|x-y|^2 \end{align*} therefore, we can write \begin{align*} (u(y)-u(x))^2 = (\nabla u (x)\cdot(y-x))^2+ r(x,y)|x-y|^3 \end{align*} with bounded remainders $r(x,y)$ and $r_1(x,y)$. Hence, $F(x,\alpha)$ can be written as \begin{align*} F(x,\alpha)&= \int\limits_{|x-y|\leq 1} [\nabla u(x)\cdot (y-x)]^2 J^\alpha(x,y) \mathrm{d} y+ R(x,\alpha)\,, \end{align*} where \begin{align*} |R(x,\alpha)| &:=\Big|\, \int\limits_{|x-y|\leq 1} r(x,y) |x-y|^3 J^\alpha(x,y) \mathrm{d} y\Big| \\ &\leq C \int\limits_{|x-y|\leq 1 } |x-y|\rho_{2-\alpha}(x-y)\mathrm{d} y \to 0 ~~\mbox{ as } \alpha \to 2^- \,. \end{align*} Here, we have applied \eqref{eq:elliptic-condition} and \autoref{lem:concentration}. Finally, we obtain \begin{align*} \lim_{\alpha\to 2^-} F(x,\alpha) &= \lim_{\alpha\to 2^-} \int\limits_{|x-y|\leq 1} [\nabla u(x)\cdot (y-x)]^2 J^\alpha(x,y) \mathrm{d} y \\ &= \lim_{\alpha\to 2^-} \int\limits_{|x-y|\leq 1} \left[\sum_{i=1}^{d} \partial_i u(x) (y_i-x_i)\right]^2 J^\alpha(x,y) \mathrm{d} y \\ &= \sum_{1\leq i,j\leq d} \partial_i u(x) \partial_j u(x) \lim_{\alpha\to 2^-} \int\limits_{|x-y|\leq 1} ( y_i-x_i)(y_j-x_j) J^\alpha(x,y) \mathrm{d} y \\ &= \sum_{1\leq i,j\leq d} a_{ij} (x)\partial_i u(x) \partial_j u(x) = \langle A (x)\nabla u(x), \nabla u(x)\rangle. \end{align*} In particular, if $J^\alpha(x,y)\mathbbm{1}_{B_1}(x-y)= \frac{C_{d,\alpha }}{2}|x-y|^{-d-\alpha}$ then, thanks to the rotational invariance of the Lebesgue measure we get $a_{ij}(x) =0$ for $i\neq j$ and \begin{align*} a_{ii}(x)&= \lim_{\alpha\to 2^-}\frac{C_{d,\alpha}}{2} \int\limits_{|h|\leq 1} h_i^2 |h|^{-d-\alpha} \mathrm{d} h=\lim_{\alpha\to 2^-} \frac{C_{d,\alpha}}{2d} \int\limits_{|h|\leq 1} |h|^{2-d-\alpha} \mathrm{d} h\\ &=\lim_{\alpha\to 2^-} \frac{C_{d,\alpha}\,\omega_{d-1}}{2d (2-\alpha)} = 1.
\end{align*} The fact that $ \frac{C_{d,\alpha}\,\omega_{d-1}}{2d (2-\alpha)} \to 1$ can be found in \cite{Hitchhiker}. A similar conclusion also holds if $J^\alpha(x,y)\mathbbm{1}_{B_1}(x-y)=d| x-y|^{-2}\rho_{2-\alpha}(x-y)$. Now noticing that the function $F(x, \alpha) $ is bounded on $D\times (0,2) $, Lebesgue's dominated convergence theorem yields \begin{align*} \lim_{\alpha \to2^-} \hspace*{-3ex} \iint\limits_{D\times D\cap \{|x-y|\leq1 \}} \hspace*{-2ex} (u(x)-u(y))^2J^\alpha(x,y)\mathrm{d} x \, \mathrm{d} y = \lim_{\alpha \to2^-} \int\limits_{D} F(x,\alpha)\mathrm{d} x = \int\limits_{D} \langle A(x)\nabla u(x), \nabla u(x) \rangle \ \mathrm{d} x. \end{align*} Altogether, we obtain the required result. \end{proof} \begin{lemma}\label{lem-liminf} Let $\Omega$ be a bounded and open subset of $\mathbb{R}^d$. Assume $(u_n)_{n}\subset L^2(\Omega) $ is a sequence converging in $L^2(\Omega)$ to some $u \in H^{1}(\Omega)$. Then, under the assumptions \eqref{eq:elliptic-condition} and \eqref{eq:translation-invariance}, for any given sequence $\alpha_n\in (0,2)$ such that $\alpha_n\to 2^- $ we have \begin{align}\label{eq:convex-ineq} \int\limits_{\Omega} \langle A\nabla u(x), \nabla u(x) \rangle \mathrm{d} x \leq \liminf\limits_{n \to \infty} \iint\limits_{\Omega\Omega} (u_{n}(x) -u_{n}(y))^2 J^{\alpha_n}(x,y)\mathrm{d} x\mathrm{d} y. \end{align} \end{lemma} \begin{proof} We borrow the technique from \cite{Brezis-const-function} and it is worth mentioning that an inequality similar to \eqref{eq:convex-ineq} appears in \cite{Ponce2004}. Assume $0 \in \Omega$, otherwise one can consider any arbitrary point $x_0$ in $\Omega$. Let us fix $\delta >0$ small enough and put $\Omega_\delta =\{x\in \Omega: \operatorname{dist}(x,\partial\Omega)>\delta\}$. Let us consider $\phi \in C_c^{\infty}(\mathbb{R}^d)$, supported in $B_1(0)$, such that $\phi \geq 0$ and $ \int \phi = 1$.
Define the mollifier $\phi_\delta(x)= \frac{1}{\delta^d}\phi\left(\frac{x}{\delta}\right)$ with support in $B_\delta(0)$ and let $u^\delta_{n} = u_{n} *\phi_\delta$ denote the convolution of $u_n $ and $\phi_\delta$. For the sake of simplicity we will assume $u_n$ and $u$ are extended by zero outside of $\Omega$. Assume $z\in\Omega_\delta $ and $|h|\le \delta$; then $z-h\in \Omega_\delta-h \subset \Omega$, so that the translation invariance condition \eqref{eq:translation-invariance} implies \begin{align*} \iint\limits_{\Omega_\delta\Omega_\delta} \left( u_{n}(x-h) -u_{n}(y-h)\right)^2 J^{\alpha_n}(x,y)\mathrm{d} x\mathrm{d} y \leq \iint\limits_{\Omega\Omega}(u_{n}(x) -u_{n}(y))^2 J^{\alpha_n}(x,y)\mathrm{d} x\mathrm{d} y. \end{align*} Thus, given that $\int \phi_\delta = 1$, integrating both sides over the ball $B_\delta(0)$ with respect to $\phi_\delta(h)\mathrm{d} h$ and employing Jensen's inequality afterwards yields \begin{align}\label{eqmolification-convex-Jessen} \iint\limits_{\Omega_\delta\Omega_\delta} \left(u^\delta_{n}(x) -u^\delta_{n}(y)\right)^2 J^{\alpha_n}(x,y)\mathrm{d} x\mathrm{d} y \leq \iint\limits_{\Omega\Omega} \left( u_{n}(x) -u_{n}(y)\right)^2 J^{\alpha_n}(x,y)\mathrm{d} x\mathrm{d} y.
\end{align} By Lemma \ref{lem-cont-qua-form} there is a constant $C$ independent of $\alpha_n$ for which \begin{align*} \left| (\mathcal{E}^{\alpha_n}_{\Omega_\delta}(u_n^\delta, u_n^\delta))^{1/2}-(\mathcal{E}^{\alpha_n}_{\Omega_\delta}(u^\delta, u^\delta))^{1/2}\right| &\leq (\mathcal{E}^{\alpha_n}_{\Omega_\delta}(u_n^\delta-u^\delta, u_n^\delta-u^\delta))^{1/2}\\ &\leq C\|u_n^\delta-u^\delta\|_{H^1(\Omega_\delta )}\\ &\leq C \|\phi_\delta\|_{W^{1,\infty}(B_\delta )}\|u_n-u\|_{L^2(\Omega )}. \end{align*} This implies \begin{align*} \left| (\mathcal{E}^{\alpha_n}_{\Omega_\delta}(u_n^\delta, u_n^\delta))^{1/2}-(\mathcal{E}^{\alpha_n}_{\Omega_\delta}(u^\delta, u^\delta))^{1/2}\right|\leq C \|\phi_\delta\|_{W^{1,\infty}(B_\delta )}\|u_n-u\|_{L^2(\Omega )} \to 0, \end{align*} since by assumption $\|u_{n}-u\|_{L^2(\Omega)}\to 0$. On the other hand, Theorem \ref{thm:quadratic-convergence-BBM} yields that $\mathcal{E}^{\alpha_n}_{\Omega_\delta}(u^\delta, u^\delta)\to \mathcal{E}^{A}_{\Omega_\delta}(u^\delta, u^\delta)$. Thus, we have shown that \begin{align*} \mathcal{E}^{\alpha_n}_{\Omega_\delta}(u_n^\delta, u_n^\delta) \to \mathcal{E}^{A}_{\Omega_\delta}(u^\delta, u^\delta). \end{align*} Inserting this in \eqref{eqmolification-convex-Jessen}, we obtain \begin{align*} \int\limits_{\Omega_\delta} \langle A\nabla u^\delta(x), \nabla u^\delta(x) \rangle \mathrm{d} x \leq \liminf\limits_{n\to\infty}\iint\limits_{\Omega\Omega} (u_{n}(x) -u_{n}(y))^2 J^{\alpha_n}(x,y)\mathrm{d} x\mathrm{d} y.
\end{align*} Given that $u \in H^{1}(\Omega)$, it is clear that $\nabla(\phi_\delta*u)=\phi_\delta* \nabla u$ and hence the desired inequality follows by letting $\delta \to 0^+$ since $\| \phi_\delta* \nabla u - \nabla u \|_{L^2(\Omega)}\to 0$ as $\delta \to 0^+$. \end{proof} Finally, we are now in a position to prove our main result, \autoref{thm:Mosco-convergence}. \begin{proof}[Proof of \autoref{thm:Mosco-convergence}] Note that $C_c^\infty(\mathbb{R}^d) \subset V_{\nu^\alpha}(\Omega|\R^d)$ and $V_{\nu^\alpha}(\Omega|\R^d) \big|_\Omega \subset H_{\nu^{\alpha}}( \Omega) \subset L^{2}( \Omega) $. Hence the denseness of domains in $L^2(\Omega)$ readily follows from \autoref{thm:density}. We consider the ``$\limsup$''- and the ``$\liminf$''-part separately. \par \textbf{Limsup:} Let $u\in L^2(\Omega)$. If $u \not\in H^1(\Omega)$, then the $\limsup$ statement holds true since $ \mathcal{E}^A(u,u)=\infty$. Now assume $u \in H^1(\Omega)$. By identifying $u$ with one of its extensions $\overline{u}\in H^1(\mathbb{R}^d)$, for the sake of simplicity we can always assume that $u \in H^1(\mathbb{R}^d) $. On the one hand, Theorem \ref{thm:quadratic-convergence-BBM} shows that $\lim\limits_{\alpha\to 2^{-}} \mathcal{E}^\alpha_{\Omega}(u,u)=\mathcal{E}^A(u,u)$. On the other hand, since by Theorem \ref{thm:density}, $C_c^\infty(\mathbb{R}^d)$ is dense in $H^1(\mathbb{R}^d)\cap V_{\nu^\alpha}(\Omega|\R^d)$ and \begin{align*} \mathcal{E}^\alpha(u,u)= \mathcal{E}^\alpha_{\Omega}(u,u) + 2\iint\limits_{\Omega\Omega^c} (u(x)-u(y))^2J^\alpha(x,y)\mathrm{d} x\mathrm{d} y \end{align*} it remains to show that, for $ u \in C_c^\infty(\mathbb{R}^d)$ \begin{align}\label{eqextern-lim} \iint\limits_{\Omega\Omega^c} (u(x)-u(y))^2J^\alpha(x,y)\mathrm{d} x\mathrm{d} y\to 0,\quad \text{as $\alpha\to 2^-$.} \end{align} To this end, let us assume $u\in C_c^\infty(\mathbb{R}^d)$. Then we have \[ |u(y)-u(x)|^2 \leq \|\nabla u\|^2_{\infty} | x-y|^2.
\] Let $R>0$ be large enough such that $\operatorname{supp} u\subset B_{R/2}(0)$. For fixed $x\in \Omega$, let $\delta_x= \operatorname{dist}(x, \partial \Omega)>0 $; we obtain the following estimates \begin{align*} \int_{\Omega^c} \frac{(u(x) -u(y))^2}{|x-y|^{2}} \rho_{2-\alpha}(x-y) dy &=\hspace*{-2ex} \int\limits_{R>|x-y|> \delta_x} \hspace*{-3ex} \frac{(u(x) -u(y))^2}{|x-y|^{2}} \rho_{2-\alpha}(x-y)\mathrm{d} y+ u^2(x) \hspace*{-2ex} \int\limits_{|x-y|\geq R} \hspace*{-2ex} \frac{ \rho_{2-\alpha}(x-y) }{|x-y|^{2}} \mathrm{d} y\\ &\leq \|\nabla u\|^2_{\infty}\hspace*{-2ex} \int\limits_{R>|x-y|> \delta_x} \rho_{2-\alpha}(x-y)\mathrm{d} y+ \|u\|^2_{\infty} R^{-2} \hspace*{-2ex} \int\limits_{|x-y|\geq R} \hspace*{-2ex} \rho_{2-\alpha}(x-y) \mathrm{d} y\\ &\leq C \int\limits_{|x-y|> \delta_x} \rho_{2-\alpha}(x-y) dy \to 0 \quad \text{ as } \alpha\to 2^{-}. \end{align*} Moreover, from the above estimates one also has \begin{align*} \int_{\Omega^c} \frac{(u(x) -u(y))^2}{|x-y|^{2}} \rho_{2-\alpha}(x-y) dy \leq C\hspace*{-2ex}\int\limits_{|x-y|>\delta_x} \rho_{2-\alpha}(x-y) dy \leq C \end{align*} with the constant $C$ independent of $x$. Hence, combining this and the assumption \eqref{eq:elliptic-condition}, the statement \eqref{eqextern-lim} follows from the dominated convergence theorem. Thus, we conclude that for $u\in H^1(\Omega)$, \[\limsup_{\alpha \to 2^{-}} \mathcal{E}_\Omega^\alpha(u,u)= \limsup_{\alpha \to 2^{-}} \mathcal{E}^\alpha(u,u)= \mathcal{E}^A(u,u). \] Thus, choosing the constant sequence $u_\alpha= u$ for all $ \alpha \in (0,2)$ we are provided with the $\limsup$ condition for both forms $( \mathcal{E}^\alpha_{\Omega}(\cdot, \cdot), H_{\nu^{\alpha}}( \Omega) )_{\alpha }$ and $( \mathcal{E}^\alpha(\cdot,\cdot) , V_{\nu^\alpha}(\Omega|\R^d))_{\alpha}$. \par \textbf{Liminf}: Let $u, u_n\in L^2(\Omega)$ be such that $u_n \rightharpoonup u$ in $L^2(\Omega)$. Necessarily, $(u_n)_{n}$ is bounded in $ L^2(\Omega)$.
Let $( \alpha_n)_n$ be a sequence in $(0,2)$ such that $ \alpha_n \to 2^-$ as $n\to \infty$. If $\liminf\limits_{n \to \infty} \mathcal{E}_\Omega^{\alpha_n}(u_n,u_n) =\infty$ then, \[ \mathcal{E}^A(u,u)\leq \liminf_{n \to \infty} \mathcal{E}_\Omega^{ \alpha_n }(u_n,u_n) = \liminf_{n \to \infty} \mathcal{E}^{ \alpha_n }(u_n,u_n) =\infty. \] Assume $\liminf\limits_{n \to \infty} \mathcal{E}_\Omega^{ \alpha_n }(u_n,u_n)<\infty$ then according to \cite{BBM01, Ponce2004} the sequence $(u_n)_n$ has a subsequence (which we again denote by $u_n$) converging in $L^2(\Omega)$ to some $\widetilde{u}\in H^1(\Omega)$. Consequently, as $u_n \rightharpoonup u$ it readily follows that, $u_n\to u$ in $L^2(\Omega)$. Therefore, taking into account that $u\in H^1(\Omega)$, the desired liminf inequality is an immediate consequence of \autoref{lem-liminf}. The proof of \autoref{thm:Mosco-convergence} is complete. \end{proof} We adopt the convention that, for a given quadratic form $\big(\mathcal{E}, \mathcal{D}(\mathcal{E})\big)$, we have $\mathcal{E}(u,u)= \infty$ whenever $u \not\in \mathcal{D}(\mathcal{E})$. The next result is a variant of \autoref{thm:Mosco-convergence} with $H^1(\Omega)$ replaced by $H^1_0(\Omega)$ \begin{theorem}\label{thm:Mosco-convergence-bis} Let $\Omega\subset \mathbb{R}^d$ be an open bounded set with a continuous boundary. Assume \eqref{eq:elliptic-condition}, \eqref{eq:integrability-condition} and \eqref{eq:translation-invariance}. Then the two families of quadratic forms $\big(\mathcal{E}^\alpha_{\Omega}(\cdot, \cdot),\overline{C_c^\infty(\Omega)}^{ H_{\nu^\alpha}( \Omega)} \big)_{\alpha}$ and $\big( \mathcal{E}^\alpha(\cdot, \cdot),V^\Omega_{\nu^\alpha}( \Omega|\mathbb{R}^d) \big)_{\alpha}$ both converge to $( \mathcal{E}^A(\cdot,\cdot), H_0^{1}( \Omega) )$ in the Mosco sense in $L^2(\Omega)$ as $\alpha\to 2^-$. \end{theorem} The result relies on the density of $\overline{C_c^\infty(\Omega)}^{ H_{\nu^\alpha}( \Omega)}$ resp. 
$V^\Omega_{\nu^\alpha}( \Omega|\mathbb{R}^d)$. The density of the first space is trivial. The density of the second space is formulated in \autoref{thm:density-omega}. Apart from the density issue, the details of the proof are the same as in the proof of \autoref{thm:Mosco-convergence}. \section{Examples of kernels}\label{sec:examples} Here we collect some concrete examples of sequences $(\rho_\varepsilon)_\varepsilon$ satisfying the assumptions in \autoref{def:nu-alpha}. Note that we have two different kinds of examples. The functions $h \mapsto \rho_\varepsilon(h)$ that appear in \autoref{ex:most-important} are unbounded and the singularity gets critical at $h=0$ as $\varepsilon \to 0^+$. The functions $\rho_\varepsilon$ that appear in \autoref{ex:bounded-nu} are bounded where the bound depends on a rescaling factor that blows up as $\varepsilon \to 0^+$. Both examples lead to a diffusion operator resp. gradient form in the limit. \noindent Throughout all these examples, $d\geq 1$, the constant $\omega_{d-1}$ is the area of the $(d-1)$-dimensional unit sphere and $\varepsilon_{0} >0$ is a fixed number. \begin{example}\label{ex:4-0} This example is taken from \autoref{ex:most-important}. For $\varepsilon > 0$ and $x \in \R^d$ set \begin{align*} \rho_\varepsilon (x) = \frac{\varepsilon}{\omega_{d-1}} |x|^{-d+\varepsilon} \mathbbm{1}_{B_1}(x)\,. \end{align*} \end{example} \begin{example}\label{ex:4-1} This example is a version of \autoref{ex:bounded-nu}. Assume $d\geq 1$, $0<\varepsilon < \varepsilon_{0}$ and any $-d < \beta \leq 2$. Set \begin{align*} \rho_\varepsilon(x) = \frac{d+\beta}{ \omega_{d-1}\varepsilon^{d+\beta}} |x|^{\beta}\mathbbm{1}_{B_{\varepsilon}}(x),\qquad\qquad x\in \mathbb{R}^d. \end{align*} \end{example} \begin{example}\label{ex:4-2} For $d\geq 1$ and $0<\varepsilon < \varepsilon_{0}$, set \begin{align*} \rho_\varepsilon(x) = \frac{1}{\omega_{d-1}\log(\varepsilon_{0}/\varepsilon )}|x|^{-d}\mathbbm{1}_{\{\varepsilon <|x|<\varepsilon_{0}\}}.
\end{align*} Note that $ \log(\varepsilon_{0}/\varepsilon )\sim |\log(\varepsilon )| $. This example is the counterpart of \autoref{ex:4-1} at the end point $\beta =-d$. \end{example} \begin{example}\label{ex:4-3} Assume $d\geq 1$, $0<\varepsilon < \varepsilon_{0}$ and $-d < \beta \leq 2$. For $x \in \mathbb{R}^d$ consider \begin{align*} \rho_\varepsilon(x) = \frac{(|x|+\varepsilon)^{\beta}}{\omega_{d-1} b_\varepsilon}\mathbbm{1}_{B_{\varepsilon_{0}}}(x)\qquad\qquad \hbox{with }\quad b_\varepsilon = \varepsilon^{d+\beta}\int^{1}_{\frac{\varepsilon}{\varepsilon+\varepsilon_{0}}} t^{-d-\beta-1}(1-t)^{d-1}dt. \end{align*} The constant $b_\varepsilon $ is chosen such that $\int_{\mathbb{R}^d} \rho_\varepsilon(x) dx =1$. Additionally one can check $$\frac{(d+\beta)}{b_\varepsilon\varepsilon_{0} ^{d+\beta}}\to 1\quad \hbox{as }~~~\varepsilon\to 0^+. $$ \end{example} \begin{example}\label{ex:4-4} Assume $d\geq 1$ and $0<\varepsilon < \varepsilon_{0}$. For $x \in \mathbb{R}^d$ consider \begin{align*} \rho_\varepsilon(x) = \frac{(|x|+\varepsilon)^{-d}}{\omega_{d-1} b_\varepsilon}\mathbbm{1}_{B_{\varepsilon_{0}}}(x)\qquad\qquad \hbox{with }\quad b_\varepsilon = \int^{1}_{\frac{\varepsilon}{\varepsilon+\varepsilon_{0}}} t^{-1}(1-t)^{d-1}dt. \end{align*} The choice of the constant $b_\varepsilon $ ensures $\int_{\mathbb{R}^d} \rho_\varepsilon(x) dx =1$. It is not difficult to check $$ \frac{|\log(\varepsilon)|}{b_\varepsilon}\to 1\quad \hbox{as }~~~\varepsilon\to 0^+. $$ This example is the counterpart of \autoref{ex:4-3} at the end point $\beta=-d$. \end{example} \begin{example}\label{ex:4-5} Assume $d\geq 1$, $0<\varepsilon < \varepsilon_{0}$ and $\beta>0$. For $x \in \mathbb{R}^d$ consider \begin{align*} \rho_\varepsilon(x) = \frac{|x|^\beta}{\omega_{d-1} b_\varepsilon(|x|+\varepsilon)^{d+\beta}}\mathbbm{1}_{B_{\varepsilon_{0}}}(x)\qquad\qquad \hbox{with }\quad b_\varepsilon = \int^{1}_{\frac{\varepsilon}{\varepsilon+\varepsilon_{0}}} t^{-1}(1-t)^{d+\beta-1}dt.
\end{align*} As above, the choice of $b_\varepsilon $ ensures $\int_{\mathbb{R}^d} \rho_\varepsilon(x) dx =1$. It is not difficult to check that $$ \frac{|\log(\varepsilon)|}{b_\varepsilon}\to 1\quad \hbox{as }~~~\varepsilon\to 0^+. $$ \end{example} \begin{example}\label{ex:4-6} Assume $d\geq 1$, $0<\varepsilon<\varepsilon_0$. Let $\phi: \mathbb{R}\to [0, \infty)$ be almost decreasing and such that, $\int_{\mathbb{R}}\phi(s)\mathrm{d} s= 1$ \begin{align*} \rho_\varepsilon(x) = \frac{|x|^{-d+1}}{\omega_{d-1}\varepsilon }\phi\big(|x|/\varepsilon\big)\, . \end{align*} \end{example} \newcommand{\etalchar}[1]{$^{#1}$} \end{document}
\begin{document} \title{Simulating open quantum systems by applying SU(4) to quantum master equations} \author{Minghui Xu} \author{D. A. Tieri} \author{M. J. Holland} \affiliation{JILA, National Institute of Standards and Technology and Department of Physics, University of Colorado, Boulder, Colorado 80309-0440, USA} \date{\today} \begin{abstract} We show that open quantum systems of two-level atoms symmetrically coupled to a single-mode photon field can be efficiently simulated by applying {\em SU}(4) group theory to quantum master equations. This is important since many foundational examples in quantum optics fall into this class. We demonstrate the method by finding exact solutions for many-atom open quantum systems such as lasing and steady state superradiance. \end{abstract} \pacs{03.65.Yz 03.67.Ac 02.20.Qs 42.50.Pq} \keywords{Suggested keywords} \maketitle \section{Introduction} Most physical situations to which quantum mechanics is applied are open. The open nature is necessary to treat basic irreversible processes such as energy transfer with a heat bath, particle exchange with a reservoir, and quantum measurements. Open quantum systems can be treated under the Born and Markov approximations by the quantum master equation in the Lindblad form~\cite{Lindblad76}, which has been applied across many fields of physics, including quantum optics and quantum information science~\cite{Gardiner04,Carmichael}, atomic and molecular physics~\cite{Bollinger10}, solid state physics~\cite{Blais07}, and optomechanics~\cite{Teufel11}. In general, for all but the smallest system sizes, exact analytic solutions to the quantum master equation are intractable. Various approximation methods have been introduced, {\em e.g.} perturbation theories~\cite{Cirac12}, mean-field approaches~\cite{Zoller10,nagy11}, cumulant expansions~\cite{Meiser09,Meiser10}, linear response theories~\cite{Lukin13} and $c$-number Langevin equations~\cite{Scully90, Fabre93}.
However, it is often necessary to benchmark approximate methods with exact numerical solutions. Existing numerical simulation approaches, such as the quantum Monte Carlo method~\cite{Knight98}, scale exponentially with the underlying dimensionality of the Hilbert space. Therefore, treating any appreciable system size is extremely difficult. Here we present a novel group-theoretic approach to find an efficient solution of the quantum master equation, which reduces the exponential scaling of the problem to cubic. Even though we focus on an important class of quantum optical systems, the methods we present could be more generally applied. We consider the symmetric coupling of a single-mode cavity field to an ensemble of $N$ two-level atoms (analogous to pseudo-spin-1/2 systems or qubits). The Hamiltonian that describes this situation in the interaction picture is given by \begin{equation} H=\frac{\hbar\Delta}{2}\sum_{j=1}^N\sigma_j^{(3)} +\hbar \Omega\sum_{j=1}^{N}(a^\dagger\sigma_j^-+a\sigma_j^+)\,, \label{eq:hamiltonian} \end{equation} where the first term is the free energy, with $\Delta$ being the detuning of the light field from the atomic transition, and the second term is the reversible atom-field coupling with strength~$\Omega$. The photon annihilation operator is $a$, and $\sigma_j^{(3)}$ and $\sigma_j^+=(\sigma_j^-)^\dagger$ are Pauli operators for the $j$th spin-component. In the presence of decoherence, the full quantum evolution is described by the quantum master equation for the reduced density operator $\rho$: \begin{eqnarray}\label{eq1} \dot{\rho} &=& \mathcal{L}\rho= \frac{1}{i\hbar}[H,\rho]+\kappa\mathcal{D}[a]\rho\nonumber\\ &&+ \sum_{j=1}^N\left(\gamma\mathcal{D}[\sigma_j^-] +w\mathcal{D}[\sigma_j^+]+\frac{1}{2T_2} \mathcal{D}[\sigma_j^{(3)}]\right)\rho\,, \end{eqnarray} where $\mathcal{D}[\hat{O}]\rho=(2\hat{O}\rho \hat{O}^\dagger-\hat{O}^\dagger \hat{O}\rho-\rho \hat{O}^\dagger \hat{O})/2$ denotes the Lindblad superoperator.
We have introduced the decay rate $\kappa$ for the cavity, and population relaxation rates for the spin components $\gamma,w$ (for decay and pumping respectively) and dephasing rate $1/(2T_2)$. \section{Applying {\em SU}(4) to the quantum master equation} Recently, it was pointed out that it is preferable to work in Liouville space rather than in Hilbert space since the Lindblad operators are invariant under {\em SU}(4) transformations~\cite{Hartmann12}. This observation allows one to express all of the Lindblad operators in terms of generators of the {\em SU}(4) group. For this purpose, 18 superoperators $\mathcal{O}_+$, $\mathcal{O}_-$ and $\mathcal{O}_3$ where $\mathcal{O}\in\left\{\mathcal{Q},\Sigma,\mathcal{M},\mathcal{N}, \mathcal{U},\mathcal{V}\right\}$ are defined \begin{equation}\label{super} \begin{split} \mathcal{Q}_{\pm}\rho := \sum_{j=1}^N\sigma_j^{\pm}\rho\sigma_j^{\mp}&,\;\;\;\mathcal{Q}_3\rho := \frac{1}{4}\sum_{j=1}^N\left(\sigma_j^3\rho+\rho\sigma_j^3\right)\\ \Sigma_{\pm}\rho := \sum_{j=1}^N\sigma_j^{\pm}\rho\sigma_j^{\pm}&,\;\;\;\Sigma_3\rho := \frac{1}{4}\sum_{j=1}^N\left(\sigma_j^3\rho-\rho\sigma_j^3\right)\\ \mathcal{M}_{\pm}\rho := \sum_{j=1}^N\sigma_j^{\pm}\rho\frac{1+\sigma_j^3}{2}&,\;\;\;\mathcal{M}_3\rho := \frac{1}{2}\sum_{j=1}^N\sigma_j^3\rho\frac{1+\sigma_j^3}{2}\\ \mathcal{N}_{\pm}\rho := \sum_{j=1}^N\sigma_j^{\pm}\rho\frac{1-\sigma_j^3}{2}&,\;\;\;\mathcal{N}_3\rho := \frac{1}{2}\sum_{j=1}^N\sigma_j^3\rho\frac{1-\sigma_j^3}{2}\\ \mathcal{U}_{\pm}\rho := \sum_{j=1}^N\frac{1+\sigma_j^3}{2}\rho\sigma_j^{\mp}&,\;\;\;\mathcal{U}_3\rho := \frac{1}{2}\sum_{j=1}^N\frac{1+\sigma_j^3}{2}\rho\sigma_j^3\\ \mathcal{V}_{\pm}\rho := \sum_{j=1}^N\frac{1-\sigma_j^3}{2}\rho\sigma_j^{\mp}&,\;\;\;\mathcal{V}_3\rho := \frac{1}{2}\sum_{j=1}^N\frac{1-\sigma_j^3}{2}\rho\sigma_j^3. 
\end{split} \end{equation} Although this list, Eq.~(\ref{super}), contains 18 operator definitions, only 15 of them are independent (it is possible to write $\mathcal{N}_3$, $\mathcal{U}_3$, $\mathcal{V}_3$ in terms of the others). One can also demonstrate that the 15 remaining superoperators are linear combinations of the familiar Gell-Mann matrices that are the generators of the {\em SU}(4) group, $\lambda_1,...,\lambda_{15}$~ (see Appendix \ref{app1}). As a consequence, it is possible to construct a reduced basis for the density operator using a multiplet of the {\em SU}(4) group. Transcribing notation from the four-flavor quark model---a model with the same symmetry structure---the fundamental representation is given by $u=|1\rangle\langle1|$, $d=|0\rangle\langle0|$, $s=|1\rangle\langle0|$, and $c=|0\rangle\langle1|$ (up, down, strange, and charm). Since the symmetry type of the basis is preserved under the action of the {\em SU}(4) generators~\cite{note1}, this leads to a tremendous reduction of the number of required basis states needed to provide an exact solution of the master equation. For the fully symmetric case, the basis is: \begin{equation} P_{q,q_3,\sigma_3}=\mathcal{S}(u^\alpha d^\beta s^\gamma c^\delta), \end{equation} where $\mathcal{S}$ denotes the symmetrizer and $\alpha+\beta+\gamma+\delta=N$. Note that only basis states with $\gamma=\delta=0$ have non-vanishing trace. The three quantum numbers $q,q_3$ and $\sigma_3$ have ranges $q=0,1/2,...,N/2$, $q_3=-q,-q+1,...,q$ and $\sigma_3=q-N/2, q-N/2+1,...,N/2-q$, resulting in the dimensionality of the basis $(N+1)(N+2)(N+3)/6$, {\em i.e.\/} of order $N^3$. This tremendous reduction should be compared with the full dimensionality of the Liouville space given by~$4^N$. In this paper, we apply the {\em SU}(4) group theory to find exact solutions to the quantum master equation in general form. We show how to calculate the various basic observables of interest. 
We demonstrate that the density matrix in the {\em SU}(4) basis representation can be precisely mapped to the collective spin-angular-momentum representation $|S,M\rangle$ in Hilbert space, which enables us to efficiently diagonalize the density matrix. This allows us to provide complete information about the system, including functional properties of the density operator such as the purity and von Neumann entropy. In order to solve Eq.~(\ref{eq1}), we expand the density matrix as \begin{equation}\label{ex} \rho=\sum_{q,q_3,\sigma_3,m,n} C_{q,q_3,\sigma_3}^{m,n} P_{q,q_3,\sigma_3}\bigl|m\bigr>\bigl<n\bigr|\,, \end{equation} where $C_{q,q_3,\sigma_3}^{m,n}$ are complex coefficients, and $|n\rangle$ is the photon Fock state. The Lindblad operators can be written compactly: \begin{eqnarray}\label{liv} \sum_{j=1}^N\mathcal{D}[\sigma_j^{\pm}]&=&-\frac{N}{2}\pm \mathcal{Q}_3+\mathcal{Q}_{\pm}\,,\nonumber\\ \sum_{j=1}^N\mathcal{D}[\sigma_j^{(3)}]&=&4\mathcal{M}_3-2 \mathcal{Q}_3-2\Sigma_3-N\,. \end{eqnarray} The completeness of $\mathcal{O}_{+,-,3}$ and $a$ implies that an arbitrary Hamiltonian can be expressed by them, {\em e.g.} from Eq.~(\ref{eq:hamiltonian}), \begin{eqnarray}\label{ham} \frac{1}{i\hbar}[H,\rho]&=&-2i\Delta\Sigma_3\rho-i\Omega \left[a(\mathcal{M}_++\mathcal{N}_+)\rho+a^\dagger (\mathcal{M}_-+\mathcal{N}_-)\rho\right]\nonumber\\ &&\quad{}+i\Omega\left[(\mathcal{U}_++\mathcal{V}_+)\rho a^\dagger +(\mathcal{U}_-+\mathcal{V}_-)\rho a\right]\,. \end{eqnarray} Combining Eqs.~(\ref{liv}) and (\ref{ham}) with the action rules of the {\em SU}(4) and photon operators on the basis states (see Appendix \ref{app2}) gives a closed solution of Eq.~(\ref{eq1}). In general, this can be solved analytically or numerically with standard methods. \section{Observables} Having established the procedure for determining the time evolution of $\rho$, it is now important to describe how to calculate physical observables. 
We begin with the trace given by: \begin{equation} \mathrm{Tr}[\rho]=\sum_{m,q3}C_{N/2,q3,0}^{m,m}=1\,, \label{eq:trace} \end{equation} which is an invariant during evolution to represent probability conservation. Average values $\bigl<a\bigr>$ and $\bigl<a^\dagger a\bigr>$ are found analogously. For the spin-operators, we provide the following examples up to quadratic order: \begin{equation}\label{ob} \begin{split} &\langle\sigma_j^{(3)}\rangle=2\mathrm{Tr}[\mathcal{Q}_3\rho]/N,\\ &\langle\sigma_j^{(3)}\sigma_k^{(3)}\rangle=(4\mathrm{Tr} [(\mathcal{Q}_3^2-\Sigma_3^2)\rho]-N)/[N(N-1)],\\ &\langle\sigma_j^{\pm}\rangle=\mathrm{Tr}[(\mathcal{M}_{\pm} +\mathcal{N}_{\pm})\rho]/N,\\ &\langle\sigma_j^+\sigma_k^-\rangle=\mathrm{Tr} [\mathcal{V}_-(\mathcal{M}_-+\mathcal{N}_-)\rho- \mathcal{Q}_-\rho]/[N(N-1)], \end{split} \end{equation} where $j\ne k$. For coherence properties it is necessary to calculate products of operators evaluated at different times. Of particular interest are the first-order and second-order correlations, which can be found by applying the quantum regression theorem: \begin{eqnarray}\label{quantumre} \langle\hat{O}_1(t+\tau)\hat{O}_2(t)\rangle&=&\mathrm{Tr} \left[\hat{O}_1e^{\mathcal{L}\tau}[\hat{O}_2\rho(t)]\right], \nonumber\\ \hspace*{-1pc} \langle\hat{O}_1(t)\hat{O}_1(t\!+\!\tau)\hat{O}_2(t\!+\!\tau) \hat{O}_2(t)\rangle &=&\mathrm{Tr}\left[\hat{O}_2e^{\mathcal{L}\tau} [\hat{O}_2\rho(t)\hat{O}_1]\hat{O}_1\right], \end{eqnarray} where $e^{\mathcal{L}\tau}[\rho]$ is the time propagation from Eq.~(\ref{eq1}) starting with the initial density matrix $\rho$. For example, in order to obtain the first-order correlation of $\hat{O}_1$ and $\hat{O}_2$, one takes $\hat{O}_2\rho(t)$ as an initial condition, time evolves it for $\tau$ according to Eq.~(\ref{eq1}), applies $\hat{O}_1$, and computes the trace. A similar procedure follows for the second-order correlation. 
In this way, field quantities, $\langle a^{\dagger}(t+\tau)a(t)\rangle$ and $\langle a^{\dagger}(t) a^{\dagger}(t+\tau)a(t+\tau)a(t)\rangle$ are directly calculated. For spin-coherence, the required expressions are: \begin{eqnarray} &&\sum_{j,k=1}^{N}\langle \sigma_j^+(t+\tau)\sigma_k^-(t)\rangle =\mathrm{Tr}\left[(\mathcal{M}_++\mathcal{N}_+)e^{\mathcal{L}\tau} [(\mathcal{M}_-+\mathcal{N}_-)\rho(t)]\right],\nonumber\\ &&\sum_{j,j',k,k'=1}^{N}\langle \sigma_j^+(t)\sigma_{j'}^+ (t+\tau)\sigma_k^-(t+\tau)\sigma_{k'}^-(t)\rangle=\nonumber\\ &&\qquad\mathrm{Tr}\left[\mathcal{V}_-(\mathcal{M}_-+\mathcal{N}_-) e^{\mathcal{L}\tau}[\mathcal{V}_-(\mathcal{M}_-+\mathcal{N}_-)\rho(t)]\right]\,. \end{eqnarray} \section{Transform to the $|S,M\rangle\langle S,M'|$ representation} Although at this point we have provided a theoretical framework that is complete and provides exact and efficient solutions to the general quantum master equation, it is often inconvenient to work in the $P_{q,q_3,\sigma_3}$ representation of the density operator. For example, it can be a nontrivial procedure to characterize the many-body spin-state in this representation by quantifying the degree of entanglement, which is derived from a functional ({\em i.e.}~${\rm Tr}[\rho\log(\rho)]$). For this reason, we illustrate now the procedure for efficiently projecting the density operator from the {\em SU}(4) basis representation onto the usual representation of density matrices formed from the Hilbert space basis vectors. These Hilbert space basis vectors are specified by the angular momentum eigenket $|S,M\rangle$, where $S=N/2,N/2-1,...,(1/2 \,\mathrm{or}\,0)$ is the total spin and $M=-S,-S\!+\!1,\ldots,S$ is the spin-projection. Note that $S$ also labels the symmetry of the states, e.g. $S=N/2$ corresponds to the fully symmetrical Dicke states. 
In order to illustrate how this projection is done, it is instructive for us to first examine explicitly the $N\!=\!2$ case where the Hilbert space is 4~dimensional. Two spins form a symmetric triplet state and an antisymmetric singlet state, corresponding to total spin $S=1$ and $S=0$ respectively. In this case, the complete density matrix from Eq.~(\ref{ex}) for given ${m,n}$ is \begin{equation}\nonumber \bordermatrix{ &\langle1,1|&\langle1,0|&\langle1,-1|&\langle0,0|\cr |1,1\rangle&C_{1,1,0}^{m,n}&\frac{C_{1/2,1/2,1/2}^{m,n}}{\sqrt{2}} &C_{0,0,1}^{m,n}&0\cr |1,0\rangle&\frac{C_{1/2,1/2,-1/2}^{m,n} }{\sqrt{2}} &\frac{C_{1,0,0}^{m,n}+C_{0,0,0}^{m,n}}{2} &\frac{C_{1/2,-1/2,1/2}^{m,n}}{\sqrt{2}}&0\cr |1,-1\rangle&C_{0,0,-1}^{m,n}&\frac{C_{1/2,-1/2,-1/2}^{m,n}}{\sqrt{2}} &C_{1,-1,0}^{m,n} &0\cr |0,0\rangle&0&0&0&\frac{C_{1,0,0}^{m,n}-C_{0,0,0}^{m,n}}{2} }. \end{equation} Notice that the resulting matrix is block diagonal in the $S=1$ and $S=0$ subspaces (a $3\times3$ block and a $1\times1$ block). In addition, the complex coefficients contributing to the matrix element for $|S,M\rangle\langle S,M'|$ all satisfy $q_3+\sigma_3=M$ and $q_3-\sigma_3=M'$. Finally, the trace is simply $\sum_{q_3=-1}^1C_{1,q3,0}^{m,n}=1$. These results can be systematically extended to higher $N$. For any $N$, the density matrix is block diagonal in $S$, with each block given by \begin{equation} \rho_S^{m,n}=\sum_{M,M'} D_{S,M,M'}^{m,n}|S,M\rangle\langle S,M'|, \end{equation} where $D_{S,M,M'}$ are density matrix elements for the symmetry type~$S$. There are $n_S$ ways for $N$ spins to construct the basis for each~$S$, so that $\sum_{S}(2S+1)n_S=2^N$, {\em i.e.\/} the Hilbert space dimension~\cite{Gilmore72}. To find $n_S$, we note that $|S,M\rangle$ forms a basis of the $(2S+1)$-dimensional irreducible representation of the {\em SU}(2) group. 
Determining $n_S$ is accomplished with the help of the Young tableau of the {\em SU}(2) group, where one can obtain the number of equivalent representations iteratively. Fig.~\ref{Fig1}(a) shows the Young tableau for the $N=4$ case. A corresponding tabular method for evaluating $n_S$ for any $N$ is shown in Fig.~\ref{Fig1}(b), which contains about one half of Pascal's triangle. \begin{figure} \caption{\label{Fig1} \label{Fig1} \end{figure} With this in mind, one can now derive a systematic algorithm for obtaining density matrix elements $D_{S,M,M'}^{m,n}$ given {\em SU}(4) expansion coefficients $C_{q,q_3,\sigma_3}^{m,n}$. The procedure is outlined as follows. For each layer of the pyramid~[cf. Fig.~\ref{Fig1}(c)], one may start with a corner element ($M$ and $M'$ maximal) and fill out the matrix by successive application of the angular momentum lowering operator $\hat{J}_-=\sum_{j=1}^N\sigma_j^-$ (noting that $\rho\hat{J}_-=(\mathcal{U}_++\mathcal{V}_+)\rho$) to recursively fill out each row, and $\hat{J}_-\rho$ (or hermiticity of $\rho$) to fill out each column. The layers are filled upwards from the base, starting with $D_{N/2,N/2,N/2}^{m,n}=C_{N/2,N/2,0}^{m,n}$ as the corner element of the lowest layer, and finding the corner element of higher layers by Gaussian elimination from the trace constraint Eq.~(\ref{eq:trace}). In Appendix \ref{app3}, we demonstrate explicit application to 3 atoms, with extrapolation to higher $N$ straightforward. Being able to express the density operator in the $|S,M\rangle$ representation makes easy the calculation of functionals, such as the purity $\mathrm{Tr}[\rho^2]$, or the von Neumann entropy \begin{equation} S=-\mathrm{Tr}(\rho\ln\rho)=-\sum_{j} \lambda_j\ln\lambda_j, \end{equation} where $\lambda_j$ are eigenvalues of $\rho$. The point is that, because the density matrix is block diagonal in the $|S,M\rangle$ representation, we do not need to diagonalize the whole density matrix, which would be a daunting task. 
Instead, we only need to diagonalize a series of $\lfloor N/2\rfloor+1$ blocks of dimension $2S+1$. \section{Application to Lasing} In the following, we demonstrate the method by solving many-atom open quantum systems such as lasing and steady state superradiance. We show the capability for finding exact solutions of large systems and are able to obtain full information about both the transient and steady-state density matrix. First, let us consider a single-mode laser consisting of an ensemble of two-level atoms coupled to an optical cavity, which can be modeled by the general quantum master equation Eq.~(\ref{eq1})~\cite{Carmichael}. In this model we will ignore $T_2$ dephasing for simplicity. The laser system is difficult to solve without approximation since it involves both many atoms and large numbers of photons when above threshold. Therefore, it constitutes an interesting test-case to illustrate the capability of the {\em SU}(4) approach. \begin{figure} \caption{\label{Fig3} \label{Fig3} \end{figure} Fig.~\ref{Fig3}(a) shows the average intracavity photon number of the laser as a function of the repumping rate, where the threshold is evident. This result confirms the conventional laser theory prediction~\cite{Carmichael}. Interestingly, the spin-spin correlation $\langle\sigma_j^+\sigma_k^-\rangle$ above the threshold is directly proportional to the photon number, which shows that the collective photon emission plays an essential role for the laser. In Fig.~\ref{Fig3}(b), we show that the photon statistics of the laser changes from thermal below threshold to Poisson above threshold. In Fig.~\ref{Fig3}(c), we demonstrate that the laser linewidth narrows considerably as one goes above threshold. Finally, in Fig.~\ref{Fig3}(d), the laser threshold behavior is characterized by the intensity correlation $g^{(2)}(0)$ and the entropy of the whole system. 
It can be seen that $g^{(2)}(0)$ jumps from two below threshold to one above threshold with the entropy increasing and saturating. It is remarkable to have an exact solution to this fundamental system and to be able to rigorously confirm standard laser theory results. As discussed earlier, those results are typically based on various kinds of analytic approximations necessary to make the problem tractable. \begin{figure} \caption{\label{Fig4} \label{Fig4} \end{figure} \section{Application to Steady state superradiance} As a second example, we apply our approach to steady-state superradiance as previously proposed~\cite{Meiser09} and demonstrated in a recent experiment~\cite{Thompson12}. The steady-state superradiance represents a novel regime of cavity quantum electrodynamics, where the highly coherent collective atomic dipole induces an extremely narrow linewidth for the generated light. The bad-cavity mode only plays a role as the source of collective coupling for the atoms and the definition of the spatial mode for the output light~\cite{Meiser10}. The behavior of this system is also described by a master equation Eq.~(\ref{eq1}), but in a completely different parameter regime to the conventional laser. For steady-state superradiance, the vacuum Rabi splitting is much less than the cavity linewidth, $\sqrt{N}\,\Omega\ll\kappa$, and equivalently the photon number per atom in the cavity is much less than unity. We present here calculations of the second order intensity correlation $g^{(2)}(0)$ in steady-state as a function of the repump rate. As shown in Fig.~\ref{Fig4}(a), the agreement of the present calculation and the quantum Monte Carlo result from Ref.~\cite{Meiser10} is within error bars. The quantum Monte Carlo simulations were significantly more numerically intensive. In the weak pumping limit, the light exhibits strongly super-Poissonian fluctuations and deviates remarkably from the semiclassical prediction~(blue line in Fig.~\ref{Fig4}(a)). 
The failure of the semiclassical prediction in the weak pumping limit indicates that the atoms are in a highly-correlated state. To reveal the atomic states in this case, we apply the techniques of projecting the density operator in the $P_{q,q_3,\sigma_3}$ representation onto the $|S,M\rangle$ representation and obtain the atomic populations. The inset of Fig.~\ref{Fig4} shows explicitly that the atoms are mainly pumped into long-lived collective subradiant states~\cite{Dicke54} $|S=0,M=0\rangle$ and $|S=1,M=-1\rangle$. From $|S=0,M=0\rangle$, the atoms can only be repumped to $|S=1,M=1\rangle$, from which they rapidly emit two photons and relax to $|S=1,M=-1\rangle$. Therefore, our methods have enabled us to reveal detailed information about the underlying quantum dynamics. \section{conclusion} In conclusion, we have formulated and applied a {\em SU}(4) theory to numerically solve the quantum master equation, which has reduced the exponential scaling of the problem to cubic in $N$. We have developed powerful methods to transform the density operator in the {\em SU}(4) basis representation to the $|S,M\rangle$ representation. This has enabled us to efficiently diagonalize the whole density matrix and thus provided complete information about the system, including state information and functional properties of the density operator. We have included lasing and steady-state superradiance as examples in order to illustrate the potential for this method. The method described here will find numerous applications for simulating open quantum systems with large system size. \begin{acknowledgments} We acknowledge stimulating discussions with J. Cooper and D. Meiser. This work has been supported by the DARPA QuASAR program and the NSF. 
\end{acknowledgments} \appendix \section{SU(4) Algebra}\label{app1} In order to see how the superoperators~[Eq.~(\ref{super})] are related to generators of the {\em SU}(4) group~(Gell-Mann matrices), consider first the fundamental one atom case. We interpret the $2\times2$ density matrix as a $4\times1$ vector in the representing vector space ({\em i.e.}~Liouville space). \begin{equation} \begin{pmatrix} a & c \\ d & b \end{pmatrix}\rightarrow\begin{pmatrix} a \\ c \\ d \\ b \end{pmatrix}. \end{equation} The relations are then given by \begin{equation} \begin{split} \mathcal{Q}_{\pm}\rightarrow\frac{1}{2}(\lambda_9\pm i\lambda_{10})&,\;\;\;\mathcal{Q}_3\rightarrow\frac{1}{4}\lambda_3 +\frac{1}{4\sqrt{3}}\lambda_8+\sqrt{\frac{1}{6}}\lambda_{15}\\ \Sigma_{\pm}\rightarrow\frac{1}{2}(\lambda_6\pm i\lambda_{7})&,\;\;\;\Sigma_3\rightarrow -\frac{1}{4}\lambda_3+\frac{\sqrt{3}}{4}\lambda_8\\ \mathcal{M}_{\pm}\rightarrow\frac{1}{2}(\lambda_4\pm i\lambda_{5})&,\;\;\;\mathcal{M}_3\rightarrow\frac{1}{4}\lambda_3+\frac{\sqrt{3}}{4}\lambda_8\\ \mathcal{N}_{\pm}\rightarrow\frac{1}{2}(\lambda_{11}\pm i\lambda_{12})&,\;\;\;\mathcal{N}_3\rightarrow-\frac{1}{4}\lambda_3 +\frac{1}{4\sqrt{3}}\lambda_8+\sqrt{\frac{1}{6}}\lambda_{15}\\ \mathcal{U}_{\pm}\rightarrow\frac{1}{2}(\lambda_1\pm i\lambda_{2})&,\;\;\;\mathcal{U}_3\rightarrow\frac{1}{2}\lambda_3\\ \mathcal{V}_{\pm}\rightarrow\frac{1}{2}(\lambda_{13}\pm i\lambda_{14})&,\;\;\;\mathcal{V}_3\rightarrow -\frac{1}{2\sqrt{3}}\lambda_8+\sqrt{\frac{1}{6}}\lambda_{15}. \end{split} \end{equation} The commutation relations of the superoperators are given in both Ref.~\cite{Hartmann12} and \cite{Pfeifer03}. 
We can also identify six {\em SU}(2) subalgebras, \begin{equation} [\mathcal{O}_+,\mathcal{O}_-]=2\mathcal{O}_3,\;\;\; [\mathcal{O}_3,\mathcal{O}_{\pm}]=\pm\mathcal{O}_{\pm}, \end{equation} so that it is useful to define six corresponding quadratic superoperators $\mathcal{O}^2=\mathcal{O}_-\mathcal{O}_++\mathcal{O}_3^2+\mathcal{O}_3$, which commute with $\mathcal {O}_3$. The {\em SU}(4) group has 3 Casimir operators, one of which is quadratic in the generators, and the others are of higher order. The quadratic Casimir operator $\mathcal{C}_1$ can be expressed in terms of superoperators \begin{equation} \mathcal{C}_1=\sum_\mathcal{O}(\mathcal{O}_-\mathcal{O}_+ +\mathcal{O}_3)+\mathcal{U}_3^2+\frac{1}{3}(\mathcal{U}_3+2\Sigma_3)^2 +\frac{1}{6}(3\mathcal{Q}_3-2\mathcal{U}_3-\Sigma_3)^2. \end{equation} \section{Fully symmetrical basis for {\em SU}(4) group }\label{app2} The fundamental representation of the {\em SU}(4) group, adapted to serve as basis of the single-spin density matrix, is given by $u=|1\rangle\langle1|$, $d=|0\rangle\langle0|$, $s=|1\rangle\langle0|$, $c=|0\rangle\langle1|$. Higher order representations can then be obtained from the fundamental representation and the symmetry type, which is described by the Young Tableau. The basis for the fully symmetrical case is defined as \begin{equation} P_{q,q_3,\sigma_3}=\mathcal{S}(u^\alpha d^\beta s^\gamma c^\delta), \end{equation} which are eigenstates of both $\mathcal{O}^2$ and $\mathcal{O}_3$~\cite{Hartmann12}, with eigenvalues \begin{equation} \mathcal{O}^2P_{q,q_3,\sigma_3}^{(\mathrm{s})}=o(o+1)P_{q,q_3,\sigma_3}^{(\mathrm{s})},\;\;\; \mathcal{O}_3P_{q,q_3,\sigma_3}^{(\mathrm{s})}=o_3P_{q,q_3,\sigma_3}^{(\mathrm{s})}, \end{equation} where $o\in\{q,\sigma,m,n,u,v\}$. 
The eigenvalues are not independent, but can be expressed in terms of $\alpha,\gamma,\beta,\delta$: \begin{equation} \begin{array}{cc} q=(\alpha+\beta)/2,&q_3=(\alpha-\beta)/2,\\ \sigma=(\gamma+\delta)/2,&\sigma_3=(\gamma-\delta)/2,\\ m=(\alpha+\delta)/2,&m_3=(\alpha-\delta)/2;\\ n=(\gamma+\beta)/2,&n_3=(\gamma-\beta)/2;\\ u=(\alpha+\gamma)/2,&u_3=(\alpha-\gamma)/2;\\ v=(\delta+\beta)/2,&v_3=(\delta-\beta)/2. \end{array} \end{equation} Then it is straightforward to determine actions of all the raising and lowering superoperators on $P_{q,q_3,\sigma_3}$, \begin{equation}\label{act} \begin{split} \mathcal{Q}_{\pm}P_{q,q_3,\sigma_3}&=(q\mp q_3)P_{q,q_3\pm 1,\sigma_3},\\ \Sigma_{\pm}P_{q,q_3,\sigma_3}&=(\sigma\mp \sigma_3)P_{q,q_3,\sigma_3\pm 1},\\ \mathcal{M}_{\pm}P_{q,q_3,\sigma_3}&=(m\mp m_3)P_{q\pm 1/2,q_3\pm 1/2,\sigma_3 \pm 1/2},\\ \mathcal{N}_{\pm}P_{q,q_3,\sigma_3}&=(n\mp n_3)P_{q\mp 1/2,q_3\pm 1/2,\sigma_3 \pm 1/2},\\ \mathcal{U}_{\pm}P_{q,q_3,\sigma_3}&=(u\mp u_3)P_{q\pm 1/2,q_3\pm 1/2,\sigma_3 \mp 1/2},\\ \mathcal{V}_{\pm}P_{q,q_3,\sigma_3}&=(v\mp v_3)P_{q\mp 1/2,q_3\pm 1/2,\sigma_3 \mp 1/2}. \end{split} \end{equation} We note that the fully symmetrical basis are also eigenstates of the quadratic Casimir operator $\mathcal{C}_1$ with common eigenvalue $3N(N+4)/8$. Analogous actions for the photon part are the simple harmonic oscillator relations: \begin{eqnarray} a\,\bigl|n\bigr>&=&\sqrt{n}\,\bigl|n-1\bigr>\,,\nonumber\\ a^{\dag}\,\bigl|n\bigr>&=&\sqrt{n+1}\,\bigl|n+1\bigr>\,. \end{eqnarray} \section{$|S,M\rangle\langle S,M'|$ representation}\label{app3} In order to project the density operator from the {\em SU}(4) basis onto the $|S,M\rangle\langle S,M'|$ representation , let us first show that $M$ and $M'$ are related to the $P_{q,q_3,\sigma_3}^{(\mathrm{s})}$ by $q_3+\sigma_3=M$ and $q_3-\sigma_3=M'$. 
To see this, we define $\hat{J}_3=\sum_{j=1}^N\sigma_j^{(3)}/2$ and obtain
So the top left corner is $D_{N/2,N/2,N/2}^{m,n}=C_{N/2,N/2,0}^{m,n}$. We next apply the lowering operator $\hat{J}_-=\sum_{j=1}^N\sigma_j^-$ to iteratively generate $D_{N/2,N/2,M}^{m,n}$, with $M=N/2-1,\ldots,-N/2$. To do this, we need the recursion relation \begin{equation}\label{rec} \begin{split} D_{S,M,M'-1}^{m,n}&=\langle S,M|\rho^{m,n}|S,M'-1\rangle= \frac{\langle S,M|\rho^{m,n}\hat{J}_-|S,M'\rangle}{\sqrt{(S+M')(S-M'+1)}}\\ &=\frac{\langle S,M|(\mathcal{U}_++\mathcal{V}_+)\rho^{m,n}|S,M'\rangle}{\sqrt{(S+M')(S-M'+1)}}. \end{split} \end{equation} Therefore, with the actions of the raising and lowering operators~[Eq.~(\ref{act})], we can derive all $D_{N/2,N/2,M'}^{m,n}$, {\em i.e.}~the first row of the bottom layer. Using the fact that the density matrix is Hermitian and $C_{q,q_3,\sigma_3}^{m,n}=(C_{q,q3,-\sigma_3}^{m,n})^*$, we could get all the elements for the first column by $D_{N/2,M',N/2}^{m,n}=(D_{N/2,N/2,M'}^{m,n})^*$. By repeatedly applying the recursion relation~[Eq.~(\ref{rec})] in each row, we then construct the full base layer. 
As an explicit example, we have constructed the bottom layer, {\em i.e.}~$S=3/2$ for the three atom case, \begin{equation}\label{ma} \bordermatrix{ &\langle\frac32,\frac32|&\langle\frac32,\frac12|&\langle\frac32,-\frac12|&\langle\frac32,-\frac32|\cr |\frac32,\frac32\rangle&C_{3/2, 3/2, 0}^{m,n}&\frac{C_{1, 1, 1/2}^{m,n}}{\sqrt{3}} &\frac{C_{1/2, 1/2, 1}^{m,n}}{\sqrt{3}}&C_{0, 0, 3/2}^{m,n}\cr |\frac32,\frac12\rangle&\frac{C_{1, 1, -1/2}^{m,n} }{\sqrt{3}} &\frac{C_{3/2, 1/2, 0}^{m,n}+C_{1/2, 1/2, 0}^{m,n}}{3} &\frac{C_{1, 0, 1/2}^{m,n}+C_{0, 0, 1/2}^{m,n}}{3}&\frac{C_{1/2, -1/2, 1}^{m,n}}{\sqrt{3}}\cr |\frac32,-\frac12\rangle&\frac{C_{1/2, 1/2, -1}^{m,n}}{\sqrt{3}}& \frac{C_{1, 0, -1/2}^{m,n}+C_{0, 0, -1/2}^{m,n}}{3} &\frac{C_{3/2, -1/2, 0}^{m,n}+C_{1/2, -1/2, 0}^{m,n}}{3}&\frac{C_{1, -1, 1/2}^{m,n}} {\sqrt{3}} \cr |\frac32,-\frac32\rangle&C_{0, 0, -3/2}^{m,n}&\frac{C_{1/2, -1/2, -1}^{m,n}}{\sqrt{3}} &\frac{C_{1, -1, -1/2}^{m,n}}{\sqrt{3}} &C_{3/2, -3/2, 0}^{m,n} }. \end{equation} In order to illustrate the use of the recursion relation, we now show how to get $D_{3/2,1/2,1/2}^{m,n}$ from $D_{3/2,1/2,3/2}^{m,n}$. Because $\mathcal{V}_+P_{3/2,1/2,0}^{(\mathrm{s})}= P_{1,1,-1/2}^{(\mathrm{s})}$ and $\mathcal{U}_+P_{1/2,1/2,0}^{(\mathrm{s})}= P_{1,1,-1/2}^{(\mathrm{s})}$ , we have $D_{3/2,1/2,1/2}^{m,n}=(C_{3/2, 1/2, 0}^{m,n}+C_{1/2, 1/2, 0}^{m,n})/\sqrt{3}/ \sqrt{3}$. To construct the next layer, we thus find out the top left matrix element first, and then apply the same procedure as before to determine the rest of the matrix elements. Let us first examine the three atom case. The $S=1/2$ layer has two copies, each of which is a $2\times 2$ matrix. To find the top left element $D_{1/2,1/2,1/2}^{m,n}$, noticing the constraint imposed by the trace of the density matrix, we derive $2D_{1/2,1/2,1/2}^{m,n}+D_{3/2,1/2,1/2}^{m,n}=C_{3/2, 1/2, 0}^{m,n}$ so that $D_{1/2,1/2,1/2}^{m,n}=(2C_{3/2, 1/2, 0}^{m,n}-C_{1/2, 1/2, 0}^{m,n})/6$. 
By applying the same method as in the bottom layer, we construct the block matrix for $S=1/2$ layer \begin{equation}\label{ma1} \bordermatrix{ &\langle\frac12,\frac12|&\langle\frac12,-\frac12|\cr |\frac12,\frac12\rangle&\frac{2C_{3/2, 1/2, 0}^{m,n}-C_{1/2, 1/2, 0}^{m,n}}{6}& \frac{C_{1, 0, 1/2}^{m,n}-2C_{0, 0, 1/2}^{m,n}}{6}\cr |\frac12,-\frac12\rangle&\frac{C_{1, 0, -1/2}^{m,n}-2C_{0, 0, -1/2}^{m,n}}{6} &\frac{2C_{3/2, -1/2, 0}^{m,n}-C_{1/2, -1/2, 0}^{m,n}}{6}\cr }. \end{equation} Therefore in general, if we suppose that we have constructed the block matrix for $S'>S$, the formula to find the top left matrix element $D_{S,S,S}^{m,n}$ for layer $S$ is \begin{equation} \sum_{S\leq S'\leq N/2}n_{S'}D_{S',S,S}^{m,n}=C_{N/2,S,0}^{m,n}. \end{equation} Having the top left matrix element for each layer $S$, we can easily construct the $(2S+1)\times(2S+1)$ block matrix by applying the recursion relation based on the angular momentum lowering operator. Repeated iteration of these steps systematically fills in all sites of the pyramid. \end{document}
\begin{document} \title{Damping of local Rabi oscillations in the presence of thermal motion} \author{Anat Daniel, Ruti Agou, Omer Amit, David Groswasser, Yonathan Japha and Ron Folman} \email{[email protected]} \affiliation{Department of Physics, Ben-Gurion University of the Negev, Be'er Sheva 84105, Israel} \date{\today} \begin{abstract} We investigate both theoretically and experimentally the effect of thermal motion of laser cooled atoms on the coherence of Rabi oscillations induced by an inhomogeneous driving field. The experimental results are in excellent agreement with the derived analytical expressions. For freely falling atoms with negligible collisions, as those used in our experiment, we find that the amplitude of the Rabi oscillations decays with time $t$ as $\exp[-(t/\tau)^4]$ , where the coherence time $\tau$ drops with increasing temperature and field gradient. We discuss the consequences of these results regarding the fidelity of Rabi rotations of atomic qubits. We also show that the process is equivalent to the loss of coherence of atoms undergoing a Ramsey sequence in the presence of static magnetic field gradients - a common situation in many applications. In addition, our results are relevant for determining the resolution when utilizing atoms as field probes. Using numerical calculations, our model can be easily extended to situations in which the atoms are confined by a potential or to situations where collisions are important. \end{abstract} \pacs{37.10.Gh, 32.70.Cs, 05.40.-a, 67.85.-d} \maketitle \section{Introduction} A two-level system is a key element in understanding the structure of matter and its interaction with electromagnetic fields. 
Two-level systems manipulated by electromagnetic waves are the fundamental building blocks in many applications, such as nuclear magnetic resonance (NMR) \cite{NMR} and electron paramagnetic resonance (EPR) microscopy, atomic clocks \cite{clocks1,clocks2} and interferometers \cite{AtomInterferometry1,AtomInterferometry2}, magnetometry with atoms \cite{magsense} or NV centers in diamonds \cite{NVMagnetometry} and quantum information processing with atoms, ions, quantum dots or superconducting qubits \cite{QIP1, QIP2, QIP3,QIP4,comp,comm,qubit}. The basic operation in two-level system manipulation is Rabi rotation (also called Rabi flopping or Rabi oscillation), which appears whenever a two-level system is subjected to a constant nearly-resonant driving field. Measurement of Rabi oscillations and their damping provides information about the coherence of the system. Decoherence may follow from spontaneous emission \cite{Robledo2010}, external or intrinsic noise \cite{Ku2005,Dobrovitski2009,Huber2011,DeRaedt2012} and spatial inhomogeneities across the sample, which may be due to inhomogeneities of external fields or due to the dynamics of the two-level systems themselves during the oscillations (e.g. dipole-dipole interactions) \cite{Paik2008,DeRaedt2012}. A process which is analogous to the damping of Rabi oscillations is the decoherence (dephasing) of free phase oscillations of two-level systems which are prepared in a superposition of the two energy eigenstates. In NMR this process is called free-induction-decay (FID) and used for characterizing the environment, and in atomic clocks and interferometers it involves the loss of visibility of Ramsey fringes. This decoherence is usually caused by fluctuations or inhomogeneities in the energy splitting between the two levels. 
If these inhomogeneities are time-independent or vary slowly in time, then this dephasing may be reversed by using a spin-echo technique
Beyond spatially dependent Rabi frequencies, which imply the observation of internal state population modulation (``fringes") across the applied electromagnetic field, we observe damping of Rabi oscillations at any given location with a constant field intensity (see Fig.~\ref{fig1}). This effect is shown to be sensitive to the atomic temperature and we attribute it to the thermal motion of the atoms. \begin{figure} \caption{ (color online) (a) absorption image of $^{87} \label{fig1} \end{figure} We analyze both theoretically and experimentally a model system of freely propagating two-level atoms in the presence of gradients of driving fields or state-selective potentials, which are weak enough not to affect the dynamics of the motional degrees of freedom of the atoms. The atomic motion is then mainly governed by the initial thermal velocity distribution. In this case we obtain simple analytical expressions for the damping of Rabi oscillations or Ramsey phase oscillations. The model can be easily extended to cases where the atoms move in a potential (as long as their motion may be treated classically), or to the case where atomic collisions are important. In such a case the theoretical solution may need to involve numerical integration. This part is not included here because the simple version of the model is sufficient for a quantitative understanding of the experimental results. The structure of the paper is as follows: in section~\ref{sec:theory} we present the theoretical model and its simple solutions for the collisionless potential-free case. In section~\ref{sec:experiment} we present the specific experimental realization with cold atoms at different temperatures and analyze the results with the help of the theoretical model of section~\ref{sec:theory}. In section~\ref{sec:discussion} we discuss some fundamental and practical implications. 
\section{Theoretical model} \label{sec:theory} Consider an ensemble of two-level atoms in the presence of inhomogeneous fields. We assume that the two levels $|1\rangle$ and $|2\rangle$ are not coupled by an electric dipole transition, such that spontaneous emission is negligible over the time of the experiment. The single-atom Hamiltonian is then \begin{equation} H=H_{\rm ext}\hat{1} -\frac{1}{2}\hbar\omega_{12}({\bf x})\hat{{\mbox{\boldmath{$\sigma$}}}ma}_z+\hbar\hat{{\mbox{\boldmath{$\sigma$}}}} \cdot \mathbf{\Omega}({ \bf x})\cos\omega t\ . \label{eq:Ham} \end{equation} Here, the first term is the state-independent part $H_{\rm ext}={\bf p}^2/2m+V({\bf x})$ that governs the external (motional) degrees of freedom, $\hat{1}$ being the 2$\times$2 unity matrix. The second term describes a time-independent energy splitting $\hbar\omega_{12}({\bf x})$ which may depend on position due to static inhomogeneous fields. The last term describes the coupling of the atom to a driving field with frequency $\omega$, with $\mathbf{\hat{{\mbox{\boldmath{$\sigma$}}}ma}}\equiv (\hat{{\mbox{\boldmath{$\sigma$}}}ma}_x,\hat{{\mbox{\boldmath{$\sigma$}}}ma}_y,\hat{{\mbox{\boldmath{$\sigma$}}}ma}_z)$ being the vector of Pauli matrices and $\mathbf{\Omega}({\bf x})$ being a vector representing the amplitudes of atom-field coupling corresponding to angular frequencies of rotation about the axes of the Bloch sphere. In the rotating wave approximation, only terms which oscillate with frequencies that are nearly resonant with the atomic level splitting $\omega_{12}$ are retained, while rapidly oscillating terms are dropped. The effective Hamiltonian becomes \begin{equation} H_{\rm RWA}=H_{\rm ext}\hat{1}+\frac{\hbar}{2}\left(\begin{array}{cc} -\omega_{12}({\bf x}) & e^{i\omega t} \Omega({\bf x}) \\ e^{-i\omega t}\Omega^*({\bf x}) & \omega_{12}({\bf x}) \end{array}\right), \label{eq:H_RWA} \end{equation} where $\Omega({\bf x})\equiv \Omega_x({\bf x})+i\Omega_y({\bf x})$ is typically complex. 
In general, the spatially dependent Hamiltonian of Eq.~(\ref{eq:H_RWA}) determines the dynamics of the internal state as well as the motional degrees of freedom. However, here we consider driving frequencies in the microwave (MW) regime and field gradients that are too small to affect the atomic motion in the time scale of the experiment, namely $|\nabla \omega_{12}| ,|\nabla\Omega|\ll mv_T/\hbar t$, where $v_T$ is the average thermal velocity and $t$ is the time scale of the experiment. In this case the atomic sample may be approximated by an ensemble of atoms with classical trajectories ${\bf \bar{x}}(t)$, which are independent of the internal dynamics. The internal wave function of a single atom in the frame of reference moving with the atom along a given trajectory is then \begin{equation} |\psi_{{\bf \bar{x}}}(t)\rangle=a_{{\bf \bar{x}}}(t)|1\rangle + b_{{\bf \bar{x}}}(t)\exp\left[-i\int_0^t \omega_{12}[{\bf \bar{x}}(t')]dt' \right]|2\rangle, \end{equation} where the coefficients $a_{{\bf \bar{x}}}$ and $b_{{\bf \bar{x}}}$ satisfy the Schr\"odinger equations \begin{eqnarray} \dot{a}_{{\bf \bar{x}}}&=&-\frac{i}{2}\Omega({\bf \bar{x}}(t))\exp\left[i\int_0^t \Delta(t')dt'\right]b_{{\bf \bar{x}}} \label{eq:dadt} \\ \dot{b}_{{\bf \bar{x}}}&=& -\frac{i}{2}\Omega^*({\bf \bar{x}}(t))\exp\left[-i\int_0^t \Delta(t')dt'\right]a_{{\bf \bar{x}}} \label{eq:dbdt} \end{eqnarray} where $\Delta[{\bf \bar{x}}(t)]=\omega-\omega_{12}[{\bf \bar{x}}(t)]$ is the local detuning of the driving field frequency from the energy splitting. In principle, Doppler shifts can also be included in the detuning frequency. These would lead to the broadening of the transition between the two states. In the MW range of frequencies which is used in our experiment (more specifically $6.8\,$GHz) and the range of temperatures used ($T<100\,\mu$K) Doppler shifts are of the order of a few Hz, while the Rabi frequency along the sample is of the order of kHz (see Fig.~\ref{fig1}). 
For this reason we neglect the effects of Doppler shifts in what follows. Doppler broadening effects would be important at room temperature, where they reach the order of a few kHz. The density matrix of the internal state at a given position ${\bf x}$ is obtained by averaging over the pure density matrices of all the atoms with different trajectories: \begin{equation} \rho({\bf x},t)=\sum_{{\bf \bar{x}}}P({\bf \bar{x}})\delta[{\bf \bar{x}}(t)-{\bf x}] \left(\begin{array}{cc} |a_{{\bf \bar{x}}}(t)|^2 & a_{{\bf \bar{x}}}(t)b^*_{{\bf \bar{x}}}(t) \\ a^*_{{\bf \bar{x}}}(t)b_{{\bf \bar{x}}}(t) & |b_{{\bf \bar{x}}}(t)|^2 \end{array}\right) \label{eq:sumP} \end{equation} where $P({\bf \bar{x}})$ is the probability for an atom to be in a given trajectory. In principle, the density matrix can be found by solving Eqs.~(\ref{eq:dadt}) and~(\ref{eq:dbdt}) numerically for all the possible trajectories for a given external potential and initial conditions. Such a simulation of the trajectories may also include collisional effects. However, here we consider two simple cases in which Eqs.~(\ref{eq:dadt}) and~(\ref{eq:dbdt}) have a simple analytical solution, which is relevant to common experimental conditions, including our experiment which is described in section~\ref{sec:experiment}. \subsection{Resonant Rabi oscillations} In the absence of static gradients, i.e., if $\omega_{12}$ is constant and $\omega=\omega_{12}$ everywhere in space, the solution of the Schr\"odinger equation may be represented by a simple trajectory on the Bloch sphere. 
If we further assume for simplicity that the axis of rotation is constant everywhere in space, we may, without loss of generality, take $\Omega$ to be real and obtain the analytic solution \begin{eqnarray} a_{\bf \bar{x}}(t) &=& \cos[\theta_{\bf \bar{x}}(t)/2]a_{\bf \bar{x}}(0)-i\sin[\theta_{\bf \bar{x}}(t)/2]b_{\bf \bar{x}}(0) \label{eq:at} \\ b_{\bf \bar{x}}(t) &=& -i\sin[\theta_{\bf \bar{x}}(t)/2]a_{\bf \bar{x}}(0)+\cos[\theta_{\bf \bar{x}}(t)/2]b_{\bf \bar{x}}(0) \label{eq:bt} \end{eqnarray} where \begin{equation} \theta_{\bf\bar{x}}(t)=\int_0^t \Omega[{\bf\bar{x}}(t')]dt' \end{equation} is the Bloch sphere angle relative to the $z$ axis. If collisions are rare during the time scale of the experiment, then each atomic trajectory is characterized by a constant velocity $\mathbf{v}$. An atom in a position ${\bf x}$ and velocity ${\bf v}$ at time $t$ has gone through the trajectory ${\bf\bar{x}}(t')={\bf x}-{\bf v}(t-t')$. If the Rabi frequency along the trajectory changes linearly such that $\Omega[{\bf\bar{x}}(t')]\approx \Omega[{\bf x}]-{\bf v}\cdot(\nabla\Omega)(t-t')$, the Bloch sphere angle at time $t$ for this trajectory is given by \begin{equation} \theta_{\bf \bar{x}}(t)=\Omega({\bf x})t-\frac{1}{2}{\bf v}\cdot(\nabla\Omega)t^2 \label{eq:thetax} \end{equation} We consider an initial atomic cloud having a Gaussian position distribution of width $\Delta_x(0)$ along the gradient of the driving field intensity and a thermal velocity distribution of width $\Delta_v=\sqrt{k_B T/m}$. At time $t=0$ the cloud is released and freely expands with negligible collisions. The distribution at time $t>0$ is \begin{equation} P({\bf x},{\bf v},t)=\frac{1}{2\pi\Delta_x\Delta_v}\exp\left(-\frac{|{\bf x}-{\bf v}t|^2}{2\Delta_x^2}-\frac{v^2}{2\Delta_v^2} \right). \end{equation} This corresponds to a time dependent spatial width $\Delta_x(t)=\alpha(t)\Delta_x(0)$ and velocity width $\Delta_v(t)=\Delta_v(0)/\alpha(t)$, where $\alpha(t)= \sqrt{1+\Delta_v(0)^2t^2/\Delta_x(0)^2}$. 
We assume that the atoms are initially in state $|1\rangle$ and use the solution in Eq.~(\ref{eq:bt}) to determine the probability for an atom in a given trajectory $\bar{x}$ to be in the state $|2\rangle$ at time $t$, namely $\sin^2(\theta_{\bf\bar{x}}/2)=\frac{1}{2}(1-\cos\theta_{\bf\bar{x}})$ . By inserting this into Eq.~(\ref{eq:sumP}) and integrating over all the trajectories that pass through the point ${\bf x}$ we obtain the following expression for the probability distribution of atoms in the state $|2\rangle$ \begin{eqnarray} &&\rho_{22}({\bf x},t)= \frac{e^{-x^2/2\Delta_x(t)^2}}{\sqrt{2\pi}\Delta_x(t)}\times \nonumber \\ &&\times \frac{1}{2}\left\{1-\cos[\tilde{\Omega}({\bf x},t)t]\exp\left[-\frac{1}{8}|\nabla \Omega|^2 \Delta_v(t)^2 t^4\right]\right\} \label{eq:rho22} \end{eqnarray} where $\tilde{\Omega}=\Omega-\frac{1}{2}{\bf x}\cdot\nabla\Omega \Delta_v(0)^2t^2/\Delta_x(t)^2$ is shifted relative to the Rabi frequency at ${\bf x}$ due to averaging over the Rabi frequencies during the expansion. It follows that the amplitude of Rabi oscillations decays as $\exp[-t^4/\tau_v^4\alpha^2(t)]$, where the temperature dependent coherence time is \begin{equation} \tau_v=\frac{8^{1/4}}{(\Delta_v(0)|\nabla\Omega|)^{1/2}}=2\left(\frac{m}{2k_B T \partial_x\Omega^2} \right)^{1/4}. \label{eq:tsol} \end{equation} At a short enough time, where the spatial width of the atomic cloud has not yet grown considerably we find that the decay of the visibility of the oscillations has a quartic exponential dependence and it becomes Gaussian when the cloud expands to a few times its original size. The $t^4$ exponential dependence of the decay of Rabi oscillations follows from the Gaussian velocity distribution in the thermal cloud. Atoms with higher velocity travel a larger distance along the field gradient thereby acquiring a larger phase difference relative to atoms at rest in the detection point. 
This additional phase is an integral over time along the way which was traversed by an atom with a given velocity, namely $vt$, such that the total phase depends quadratically on time. The combination of the quadratic dependence of the phase on time and the quadratic exponential dependence of the distribution on velocity provides the $t^4$ exponential dependence of the coherence on time. We may consider $\tau_v$ as a critical time such that at smaller times the Rabi rotation is unaffected by the velocity distribution. \subsection{Ramsey fringes} An equivalent situation that can be solved analytically is the decay of Ramsey fringes, whose visibility is determined by the coherence of free phase oscillations of the energy eigenstates. Consider a $\pi/2$ Rabi pulse at time $t=0$, which prepares the system in a superposition $(|1\rangle+|2\rangle)/\sqrt{2}$. In the presence of inhomogeneous fields that induce an inhomogeneous energy shift of the levels $|1\rangle$ and $|2\rangle$, Eqs.~(\ref{eq:dadt}) and~(\ref{eq:dbdt}) yield the trivial solution \begin{equation} |\psi_{\bf\bar{x}}(t)\rangle=\frac{1}{\sqrt{2}}\left[|1\rangle+\exp[-i\phi_{\bf \bar{x}}(t)]|2\rangle\right], \end{equation} where the Bloch sphere angle $\phi_{\bf \bar{x}}(t)$ for the given trajectory ${\bf \bar{x}}$ is given by \begin{equation} \phi_{\bf \bar{x}}(t)=\int_0^t \omega_{12}[{\bf\bar{x}}(t')]dt'=\omega_{12}({\bf \bar{x}})t- \frac{1}{2}({\bf v}\cdot \partial_x\omega_{12})t^2, \label{eq:phix} \end{equation} in analogy with Eq.~(\ref{eq:thetax}), where we have made the same assumptions regarding the inhomogeneity of $\omega_{12}({\bf x})$ as we did above for $\Omega({\bf x})$. The Ramsey sequence is terminated by a second $\pi/2$ pulse at time $t$, after which the populations of the two energy eigenstates are determined by the phase difference $\phi(t)$ accumulated during the free phase oscillation time. 
The coherence of the state after the Ramsey sequence is given by the off-diagonal component of the density matrix $\rho_{12}$ just before the second $\pi/2$ pulse. By inserting Eq.~(\ref{eq:phix}) into Eq.~(\ref{eq:sumP}) we obtain the following result for the coherence \begin{eqnarray} &&\rho_{12}({\bf x},t)= \frac{e^{-x^2/2\Delta_x(t)^2}}{\sqrt{2\pi}\Delta_x(t)}\times \nonumber \\ &&\times e^{-i\bar{\omega}_{12}({\bf x})t}\exp\left[-\frac{1}{8}|\nabla \omega_{12}|^2 \Delta_v(t)^2 t^4\right] \label{eq:rho12} \end{eqnarray} such that the populations after the Ramsey sequence are determined by the phase $\omega_{12}({\bf x})t$ and the visibility of the Ramsey fringes decays equivalently to the decay of Rabi oscillations with $\nabla\omega_{12}$ replacing $\nabla\Omega$ in the expression for $\tau_v$. It is interesting to examine the effect of a spin-echo technique on the coherence of the atomic sample in the presence of thermal motion. In this process, a $\pi$ pulse which flips the atomic population is applied at half the time interval between the $\pi/2$ pulses of the Ramsey sequence. In this case the total phase that is accumulated for an atom with velocity ${\bf v}$ and position ${\bf x}$ is given by \begin{eqnarray} \phi_{\bf\bar{x}}(t)&=& \int_0^{t/2} dt' \omega_{12}[{\bf \bar{x}}(t')]-\int_{t/2}^{t} dt' \omega_{12}[{\bf \bar{x}}(t')] \nonumber \\ &=& -({\bf v}\cdot\nabla\omega_{12})\left[\int_0^{t/2}t'\,dt'-\int_{t/2}^t t'\,dt'\right] \nonumber \\ &=& \frac{1}{4}({\bf v}\cdot\nabla\omega_{12})t^2. \label{eq:spinecho} \end{eqnarray} In contrast to Eq.~(\ref{eq:phix}), here the term proportional to $\omega_{12}({\bf x})$, which implies a spatially dependent phase during the Ramsey sequence and spatial modulation of the population after the sequence, has dropped. We are left with a position independent term proportional to the gradient of the internal energy splitting and the velocity. 
After summation over trajectories to obtain the off-diagonal density matrix $\rho_{12}({\bf x})$, one finds that following the second $\pi/2$ pulse at time $t$ the internal state population is homogeneous over the atomic cloud, similarly to what happens for a spin-echo in a zero atom velocity sample in an inhomogeneous environment. However, in analogy with the derivation following Eq.~(\ref{eq:thetax}), we find that the coherence of the atomic population will decrease by a factor $\exp[-(t/\tau_v)^4]$ as before. Due to the factor of $1/4$ appearing in the last line of Eq.~(\ref{eq:spinecho}), the value of the coherence time $\tau_v$ is larger by a factor of $\sqrt{2}$ relative to its value for a simple Ramsey sequence without spin-echo. It follows that the spin-echo removes the population inhomogeneity due to the spatial inhomogeneity of the field, but does not cancel the decoherence caused by the thermal velocity distribution. \section{Experiment} \label{sec:experiment} \begin{figure} \caption{ (color online) Illustration of the experimental set-up. The atoms are cooled and trapped within a vacuum chamber by a standard MOT. The atoms are prepared in the $F=1$ state by turning off the repumper beam before the cooling beams. Once the MOT beams are turned off, the atoms fall due to gravity and are subjected to a MW field generated by a horn antenna. The MW shutter is controlled by a TTL trigger sent from the experimental control (PXI). After the MW field is switched off, an on-resonance imaging beam directed along the gravitational axis is applied. The beam passes through the cloud and is collected by a CCD camera. The population of the atoms in $F=2$ is extracted from the absorption image. } \label{fig11} \end{figure} \begin{figure} \caption{ (color online) Rabi oscillations for different temperatures ($8~\mu$K to $37~\mu$K). The graphs present the population in $F=2$ scaled to the background population $n_b$, as a function of the MW pulse duration. 
It can be seen that the coherence time of the oscillations increases when the temperature decreases. Each of the graphs was fitted to the model of Eq.~(\ref{eq:spint}).} \label{fig2} \end{figure} To demonstrate the theoretical model, we experimentally investigate a cloud of freely propagating atoms in free-fall. We start our experiment with a cloud of $10^6$ cold $^{87}$Rb atoms. The atoms are cooled by a standard magneto-optical trap (MOT) followed by laser molasses to the required temperature, of the order of a few $\mu$K. The atoms are prepared in the $F=1$ hyperfine state, and are then released into free-fall for the duration of the experiment, typically $10-30$ ms. At the time of release the atomic cloud has a nearly circular Gaussian distribution of half width $\sqrt{2}\Delta_x=1.55$ mm (at $1/e$ of maximum density). During the free-fall the atoms are subjected for a time $t$ to a MW field generated by a horn antenna, which is tuned to the $6.8\,$GHz $|F,m_F\rangle=|1,0\rangle\equiv |1\rangle\rightarrow|2,0\rangle\equiv |2\rangle$ clock transition. The MW field induces Rabi oscillations. The Rabi frequency is the matrix element $\Omega=\langle 2|(\mu_B/\hbar) (g_S\hat{\bf S}+g_I \hat{\bf I})\cdot \mathbf{B}^{MW}({\bf x})|1\rangle$, where $g_S$ and $g_I$ are the Land\'e factors of the electronic and nuclear spins, respectively, $\hat{\bf S}$ and $\hat{\bf I}$ are the corresponding spin operators, $\mu_B$ is Bohr's magneton and ${\bf B}^{MW}({\bf x})$ is the magnetic field of the MW radiation. In the case of a single clock transition, the Rabi frequency matrix element reduces to $\Omega=\frac{1}{2\hbar}\mu_B (g_S-g_I) B_{\parallel}^{MW}({\bf x})$, where $B_{\parallel}^{MW}$ is the component of the MW magnetic field which is parallel to the quantization axis of the Zeeman sub-levels, determined by a static magnetic field. Fig. \ref{fig11} illustrates the experimental set-up.
A 6.8 GHz radiation is generated by a signal generator (SMR20, Rohde and Schwarz) synchronized with an atomic clock (AR40A, Accubeat-Rubidium frequency standard). The signal is then passed through a MW shutter (SWNND-2184-1DT AMC Inc.), which provides accurate microwave pulses. The pulse is amplified by a 3 Watt MW amplifier (ZVE-3W-83, Mini-Circuits) before being transmitted to a horn antenna. After time $t$ the MW field is switched off and the population of the atoms in the $F=2$ hyperfine state is determined by on-resonance absorption imaging directed along the gravitational axis. As the horn antenna produces a spatially inhomogeneous MW field, a gradient of Rabi frequencies is produced along the cloud (frequency decreasing with growing distance from the antenna); this is exhibited in Fig.~\ref{fig1}(a) as a fringe-like pattern \cite{ruti}. In other words, the fringes are iso-Rabi-frequency bands which vary smoothly to yield multiple Rabi oscillations that can be viewed simultaneously $n_2({\bf x})\propto \sin^2[\Omega({\bf x})t/2]$. This observation may be viewed as a measurement of the local Rabi frequency and hence the local amplitude of the driving field component $B_{\parallel}^{MW}({\bf x})$. This is illustrated in Fig.~\ref{fig1}(b), where we deduce the Rabi frequency gradient from a fit of the horizontal dependence of the atomic density to a Gaussian multiplied by a sinusoidal function. In Fig. \ref{fig1}(c) we present a typical Rabi oscillation over time, where each data point represents the population of the atoms in the $F=2$ state averaged over a vertical strip of camera pixels, one pixel wide and $100$ pixels long (perpendicular to the direction of the driving field gradient). The graph may be fitted to find the Rabi frequency and the damping constants, as we show below. Fig.~\ref{fig1}(d) shows the local Rabi frequencies deduced from time evolution curves as in (c). 
The gradient of the Rabi frequencies is deduced by fitting the spatial dependence of the measured Rabi frequencies to a linear slope. Far from the antenna the radiated magnetic field is expected to fall like $1/r$ with an approximate linear dependence in the relevant range 10$\,$cm$<r<$10.6$\,$cm. The value of the Rabi frequency gradient that we find in the linear fit in Fig.~\ref{fig1}(d) is in good agreement with the value obtained from a spatial fit of a single image in Fig.~\ref{fig1}(b). \begin{figure} \caption{ (color online) A comparison of different model fits to the data ($T=43~\mu$K). The dotted, dashed and solid lines represent the envelope of the exponential ($t$) model, the Gaussian ($t^2$) model and our model presented in Eq.~(\ref{eq:spint}).} \label{fig3} \end{figure} \begin{figure} \caption{ (color online) Coherence time as a function of the temperature ($T=8-102~\mu$K). The data points are the coherence times extracted from the $t^4$ model, as shown in Fig.~\ref{fig2}.} \label{fig4} \end{figure} Next, we analyze quantitatively the damping of the local Rabi oscillations as a function of sample temperature. In Fig. \ref{fig2} we present, as an example, four data sets of Rabi oscillations at different temperatures. We fit each data set to the function \begin{equation} n_2(t)=A\exp\left[-\frac{t^4/\tau_v^4}{1+\Delta_v^2 t^2/\Delta_x^2} -\frac{t^2}{\tau_x^2}\right]\cos(\Omega t+\phi)+n_b \label{eq:spint} \end{equation} where $A$ is the amplitude of oscillations at the moment $t=0$, $\Omega$ is the local Rabi frequency, $\phi$ is an arbitrary constant phase used to account for possible systematic shifts in the timing of the MW pulse and $n_b$ is the background population, whose time dependence due to cloud expansion is neglected. In Eq.~(\ref{eq:spint}), the argument of the first damping exponent is derived from Eq.~(\ref{eq:rho22}) and is due to thermal motion.
In this term $\Delta_v=\sqrt{k_B\cdot T/m}$ is calculated for each temperature, the initial width of the cloud, $\Delta_x$, is extracted from a Gaussian fit to the image of the initial cloud ($\Delta_x=1.1$mm) and $\tau_v$ is left as a free parameter. The second damping parameter, $\tau_x$, is obtained when we consider a finite spatial resolution of the imaging system, such that the image of the atoms represents a convolution of Eq.~(\ref{eq:rho22}) with a Gaussian resolution disk $(\sqrt{\pi}\sigma_I)^{-1}\exp[-(x-x')^2/2\sigma_I^2]$ of radius $\sigma_I$. The decay of the observed Rabi oscillations is then due to the fact that the periodicity of the spatial modulation of the internal state population becomes shorter with time, such that the spatial visibility of these fringes drops due to the limited optical resolution. This gives rise to a temporal damping of the observed local oscillations with a time constant \begin{equation} \tau_x=\frac{2^{1/2}}{\sigma_I(\partial_x\Omega)}. \label{taux} \end{equation} In order to make the fit, we first estimate the value of $\tau_x$. When leaving both $\tau_x$ and $\tau_v$ as free parameters, a fit to the $T=43~\mu$K data set appearing in Fig.~\ref{fig1} returns $\tau_x=8.8$ ms with a $\chi^2$ of $0.97$. This corresponds to a $\sigma_I$ of $94$ microns [Eq. (\ref{taux}), with $\partial_x\Omega=1.7$ (mm ms)$^{-1}$ from Fig.~\ref{fig1}(d)] which in turn corresponds to a misalignment of our $30$ cm focal length lens by $1$ mm or so along the imaging axis. As we estimate that our optics alignment error is at least that (as the cloud size itself is about $1$ mm in all directions), we adopt the $\tau_x=8.8$ ms value for the rest of the paper, and leave $A$, $B$, $\Omega$, $\phi$ and $\tau_v$ as free parameters.
Let us note that the fitting procedure is very robust and changing $\sigma_I$ by a factor of $2$ in each direction returns $\chi^2$ values with a mere change of $1\%$. Finally, we also fit the data to Eq.~(\ref{eq:spint}) while replacing the power of $4$ by a free parameter $d$ and find that it converges to values of $d=3.778-4.1$ with $\chi^2$ values $0.961 - 0.971$. For comparison we plot in Fig.~\ref{fig3} one set of Rabi oscillations ($T=43~\mu$K) with a fit to three possible models: a Gaussian model ($t^2$), an exponential model ($t$) and the $t^4$ model developed here. It can be clearly seen that the $t^4$ model provides the best fit. We now use the gradient measured by the fit presented in Fig. \ref{fig1}(d) to compare the observed coherence times at different temperatures to the theoretically expected value [Eq. (\ref{eq:tsol})]. As presented in Fig. \ref{fig4}, we find an excellent agreement between the theoretical prediction and the experimental data. \section{Discussion} \label{sec:discussion} The damping of local Rabi oscillations sets a limit on the spatial resolution of differential manipulation of thermal atoms by engineered spatially varying fields, and likewise sets a limit on the probing accuracy of driving field amplitudes by such atoms. Suppose that we want to obtain a single-shot measurement of the driving field gradient $\partial_x\Omega$. We would then fit the atomic population to a function $n(x)=A\cos(ax+b)+B$, where $a=\partial_x \Omega t$ and $b=\Omega(x=0)t$. If the measurement error of the coefficients $a$ and $b$ is constant with time, then it follows that the accuracy of $\Omega(x=0)$ and $\partial_x \Omega$ improves linearly with time. On the other hand, the measurement is limited by a maximum measurement time of $t\sim \tau_v$, as the visibility of population modulations drops drastically at this time.
It follows that at the optimal measurement time, the error in the measurement of $\Omega(x)$ is proportional to $1/\tau_v\propto T^{1/4}(\partial_x\Omega)^{1/2}$. We conclude that detection error grows slowly with temperature. Another aspect that can be derived from this work concerns the limitation of thermal atom manipulation by inhomogeneous fields where the field gradient is viewed as an imperfection. For example, our model can be used to infer the fidelity of a $\pi/2$ pulse applied to an atomic sample by a driving field which is inhomogeneous (e.g. an atomic cloud passing through a MW cavity in an atomic clock). Fidelity is defined by the overlap between a target state $|\psi\rangle_{\rm target}$ and an actual state $|\psi\rangle$. If the actual state is not pure then it is described by a density matrix $\rho=\sum_j w_j|\psi_j\rangle\langle \psi_j|$. The fidelity is then given by $ F=\left[\sum_j w_j |\langle \psi_j|\psi\rangle_{\rm target}|^2\right]^{1/2}$. For a $\pi/2$ pulse the target state is $|\psi\rangle_{\rm target}=\cos(\pi/4)|1\rangle-i\sin(\pi/4)|2\rangle$. For a given velocity the actual state is $|\psi_v\rangle=\cos\left[\frac{1}{4}(\pi+\partial_x\Omega vt_0^2)\right]|1\rangle -i\sin\left[\frac{1}{4}(\pi+\partial_x\Omega vt_0^2)\right]|2\rangle$, where $t_0=\pi/4\Omega_0$. The overlap between the actual state and the target state is $ \langle \psi_v|\psi\rangle_{\rm target}=\cos(v\partial_x\Omega t_0^2/4) $. Integrating the square of the overlap over the different velocities we obtain $ F^2=\int dv P(v)|\langle \psi_v|\psi_{\rm target}\rangle|^2=\frac{1}{2}\left\{1+\exp[-(\pi/4\Omega_0\tau_v)^4]\right\} $. When $\Omega_0\tau_v>\pi/4$ the fidelity is almost $1$, while if $\Omega_0\tau_v<\pi/4$ the fidelity drops to a minimum value of $F=1/\sqrt{2}$, which represents the fidelity for a totally random qubit state. It follows again that $\tau_v$ acts as a critical time for atom manipulation in the presence of gradients and thermal velocities.
To conclude, we have developed a simple model for the damping of local Rabi oscillations in the presence of driving field gradients and damping of Ramsey fringe coherence in the presence of static state-selective field gradients. For a sample of freely propagating thermal atoms we have shown that in the presence of gradients of driving fields, local Rabi oscillations of two-level atoms lose their coherence with an exponential quartic time dependence. Equivalently, in the presence of gradients of static fields, the coherence of local population oscillations in a Ramsey sequence reduces in the same way. The coherence time scales inversely with the square root of the field gradient and with the 4th root of the temperature. We have demonstrated the theoretical model in an experiment with laser cooled atoms and obtained an excellent agreement between the analytical solutions of the theory and the experimental results. Our model and experimental demonstration lays the grounds for understanding of more general situations in which a sample of atoms interacts with local fields. On the one hand, the atoms can serve as a measurement tool for probing the amplitudes of local fields and their spatial dependence, in which case our model may be used to determine the accuracy limits of such a measurement. On the other hand, our model may contribute to the understanding of limitations on local qubit manipulation in systems of thermal qubits whose external motion may be described classically and when they are not localized well enough relative to the variation length-scale of the manipulating fields. The model may be extended to cases where the atomic gas is confined by a potential or in a vapor cell. Further extensions of the model may also include the effects of atomic collisions or the behavior of atoms at ultracold temperatures where a degenerate gas is formed. 
For their assistance we are grateful to the members of the atom chip group and especially, Amir Waxman, Shimon Machluf, Menachem Givon and Zina Binshtok. We acknowledge support from the FP7 European consortium ``matter-wave interferometry" (601180). \end{document}
\begin{document} \title{Upper bound for isometric embeddings \lmp} \footnotetext[1]{{\em 2000 Mathematics Subject Classification: 46B04}\newline\indent {\em Keywords:} Isometric embeddings, quaternion spaces } \begin{abstract} The isometric embeddings $\ensuremath{\ell_2^m\rightarrow\ell_p^n}k$ ($m\geq 2$, $p\in 2\ensuremath{{\mathbb N}}\xspace$) over a field $\ensuremath{{\mathbb K}}\xspace\in\lbrace \ensuremath{{\mathbb R}}\xspace,\ensuremath{{\mathbb C}}\xspace,\ensuremath{{\mathbb H}}\xspace\rbrace$ are considered, and an upper bound for the minimal $n$ is proved. In the commutative case ($\ensuremath{{\mathbb K}}\xspace\neq\ensuremath{{\mathbb H}}\xspace$) the bound was obtained by Delbaen, Jarchow and Pe{\l}czy{\'n}ski (1998) in a different way. \end{abstract} Let $\ensuremath{{\mathbb K}}\xspace$ be one of three fields $\ensuremath{{\mathbb R}}\xspace,\ensuremath{{\mathbb C}}\xspace,\ensuremath{{\mathbb H}}\xspace$ (real, complex or quaternion). Let $\ensuremath{{\mathbb K}}\xspace^n$ be the $\ensuremath{{\mathbb K}}\xspace$-linear space consisting of columns $x=\left[\xi_i\right]_1^n$, $\xi_i\in\ensuremath{{\mathbb K}}\xspace$, with the right (for definiteness) multiplication by scalars $\alpha\in\ensuremath{{\mathbb K}}\xspace$. The normed space $\ensuremath{\ell_{p;\K}^n}$ is $\ensuremath{{\mathbb K}}\xspace^n$ provided with the norm \[ \norm{x}_p = \left(\sum_{i=1}^n \abs{\xi_i}^p\right)^{1/p},\quad 1\leq p < \infty. \] For $p=2$ this space is Euclidean, $\norm{x}_2 = \sqrt{\ip{x,x}}$, where the inner product $\ip{x,y}$ of $x$ and a vector $y=[\eta_i]_1^n$ is \[ \ip{x,y} = \sum_{i=1}^n \overline{\xi_i}\eta_i. \] An isometric embedding $\ensuremath{\ell_2^m\rightarrow\ell_p^n}k$, $2\leq m\leq n$, may exist only if $p\in 2\ensuremath{{\mathbb N}}\xspace = \lbrace 2,4,6,\ldots\rbrace$, see \cite{lyubich70} for $\ensuremath{{\mathbb K}}\xspace=\ensuremath{{\mathbb R}}\xspace$ and \cite{lyushat05_PeterMJ} for any $\ensuremath{{\mathbb K}}\xspace$.
Conversely, under these conditions for $m$ and $p$, there exists an $n$ such that $\ensuremath{\ell_{2;\K}^m}$ can be isometrically embedded into $\ensuremath{\ell_{p;\K}^n}$, see \cite{milman88} (and also \cite{lyuvas93,reznick92}) for $\ensuremath{{\mathbb K}}\xspace=\ensuremath{{\mathbb R}}\xspace$, \cite{konig95} for $\ensuremath{{\mathbb K}}\xspace=\ensuremath{{\mathbb C}}\xspace$, and \cite{lyushat05_PeterMJ} for $\ensuremath{{\mathbb K}}\xspace=\ensuremath{{\mathbb H}}\xspace$, $\ensuremath{{\mathbb C}}\xspace$ and $\ensuremath{{\mathbb R}}\xspace$ simultaneously. The proofs of existence in these papers also yield some upper bounds for the minimal $n=\ensuremath{{\mathbb N}}\xspaceKmp$. According to \cite{lyushat05_PeterMJ}, these bounds can be joined in the inequality \begin{equation} \label{eq:1} \ensuremath{{\mathbb N}}\xspaceKmp\leq \dim\ensuremath{\Phi_{\K}(m,p)}, \end{equation} where $\ensuremath{\Phi_{\K}(m,p)}$ is the space of homogeneous polynomials (forms) $\phi(x)$ over $\ensuremath{{\mathbb R}}\xspace$ of degree $p$ in real coordinates on $\ensuremath{{\mathbb K}}\xspace^m$ such that $\phi(x\alpha)=\phi(x)$ for all $\alpha\in\ensuremath{{\mathbb K}}\xspace$, $\abs{\alpha}=1$. For $\ensuremath{{\mathbb K}}\xspace=\ensuremath{{\mathbb R}}\xspace$ the latter condition is fulfilled automatically since $p\in2\ensuremath{{\mathbb N}}\xspace$, so $\Phi_{\ensuremath{{\mathbb R}}\xspace}(m,p)$ consists of all forms of degree $p$ on $\ensuremath{{\mathbb R}}\xspace^m$. The space $\Phi_{\ensuremath{{\mathbb C}}\xspace}(m,p)$ coincides with that which was used in \cite{konig95}. Note that in all cases $\dim\ensuremath{\Phi_{\K}(m,p)}$ can be explicitly expressed through binomial coefficients. (All the formulas are brought together in \cite[Theorem 2]{lyushat05_PeterMJ}.) In the present paper we prove that \begin{equation} \label{eq:2} \ensuremath{{\mathbb N}}\xspaceKmp\leq \dim \ensuremath{\Phi_{\K}(m,p)}-1. 
\end{equation} For $\ensuremath{{\mathbb K}}\xspace=\ensuremath{{\mathbb R}}\xspace$ and $\ensuremath{{\mathbb C}}\xspace$ this result (in terms of binomial coefficients) was obtained by Delbaen, Jarchow and Pe{\l}czy{\'n}ski \cite{delbaenetal98} as a by-product of the proof of their Theorem~B. Their rather complicated technique essentially uses the commutativity of the field $\ensuremath{{\mathbb K}}\xspace$, so it is not applicable to $\ensuremath{{\mathbb K}}\xspace=\ensuremath{{\mathbb H}}\xspace$. Our proof of (\ref{eq:2}) is general and elementary. Let us start with two lemmas, the first of which is well known. \begin{lemma} \label{lem:1} A linear mapping $f:\ensuremath{\ell_2^m\rightarrow\ell_p^n}k$ is isometric if and only if there is a system of vectors $u_k\in\ensuremath{\ell_{2;\K}^m}$, $1\leq k\leq n$, such that the identity \begin{equation} \label{eq:3} \sum_{k=1}^n\abs{\ip{u_k,x}}^p = \ip{x,x}^{p/2} \end{equation} holds for $x\in\ensuremath{\ell_{2;\K}^m}$. \end{lemma} \begin{proof} A general form of $f$ as a linear mapping is $fx=\left[\ip{u_k,x}\right]_1^n$, where $\left(u_k\right)_1^n$ is a system of vectors from $\ensuremath{\ell_{2;\K}^m}$ (called the \defin{frame} of $f$ \cite{lyushat05_PeterMJ,lyuvas93}). The identity (\ref{eq:3}) is nothing but $\norm{fx}_p=\norm{x}_2$. \end{proof} An isometric embedding $\ensuremath{\ell_2^m\rightarrow\ell_p^n}k$ is called \defin{minimal} if $n=\ensuremath{{\mathbb N}}\xspaceKmp$. \begin{lemma} \label{lem:2} If $f$ is minimal and $\left(u_k\right)_1^n$ is its frame then the functions $\abs{\ip{u_k,x}}^p$ are linearly independent. \end{lemma} \begin{proof} Let \begin{equation} \label{eq:4} \sum_{k=1}^n\omega_k\abs{\ip{u_k,x}}^p =0 \end{equation} with some real $\omega_k$, $\displaystyle\max_k \omega_k=1$, and let $\omega_n=1$ for definiteness. By subtraction of (\ref{eq:4}) from (\ref{eq:3}) we get \[ \sum_{k=1}^{n-1}\left(1-\omega_k\right)\abs{\ip{u_k,x}}^p = \ip{x,x}^{p/2}, \] i.e. 
\[ \sum_{k=1}^{n-1}\abs{\ip{v_k,x}}^p = \ip{x,x}^{p/2}, \] where $v_k = u_k\left(1-\omega_k\right)^{1/p}$. This contradicts the minimality of $f$. \end{proof} \begin{remark} Since all functions $\abs{\ip{\cdot,x}}^p$ belong to $\ensuremath{\Phi_{\K}(m,p)}$, the inequality (\ref{eq:1}) immediately follows from Lemma \ref{lem:2}. However, the existence of an isometric embedding $\ensuremath{\ell_{2;\K}^m}\rightarrow\ensuremath{\ell_{p;\K}^n}$ is assumed in this context. \end{remark} Now we proceed to the proof of (\ref{eq:2}). \begin{proof} Let $f:\ensuremath{\ell_{2;\K}^m}\rightarrow\ensuremath{\ell_{p;\K}^n}$ be a minimal isometric embedding. Then, according to (\ref{eq:1}), $n\leq\dim\ensuremath{\Phi_{\K}(m,p)}$. We have to prove that the equality is impossible. Suppose to the contrary. Then the system $\left(\abs{\ip{u_k,x}}^p\right)_1^n$ corresponding to the frame of $f$ is a basis of $\ensuremath{\Phi_{\K}(m,p)}$ by Lemma \ref{lem:2}. In particular, there is an expansion \begin{equation} \label{eq:5} \left(\sum_{i=1}^m\lambda_i\abs{\xi_i}^2\right)^{p/2} = \sum_{k=1}^n a_k(\lambda_1,\ldots,\lambda_m)\abs{\ip{u_k,x}}^p, \end{equation} where $(\lambda_i)_1^m\in\ensuremath{{\mathbb R}}\xspace^m$ and $a_k$ are some functions of these parameters. Now we introduce the inner product \[ \ipf{\phi_1,\phi_2} = \int_S \phi_1(x)\phi_2(x)\ensuremath{\,d}\sigma(x)\quad (\phi_1,\phi_2\in\ensuremath{\Phi_{\K}(m,p)}) \] where $\sigma$ is the standard measure on the unit sphere $S\subset\ensuremath{\ell_{2;\K}^m}$. In the Euclidean space $\ensuremath{\Phi_{\K}(m,p)}$ we have the basis $\left(\theta_k(x)\right)_1^n$ dual to $\left(\abs{\ip{u_k,x}}^p\right)_1^n$. This allows us to represent the coefficients $a_k$ as \[ a_k(\lambda_1,\ldots,\lambda_m) = \int_S\left(\sum_{i=1}^m\lambda_i\abs{\xi_i}^2\right)^{p/2} \theta_k(x)\ensuremath{\,d}\sigma(x). \] Hence, $a_k(\lambda_1,\ldots,\lambda_m)$ are forms of degree $p/2$, a fortiori, they are continuous.
Denote by $\ensuremath{{\mathbb R}}\xspace^m_{+}$ the open coordinate cone in $\ensuremath{{\mathbb R}}\xspace^m$, so $\ensuremath{{\mathbb R}}\xspace^m_{+} =\{(\lambda_i)_1^m\in\ensuremath{{\mathbb R}}\xspace^m: \lambda_1>0,\ldots,\lambda_m>0\}$. We prove that \defin{on} $\ensuremath{{\mathbb R}}\xspace^m_{+}$ \defin{all} $a_k(\lambda_1,\ldots,\lambda_m)>0$ or equivalently, $\hat{a}(\lambda_1,\ldots,\lambda_m)\equiv\displaystyle\min_k a_k(\lambda_1,\ldots,\lambda_m) >0$. Suppose to the contrary: let $\hat{a}(\gamma_1,\ldots,\gamma_m)<0$ for some $(\gamma_i)_1^m\in\ensuremath{{\mathbb R}}\xspace^m_{+}$. On the other hand, $\hat{a}(1,\ldots,1)=1$ by comparing (\ref{eq:3}) to (\ref{eq:5}) with all $\lambda_i=1$. Since $\hat{a}$ is continuous, we have $\hat{a}(\mu_1,\ldots,\mu_m)=0$ for some $(\mu_i)_1^m\in\ensuremath{{\mathbb R}}\xspace^m_{+}$. But the latter means that all $a_k(\mu_1,\ldots,\mu_m)\geq 0$ and at least one of them is zero, say $a_n(\mu_1,\ldots,\mu_m)=0$. Therefore, \[ \left(\sum_{i=1}^m \mu_i\abs{\xi_i}^2\right)^{p/2} = \sum_{k=1}^{n-1}a_k(\mu_1,\ldots,\mu_m)\abs{\ip{u_k,x}}^p, \] whence \begin{equation} \label{eq:6} \ip{z,z}^{p/2} = \sum_{k=1}^{n-1}\abs{\ip{v_k,z}}^p, \end{equation} where \[ z=\ensuremath{{\mathcal D}}\xspace x, \quad v_k = \left(a_k(\mu_1,\ldots,\mu_m)\right)^{1/p}\ensuremath{{\mathcal D}}\xspace^{-1}u_k \] and \ensuremath{{\mathcal D}}\xspace is the diagonal matrix with entries $\mu_1^{1/2},\ldots,\mu_m^{1/2}$. By Lemma \ref{lem:1} the identity (\ref{eq:6}) means that the system $(v_k)_1^{n-1}$ is the frame of an isometric embedding $\ensuremath{\ell_{2;\K}^m}\rightarrow\ell_{p;\ensuremath{{\mathbb K}}\xspace}^{n-1}$. This contradicts the minimality of $n$. As a result, all $a_k(\lambda_1,\ldots,\lambda_m)\geq 0$ for $\lambda_1\geq 0,\ldots,\lambda_m\geq 0$, i.e. on the closed coordinate cone.
Now we take $\xi_1=1$ and $\xi_i=0$ for all $i\geq 2$, so $x=e_1$, the first vector from the canonical basis of $\ensuremath{\ell_{2;\K}^m}$. In this setting (\ref{eq:5}) reduces to \[ \lambda_1^{p/2} = \sum_{k=1}^n a_k(\lambda_1,\ldots,\lambda_m)\abs{\ip{u_k,e_1}}^p. \] (Recall that $m\geq 2$.) This yields \[ \sum_{k=1}^n a_k(0,\lambda_2,\ldots,\lambda_m)\abs{\ip{u_k,e_1}}^p=0. \] Assume all $\ip{u_k,e_1}\neq 0$. Since for $\lambda_2>0,\ldots,\lambda_m>0$ all \mbox{$a_k(0,\lambda_2,\ldots,\lambda_m)\geq0$}, all of them are equal to zero. Hence, the right side of the identity (\ref{eq:5}) vanishes for $\lambda_1 =0$, in contrast to the function on the left side, a contradiction. To finish the proof we only have to show that the assumption $\ip{u_k,e_1}\neq 0$, $1\leq k\leq n$, is not essential. First, note that all $u_k\neq 0$, otherwise, the number $n$ in (\ref{eq:3}) would be reduced. Therefore, the sets $\{x: \ip{u_k,x} =0\}, 1\leq k\leq n$, are hyperplanes in $\ensuremath{\ell_{2;\K}^m}$. Their union is different from the whole space. Hence, there is a vector $e$ such that all $\ip{u_k,e}\neq 0$, $\norm{e}_2=1$. This $e$ can be represented as $e=ge_1$ where $g$ is an isometry of the space $\ensuremath{\ell_{2;\K}^m}$. Indeed, this space is Euclidean, so its isometry group is transitive on the unit sphere. Thus, all $\ip{g^{-1}u_k,e_1}\neq 0$. On the other hand, $(g^{-1}u_k)_1^n$ is the frame of the isometric embedding $fg:\ensuremath{\ell_{2;\K}^m}\rightarrow\ensuremath{\ell_{p;\K}^n}$. \end{proof} \begin{singlespace} Address: {\it Department of Mathematics, Technion, 32000, Haifa, Israel} email: {\it [email protected]} \end{singlespace} \end{document}
\begin{document} \title{Eigenvalue variation under moving mixed Dirichlet--Neumann boundary conditions and applications} \begin{abstract} We deal with the sharp asymptotic behaviour of eigenvalues of elliptic operators with varying mixed Dirichlet--Neumann boundary conditions. In case of simple eigenvalues, we compute explicitly the constant appearing in front of the expansion's leading term. This allows inferring some remarkable consequences for Aharonov--Bohm eigenvalues when the singular part of the operator has two coalescing poles. \end{abstract} \paragraph{Keywords.} Mixed boundary conditions, asymptotics of eigenvalues, Aharonov--Bohm eigenvalues. \paragraph{MSC classification.} Primary: 35P20; Secondary: 35P15, 35J25. \section{Introduction and main results} The present paper deals with elliptic operators with varying mixed Dirichlet--Neumann boundary conditions and their spectral stability under varying of the Dirichlet and Neumann boundary regions. More precisely, we study the behaviour of eigenvalues under a homogeneous Neumann condition on a portion of the boundary concentrating at a point and a homogeneous Dirichlet boundary condition on the complement. Let $\Omega$ be a bounded open set in $\ensuremath{\mathbb{R}}^2_+:=\{(x_1,x_2)\in\ensuremath{\mathbb{R}}^2:x_2>0\}$ having the following properties: \begin{align}\label{eq:38} &\text{$\Omega$ is Lipschitz},\\ \label{eq:40} &\text{there exists $\ensuremath{\ensuremath{\mathbf{a}}repsilon}_0>0$ such that $\Gamma_{\ensuremath{\ensuremath{\mathbf{a}}repsilon}_0}:=[-\ensuremath{\ensuremath{\mathbf{a}}repsilon}_0,\ensuremath{\ensuremath{\mathbf{a}}repsilon}_0]\times\{0\}\subset \partial \Omega$}. \end{align} We consider the eigenvalue problem for the Dirichlet Laplacian on the domain $\Omega$ \begin{equation}\label{eqD} \begin{cases} -\Delta\,u=\lambda\,u,& \mbox{in }\Omega,\\ u=0,&\mbox{on } \partial\Omega. 
\end{cases} \end{equation} We denote by $(\lambda_j)_{j\ge 1}$ the eigenvalues of Problem \eqref{eqD}, arranged in non-decreasing order and counted with multiplicities. For each $\ensuremath{\ensuremath{\mathbf{a}}repsilon}\in (0,\ensuremath{\ensuremath{\mathbf{a}}repsilon}_0]$, we also consider the following eigenvalue problem with mixed boundary conditions: \begin{equation}\label{eqDND} \begin{cases} -\Delta\,u=\lambda\,u,& \mbox{in }\Omega,\\ u=0,&\mbox{on } \partial\Omega\setminus \Gamma_{\ensuremath{\ensuremath{\mathbf{a}}repsilon}},\\ \frac{\partial u}{\partial \nu}=0,&\mbox{on } \Gamma_{\ensuremath{\ensuremath{\mathbf{a}}repsilon}}, \end{cases} \end{equation} with $\Gamma_{\ensuremath{\ensuremath{\mathbf{a}}repsilon}}:=[-\ensuremath{\ensuremath{\mathbf{a}}repsilon},\ensuremath{\ensuremath{\mathbf{a}}repsilon}]\times\{0\}$, see Figure \ref{fig:f1}. We denote by $(\lambda_j(\ensuremath{\ensuremath{\mathbf{a}}repsilon}))_{j\ge 1}$ the eigenvalues of Problem \eqref{eqDND}, arranged in non-decreasing order and counted with multiplicities. \begin{figure} \caption{The mixed boundary condition problem in the domain $\Omega$.} \label{fig:f1} \end{figure} A rigorous weak formulation of the eigenvalue problems described above can be given as follows. 
For $\ensuremath{\ensuremath{\mathbf{a}}repsilon} \in (0,\ensuremath{\ensuremath{\mathbf{a}}repsilon}_0]$, we define \begin{equation*} \mathcal{Q}_{\ensuremath{\ensuremath{\mathbf{a}}repsilon}}=\left\{u\in H^1(\Omega):\,\chi_{\partial\Omega\setminus\Gamma_{\ensuremath{\ensuremath{\mathbf{a}}repsilon}}}\gamma_0(u)=0\mbox{ in } L^2(\partial \Omega)\right\}, \end{equation*} where $\gamma_0$ is the trace operator from $H^1(\Omega)$ to $L^2(\partial \Omega)$, which is a continuous linear mapping (see for instance \cite[Definition 13.2]{Tartar2007Sobolev}) and $\chi_{\partial\Omega\setminus\Gamma_{\ensuremath{\ensuremath{\mathbf{a}}repsilon}}}$ is the indicator function of $\partial\Omega\setminus\Gamma_{\ensuremath{\ensuremath{\mathbf{a}}repsilon}}$ in $\partial \Omega$. Furthermore, we define the quadratic form $q$ on $H^1(\Omega)$ by \begin{equation} \label{eqQuad} q(u):=\int_{\Omega}\left|\nabla u\right|^2\,dx. \end{equation} Let us denote by $q_0$ the restriction of $q$ to $H^1_0(\Omega)$ and by $q_{\ensuremath{\ensuremath{\mathbf{a}}repsilon}}$ the restriction of $q$ to $\mathcal Q_{\ensuremath{\ensuremath{\mathbf{a}}repsilon}}$. The sequences $(\lambda_j)_{j\ge 1}$ and $(\lambda_j(\ensuremath{\ensuremath{\mathbf{a}}repsilon}))_{j\ge 1}$ for $\ensuremath{\ensuremath{\mathbf{a}}repsilon}\in (0,\ensuremath{\ensuremath{\mathbf{a}}repsilon}_0]$ can then be defined by the min-max principle: \begin{equation}\label{eqMinMaxD} \lambda_j:=\min_{\substack{ \mathcal E\subset H^1_0(\Omega)\text{ subspace}\\ \mbox{dim}(\mathcal E)=j}}\max_{u\in \mathcal E} \frac{q(u)}{\|u\|^2} \end{equation} and \begin{equation}\label{eqMinMaxDND} \lambda_j(\ensuremath{\ensuremath{\mathbf{a}}repsilon}):=\min_{\substack{\mathcal E\subset \mathcal{Q}_{\ensuremath{\ensuremath{\mathbf{a}}repsilon}}\text{ subspace}\\ \mbox{dim}(\mathcal E)=j}}\max_{u\in \mathcal E} \frac{q(u)}{\|u\|^2}, \end{equation} where \[ \|u\|^2=\int_{\Omega}u^2(x)\,dx. 
\] Since $H^1(\Omega)$ is compactly embedded in $L^2(\Omega)$ (see e.g. \cite[Lemma 18.4]{Tartar2007Sobolev}), the eigenvalues of $q_0$, defined by Equation \eqref{eqMinMaxD}, and those of $q_{\ensuremath{\ensuremath{\mathbf{a}}repsilon}}$, defined by Equation \eqref{eqMinMaxDND}, are of finite multiplicity, and form sequences tending to $+\infty$. \begin{rem} \label{remIneq} Let us fix $\ensuremath{\ensuremath{\mathbf{a}}repsilon}_1$ and $\ensuremath{\ensuremath{\mathbf{a}}repsilon}_2$ in $(0,\ensuremath{\ensuremath{\mathbf{a}}repsilon}_0]$ such that $\ensuremath{\ensuremath{\mathbf{a}}repsilon}_1>\ensuremath{\ensuremath{\mathbf{a}}repsilon}_2$. Since $H^1_0(\Omega)\subset\mathcal Q_{\ensuremath{\ensuremath{\mathbf{a}}repsilon}_2} \subset\mathcal Q_{\ensuremath{\ensuremath{\mathbf{a}}repsilon}_1}$, the definitions given by Formulas \eqref{eqMinMaxD} and \eqref{eqMinMaxDND} immediately imply that $\lambda_j(\ensuremath{\ensuremath{\mathbf{a}}repsilon}_1)\le \lambda_j(\ensuremath{\ensuremath{\mathbf{a}}repsilon}_2)\le \lambda_j$ for each integer $j\ge 1$. The function $(0,\ensuremath{\ensuremath{\mathbf{a}}repsilon}_0]\ni \ensuremath{\ensuremath{\mathbf{a}}repsilon}\mapsto \lambda_j(\ensuremath{\ensuremath{\mathbf{a}}repsilon})$ is therefore non-increasing and bounded by $\lambda_j$ for each integer $j\ge1$. \end{rem} \begin{rem} \label{rem:conf_change} For the sake of simplicity, in the present paper we assume that the domain $\Omega$ satisfies assumption \eqref{eq:40}, i.e. that $\partial\Omega$ is straight in a neighborhood of $0$. We observe that, since we are in dimension $2$, this assumption is not restrictive. Indeed, starting from a general sufficiently regular domain $\Omega$, a conformal transformation leads us to consider a new domain satisfying \eqref{eq:40}, see e.g. \cite{FFFN}. 
The counterpart is the appearance of a conformal weight in the new eigenvalue problem; however, if $\Omega$ is sufficiently regular, the weighted problem presents no additional difficulties. \end{rem} The purpose of the present paper is to study the eigenvalue function $\ensuremath{\ensuremath{\mathbf{a}}repsilon}\mapsto\lambda_j(\ensuremath{\ensuremath{\mathbf{a}}repsilon})$ as $\ensuremath{\ensuremath{\mathbf{a}}repsilon}\to 0^+$. The continuity of this map as well as some asymptotic expansions were obtained in \cite{Gad} (see also Appendix C of the present paper for an alternative proof of some results of \cite{Gad}). Here we mean to provide some explicit characterization of the leading terms in the expansion given in \cite{Gad} and of the limit profiles arising from blowing-up of eigenfunctions. Spectral stability and asymptotic expansion of the eigenvalue variation in a somehow complementary setting were obtained in \cite{AFHL2016}; indeed, if we consider the eigenvalue problem under homogeneous Dirichlet boundary conditions on a vanishing portion of a straight part of the boundary, Neumann conditions on the complement in the straight part and Dirichlet conditions elsewhere, by a reflection the problem becomes equivalent to the one studied in \cite{AFHL2016}, i.e. a Dirichlet eigenvalue problem in a domain with a small segment removed. Related spectral stability results were discussed in \cite[Section 4]{ColoradoPeral2003} for the first eigenvalue under mixed Dirichlet-Neumann boundary conditions on a smooth bounded domain $\Omega\subset\ensuremath{\mathbb{R}}^N$ ($N\geq3$), both for vanishing Dirichlet boundary portion and for vanishing Neumann boundary portion. 
We also mention that some regularity results for solutions to second-order elliptic problems with mixed Dirichlet--Neumann type boundary conditions were obtained in \cite{Kassmann,Savare}, see also the references therein, whereas asymptotic expansions at Dirichlet-Neumann boundary junctions were derived in \cite{FFFN}. Let us assume that \begin{equation}\label{eq:6} \lambda_N \text{ (i.e. the $N$-th eigenvalue of $q_0$) is simple}. \end{equation} Let $u_N$ be a normalized eigenfunction associated to $\lambda_N$, i.e. $u_N$ satisfies \begin{equation}\label{eq:5} \begin{cases} -\Delta u_N=\lambda_N u_N,&\text{in }\Omega,\\ u_N=0,&\text{on }\partial\Omega,\\ \int_\Omega u_N^2(x)\,dx=1. \end{cases} \end{equation} It is known (see \cite{Gad}) that, under assumption \eqref{eq:6}, the rate of the convergence $\lambda_N^\ensuremath{\ensuremath{\mathbf{a}}repsilon}\to\lambda_N$ is strongly related to the order of vanishing of the Dirichlet eigenfunction $u_N$ at $0$. Moreover $u_N$ has an integer order of vanishing $k\geq1$ at $0\in\partial\Omega$ and there exists $\beta\in\ensuremath{\mathbb{R}}\setminus\{0\}$ such that \begin{align}\label{eq:orderk} r^{-k} u_N(r\cos t,r\sin t ) \to \beta \sin(kt) \text{ as }r\to 0 \text{ in }C^{1,\tau}([0,\pi]) \end{align} for any $\tau\in(0,1)$, see e.g. \cite[Theorem 1.1]{FF}. Actually, one can see that $\beta$ is directly linked to the norm of the $k$-th differential of $u_N$ at $0$. More precisely, if we consider \[ \| d^{j}u (x) \|^2 := \sum_{i_1,\ldots,i_j=1}^2 \left| \frac{\partial ^j u}{\partial x_{i_1}\ldots \partial x_{i_j}}(x) \right|^2 , \] then \[ \beta^2 = \frac{\| d^{k}u_N (0) \|^2}{(k!)^2 \,2^{k-1}}. \] This follows by differentiating the harmonic homogeneous functions $\beta r^k \sin(kt)$ and $\beta r^k \cos(kt)$ with respect to $x_1$ and $x_2$ and considering $d^{k}u_N (0)$. 
Our main results provide sharp asymptotic estimates with explicit coefficients for the eigenvalue variation $\lambda_N-\lambda_N(\varepsilon)$ as $\varepsilon\to0^+$ under assumption \eqref{eq:6} (Theorem \ref{t:new_main}), as well as an explicit representation in elliptic coordinates of the limit blow-up profile for the corresponding eigenfunction $u_N^\varepsilon$ (Theorem \ref{t:blowup2}). \begin{thm}\label{t:new_main} Let $\Omega$ be a bounded open set in $\ensuremath{\mathbb{R}}^2$ satisfying \eqref{eq:38} and \eqref{eq:40}. Let $N\geq 1$ be such that the $N$-th eigenvalue $\lambda_N$ of $q_0$ on $\Omega$ is simple with associated eigenfunctions having in $0$ a zero of order $k$ with $k$ as in \eqref{eq:orderk}. For $\varepsilon\in (0,\varepsilon_0)$, let $\lambda_N(\varepsilon)$ be the $N$-th eigenvalue of $q_\varepsilon$ on $\Omega$. Then \begin{equation*} \lim_{\varepsilon\to 0^+}\frac{\lambda_N-\lambda_N(\varepsilon)}{\varepsilon^{2k}}= \beta^2\, \frac{k\pi}{2^{2k-1}} \binom{k-1}{\left\lfloor\frac{k-1}2\right\rfloor}^{\!2} \end{equation*} with $\beta\neq0$ being as in \eqref{eq:5}--\eqref{eq:orderk}. \end{thm} \begin{thm}\label{t:blowup2} Let $\Omega$ be a bounded open set in $\ensuremath{\mathbb{R}}^2$ satisfying \eqref{eq:38} and \eqref{eq:40}. Let $N\geq 1$ be such that the $N$-th eigenvalue $\lambda_N$ of $q_0$ on $\Omega$ is simple with associated eigenfunctions having in $0$ a zero of order $k$ with $k$ as in \eqref{eq:orderk}.
For $\ensuremath{\ensuremath{\mathbf{a}}repsilon}\in [0,\ensuremath{\ensuremath{\mathbf{a}}repsilon}_0)$, let $\lambda_N(\ensuremath{\ensuremath{\mathbf{a}}repsilon})$ be the $N$-th eigenvalue of $q_\ensuremath{\ensuremath{\mathbf{a}}repsilon}$ on $\Omega$ and $u_N^{\ensuremath{\ensuremath{\mathbf{a}}repsilon}}$ be an associated eigenfunction satisfying $\int_\Omega |u_N^{\ensuremath{\ensuremath{\mathbf{a}}repsilon}}|^2\,dx=1$ and $\int_{\Omega}u_N^{\ensuremath{\ensuremath{\mathbf{a}}repsilon}}\,u_N \,dx\ge0$. Then \[ \ensuremath{\ensuremath{\mathbf{a}}repsilon}^{-k}u_N^\ensuremath{\ensuremath{\mathbf{a}}repsilon}(\ensuremath{\ensuremath{\mathbf{a}}repsilon} x)\to \beta (\psi_k+W_k\circ F^{-1})\quad\text{as }\ensuremath{\ensuremath{\mathbf{a}}repsilon}\to0^+ \] in $H^{1}_{\rm loc}(\overline{\ensuremath{\mathbb{R}}^2_+})$, a.e. and in $C^{2}_{\rm loc}(\overline{\ensuremath{\mathbb{R}}^2_+}\setminus\{(1,0),(-1,0)\})$, where $\beta$ is as in \eqref{eq:5}--\eqref{eq:orderk}, \begin{align}\label{eq:psi_k} & \psi_k(r\cos t,r\sin t)=r^k \sin(kt), \quad\text{for }t\in[0,\pi] \text{ and }r>0,\\ \label{eq:44} &F(\xi,\eta)=(\cosh(\xi)\cos(\eta),\sinh(\xi)\sin(\eta)),\quad \text{for }\xi\geq0,\ \eta\in[0,2\pi), \end{align} and \begin{equation}\label{eq:W_k} W_k(\xi,\eta)=\frac1{2^{k-1}}\sum_{j=0}^{\left\lfloor{\frac{k-1}2}\right\rfloor} \binom kj \exp(-(k-2j)\xi)\sin((k-2j)\eta). 
\end{equation} \end{thm} Actually, the fact that $\lim_{\varepsilon\to 0^+}\frac{\lambda_N-\lambda_N(\varepsilon)}{\varepsilon^{2k}}$ is finite and different from zero and the convergence of $\varepsilon^{-k}u_N^\varepsilon(\varepsilon x)$ to some nontrivial profile was proved in the paper \cite{Gad} with a quite implicit description of the limits (see also Appendix \ref{sec:altern-proof-theor} for an alternative proof). The original contribution of the present paper lies in the explicit characterization of the leading term of the expansion provided by \cite{Gad} and in its applications to Aharonov--Bohm operators, see Section \ref{subs:IntroAB}. The key tool allowing us to write explicitly the coefficients of the expansion consists in the use of elliptic coordinates, which turn out to be more suitable to our problem than radial ones, see Section \ref{sec:explicit}. \section{Applications to Aharonov--Bohm operators} \label{subs:IntroAB} The present work is in part motivated by the study of Aharonov--Bohm eigenvalues. In this section we describe some applications of Theorem \ref{t:new_main} to the problem of spectral stability of Aharonov--Bohm operators with two moving poles, referring to Section \ref{s:AB} for the proofs. Let us first review some definitions and known results. For any point $\ensuremath{\mathbf{a}}=(a_1,a_2)\in \ensuremath{\mathbb{R}}^2$, we define the Aharonov--Bohm potential of circulation $1/2$ by \begin{equation*} \ensuremath{\mathbf{A}}_\ensuremath{\mathbf{a}}(x)=\frac12\left(\frac{-(x_2-a_2)}{(x_1-a_1)^2+(x_2-a_2)^2},\frac{x_1-a_1}{(x_1-a_1)^2+(x_2-a_2)^2}\right). \end{equation*} Let us consider a bounded open set $\widehat\Omega$ with Lipschitz boundary, such that $0\in\widehat\Omega$.
For better readability, we denote by $\mathcal H$ the complex Hilbert space of complex-valued functions $L^2(\widehat\Omega,\ensuremath{\mathbb{C}})$, equipped with the scalar product defined, for all $u,v\in \mathcal H$, by \begin{equation*} \langle u,v\rangle:=\int_{\widehat\Omega}u\overline v\,dx. \end{equation*} We define, for $\ensuremath{\mathbf{a}}\in \widehat\Omega$, \begin{equation} \label{eq:QAB1Pole} \mathcal Q^{AB}_\ensuremath{\mathbf{a}}:=\left\{u\in H^1_0\big(\widehat{\Omega},\ensuremath{\mathbb{C}}\big)\,;\, \frac{|u|}{|x-\ensuremath{\mathbf{a}}|}\in L^2\big(\widehat{\Omega}\big)\right\}, \end{equation} the quadratic form $q^{AB}_\ensuremath{\mathbf{a}}$ on $\mathcal Q^{AB}_\ensuremath{\mathbf{a}}$ by \begin{equation} \label{eq:QuadAB1Pole} q^{AB}_\ensuremath{\mathbf{a}}(u):=\int_{\widehat\Omega}\left|(i\nabla+\ensuremath{\mathbf{A}}_\ensuremath{\mathbf{a}})u\right|^2\,dx, \end{equation} and the sequence of eigenvalues $\left(\lambda^{AB}_j(\ensuremath{\mathbf{a}})\right)_{j\ge 1}$ by the min-max principle \begin{equation} \label{eq:MinMaxAB1Poles} \lambda_j^{AB}(\ensuremath{\mathbf{a}}):= \min_{\substack{\mathcal E\subset \mathcal{Q}^{AB}_{\ensuremath{\mathbf{a}}}\text{ subspace}\\ \mbox{dim}(\mathcal E)=j}}\max_{u\in \mathcal E} \frac{q^{AB}_\ensuremath{\mathbf{a}}(u)}{\|u\|^2}. \end{equation} It follows from the definition in Equation \eqref{eq:QAB1Pole} that $\mathcal Q^{AB}_\ensuremath{\mathbf{a}}$ is compactly embedded in $\mathcal H$. The above eigenvalues are therefore of finite multiplicity and $\lambda_j^{AB}(\ensuremath{\mathbf{a}})\to +\infty$ as $j\to +\infty$. \begin{rem} Let us note that, as shown in \cite[Lemma 2.1]{NT}, $\mathcal Q^{AB}_\ensuremath{\mathbf{a}}$ is the completion of the set of smooth functions supported in $\widehat\Omega\setminus\{\ensuremath{\mathbf{a}}\}$ for the norm $\|\cdot\|_\ensuremath{\mathbf{a}}$ defined by \begin{equation*} \|u\|_\ensuremath{\mathbf{a}}^2=\|u\|^2+q^{AB}_\ensuremath{\mathbf{a}}(u). 
\end{equation*} Let us point out that functions in $\mathcal Q^{AB}_\ensuremath{\mathbf{a}}$ satisfy a Dirichlet boundary condition, which is not the case in \cite{NT}. However, this difference is unimportant for the compact embedding. \end{rem} \begin{rem} We could also consider the Friedrichs extension of the differential operator \begin{equation*} (i\nabla+\ensuremath{\mathbf{A}}_\ensuremath{\mathbf{a}})^*(i\nabla+\ensuremath{\mathbf{A}}_\ensuremath{\mathbf{a}}) u= -\Delta u+2i\ensuremath{\mathbf{A}}_\ensuremath{\mathbf{a}}\cdot\nabla u+\left|\ensuremath{\mathbf{A}}_\ensuremath{\mathbf{a}}\right|^2u \end{equation*} acting on functions $u\in C^{\infty}_c(\widehat\Omega\setminus\{\ensuremath{\mathbf{a}}\},\ensuremath{\mathbb{C}})$. As shown for instance in \cite[Section I]{Len15AB} or \cite[Section 2]{BNNT}), this defines a positive and self-adjoint operator with compact resolvent, whose eigenvalues, counted with multiplicities, are $\left(\lambda^{AB}_j(\ensuremath{\mathbf{a}})\right)_{j\ge 1}$. It is called the Aharonov-Bohm operator of pole $\ensuremath{\mathbf{a}}$ and circulation $1/2$. \end{rem} In recent years, several authors have studied the dependence of eigenvalues on the position of the pole. It has been established in \cite[Theorem 1.1]{BNNT}, that the functions $\ensuremath{\mathbf{a}} \mapsto \lambda^{AB}_j(\ensuremath{\mathbf{a}})$ are continuous in $\overline{\Omega}$. In \cite{abatangelo2015sharp,AbatangeloFelli2016SIAM}, the two first authors obtained the precise rate of convergence $\lambda^{AB}_j(\ensuremath{\mathbf{a}}) \to \lambda^{AB}_j(0)$ as $\ensuremath{\mathbf{a}}$ converges to the interior point $0$ for simple eigenvalues. In order to state the most complete result, given in \cite[Theorem 1.2]{AbatangeloFelli2016SIAM}, we consider an $L^2$-normalized eigenfunction $u_N^0$ of $q_0^{AB}$ associated with the eigenvalue $\lambda^{AB}_N(0)$. We additionally assume that $\lambda^{AB}_N(0)$ is simple. 
From \cite[Section 7]{FFT} it follows that there exists an odd positive integer $k$ and a non-zero complex number $\beta_0$ such that, up to a rotation of the coordinate axes, \begin{equation*} r^{-\frac{k}2}u_N^0(r\cos t,r\sin t)\to {\beta_0} e^{i\frac{t}2}\sin\left(\frac{k}2t\right)\mbox{ in }C^{1,\tau}\left([0,2\pi],\ensuremath{\mathbb{C}}\right) \end{equation*} as $r \to 0^+$, for all $\tau\in (0,1)$. The integer $k$ has a simple geometric interpretation: it is the number of nodal lines of the function $u_N^0$ which meet at $0$. We say that $u_N^0$ has a zero of order $k/2$ in $0$. Our coordinate axes are chosen in such a way that one of these nodal lines is tangent to the positive $x_1$ semi-axis. \begin{thm} \label{t:monopole} Let us define $\ensuremath{\mathbf{a}}_\ensuremath{\ensuremath{\mathbf{a}}repsilon}:=\ensuremath{\ensuremath{\mathbf{a}}repsilon}(\cos(\alpha),\sin(\alpha))$, with $\ensuremath{\ensuremath{\mathbf{a}}repsilon}>0$. We have, as $\ensuremath{\ensuremath{\mathbf{a}}repsilon}\to 0^+$, \begin{equation*} \lambda^{AB}_N\left(\ensuremath{\mathbf{a}}_\ensuremath{\ensuremath{\mathbf{a}}repsilon}\right)=\lambda^{AB}_N(0)-\frac{k\pi\beta_0^2}{2^{2k-1}}\left(\begin{array}{c} k-1\\ \left\lfloor \frac{k-1}{2}\right\rfloor\end{array}\right)^2\cos(k\alpha)\ensuremath{\ensuremath{\mathbf{a}}repsilon}^k+o\left(\ensuremath{\ensuremath{\mathbf{a}}repsilon}^k\right). \end{equation*} \end{thm} \begin{rem} The expansion in \cite{abatangelo2015sharp,AbatangeloFelli2016SIAM} involves a constant depending on $k$, defined as the minimal energy in a Dirichlet-type problem. We compute this constant in Appendix \ref{a:A} in order to obtain the more explicit result in Theorem \ref{t:monopole}. 
\end{rem} Let us now consider, for any $\varepsilon>0$, an Aharonov-Bohm potential with two poles $(\varepsilon,0)$ and $(-\varepsilon,0)$, of fluxes respectively $1/2$ and $-1/2$: \begin{equation*} \ensuremath{\mathbf{A}}_{\varepsilon}:=\ensuremath{\mathbf{A}}_{(\varepsilon,0)}-\ensuremath{\mathbf{A}}_{(-\varepsilon,0)}. \end{equation*} As in the case of one pole, we define the vector space $\mathcal Q^{AB}_\varepsilon$ by \begin{equation} \label{eq:QAB2Poles} \mathcal Q^{AB}_\varepsilon:=\left\{u\in H^1_0\big(\widehat{\Omega},\ensuremath{\mathbb{C}} \big)\,;\, \frac{|u|}{|x\pm\varepsilon\,{\mathbf e}|}\in L^2\big(\widehat{\Omega}\big)\right\}, \end{equation} where $\mathbf e=(1,0)$, the quadratic form $q^{AB}_\varepsilon$ on $\mathcal Q^{AB}_\varepsilon$ by \begin{equation} \label{eq:QuadAB2Poles} q^{AB}_\varepsilon(u):=\int_{\widehat\Omega}\left|(i\nabla+\ensuremath{\mathbf{A}}_\varepsilon)u\right|^2\,dx, \end{equation} and the sequence of eigenvalues $\left(\lambda^{AB}_j(\varepsilon)\right)_{j\ge 1}$ by the min-max principle \begin{equation} \label{eq:MinMaxAB2Poles} \lambda_j^{AB}(\varepsilon):= \min_{\substack{\mathcal E\subset \mathcal{Q}^{AB}_{\varepsilon}\text{ subspace}\\ \mbox{dim}(\mathcal E)=j}}\max_{u\in \mathcal E} \frac{q^{AB}_\varepsilon(u)}{\|u\|^2}.
\end{equation} It follows from \cite[Corollary 3.5]{Len15AB} that, for any $j\ge1$, $\lambda^{AB}_j(\ensuremath{\ensuremath{\mathbf{a}}repsilon})$ converges to the $j$-th eigenvalue of the Laplacian in $\widehat\Omega$ as $\ensuremath{\ensuremath{\mathbf{a}}repsilon}\to 0^+$. In \cite{AFL2017ANS,AFHL2016} the authors obtained in some cases a sharp rate of convergence. In order to state the result, let us introduce some notation. We denote by $\widehat q_0$ the quadratic form on $H^1_0(\widehat \Omega)$ defined by Equation \eqref{eqQuad}, replacing $\Omega$ with $\widehat\Omega$, and we denote by $\big(\widehat\lambda_j\big)_{j\ge1}$ the sequence of eigenvalues defined by Equation \eqref{eqMinMaxD}, replacing $\Omega$ with $\widehat\Omega$ and $q$ with $\widehat q_0$. We fix an integer $N\ge1$ and assume that $\widehat\lambda_N$ is a simple eigenvalue. We denote by $\widehat u_N$ an associated eigenfunction, normalized in $L^2\big(\widehat\Omega\big)$. \begin{thm}{\cite[Theorem 1.2]{AFL2017ANS}} \label{thmNonZero} If $\widehat u_N(0)\neq 0$, we have, as $\ensuremath{\ensuremath{\mathbf{a}}repsilon}\to0$, \begin{equation*} \lambda_N^{AB}(\ensuremath{\ensuremath{\mathbf{a}}repsilon})=\widehat\lambda_N+\frac{2\pi}{|\log(\ensuremath{\ensuremath{\mathbf{a}}repsilon})|}\widehat u_N ^2 (0)+o\left(\frac1{|\log(\ensuremath{\ensuremath{\mathbf{a}}repsilon})|}\right). \end{equation*} \end{thm} In the case $\widehat u_N(0)=0$, it is well known that there exist $k\in\ensuremath{\mathbb{N}}\setminus\{0\}$, $\widehat\beta\in \ensuremath{\mathbb{R}}\setminus\{0\}$ and $\alpha\in [0,\pi)$ such that \begin{equation*} r^{-k} \widehat u_N (r\cos t,r\sin t)\to \widehat\beta \sin\left(\alpha-kt\right)\mbox{ in }C^{1,\tau}\left([0,2\pi],\ensuremath{\mathbb{C}}\right) \end{equation*} as $r\to 0^+$ for all $\tau\in (0,1)$. In particular, there is a nodal line whose tangent makes the angle $\alpha/k$ with the positive $x_1$ semi-axis. 
As above we can characterize $\widehat\beta$ as $|\widehat\beta|^2 = \frac{\| d^{k}\widehat u_N (0) \|^2}{(k!)^2 \,2^{k-1}}$.\ Let us assume that \[ \widehat\Omega \text{ is symmetric with respect to the $x_1$-axis.} \] Since $\widehat\lambda_N$ is simple, $\widehat u_N$ is either even or odd in the variable $x_2$ and $\alpha$ is either $\pi/2$ or $0$ accordingly. \begin{thm}{\cite[Theorem 1.16]{AFHL2016}} \label{t:dipoleSymEven} If $\widehat u_N$ is even in $x_2$, which corresponds to $\alpha=\pi/2$, we have, as $\ensuremath{\ensuremath{\mathbf{a}}repsilon}\to 0^+$, \begin{equation*} \lambda_N^{AB}(\ensuremath{\ensuremath{\mathbf{a}}repsilon})=\widehat\lambda_N+\frac{k\pi{\widehat\beta}^2}{4^{k-1}}\left(\begin{array}{c} k-1\\ \left\lfloor\frac{k-1}2\right\rfloor\end{array}\right)^2\ensuremath{\ensuremath{\mathbf{a}}repsilon}^{2k}+o\left(\ensuremath{\ensuremath{\mathbf{a}}repsilon}^{2k}\right). \end{equation*} \end{thm} \begin{rem} The statements in \cite{AFHL2016} contain a constant $C_k$ which we put in a simpler form in Appendix \ref{a:A}, in order to obtain Theorem \ref{t:dipoleSymEven}. \end{rem} As a corollary of Theorem \ref{t:new_main}, we prove in Section \ref{s:AB} the following result, which complements the previous theorem. \begin{thm} \label{t:dipoleSymOdd} If $\widehat u_N$ is odd in $x_2$, which corresponds to $\alpha=0$, we have, as $\ensuremath{\ensuremath{\mathbf{a}}repsilon}\to 0^+$, \begin{equation*} \lambda_N^{AB}(\ensuremath{\ensuremath{\mathbf{a}}repsilon})=\widehat\lambda_N-\frac{k\pi{\widehat\beta}^2}{4^{k-1}}\left(\begin{array}{c} k-1\\ \left\lfloor\frac{k-1}2\right\rfloor\end{array}\right)^2\ensuremath{\ensuremath{\mathbf{a}}repsilon}^{2k}+o\left(\ensuremath{\ensuremath{\mathbf{a}}repsilon}^{2k}\right). \end{equation*} \end{thm} \begin{rem} As discussed in Section \ref{s:AB}, the assumption that $\widehat\lambda_N$ is simple can be slightly relaxed, admitting, in some cases, also double eigenvalues. 
\end{rem} \section{Sharp asymptotics for the eigenvalue variation}\label{sec:explicit} \subsection{Related results from the literature}\label{subsec:relres} As already mentioned in the introduction, some asymptotic expansions for the eigenvalue variation $\lambda_N-\lambda_N(\ensuremath{\ensuremath{\mathbf{a}}repsilon})$ were derived in \cite{Gad}. Let us first recall the results from \cite{Gad} which are the starting point of our analysis. Let $s:=\{(x_1,x_2)\in\ensuremath{\mathbb{R}}^2: x_2=0\text{ and }x_1\geq 1\ \text{ or }x_1 \leq -1\}$. We denote as $\mathcal Q$ the completion of $C^\infty_{\rm c}(\overline{\ensuremath{\mathbb{R}}^2_+} \setminus s)$ under the norm $( \int_{\ensuremath{\mathbb{R}}^2_+} |\nabla u|^2\,dx )^{1/2}$. From the Hardy type inequality proved in \cite{LW99} and a change of gauge, it follows that functions in $\mathcal Q$ satisfy the Hardy type inequalities \begin{equation}\label{eq:1} \frac14 \int_{\ensuremath{\mathbb{R}}^2_+} \dfrac{|\ensuremath{\mathbf{a}}rphi(x)|^2}{|x-{\mathbf e}|^2}\,dx\leq \int_{\ensuremath{\mathbb{R}}^2_+} |\nabla \ensuremath{\mathbf{a}}rphi(x)|^2\,dx, \quad\text{for all }\ensuremath{\mathbf{a}}rphi\in \mathcal Q, \end{equation} and \begin{equation}\label{eq:2} \frac14 \int_{\ensuremath{\mathbb{R}}^2_+} \dfrac{|\ensuremath{\mathbf{a}}rphi(x)|^2}{|x+{\mathbf e}|^2}\,dx\leq \int_{\ensuremath{\mathbb{R}}^2_+} |\nabla \ensuremath{\mathbf{a}}rphi(x)|^2\,dx, \quad\text{for all }\ensuremath{\mathbf{a}}rphi\in \mathcal Q, \end{equation} where $\mathbf e=(1,0)$. Inequalities \eqref{eq:1} and \eqref{eq:2} allow characterizing $\mathcal Q$ as the following concrete functional space: \[ \mathcal Q=\Big\{ u\in L^1_{\rm loc}(\ensuremath{\mathbb{R}}^2_+):\ \nabla u\in L^2(\ensuremath{\mathbb{R}}^2_+), \ \tfrac{u}{|x\pm\mathbf e|}\in L^2(\ensuremath{\mathbb{R}}^2_+ ), \text{ and } u=0 \text{ on }s\Big\}. \] We refer to the paper \cite{Gad}, where the following theorem can be found as a particular case of more general results. 
\begin{thm}[\cite{Gad}]\label{t:gad} Let $\Omega$ be a bounded open set in $\ensuremath{\mathbb{R}}^2$ satisfying \eqref{eq:38} and \eqref{eq:40}. Let $N\geq 1$ be such that the $N$-th eigenvalue $\lambda_N$ of $q_0$ on $\Omega$ is simple with associated eigenfunction $u_N$ having in $0$ a zero of order $k$ with $k$ as in \eqref{eq:orderk}. For $\ensuremath{\ensuremath{\mathbf{a}}repsilon}\in (0,\ensuremath{\ensuremath{\mathbf{a}}repsilon}_0)$, let $\lambda_N(\ensuremath{\ensuremath{\mathbf{a}}repsilon})$ be the $N$-th eigenvalue of $q_\ensuremath{\ensuremath{\mathbf{a}}repsilon}$ on $\Omega$ and $u_N^\ensuremath{\ensuremath{\mathbf{a}}repsilon}$ be its associated eigenfunction, normalized to satisfy $\int_\Omega |u_N^{\ensuremath{\ensuremath{\mathbf{a}}repsilon}}|^2\,dx=1$ and $\int_{\Omega}u_N^{\ensuremath{\ensuremath{\mathbf{a}}repsilon}}\,u_N \,dx\ge0$. Then, as $\ensuremath{\ensuremath{\mathbf{a}}repsilon}\to0^+$, \begin{align} &\frac{\lambda_N-\lambda_N(\ensuremath{\ensuremath{\mathbf{a}}repsilon})}{\ensuremath{\ensuremath{\mathbf{a}}repsilon}^{2k}} \to - \beta^2\, \displaystyle{\int_{-1}^1 \frac{\partial w_k}{\partial x_2}\,w_k \, dx_1 }, \label{eq:gad}\\ &\ensuremath{\ensuremath{\mathbf{a}}repsilon}^{-k}u_N^\ensuremath{\ensuremath{\mathbf{a}}repsilon}(\ensuremath{\ensuremath{\mathbf{a}}repsilon} x) \to \beta (\psi_k+w_k) \quad \text{in $H^{1}_{\rm loc}(\overline{\ensuremath{\mathbb{R}}^2_+})$}, \label{eq:gad2} \end{align} with $\beta\neq0$ being as in \eqref{eq:5}--\eqref{eq:orderk}, $\psi_k$ being defined in \eqref{eq:psi_k}, and $w_k$ being the unique $\mathcal Q$-weak solution to the problem \begin{equation}\label{eq:wk} \begin{cases} -\Delta w_k=0, &\text{in }\ensuremath{\mathbb{R}}^2_+, \\ w_k=0, &\text{on }s, \\ \frac{\partial w_k}{\partial \nu}=-\frac{\partial \psi_k}{\partial \nu}, &\text{on }\Gamma_1. 
\end{cases} \end{equation} \end{thm} Convergence \eqref{eq:gad} can be obtained combining \cite[Equation (4.6)]{Gad} for simple eigenvalues, \cite[Equation (3.4)]{Gad} together with \cite[Lemma 3.3]{Gad}. As well, \eqref{eq:gad2} is given by \cite[Equation (2.3)]{Gad}, which is a consequence of \cite[Theorem 5.2]{Gad}, \cite[Equation (4.10)]{Gad}, \cite[Lemma 3.3]{Gad}. For the sake of clarity and completeness, we present an alternative proof in Appendix \ref{sec:altern-proof-theor}, which relies on energy estimates obtained by an Almgren type monotonicity argument and blow-up analysis. We remark that in \cite{Gad} the author describes the limit profile $w_k$ solving \eqref{eq:wk} with polar coordinates. On the contrary, our contribution relies essentially on the use of elliptic coordinates in place of polar ones. This allows us to compute explicitly the right hand side of \eqref{eq:gad}, thus obtaining the following result. \begin{prop} \label{propmk} For any positive integer $k$, \begin{equation*} \int_{-1}^1 \frac{\partial w_k}{\partial x_2}\,w_k \, dx_1 =-\frac{k\pi}{2^{2k-1}}\left(\begin{array}{c}k-1\\ \left\lfloor\frac{k-1}2\right\rfloor\end{array}\right)^{\!2}. \end{equation*} \end{prop} The proof of Proposition \ref{propmk} relies on an explicit construction of the limit profile $w_k$, using a parametrization of the upper half-plane $\ensuremath{\mathbb{R}}^2_+$ by elliptic coordinates, a finite trigonometric expansion, and the simplification of a sum involving binomial coefficients. \subsection{Computation of the limit profile $w_k$} Let us first compute $w_k$. By uniqueness, any function in the functional space $\mathcal Q$ that satisfies all the conditions of Problem \eqref{eq:wk} is equal to $w_k$. In order to find such a function, we use the elliptic coordinates $(\xi,\eta)$ defined by \begin{equation}\label{eqChangeCoord} \begin{cases} x_1=\cosh(\xi)\cos(\eta),\\ x_2=\sinh(\xi)\sin(\eta). 
\end{cases} \end{equation} More precisely, we consider the function $F:(\xi,\eta)\mapsto (x_1,x_2)$ defined by the equations \eqref{eqChangeCoord}. It is a $C^{\infty}$ diffeomorphism from $D:=(0,+\infty)\times (0,\pi)$ to $\ensuremath{\mathbb{R}}^2_+$. We note that $F$ is actually a conformal mapping. Indeed, if we define the complex variables $z:=x_1+ix_2$ and $\zeta:=\xi+i\eta$, we have $z=\cosh(\zeta)$, which proves the claim since $\cosh$ is an entire function. Let us denote by $h(\xi,\eta)$ the scale factor associated with $F$, expressed in elliptic coordinates. We have \begin{equation*} h(\xi,\eta)=\left|\cosh'(\zeta)\right|=\left|\sinh(\zeta)\right| =\left|\sinh(\xi)\cos(\eta)+i\cosh(\xi)\sin(\eta)\right|=\sqrt{\cosh^2(\xi)-\cos^2(\eta)}. \end{equation*} For any function $u\in\mathcal Q$, let us define $U:=u\circ F$. From the fact that $F$ is conformal, it follows that $\left|\nabla U\right|$ is in $L^2(D)$ with \begin{equation*} \int_{D}\left|\nabla U\right|^2\,d\xi d\eta=\int_{\ensuremath{\mathbb{R}}^2_+}\left|\nabla u\right|^2\,dx. \end{equation*} We also have \begin{equation}\label{eq:normder} \frac{\partial u}{\partial \nu}(x)=-\frac1{h(0,\eta)}\frac{\partial U}{\partial \xi}(0,\eta) \end{equation} for any $x\in \Gamma_1$, where $\eta\in(0,\pi)$ satisfies $x=F(0,\eta)=(\cos(\eta),0)$. Furthermore, $U$ is harmonic in $D$ if, and only if, $u$ is harmonic in $\ensuremath{\mathbb{R}}^2_+$. We now give an explicit formula for $w_k\circ F$. \begin{prop} \label{propwk} For any positive integer $k$, $w_k\circ F=W_k$, where $W_k$ is defined in \eqref{eq:W_k}. \end{prop} \begin{proof} Let us begin by computing the function $\Psi_k:=\psi_k\circ F$. We have $\psi_k(x)=\mbox{Im}\left(z^k\right)$, so that $\Psi_k(\xi,\eta)=\mbox{Im}\left((\cosh(\zeta))^k\right)$, where the complex variables $z$ and $\zeta$ are defined as above. 
Using the binomial theorem, we find \begin{equation*} \Psi_k(\xi,\eta)=\mbox{Im}\left(\frac1{2^k} \sum_{j=0}^k\left(\begin{array}{c}k\\ j\end{array}\right)e^{(k-2j)\zeta}\right)= \frac1{2^k}\sum_{j=0}^k\left(\begin{array}{c}k\\ j\end{array}\right) e^{(k-2j)\xi}\sin\left((k-2j)\eta\right). \end{equation*} This can be written \begin{equation*} \Psi_k(\xi,\eta)=\frac1{2^{k-1}}\sum_{j=0}^{\left\lfloor\frac{k-1}2\right\rfloor}\left(\begin{array}{c}k\\ j\end{array}\right)\sinh\left((k-2j)\xi\right)\sin\left((k-2j)\eta\right) \end{equation*} by grouping terms of the sum in pairs, starting from opposite extremities. In particular, for all $\eta\in(0,\pi)$, \begin{equation*} \frac{\partial \Psi_k}{\partial \xi}(0,\eta)=\frac1{2^{k-1}}\sum_{j=0}^{\left\lfloor\frac{k-1}2\right\rfloor}(k-2j)\left(\begin{array}{c}k\\ j\end{array}\right)\sin\left((k-2j)\eta\right). \end{equation*} We now define \begin{equation*} V(\xi,\eta)=\frac1{2^{k-1}}\sum_{j=0}^{\left\lfloor\frac{k-1}2\right\rfloor}\left(\begin{array}{c}k\\ j\end{array}\right)e^{-(k-2j)\xi}\sin\left((k-2j)\eta\right). \end{equation*} The function $|\nabla V|$ is in $L^2(D)$ and, for all $\eta\in(0,\pi)$, \begin{equation*} \frac{\partial V}{\partial \xi}(0,\eta)= -\frac1{2^{k-1}}\sum_{j=0}^{\left\lfloor\frac{k-1}2\right\rfloor} (k-2j)\left(\begin{array}{c}k\\ j\end{array}\right)\sin\left((k-2j)\eta\right). \end{equation*} Additionally, $V$ vanishes on half-lines defined by $\eta=0$ and $\eta=\pi$, which are the lower and upper boundary of $D$, respectively, and are mapped to $\ensuremath{\mathbb{R}}\times\{0\}\setminus\Gamma_1$ by $F$. It can be checked directly that $V\circ F^{-1}\in \mathcal Q$. Finally, $V$ is harmonic in $D$, since it is a linear combination of functions of the type $(\xi,\eta)\mapsto e^{\pm n \xi}e^{\pm i n \eta}$, which are harmonic. We conclude that $V\circ F^{-1}$ is a solution of Problem \eqref{eq:wk}, and therefore $V=w_k\circ F$ by uniqueness. 
\end{proof} \begin{proof}[Proof of Theorem \ref{t:blowup2}] Theorem \ref{t:blowup2} follows combining Theorem \ref{t:gad} and Proposition \ref{propwk}. \end{proof} \begin{cor} \label{cormk} For any positive integer $k\ge1$, \begin{equation}\label{eq:const1} \int_{-1}^1 \frac{\partial w_k}{\partial x_2}\,w_k \, dx_1 =-\frac\pi{2^{2k-1}}\sum_{j=0}^{\left\lfloor\frac{k-1}2\right\rfloor}(k-2j)\left(\begin{array}{c}k\\ j\end{array}\right)^2. \end{equation} \end{cor} \begin{proof} Using \eqref{eq:W_k}, a direct computation gives \begin{equation*} \nabla W_k(\xi,\eta)=\frac1{2^{k-1}}\sum_{j=0}^{\left\lfloor\frac{k-1}2\right\rfloor}(k-2j)\left(\begin{array}{c}k\\ j\end{array}\right)e^{-(k-2j)\xi}\left(-\sin\left((k-2j)\eta\right),\cos\left((k-2j)\eta\right)\right). \end{equation*} Recalling \eqref{eq:normder}, we perform a standard change of variables in the left-hand side of \eqref{eq:const1} to elliptic coordinates and this yields the thesis. \end{proof} \subsection{Simplification of the sum} We now prove the following result. \begin{lem} \label{lemSumBin} For every integer $k\ge1$, \begin{equation*} \sum_{j=0}^{\left\lfloor\frac{k-1}2\right\rfloor}(k-2j)\left(\begin{array}{c} k \\ j\end{array}\right)^{\!\!2}=k\left(\begin{array}{c}k-1\\ \left\lfloor\frac{k-1}2\right\rfloor\end{array}\right)^{\!\!2}. \end{equation*} \end{lem} \begin{proof}We will use repeatedly the two following properties of binomial coefficients. First, the \emph{Vandermonde identity}: for any non-negative integers $m$, $n$ and $r$, \begin{equation} \label{eqVdM} \sum_{j=0}^r\left(\begin{array}{c} m \\ j\end{array}\right)\left(\begin{array}{c} n \\ r-j\end{array}\right)=\left(\begin{array}{c} m+n \\ r\end{array}\right); \end{equation} and second, the elementary identity \begin{equation} \label{eqElem} \left(\begin{array}{c} n \\ r\end{array}\right)=\frac{n}{r}\left(\begin{array}{c} n-1 \\ r-1\end{array}\right) \end{equation} with $n$ and $r$ positive integers. Let us now fix an integer $k\ge 1$. 
To simplify the notation, we write \begin{equation*} s:=\left\lfloor\frac{k-1}2\right\rfloor \hspace{1cm} \mbox{ and } \hspace{1cm} S:=\sum_{j=0}^s(k-2j)\left(\begin{array}{c} k \\ j\end{array}\right)^{\!\!2}. \end{equation*} Next, we remark that \begin{equation*} S=S_0-\frac2kS_1-\frac2kS_2, \end{equation*} with \begin{equation*} S_0:=\sum_{j=0}^s k\left(\begin{array}{c} k \\ j\end{array}\right)^{\!\!2},\quad S_1:=\sum_{j=0}^s j(k-j)\left(\begin{array}{c} k \\ j\end{array}\right)^{\!\!2},\quad S_2:=\sum_{j=0}^s j^2\left(\begin{array}{c} k \\ j\end{array}\right)^{\!\!2}. \end{equation*} Let us compute the previous sums when $k=2p+1$, with $p$ a non-negative integer. We first have \begin{equation*} \frac{S_0}k=\frac12\sum_{j=0}^k\left(\begin{array}{c} k \\ j\end{array}\right)^{\!\!2}=\frac12\left(\begin{array}{c} 2k \\ k\end{array}\right), \end{equation*} where the last equality is a special case of identity \eqref{eqVdM}. We then find \begin{multline*} S_1=\frac12\sum_{j=0}^kj\left(\begin{array}{c} k \\ j\end{array}\right)(k-j)\left(\begin{array}{c} k \\ k-j\end{array}\right)=\frac{k^2}2\sum_{j=1}^{k-1}\left(\begin{array}{c} k-1 \\ j-1\end{array}\right)\left(\begin{array}{c} k-1 \\ k-j-1\end{array}\right)\\ = \frac{k^2}2\sum_{\ell=0}^{k-2}\left(\begin{array}{c} k-1 \\ \ell\end{array}\right)\left(\begin{array}{c} k-1 \\ k-2-\ell\end{array}\right)=\frac{k^2}2\left(\begin{array}{c} 2k-2 \\ k-2\end{array}\right) \end{multline*} by applying Identity \eqref{eqElem} followed by \eqref{eqVdM}. 
Finally, Identity \eqref{eqElem} implies \begin{multline*} S_2=k^2\sum_{j=1}^p\left(\begin{array}{c} k-1 \\ j-1\end{array}\right)^{\!\!2}=k^2\sum_{\ell=0}^{p-1}\left(\begin{array}{c} k-1 \\ \ell\end{array}\right)^{\!\!2} \\ = \frac{k^2}2\left(\sum_{\ell=0}^{k-1}\left(\begin{array}{c} k-1 \\ \ell\end{array}\right)^{\!\!2}-\left(\begin{array}{c} k-1 \\ p\end{array}\right)^{\!\!2}\right)=\frac{k^2}2\left(\begin{array}{c} 2k-2 \\ k-1\end{array}\right)-\frac{k^2}2\left(\begin{array}{c} k-1 \\ p\end{array}\right)^{\!\!2}. \end{multline*} We obtain \begin{multline*} S=\frac{k}2 \left(\begin{array}{c} 2k \\ k\end{array}\right)-k\left( \begin{array}{c} 2k-2 \\ k-2\end{array}\right)-k\left(\begin{array}{c} 2k-2 \\ k-1\end{array}\right)+k\left(\begin{array}{c} k-1 \\ p\end{array}\right)^{\!\!2} \\ =\frac{k}2\left(\begin{array}{c} 2k \\ k\end{array}\right)-k \left(\begin{array}{c} 2k-1 \\ k-1\end{array}\right)+k\left(\begin{array}{c} k-1 \\ p\end{array}\right)^{\!\!2} =k\left(\begin{array}{c} k-1 \\ p\end{array}\right)^{\!\!2} \end{multline*} where the second equality follows from Pascal's identity and the third from Identity \eqref{eqElem}. Let us now treat the case $k=2p$, with $p$ a positive integer. 
In a similar way as before, we find \begin{equation*} S_0=\frac{k}2\left(\sum_{j=0}^k\left(\begin{array}{c} k \\ j\end{array}\right)^{\!\!2}-\left(\begin{array}{c} k \\ p\end{array}\right)^{\!\!2}\right)=\frac{k}2\left(\begin{array}{c} 2k \\ k\end{array}\right)-\frac{k}2\left(\begin{array}{c} k \\ p\end{array}\right)^{\!\!2}, \end{equation*} \begin{equation*} S_1=\frac12\left(\sum_{j=0}^kj\left(\begin{array}{c} k \\ j\end{array}\right)(k-j)\left(\begin{array}{c} k \\ k-j\end{array}\right)-p^2\left(\begin{array}{c} k \\ p\end{array}\right)^{\!\!2}\right)=\frac{k^2}2\left(\begin{array}{c} 2k-2 \\ k-2\end{array}\right)-\frac{k^2}8\left(\begin{array}{c} k \\ p\end{array}\right)^{\!\!2} \end{equation*} and \[ S_2=k^2\sum_{j=0}^{p-2}\left(\begin{array}{c} k-1 \\ j\end{array}\right)^{\!\!2} =\frac{k^2}2\left(\sum_{j=0}^{k-1}\left(\begin{array}{c} k-1 \\ j\end{array}\right)^{\!\!2}-2 \left(\begin{array}{c} k-1 \\ p-1\end{array}\right)^{\!\!2}\right) =\frac{k^2}2\left(\begin{array}{c} 2k-2 \\ k-1\end{array}\right)-k^2\left(\begin{array}{c} k-1 \\ p-1\end{array}\right)^{\!\!2}. \] We finally obtain, after simplifications, \begin{equation*} S=k\left(\begin{array}{c} k-1 \\ p-1\end{array}\right)^2. \end{equation*} This completes the proof of Lemma \ref{lemSumBin}. \end{proof} \subsection{Conclusions} By the results from the preceding subsections, we can now prove Proposition \ref{propmk} and Theorem \ref{t:new_main}. \begin{proof}[Proof of Proposition \ref{propmk}] It follows from Corollary \ref{cormk} and Lemma \ref{lemSumBin}. \end{proof} Combining the above results, we can now prove our main theorem. \begin{proof}[Proof of Theorem \ref{t:new_main}] Theorem \ref{t:new_main} follows from the combination of Theorem \ref{t:gad} and Proposition \ref{propmk}. 
\end{proof} \section{Asymptotic estimates for Aharonov--Bohm eigenvalues} \label{s:AB} \subsection{Symmetry for the Aharonov--Bohm operator} As in Section \ref{subs:IntroAB}, we assume $\widehat\Omega\subset \ensuremath{\mathbb{R}}^2$ to be a bounded open set with a Lipschitz boundary, such that $0\in \widehat\Omega$. We additionally assume that $\widehat\Omega$ is symmetric with respect to the $x_1$-axis and that $\Omega:=\widehat\Omega\cap\ensuremath{\mathbb{R}}^2_+$ also has a Lipschitz boundary. \begin{figure} \caption{The domain considered for Aharonov--Bohm eigenvalues with collapsing symmetric poles.} \label{fig:2} \end{figure} According to \cite[Theorem VIII.15]{ReeSim80}, there exists a unique Friedrichs extension $H_\ensuremath{\ensuremath{\mathbf{a}}repsilon}$ of the quadratic form $q^{AB}_\ensuremath{\ensuremath{\mathbf{a}}repsilon}$, that is to say a self-adjoint operator whose domain $\mathcal D(H_\ensuremath{\ensuremath{\mathbf{a}}repsilon})$ is contained in $\mathcal Q^{AB}_\ensuremath{\ensuremath{\mathbf{a}}repsilon}$ and which satisfies \begin{equation*} \langle H_\ensuremath{\ensuremath{\mathbf{a}}repsilon} u,v \rangle= q^{AB}_\ensuremath{\ensuremath{\mathbf{a}}repsilon}(u,v) =\int_{\widehat\Omega}(i\nabla+\ensuremath{\mathbf{A}}_\ensuremath{\ensuremath{\mathbf{a}}repsilon})u\cdot \overline{(i\nabla+\ensuremath{\mathbf{A}}_\ensuremath{\ensuremath{\mathbf{a}}repsilon})v}\,dx\quad \mbox{for all } u,v\in \mathcal D(H_\ensuremath{\ensuremath{\mathbf{a}}repsilon}), \end{equation*} where we are denoting by $q^{AB}_\ensuremath{\ensuremath{\mathbf{a}}repsilon}$ both the quadratic form defined in \eqref{eq:QuadAB2Poles} and the associated bilinear form (see Figure \ref{fig:2}). We recall in this section the results proved in \cite{AFHL2016} concerning the properties of $H_\ensuremath{\ensuremath{\mathbf{a}}repsilon}$, in particular the effect of the symmetry of the domain on its spectrum. 
Since most of the proofs in the present section reduce to a series of standard verifications, we generally only give an indication of them. We use gauge functions $\Phi_\ensuremath{\ensuremath{\mathbf{a}}repsilon}$, for $\ensuremath{\ensuremath{\mathbf{a}}repsilon} \in (0,\ensuremath{\ensuremath{\mathbf{a}}repsilon}_0]$, whose existence is guaranteed by the following result. In the sequel we denote by $\sigma$ the reflection through the $x_1$-axis, i.e.\ $\sigma(x_1,x_2)=(x_1,-x_2)$. \begin{lem} \label{l:gauge} For each $\ensuremath{\ensuremath{\mathbf{a}}repsilon}>0$, there exists a function $\Phi_{\ensuremath{\ensuremath{\mathbf{a}}repsilon}}$ in $C^{\infty}\left(\ensuremath{\mathbb{R}}^2\setminus \Gamma_\ensuremath{\ensuremath{\mathbf{a}}repsilon}\right)$ satisfying \begin{enumerate}[(i)] \item $\Phi_\ensuremath{\ensuremath{\mathbf{a}}repsilon}\circ\sigma=\overline{\Phi}_\ensuremath{\ensuremath{\mathbf{a}}repsilon}$ in $\ensuremath{\mathbb{R}}^2\setminus \Gamma_\ensuremath{\ensuremath{\mathbf{a}}repsilon}$; \item $\left|\Phi_\ensuremath{\ensuremath{\mathbf{a}}repsilon}\right|=1$ in $\ensuremath{\mathbb{R}}^2\setminus \Gamma_\ensuremath{\ensuremath{\mathbf{a}}repsilon}$; \item $\left(i\nabla+\ensuremath{\mathbf{A}}_\ensuremath{\ensuremath{\mathbf{a}}repsilon}\right)\Phi_\ensuremath{\ensuremath{\mathbf{a}}repsilon}=0$ in $\ensuremath{\mathbb{R}}^2\setminus \Gamma_\ensuremath{\ensuremath{\mathbf{a}}repsilon}$; \item $\Phi_\ensuremath{\ensuremath{\mathbf{a}}repsilon}=1$ on $(\ensuremath{\mathbb{R}}\times\{0\})\setminus\Gamma_\ensuremath{\ensuremath{\mathbf{a}}repsilon}$ and $\lim_{\delta\to0^+}\Phi_\ensuremath{\ensuremath{\mathbf{a}}repsilon}(t,\pm \delta)=\pm i$ for every $t\in(-\ensuremath{\ensuremath{\mathbf{a}}repsilon},\ensuremath{\ensuremath{\mathbf{a}}repsilon})$. 
\end{enumerate} \end{lem} We define the anti-unitary operators $K_\ensuremath{\ensuremath{\mathbf{a}}repsilon}$ and $\Sigma^c$ by $K_\ensuremath{\ensuremath{\mathbf{a}}repsilon} u:=\Phi_\ensuremath{\ensuremath{\mathbf{a}}repsilon}^2 \overline u$ and $\Sigma^c u:=\overline u\circ \sigma$. The subspace $\mathcal D(H_\ensuremath{\ensuremath{\mathbf{a}}repsilon})\subset \mathcal H$ is preserved by $K_\ensuremath{\ensuremath{\mathbf{a}}repsilon}$ and $\Sigma^c$. The operators $K_\ensuremath{\ensuremath{\mathbf{a}}repsilon}$, $\Sigma^c$ and $H_\ensuremath{\ensuremath{\mathbf{a}}repsilon}$ mutually commute. In particular, we can define the following subsets \begin{align*} \mathcal H_{K,\ensuremath{\ensuremath{\mathbf{a}}repsilon}}&:=\{u\in\mathcal H:\,K_\ensuremath{\ensuremath{\mathbf{a}}repsilon} u=u\};\\ \mathcal D(H_{K,\ensuremath{\ensuremath{\mathbf{a}}repsilon}})&:=\{u\in\mathcal D(H_\ensuremath{\ensuremath{\mathbf{a}}repsilon}):\,K_\ensuremath{\ensuremath{\mathbf{a}}repsilon} u=u\}. \end{align*} The scalar product $\langle\cdot\,,\cdot\rangle$ gives $\mathcal H_{K,\ensuremath{\ensuremath{\mathbf{a}}repsilon}}$ the structure of a real Hilbert space. As suggested by the notation, we define $H_{K,\ensuremath{\ensuremath{\mathbf{a}}repsilon}}$ as the restriction of $H_\ensuremath{\ensuremath{\mathbf{a}}repsilon}$ to $\mathcal D(H_{K,\ensuremath{\ensuremath{\mathbf{a}}repsilon}})$. It is a positive self-adjoint operator on $\mathcal H_{K,\ensuremath{\ensuremath{\mathbf{a}}repsilon}}$ of domain $\mathcal D(H_{K,\ensuremath{\ensuremath{\mathbf{a}}repsilon}})$, with compact resolvent. It has the same eigenvalues as $H_\ensuremath{\ensuremath{\mathbf{a}}repsilon}$, with the same multiplicities. The fact that $K$ and $\Sigma^c$ commute ensures that $\mathcal H_{K,\ensuremath{\ensuremath{\mathbf{a}}repsilon}}$ and $\mathcal D(H_{K,\ensuremath{\ensuremath{\mathbf{a}}repsilon}})$ are $\Sigma^c$-invariant. 
We can therefore define \begin{align*} \mathcal H_{K,\ensuremath{\ensuremath{\mathbf{a}}repsilon}}^s&:=\{u\in\mathcal H_{K,\ensuremath{\ensuremath{\mathbf{a}}repsilon}}:\Sigma^cu=u\};\\ \mathcal D(H_{K,\ensuremath{\ensuremath{\mathbf{a}}repsilon}}^s)&:=\{u\in\mathcal D(H_{K,\ensuremath{\ensuremath{\mathbf{a}}repsilon}}):\Sigma^cu=u\};\\ \mathcal H_{K,\ensuremath{\ensuremath{\mathbf{a}}repsilon}}^a&:=\{u\in\mathcal H_{K,\ensuremath{\ensuremath{\mathbf{a}}repsilon}}:\Sigma^cu=-u\};\\ \mathcal D(H_{K,\ensuremath{\ensuremath{\mathbf{a}}repsilon}}^a)&:=\{u\in\mathcal D(H_{K,\ensuremath{\ensuremath{\mathbf{a}}repsilon}}):\Sigma^cu=-u\}. \end{align*} We have the following orthogonal decomposition of $\mathcal H_{K,\ensuremath{\ensuremath{\mathbf{a}}repsilon}}$ into spaces of symmetric and antisymmetric functions: \begin{equation} \label{eq:MagnDecomp} \mathcal H_{K,\ensuremath{\ensuremath{\mathbf{a}}repsilon}}=\mathcal H_{K,\ensuremath{\ensuremath{\mathbf{a}}repsilon}}^s \oplus \mathcal H_{K,\ensuremath{\ensuremath{\mathbf{a}}repsilon}}^a. \end{equation} We also define $H_{K,\ensuremath{\ensuremath{\mathbf{a}}repsilon}}^s$ and $H_{K,\ensuremath{\ensuremath{\mathbf{a}}repsilon}}^a$ as the restrictions of $H_{K,\ensuremath{\ensuremath{\mathbf{a}}repsilon}}$ to $\mathcal D(H_{K,\ensuremath{\ensuremath{\mathbf{a}}repsilon}}^s)$ and $\mathcal D(H_{K,\ensuremath{\ensuremath{\mathbf{a}}repsilon}}^a)$ respectively. The operator $H_{K,\ensuremath{\ensuremath{\mathbf{a}}repsilon}}^s$ is positive and self-adjoint on $\mathcal H_{K,\ensuremath{\ensuremath{\mathbf{a}}repsilon}}^s$ of domain $\mathcal D(H_{K,\ensuremath{\ensuremath{\mathbf{a}}repsilon}}^s)$ , with compact resolvent. Similar conclusions hold for $\mathcal H_{K,\ensuremath{\ensuremath{\mathbf{a}}repsilon}}^a$. Decomposition \eqref{eq:MagnDecomp} implies the following result. 
\begin{lem} The spectrum of $H_{K,\ensuremath{\ensuremath{\mathbf{a}}repsilon}}$ is the union of the spectra of $H_{K,\ensuremath{\ensuremath{\mathbf{a}}repsilon}}^s$ and $H_{K,\ensuremath{\ensuremath{\mathbf{a}}repsilon}}^a$, counted with multiplicities. \end{lem} \begin{rem} Let us note that we can give an alternative description of the spectra of $H_{K,\ensuremath{\ensuremath{\mathbf{a}}repsilon}}^s$ and $H_{K,\ensuremath{\ensuremath{\mathbf{a}}repsilon}}^a$. One can check that they are the spectra of the quadratic form $q^{AB}_\ensuremath{\ensuremath{\mathbf{a}}repsilon}$ restricted to $\mathcal Q^{AB}_\ensuremath{\ensuremath{\mathbf{a}}repsilon} \cap \mathcal H_{K,\ensuremath{\ensuremath{\mathbf{a}}repsilon}}^s$ and $\mathcal Q^{AB}_\ensuremath{\ensuremath{\mathbf{a}}repsilon} \cap \mathcal H_{K,\ensuremath{\ensuremath{\mathbf{a}}repsilon}}^a$ respectively. These spectra can therefore be obtained by the min-max principle. \end{rem} \subsection{Isospectrality} In this subsection, we establish an isospectrality result between Aharonov-Bohm eigenvalue problems with symmetry and Laplacian eigenvalue problems with mixed boundary conditions, in the spirit of \cite{BN-He-HH2009}. To this aim, we define an additional family of eigenvalue problems, similar to Problems \eqref{eqD} and \eqref{eqDND}. With the notation $\partial\Omega_+:=\partial \Omega\cap \ensuremath{\mathbb{R}}_+^2$ and $\partial\Omega_0:=\partial \Omega\cap (\ensuremath{\mathbb{R}}\times\{0\})$, we consider the eigenvalue problem \begin{equation}\label{eqN} \begin{cases} -\Delta\,u=\lambda\,u,& \mbox{in }\Omega,\\ u=0,&\mbox{on } \partial\Omega_+,\\ \frac{\partial u}{\partial \nu}=0,&\mbox{on } \partial\Omega_0. \end{cases} \end{equation} We denote by $(\mu_j)_{j\ge1}$ the eigenvalues of Problem \eqref{eqN}. 
We also consider, for each $\ensuremath{\ensuremath{\mathbf{a}}repsilon}\in(0,\ensuremath{\ensuremath{\mathbf{a}}repsilon}_0]$, \begin{equation}\label{eqNDN} \begin{cases} -\Delta\,u=\lambda\,u,& \mbox{in }\Omega,\\ u=0,&\mbox{on } \partial\Omega_+ \cup\Gamma_{\ensuremath{\ensuremath{\mathbf{a}}repsilon}},\\ \frac{\partial u}{\partial \nu}=0,&\mbox{on } \partial\Omega_0\setminus\Gamma_{\ensuremath{\ensuremath{\mathbf{a}}repsilon}}, \end{cases} \end{equation} and denote by $(\mu_j(\ensuremath{\ensuremath{\mathbf{a}}repsilon}))_{j\ge1}$ the corresponding eigenvalues. In order to give rigorous definitions, we use a weak formulation. We define \begin{equation*} \mathcal{R}_0=\left\{u\in H^1(\Omega)\,;\,\chi_{\partial\Omega_+}\gamma_0u=0\mbox{ in } L^2(\partial \Omega)\right\}, \end{equation*} and, for $\ensuremath{\ensuremath{\mathbf{a}}repsilon}\in(0,\ensuremath{\ensuremath{\mathbf{a}}repsilon}_0]$, \begin{equation*} \mathcal{R}_\ensuremath{\ensuremath{\mathbf{a}}repsilon}=\left\{u\in H^1(\Omega)\,;\,\chi_{\partial\Omega_+\cup\Gamma_{\ensuremath{\ensuremath{\mathbf{a}}repsilon}}}\gamma_0u=0\mbox{ in } L^2(\partial \Omega)\right\}. \end{equation*} We denote by $r_0$ and $r_\ensuremath{\ensuremath{\mathbf{a}}repsilon}$ the restrictions of the quadratic form $q$, defined in Equation \eqref{eqQuad}, to $\mathcal{R}_0$ and $\mathcal{R}_\ensuremath{\ensuremath{\mathbf{a}}repsilon}$ respectively. We then define $(\mu_j)_{j\ge1}$ and $(\mu_j(\ensuremath{\ensuremath{\mathbf{a}}repsilon}))_{j\ge1}$ as, respectively, the eigenvalues of the quadratic forms $r_0$ and $r_\ensuremath{\ensuremath{\mathbf{a}}repsilon}$; they are obtained by the min-max principle. \begin{rem} \label{remDecompLap} We can give another interpretation of the eigenvalues $(\mu_j)_{j\ge1}$ and $(\lambda_j)_{j\ge1}$. 
Using the unitary operator $\Sigma: u\mapsto u\circ\sigma$, we obtain an orthogonal decomposition of $L^2\big(\widehat\Omega\big)$ into symmetric and antisymmetric functions: \begin{equation} \label{eq:DecompLap} L^2\big(\widehat\Omega\big)=\mbox{ker}\left(I-\Sigma\right)\oplus\mbox{ker}\left(I+\Sigma\right). \end{equation} This decomposition is preserved by the action of the Dirichlet Laplacian $-\widehat \Delta$, and we can therefore define $-\Delta^s$ (resp. $-\Delta^a$) as the restriction of $-\widehat \Delta$ to symmetric (resp. antisymmetric) functions in the domain of $-\widehat \Delta$. One can then check that $(\mu_j)_{j\ge1}$ is the spectrum of $-\Delta^s$ and $(\lambda_j)_{j\ge1}$ is the spectrum of $-\Delta^a$. \end{rem} It remains to connect the eigenvalues of Problems \eqref{eqNDN} and \eqref{eqDND} to the eigenvalues of $H_\ensuremath{\ensuremath{\mathbf{a}}repsilon}$. To this end, we define the following linear operator, which performs a gauge transformation: \begin{equation*} \begin{array}{cccc} U_\ensuremath{\ensuremath{\mathbf{a}}repsilon}:&\mathcal H&\to&L^2(\Omega,\ensuremath{\mathbb{C}})\\[3pt] &u&\mapsto&\sqrt2\,\overline\Phi_\ensuremath{\ensuremath{\mathbf{a}}repsilon} u_{|\Omega}. \end{array} \end{equation*} We recall that $L^2(\Omega)$ denotes the real Hilbert space of real-valued $L^2$ functions in $\Omega$. We have the following result. 
\begin{lem}\label{l:IsoAS} The operator $U_\ensuremath{\ensuremath{\mathbf{a}}repsilon}$ satisfies the following properties: \begin{enumerate}[(i)] \item $U_\ensuremath{\ensuremath{\mathbf{a}}repsilon}\left(\mathcal H_{K,\ensuremath{\ensuremath{\mathbf{a}}repsilon}}\right)\subset L^2(\Omega)$ and $U_\ensuremath{\ensuremath{\mathbf{a}}repsilon}\left(\mathcal Q^{AB}_\ensuremath{\ensuremath{\mathbf{a}}repsilon}\right)\subset H^1(\Omega,\ensuremath{\mathbb{C}})$; \item $U_\ensuremath{\ensuremath{\mathbf{a}}repsilon}$ induces a real-unitary bijective map from $\mathcal Q^{AB}_\ensuremath{\ensuremath{\mathbf{a}}repsilon} \cap \mathcal H_{K,\ensuremath{\ensuremath{\mathbf{a}}repsilon}}^s$ to $\mathcal R_\ensuremath{\ensuremath{\mathbf{a}}repsilon}$ such that $q^{AB}_\ensuremath{\ensuremath{\mathbf{a}}repsilon} (u)=q\left(U_\ensuremath{\ensuremath{\mathbf{a}}repsilon} u\right)$ for all $u\in \mathcal Q^{AB}_\ensuremath{\ensuremath{\mathbf{a}}repsilon} \cap \mathcal H_{K,\ensuremath{\ensuremath{\mathbf{a}}repsilon}}^s$; \item $U_\ensuremath{\ensuremath{\mathbf{a}}repsilon}$ induces a real-unitary bijective map from $\mathcal Q^{AB}_\ensuremath{\ensuremath{\mathbf{a}}repsilon} \cap \mathcal H_{K,\ensuremath{\ensuremath{\mathbf{a}}repsilon}}^a$ to $\mathcal Q_\ensuremath{\ensuremath{\mathbf{a}}repsilon}$ such that $q^{AB}_\ensuremath{\ensuremath{\mathbf{a}}repsilon} (u)=q\left(U_\ensuremath{\ensuremath{\mathbf{a}}repsilon} u\right)$ for all $u\in \mathcal Q^{AB}_\ensuremath{\ensuremath{\mathbf{a}}repsilon} \cap \mathcal H_{K,\ensuremath{\ensuremath{\mathbf{a}}repsilon}}^a$. 
\end{enumerate} \end{lem} \begin{proof} If $u\in \mathcal H_{K,\ensuremath{\ensuremath{\mathbf{a}}repsilon}}$, then $u=\Phi_\ensuremath{\ensuremath{\mathbf{a}}repsilon}^2\overline u$, so that $\overline \Phi_\ensuremath{\ensuremath{\mathbf{a}}repsilon} u= \overline{\overline \Phi_\ensuremath{\ensuremath{\mathbf{a}}repsilon} u}$, that is to say $\overline \Phi_\ensuremath{\ensuremath{\mathbf{a}}repsilon} u$ is real-valued. This proves the first half of (i). For the second half, let us assume that $u\in\mathcal Q^{AB}_\ensuremath{\ensuremath{\mathbf{a}}repsilon}$. Using the definition of $\mathcal Q^{AB}_\ensuremath{\ensuremath{\mathbf{a}}repsilon}$, given in Equation \eqref{eq:QAB2Poles}, and Property (iii) of Lemma \ref{l:gauge}, we find the following identity, in the sense of distributions in $\Omega$: \begin{equation*} \nabla\left(\overline \Phi_\ensuremath{\ensuremath{\mathbf{a}}repsilon} u\right)=\overline \Phi_\ensuremath{\ensuremath{\mathbf{a}}repsilon}\nabla u+\nabla\left(\overline\Phi_\ensuremath{\ensuremath{\mathbf{a}}repsilon}\right)u=\overline \Phi_\ensuremath{\ensuremath{\mathbf{a}}repsilon}\left(\nabla-iA_\ensuremath{\ensuremath{\mathbf{a}}repsilon}\right)u\quad\text{in $\Omega$}. \end{equation*} This proves that $\overline\Phi_\ensuremath{\ensuremath{\mathbf{a}}repsilon} u_{|\Omega} \in H^1(\Omega,\ensuremath{\mathbb{C}})$ and that \begin{equation*} \int_{\Omega}\left|\left(\nabla-iA_\ensuremath{\ensuremath{\mathbf{a}}repsilon}\right)u\right|^2\,dx= \int_{\Omega}\left|\nabla\left(\overline\Phi_\ensuremath{\ensuremath{\mathbf{a}}repsilon} u\right)\right|^2\,dx. \end{equation*} Let us now additionally assume that $u\in \mathcal Q^{AB}_\ensuremath{\ensuremath{\mathbf{a}}repsilon} \cap \mathcal H_{K,\ensuremath{\ensuremath{\mathbf{a}}repsilon}}^s$. Since $\Sigma^cu=u$, Property (i) of Lemma \ref{l:gauge} implies that $(\overline\Phi_\ensuremath{\ensuremath{\mathbf{a}}repsilon} u)\circ\sigma=\Phi_\ensuremath{\ensuremath{\mathbf{a}}repsilon} \overline u$. 
Therefore, \begin{equation*} \int_{\widehat\Omega}\left|u\right|^2\,dx=2\int_{\Omega}\left|\overline\Phi_\ensuremath{\ensuremath{\mathbf{a}}repsilon} u\right|^2\,dx=\int_{\Omega}\left|U_\ensuremath{\ensuremath{\mathbf{a}}repsilon} u\right|^2\,dx. \end{equation*} Furthermore, Property (iv) of Lemma \ref{l:gauge} and the equation $\Sigma^cu=u$ imply that $u$ vanishes on $\Gamma_\ensuremath{\ensuremath{\mathbf{a}}repsilon}$, hence $U_\ensuremath{\ensuremath{\mathbf{a}}repsilon} u\in \mathcal R_\ensuremath{\ensuremath{\mathbf{a}}repsilon}$. This implies that $\overline\Phi_\ensuremath{\ensuremath{\mathbf{a}}repsilon} u\in H^1(\widehat \Omega)$ and \begin{equation*} \int_{\widehat\Omega}\left|\left(\nabla-iA_\ensuremath{\ensuremath{\mathbf{a}}repsilon}\right)u\right|^2\,dx =\int_{\widehat\Omega}\left|\nabla\left(\overline\Phi_\ensuremath{\ensuremath{\mathbf{a}}repsilon} u\right)\right|^2\,dx= 2\int_{\Omega}\left|\nabla\left(\overline\Phi_\ensuremath{\ensuremath{\mathbf{a}}repsilon} u\right)\right|^2\,dx=\int_{\Omega}\left|\nabla\left(U_\ensuremath{\ensuremath{\mathbf{a}}repsilon} u\right)\right|^2\,dx. \end{equation*} We conclude that the mapping $U_\ensuremath{\ensuremath{\mathbf{a}}repsilon}: \mathcal Q^{AB}_\ensuremath{\ensuremath{\mathbf{a}}repsilon} \cap \mathcal H_{K,\ensuremath{\ensuremath{\mathbf{a}}repsilon}}^s\to\mathcal R_\ensuremath{\ensuremath{\mathbf{a}}repsilon}$ is well-defined, real-unitary, and that $q^{AB}_\ensuremath{\ensuremath{\mathbf{a}}repsilon} (u)=q\left(U_\ensuremath{\ensuremath{\mathbf{a}}repsilon} u\right)$. To show that the mapping is bijective, we consider the operator $V_\ensuremath{\ensuremath{\mathbf{a}}repsilon}$ defined in the following way: given $v\in L^2(\Omega)$, we denote by $\widetilde v$ its extension by symmetry to $\widehat \Omega$ and we set \begin{equation*} V_\ensuremath{\ensuremath{\mathbf{a}}repsilon} v:=\frac1{\sqrt2}\Phi_\ensuremath{\ensuremath{\mathbf{a}}repsilon} \widetilde v. 
\end{equation*} It can be checked, in a way similar to what has been done for $U_\varepsilon$, that $V_\varepsilon$ induces the inverse of $U_\varepsilon$, from $\mathcal R_\varepsilon$ to $\mathcal Q^{AB}_\varepsilon \cap \mathcal H_{K,\varepsilon}^s$. This proves (ii). The proof of (iii) is similar, the difference being that we must check that $\overline\Phi_\varepsilon u$ vanishes on $(\ensuremath{\mathbb{R}}\times\{0\})\setminus\Gamma_\varepsilon$ when $u\in \mathcal Q^{AB}_\varepsilon \cap \mathcal H_{K,\varepsilon}^a$. \end{proof} \begin{cor} \label{c:MagnEV} The spectra of $H_{K,\varepsilon}^s$ and $H_{K,\varepsilon}^a$ are $(\mu_j(\varepsilon))_{j\ge1}$ and $(\lambda_j(\varepsilon))_{j\ge1}$ respectively. \end{cor} \subsection{Eigenvalue variations} \label{subs:EigVar} Let us first state some auxiliary results, which we prove in Appendix \ref{a:B}. \begin{prop} \label{p:ConvEven} For all $N\in\ensuremath{\mathbb{N}}^*$, $\mu_N(\varepsilon)\to \mu_N$ as $\varepsilon\to 0$. \end{prop} \begin{prop} \label{p:AsymptNDN} Let $\mu_N$ be a simple eigenvalue of $-\Delta^s$ (see Remark \ref{remDecompLap}) and $u_N$ be an associated eigenfunction, normalized in $L^2\big(\widehat\Omega\big)$. 
If $u_N(0)\neq0$, then \begin{equation*} \mu_N(\ensuremath{\ensuremath{\mathbf{a}}repsilon})=\mu_N+\frac{2\pi}{|\log(\ensuremath{\ensuremath{\mathbf{a}}repsilon})|}u_N^2 (0)+o\left(\frac1{|\log(\ensuremath{\ensuremath{\mathbf{a}}repsilon})|}\right) \quad\text{as }\ensuremath{\ensuremath{\mathbf{a}}repsilon}\to0. \end{equation*} If \begin{equation*} r^{-k} u_N(r\cos t,r\sin t)\to\widehat\beta \cos\left(k t\right) \mbox{ in } C^{1,\tau}\left([0,\pi],\ensuremath{\mathbb{R}}\right) \end{equation*} as $r\to 0^+$ for all $\tau\in (0,1)$, with $k\in \ensuremath{\mathbb{N}}^*$ and $\widehat\beta\in \ensuremath{\mathbb{R}}\setminus \{0\}$, then \begin{equation*} \mu_N(\ensuremath{\ensuremath{\mathbf{a}}repsilon})=\mu_N+\frac{k\pi{\widehat\beta}^2}{4^{k-1}}\left(\begin{array}{c} k-1\\ \left\lfloor\frac{k-1}2\right\rfloor\end{array}\right)^2\ensuremath{\ensuremath{\mathbf{a}}repsilon}^{2k}+o\left(\ensuremath{\ensuremath{\mathbf{a}}repsilon}^{2k}\right) \quad\text{as }\ensuremath{\ensuremath{\mathbf{a}}repsilon}\to0. \end{equation*} \end{prop} We now prove Theorem \ref{t:dipoleSymOdd}. Since $\widehat u_N$ is odd in $x_2$, $\widehat \lambda_N$ belongs to the spectrum of $-\Delta^a$. Since $\widehat\lambda_N$ is simple, it does not belong to the spectrum of $-\Delta^s$, according to the orthogonal decomposition \eqref{eq:DecompLap}. It follows from Remark \ref{remDecompLap} that there exists $K\in \ensuremath{\mathbb{N}}^*$ such that $\widehat \lambda_N=\lambda_K$ and that $\lambda_K$ is a simple eigenvalue of $q_0$ in $\Omega$. By continuity, $\lambda_K(\ensuremath{\ensuremath{\mathbf{a}}repsilon})\to \lambda_K$ as $\ensuremath{\ensuremath{\mathbf{a}}repsilon}\to 0^+$. 
From Corollary \ref{c:MagnEV}, Proposition \ref{p:ConvEven} and the fact that $\widehat\lambda_N$ is simple, it follows that there exists $\ensuremath{\ensuremath{\mathbf{a}}repsilon}_1>0$ such that $\lambda^{AB}_N(\ensuremath{\ensuremath{\mathbf{a}}repsilon})=\lambda_K(\ensuremath{\ensuremath{\mathbf{a}}repsilon})$ for every $\ensuremath{\ensuremath{\mathbf{a}}repsilon}\in (0,\ensuremath{\ensuremath{\mathbf{a}}repsilon}_1)$. The conclusion of Theorem \ref{t:dipoleSymOdd} follows from Theorem \ref{t:new_main}, using the fact that $\lambda_K$ is simple. Let us note that the eigenfunction $\widehat u_N$ in Theorem \ref{t:dipoleSymOdd} is normalized in $L^2\big(\widehat\Omega\big)$, while the eigenfunction $u_N$ in Theorem \ref{t:new_main} is normalized in $L^2\big(\Omega\big)$. We therefore have to apply Theorem \ref{t:new_main} with $\beta=\sqrt 2\,\widehat\beta$ to obtain the correct result. We can use the results of the preceding sections to study some multiple eigenvalues. Let $\widehat\lambda_N$ be an eigenvalue of $-\Delta$ on $\widehat\Omega$, possibly multiple. We define \begin{equation*} N_0:=\min\left\{M\in \ensuremath{\mathbb{N}}^*\,;\,\widehat\lambda_M=\widehat\lambda_N\right\} \mbox{ and } N_1:=\max\left\{M\in \ensuremath{\mathbb{N}}^*\,;\,\widehat\lambda_M=\widehat\lambda_N\right\}. \end{equation*} According to Remark \ref{remDecompLap}, there exists $K\in \ensuremath{\mathbb{N}}^*$ such that $\widehat\lambda_N=\lambda_K$ or there exists $L\in \ensuremath{\mathbb{N}}^*$ such that $\widehat\lambda_N=\mu_L$. \begin{prop} \label{p:MultOdd} Let us assume that $\widehat\lambda_N=\lambda_K$ with $K\in \ensuremath{\mathbb{N}}^*$ and that $\lambda_K$ is a simple eigenvalue of $q_0$. 
Let us denote by $u_K$ an associated normalized eigenfunction for $q_0$, and let us assume that \begin{equation*} r^{-k} u_K(r\cos t,r\sin t)\to \beta \sin\left(k t\right) \mbox{ in } C^{1,\tau}\left([0,\pi],\ensuremath{\mathbb{R}}\right) \end{equation*} as $r\to 0^+$ for all $\tau\in (0,1)$, with $k\in \ensuremath{\mathbb{N}}^*$ and $\beta\in \ensuremath{\mathbb{R}}\setminus \{0\}$. Then \begin{equation*} \lambda_{N_0}^{AB}(\ensuremath{\ensuremath{\mathbf{a}}repsilon})=\widehat\lambda_N-\frac{k\pi\beta^2}{2^{2k-1}}\left(\begin{array}{c} k-1\\ \left\lfloor\frac{k-1}2\right\rfloor\end{array}\right)^2\ensuremath{\ensuremath{\mathbf{a}}repsilon}^{2k}+o\left(\ensuremath{\ensuremath{\mathbf{a}}repsilon}^{2k}\right)\quad\text{as }\ensuremath{\ensuremath{\mathbf{a}}repsilon}\to0. \end{equation*} \end{prop} \begin{proof} Let us set $m:=N_1-N_0+1$, the multiplicity of $\widehat\lambda_N$. If $m=1$, the conclusion follows from Theorem \ref{t:dipoleSymOdd}. We therefore assume $m\ge2$ in the rest of the proof. Remark \ref{remDecompLap} and the fact that $\lambda_K$ is simple imply that there exists $L\in \ensuremath{\mathbb{N}}^*$ such that $\mu_L=\mu_{L+1}=\dots=\mu_{L+m-2}=\widehat\lambda_N$. From Proposition \ref{p:ConvEven}, we deduce that there exists $\ensuremath{\ensuremath{\mathbf{a}}repsilon}_1>0$ such that, for every $\ensuremath{\ensuremath{\mathbf{a}}repsilon}\in (0,\ensuremath{\ensuremath{\mathbf{a}}repsilon}_1)$, \begin{equation*} \left\{\lambda^{AB}_{N_0}(\ensuremath{\ensuremath{\mathbf{a}}repsilon});\lambda^{AB}_{N_0+1}(\ensuremath{\ensuremath{\mathbf{a}}repsilon}),\dots,\lambda^{AB}_{N_1}(\ensuremath{\ensuremath{\mathbf{a}}repsilon})\right\}=\left\{\lambda_K(\ensuremath{\ensuremath{\mathbf{a}}repsilon}),\mu_L(\ensuremath{\ensuremath{\mathbf{a}}repsilon}),\dots,\mu_{L+m-2}(\ensuremath{\ensuremath{\mathbf{a}}repsilon})\right\}. 
\end{equation*} The function $\ensuremath{\ensuremath{\mathbf{a}}repsilon}\mapsto \lambda_K(\ensuremath{\ensuremath{\mathbf{a}}repsilon})$ is non-increasing, and the function $\ensuremath{\ensuremath{\mathbf{a}}repsilon}\mapsto\mu_j(\ensuremath{\ensuremath{\mathbf{a}}repsilon})$ is non-decreasing for every $j\in \{L,\dots,L+m-2\}$, therefore $\mu_j(\ensuremath{\ensuremath{\mathbf{a}}repsilon})\ge \mu_j=\widehat\lambda_N=\lambda_K\ge \lambda_K(\ensuremath{\ensuremath{\mathbf{a}}repsilon})$. In particular $\lambda^{AB}_{N_0}(\ensuremath{\ensuremath{\mathbf{a}}repsilon})=\lambda_K(\ensuremath{\ensuremath{\mathbf{a}}repsilon})$ for every $\ensuremath{\ensuremath{\mathbf{a}}repsilon}\in (0,\ensuremath{\ensuremath{\mathbf{a}}repsilon}_1)$. The conclusion follows from Theorem \ref{t:new_main}. \end{proof} \begin{prop}\label{p:MultEven} Let us assume that $\widehat\lambda_N=\mu_L$ with $L\in \ensuremath{\mathbb{N}}^*$ and that $\mu_L$ is a simple eigenvalue of $-\Delta^s$. Let us denote by $u_L$ an associated eigenfunction for $-\Delta^s$, normalized in $L^2(\widehat \Omega)$. If $u_L(0)\neq0$, then \begin{equation*} \lambda_{N_1}^{AB}(\ensuremath{\ensuremath{\mathbf{a}}repsilon})=\widehat\lambda_N+ \frac{2\pi}{|\log(\ensuremath{\ensuremath{\mathbf{a}}repsilon})|}u_L^2 (0)+o\left(\frac1{|\log(\ensuremath{\ensuremath{\mathbf{a}}repsilon})|}\right) \quad\text{as }\ensuremath{\ensuremath{\mathbf{a}}repsilon}\to0. 
\end{equation*} If \begin{equation*} r^{-k} u_L(r\cos t,r\sin t)\to \widehat\beta \cos\left(k t\right) \mbox{ in } C^{1,\tau}\left([0,\pi],\ensuremath{\mathbb{R}}\right) \end{equation*} as $r\to 0^+$ for all $\tau\in (0,1)$, with $k\in \ensuremath{\mathbb{N}}^*$ and $\widehat\beta\in \ensuremath{\mathbb{R}}\setminus \{0\}$, then \begin{equation*} \lambda_{N_1}^{AB}(\ensuremath{\ensuremath{\mathbf{a}}repsilon})=\widehat\lambda_N+\frac{k\pi{\widehat\beta}^2}{4^{k-1}}\left(\begin{array}{c} k-1\\ \left\lfloor\frac{k-1}2\right\rfloor\end{array}\right)^2\ensuremath{\ensuremath{\mathbf{a}}repsilon}^{2k}+o\left(\ensuremath{\ensuremath{\mathbf{a}}repsilon}^{2k}\right) \quad\text{as }\ensuremath{\ensuremath{\mathbf{a}}repsilon}\to0. \end{equation*} \end{prop} \begin{proof} In a similar way as in the proof of Proposition \ref{p:MultOdd}, we show that there exists $\ensuremath{\ensuremath{\mathbf{a}}repsilon}_1>0$ such that, for every $\ensuremath{\ensuremath{\mathbf{a}}repsilon}\in(0,\ensuremath{\ensuremath{\mathbf{a}}repsilon}_1)$, $\lambda^{AB}_{N_1}(\ensuremath{\ensuremath{\mathbf{a}}repsilon})=\mu_L(\ensuremath{\ensuremath{\mathbf{a}}repsilon})$. The conclusion then follows from Proposition \ref{p:AsymptNDN}. \end{proof} \subsection{Example: the square} \label{subs:Square} As an application of the preceding results, let us study the first four eigenvalues of the Dirichlet Laplacian for the square \begin{equation}\label{eq:quadrato} \widehat\Omega:=\left( -\frac\pi2,\frac\pi2 \right)^2. \end{equation} The open set $\widehat\Omega$ is symmetric with respect to the $x_1$-axis. We define $\Omega:=\widehat\Omega\cap\ensuremath{\mathbb{R}}^2_+$. 
We denote by $(\widehat\lambda_j)_{j\ge1}$ the eigenvalues of the Dirichlet Laplacian on the square $\widehat \Omega$ and, for $\ensuremath{\ensuremath{\mathbf{a}}repsilon}\in (0,\pi/2)$, we consider the Aharonov-Bohm eigenvalues $\left(\lambda^{AB}_j(\ensuremath{\ensuremath{\mathbf{a}}repsilon})\right)_{j\ge1}$ defined in Section \ref{subs:IntroAB}. It is well known that the eigenvalues of the Dirichlet Laplacian on $\widehat\Omega$ are \begin{equation*} \widehat\lambda_{m,n}:=m^2+n^2, \end{equation*} with $m$ and $n$ positive integers, and that an associated orthonormal family of eigenfunctions is given by \begin{equation*} u_{m,n}(x_1,x_2)=\frac2\pi\,f_m(x_1)f_n(x_2), \end{equation*} where \begin{equation*} f_k(x)= \begin{cases} \sin(kx),&\mbox{ if $k$ is even},\\ \cos(kx),&\mbox{ if $k$ is odd}. \end{cases} \end{equation*} \begin{prop} \label{p:SquareSimple} Let us assume that $\widehat\lambda_N$ is simple. Then $\widehat\lambda_N=\widehat\lambda_{m,m}=2m^2$ for some positive integer $m$, and $\widehat\lambda_N$ cannot be written in any other way as a sum of squares of positive integers. Then we have, as $\ensuremath{\ensuremath{\mathbf{a}}repsilon}\to0^+$, \begin{equation*} \lambda^{AB}_N(\ensuremath{\ensuremath{\mathbf{a}}repsilon})=\widehat\lambda_N+\frac8{\pi|\log(\ensuremath{\ensuremath{\mathbf{a}}repsilon})|}+o\left(\frac1{|\log(\ensuremath{\ensuremath{\mathbf{a}}repsilon})|}\right) \end{equation*} if $m$ is odd and \begin{equation*} \lambda^{AB}_N(\ensuremath{\ensuremath{\mathbf{a}}repsilon})=\widehat\lambda_N-\frac{m^4}{2\pi}\ensuremath{\ensuremath{\mathbf{a}}repsilon}^4+o\left(\ensuremath{\ensuremath{\mathbf{a}}repsilon}^4\right) \end{equation*} if $m$ is even. \end{prop} \begin{proof} In the case where $m$ is odd, an associated eigenfunction, normalized in $L^2(\widehat\Omega)$, is \begin{equation*} u_{m,m}(x_1,x_2)=\frac2\pi\cos(mx_1)\cos(mx_2). \end{equation*} The first asymptotic expansion then follows from Theorem \ref{thmNonZero}. 
In the case where $m$ is even, an associated eigenfunction, normalized in $L^2(\widehat\Omega)$, is \begin{equation*} u_{m,m}(x_1,x_2)=\frac2\pi\sin(mx_1)\sin(mx_2). \end{equation*} Then $\widehat\lambda_N=\lambda_K$, where $\lambda_K$ is a simple eigenvalue of $q_0$. Furthermore, \begin{equation*} r^{-2} u_{m,m}(r\cos t,r\sin t)\to \frac{m^2}\pi \sin\left(2 t\right) \mbox{ in } C^{1,\tau}\left([0,\pi],\ensuremath{\mathbb{R}}\right) \end{equation*} as $r\to 0^+$ for all $\tau\in (0,1)$. An application of Proposition \ref{p:MultOdd}, taking care of normalizing in $L^2(\Omega)$, gives the second asymptotic expansion. \end{proof} \begin{prop} \label{p:SquareDouble} Assume that $\widehat\lambda_N=\widehat\lambda_{m,n}=m^2+n^2$ with $m$ even and $n$ odd, and that $\widehat\lambda_N$ has no other representation as a sum of two squares of positive integers, up to the exchange of $m$ and $n$. Then $\widehat\lambda_N$ has multiplicity two; up to replacing $N$ with $N-1$, we can assume that $\widehat\lambda_N=\widehat\lambda_{N+1}$. Then, as $\ensuremath{\ensuremath{\mathbf{a}}repsilon}\to0^+$, \begin{align*} \lambda^{AB}_N(\ensuremath{\ensuremath{\mathbf{a}}repsilon})&=\widehat\lambda_N-\frac{4m^2}\pi\ensuremath{\ensuremath{\mathbf{a}}repsilon}^2+o\left(\ensuremath{\ensuremath{\mathbf{a}}repsilon}^2\right);\\ \lambda^{AB}_{N+1}(\ensuremath{\ensuremath{\mathbf{a}}repsilon})&=\widehat\lambda_N+\frac{4m^2}\pi\ensuremath{\ensuremath{\mathbf{a}}repsilon}^2+o\left(\ensuremath{\ensuremath{\mathbf{a}}repsilon}^2\right). \end{align*} \end{prop} \begin{proof} The associated eigenfunctions \begin{equation*} u_{m,n}(x_1,x_2)=\frac2\pi\sin(mx_1)\cos(nx_2) \end{equation*} and \begin{equation*} u_{n,m}(x_1,x_2)=\frac2\pi\cos(nx_1)\sin(mx_2) \end{equation*} are normalized in $L^2(\widehat\Omega)$ and respectively symmetric and antisymmetric in the variable $x_2$. 
It follows that $\widehat\lambda_N=\mu_L=\lambda_K$, where $\mu_L$ is a simple eigenvalue of $r_0$ and $\lambda_K$ a simple eigenvalue of $q_0$. Furthermore, \begin{equation*} r^{-1} u_{m,n}(r\cos t,r\sin t)\to \frac{2m}\pi \cos\left(t\right) \mbox{ in } C^{1,\tau}\left([0,\pi],\ensuremath{\mathbb{R}}\right) \end{equation*} and \begin{equation*} r^{-1} u_{n,m}(r\cos t,r\sin t)\to \frac{2m}\pi \sin\left(t\right) \mbox{ in } C^{1,\tau}\left([0,\pi],\ensuremath{\mathbb{R}}\right) \end{equation*} as $r\to 0^+$ for all $\tau\in (0,1)$. The asymptotic expansions then follow from Propositions \ref{p:MultEven} and \ref{p:MultOdd}. \end{proof} \begin{rem} We note that if $\widehat\lambda_N$ is even, in any representation $\widehat\lambda_N=m^2+n^2$, $m$ and $n$ have the same parity. Therefore, if $n\neq m$, $\widehat\lambda_N$ cannot be a simple eigenvalue either of $r_0$ or of $q_0$. On the other hand, if $\widehat\lambda_N$ is odd, in any representation $\widehat\lambda_N=m^2+n^2$, $m$ and $n$ have the opposite parity. Therefore, as soon as $\widehat\lambda_N$ can be written in at least two different ways as the sum of two squares, $\widehat\lambda_N$ cannot be a simple eigenvalue either of $r_0$ or of $q_0$. The cases described in Propositions \ref{p:SquareSimple} and \ref{p:SquareDouble} are thus the only ones in which we can apply the results of Section \ref{subs:EigVar} for the square. \end{rem} The first four eigenvalues of the Dirichlet Laplacian on the square $\widehat\Omega$ satisfy the assumptions of either Proposition \ref{p:SquareSimple} or Proposition \ref{p:SquareDouble}, so we can apply the previous results to derive the following asymptotic expansions of the Aharonov-Bohm eigenvalues $\lambda^{AB}_j(\varepsilon)$ for $j=1,2,3,4$. 
\begin{cor} \label{c:Square} Let $\lambda^{AB}_j(\ensuremath{\ensuremath{\mathbf{a}}repsilon})$ be the Aharonov-Bohm eigenvalues defined in \eqref{eq:QuadAB2Poles}--\eqref{eq:MinMaxAB1Poles} with $\widehat\Omega$ being the square defined in \eqref{eq:quadrato}. Then we have, as $\ensuremath{\ensuremath{\mathbf{a}}repsilon}\to 0^+$, \begin{align*} \lambda^{AB}_1(\ensuremath{\ensuremath{\mathbf{a}}repsilon})&=2+\frac{8}{\pi}\frac1{|\log(\ensuremath{\ensuremath{\mathbf{a}}repsilon})|}+o\left(\frac1{|\log(\ensuremath{\ensuremath{\mathbf{a}}repsilon})|}\right);\\ \lambda^{AB}_2(\ensuremath{\ensuremath{\mathbf{a}}repsilon})&=5-\frac{16}{\pi}\ensuremath{\ensuremath{\mathbf{a}}repsilon}^2+o\left(\ensuremath{\ensuremath{\mathbf{a}}repsilon}^2\right);\\ \lambda^{AB}_3(\ensuremath{\ensuremath{\mathbf{a}}repsilon})&=5+\frac{16}{\pi}\ensuremath{\ensuremath{\mathbf{a}}repsilon}^2+o\left(\ensuremath{\ensuremath{\mathbf{a}}repsilon}^2\right);\\ \lambda^{AB}_4(\ensuremath{\ensuremath{\mathbf{a}}repsilon})&=8-\frac{8}{\pi}\ensuremath{\ensuremath{\mathbf{a}}repsilon}^4+o\left(\ensuremath{\ensuremath{\mathbf{a}}repsilon}^4\right). \end{align*} \end{cor} \subsection{Example: the disk} \label{subs:Disk} Let $(r,t)\in [0,1]\times [0,2\pi)$ be the polar coordinates of the disk. It is well known that the eigenvalues of the Dirichlet Laplacian on the disk are given by the sequences \[ \{{j^2_{0,k}}\}_{k\geq 1} \cup \{ {j^2_{n,k}} \}_{n,k\geq 1}, \] where $j_{n,k}$ denotes the $k$-th zero of the Bessel function $J_n$ for $n\geq 0$, $k\geq 1$. We recall that $j_{n,k}=j_{n',k'}$ if, and only if, $n=n'$ and $k=k'$ (see \cite[Section 15.28]{Watson}). The first set is therefore made of simple eigenvalues; their eigenfunctions are given by the Bessel functions \begin{equation} \label{eqEFDiskSimple} u_{0,k}(r \cos t, r\sin t):=\sqrt{\tfrac1\pi}\tfrac{1}{|J_0'(j_{0,k})|} J_0(j_{0,k}r) \quad \text{ for } k\geq 1. 
\end{equation} The second set is made of double eigenvalues whose eigenfunctions are spanned by \begin{align} \label{eqEFDisks}u_{n,k}^s(r \cos t, r\sin t)&: =\sqrt{\tfrac2\pi}\tfrac{1}{|J_n'(j_{n,k})|}J_n(j_{n,k}r)\cos nt,\\ \label{eqEFDiska}u_{n,k}^a(r \cos t, r\sin t)&: =\sqrt{\tfrac2\pi}\tfrac{1}{|J_n'(j_{n,k})|} J_n(j_{n,k}r)\sin nt , \end{align} for $n,k\geq 1$. We stress that these eigenfunctions have $L^2$-norm equal to $1$ on the disk. It is convenient to recall (see \cite[Chapter III]{Watson}) that for any $n\in \ensuremath{\mathbb{N}}\cup\{0\}$ \begin{equation}\label{eq:bessel} J_n(z) = \sum_{k=0}^{+\infty} \dfrac{(-1)^k (\tfrac12 z)^{n+2k}}{k!\ \Gamma(n+k+1)}. \end{equation} We denote by $\big(\widehat\lambda_j\big)_{j\ge1}$ the eigenvalues of the Dirichlet Laplacian on the disk \[ D_1=\{(x_1,x_2)\in \ensuremath{\mathbb{R}}^2:x_1^2+x_2^2<1\} \] and, for $\ensuremath{\ensuremath{\mathbf{a}}repsilon}\in (0,1/2)$, we consider the Aharonov-Bohm eigenvalues $\left(\lambda^{AB}_j(\ensuremath{\ensuremath{\mathbf{a}}repsilon})\right)_{j\ge1}$ defined in Section~\ref{subs:IntroAB}. \begin{prop}\label{p:ABDisk} If $\widehat\lambda_N$ is simple, there exists an integer $k\ge1$ such that $\widehat\lambda_N=j_{0,k}^2$. Then \begin{equation}\label{eqEVDiskSimple} \lambda_N^{AB}(\ensuremath{\ensuremath{\mathbf{a}}repsilon})=j_{0,k}^2+ \frac2{|J_0'(j_{0,k})|^2}\frac1{|\log(\ensuremath{\ensuremath{\mathbf{a}}repsilon})|}+o\left(\frac1{|\log(\ensuremath{\ensuremath{\mathbf{a}}repsilon})|}\right) \end{equation} as $\ensuremath{\ensuremath{\mathbf{a}}repsilon}\to0^+$. If $\widehat\lambda_N$ is double, there exist integers $n\ge1$ and $k\ge1$ such that $\widehat\lambda_N=j_{n,k}^2$. Up to replacing $N$ by $N-1$, we can assume that $\widehat\lambda_N=\widehat\lambda_{N+1}$. 
Then, as $\ensuremath{\ensuremath{\mathbf{a}}repsilon}\to0^+$, \begin{align} \label{eqEVDiska}\lambda_N^{AB}(\ensuremath{\ensuremath{\mathbf{a}}repsilon})&=j_{n,k}^2- \frac{2nj_{n,k}^{2n}}{(n!)^24^{2n-1}|J_n'(j_{n,k})|^2}\left(\begin{array}{c}n-1\\ \left\lfloor\frac{n-1}2\right\rfloor\end{array}\right)^{\!2}\ensuremath{\ensuremath{\mathbf{a}}repsilon}^{2n}+o\left(\ensuremath{\ensuremath{\mathbf{a}}repsilon}^{2n}\right),\\ \label{eqEVDisks}\lambda_{N+1}^{AB}(\ensuremath{\ensuremath{\mathbf{a}}repsilon})&=j_{n,k}^2+\frac{2nj_{n,k}^{2n}}{(n!)^24^{2n-1}|J_n'(j_{n,k})|^2}\left(\begin{array}{c}n-1\\ \left\lfloor\frac{n-1}2\right\rfloor\end{array}\right)^{\!2}\ensuremath{\ensuremath{\mathbf{a}}repsilon}^{2n}+o\left(\ensuremath{\ensuremath{\mathbf{a}}repsilon}^{2n}\right). \end{align} \end{prop} \begin{proof} We first consider the case where the eigenvalue $\widehat\lambda_N=j_{0,k}^2$ is simple; then an associated eigenfunction, normalized in the disk, is $u_{0,k}$ defined by Equation \eqref{eqEFDiskSimple}. It follows from Equation \eqref{eq:bessel} that \begin{equation*} u_{0,k}(0)=\sqrt{\frac1\pi}\frac1{|J_0'(j_{0,k})|}>0. \end{equation*} Theorem \ref{thmNonZero} gives us the asymptotic expansion \eqref{eqEVDiskSimple}. We then consider the case where $\widehat\lambda_N$ is double, with $\widehat\lambda_N=\widehat\lambda_{N+1}=j_{n,k}^2$, $n,k\ge1$. We note that $j_{n,k}^2$ is a simple eigenvalue of $q_0$, and that the restriction of $\sqrt2 u_{n,k}^a$ to the upper half-disk is an associated normalized eigenfunction. It follows from Equation \eqref{eq:bessel} that \begin{equation*} r^{-n}u_{n,k}^a(r \cos t, r\sin t)\to\sqrt{\frac2\pi}\frac1{|J_n'(j_{n,k})|}\frac1{\Gamma(n+1)}\left(\frac{j_{n,k}}2\right)^{\!n}\sin nt\quad\mbox{in } C^{1,\tau}\left([0,\pi],\ensuremath{\mathbb{R}}\right) \end{equation*} as $r\to 0^+$. The asymptotic expansion \eqref{eqEVDiska} then follows from Proposition \ref{p:MultOdd}. 
In a similar way, $j_{n,k}^2$ is a simple eigenvalue of $-\Delta^s$, and $u_{n,k}^s$ is an associated normalized eigenfunction. It follows from Equation \eqref{eq:bessel} that \begin{equation*} r^{-n}u_{n,k}^s(r \cos t, r\sin t)\to\sqrt{\frac2\pi}\frac1{|J_n'(j_{n,k})|}\frac1{\Gamma(n+1)}\left(\frac{j_{n,k}}2\right)^{\!n}\cos nt\quad\mbox{in } C^{1,\tau}\left([0,\pi],\ensuremath{\mathbb{R}}\right) \end{equation*} as $r\to 0^+$. The asymptotic expansion \eqref{eqEVDisks} then follows from the second case of Proposition \ref{p:MultEven}. \end{proof} Additionally, there exist relations between the zeros of Bessel functions (to this aim we refer to \cite[Chapter XV.22]{Watson}): in particular, the positive zeros of the Bessel function $J_n$ are interlaced with those of the Bessel function $J_{{n+1}}$ and by Porter's Theorem there is an odd number of zeros of $J_{n+2}$ between two consecutive zeros of $J_n$. Then, we have, \[ 0 < j_{0,1} < j_{1,1} < j_{2,1} < j_{0,2} < j_{1,2} < \ldots \] and hence, since $j_{3,1}>j_{2,1}$, the first three zeros of Bessel functions are, in order, \[ 0 < j_{0,1} < j_{1,1} < j_{2,1}. 
\] Combining this information with Proposition \ref{p:ABDisk}, we find for example the following asymptotic expansions for the first few Aharonov-Bohm eigenvalues $\lambda^{AB}_j(\ensuremath{\ensuremath{\mathbf{a}}repsilon})$ on the disk $D_1$ as $\ensuremath{\ensuremath{\mathbf{a}}repsilon}\to0^+$: \begin{align*} \lambda^{AB}_1(\ensuremath{\ensuremath{\mathbf{a}}repsilon})&={j_{0,1}^2}+ \frac{2}{|J_0'(j_{0,1})|^2}\frac1{|\log(\ensuremath{\ensuremath{\mathbf{a}}repsilon})|}+o\left(\frac1{|\log(\ensuremath{\ensuremath{\mathbf{a}}repsilon})|}\right),\\ \lambda^{AB}_2(\ensuremath{\ensuremath{\mathbf{a}}repsilon})&={j_{1,1}^2} - \frac12 \dfrac{j_{1,1}^2}{|J'_1(j_{1,1})|^2} \, \ensuremath{\ensuremath{\mathbf{a}}repsilon}^2 + o(\ensuremath{\ensuremath{\mathbf{a}}repsilon}^2), \\ \lambda^{AB}_3(\ensuremath{\ensuremath{\mathbf{a}}repsilon})&={j_{1,1}^2} + \frac12 \dfrac{j_{1,1}^2}{|J'_1(j_{1,1})|^2} \, \ensuremath{\ensuremath{\mathbf{a}}repsilon}^2 + o(\ensuremath{\ensuremath{\mathbf{a}}repsilon}^2),\\ \lambda^{AB}_4(\ensuremath{\ensuremath{\mathbf{a}}repsilon})&={j_{2,1}^2} - \frac{1}{64} \dfrac{j_{2,1}^4}{|J'_2(j_{2,1})|^2}\, \ensuremath{\ensuremath{\mathbf{a}}repsilon}^4 + o(\ensuremath{\ensuremath{\mathbf{a}}repsilon}^4),\\ \lambda^{AB}_5(\ensuremath{\ensuremath{\mathbf{a}}repsilon})&={j_{2,1}^2} + \frac{1}{64} \dfrac{j_{2,1}^4}{|J'_2(j_{2,1})|^2} \, \ensuremath{\ensuremath{\mathbf{a}}repsilon}^4 + o(\ensuremath{\ensuremath{\mathbf{a}}repsilon}^4). \end{align*} \appendix \section{Computation of the constants} \label{a:A} \subsection{The Neumann-Dirichlet case} In the present section, we use the above results to compute the quantities appearing in \cite[Section 4]{abatangelo2015sharp}. In order to avoid a conflict of notation with the present paper, for any odd positive integer $k$, we denote here by $\psi_k'$, $\mathfrak m_k'$ and $w_k'$ what is denoted in \cite{abatangelo2015sharp} by $\psi_k$, $\mathfrak m_k$ and $w_k$ respectively. 
As in \cite{abatangelo2015sharp}, we use the notation \begin{equation*} s_0:=\left\{\left(x'_1,0\right)\,;\,x'_1\ge 0\right\}; \end{equation*} and \begin{equation*} s:=\left\{\left(x'_1,0\right)\,;\,x'_1\ge 1\right\}. \end{equation*} We now define the mapping $G:\ensuremath{\mathbb{R}}^2_+\to \ensuremath{\mathbb{R}}^2\setminus s_0$ by \begin{equation*} G(x):=(x_1^2-x_2^2,2x_1x_2). \end{equation*} The mapping is conformal; indeed, if for $x\in\ensuremath{\mathbb{R}}^2_+$ we write $z:=x_1+ix_2$ and $z':=x'_1+ix'_2$, with $x'=(x'_1,x'_2):=G(x)$, we have $z'=z^2$. The scale factor associated with $G$ is $h(x)=2|z|=2|x|$. Let $u'$ be a function in $H^1\left(\ensuremath{\mathbb{R}}^2\setminus s_0\right)$ and $u:=u'\circ G$. Since $G$ is conformal, $|\nabla u|$ is in $L^2\left(\ensuremath{\mathbb{R}}^2_+\right)$, with \begin{equation*} \int_{\ensuremath{\mathbb{R}}^2_+}\left|\nabla u\right|^2\,dx=\int_{\ensuremath{\mathbb{R}}^2\setminus s_0}\left|\nabla u'\right|^2\,dx'. \end{equation*} Furthermore, for any $x'$ in the segment $(0,1)\times \{0\}$, which we write as $x'=(x_1',0)$, we have \begin{equation*} \frac{\partial u'}{\partial \nu_+}\left(x'\right)=-\frac1{2\sqrt{x'_1}}\frac{\partial u}{\partial x_2}\left(\sqrt{x_1'},0\right)\mbox{ and }\frac{\partial u'}{\partial \nu_-}\left(x'\right)=-\frac1{2\sqrt{x'_1}}\frac{\partial u}{\partial x_2}\left(-\sqrt{x_1'},0\right), \end{equation*} where $\frac{\partial u'}{\partial\nu_+}(x')$ and $\frac{\partial u'}{\partial\nu_-}(x')$ denote the normal derivative at $x'$ respectively from above and from below. We also note that $u$ is harmonic in $\ensuremath{\mathbb{R}}^2_+$ if, and only if, $u'$ is harmonic in $\ensuremath{\mathbb{R}}^2\setminus s_0$. Let us now denote by $\widetilde{w}_k'$ the extension by reflexion to $\ensuremath{\mathbb{R}}^2\setminus s_0$ of $w_k'$, originally defined on $\ensuremath{\mathbb{R}}^2_+$. 
We recall that $w_k'$ is the unique finite energy solution to the problem \begin{equation*} \begin{cases} -\Delta w_k'=0, &\text{in }\ensuremath{\mathbb{R}}^2_+, \\ w_k'=0, &\text{on }s, \\ \frac{\partial w_k'}{\partial \nu}=-\frac{\partial \psi'_k}{\partial \nu}, &\text{on }\partial \ensuremath{\mathbb{R}}^2_+\setminus s, \end{cases} \end{equation*} where $ \psi_k'(r\cos t,r\sin t)= r^{k/2} \sin \big(\frac{k}{2}\,t\big)$. \begin{lem} \label{lem2Prob} For any odd positive integer $k$, $w_k=\widetilde{w}_k'\circ G$. \end{lem} \begin{proof} Let us write $v:= \widetilde{w}_k'\circ G$. By uniqueness, it is enough to prove that $v$ solves \eqref{eq:wk}. From the remarks at the beginning of the present section, it follows that $v$ is harmonic in $\ensuremath{\mathbb{R}}^2_+$. Let us now show that $\psi_k=\psi_k'\circ G$. Indeed, for $x'\in \ensuremath{\mathbb{R}}^2\setminus s_0$, $\psi_k'(x')=\mbox{Im}\left((z')^{k/2}\right)$, and therefore $(\psi_k'\circ G)(x)=\mbox{Im}\left((z^2)^{k/2}\right)=\mbox{Im}\left(z^k\right)=\psi_k(x)$, where $x'=G(x)$, $z$ and $z'$ are defined as above, and where we use the determination of the square root on $\ensuremath{\mathbb{C}}\setminus s_0$ defined by $G^{-1}$. From this and the previous remarks, it follows that $v$ satisfies the boundary conditions of Problem \eqref{eq:wk}. \end{proof} As in \cite{abatangelo2015sharp} we define \[ \mathfrak m_k'=-\frac12\int_{\ensuremath{\mathbb{R}}^2_+}\left|\nabla w_k'\right|^2\,dx \] and \begin{equation}\label{eq:mathfrak-m} \mathfrak m_k=-\frac12\int_{\ensuremath{\mathbb{R}}^2_+}\left|\nabla w_k\right|^2\,dx. \end{equation} We note that the right hand side of \eqref{eq:gad} is equal to $-2\beta^2\mathfrak m_k$. \begin{cor}\label{cor:mk} For any odd positive integer $k$, $\mathfrak m_k'=\frac12\mathfrak m_k$. 
\end{cor} \begin{proof} We have \begin{equation*} \mathfrak m_k'=-\frac12\int_{\ensuremath{\mathbb{R}}^2_+}\left|\nabla w_k'\right|^2\,dx'=-\frac14\int_{\ensuremath{\mathbb{R}}^2\setminus s_0}\left|\nabla \widetilde{w}_k'\right|^2\,dx'. \end{equation*} Using Lemma \ref{lem2Prob} and the conformal invariance of the $L^2$-norm of the gradient, we find \begin{equation*} \int_{\ensuremath{\mathbb{R}}^2\setminus s_0}\left|\nabla \widetilde{w}_k'\right|^2\,dx'=\int_{\ensuremath{\mathbb{R}}^2_+}\left|\nabla w_k\right|^2\,dx=-2\mathfrak m_k. \qedhere \end{equation*} \end{proof} In particular, Corollary \ref{cor:mk} and Proposition \ref{propmk} imply that \[ \mathfrak m_k'= -\frac{k\pi}{4\,2^{2k-1}}\left(\begin{array}{c}k-1\\ \left\lfloor\frac{k-1}2\right\rfloor\end{array}\right)^{\!2}, \] thus proving, in view of \cite[Theorem 1.2]{abatangelo2015sharp}, the explicit constant appearing in the asymptotic expansion of Theorem \ref{t:monopole}. \subsection{The $u$-capacities of segments} \label{subs:uCapacity} In this last section, we simplify the constant $C_k$ occurring in \cite[Lemma 2.3]{AFHL2016}. \begin{prop}\label{p:A3} For any positive integer $k$, \begin{equation*} C_k=\frac{k}{4^{k-1}}\left(\begin{array}{c}k-1\\ \left\lfloor\frac{k-1}2\right\rfloor\end{array}\right)^2. \end{equation*} \end{prop} \begin{proof} According to Equation (22) in \cite[Lemma 2.3]{AFHL2016}, \begin{equation*} C_k=\sum_{j=1}^k j\left|A_{j,k}\right|^2, \end{equation*} where $A_{j,k}$ is the $j$-th cosine Fourier coefficient of the function $\eta\mapsto (\cos\eta)^k$. To be more explicit, let us expand $(\cos\eta)^k$ into a trigonometric polynomial. We write \begin{equation*} (\cos\eta)^k=\left(\frac{e^{i\eta}+e^{-i\eta}}2\right)^k=\frac1{2^k}\sum_{j=0}^k\left(\begin{array}{c}k\\ j\end{array}\right)e^{(k-2j)i\eta}. 
\end{equation*} By grouping the terms of the sum in pairs starting from opposite extremities, we find \begin{equation*} (\cos\eta)^k=\frac1{2^{k-1}}\sum_{j=0}^{\left\lfloor\frac{k-1}2\right\rfloor}\left(\begin{array}{c}k\\ j \end{array}\right)\cos((k-2j)\eta)+c_k \end{equation*} where \begin{equation*} c_k=0 \mbox{ if } k=2p+1 \mbox{\hspace{1cm} and \hspace{1cm}} c_k=\frac1{2^k}\left(\begin{array}{c}k\\ p \end{array}\right) \mbox{ if } k=2p. \end{equation*} It follows that \begin{equation*} C_k=\frac1{4^{k-1}}\sum_{j=0}^{\left\lfloor\frac{k-1}2\right\rfloor}(k-2j)\left(\begin{array}{c}k\\ j \end{array}\right)^2 \end{equation*} and we conclude using Lemma \ref{lemSumBin}. \end{proof} Proposition \ref{p:A3} and \cite[Theorem 1.16]{AFHL2016} provide the explicit constant appearing in the asymptotic expansion of Theorem \ref{t:dipoleSymEven}. \section{Auxiliary results for eigenvalue variations} \label{a:B} This section is dedicated to the proof of Propositions \ref{p:ConvEven} and \ref{p:AsymptNDN}. In order to make a connection to the results of \cite{AFHL2016}, which we use, let us present an alternative characterization of the eigenvalues $(\mu_j)_{j\ge1}$ and $(\mu_j(\varepsilon))_{j\ge1}$. We define \begin{equation*} \widehat{\mathcal Q}^s_\varepsilon:= \left\{u\in H^1_0(\widehat\Omega\setminus\Gamma_\varepsilon)\,:\,u\circ\sigma=u\right\}, \end{equation*} and we denote by $\widehat q^s_\varepsilon$ the restriction of $\widehat q_0$ (see the paragraph preceding Theorem \ref{thmNonZero} for the notation) to $\widehat{\mathcal Q}^s_\varepsilon$. One can then check that we obtain the eigenvalues $(\mu_j(\varepsilon))_{j\ge1}$ from $\widehat q^s_\varepsilon$ by the min-max principle. 
In the same way, we define \begin{equation*} \widehat{\mathcal Q}^s:= \left\{u\in H^1_0(\widehat\Omega)\,:\,u\circ\sigma=u\right\}, \end{equation*} we denote by $\widehat q^s$ the restriction of the quadratic form $\widehat q_0$, and one can check that we obtain the eigenvalues $(\mu_j)_{j\ge1}$ from $\widehat q^s$ by the min-max principle. Let us note that $-\Delta^s$, defined in Remark \ref{remDecompLap} as a self-adjoint operator in $\mbox{ker}\left(I-\Sigma\right)$, is the Friedrichs extension of $\widehat q^s$. We denote by $-\Delta^s_\ensuremath{\ensuremath{\mathbf{a}}repsilon}$ the Friedrichs extension of $\widehat q^s_\ensuremath{\ensuremath{\mathbf{a}}repsilon}$, which is also a self-adjoint operator in $\mbox{ker}\left(I-\Sigma\right)$. Let us first prove Proposition \ref{p:ConvEven}. Since $\mu_N(\ensuremath{\ensuremath{\mathbf{a}}repsilon})\ge\mu_N$ for all $\ensuremath{\ensuremath{\mathbf{a}}repsilon} \in(0,\ensuremath{\ensuremath{\mathbf{a}}repsilon}_0]$ and since $\ensuremath{\ensuremath{\mathbf{a}}repsilon}\mapsto\mu_N(\ensuremath{\ensuremath{\mathbf{a}}repsilon})$ is non-decreasing, we have existence of $\mu_N^*:=\lim_{\ensuremath{\ensuremath{\mathbf{a}}repsilon}\to0^+}\mu_N(\ensuremath{\ensuremath{\mathbf{a}}repsilon})$, with $\mu_N^*\ge\mu_N$. It only remains to show that $\mu_N^*\le\mu_N$. In order to do this, let us note that the space \begin{equation*} \mathcal D^s:=\left\{u\in C^\infty_c(\widehat\Omega\setminus\{0\})\,:\, u=u\circ\sigma\ \right\} \end{equation*} is dense in $\mbox{ker}\left(I-\Sigma\right)$. Indeed, the space $C^\infty_c(\widehat\Omega\setminus\{0\})$ is dense in $L^2(\widehat\Omega)$, since $\{0\}$ has measure $0$. Therefore, if we fix $u\in \mbox{ker}\left(I-\Sigma\right)$, there exists a sequence $(\ensuremath{\mathbf{a}}rphi_n)_{n\ge1}$ of elements of $C^\infty_c(\widehat\Omega\setminus\{0\})$ converging to $u$ in $L^2(\widehat\Omega)$. 
We now set $\widetilde \ensuremath{\mathbf{a}}rphi_n:=1/2(\ensuremath{\mathbf{a}}rphi_n+\ensuremath{\mathbf{a}}rphi_n\circ\sigma)$. We have $\widetilde\ensuremath{\mathbf{a}}rphi_n\in\mathcal D^s$ for every integer $n\ge1$. Since $u=1/2(u+u\circ\sigma)$, we have the inequality \begin{equation*} \|\widetilde\ensuremath{\mathbf{a}}rphi_n-u\|_{L^2(\widehat\Omega)}\le \frac12\|\ensuremath{\mathbf{a}}rphi_n-u\|_{L^2(\widehat\Omega)}+ \frac12\|\ensuremath{\mathbf{a}}rphi_n\circ\sigma-u\circ\sigma\|_{L^2(\widehat\Omega)} =\|\ensuremath{\mathbf{a}}rphi_n-u\|_{L^2(\widehat\Omega)}, \end{equation*} and this implies that the sequence $(\widetilde\ensuremath{\mathbf{a}}rphi_n)_{n\ge1}$ converges to $u$ in $\mbox{ker}\left(I-\Sigma\right)$. According to the min-max characterization of eigenvalues and the previous density result, \begin{equation*} \mu_N=\inf_{\substack{ \mathcal E\subset \mathcal D^s\text{ subspace}\\ \mbox{dim}(\mathcal E)=N}}\max_{u\in \mathcal E} \frac{\widehat q_0(u)}{\|u\|^2}. \end{equation*} Let us now fix $\delta>0$ and an $N$-dimensional subspace $\mathcal E_\delta\subset \mathcal D^s$ such that \begin{equation*} \max_{u\in \mathcal E_\delta} \frac{\widehat q_0(u)}{\|u\|^2}\le \mu_N+\delta. \end{equation*} There exists $\ensuremath{\ensuremath{\mathbf{a}}repsilon}_1>0$ such that $\mathcal E_\delta\subset \widehat{\mathcal Q}_\ensuremath{\ensuremath{\mathbf{a}}repsilon}^s$ for every $\ensuremath{\ensuremath{\mathbf{a}}repsilon}\in(0,\ensuremath{\ensuremath{\mathbf{a}}repsilon}_1]$. 
This implies that, for every $\ensuremath{\ensuremath{\mathbf{a}}repsilon}\in(0,\ensuremath{\ensuremath{\mathbf{a}}repsilon}_1]$, \begin{equation*} \mu_N(\ensuremath{\ensuremath{\mathbf{a}}repsilon})=\min_{\substack{ \mathcal E\subset \widehat{\mathcal Q}_\ensuremath{\ensuremath{\mathbf{a}}repsilon}^s\text{ subspace}\\ \mbox{dim}(\mathcal E)=N}}\max_{u\in \mathcal E} \frac{\widehat q_\ensuremath{\ensuremath{\mathbf{a}}repsilon}^s(u)}{\|u\|^2} \le\max_{u\in \mathcal E_\delta} \frac{\widehat q_0(u)}{\|u\|^2}\le \mu_N+\delta. \end{equation*} Passing to the limit, we obtain first $\mu_N^*\le \mu_N+\delta$, and then $\mu_N^*\le\mu_N$, concluding the proof. Let us finally prove Proposition \ref{p:AsymptNDN}. We recall that, as a corollary of Theorem 1.10 in \cite{AFHL2016}, taking into account Proposition \ref{p:A3} we have the following result. \begin{prop}\label{p:AFHL} Let $\widehat\lambda_N$ be a simple eigenvalue of $-\widehat\Delta$ and $u_N$ an associated eigenfunction normalized in $L^2(\widehat\Omega)$. Let us assume that $u_N\in \widehat{\mathcal Q}^s$. For $\ensuremath{\ensuremath{\mathbf{a}}repsilon}>0$ small, we denote as $\widehat \lambda_N(\ensuremath{\ensuremath{\mathbf{a}}repsilon})$ the $N$-th eigenvalue of the Dirichlet Laplacian in $\widehat\Omega\setminus \Gamma_\ensuremath{\ensuremath{\mathbf{a}}repsilon}$. If $u_N(0)\neq0$, then \begin{equation*} \widehat \lambda_N(\ensuremath{\ensuremath{\mathbf{a}}repsilon})=\widehat \lambda_N+\frac{2\pi}{|\log(\ensuremath{\ensuremath{\mathbf{a}}repsilon})|}u_N(0)^2+o\left(\frac1{|\log(\ensuremath{\ensuremath{\mathbf{a}}repsilon})|}\right) \quad \text{as $\ensuremath{\ensuremath{\mathbf{a}}repsilon}\to0^+$}. 
\end{equation*} If \begin{equation*} r^{-k} u_N(r\cos t,r\sin t)\to \widehat\beta \cos\left(k t\right) \mbox{ in } C^{1,\tau}\left([0,\pi],\ensuremath{\mathbb{R}}\right) \end{equation*} as $r\to 0^+$ for all $\tau\in (0,1)$, with $k\in \ensuremath{\mathbb{N}}^*$ and $\widehat\beta\in \ensuremath{\mathbb{R}}\setminus \{0\}$, then \begin{equation*} \widehat \lambda_N(\ensuremath{\ensuremath{\mathbf{a}}repsilon})=\widehat \lambda_N+\frac{k\pi{\widehat\beta}^2}{4^{k-1}}\left(\begin{array}{c} k-1\\ \left\lfloor\frac{k-1}2\right\rfloor\end{array}\right)^2\ensuremath{\ensuremath{\mathbf{a}}repsilon}^{2k}+o\left(\ensuremath{\ensuremath{\mathbf{a}}repsilon}^{2k}\right) \quad \text{as $\ensuremath{\ensuremath{\mathbf{a}}repsilon}\to0^+$}. \end{equation*} \end{prop} Let us note that if the hypotheses of Proposition \ref{p:AFHL} are satisfied, $\widehat\lambda_N$ is a simple eigenvalue of $-\Delta^s$ and $u_N$ an associated eigenfunction. But the converse is not true. Indeed, we have seen in Section \ref{subs:Disk}, in the case of $\widehat\lambda_3$ for the unit disk that $\widehat\lambda_N$ can be simple for $-\Delta^s$ without being simple for $-\widehat\Delta$. Proposition \ref{p:AFHL} is therefore weaker than Proposition \ref{p:AsymptNDN}. However, the proof of Theorem 1.10 in \cite{AFHL2016} can be adapted to prove Proposition \ref{p:AsymptNDN}. Let us sketch the changes to be made. The proof in \cite{AFHL2016} mainly relies on Theorem 1.4 of \cite{AFHL2016}, and uses the $u$-capacity and the associated potential defined in \cite[Equations (6), (7), and (8)]{AFHL2016}. The following Lemma gives an alternative expression when both $u$ and the compact set $K$ are symmetric; it follows easily from Steiner symmetrization arguments. 
\begin{lem} \label{l:SymCap} If $u\in\widehat Q^s$ and $K\subset \widehat \Omega$ is a compact set such that $\sigma(K)=K$, then \begin{equation*} \mbox{Cap}_{\widehat\Omega}(K,u)=\min\left\{\widehat q^s(V)\,:\,V\in \widehat{\mathcal Q}^s\mbox{ and }u-V\in H^1_0(\widehat\Omega\setminus K)\right\} \end{equation*} and the potential $V_{K,u}$ attaining the above minimum belongs to $\widehat{\mathcal Q}^s$. \end{lem} Our proof of Proposition \ref{p:AsymptNDN} relies on the following analog to \cite[Theorem 1.4]{AFHL2016}. \begin{prop}\label{p:AsymCap} Let $\mu_L$ be a simple eigenvalue of $-\Delta^s$ and $u_L$ an associated eigenfunction, normalized in $L^2(\widehat\Omega)$. Then \begin{equation*} \mu_L(\ensuremath{\ensuremath{\mathbf{a}}repsilon})=\mu_L+\mbox{Cap}_{\widehat\Omega}(\Gamma_\ensuremath{\ensuremath{\mathbf{a}}repsilon},u_L)+o\left(\mbox{Cap}_{\widehat\Omega}(\Gamma_\ensuremath{\ensuremath{\mathbf{a}}repsilon},u_L)\right) \quad \text{as $\ensuremath{\ensuremath{\mathbf{a}}repsilon}\to0^+$}. \end{equation*} \end{prop} In order to prove Proposition \ref{p:AsymCap}, we note that Lemma \ref{l:SymCap} implies in particular that $u_L-V_{\Gamma_\ensuremath{\ensuremath{\mathbf{a}}repsilon},u_L}$ is the orthogonal projection of $u_L$ on $H^1_0(\widehat\Omega\setminus \Gamma_\ensuremath{\ensuremath{\mathbf{a}}repsilon})\cap \widehat{\mathcal Q}^s$ and $\mbox{Cap}_{\widehat \Omega}(\Gamma_\ensuremath{\ensuremath{\mathbf{a}}repsilon},u_L)$ the square of the distance of $u_L$ from $H^1_0(\widehat\Omega\setminus \Gamma_\ensuremath{\ensuremath{\mathbf{a}}repsilon})\cap \widehat{\mathcal Q}^s$, both defined with respect to the scalar product induced by $\widehat q^s$ on $\widehat{\mathcal Q}^s$. We also note that we can use the estimates of $V_{\Gamma_\ensuremath{\ensuremath{\mathbf{a}}repsilon},u_L}$ given in Lemma A.1 and Corollary A.2 of \cite{AFHL2016}. 
We can therefore repeat step by step the proof of Theorem 1.4 in Appendix A of \cite{AFHL2016}, replacing $L^2(\widehat\Omega)$ by $\mbox{ker}\left(I-\Sigma\right)$, $H^1_0(\widehat \Omega)$ by $\widehat{\mathcal Q}^s$, $H^1_0(\widehat\Omega\setminus \Gamma_\varepsilon)$ by $H^1_0(\widehat\Omega\setminus \Gamma_\varepsilon)\cap \widehat{\mathcal Q}^s$, $\widehat q$ and $\widehat q_\varepsilon$ by $\widehat q^s$ and $\widehat q_\varepsilon^s$, $-\widehat\Delta$ and $-\widehat\Delta_\varepsilon$ by $-\Delta^s$ and $-\Delta_\varepsilon^s$, $\widehat\lambda_N$ by $\mu_L$ and $u_N\in H^1_0(\widehat \Omega)$ by $u_L\in \widehat{\mathcal Q}^s$. We obtain Proposition \ref{p:AsymCap}. The estimates of $\mbox{Cap}_{\widehat\Omega}(\Gamma_\varepsilon,u)$ proved in \cite[Section 2]{AFHL2016} then give us Proposition \ref{p:AsymptNDN}. \section{Alternative proof of Theorem \ref{t:gad}}\label{sec:altern-proof-theor} We find it useful to present an alternative proof of Theorem \ref{t:gad}. This proof is based on sharp estimates from above and below of the Rayleigh quotients for the eigenvalues $\lambda_N$ and $\lambda_N(\varepsilon)$. Such estimates require energy bounds on eigenfunctions obtained by an Almgren type monotonicity argument and blow-up analysis for scaled eigenfunctions. We mention that such a strategy was first developed in \cite{abatangelo2015sharp,AbatangeloFelli2016SIAM,AbatangeloFelliNorisNys2016,NNT} for eigenvalues of Aharonov--Bohm operators with a moving pole.
On the other hand, the implementation of this procedure for our problem requires a quite different technique with respect to the case of Aharonov--Bohm operators with a single pole, when estimating a singular term appearing in the derivative of the Almgren frequency function (i.e. the term \eqref{eq:Meps}). Indeed, in the single pole case estimates can be derived by rewriting the problem as a Laplace equation on the twofold covering, whereas in this case the singular term \eqref{eq:Meps} turns out to have a negative sign and this is enough to proceed with the monotonicity argument (see Subsection \ref{s:monotonicity}). In this argument, an important step is a blow-up result for scaled eigenfunctions. In what follows, we aim at pointing out the main steps of the proof, together with a deeper analysis at the crucial points. We list below some notation used throughout this appendix.\par \begin{itemize} \item[-] For $r>0$ and $a\in\ensuremath{\mathbb{R}}^2$, $D_r(a)=\{x\in\ensuremath{\mathbb{R}}^2:|x-a|<r\}$ denotes the disk of center $a$ and radius $r$. We also denote the corresponding upper half-disk as $D_r^+(a)=\{(x_1,x_2)\in D_r(a):x_2>0\}$. \item[-] For all $r>0$, $D_r=D_r(0)$ is the disk of center $0$ and radius $r$; $D_r^+=\{(x_1,x_2)\in D_r:x_2>0\}$ denotes the corresponding upper half-disk. \item[-] For $r>0$ and $a\in\ensuremath{\mathbb{R}}^2$, $S_r^+(a)=\{(x_1,x_2)\in\partial D_r(a):x_2>0\}$ denotes the upper half-circle of center $a$ and radius $r$. We also denote $S_r^+:=S_r^+(0)$. \end{itemize} \subsection{Limit profile} This section contains a variational construction of the limit profile which will be used to describe the limit of the blow-up sequence.
Let us consider the functional $J_k: \mathcal Q\to\ensuremath{\mathbb{R}}$ (see Subsection \ref{subsec:relres} for the definition of $\mathcal Q$) \begin{equation}\label{eq:Jk} J_k(u) = \frac12 \int_{\ensuremath{\mathbb{R}}^2_+} |\nabla u(x)|^2 \,dx- \int_{-1}^1 u(x_1,0)\frac{\partial \psi_k}{\partial x_2}(x_1,0)\,dx_1, \end{equation} with $\psi_k$ defined in \eqref{eq:psi_k}. We observe that $\frac{\partial \psi_k}{\partial x_2}(x_1,0) = k {x_1}^{k-1}$ and $J_k$ is well-defined on $\mathcal Q$. \begin{lem}\label{l:wk} For all $k\in\ensuremath{\mathbb{N}}$, $k\geq1$, let $w_k\in \mathcal Q$ be the unique weak solution to \eqref{eq:wk} and let $\mathfrak m_k=-\frac12\int_{\ensuremath{\mathbb{R}}^2_+}\left|\nabla w_k\right|^2\,dx$ be as in \eqref{eq:mathfrak-m}. Then \begin{equation}\label{eq:Ik} \mathfrak m_k=\min_{u\in\mathcal Q}J_k(u)=J_k(w_k)<0. \end{equation} Furthermore, $w_k(x)=O\big(\tfrac{1}{|x|}\big)$ as $|x|\to+\infty$. \end{lem} \begin{proof} The proof follows from standard minimization methods, Hardy Inequality and Kelvin Transform. \end{proof} \begin{lem}\label{l:Phi} For every $k\in \ensuremath{\mathbb{N}}$, $k\geq1$, there exists a unique $\Phi_k \in \bigcap_{R>0}H^1(D_R^+)$ such that \begin{equation}\label{eq:3} \begin{cases} \Phi_k-\psi_k\in \mathcal Q,\\ -\Delta \Phi_k =0, &\text{in } \ensuremath{\mathbb{R}}^2_+ \text{ in a distributional sense},\\ \Phi_k =0 &\text{on } s,\\ \frac{\partial \Phi_k}{\partial \nu}=0&\text{on }\Gamma_1, \end{cases} \end{equation} where $\nu=(0,-1)$ is the outer normal unit vector on $\partial \ensuremath{\mathbb{R}}^2_+$. Furthermore, the unique solution to \eqref{eq:3} is given by \[ \Phi_k=\psi_k + w_k, \] where $w_k$ is as in Lemma \ref{l:wk} and $\psi_k$ is defined in \eqref{eq:psi_k}. \end{lem} \begin{proof} The existence part is proved by taking $\Phi_k=\psi_k + w_k$. 
To prove uniqueness, one can argue by contradiction exploiting the Hardy Inequality (see\cite[Proposition 4.3]{abatangelo2015sharp} for a detailed proof in a similar problem). \end{proof} For future convenience, we state and prove here the following lemma, which relates the limit profile $\Phi_k$ (more precisely, its $k$-th Fourier coefficient) to the minimum $\mathfrak{m}_k$. \begin{lem}\label{l:xi1} Let $\Phi_k$ be as in Lemma \ref{l:Phi}. Then \[ \int_0^\pi \Phi_k(\cos t,\sin t) \,\sin(kt)\,dt = -\frac{\mathfrak{m}_k}k + \frac\pi2. \] \end{lem} \begin{proof} Let us define the function \[ \omega(r):= \int_0^\pi w_k(r\cos t,r\sin t)\,\sin(kt)\,dt,\quad r>0, \] where $w_k$ is as in Lemma \ref{l:wk}. Then, recalling that $\Phi_k= w_k+\psi_k$, we have that \begin{equation}\label{eq:omega1primopasso} \omega(1)= \int_0^\pi \Phi_k(\cos t,\sin t) \,\sin(kt)\,dt - \frac\pi2. \end{equation} Since $\omega$ is the $k$-th Fourier coefficient of the harmonic function $w_k$, it satisfies the differential equation $\omega''+\frac1r\omega'-\frac{k^2}{r^2}\omega=0$ in $(1,+\infty)$, i.e. $(r^{1+2k}(r^{-k} \omega)')'=0$. Hence there exists $C_\omega\in\ensuremath{\mathbb{R}}$ such that $\big( r^{-k}\omega(r) \big)'= C_\omega r^{-(1+2k)}$, for $r>1$. Integrating the previous equation over $(1,r)$ we obtain that \[ \dfrac{\omega(r)}{r^{k}} - \omega(1) = \frac{C_\omega}{2k}\left( 1-\frac1{r^{2k}} \right),\quad\text{for all }r\geq1. \] Lemma \ref{l:wk} provides that $\omega(r)=O(r^{-1})$ as $r\to+\infty$, hence, letting $r\to+\infty$ in the previous identity, we obtain that necessarily $C_\omega=-2k\omega(1)$ and then \begin{equation}\label{eq:120} \omega(r)= \omega(1) r^{-k},\quad \omega'(r)= -k\omega(1) r^{-k -1},\quad\text{for all }r\geq1. \end{equation} On the other hand, by definition \begin{equation}\label{eq:121} \omega'(r)= {r^{-k-1}} \int_{ S_r^+} \frac{\partial w_k}{\partial \nu}\,\psi_k\,ds, \end{equation} with $\nu$ being the outer unit vector to $\partial D_r^+$. 
Combining \eqref{eq:120} and \eqref{eq:121} we obtain that \begin{equation*} \omega(1)=-\dfrac{1}{k} \int_{S_1^+} \dfrac{\partial w_k}{\partial \nu}\,\psi_k\,ds. \end{equation*} Multiplying the equation $-\Delta w_k=0$ by $\psi_k$, integrating by parts on $D_1^+$, and recalling that $\psi_k\equiv0$ on $\Gamma_1$, we obtain that \begin{equation*} \int_{D_1^+} \nabla w_k \cdot \nabla \psi_k\,dx=\int_{\partial D_1^+} \dfrac{\partial w_k}{\partial \nu}\,\psi_k\,ds = \int_{S_1^+} \dfrac{\partial w_k}{\partial \nu}\,\psi_k\,ds, \end{equation*} whereas multiplying $-\Delta\psi_k=0$ by $w_k$ and integrating by parts on $D_1^+$ we obtain that \begin{equation*} \int_{D_1^+} \nabla w_k \cdot \nabla \psi_k\,dx=\int_{\partial D_1^+} \dfrac{\partial \psi_k}{\partial \nu}\,w_k\,ds. \end{equation*} Taking into account the boundary data, we obtain that \[ \int_{S_1^+} \dfrac{\partial w_k}{\partial\nu}\,\psi_k = \int_{S_1^+} \dfrac{\partial \psi_k}{\partial\nu}\,w_k - \int_{\Gamma_1} \dfrac{\partial\psi_k}{\partial x_2}\,w_k, \] so that \begin{equation}\label{eq:omega1} \omega(1)= -\frac1k \int_{S_1^+} \dfrac{\partial \psi_k}{\partial\nu}\,w_k + \dfrac1k \int_{\Gamma_1} \dfrac{\partial \psi_k}{\partial x_2} \,w_k. \end{equation} Since $\frac{\partial\psi_k}{\partial\nu}=k\psi_k$ on $S_1^+$, it results that $k\omega(1)=\int_{S_1^+} \frac{\partial\psi_k}{\partial\nu}w_k$, so that \eqref{eq:omega1} can be rewritten as $\omega(1)=-\omega(1) + \frac{1}k \int_{\Gamma_1} \frac{\partial \psi_k}{\partial x_2} \,w_k$ and thus \begin{equation*} \omega(1)= {\frac{1}{2k}} \int_{\Gamma_1} \dfrac{\partial \psi_k}{\partial x_2}\,w_k . \end{equation*} From \eqref{eq:Ik} we deduce that $\omega(1)= -\frac{1}{k} {\mathfrak m}_k$, and recalling \eqref{eq:omega1primopasso} the proof is concluded. 
\end{proof} \subsection{Monotonicity argument}\label{s:monotonicity} In order to prove convergence of blow-up eigenfunctions, energy estimates in small neighborhoods of the Dirichlet-Neumann junctions are needed; such estimates are obtained via an Almgren type monotonicity argument which is sketched here. For $\lambda \in \ensuremath{\mathbb{R}}$, $u \in H^{1}(\Omega)$ and $r\in(0,\ensuremath{\ensuremath{\mathbf{a}}repsilon}_0)$ such that $D_r^+\subset\Omega$, the Almgren frequency function is defined as \[ \mathcal{N}(u,r,\lambda) = \dfrac{E(u,r,\lambda)}{H(u,r)}, \] where \begin{equation*} E(u,r,\lambda) = \int_{D_r^+} \Big( |\nabla u(x)|^2 - \lambda u^2(x) \Big) \,dx, \quad H(u,r) = \dfrac1r \int_{S_r^+} u^2\,ds . \end{equation*} In the following, we assume that assumption \eqref{eq:6} is satisfied, i.e. the $N$-th eigenvalue $\lambda_N$ of $q_0$ is simple, and we fix an associated normalized eigenfunction $u_N$, so that $u_N$ satisfies \eqref{eq:5}. For all $1\leq n< N$, let $u_n\in H^1_0(\Omega)$ be an eigenfunction of $q_0$ associated to the eigenvalue $\lambda_n$ such that \begin{equation*} \int_\Omega |u_n(x)|^2\,dx=1\quad \text{for all }1\leq n<N \end{equation*} and \begin{equation*} \int_\Omega u_n(x) u_m(x)\,dx=0\quad \text{if }1\leq n,m\leq N\text{ and }n\neq m. \end{equation*} For every $\ensuremath{\ensuremath{\mathbf{a}}repsilon}\in (0,\ensuremath{\ensuremath{\mathbf{a}}repsilon}_0]$, let $u_N^{\ensuremath{\ensuremath{\mathbf{a}}repsilon}}$ be an eigenfunction of $q_{\ensuremath{\ensuremath{\mathbf{a}}repsilon}}$ associated with $\lambda_N(\ensuremath{\ensuremath{\mathbf{a}}repsilon})$, i.e. 
solving \begin{equation}\label{eqDNDeps} \begin{cases} -\Delta\,u_N^{\ensuremath{\ensuremath{\mathbf{a}}repsilon}}=\lambda_N(\ensuremath{\ensuremath{\mathbf{a}}repsilon})\,u_N^{\ensuremath{\ensuremath{\mathbf{a}}repsilon}},& \mbox{in }\Omega,\\ u_N^{\ensuremath{\ensuremath{\mathbf{a}}repsilon}}=0,&\mbox{on } \partial\Omega\setminus \Gamma_{\ensuremath{\ensuremath{\mathbf{a}}repsilon}},\\[5pt] \dfrac{\partial u_N^{\ensuremath{\ensuremath{\mathbf{a}}repsilon}}}{\partial \nu}=0,&\mbox{on } \Gamma_{\ensuremath{\ensuremath{\mathbf{a}}repsilon}}, \end{cases} \end{equation} such that \begin{equation}\label{eq:7} \int_\Omega |u_N^{\ensuremath{\ensuremath{\mathbf{a}}repsilon}}(x)|^2\,dx=1\quad\text{and}\quad \int_{\Omega}u_N^{\ensuremath{\ensuremath{\mathbf{a}}repsilon}}(x)\,u_N (x)\,dx\ge0. \end{equation} For all $1\leq n< N$ and $\ensuremath{\ensuremath{\mathbf{a}}repsilon}\in (0,\ensuremath{\ensuremath{\mathbf{a}}repsilon}_0]$, let $u_n^\ensuremath{\ensuremath{\mathbf{a}}repsilon}\in \mathcal Q_{\ensuremath{\ensuremath{\mathbf{a}}repsilon}}$ be an eigenfunction of problem \eqref{eqDND} associated to the eigenvalue $\lambda=\lambda_n(\ensuremath{\ensuremath{\mathbf{a}}repsilon})$, i.e. 
solving \begin{equation}\label{eq:equneps} \begin{cases} -\Delta\,u_n^{\ensuremath{\ensuremath{\mathbf{a}}repsilon}}=\lambda_n(\ensuremath{\ensuremath{\mathbf{a}}repsilon})\,u_n^{\ensuremath{\ensuremath{\mathbf{a}}repsilon}},& \mbox{in }\Omega,\\ u_n^{\ensuremath{\ensuremath{\mathbf{a}}repsilon}}=0,&\mbox{on } \partial\Omega\setminus \Gamma_{\ensuremath{\ensuremath{\mathbf{a}}repsilon}},\\[5pt] \dfrac{\partial u_n^{\ensuremath{\ensuremath{\mathbf{a}}repsilon}}}{\partial \nu}=0,&\mbox{on } \Gamma_{\ensuremath{\ensuremath{\mathbf{a}}repsilon}}, \end{cases} \end{equation} such that \begin{equation}\label{eq:8} \int_\Omega |u_n^\ensuremath{\ensuremath{\mathbf{a}}repsilon}(x)|^2\,dx=1\quad \text{for all }1\leq n<N \end{equation} and \begin{equation}\label{eq:14} \int_\Omega u_n^\ensuremath{\ensuremath{\mathbf{a}}repsilon}(x) u_m^\ensuremath{\ensuremath{\mathbf{a}}repsilon}(x)\,dx=0\quad \text{if }1\leq n,m\leq N\text{ and }n\neq m. \end{equation} We observe that, in view of Remark \ref{remIneq}, \begin{equation}\label{eq:bound_l_neps} \lambda_n(\ensuremath{\ensuremath{\mathbf{a}}repsilon})\leq \lambda_N\quad\text{for all }\ensuremath{\ensuremath{\mathbf{a}}repsilon}\in(0,\ensuremath{\ensuremath{\mathbf{a}}repsilon}_0]\text{ and }1\leq n\leq N. 
\end{equation} Arguing as in \cite[Lemma 5.2]{abatangelo2015sharp}, it is possible to prove the following properties: \begin{enumerate}[\rm (i)] \item there exists $R_0\in\big(0,\min\big\{\ensuremath{\ensuremath{\mathbf{a}}repsilon}_0,\frac1{2\sqrt{\lambda_N}}\big\}\big)$ such that $D^+_{R_0}\subset\Omega$ and \begin{equation*} H(u_n^\ensuremath{\ensuremath{\mathbf{a}}repsilon},r)>0\quad\text{for all }\ensuremath{\ensuremath{\mathbf{a}}repsilon}\in(0,R_0),\ r\in(\ensuremath{\ensuremath{\mathbf{a}}repsilon},R_0) \text{ and }1\leq n\leq N; \end{equation*} \item \label{prop--ii} for every $r\in(0,R_0]$, there exist $C_r > 0$ and $\alpha_r \in (0,r)$ such that $H(u_n^\ensuremath{\ensuremath{\mathbf{a}}repsilon},r) \geq C_r$ for all $\ensuremath{\ensuremath{\mathbf{a}}repsilon}\in(0, \alpha_r)$ and $1 \leq n\leq N$. \end{enumerate} By direct calculations it follows that, for all $\ensuremath{\ensuremath{\mathbf{a}}repsilon}\in(0,R_0)$, $\ensuremath{\ensuremath{\mathbf{a}}repsilon}<r<R_0$, and $n\in\{1,2,\dots,N\}$, \begin{align}\label{eq:derH} & \dfrac{d}{dr} H(u_n^\ensuremath{\ensuremath{\mathbf{a}}repsilon},r) = \dfrac2r\int_{S_r^+}u_n^\ensuremath{\ensuremath{\mathbf{a}}repsilon} \frac{\partial u_n^\ensuremath{\ensuremath{\mathbf{a}}repsilon}}{\partial\nu}\,ds = \dfrac2r E(u_n^\ensuremath{\ensuremath{\mathbf{a}}repsilon},r,\lambda_n(\ensuremath{\ensuremath{\mathbf{a}}repsilon})), \\ \label{eq:derE}& \dfrac{d}{dr} E(u_n^\ensuremath{\ensuremath{\mathbf{a}}repsilon},r,\lambda_n(\ensuremath{\ensuremath{\mathbf{a}}repsilon}))= = 2\int_{S_r^+} \abs{\frac{\partial u_n^\ensuremath{\ensuremath{\mathbf{a}}repsilon}}{\partial\nu}}^2\,ds- \frac2r \left( M(\ensuremath{\ensuremath{\mathbf{a}}repsilon},u_n^\ensuremath{\ensuremath{\mathbf{a}}repsilon},\lambda_n(\ensuremath{\ensuremath{\mathbf{a}}repsilon})) + \lambda_n(\ensuremath{\ensuremath{\mathbf{a}}repsilon}) \int_{D_r^+} (u_n^\ensuremath{\ensuremath{\mathbf{a}}repsilon}(x))^2\,dx \right) \end{align} where $\nu$ denotes the 
exterior normal unit vector to $D_r^+$ and \begin{equation}\label{eq:Meps} M(\varepsilon,u,\lambda) = \lim_{\delta\to0^+} \int_{\ensuremath{\mathbb{R}}^2_+\cap\partial A_\delta^\varepsilon}\bigg( \frac12 |\nabla u|^2 x \cdot \boldsymbol n - \frac{\partial u}{\partial \boldsymbol n}(x\cdot\nabla u) - \frac{\lambda}{2} u^2 x\cdot \boldsymbol n\bigg)ds, \end{equation} where $A_\delta^{\varepsilon} := D^+_\delta(-\varepsilon,0) \cup D_\delta^+(\varepsilon,0)$ and $\boldsymbol n$ denotes the exterior normal unit vector to $D_r^+\setminus A_\delta^\varepsilon$. For details in a similar problem see \cite[Lemmas 5.5 and 5.6]{NNT}. A crucial step in the monotonicity argument is the possibility of recognizing the sign of the quantity $M(\varepsilon,u,\lambda)$. To this aim, we first state the following result describing the behaviour of solutions to \eqref{eqDND} at Dirichlet-Neumann boundary junctions. \begin{prop}\label{p:asymptotics} Let $\varepsilon \in (0,\varepsilon_0)$, $\lambda\in \ensuremath{\mathbb{R}}$, and $u \in \mathcal{Q}_\varepsilon\setminus\{0\}$ be a nontrivial solution to problem \eqref{eqDND}.
Then there exist two odd natural numbers $j_L=j_L(\ensuremath{\ensuremath{\mathbf{a}}repsilon},u,\lambda),j_R =j_R(\ensuremath{\ensuremath{\mathbf{a}}repsilon},u,\lambda)\in \ensuremath{\mathbb{N}}$ and two nonzero real numbers $\beta_L=\beta_L(\ensuremath{\ensuremath{\mathbf{a}}repsilon},u,\lambda),\beta_R=\beta_R(\ensuremath{\ensuremath{\mathbf{a}}repsilon},u,\lambda) \in \ensuremath{\mathbb{R}}\setminus\{0\}$ such that \begin{align} &\delta^{-{j_L}/2} u((-\ensuremath{\ensuremath{\mathbf{a}}repsilon},0) + \delta \boldsymbol \theta(t)) \to \beta_L \cos\big(\tfrac{j_L}{2}t\big) \quad \text{ in } C^{1,\sigma}([0,\pi]), \label{eq:asy-eigen-left}\\ &\delta^{-{j_R}/2} u((\ensuremath{\ensuremath{\mathbf{a}}repsilon},0) + \delta\boldsymbol \theta(t)) \to \beta_R \sin\big(\tfrac{j_R}{2}t\big) \quad \text{ in } C^{1,\sigma}([0,\pi]),\label{eq:asy-eigen-right} \end{align} as $\delta\to0^+$ for any $\sigma\in (0,1)$, where $\boldsymbol \theta(t) = (\cos t,\sin t)$. Moreover, \begin{align} &\delta^{-{j_L}/2+1} \nabla u((-\ensuremath{\ensuremath{\mathbf{a}}repsilon},0) + \delta \boldsymbol \theta(t)) \to \tfrac{j_L \beta_L }{2} \left(\cos\big(\tfrac{j_L}{2}t\big) \boldsymbol\theta(t) - \sin\big(\tfrac{j_L}{2}t\big) \boldsymbol\tau(t)\right)\label{eq:asy-eigen-grad-left}\\ &\delta^{-{j_R}/2+1} \nabla u((\ensuremath{\ensuremath{\mathbf{a}}repsilon},0) + \delta \boldsymbol \theta(t)) \to \tfrac{j_R \beta_R }{2} \left(\sin\big(\tfrac{j_R}{2}t\big)\boldsymbol\theta(t) + \cos\big(\tfrac{j_R}{2}t\big)\boldsymbol\tau(t) \right)\label{eq:asy-eigen-grad-right} \end{align} in $C^{0,\sigma}([0,\pi])$ as $\delta\to0^+$ for any $\sigma\in (0,1)$, where $\boldsymbol \tau(t)=(-\sin t,\cos t)$. 
\end{prop} \begin{proof} Through a gauge transformation, in a neighbourhood of each junction $(\pm \ensuremath{\ensuremath{\mathbf{a}}repsilon},0)$ the problem can be rewritten as an elliptic equation with an Aharonov--Bohm vector potential with pole located at the junction; then the asymptotics follows from \cite[Theorem 1.3]{FFT}. \end{proof} \begin{lem}\label{l:segnoM} Let $\ensuremath{\ensuremath{\mathbf{a}}repsilon}\in(0,\ensuremath{\ensuremath{\mathbf{a}}repsilon}_0]$ and $u\in \mathcal{Q}_\ensuremath{\ensuremath{\mathbf{a}}repsilon}$ be a solution to \eqref{eqDND} for some $\lambda\in\ensuremath{\mathbb{R}}$. Moreover, let $j_L=j_L(\ensuremath{\ensuremath{\mathbf{a}}repsilon},u,\lambda),\,j_R=j_R(\ensuremath{\ensuremath{\mathbf{a}}repsilon},u,\lambda)\in \ensuremath{\mathbb{N}}$ odd and $\beta_L=\beta_L(\ensuremath{\ensuremath{\mathbf{a}}repsilon},u,\lambda)$, $\beta_R=\beta_R(\ensuremath{\ensuremath{\mathbf{a}}repsilon},u,\lambda) \in \ensuremath{\mathbb{R}}\setminus\{0\}$ be as in Proposition \ref{p:asymptotics} and let $M(\ensuremath{\ensuremath{\mathbf{a}}repsilon},u,\lambda)$ be as in \eqref{eq:Meps}. Then \begin{equation*}\label{eq:Meps2} M(\ensuremath{\ensuremath{\mathbf{a}}repsilon},u,\lambda)= \begin{cases} 0, &\text{if }j_L>1 \text{ and }j_R >1,\\ -\ensuremath{\ensuremath{\mathbf{a}}repsilon}\frac{\pi}{8} \beta_L^2, &\text{if }j_L =1 \text{ and }j_R >1,\\ -\ensuremath{\ensuremath{\mathbf{a}}repsilon}\frac{\pi}{8}\beta_R^2, &\text{if }j_L >1 \text{ and }j_R =1,\\ -\ensuremath{\ensuremath{\mathbf{a}}repsilon}\frac{\pi}{8} \big(\beta_L^2 + \beta_R^2 \big), &\text{if }j_L =1 \text{ and }j_R=1. \end{cases} \end{equation*} In particular, $M(\ensuremath{\ensuremath{\mathbf{a}}repsilon},u,\lambda)\leq0$. 
\end{lem} \begin{proof} Since $\partial A_\delta^\varepsilon\cap \ensuremath{\mathbb{R}}^2_+ = S_\delta^+(-\varepsilon,0) \cup S_\delta^+(\varepsilon,0)$, we split \eqref{eq:Meps} into the corresponding two contributions. \noindent \textbf{Negligible terms.} On $S_\delta^+(-\varepsilon,0)$, we have that $x=(-\varepsilon,0)+\delta \boldsymbol\theta(t)$ for some $t\in[0,\pi]$ and $\boldsymbol n =-\boldsymbol\theta$, where $\boldsymbol\theta(t)=(\cos t,\sin t )$; hence $x\cdot \boldsymbol n = \varepsilon \cos t - \delta$. From \eqref{eq:asy-eigen-left} and \eqref{eq:asy-eigen-grad-left} we have that $u((-\varepsilon,0)+\delta \boldsymbol\theta(t))\to 0$ and $|\nabla u((-\varepsilon,0)+\delta \boldsymbol\theta(t))|^2 = \frac{j_L^2\beta_L^2}4\delta^{j_L-2}(1+o(1))$ uniformly on $[0,\pi]$ as $\delta\to0$. From the Dominated Convergence Theorem we then obtain \begin{align*} \int_{S_\delta^+(-\varepsilon,0)}& (|\nabla u|^2 -{\lambda}u^2)\,x\cdot \boldsymbol n\,ds\\ & =\delta\!\int_0^\pi\!\! (|\nabla u((-\varepsilon,0)+\delta \boldsymbol\theta(t))|^2 -{\lambda}|u((-\varepsilon,0)+\delta \boldsymbol\theta(t))|^2)\,(\varepsilon \cos t - \delta) \,dt\\ & \to \begin{cases} 0, &\text{if }j_L>1,\\ \frac{\beta_L^2\varepsilon}4\int_0^\pi \cos t \,dt=0, &\text{if }j_L=1, \end{cases} \end{align*} as $\delta\to0$.
\noindent \textbf{Leading term.} We now look at the last term \[ - \int_{S_\delta^+(-\varepsilon,0)} \frac{\partial u}{\partial \boldsymbol n}(x\cdot\nabla u)\,ds = \int_{S_\delta^+(-\varepsilon,0)}(\boldsymbol \theta \cdot\nabla u)(x\cdot\nabla u)\,ds, \] since $\boldsymbol \theta =-\boldsymbol n$ on $S_\delta^+(-\varepsilon,0)$. From \eqref{eq:asy-eigen-grad-left} we have \[ \delta^{-j_L /2 +1}\nabla u((-\varepsilon,0)+\delta \boldsymbol\theta(t)) \cdot \boldsymbol \theta(t) \to \frac{j_L}{2} \beta_L \cos\bigg(\frac{j_L}{2} t\bigg) \] in $C^0([0,\pi])$ as $\delta\to0$. On the other hand, for $x=(-\varepsilon,0)+\delta\boldsymbol \theta(t)$ we have that \[ \delta^{-\frac{j_L}2 +1}\nabla u((-\varepsilon,0)+\delta \boldsymbol\theta(t)) \cdot x \to -\varepsilon \frac{j_L}{2} \beta_L \left( \cos\Big(\frac{j_L}{2} t\Big) \cos t + \sin\Big(\frac{j_L}{2} t\Big) \sin t\right) \] in $C^0([0,\pi])$ as $\delta\to0$.
Thus, by the Dominated Convergence Theorem, we have \begin{align*} & - \int_{S_\delta^+(-\ensuremath{\ensuremath{\mathbf{a}}repsilon},0)} \frac{\partial u}{\partial \boldsymbol n}(x\cdot\nabla u)\,ds\\ &=\delta\!\int_0^\pi \!\!\!\left(\nabla u((-\ensuremath{\ensuremath{\mathbf{a}}repsilon},0)+\delta\boldsymbol \theta(t))\!\cdot\!((-\ensuremath{\ensuremath{\mathbf{a}}repsilon},0)+\delta\boldsymbol \theta(t))\right) \left(\nabla u((-\ensuremath{\ensuremath{\mathbf{a}}repsilon},0)+\delta\boldsymbol \theta(t))\!\cdot\!\boldsymbol\theta(t)\right)dt\\ &\to \begin{cases} 0,&\text{if }j_L>1,\\ -\frac{\ensuremath{\ensuremath{\mathbf{a}}repsilon} }{4}(\beta_L)^2 \int_0^\pi \left( \cos^2\big(\frac{t}{2} \big) \cos t + \cos\big(\frac{t}{2} \big) \sin\big(\frac{t}{2} \big)\sin t \right)dt ,&\text{if }j_L =1, \end{cases}\\ &= \begin{cases} 0,&\text{if }j_L >1,\\ -\frac{\ensuremath{\ensuremath{\mathbf{a}}repsilon} }{4}\beta_L^2 \int_0^\pi \cos^2\big(\frac{t}{2} \big)\,dt ,&\text{if }j_L =1, \end{cases}\\ &= \begin{cases} 0, &\text{if }j_L >1,\\ -\frac{\ensuremath{\ensuremath{\mathbf{a}}repsilon}}{8} \beta_L^2 \pi, &\text{if }j_L =1, \end{cases} \end{align*} as $\delta\to0$. One can follow the same argument to compute the contribution coming from $S_\delta^+ (\ensuremath{\ensuremath{\mathbf{a}}repsilon},0)$. Putting together the two contributions we obtain the thesis. \end{proof} This turns out to be sufficient to prove the following: \begin{lem}\label{l:limitatezza_N_per_blowup} For any $n\in\{1,\ldots,N\}$, $\ensuremath{\ensuremath{\mathbf{a}}repsilon}\in(0,R_0)$, and $r,R$ such that $\ensuremath{\ensuremath{\mathbf{a}}repsilon} <r<R\leq R_0$ we have that \[ \mathcal{N}(u_n^\ensuremath{\ensuremath{\mathbf{a}}repsilon},r,\lambda_n(\ensuremath{\ensuremath{\mathbf{a}}repsilon})) + 1 \leq \left(\mathcal{N}(u_n^\ensuremath{\ensuremath{\mathbf{a}}repsilon},R,\lambda_n(\ensuremath{\ensuremath{\mathbf{a}}repsilon})) + 1\right) e^{2\lambda_N R^2}. 
\] In particular, for every $\delta \in (0, 1)$ there exists $r_\delta \in (0,R_0)$ such that, for any $\varepsilon\in(0,r_\delta)$ and $r\in(\varepsilon,r_\delta)$, $\mathcal{N}(u_N^\varepsilon, r, \lambda_N(\varepsilon)) \leq k +\delta$, $k$ being as in \eqref{eq:orderk}. \end{lem} \begin{proof} Once the negative sign of $M(\varepsilon,u,\lambda)$ is established (Lemma \ref{l:segnoM}) the proof proceeds as in \cite[Section 5]{abatangelo2015sharp}. \end{proof} Lemma \ref{l:limitatezza_N_per_blowup} is the key point for a priori estimates on the energy of the blow-up sequence in half-disks. These estimates are in turn fundamental to deduce estimates on the difference of eigenvalues, as it appears in the following subsection. \subsection{Estimates on the difference of eigenvalues}\label{sec:C3} Firstly, we are going to estimate the Rayleigh quotient for $\lambda_N(\varepsilon)$. Let $R> 1$. 
With $R_0$ as in the previous section, for every $\varepsilon\in (0,\varepsilon_0)$ such that $R\varepsilon< R_0$ we define the function \begin{equation*} v_{R,\varepsilon} = \begin{cases} v_{R,\varepsilon}^{int}, &\text{in } D_{R\varepsilon}^+, \\[3pt] u_N, &\text{in } \Omega \setminus D_{R\varepsilon}^+, \end{cases} \end{equation*} where $v_{R,\varepsilon}^{int}$ is the unique solution to \begin{equation}\label{eq:eqvint} \begin{cases} -\Delta v_{R,\varepsilon}^{int} = 0, &\text{in } D_{R\varepsilon}^+, \\[3pt] v_{R,\varepsilon}^{int} = u_N, &\text{on } S_{R\varepsilon}^+, \\[3pt] v_{R,\varepsilon}^{int} = 0, &\text{on } \Gamma_{R\varepsilon}\setminus\Gamma_\varepsilon, \\[3pt] \frac{\partial v_{R,\varepsilon}^{int}}{\partial \nu}=0, &\text{on } \Gamma_\varepsilon, \end{cases} \end{equation} i.e., by the Dirichlet principle, the unique solution to the minimization problem \begin{equation}\label{eq:19-1} \int_{D_{R\varepsilon}^+} |\nabla v_{R,\varepsilon}^{int}|^2 dx= \min \left\{{\textstyle{ \int_{D_{R\varepsilon}^+}}} |\nabla v|^2\,dx: v\in H^1(D_{R\varepsilon}^+),\, v= u_N \text{ on }S_{R\varepsilon}^+, \, v=0 \text{ on }\Gamma_{R\varepsilon}\setminus \Gamma_\varepsilon\right\}. 
\end{equation} In order to handle the denominator of the Rayleigh quotient we proceed with a Gram--Schmidt process. Since we are taking into account $u_1,\ldots,u_{N-1}$ as the first $N-1$ test functions for the Rayleigh quotient, which are already orthonormalized in $L^2(\Omega)$, we define \[ \tilde u_{R,\varepsilon}= \frac{v_{R,\varepsilon} - \sum_{j=1}^{N-1} \left(\int_\Omega v_{R,\varepsilon}\,u_j\right) u_j} {\nor{v_{R,\varepsilon} - \sum_{j=1}^{N-1} \left(\int_\Omega v_{R,\varepsilon}\,u_j\right) u_j}_{L^2(\Omega)}} . \] Using the Dirichlet principle and the asymptotics \eqref{eq:orderk} one can easily prove the following energy estimates for $v_{R,\varepsilon}^{int}$ in small disks. \begin{lem}\label{l:estimatesv} There exists a constant $C>0$ (independent of $\varepsilon$ and $R$) such that, for every $R>1$ and $\varepsilon\in(0,\varepsilon_0)$ such that $R\varepsilon< R_0$, the following estimates hold: \begin{align} \int_{D_{R\varepsilon}^+} |\nabla v_{R,\varepsilon}^{int}|^2dx \leq C(R\varepsilon)^{2k},\label{eq:energyvint}\\ \int_{S_{R\varepsilon}^+} | v_{R,\varepsilon}^{int}|^2ds\leq C(R\varepsilon)^{2k+1},\label{eq:tracevint}\\ \int_{D_{R\varepsilon}^+} | v_{R,\varepsilon}^{int}|^2 dx\leq C(R\varepsilon)^{2k+2}.\label{eq:L2vint} \end{align} \end{lem} To our aim, for every $R>1$ we define $v_R$ as the unique solution to the minimization 
problem \begin{equation*} \int_{D_R^+} |\nabla v_R|^2dx = \min \left\{ \int_{D_R^+} |\nabla v|^2dx: v\in H^1(D_R^+),\ v= \psi_k \text{ on }S_R^+, \ v=0 \text{ on }\Gamma_R\setminus \Gamma_1\right\}. \end{equation*} The function $v_R$ is the unique weak solution to \begin{equation}\label{eq:vR} \begin{cases} -\Delta v_R =0, &\text{in }D_R^+,\\ v_R= \psi_k, &\text{on }S_R^+,\\ v_R=0, &\text{on }\Gamma_R\setminus \Gamma_1,\\ \frac{\partial v_{R}}{\partial \nu}=0, &\text{on } \Gamma_1. \end{cases} \end{equation} As well, we introduce the following blow-up functions \begin{equation}\label{eq:25} U_\varepsilon(x):= \frac{u_N(\varepsilon x)}{\varepsilon^k}, \quad V_\varepsilon^R(x):= \frac{v_{R,\varepsilon}^{int}(\varepsilon x)}{\varepsilon^k}. \end{equation} Combining \eqref{eq:orderk} with the Dirichlet principle, we can establish the following convergences \begin{align} U_\varepsilon \to \beta\psi_k \text{ as }\varepsilon\to0 \text{ in }H^1(D_R^+)\text{ for every $R>1$};\label{eq:convergence1}\\ V_\varepsilon^R \to \beta v_R \text{ in $H^1(D_R^+)$ as $\varepsilon\to0$, for any $R>1$};\label{eq:convergence2}\\ v_R\to\Phi_k \text{ in $H^{1}(D_r^+)$ as $R\to+\infty$ for any $r>1$.}\label{eq:convergence3} \end{align} \begin{prop}\label{p:stimafacile} For any $R>1$ and $\varepsilon\in(0,\varepsilon_0)$ such that $R\varepsilon<R_0$, we have that \[ \frac{\lambda_N(\varepsilon) - \lambda_N}{\varepsilon^{2k}} \leq f_R(\varepsilon) \] 
where \[ \lim_{\varepsilon\to 0 } f_R(\varepsilon) =\beta^2 \int_{S_R^+} \psi_k \bigg(\frac{\partial v_R}{\partial \nu} - \frac{\partial \psi_k}{\partial\nu} \bigg)\,ds \] with $\psi_k$ defined in \eqref{eq:psi_k} and $v_R$ in \eqref{eq:vR}. \end{prop} \begin{proof} We note that \begin{align} \notag&\bigg\|v_{R,\varepsilon} - \sum_{j=1}^{N-1} \left(\int_\Omega v_{R,\varepsilon}\,u_j\,dx\right) u_j\bigg\|_{L^2(\Omega)}^2= \nor{v_{R,\varepsilon}}_{L^2(\Omega)}^2 - \sum_{j=1}^{N-1} \bigg(\int_\Omega v_{R,\varepsilon}\,u_j\,dx\bigg)^{\!\!2}\\ \notag&= 1 - \int_{D_{R\varepsilon}^+} u_N^2\,dx + \int_{D^+_{R\varepsilon}} | v_{R,\varepsilon}^{int}|^2\,dx - \sum_{j=1}^{N-1} \bigg(\int_\Omega v_{R,\varepsilon}\,u_j\,dx\bigg)^{\!\!2}\\ &= 1 + O(\varepsilon^{2k+2})\quad\text{as }\varepsilon\to 0 \label{eq:L2gramschmidt} \end{align} in view of \eqref{eq:orderk} and \eqref{eq:L2vint} and since, for all $j<N$, \begin{equation}\label{eq:L2mixedGramSchmidt} \int_\Omega v_{R,\varepsilon}\,u_j\,dx = -\int_{D_{R\varepsilon}^+} u_N\,u_j\,dx + \int_{D_{R\varepsilon}^+} v_{R,\varepsilon}^{int}\,u_j\,dx = O(\varepsilon^{k+2})\text{ as }\varepsilon\to0. 
\end{equation} The functions $u_1,\ldots,u_{N-1},\tilde u_{R,\varepsilon}$ are linearly independent (since they are nontrivial and mutually orthogonal) and belong to $\mathcal Q_\varepsilon$; if we plug a linear combination of them into the Rayleigh quotient \eqref{eqMinMaxDND} we obtain \begin{align*} &\lambda_N(\varepsilon)-\lambda_N \leq \left(\max_ {\substack{(\alpha_1,\dots, \alpha_{N})\in \ensuremath{\mathbb{R}}^{N}\\ \sum_{j=1}^{N}|\alpha_j|^2 =1}} \int_\Omega \bigg| \nabla \bigg( \sum_{j=1}^{N-1}\alpha_j u_j + \alpha_N \tilde u_{R,\varepsilon} \bigg) \bigg|^2 \right)-\lambda_N\\ &= \max_{\substack{(\alpha_1,\dots, \alpha_{N})\in \ensuremath{\mathbb{R}}^{N}\\ \sum_{j=1}^{N}|\alpha_j|^2 =1}} \!\!\left[ \sum_{j=1}^{N-1}\alpha_j^2\lambda_j + \alpha_N^2 \int_\Omega\!| \nabla \tilde u_{R,\varepsilon} |^2 + 2 \sum_{j=1}^{N-1}{\alpha_j} \alpha_N \int_\Omega\!\nabla u_j\! \cdot\! \nabla \tilde u_{R,\varepsilon}\!-\!\lambda_N\!\right]\\ &= \max_{\substack{(\alpha_1,\dots, \alpha_{N})\in \ensuremath{\mathbb{R}}^{N}\\ \sum_{j=1}^{N}|\alpha_j|^2 =1}} \Bigg[\sum_{j=1}^{N-1}\alpha_j^2 (\lambda_j - \lambda_N) + \alpha_N^2 \left( \int_\Omega | \nabla \tilde u_{R,\varepsilon} |^2 -\lambda_N\right) + 2 \sum_{j=1}^{N-1}{\alpha_j} \alpha_N \int_\Omega \nabla u_j \cdot \nabla \tilde u_{R,\varepsilon} \Bigg]. 
\end{align*} In view of \eqref{eq:energyvint} and \eqref{eq:orderk} we have that \begin{equation}\label{eq:9} \int_\Omega \nabla v_{R,\varepsilon}\cdot \nabla u_j\,dx = -\int_{D_{R\varepsilon}^+} \nabla u_N\cdot \nabla u_j\,dx + \int_{D_{R\varepsilon}^+} \nabla v_{R,\varepsilon}^{int}\cdot\nabla u_j\,dx= O(\varepsilon^{k+1}) . \end{equation} Moreover, from convergences \eqref{eq:convergence1}--\eqref{eq:convergence3} we have \begin{align} \notag &\int_\Omega |\nabla v_{R,\varepsilon}|^2 dx-\lambda_N = -\int_{D_{R\varepsilon}^+} |\nabla u_N|^2 dx + \int_{D_{R\varepsilon}^+} |\nabla v_{R,\varepsilon}^{int}|^2dx \\ \notag&= \varepsilon^{2k}\bigg(-\int_{D_{R}^+} |\nabla U_\varepsilon|^2 dx + \int_{D_{R}^+} |\nabla V_\varepsilon^R|^2dx \bigg)= \varepsilon^{2k}\beta^2\bigg(-\int_{D_{R}^+} |\nabla \psi_k|^2 dx + \int_{D_{R}^+} |\nabla v_R|^2dx+o(1)\bigg)\\ \label{eq:10}& = \varepsilon^{2k} \beta^2\left( \int_{S_R^+} \psi_k \bigg(\frac{\partial v_R}{\partial \nu} - \frac{\partial \psi_k}{\partial\nu} \bigg) ds + o(1)\right) \quad \text{as }\varepsilon\to0. 
\end{align} Collecting \eqref{eq:L2gramschmidt}, \eqref{eq:L2mixedGramSchmidt}, \eqref{eq:9}, and \eqref{eq:10}, we obtain that \begin{align*} & \int_\Omega | \nabla \tilde u_{R,\varepsilon} |^2 -\lambda_N \\ &= \frac{\int_\Omega |\nabla v_{R,\varepsilon}|^2 + \sum\limits_{j=1}^{N-1} \left(\int_\Omega v_{R,\varepsilon}\,u_j\right)^2 \!\lambda_j - 2\sum\limits_{j=1}^{N-1} \left(\int_\Omega v_{R,\varepsilon}\,u_j\right) \int_\Omega \nabla v_{R,\varepsilon}\cdot \nabla u_j} {\Big\|v_{R,\varepsilon} - \sum_{j=1}^{N-1} \left(\int_\Omega v_{R,\varepsilon}\,u_j\right) u_j\Big\|_{L^2(\Omega)}^2} - \lambda_N\\ &= \varepsilon^{2k} \beta^2\left( \int_{S_R^+} \psi_k \bigg(\frac{\partial v_R}{\partial \nu} - \frac{\partial \psi_k}{\partial\nu} \bigg) ds + o(1)\right) \quad \text{as }\varepsilon\to0. \end{align*} From \eqref{eq:L2gramschmidt}, \eqref{eq:L2mixedGramSchmidt}, and \eqref{eq:9} it follows that, for every $j<N$, \begin{align*} \int_\Omega \nabla u_j \cdot \nabla \tilde u_{R,\varepsilon} =O(\varepsilon^{k+1}) \quad \text{as }\varepsilon\to0. \end{align*} Hence, the assumptions in \cite[Lemma 6.1]{abatangelo2015sharp} are fulfilled by $\mu(\varepsilon)= \int_{S_R^+} \psi_k \big(\frac{\partial v_R}{\partial \nu} - \frac{\partial \psi_k}{\partial\nu} \big) ds + o(1)$, $\alpha=1$, $\sigma(\varepsilon)= \beta^2\varepsilon^{2k}$ and $M=2k-1$ and the conclusion follows. 
\end{proof} In the sequel we denote \begin{equation}\label{eq:11} \kappa_R := \int_{S_R^+} \psi_k \bigg(\frac{\partial v_R}{\partial \nu} - \frac{\partial \psi_k}{\partial\nu} \bigg)\,ds. \end{equation} \begin{lem}\label{l:limkappaR} Let $\kappa_R$ be defined in \eqref{eq:11}. Then $\lim_{R\to+\infty} \kappa_R = 2\,\mathfrak{m}_k$, with $\mathfrak{m}_k$ as in \eqref{eq:Ik}. \end{lem} \begin{proof} From \eqref{eq:vR} it follows that the function $\sigma_R$ defined as \begin{equation}\label{eq:upsilon_R} \sigma_R(r):= \int_0^{\pi} v_R(r(\cos t,\sin t)) \sin(kt) \,dt,\quad r\in[1,R], \end{equation} satisfies the equation $(r^{1+2k}(r^{-k}\sigma_R)')'=0$ and hence, for some $c_{R}\in\ensuremath{\mathbb{R}}$, $\big( r^{-k}\sigma_R(r) \big)'= \frac{c_{R}}{r^{1+2k}}$ in $(1,R)$. Integrating the previous equation over $(1,r)$ we obtain \begin{equation}\label{eq:125} r^{-k} \sigma_R(r) -\sigma_R(1) = \frac{c_{R}}{2k}\bigg(1-\frac1{r^{2k}}\bigg), \quad \text{for all }r\in(1,R]. \end{equation} Since \eqref{eq:vR} implies that $\sigma_R(R)=\frac12\pi R^{k}$, from \eqref{eq:125} we deduce that \[ \frac{c_R}{2k}=\frac{R^{2k}}{R^{2k}-1}\bigg(\frac\pi2-\sigma_R(1)\bigg) \] and then \begin{align*} \sigma_R(r) =r^{k} \frac{\tfrac\pi2 R^{2k}-\sigma_R(1)}{R^{2k}-1}-r^{-k}\frac{R^{2k}}{R^{2k}-1}(\tfrac\pi2-\sigma_R(1)), \end{align*} for all $r\in(1,R]$. If we differentiate the previous identity and evaluate it at $r=R$, we obtain \begin{equation}\label{eq:127} \sigma_R'(R)=k\,\frac{R^{k-1}}{R^{2k}-1}\Big(\tfrac\pi2(R^{2k}+1)-2\sigma_R(1)\Big). \end{equation} On the other hand, differentiating \eqref{eq:upsilon_R}, we obtain that \begin{equation}\label{eq:128} \sigma_R'(r)= r^{-1-k} \int_{S_r^+} \nabla v_R\cdot \nu\, \psi_k\,ds \end{equation} and then from \eqref{eq:127} and \eqref{eq:128} \begin{equation}\label{eq:12} \sigma_R'(R)= R^{-1-k} \int_{S_R^+} \psi_k\, \frac{\partial v_R}{\partial\nu}\,ds = k\frac{R^{k-1}}{R^{2k}-1} \Big(\tfrac\pi2 (R^{2k}+1) - 2\sigma_R(1) \Big). 
\end{equation} As well, from the definition of $\psi_k$ in \eqref{eq:psi_k} we have that \begin{equation}\label{eq:13} \int_{S_R^+} \psi_k\,\frac{\partial \psi_k}{\partial\nu}\,ds = \frac\pi2 k\,R^{2k}. \end{equation} Combining \eqref{eq:12} and \eqref{eq:13} we obtain that \[ \kappa_R= \frac{2k\, R^{2k}}{R^{2k}-1} \bigg(\frac\pi2-\sigma_R(1)\bigg) = \frac{2k\, R^{2k}}{R^{2k}-1} \bigg(\frac\pi2-\int_0^{\pi} v_R(\cos t,\sin t) \sin(kt) \,dt\bigg) \] and hence, via \eqref{eq:convergence3}, \[ \lim_{R\to+\infty} \kappa_R = 2k \bigg(\frac\pi2-\int_0^{\pi} \Phi_k(\cos t,\sin t) \sin(kt) \,dt\bigg). \] By Lemma \ref{l:xi1}, the proof is concluded. \end{proof} We are now going to estimate the Rayleigh quotient for $\lambda_N$. Let $R\geq 1$. Choosing $R_0$ as in the previous subsection, for every $\varepsilon\in (0,\varepsilon_0)$ such that $R\varepsilon< R_0$ and for any $j=1,\ldots,N$ we define the function \begin{equation}\label{eq:24} w_{j,R,\varepsilon} = \begin{cases} w_{j,R,\varepsilon}^{int}, &\text{in } D_{R\varepsilon}^+, \\[3pt] w_{j,R,\varepsilon}^{ext}, &\text{in } \Omega \setminus D_{R\varepsilon}^+, \end{cases} \end{equation} where, letting $u_j^\varepsilon$ be as in \eqref{eqDNDeps}--\eqref{eq:14}, \[ w_{j,R,\varepsilon}^{ext} = u_j^\varepsilon \quad \text{ in } \Omega \setminus D_{R\varepsilon}^+, \] and $w_{j,R,\varepsilon}^{int}$ is the unique solution to \begin{equation}\label{eq:eqwint} \begin{cases} -\Delta w_{j,R,\varepsilon}^{int} = 0, &\text{in } 
D_{R\varepsilon}^+, \\[3pt] w_{j,R,\varepsilon}^{int} = u_j^\varepsilon, &\text{on } S_{R\varepsilon}^+, \\[3pt] w_{j,R,\varepsilon}^{int} = 0, &\text{on } \Gamma_{R\varepsilon}. \end{cases} \end{equation} By the Dirichlet principle, we have that $w_{j,R,\varepsilon}^{int}$ is the unique solution to the minimization problem \begin{equation}\label{eq:19} \int_{D_{R\varepsilon}^+} |\nabla w_{j,R,\varepsilon}^{int}|^2 dx= \min \left\{{\textstyle{ \int_{D_{R\varepsilon}^+}}} |\nabla v|^2dx: v\in H^1(D_{R\varepsilon}^+),\, v= u_j^\varepsilon\text{ on }S_{R\varepsilon}^+, \, v=0 \text{ on }\Gamma_{R\varepsilon}\right\}. \end{equation} In order to handle the denominator we proceed with a Gram--Schmidt process. 
We then define \begin{equation}\label{eq:hatuj} \hat u_{j,R,\varepsilon}:= \dfrac{\tilde w_{j,R,\varepsilon}}{\|\tilde w_{j,R,\varepsilon}\|_{L^2(\Omega)}}, \quad j=1,\ldots, N, \end{equation} where $\tilde w_{N,R,\varepsilon} := w_{N,R,\varepsilon}$ and \begin{equation*} \tilde w_{j,R,\varepsilon} := w_{j,R,\varepsilon} - \sum_{\ell= j+1}^{N}\dfrac{\int_\Omega w_{j,R,\varepsilon} {\tilde w_{\ell,R,\varepsilon}}\,dx}{\|\tilde w_{\ell,R,\varepsilon}\|_{L^2(\Omega)}^2} \tilde w_{\ell,R,\varepsilon} \quad \text{for }j=1,\ldots, N-1. \end{equation*} We can derive the following estimate of the energy of the eigenfunctions $u_j^\varepsilon$ in half-disks of radius of order $\varepsilon$. \begin{lem}\label{l:sec_con} For $1\leq j\leq N$ and $\varepsilon\in(0,\varepsilon_0)$, let $u_j^\varepsilon$ be as in \eqref{eqDNDeps}--\eqref{eq:14}. 
For every $\delta\in(0,1/2)$, there exists $\mu_\delta>1$ such that, for all $R\geq \mu_\delta$, $\varepsilon<\frac{R_0}R$, and $1\leq j\leq N$, \begin{align} \label{eq:34}&\int_{S^+_{R\varepsilon}}|u_j^\varepsilon|^2\,ds\leq C (R\varepsilon)^{3-2\delta},\\ \label{eq:35}&\int_{D^+_{R\varepsilon}}|\nabla u_j^\varepsilon|^2\,dx\leq C (R\varepsilon)^{2-2\delta},\\ \label{eq:36}&\int_{D^+_{R\varepsilon}}|u_j^\varepsilon|^2\,dx\leq C (R\varepsilon)^{4-2\delta},\\ \label{eq:L2tracewjint}&\int_{S^+_{R\varepsilon}}|w_{j,R,\varepsilon}^{int}|^2\,ds\leq C (R\varepsilon)^{3-2\delta},\\ \label{eq:L2gradwjint}&\int_{D^+_{R\varepsilon}}|\nabla w_{j,R,\varepsilon}^{int}|^2\,dx\leq C (R\varepsilon)^{2-2\delta},\\ \label{eq:L2wjint}&\int_{D^+_{R\varepsilon}}|w_{j,R,\varepsilon}^{int}|^2\,dx\leq C (R\varepsilon)^{4-2\delta}, \end{align} for some constant $C>0$ depending only on $R_0$ and $\lambda_N$. 
\end{lem} \begin{proof} From \eqref{eq:equneps} and \eqref{eq:bound_l_neps} we know that $\{u_j^\varepsilon\}_{\varepsilon\in(0,\varepsilon_0)}$ is bounded in $H^1$; hence, from property (ii) at page \pageref{prop--ii} we deduce that, for $\varepsilon$ sufficiently small, $\mathcal N(u_j^\varepsilon, R_0,\lambda_j(\varepsilon))$ is bounded uniformly with respect to $\varepsilon$. Estimates \eqref{eq:34}--\eqref{eq:36} then follow from Lemma \ref{l:limitatezza_N_per_blowup}; we refer to \cite[Lemma 5.8]{abatangelo2015sharp} for a detailed proof in a similar problem. Estimates \eqref{eq:L2tracewjint}--\eqref{eq:L2wjint} can be proved combining estimates \eqref{eq:34}--\eqref{eq:36} with the Dirichlet principle (see \cite[Lemma 6.2]{abatangelo2015sharp} for details in a similar problem). \end{proof} For $\delta\in(0,1/2)$ fixed, let $\mu_\delta$ be as in Lemma \ref{l:sec_con}. For $\varepsilon$ sufficiently small in such a way that $\mu_\delta\varepsilon<R_0$, we introduce the following blow-up functions: \begin{equation}\label{eq:blowup} \hat U_\varepsilon(x):= \frac{u_N^\varepsilon(\varepsilon x)}{\sqrt{H(u_N^\varepsilon,\mu_\delta\varepsilon)}}, \qquad W_\varepsilon^R(x):= \frac{w_{N,R,\varepsilon}^{int}(\varepsilon x)}{\sqrt{H(u_N^\varepsilon,\mu_\delta\varepsilon)}}. 
\end{equation} We notice that, by scaling, \begin{equation}\label{eq:26} \frac{1}{\mu_\delta}\int_{S_{\mu_\delta}^+}|\hat U_\ensuremath{\ensuremath{\mathbf{a}}repsilon}|^2\,ds=1. \end{equation} \begin{thm}\label{t:stime_blowup} Let $\delta\in(0,1/2)$ be fixed and let $r_\delta>0$ be as in Lemma \ref{l:limitatezza_N_per_blowup}. For all $R\geq \mu_\delta$, \begin{equation}\label{eq:blowupbounded} \text{the family of functions }\big\{ \hat U_\ensuremath{\ensuremath{\mathbf{a}}repsilon}(x):\ R\ensuremath{\ensuremath{\mathbf{a}}repsilon}<r_\delta \big\} \text{ is bounded in $H^{1}(D_R^+)$}. \end{equation} In particular, for all $R\geq \mu_\delta$, \begin{align} &\int_{D_{R\ensuremath{\ensuremath{\mathbf{a}}repsilon}}^+} \abs{\nabla u_N^\ensuremath{\ensuremath{\mathbf{a}}repsilon}}^2dx=O(H(u_N^\ensuremath{\ensuremath{\mathbf{a}}repsilon},\mu_\delta\ensuremath{\ensuremath{\mathbf{a}}repsilon})),\quad\text{as }\ensuremath{\ensuremath{\mathbf{a}}repsilon}\to0^+,\label{eq:L2graduNint}\\ &\int_{S_{R\ensuremath{\ensuremath{\mathbf{a}}repsilon}}^+} |u_N^\ensuremath{\ensuremath{\mathbf{a}}repsilon}|^2ds=O(\ensuremath{\ensuremath{\mathbf{a}}repsilon} H(u_N^\ensuremath{\ensuremath{\mathbf{a}}repsilon},\mu_\delta\ensuremath{\ensuremath{\mathbf{a}}repsilon})),\quad\text{as }\ensuremath{\ensuremath{\mathbf{a}}repsilon}\to0^+,\\ &\int_{D_{R\ensuremath{\ensuremath{\mathbf{a}}repsilon}}^+} |u_N^\ensuremath{\ensuremath{\mathbf{a}}repsilon}|^2dx=O(\ensuremath{\ensuremath{\mathbf{a}}repsilon}^2 H(u_N^\ensuremath{\ensuremath{\mathbf{a}}repsilon},\mu_\delta\ensuremath{\ensuremath{\mathbf{a}}repsilon})),\quad\text{as }\ensuremath{\ensuremath{\mathbf{a}}repsilon}\to0^+. \label{eq:L2uNint} \end{align} \end{thm} \begin{proof}We omit the proof which can be derived from the monotonicity result given in Lemma \ref{l:limitatezza_N_per_blowup} following the same argument as Lemma \ref{l:sec_con}; for details in an analogous problem we refer to \cite[Theorem 5.9]{abatangelo2015sharp}. 
\end{proof} By the Dirichlet principle and Theorem \ref{t:stime_blowup} we also have the following estimates. \begin{lem}\label{l:stimeWepsR} For all $R>\max\{2,\mu_\delta\}$, \begin{equation}\label{eq:WepsRbounded} \text{the family of functions }\big\{W_\varepsilon^R: R\varepsilon<{r_\delta}\big\} \text{ is bounded in $H^{1}(D_R^+)$}. \end{equation} In particular, for all $R>\max\{2,\mu_\delta\}$, \begin{align} &\int_{D_{R\varepsilon}^+} \abs{\nabla w_{N,R,\varepsilon}^{int}}^2dx=O(H(u_N^\varepsilon,\mu_\delta\varepsilon)),\quad\text{as }\varepsilon\to0^+,\label{eq:L2gradwNint}\\ \label{eq:21}&\int_{S_{R\varepsilon}^+} |w_{N,R,\varepsilon}^{int}|^2ds=O(\varepsilon H(u_N^\varepsilon,\mu_\delta\varepsilon)),\quad\text{as }\varepsilon\to0^+,\\ \label{eq:22}&\int_{D_{R\varepsilon}^+} |w_{N,R,\varepsilon}^{int}|^2dx=O(\varepsilon^2 H(u_N^\varepsilon,\mu_\delta\varepsilon)),\quad\text{as }\varepsilon\to0^+. \end{align} \end{lem} We are now in a position to prove a sharp upper bound for the eigenvalue variation $\lambda_N-\lambda_N(\varepsilon)$. 
\begin{prop}\label{p:stimadifficile} There exists $\tilde R>2$ such that, for all $R>\tilde R$ and $\varepsilon>0$ with $R\varepsilon<{R_0}$, \[ \frac{\lambda_N-\lambda_N(\varepsilon)}{H(u_N^\varepsilon, \mu_\delta\varepsilon)}\leq g_R(\varepsilon) \] where \begin{align} g_R(\varepsilon)= \int_{D_R^+}|\nabla W_\varepsilon^R|^2\,dx-\int_{D_R^+}|\nabla \hat U_\varepsilon|^2\,dx+o(1) \quad\text{and}\quad g_R(\varepsilon)=O(1) \quad\text{as }\varepsilon\to 0^+, \label{eq:gR} \end{align} with $\hat U_\varepsilon$ and $W_\varepsilon^R$ defined in \eqref{eq:blowup}. \end{prop} \begin{proof} As already mentioned, we take into account the Courant--Fisher characterization for $\lambda_N$ recalled in \eqref{eqMinMaxD} and consider the $N$-dimensional space spanned by the functions $\{\hat u_{j,R,\varepsilon}\}_{j=1}^N$ defined in \eqref{eq:hatuj}. 
Before proceeding, we note that \begin{align} & \| \tilde w_{N,R,\varepsilon} \|_{L^2(\Omega)}^2 = 1 + O\big(\varepsilon^2 H(u_N^\varepsilon,\mu_\delta\varepsilon)\big), \notag \\ & d_{N,j}^{R,\varepsilon}:= \dfrac{\int_\Omega w_{j,R,\varepsilon} {\tilde w_{N,R,\varepsilon}}\,dx}{\|\tilde w_{N,R,\varepsilon}\|_{L^2(\Omega)}^2} = O\big(\varepsilon^{3-\delta} \sqrt{H(u_N^\varepsilon,\mu_\delta\varepsilon)} \big), \quad\text{for all }j< N, \label{eq:stimeNterm} \end{align} as $\varepsilon\to0$, thanks to \eqref{eq:L2uNint}, \eqref{eq:22}, \eqref{eq:36} and \eqref{eq:L2wjint}. On the other hand, \begin{align} & \| \tilde w_{j,R,\varepsilon} \|_{L^2(\Omega)}^2 = 1 + O(\varepsilon^{4-2\delta}), \notag \\ &d_{\ell,j}^{R,\varepsilon}:= \dfrac{\int_\Omega w_{j,R,\varepsilon} {\tilde w_{\ell,R,\varepsilon}}\,dx}{\|\tilde w_{\ell,R,\varepsilon}\|_{L^2(\Omega)}^2} = O(\varepsilon^{4-2\delta}), \quad\text{for all }j\neq \ell, \label{eq:stimejterm} \end{align} as $\varepsilon\to 0$ and for any $j=1,\ldots,N-1$, thanks to \eqref{eq:L2wjint} and \eqref{eq:36}. 
If we plug a linear combination of $\{\hat u_{j,R,\varepsilon}\}_{j=1}^N$ into the Rayleigh quotient we obtain that \begin{equation*} \lambda_N \leq \max_ {\substack{(\alpha_1,\dots, \alpha_{N})\in \ensuremath{\mathbb{R}}^{N}\\ \sum_{j=1}^{N}|\alpha_j|^2 =1}} \int_{\Omega} \bigg|\nabla \bigg(\sum_{j=1}^{N}\alpha_j \hat u_{j,R,\varepsilon}\bigg)\bigg|^2 dx, \end{equation*} and then \begin{equation}\label{eq:107} \lambda_N-\lambda_N(\varepsilon)\leq \max_ {\substack{(\alpha_1,\dots, \alpha_N)\in \ensuremath{\mathbb{R}}^N\\ \sum_{j=1}^{N}|\alpha_j|^2 =1}}\sum_{j,n=1}^N m_{j,n}^{\varepsilon,R}\alpha_j {\alpha_n}, \end{equation} where \[ m_{j,n}^{\varepsilon,R}= \int_{\Omega} \nabla \hat u_{j,R,\varepsilon}\cdot \nabla \hat u_{n,R,\varepsilon}\, dx -\lambda_N(\varepsilon) \delta_{jn}, \] with $\delta_{jn}=1$ if $j=n$ and $\delta_{jn}=0$ if $j\neq n$. 
From \eqref{eq:derH} and Lemma \ref{l:limitatezza_N_per_blowup}, if $R\geq \mu_\delta$ and $R\varepsilon<{r_\delta}$ we have \begin{equation} \frac1 {H(u_N^\varepsilon,r)}\dfrac{d}{dr} H(u_N^\varepsilon,r) = \dfrac2r \mathcal N(u_N^\varepsilon, r,\lambda_N(\varepsilon))\leq \frac 2r({k}+\delta)\quad\text{for all }\mu_\delta\varepsilon\leq r\leq r_\delta.\label{eq:23} \end{equation} Integration of \eqref{eq:23} over the interval $(\mu_\delta\varepsilon, r_\delta)$ and property (ii) at page \pageref{prop--ii} yield \begin{equation}\label{eq:stima_sotto_radiceH} H(u_N^\varepsilon, \mu_\delta\varepsilon) \geq C_\delta \varepsilon^{2k + 2\delta},\quad\text{if }\mu_\delta\varepsilon<{r_\delta}, \end{equation} for some $C_\delta>0$ independent of $\varepsilon$. Estimate \eqref{eq:34} implies that \begin{equation}\label{eq:stima_sopra_radiceH} H(u_N^\varepsilon, \mu_\delta\varepsilon)=O(\varepsilon^{2-2\delta})\quad\text{ as }\varepsilon\to0. 
\end{equation} From \eqref{eq:stimeNterm}, \eqref{eq:blowup}, Theorem \ref{t:stime_blowup}, and Lemma \ref{l:stimeWepsR} we deduce that \begin{align} m_{N,N}^{\ensuremath{\ensuremath{\mathbf{a}}repsilon},R} &= \dfrac{\lambda_N(\ensuremath{\ensuremath{\mathbf{a}}repsilon}) (1-\|w_{N,R,\ensuremath{\ensuremath{\mathbf{a}}repsilon}} \|_{L^2(\Omega)}^2)}{\|w_{N,R,\ensuremath{\ensuremath{\mathbf{a}}repsilon}} \|_{L^2(\Omega)}^2} + \dfrac{\left( \int_{D_{R\ensuremath{\ensuremath{\mathbf{a}}repsilon}}^+} \big| \nabla w_{N,R,\ensuremath{\ensuremath{\mathbf{a}}repsilon}}^{int}\big|^2 dx - \int_{D_{R\ensuremath{\ensuremath{\mathbf{a}}repsilon}}^+} \big| \nabla u_N^\ensuremath{\ensuremath{\mathbf{a}}repsilon} \big|^2 dx\right)}{\|w_{N,R,\ensuremath{\ensuremath{\mathbf{a}}repsilon}} \|_{L^2(\Omega)}^2}\notag \\ \label{eq:mNN} &= H(u_N^\ensuremath{\ensuremath{\mathbf{a}}repsilon},\mu_\delta\ensuremath{\ensuremath{\mathbf{a}}repsilon})\bigg(\int_{D_R^+}|\nabla W_\ensuremath{\ensuremath{\mathbf{a}}repsilon}^R|^2\,dx-\int_{D_R^+}|\nabla \hat U_\ensuremath{\ensuremath{\mathbf{a}}repsilon}|^2\,dx+o(1)\bigg), \end{align} as $\ensuremath{\ensuremath{\mathbf{a}}repsilon}\to0^+$. On the other hand, if $j<N$, by the convergence of the perturbed eigenvalue, \eqref{eq:stimejterm}, \eqref{eq:L2gradwjint}, \eqref{eq:35} we have that \begin{align*} m_{j,j}^{\ensuremath{\ensuremath{\mathbf{a}}repsilon},R}&= -\lambda_N(\ensuremath{\ensuremath{\mathbf{a}}repsilon}) +\dfrac{1}{\|\tilde w_{j,R,\ensuremath{\ensuremath{\mathbf{a}}repsilon}}\|_{L^2(\Omega)}^2} \left( \lambda_j(\ensuremath{\ensuremath{\mathbf{a}}repsilon}) - \int_{D_{R\ensuremath{\ensuremath{\mathbf{a}}repsilon}}^+}\!\! \!\abs{\nabla u_j^\ensuremath{\ensuremath{\mathbf{a}}repsilon}}^2dx + \int_{D_{R\ensuremath{\ensuremath{\mathbf{a}}repsilon}}^+}\! 
\!\!\abs{\nabla w_{j,R,\ensuremath{\ensuremath{\mathbf{a}}repsilon}}^{int}}^2dx \right) \\ &\ + \dfrac{1}{\|\tilde w_{j,R,\ensuremath{\ensuremath{\mathbf{a}}repsilon}}\|_{L^2(\Omega)}^2} \int_{\Omega} \bigg|\nabla \Big(\sum_{\ell>j} d_{\ell,j}^{R,\ensuremath{\ensuremath{\mathbf{a}}repsilon}} \tilde w_{\ell,R,\ensuremath{\ensuremath{\mathbf{a}}repsilon}} \Big)\bigg|^2dx\\ & \ -\dfrac{2}{\|\tilde w_{j,R,\ensuremath{\ensuremath{\mathbf{a}}repsilon}}\|_{L^2(\Omega)}^2} \bigg(\int_{\Omega} \nabla w_{j,R,\ensuremath{\ensuremath{\mathbf{a}}repsilon}} \cdot {\nabla \Big( \sum_{\ell>j} d_{\ell,j}^{R,\ensuremath{\ensuremath{\mathbf{a}}repsilon}} \tilde w_{\ell,R,\ensuremath{\ensuremath{\mathbf{a}}repsilon}} \Big)}\,dx\bigg)\\ &=(\lambda_j-\lambda_N)+o(1)\quad\text{as }\ensuremath{\ensuremath{\mathbf{a}}repsilon}\to0. \end{align*} From \eqref{eq:stimeNterm}, \eqref{eq:stimejterm}, \eqref{eq:35}, \eqref{eq:L2gradwjint}, \eqref{eq:L2graduNint}, and \eqref{eq:L2gradwNint}, it follows that, for all $j<N$, \begin{align*} \|\tilde w_{j,R,\ensuremath{\ensuremath{\mathbf{a}}repsilon}}\|_{L^2(\Omega)}& \|\tilde w_{N,R,\ensuremath{\ensuremath{\mathbf{a}}repsilon}}\|_{L^2(\Omega)} m_{j,N}^{\ensuremath{\ensuremath{\mathbf{a}}repsilon},R}= \int_{D_{R\ensuremath{\ensuremath{\mathbf{a}}repsilon}}^+}\Big(\nabla w_{j,R,\ensuremath{\ensuremath{\mathbf{a}}repsilon}}^{int}\cdot {\nabla w_{N,R,\ensuremath{\ensuremath{\mathbf{a}}repsilon}}^{int} }- \nabla u_j^\ensuremath{\ensuremath{\mathbf{a}}repsilon}\cdot {\nabla u_N^\ensuremath{\ensuremath{\mathbf{a}}repsilon}}\Big)\,dx \\ &\quad - \int_{\Omega} \nabla \Big(\sum_{\ell>j}d_{\ell,j}^{R,\ensuremath{\ensuremath{\mathbf{a}}repsilon}} \tilde w_{\ell,R,\ensuremath{\ensuremath{\mathbf{a}}repsilon}}\Big) \cdot {\nabla w_{N,R,\ensuremath{\ensuremath{\mathbf{a}}repsilon}} }\,dx =O\Big(\ensuremath{\ensuremath{\mathbf{a}}repsilon}^{1-\delta} 
\sqrt{H(u_N^\varepsilon,\mu_\delta\varepsilon)}\Big). \end{align*} Hence, by \eqref{eq:stimeNterm} and \eqref{eq:stimejterm}, we have that \[ m_{j,N}^{\varepsilon,R}=O\Big(\varepsilon^{1-\delta} \sqrt{H(u_N^\varepsilon,\mu_\delta\varepsilon)}\Big)\quad\text{and}\quad m_{N,j}^{\varepsilon,R}={ m_{j,N}^{\varepsilon,R}}=O\Big(\varepsilon^{1-\delta} \sqrt{H(u_N^\varepsilon,\mu_\delta\varepsilon)}\Big) \] as $\varepsilon\to 0^+$. From \eqref{eq:stimejterm}, \eqref{eq:35}, \eqref{eq:L2gradwjint}, we deduce that, for all $j,n<N$ with $j\neq n$, \begin{align*} \|\tilde w_{j,R,\varepsilon}&\|_{L^2(\Omega)}\|\tilde w_{n,R,\varepsilon}\|_{L^2(\Omega)} m_{j,n}^{\varepsilon,R}\\ &=\int_{D_{R\varepsilon}^+}\Big(\nabla w_{j,R,\varepsilon}^{int}\cdot {\nabla w_{n,R,\varepsilon}^{int} }- \nabla u_j^\varepsilon\cdot {\nabla u_n^\varepsilon}\Big)\,dx\\ &\quad + \int_{\Omega} \nabla \big( \sum_{\ell>j} d_{\ell,j}^{R,\varepsilon} \tilde w_{\ell,R,\varepsilon} \big) \cdot {\nabla \big( \sum_{h>n} d_{h,n}^{R,\varepsilon} \tilde w_{h,R,\varepsilon} \big)}\,dx \\ &\quad - \int_{\Omega} \nabla \big( \sum_{\ell>j} d_{\ell,j}^{R,\varepsilon} \tilde
w_{\ell,R,\ensuremath{\ensuremath{\mathbf{a}}repsilon}} \big) \cdot {\nabla w_{n,R,\ensuremath{\ensuremath{\mathbf{a}}repsilon}}} \,dx \\ &\quad - \int_{\Omega} \nabla w_{j,R,\ensuremath{\ensuremath{\mathbf{a}}repsilon}} \cdot {\nabla \big( \sum_{h>n} d_{h,n}^{R,\ensuremath{\ensuremath{\mathbf{a}}repsilon}} \tilde w_{h,R,\ensuremath{\ensuremath{\mathbf{a}}repsilon}} \big)}\,dx=O(\ensuremath{\ensuremath{\mathbf{a}}repsilon}^{2-2\delta}) \quad\text{as }\ensuremath{\ensuremath{\mathbf{a}}repsilon}\to0. \end{align*} Hence, in view of \eqref{eq:stimejterm}, \[ m_{j,n}^{\ensuremath{\ensuremath{\mathbf{a}}repsilon},R}=O(\ensuremath{\ensuremath{\mathbf{a}}repsilon}^{2-2\delta}) \quad\text{as }\ensuremath{\ensuremath{\mathbf{a}}repsilon}\to0. \] Taking into account \eqref{eq:stima_sotto_radiceH}, we can then apply \cite[Lemma 6.1]{abatangelo2015sharp} with $\sigma(\ensuremath{\ensuremath{\mathbf{a}}repsilon})=H(u_N^\ensuremath{\ensuremath{\mathbf{a}}repsilon},\mu_\delta\ensuremath{\ensuremath{\mathbf{a}}repsilon})$, $\mu(\ensuremath{\ensuremath{\mathbf{a}}repsilon})=g_R(\ensuremath{\ensuremath{\mathbf{a}}repsilon})$, $\alpha=1-\delta$ and $M=4k$ in order to deduce \[ \max_ {\substack{(\alpha_1,\dots, \alpha_{N})\in \ensuremath{\mathbb{R}}^{N}\\ \sum_{j=1}^{N}|\alpha_j|^2 =1}}\sum_{j,n=1}^N m_{j,n}^{\ensuremath{\ensuremath{\mathbf{a}}repsilon},R}\alpha_j {\alpha_n} =H(u_N^\ensuremath{\ensuremath{\mathbf{a}}repsilon},\mu_\delta\ensuremath{\ensuremath{\mathbf{a}}repsilon})\bigg( \int_{D_R^+}|\nabla W_\ensuremath{\ensuremath{\mathbf{a}}repsilon}^R|^2\,dx-\int_{D_R^+}|\nabla \hat U_\ensuremath{\ensuremath{\mathbf{a}}repsilon}|^2\,dx+o(1)\bigg) \] as $\ensuremath{\ensuremath{\mathbf{a}}repsilon}\to 0^+$, which, in view of \eqref{eq:107}, yields $\frac{\lambda_N-\lambda_N(\ensuremath{\ensuremath{\mathbf{a}}repsilon})}{H(u_N^\ensuremath{\ensuremath{\mathbf{a}}repsilon},\mu_\delta\ensuremath{\ensuremath{\mathbf{a}}repsilon})}\leq g_R(\ensuremath{\ensuremath{\mathbf{a}}repsilon})$ 
with $g_R$ as in \eqref{eq:gR}. We notice that, from Theorem \ref{t:stime_blowup} and Lemma \ref{l:stimeWepsR}, for all $R>\max\{2,\mu_\delta\}$, $g_R(\ensuremath{\ensuremath{\mathbf{a}}repsilon})=O(1)$ as $\ensuremath{\ensuremath{\mathbf{a}}repsilon}\to 0^+$. The proof is now complete. \end{proof} Combining Proposition \ref{p:stimafacile}, Lemma \ref{l:limkappaR} and Proposition \ref{p:stimadifficile} we obtain the following upper/lower estimates for $\lambda_N-\lambda_N(\ensuremath{\ensuremath{\mathbf{a}}repsilon})$. \begin{prop}\label{prop:quasi_finito1} There exists a positive constant $C^*>0$ such that \[ -2\beta^2 \mathfrak{m}_k \,\ensuremath{\ensuremath{\mathbf{a}}repsilon}^{2k} (1+o(1)) \leq \lambda_N - \lambda_N(\ensuremath{\ensuremath{\mathbf{a}}repsilon}) \leq C^*\, H(u_N^\ensuremath{\ensuremath{\mathbf{a}}repsilon}, \mu_\delta\ensuremath{\ensuremath{\mathbf{a}}repsilon}), \quad \text{as }\ensuremath{\ensuremath{\mathbf{a}}repsilon}\to 0, \] with $\mathfrak{m}_k<0$ as in \eqref{eq:Ik} and \eqref{eq:mathfrak-m}. 
\end{prop} \subsection{Sharp blow-up analysis and asymptotics} Let us consider the function \begin{align}\label{def_operatore_F} F: \ensuremath{\mathbb{R}} \times H^{1}_{0}(\Omega) &\longrightarrow \ensuremath{\mathbb{R}} \times H^{-1}(\Omega)\\ \notag (\lambda,\varphi) &\longmapsto \Big( {\textstyle{ q(\varphi) -\lambda_N, \ -\Delta \varphi-\lambda \varphi}}\Big), \end{align} where $q$ is defined in \eqref{eqQuad} and $-\Delta \varphi-\lambda \varphi\in H^{-1}(\Omega)$ acts as \[ \sideset{_{H^{-1}(\Omega)}^{}}{}{\mathop{\Big\langle}} -\Delta \varphi-\lambda \varphi , u \Big\rangle_{\!H^{1}_{0}(\Omega)}\!\!= \int_{\Omega}\nabla\varphi\cdot{\nabla u}\,dx -\!\lambda \!\int_{\Omega} \varphi {u} \,dx \] for all $u\in H^{1}_{0}(\Omega)$. We have that $F(\lambda_N,u_N)=(0,0)$, $F$ is Fr\'{e}chet-differentiable at $(\lambda_N,u_N)$ and its Fr\'{e}chet-differential $dF(\lambda_N,u_N)\in \mathcal L\big( \ensuremath{\mathbb{R}} \times H^{1}_{0}(\Omega),\ensuremath{\mathbb{R}} \times H^{-1}(\Omega)\big)$ is invertible. Therefore we can control $|\lambda_N(\varepsilon)-\lambda_N|$ and $\|w_{N,R,\varepsilon} - u_N \|_{H^{1}_0(\Omega)}$ with $\|F(\lambda_N(\varepsilon),w_{N,R,\varepsilon})\|_{\ensuremath{\mathbb{R}}\times H^{-1}(\Omega)}$.
Then the norm $\|F(\lambda_N(\varepsilon),w_{N,R,\varepsilon})\|_{\ensuremath{\mathbb{R}}\times H^{-1}(\Omega)}$ can be estimated taking advantage of the computations performed in Section \ref{sec:C3}, thus yielding \[ \|w_{N,R,\varepsilon} - u_N \|_{H^{1}_0(\Omega)}=O\Big(\sqrt{H(u_N^\varepsilon,\mu_\delta\varepsilon)}\Big) \] as $\varepsilon\to0^+$ for every $R>2$, $\mu_\delta$ being as in Lemma \ref{l:sec_con} for some $\delta\in(0,1/2)$ fixed. As a consequence, for every $R>2$ \begin{equation}\label{eq:41} \int_{\big(\frac{1}{\varepsilon}\Omega\big)\setminus D_{R}^+}\bigg|\nabla \Big(\hat U_\varepsilon - \tfrac{\varepsilon^{k}}{\sqrt{H(u_N^\varepsilon,\mu_\delta\varepsilon)}}U_\varepsilon\Big)\bigg|^2dx=O(1), \quad\text{as }\varepsilon\to0^+.
\end{equation} Using \eqref{eq:41} and the uniqueness part of Lemma \ref{l:Phi}, we can identify univocally the limit of the blow-up family $\{\hat U_\ensuremath{\ensuremath{\mathbf{a}}repsilon}\}_\ensuremath{\ensuremath{\mathbf{a}}repsilon}$ introduced in \eqref{eq:blowup} and prove that \[ \lim_{\ensuremath{\ensuremath{\mathbf{a}}repsilon}\to 0^+}\frac{\ensuremath{\ensuremath{\mathbf{a}}repsilon}^{k}}{\sqrt{H(u_N^\ensuremath{\ensuremath{\mathbf{a}}repsilon},\mu_\delta\ensuremath{\ensuremath{\mathbf{a}}repsilon})}}= \frac1{|\beta|} \sqrt{\frac{\mu_\delta}{\int_{S_{\mu_\delta}^+}|\Phi_k|^2 ds}} \] and \begin{equation}\label{eq:buU} \hat U_\ensuremath{\ensuremath{\mathbf{a}}repsilon}\to \frac{\beta}{|\beta|}\sqrt{\frac{\mu_\delta}{\int_{S_{\mu_\delta}^+}|\Phi_k|^2 ds}} \, \Phi_k \quad\text{as }\ensuremath{\ensuremath{\mathbf{a}}repsilon}\to0^+ \end{equation} in $H^{1 }(D_R^+)$ for every $R>1$ and in $C^{2}_{\rm loc}(\overline{\ensuremath{\mathbb{R}}^2_+}\setminus\{{\mathbf e},-{\mathbf e}\})$, see \cite[Theorem 8.1]{abatangelo2015sharp} for details. Combining \eqref{eq:buU} with the Dirichlet principle, we can prove convergence of the blow-up family $W_\ensuremath{\ensuremath{\mathbf{a}}repsilon}^R$ introduced in \eqref{eq:blowup}: for all $R>2$, \begin{equation}\label{eq:buW} W_\ensuremath{\ensuremath{\mathbf{a}}repsilon}^R \to \frac{\beta}{|\beta|} \sqrt{\frac{\mu_\delta}{\int_{S_{\mu_\delta}^+}|\Phi_k|^2ds}}\,w_R \qquad \text{as }\ensuremath{\ensuremath{\mathbf{a}}repsilon}\to0^+ \text{ in } H^{1}(D_R^+), \end{equation} where $w_R$ is the unique solution to the minimization problem \[ \int_{D_{R}^+} \!\!|\nabla w_R(x)|^2\,dx = \min\left\{ \int_{D_{R}^+}\!\! |\nabla u|^2\,dx:\, u\in H^{1}(D_{R}^+), \ u= \Phi_k \text{ on }S_{R}^+, \ u=0 \text{ on }\Gamma_R \right\}, \] which then solves \begin{equation*} \begin{cases} -\Delta w_R = 0, &\text{in }D_{R}^+,\\ w_R = \Phi_k, &\text{on }S_{R}^+,\\ w_R = 0, &\text{on }\Gamma_R. 
\end{cases} \end{equation*} To obtain the exact asymptotics for $\lambda_N- \lambda_N(\ensuremath{\ensuremath{\mathbf{a}}repsilon})$ it remains to determine the limit of the function $g_R(\ensuremath{\ensuremath{\mathbf{a}}repsilon})$ defined in \eqref{eq:gR} of Proposition \ref{p:stimadifficile} as $\ensuremath{\ensuremath{\mathbf{a}}repsilon}\to 0$ and $R\to+\infty$. \begin{lem}\label{l:limite_kappa_R} For all $R>\tilde R$ and $\ensuremath{\ensuremath{\mathbf{a}}repsilon}>0$ with $R\ensuremath{\ensuremath{\mathbf{a}}repsilon}<{R_0}$, let $g_R(\ensuremath{\ensuremath{\mathbf{a}}repsilon})$ be as in Proposition \ref{p:stimadifficile}. Then \begin{equation}\label{eq:60} \lim_{\ensuremath{\ensuremath{\mathbf{a}}repsilon}\to0^+}g_R(\ensuremath{\ensuremath{\mathbf{a}}repsilon})= \frac{\mu_\delta}{\int_{S_{\mu_\delta}^+}|\Phi_k|^2ds} \tilde \kappa_R \end{equation} where \begin{equation}\label{eq:68} \tilde \kappa_R=\int_{S_R^+}\Big( \nabla w_R\cdot\nu - \nabla \Phi_k\cdot\nu \Big) {\Phi_k}\,ds, \end{equation} with $\nu=\frac{x}{|x|}$. Furthermore $\lim_{R\to+\infty}\tilde \kappa_R=-2{\mathfrak m}_k$, where ${\mathfrak m}_k$ is defined in \eqref{eq:mathfrak-m} and \eqref{eq:Ik}. \end{lem} \begin{proof} We first observe that, by \eqref{eq:gR} and convergences \eqref{eq:buU}--\eqref{eq:buW}, \begin{align*} &\lim_{\ensuremath{\ensuremath{\mathbf{a}}repsilon}\to0^+}g_R(\ensuremath{\ensuremath{\mathbf{a}}repsilon})= \frac{\mu_\delta}{\int_{S_{\mu_\delta}^+}|\Phi_k|^2ds} \bigg(\int_{D_R^+}|\nabla w_R|^2\,dx-\int_{D_R^+}|\nabla \Phi_k|^2\,dx\bigg) = \frac{\mu_\delta}{\int_{S_{\mu_\delta}^+}|\Phi_k|^2ds} \tilde \kappa_R \end{align*} with $\tilde \kappa_R$ as in \eqref{eq:68}. 
We observe that \begin{equation}\label{eq:109} \tilde \kappa_R =\int_{S_R^+}\Big( \nabla w_R\cdot\nu - \nabla \Phi_k\cdot\nu \Big) \psi_k\,ds+I_1(R)+I_2(R) \end{equation} where \begin{equation*} I_1(R)= \int_{S_R^+}\Big({\Phi_k}-\psi_k\Big) \nabla \Big(\psi_k-\Phi_k\Big) \cdot\nu \,ds,\quad I_2(R)=\int_{S_R^+}\Big({\Phi_k}-\psi_k\Big) \nabla \Big(w_R-\psi_k \Big) \cdot\nu \,ds. \end{equation*} Testing the equation $-\Delta \big(\psi_k-{ \Phi_k}\big) =0$ with the function $\psi_k-{\Phi_k}$, recalling that $\psi_k-{ \Phi_k}=0$ on $s$, and integrating it over $\ensuremath{\mathbb{R}}^2_+\setminus D_R^+$, thanks to Lemma \ref{l:Phi} we obtain that \[ I_1(R)= \int_{\ensuremath{\mathbb{R}}^2_+\setminus D_R^+} |\nabla(\Phi_k-\psi_k)|^2= \int_{\ensuremath{\mathbb{R}}^2_+\setminus D_R^+} |\nabla w_k|^2 \to 0 \qquad \text{as }R\to+\infty . \] Let $\eta_R:\ensuremath{\mathbb{R}}^2_+\to\ensuremath{\mathbb{R}}$ be a smooth cut-off function such that $\eta_R\equiv 0$ in $D_{R/2}^+$, $\eta_R\equiv 1$ in $\ensuremath{\mathbb{R}}^2_+\setminus D_R^+$, $0\leq \eta_R\leq 1$, and $|\nabla\eta_R|\leq\frac{4}{R}$. Testing the equation $-\Delta (\psi_k - w_R)=0$ in $D_R^+$ with the function $\eta_R({ \Phi_k}-\psi_k)$ we obtain \[ I_2(R)= \int_{D_R^+} \nabla (w_R-\psi_k) \cdot \nabla ((\Phi_k - \psi_k)\eta_R) \] so that, in view of the Dirichlet Principle, Lemma \ref{l:Phi} and the fact that $w_k\in\mathcal Q$, \begin{align*} |I_2(R)| &\leq \int_{D_R^+} |\nabla ((\Phi_k - \psi_k)\eta_R)|^2dx\leq 2 \int_{D_R^+\setminus D_{R/2}^+} |\nabla (\Phi_k - \psi_k)|^2 \,dx+\frac{32}{R^2} \int_{D_R^+\setminus D_{R/2}^+} |\Phi_k - \psi_k|^2 \,dx\\ &\leq 2 \int_{D_R^+\setminus D_{R/2}^+} |\nabla w_k|^2 \,dx+128 \int_{D_R^+\setminus D_{R/2}^+} \frac{w_k^2}{|x-{\mathbf e}|^2} \,dx\to 0 \end{align*} as $R\to+\infty$, where in the last line of the above estimate we have used that $\frac{1}{R}\leq\frac2{|x-{\mathbf e}|}$ for all $x\in D_R^+$. 
Therefore we need just to study the limit of the quantity $\int_{S_R^+} \frac{\partial}{\partial \nu} (w_R - \Phi_k)\psi_k$ as $R\to+\infty$. To this aim, we consider the function \begin{equation}\label{eq:47} \xi(r):= \int_{0}^\pi \Phi_k(r\cos t,r\sin t) \sin(kt)\,dt,\qquad r\geq 1, \end{equation} and notice that it satisfies the differential equation $\xi''+\frac1r\xi'-\frac{k^2}{r^2}\xi=0$ which can be rewritten as $(r^{1+2k}(r^{-k}\xi)')'=0$ in $[1,+\infty)$. Therefore there exists some $C_\xi\in\ensuremath{\mathbb{R}}$ such that \[ \big( r^{-k}\xi(r) \big)'= \frac{C_\xi}{r^{1+2k}}\quad\text{in } [1,+\infty). \] Integrating the previous equation over $[1,r]$ we obtain that \begin{equation}\label{eq:114} r^{-k} \xi(r) -\xi(1) = \frac{C_\xi}{2k}\left(1-\frac1{r^{2k}}\right). \end{equation} From \eqref{eq:psi_k}, Lemma \ref{l:wk}, and Lemma \ref{l:Phi} it follows that \begin{align*} \xi(r)&=\int_0^{\pi}\psi_k(r\cos t,r\sin t) \sin (k t)\,dt+\int_{0}^{\pi} \Big(\Phi_k(r\cos t,r\sin t)-\psi_k(r\cos t,r\sin t) \Big) \sin (k t)\,dt\\ &=\frac\pi2 r^{k}+O(r^{-1}),\quad\text{as }r\to+\infty, \end{align*} and hence $r^{-k} \xi(r) \to \frac\pi2$ as $r\to+\infty$. Letting $r\to+\infty$ in \eqref{eq:114}, this implies that $\tfrac{C_\xi}{2k}=\frac\pi2 -\xi(1)$, so that \begin{equation} \xi(r)= \tfrac\pi2 \,r^{k} + \big( \xi(1)-\tfrac\pi2\big) r^{-k},\quad \xi'(r)=k \tfrac\pi2 r^{k-1} + k\big(\tfrac\pi2 - \xi(1)\big) r^{-k-1} \label{eq:xi} \end{equation} for all $r\geq1$. In particular, from \eqref{eq:xi} we have that \begin{equation*} \tfrac\pi2 - \xi(1) = \tfrac\pi2 r^{2k} - r^{k}\xi(r),\quad \text{for all }r\geq1, \end{equation*} whose substitution into \eqref{eq:xi} yields \begin{equation*} \xi'(r)=k{\pi}r^{k-1}-k\,\frac{\xi(r)}{r},\quad \text{for all }r\geq1. 
\end{equation*} On the other hand, differentiating \eqref{eq:47} we obtain also \begin{equation}\label{eq:112} \xi'(r)=\frac1{r^{1+k}}\int_{S_r^+} \frac{\partial\Phi_k}{\partial\nu}\, \psi_k\,ds \end{equation} so that \begin{equation}\label{eq:27} \int_{S_r^+} \frac{\partial\Phi_k}{\partial\nu}\, \psi_k\,ds = k(\pi r^{2k} -r^k \xi(r))\quad\text{for all }r\geq1. \end{equation} Now we turn to \[ \zeta_R(r):= \int_{0}^{\pi} w_R(r\cos t,r\sin t)\,\sin(kt)\,dt \] which is the $k$-th Fourier coefficient of the harmonic function $w_R$ and hence satisfies, for some $C_{R}\in\ensuremath{\mathbb{R}}$, $\big( r^{-k}\zeta_R(r) \big)'= \frac{C_{R}}{r^{1+2k}}$ in $(0,R]$. Integrating the previous equation over $[r,R]$ we obtain that \[ R^{-k} \zeta_R(R) -r^{-k} \zeta_R(r) = \frac{C_{R}}{2k} \left(\frac{1}{r^{2k}}-\frac{1}{R^{2k}}\right),\quad \text{for all $r\in(0,R)$}. \] By regularity of $w_R$ we necessarily have that $C_{R}=0$. Hence \begin{equation} \zeta_R(r)= \tfrac{\zeta_R(R)}{R^{k}} r^{k}\quad \text{and}\quad \label{eq:116}\zeta_R'(r)=k\tfrac{\zeta_R(R)}{R^{k}} r^{k-1}, \quad \text{for all }r\in(0,R]. \end{equation} From the definition of $\zeta_R$ we have that $\zeta_R'(r)=\frac1{r^{1+k}}\int_{S_r^+} \nabla w_R\cdot\nu \, \psi_k\,ds$. Hence \[ \int_{S_r^+} \nabla w_R\cdot\nu \, \psi_k\,ds =k\tfrac{\zeta_R(R)}{R^k}\, r^{2k} \] from which, taking into account the boundary conditions for $w_R$, it follows that \begin{equation}\label{eq:37} \int_{S_R^+} \nabla w_R\cdot\nu \, \psi_k\,ds = kR^k \xi(R) . \end{equation} Combining \eqref{eq:27}, \eqref{eq:37}, and \eqref{eq:xi} we conclude that \[ \int_{S_R^+} \left(\frac{\partial w_R}{\partial \nu} \psi_k - \frac{\partial \Phi}{\partial \nu} \psi_k \right)\,ds = 2kR^k \xi(R)-k\pi R^{2k}= 2k \bigg(\xi(1) - \frac\pi2 \bigg) = - 2\mathfrak{m}_k \] by virtue of Lemma \ref{l:xi1}. \end{proof} By combining the previous results we obtain the following asymptotics for the eigenvalue variation. 
\begin{thm}\label{t:main_asy_eige} Let $\Omega$ be a bounded open set in $\ensuremath{\mathbb{R}}^2$ satisfying \eqref{eq:38} and \eqref{eq:40}. Let $N\geq 1$ be such that the $N$-th eigenvalue $\lambda_N$ of $q_0$ on $\Omega$ is simple with associated eigenfunctions having in $0$ a zero of order $k$ with $k$ as in \eqref{eq:orderk}. For $\ensuremath{\ensuremath{\mathbf{a}}repsilon}\in (0,\ensuremath{\ensuremath{\mathbf{a}}repsilon}_0)$ let $\lambda_N(\ensuremath{\ensuremath{\mathbf{a}}repsilon})$ be the $N$-th eigenvalue of $q_\ensuremath{\ensuremath{\mathbf{a}}repsilon}$ on $\Omega$. Then \begin{equation*} \lim_{\ensuremath{\ensuremath{\mathbf{a}}repsilon}\to 0^+}\frac{\lambda_N-\lambda_N(\ensuremath{\ensuremath{\mathbf{a}}repsilon})}{\ensuremath{\ensuremath{\mathbf{a}}repsilon}^{2k}}= -2 \beta^2\,{\mathfrak m}_k \end{equation*} with $\beta$ being as in \eqref{eq:orderk} and ${\mathfrak m}_k$ being as in \eqref{eq:mathfrak-m} and \eqref{eq:Ik}. \end{thm} In particular, Theorem \ref{t:main_asy_eige} and \eqref{eq:buU} above provide a proof of Theorem \ref{t:gad} that is alternative to the one given in \cite{Gad}. \paragraph{Acknowledgements} The authors thank the anonymous reviewers for their suggestions and comments. They are also indebted to Andr\'e Froehly for bringing relevant references to their attention. L. Abatangelo, V. Felli and C. L\'ena are partially supported by the project ERC Advanced Grant 2013 n. 339958: ``Complex Patterns for Strongly Interacting Dynamical Systems -- COMPAT''. L. Abatangelo and V. Felli are partially supported by the INDAM-GNAMPA 2018 grant ``Formula di monotonia e applicazioni: problemi frazionari e stabilità spettrale rispetto a perturbazioni del dominio''. V. Felli is partially supported by the PRIN 2015 grant ``Variational methods, with applications to problems in mathematical physics and geometry''. C. 
L\'ena is partially supported by the Portuguese FCT (Project OPTFORMA, IF/00177/2013) and the Swedish Research Council (Grant D0497301). \end{document}
\begin{document} \title{Non-locality from N $>$ 2 Independent Single Photon Emitters} \author{C. Thiel} \affiliation{Institut f\"ur Optik, Information und Photonik, Universit\"at Erlangen-N\"urnberg, 91058 Erlangen, Germany} \author{R. Wiegner} \email{[email protected]} \affiliation{Institut f\"ur Optik, Information und Photonik, Universit\"at Erlangen-N\"urnberg, 91058 Erlangen, Germany} \author{J. von Zanthier} \affiliation{Institut f\"ur Optik, Information und Photonik, Universit\"at Erlangen-N\"urnberg, 91058 Erlangen, Germany} \author{G. S. Agarwal} \affiliation{Department of Physics, Oklahoma State University, Stillwater, Oklahoma 74078-3072, USA} \date{\today} \begin{abstract} We demonstrate that intensity correlations of second order in the fluorescence light of N~$>$~2 single-photon emitters may violate locality while the visibility of the signal remains below $1/\sqrt{2}\approx71\%$. For this, we derive a homogeneous Bell-Wigner-type inequality, which can be applied to a broad class of experimental setups. We trace the violation of this inequality back to path entanglement created by the process of detection. \end{abstract} \pacs{03.65.Ud, 42.50.Dv} \maketitle \section{Introduction\label{1}} The demonstration of non-locality for a system of more than two particles or even for an EPR state of two particles with higher spins has been an outstanding problem in the foundations of quantum theory. There are seminal papers on the theoretical aspects of this subject, notably from Mermin and others~\cite{Mermin:1980,Mermin:1982,Mermin:1990,Agarwal:1993,Zukowski:1993:a}. These papers pointed out the necessity for the use of other Bell-type inequalities \cite{Bell:1964:a} than the celebrated CHSH or CH74 inequalities \cite{Clauser:1969:a,Clauser:1974:a} in case of a system with N~$>$~2 particles or for higher spin systems, involving the measurement of N-particle properties. However, so far, experimental realizations of these inequalities do not seem to exist. 
In this paper we propose as a source for N~$>$~2 particles a chain of N independent single photon emitters, say trapped ions, trapped neutral atoms, quantum dots, or any other equivalent physical system with access to similar behavior, in order to test non-locality with more than two particles. Using this source, path entanglement among the emitted photons is created in the process of detection due to the absence of which-way information when registering a photon in the far field of the source~\cite{Maser:2009:a,Wiegner:2010:a}. Employing \textit{two} point photon-photon correlations denoted by $G^{(2)}({{\boldsymbol r}}_1, {{\boldsymbol r}}_2)$ in Glauber's notation~\cite{Glauber:1963:a}---a quantity which is easily accessible experimentally and has become the workhorse of experimentalists in quantum information science~\cite{Zeilinger:2008:a}---we are able to show that it is possible to violate a new form of Bell-type inequality. In particular, we show that the violation subsists for increasing N when the visibility of the photon-photon correlation signal continuously reduces to 33\%. We note that several theoretical and experimental papers have dealt with the question of violations of Bell's inequalities employing the photon-photon correlation function for \textit{two} independent emitters~\cite{Mandel:1983:a,Ou:1988:a,Zukowski:1993:b}. In this case violation of locality has been demonstrated if the visibility of the two-photon signal exceeds $1/\sqrt{2}\approx71\%$, which is recovered by our results. Our work therefore clearly suggests that without indicating N the magnitude of the visibility of the G$^{(2)}$-signal cannot be taken as a signature of non-locality. In the following we start by introducing our system of N independent single photon emitters.
Hereby we assume that each emitter is initially prepared in an excited state which has an appropriate Zeeman degeneracy so that each emitter upon spontaneous decay scatters either a right hand or a left hand circularly polarized photon. Alternatively, we could consider $N$ two-level atoms with a $\lambda/4$ wave plate positioned in front turning the polarization of the scattered photon into the desired circular polarization. We further assume two spatially separated detectors in the far field of the source each capable of measuring single photon events. The two detectors are used to measure $G^{(2)}({{\boldsymbol r}}_1, {{\boldsymbol r}}_2)$ in order to characterize the two point correlations of the photons emitted by our source. As will be shown below, for N~$>$~2 emitters we need new Bell's inequalities to reveal the non-local behavior of the photon correlations. We derive these inequalities and obtain conditions on the spatial locations of the detectors in order to prove the non-local character of the photon correlations. These arise from the path entanglement created a posteriori by the selection of modes due to the process of detection~\cite{Wiegner:2010:a}. This is a novel aspect of our scheme as we do not need to start with a source producing entanglement ab initio among the N emitters. The paper is organized as follows: in Sec.~\ref{2} we introduce our light source of $N$ uncorrelated single-photon emitters and explain how to describe a joint detection measurement of two photons in the far-field of this source. In Sec.~\ref{3}, we recapitulate the well-known set of CH74 inequalities~\cite{Clauser:1969:a,Clauser:1974:a} and explain how these can be violated by the probability of finding two photons at two positions scattered by our source with N = 2 emitters and cannot be violated in case of N~$>$~2. 
In Sec.~\ref{4}, we derive a new Bell-type inequality~\cite{Janssen:2004:a,Wildfeuer:2008:a} which allows us to prove the non-local character of the correlations among the photons scattered by our source for any N~$\geq$~2. In Sec.~\ref{5} we finally conclude. \section{Description of the physical system\label{2}} \subsection{Setup of $N$ single photon emitters\label{21}} We consider the setup shown in Fig.~\ref{f2}: $N$ single-photon emitters regularly arranged in a row at positions ${\bf R}_1, {\bf R}_2, \ldots, {\bf R}_N$ serve as a source for $N$ photons. The internal level scheme of the emitters is assumed to be characterized by a $V$-configuration, e.g., Zeeman sub-levels with two excited states $|e,-1\rangle$ and $|e,+1\rangle$, which both decay to a common ground state $|g,0\rangle$, accompanied by the emission of a ${\boldsymbol\sigma}^+$ or ${\boldsymbol\sigma}^-$ polarized photon, respectively. For the sake of simplicity we suppose that both transitions are equally probable. We further assume that for an even number of emitters, the first $N/2$ atoms are initially in the state $|e,-1\rangle$ and the remaining $N/2$ atoms in the state $|e,+1\rangle$. The initial state of the system can thus be written in the form \begin{eqnarray}\label{e551} |\psi_i\rangle = \prod_{n=1}^{N/2}|e,-1\rangle_n\otimes\prod_{n=\frac{N+2}{2}}^{N}|e,+1\rangle_n, \end{eqnarray} where the subscript $n$ refers to the atom located at ${\bf R}_n$. For an odd number of emitters, we suppose that the first $(N-1)/2$ emitters are initially in $|e,-1\rangle$ and the remaining $(N+1)/2$ emitters in $|e,+1\rangle$ so that the initial state is given by \begin{eqnarray}\label{e552} |\psi_i\rangle = \prod_{n=1}^{\frac{N-1}{2}}|e,-1\rangle_n\otimes\prod_{n=\frac{N+1}{2}}^{N}|e,+1\rangle_n.
\end{eqnarray} Due to the process of spontaneous decay the $N$ three-level emitters will scatter exactly $\frac{N}{2}$ ($\frac{N-1}{2}$) ${\boldsymbol\sigma}^+$ and $\frac{N}{2}$ ($\frac{N+1}{2}$) ${\boldsymbol\sigma}^-$ polarized photons in the case of an even (odd) number of emitters. Alternatively, we could also consider $N$ two-level atoms with $\lambda/4$ wave plates positioned in front of each particle which turn the polarization of the photons emitted by the atom at ${\bf R}_n$ for $n = 1 \ldots \frac{N}{2} \, (\frac{N-1}{2})$ into ${\boldsymbol\sigma}^+$ and for $n = \frac{N}{2} \, (\frac{N+1}{2}) \ldots N$ into ${\boldsymbol\sigma}^-$ polarization in the case of an even (odd) number of emitters; the only prerequisite for our scheme is that a precisely determined number of ${\boldsymbol\sigma}^+$ and ${\boldsymbol\sigma}^-$ polarized photons of known origin is emitted by the setup. \begin{figure} \caption{Setup: $N$ single-photon emitters located at ${\bf R}_1,\ldots,{\bf R}_N$, observed by two detectors at ${\bf r}_1$ and ${\bf r}_2$ in the far field, each equipped with a polarization filter.}\label{f2} \end{figure} In order to measure the intensity correlation function of second order we locate two detectors at ${\bf r}_1$ and ${\bf r}_2$ in the far-field region of the emitters, each equipped with a polarization filter in front, oriented along ${\boldsymbol\eta}_1$ and ${\boldsymbol\eta}_2$, respectively.
The operator $\hat{D}_N(\delta({\bf r}_j),{\boldsymbol\eta}_j)$ which describes a successful detection event of a photon at the detector at ${\bf r}_j$ ($j=1,2$), after having passed a polarization filter oriented along ${\boldsymbol\eta}_j=\sin\vartheta_j{\boldsymbol\sigma}^++\cos\vartheta_j{\boldsymbol\sigma}^-$, can be written in case of an even number of emitters in the initial state~(\ref{e551}) in the form~\cite{Thiel:2007:a} \begin{eqnarray}\label{e553} \hat{D}_N(\delta({\bf r}_j),{\boldsymbol\eta}_j)=\frac{E_0}{\sqrt{2}} (\sin{\vartheta_j}\sum\limits_{n=1}^{N/2}e^{in\delta_j}|g,0\rangle_n\langle e,-1|+\nonumber\\ +\cos{\vartheta_j}\!\!\!\sum\limits_{n=\frac{N+2}{2}}^{N}\!\!\!e^{in\delta_j}|g,0\rangle_n\langle e,+1|) \end{eqnarray} where the sum over $n$ takes into account that principally each atom could have emitted the recorded photon. Here, $|g,0\rangle\langle e,\pm1|$ is an atomic operator projecting the atomic state $|e,\pm1\rangle$ onto $|g,0\rangle$, $E_0$ is the amplitude of the electric field, and $\delta({\bf r}_j)$ is the optical phase difference between photons being emitted by adjacent atoms and registered at ${\bf r}_j$. In the far-field and using a coordinate system where ${\bf R}_n=n\,{\bf R}_1$ (with ${\bf R}_0\equiv{\bf 0}$) the optical phase difference $\delta_j$ is given by~\cite{Thiel:2007:a} \begin{eqnarray}\label{e23} \delta({\bf r}_j):= \delta_j = kd\,\frac{{\bf r}_j\cdot{\bf R}_1}{|{\bf r}_j|\,|{\bf R}_1|}= kd\,\sin\theta_j, \end{eqnarray} where $k$ denotes the wavenumber of the scattered light, $d$ the interatomic spacing and $\theta_j$ the scattering angle as shown in Fig.~\ref{f2}. 
In analogy, the detection operator for an odd number of $N$ emitters acting on the initial state~(\ref{e552}) can be written as \begin{widetext} \begin{eqnarray}\label{e554} \hat{D}_N({\delta}_j,{\boldsymbol\eta}_j)=\frac{E_0}{\sqrt{2}}\left(\sin{\vartheta_j}\sum\limits_{n=1}^{\frac{N-1}{2}}e^{in\delta_j}|g,0\rangle_n\langle e,-1|+\cos{\vartheta_j}\!\!\!\sum\limits_{n=\frac{N+1}{2}}^{N}\!\!\!e^{in\delta_j}|g,0\rangle_n\langle e,+1|\right). \end{eqnarray} \subsection{Intensity correlation signal of second order\label{22}} With the detection operators $\hat{D}_N({\delta}_j,{\boldsymbol\eta}_j)$ at hand, we can calculate from Eqs.~(\ref{e551})-(\ref{e554}) the intensity correlation function of second order $G^{(2)}_N(\delta_1,\delta_2;{\boldsymbol\eta}_1,{\boldsymbol\eta}_2)$ for our system of $N$ single photon emitters. Hereby, we assume in the following that the first two out of $N$ scattered photons are recorded by the two detectors~\footnote{If other photons than the first two are measured the expressions for $G^{(2)}_N$ have to be multiplied by an overall factor. This does not change the visibility of the expressions but introduces different normalization factors when considering successive measurements. More information regarding this topic is given at the end of the current Section.}. According to~\cite{Thiel:2007:a} we then have \begin{eqnarray}\label{e532} G^{(2)}_N(\delta_1,\delta_2;{{\boldsymbol\eta}}_1,{{\boldsymbol\eta}}_2):= \left|\hat{D}_N(\delta_2,{\boldsymbol\eta}_2)\,\hat{D}_N(\delta_1,{\boldsymbol\eta}_1)|\psi_i\rangle\right|^2. \end{eqnarray} As there is a unique correspondence between ${\boldsymbol\eta}_j$ and $\vartheta_j$, we can write in the following $G^{(2)}_N(\delta_1,\delta_2;{\boldsymbol\eta}_1,{\boldsymbol\eta}_2)$ also as $G^{(2)}_N(\delta_1,\delta_2;\vartheta_1,\vartheta_2)$. 
By fixing the orientation of the polarization filters in front of the two detectors identical to $\vartheta_1=\vartheta_2=\frac{\pi}{4}$, corresponding to ${\boldsymbol\eta}_j=1/\sqrt{2}({\boldsymbol\sigma}^++{\boldsymbol\sigma}^-)$ (for $j=1,2$), we obtain \begin{eqnarray}\label{e555} G^{(2)}_N(\delta_1,\delta_2;\frac{\pi}{4},\frac{\pi}{4})=\frac{E_0^4}{8}\,\left(1+\frac{2}{N\,(N-1)}\,\sum\limits_{n=1}^N(N-n)\,\cos(n\,(\delta_2-\delta_1))\right), \end{eqnarray} which holds for even or odd $N$. This function is illustrated for different $N$ in Fig.~\ref{f55}. \begin{figure} \caption{\label{f55} \label{f55} \end{figure} In the case that the two polarizers are set orthogonal at $\vartheta_1=\frac{\pi}{4}$ and $\vartheta_2=\frac{3\pi}{4}$ (corresponding to ${\boldsymbol\eta}_1=1/\sqrt{2}({\boldsymbol\sigma}^++{\boldsymbol\sigma}^-)$ and ${\boldsymbol\eta}_2=1/\sqrt{2}({-\boldsymbol\sigma}^++{\boldsymbol\sigma}^-)$, respectively) we find for an even number of emitters $N$ \begin{eqnarray}\label{e556} G^{(2)}_N(\delta_1,\delta_2;\frac{\pi}{4},\frac{3\pi}{4})&=&\frac{E_0^4}{8}\,\left(1+\frac{2}{N\,(N-1)}\,\sum\limits_{n=1}^{N/2}(N-2n)\,\cos(n\,(\delta_2-\delta_1))\right.\\ &-&\left.\frac{2}{N\,(N-1)}\,\sum\limits_{\alpha=1}^{N/2}\sum\limits_{n=1}^{N}\left(\Theta(N-n-\alpha+1)\,\cos(n\,(\delta_2-\delta_1))\,\Theta(n-\alpha+1)\right)\right),\nonumber \end{eqnarray} where the Heaviside step function $\Theta(x)$ is defined as \begin{eqnarray} \Theta(x):=\left\{\begin{array}{cc} 0&x\leq0\\ 1&x>0 \end{array}\right.. 
\end{eqnarray} In analogy, we find for an odd number of emitters $N$ \begin{eqnarray}\label{e557} G^{(2)}_N(\delta_1,\delta_2;\frac{\pi}{4},\frac{3\pi}{4})&=&\frac{E_0^4}{8}\,\left(1+\frac{2}{N\,(N-1)}\,\sum\limits_{n=1}^{\frac{N-1}{2}}(N-2n)\,\cos(n\,(\delta_2-\delta_1))\right.\\ &-&\left.\frac{2}{N\,(N-1)}\,\sum\limits_{\alpha=1}^{\frac{N-1}{2}}\sum\limits_{n=1}^{N}\left(\Theta(N-n-\alpha+1)\,\cos(n\,(\delta_2-\delta_1))\,\Theta(n-\alpha+1)\right)\right).\nonumber \end{eqnarray} \end{widetext} From Eq.~(\ref{e555}) we can calculate the visibility ${\cal V}_N$ of the intensity correlation signal of second order $G^{(2)}_N(\delta_1,\delta_2;\frac{\pi}{4},\frac{\pi}{4})$ in case of identically oriented polarizers. For even or odd $N$ we find \begin{eqnarray}\label{VN} {\cal V}_N:=\frac{max[G^{(2)}_N]-min[G^{(2)}_N]}{max[G^{(2)}_N]+min[G^{(2)}_N]}=\frac{N}{3\,N-4}, \end{eqnarray} where $max[G^{(2)}_N]$ ($min[G^{(2)}_N]$) corresponds to the maximum (minimum) value of the function $G^{(2)}_N\equiv G^{(2)}_N(\delta_1,\delta_2;\frac{\pi}{4},\frac{\pi}{4})$. Eq.~(\ref{VN}) shows that ${\cal V}_N$ can be uniquely assigned to the number of emitters $N$. Note that ${\cal V}_N$ represents an ideal theoretical value only; in general, experimental uncertainties and insufficiencies will influence and decrease the attainable visibility.\\ In the derivation of Eqs.~(\ref{e555})-(\ref{VN}) it has been assumed that the two photons measured are the first two photons being scattered by our system of $N$ single photon emitters. This scenario enables to work with the initial states given by Eqs.~(\ref{e551}) or (\ref{e552}). One way to achieve this experimentally is to measure all $N$ scattered photons and pick out the first two detection events via post-selection. 
Thereby the experimental challenge of measuring the intensity correlation function of second order for $N$ possible emitters appears to equal the requirements of measuring the intensity correlation function of $N$th order (see, e.g.,~\cite{Thiel:2007:a}). We note, however, that our measurement scheme requires to resolve the spatial distribution of the two-photon correlation signal $G^{(2)}_N(\delta_1,\delta_2;\vartheta_1,\vartheta_2)$ only and that one can make use of a large bucket detector or a lens system to detect the remaining $N-2$ photons what simplifies the requirements. Besides experimental challenges, the restriction of detecting the first two photons bears a major advantage: since our system consists of a fixed number of scatterers, the number of photons contributing to a successful measurement cycle is precisely known. Therefore, the intensity correlation function of $N$th order is directly proportional to the probability of finding $N$ photons. In particular, the intensity correlation signal of second order $G^{(2)}_N(\delta_1,\delta_2;\vartheta_1,\vartheta_2)$ is related to the detection probability of finding jointly the first two photons $p_{12}^N(\delta_1,\delta_2;\vartheta_1,\vartheta_2)$ via \begin{eqnarray}\label{probability} p_{12}^N(\delta_1,\delta_2;\vartheta_1,\vartheta_2)=\frac{{\cal C}_0^2}{{E_0^4}}\,G^{(2)}_N(\delta_1,\delta_2;\vartheta_1,\vartheta_2), \end{eqnarray} where the superscript $N$ denotes the number of emitters used in the setup and ${\cal C}_0:=\mu\,\frac{\Delta\Omega}{4\pi}$ abbreviates the overall success probability to find a single photon at a detector with quantum efficiency $\mu$ and subtending a solid angle $\Delta\Omega$. \section{CH74 inequalities for multiple emitters\label{3}} In his seminal paper Bell proved that deterministic local theories with hidden variables are incompatible with quantum mechanics~\cite{Bell:1964:a}. 
In this Section, we want to apply this criterion to investigate whether the photons emitted by our system of $N$ regularly arranged single photon emitters display spatial correlations which are compatible or incompatible with local deterministic theories. For this, we recapitulate a well-known set of homogeneous position dependent Bell-type inequalities, the so-called CH74 inequalities~\cite{Clauser:1974:a,Clauser:1969:a,Ou:1988:a}, which we then apply to our system of $N$ single-photon emitters. \subsection{Theory of CH74 inequalities\label{31}} Let us denote the continuous set of hidden variables by $\lambda$. The probability of registering one photon out of a set of $N$ single photon emitters at a position ${\bf r}_j$ is then determined by $p^N({\bf r}_j,\lambda)$, where we included the hidden variables $\lambda$ in the argument of the single photon detection probability $p^N({\bf r}_j)$. Following the requirement of {\em locality}, the joint probability $p^N_{12}({\bf r}_1,{\bf r}_2,\lambda)$ of detecting two photons at ${\bf r}_1$ and ${\bf r}_2$ can be written as the product of the two independent single detection probabilities \begin{eqnarray}\label{pro0} p_{12}^N({\bf r}_1,{\bf r}_2,\lambda)=p^N({\bf r}_1,\lambda)\cdot p^N({\bf r}_2,\lambda). \end{eqnarray} Though $\lambda$ are hidden variables of a deterministic local theory and thus unknown, the detection probabilities obtained when performing a real experiment are determined by the ensemble averages over all $\lambda$ \begin{eqnarray}\label{pro1} &p^N({\bf r}_j)&=\int d\lambda\,g(\lambda)\,p^N({\bf r}_j,\lambda)\quad\mbox{with}\quad j=1,2\nonumber,\\ &p_{12}^N({\bf r}_1,{\bf r}_2)&=\int d\lambda\,g(\lambda)\,p^N({\bf r}_1,\lambda)\,p^N({\bf r}_2,\lambda), \end{eqnarray} where $g(\lambda)$ denotes an appropriate weight function of the hidden variables.
Having introduced the single photon and joint detection probability $p^N({\bf r}_j,\lambda)$ and $p_{12}^N({\bf r}_i,{\bf r}_j,\lambda)$, respectively, the homogeneous CHSH-type inequalities can be derived from the following mathematical inequalities~\cite{Clauser:1974:a}: \begin{eqnarray}\label{ch1} -XY\leq x\,y-x\,y'+x'\,y+x'\,y'-Yx'-Xy\leq0. \end{eqnarray} These inequalities hold for any values $x,x',y,y',X,Y$ fulfilling $0\leq x,x'\leq X$ and $0\leq y,y'\leq Y$. Setting $X = Y = 1$, so that $0\leq x,x',y,y'\leq 1$, we can then identify \begin{eqnarray}\label{set4} &\hspace{-4mm}p^N(\delta_1,\vartheta_1,\lambda)\!=\!x,\;p^N(\delta_1',\vartheta_1,\lambda)\!=\!x',\;p^N(\delta_1,\infty,\lambda)\!=\!X,\nonumber\\ &\hspace{-6mm}p^N(\delta_2,\vartheta_2,\lambda)\!=\!y,\;p^N(\delta_2',\vartheta_2,\lambda)\!=\!y',\;p^N(\delta_2,\infty,\lambda)\!=\!Y, \end{eqnarray} where the arguments of the probabilities refer to our setup: the $j$th detector is sensitive to $\vartheta_j$ polarized light only and is located at ${\bf r}_j$ ($j=1,2$) where Eq.~(\ref{e23}) relates the detector position ${\bf r}_j$ to the optical phase $\delta_j$. The notation $\infty$ indicates that the polarization filter is removed for the particular measurement. The constraint $X\geq x,x'$ ($Y\geq y,y'$) is then guaranteed by the so-called {\em no-enhancement} condition~\cite{Clauser:1969:a,Clauser:1974:a,Ou:1988:a}: the detection probability when using a polarization filter cannot exceed a measurement without a polarization filter. Finally, in agreement with the requirements of a local hidden variable (LHV) theory and Eq.~(\ref{pro0}), we can write the two-photon joint detection probability as \begin{eqnarray}\label{pro8} p^N_{12}(\delta_1,\delta_2;\vartheta_1,\vartheta_2,\lambda)=p^N(\delta_1,\vartheta_1,\lambda)\cdot p^N(\delta_2,\vartheta_2,\lambda).
\end{eqnarray} Combining Eqs.~(\ref{set4}) and (\ref{pro8}) with Eq.~(\ref{ch1}) we obtain, after multiplying the whole expression with $g(\lambda)$ and integrating over $\lambda$, the following inequality: \begin{widetext} \begin{eqnarray}\label{ch14b} S_N\!&\!\!:=\!\!&\!\!\left[p^N_{12}(\delta_1,\delta_2;\vartheta_1,\vartheta_2)-p^N_{12}(\delta_1,\delta'_2;\vartheta_1,\vartheta_2)+p^N_{12}(\delta'_1,\delta_2;\vartheta_1,\vartheta_2)+p^N_{12}(\delta'_1,\delta'_2;\vartheta_1,\vartheta_2)\right.\!\nonumber\\ \!&\!\!-\!\!&\left.p^N_{12}(\delta'_1,\delta_2;\vartheta_1,\infty)-p^N_{12}(\delta_1,\delta_2;\infty,\vartheta_2)\right]/p^2_{12}(\delta_1,\delta_2;\infty,\infty)\leq0. \end{eqnarray} \end{widetext} Hereby, we restricted ourselves to the upper bound of the inequalities~(\ref{ch1}) which allows to normalize the expression by an arbitrary function. In the following we choose as normalization function the expression $p^2_{12}(\delta_1,\delta_2;\infty,\infty)$ which is a constant, independent of $N$. This allows in particular for a better comparability of the results obtained in the forthcoming sections. Eq.~(\ref{ch14b}) is the position dependent CHSH inequality which can be used to investigate the quantum nature of the spatial correlations of the photons emitted by our source depicted in Fig.~\ref{f2} (see also~\cite{Ou:1988:a,Wiegner:2010:a}). Note that, although we are interested in the spatial behavior of the two-photon correlation signal, the polarization degrees of freedom play a crucial role in the measurements of $p^N$ and $p^N_{12}$: we have to include them necessarily in order to satisfy the no-enhancement condition~\cite{Clauser:1969:a,Clauser:1974:a,Ou:1988:a}. We emphasize, however, that in our investigations we focus on the \textit{spatial} correlations among the emitted photons. 
In order to violate the inequality~(\ref{ch14b}) maximally, it is advantageous to adjust the polarization filters such that the detection efficiency of the experimental setup is optimized. In the following we thus choose $\vartheta_1=\vartheta_2=\frac{\pi}{4}$ which yields the best results. With these settings, using Eq.~(\ref{probability}) and employing the relation $p^N_{12}(\delta_1,\delta_2;\vartheta_1,\infty)=p^N_{12}(\delta_1,\delta_2;\vartheta_1,\vartheta_2)+p^N_{12}(\delta_1,\delta_2;\vartheta_1,\vartheta_2+\frac{\pi}{2})$ we calculate the joint detection probabilities needed in Eq.~(\ref{ch14b}) to \begin{eqnarray} \label{pro7a}p^N_{12}(\delta_1,\delta_2;\frac{\pi}{4},\frac{\pi}{4})&=&\frac{{\cal C}_0^2}{E_0^4}\,G^{(2)}_N(\delta_1,\delta_2;\frac{\pi}{4},\frac{\pi}{4}),\\ \label{pro7b}p^N_{12}(\delta_1,\delta_2;\frac{\pi}{4},\infty)&=&\frac{{\cal C}_0^2}{E_0^4}\,G^{(2)}_N(\delta_1,\delta_2;\frac{\pi}{4},\frac{\pi}{4})\\ &+&\frac{{\cal C}_0^2}{E_0^4}\,G^{(2)}_N(\delta_1,\delta_2;\frac{\pi}{4},\frac{3\pi}{4}),\nonumber\\ \label{pro7c}p^2_{12}(\delta_1,\delta_2;\infty,\infty)&=&{\cal C}_0^2\,\frac{1}{2}, \end{eqnarray} where we made use of the expressions derived in Eqs.~(\ref{e555}),~(\ref{e556}) and~(\ref{e557}). Whether or not a violation of the position dependent inequality Eq.~(\ref{ch14b}) for $N\geq2$ does occur can be verified by inserting Eqs.~(\ref{pro7a}) - (\ref{pro7c}) into Eq.~(\ref{ch14b}) and looking thereafter for the maxima of $S_N$ as a function of $\delta_1$, $\delta_2$, $\delta'_1$ and $\delta'_2$.
\subsection{CH74 inequalities for a system of two single-photon emitters (case N=2)\label{32}} For the case of $N=2$ emitters, we find from Eqs.~(\ref{e555}) and (\ref{e556}) \begin{eqnarray} \label{e538a}G^{(2)}_2(\delta_1,\delta_2;\frac{\pi}{4},\frac{\pi}{4})& = & \frac{E_0^4}{8}\,(1+\cos{[\delta_2-\delta_1]})\\ \label{e538b}G^{(2)}_2(\delta_1,\delta_2;\frac{\pi}{4},\frac{3\pi}{4})& = & \frac{E_0^4}{8}\,(1-\cos{[\delta_2-\delta_1]}) \end{eqnarray} so that Eqs.~(\ref{pro7a}) - (\ref{pro7c}) become \begin{eqnarray} \label{pro6a}p^2_{12}(\delta_1,\delta_2;\frac{\pi}{4},\frac{\pi}{4})&=&{\cal C}_0^2\,\frac{1}{8}(1+\cos{[\delta_2-\delta_1]}),\hspace{0.5cm}\\ \label{pro6b}p^2_{12}(\delta_1,\delta_2;\vartheta_1,\infty)&=&{\cal C}_0^2\,\frac{1}{4},\\ \label{pro6c}p^2_{12}(\delta_1,\delta_2;\infty,\infty)&=&{\cal C}_0^2\,\frac{1}{2}. \end{eqnarray} Plugging these results into Eq.~(\ref{ch14b}) we obtain \begin{eqnarray}\label{ch11} S_2 &=&\frac{1}{4}\Big(\cos{[\delta_2-\delta_1]}-\cos{[\delta_2'-\delta_1]}\\ &+&\cos{[\delta_2-\delta_1']}+\cos{[\delta_2'-\delta_1']}\Big)-\frac{1}{2}\leq0.\nonumber \end{eqnarray} Looking for the extrema of $S_2$ we find the following set of parameters (see also~\cite{Ou:1988:a,Wiegner:2010:a}) \begin{eqnarray} \label{set1a}\delta_2-\delta_1=\frac{1}{8}\,2\pi, & \delta_2'-\delta_1=\frac{3}{8}\,2\pi, \\ \delta_2-\delta_1'=\frac{1}{8}\,2\pi, & \delta_2'-\delta_1'=\frac{1}{8}\,2\pi, \nonumber \end{eqnarray} which lead, in combination with~(\ref{ch11}), to the following inequality with respect to the spatial correlations of the photons scattered by two single-photon emitters \begin{eqnarray}\label{ch12} S_2 = \sqrt{2}-1\leq0. \end{eqnarray} The inequality Eq.~(\ref{ch12}) is derived assuming an ideal visibility of 100\% for the two-photon correlation functions (Eqs.~(\ref{e538a}) and~(\ref{e538b})). 
However, the visibility that can be achieved in a real experiment is usually below that value due to experimental uncertainties, limited detector efficiencies etc. Taking a reduced visibility ${\cal V} < 1$ for $G^{(2)}_2(\delta_1,\delta_2;\frac{\pi}{4},\frac{\pi}{4})$ and $G^{(2)}_2(\delta_1,\delta_2;\frac{\pi}{4},\frac{3\pi}{4})$ into account, Eq. (\ref{ch12}) reads: \begin{eqnarray}\label{ch12V} S_2 =\sqrt{2} \cdot {\cal V} - 1\leq0. \end{eqnarray} This inequality may be violated only if the visibility exceeds $\frac{1}{\sqrt{2}}\approx71\%$~\cite{Ou:1988:a,Wiegner:2010:a,Zukowski:1993:b}. \subsection{CH74 inequalities for a system of multiple single-photon emitters (case $N>2$)\label{33}} For the case of $N=2$ emitters, the extrema of $S_2$ are obtained using the set of analytical expressions for $\delta_1,\delta_1',\delta_2,\delta_2'$ provided in Eq.~(\ref{set1a}). These can also be written in the form \begin{eqnarray}\label{e559} \delta_1=\alpha_1\,2\pi,\quad\delta_2=(\frac{1}{8}+\alpha_2)\,2\pi,\\ \delta_1'=(\frac{2}{8}+\alpha_3)\,\pi,\quad\delta_2'=(\frac{3}{8}+\alpha_4)\,\pi,\nonumber \end{eqnarray} with $\alpha_i \in {\mathbb N}$ ($i=1,...,4$). In contrast, for $N>2$, the joint detection probabilities present in $S_N$ get more involved (c.f.~Eqs.~(\ref{e555}),~(\ref{e556}) and~(\ref{e557})). The values for $\delta_1,\delta_1',\delta_2,\delta_2'$ giving rise to maxima of $S_N$ were thus determined numerically. This approach unveiled that the maxima of $S_N$ for any $N>2$ can be obtained by choosing \begin{eqnarray}\label{e558} \delta_1=\alpha_1\,2\pi,\quad\delta_2=\alpha_2\,2\pi,\quad\delta_1'=\alpha_3\,\pi,\quad\delta_2'=\alpha_4\,\pi, \end{eqnarray} again with $\alpha_i \in {\mathbb N}$ ($i=1,...,4$, $\alpha_3,\alpha_4\not=0$). \begin{figure} \caption{\label{f56} \label{f56} \end{figure} The results of our numerical calculations for the maxima of $S_N$ (for $N=2,...,10$) are shown in Fig.~\ref{f56}. For $N=2$ we obtain as before $S_2=\sqrt{2}-1$. 
For $N=3,4$ we find in both cases $S_3=S_4=0$. For $N>4$ the values of $S_N$ are displayed in the plot: we see that the behavior is slightly different for even $N$ (red stars) and for odd $N$ (blue stars). However, we find that a violation of $S_N$ appears only for the case $N=2$. In conclusion, we see from Fig.~\ref{f56} that $S_N$ cannot be violated by the setup shown in Fig.~\ref{f2} for $N>2$ emitters. Taking into account the visibility of the intensity correlation function of second order as derived in Sec.~\ref{22} (c.f Eq.~(\ref{VN})) this result is in agreement with~Eq.(\ref{ch12V})~\cite{Ou:1988:a,Wiegner:2010:a}: while the joint detection probability for our setup in case of $N=2$ shows a modulation with a theoretical visibility of ${\cal V}_2=100\%$, Eq.~(\ref{VN}) reveals that ${\cal V}_N$ drops rapidly with $N>2$. This is illustrated in Fig.~\ref{f57}: already for the case of $N=3$ the visibility is reduced to ${\cal V}_3=60\%$, i.e., below the critical value of $1/\sqrt{2}\approx71\%$. The latter was found to be the required value in order to violate the Bell-type inequalities (\cite{Clauser:1969:a,Clauser:1974:a}, c.f.~Eq.(\ref{ch12V})). \begin{figure} \caption{\label{f57} \label{f57} \end{figure} Triggered by these results, we will consider a different inequality which is more appropriate for our system in the next Section. As it turns out this inequality is able to prove that the spatial intensity-intensity correlations of the photons spontaneously emitted by our source of $N$ single photon emitters are non-local in nature, even in the case of $N>2$ emitters, i.e., for a visibility ${\cal V}_N < 71\%$. \section{A more suitable inequality for multiple emitters\label{4}} In the following we introduce a new Bell-type inequality which allows to reveal the non-classical nature of the spatial intensity-intensity correlations even in the case that a two-photon correlation signal with a visibility less than 71\% is measured. 
In fact, as will be shown, this new Bell-type inequality allows one to reveal the non-classical character of the two-photon signal even for a visibility approaching 33\%. To demonstrate this, we start with a different mathematical inequality based on a so-called Bell-Wigner inequality (see, e.g.,~\cite{Pitowsky:1989:a,Janssen:2004:a}). \subsection{Derivation of a homogeneous Bell-Wigner (HBW) inequality\label{41}} The Bell-Wigner inequality can be written in the following form~\cite{Janssen:2004:a} \begin{eqnarray}\label{pit} 0\leq x_1-x_1\,x_2-x_1\,x_3+x_2\,x_3, \end{eqnarray} which holds under the condition that $0\leq x_1,x_2,x_3\leq1$; for a proof of this inequality we refer to~\cite{Pitowsky:1989:a} (see also Appx.~\ref{app2}). By identifying $x_j$ ($j=1,2,3$) again with single photon detection probabilities we could speak of Eq.~(\ref{pit}) as an (inhomogeneous) Bell-type inequality since it considers both single photon \emph{and} joint detection probabilities. However, as motivated in the derivation of the CHSH-type inequality above, the experimental requirements can be eased if the inequality under investigation involves only detection probabilities of the same order. Hence, our goal is to derive a \emph{homogeneous} Bell-type inequality on the basis of the above Bell-Wigner inequality which considers joint detection probabilities only, being subject to the same overall success probability. Our proposal for a new inequality reads \begin{eqnarray}\label{BWT} 0\leq x_1\,x_4-x_1\,x_2-x_1\,x_3+x_2\,x_3, \end{eqnarray} and holds for the constraints $0\leq x_1,x_2,x_3\leq x_4\leq1$. The proof of~(\ref{BWT}) is provided in Appx.~\ref{app2}. In analogy to the foregoing Section, we consider the setup with an even (odd) number of emitters $N$ as displayed in Fig.~\ref{f2}.
Again, a photon detection event registered at the $j$th detector is characterized by two parameters: the position ${\bf r}_j$ giving rise to an optical phase $\delta_j$ and the orientation of the $j$th polarizer ${\boldsymbol\eta}_j$ which we choose to be oriented along $1/\sqrt{2}({\boldsymbol\sigma}^-+{\boldsymbol\sigma}^+)$ ($j=1,2$), corresponding to $\vartheta_2=\vartheta_1=\frac{\pi}{4}$. The latter optimizes the overall success of the photon detection probabilities. We identify again the parameters of Eq.~(\ref{BWT}) with the following detection probabilities \begin{eqnarray}\label{set5} &\hspace{-4mm}p^N(\delta_1,\vartheta_1,\lambda)\!=\!x_1,\;p^N(\delta_2,\vartheta_1,\lambda)\!=\!x_2,\nonumber\\ &\hspace{-6mm}p^N(\delta_3,\vartheta_2,\lambda)\!=\!x_3,\;p^N(\delta_4,\infty,\lambda)\!=\!x_4, \end{eqnarray} where $\infty$ indicates once more that the polarization filter is removed for the particular measurement. The constraint $x_4\geq x_3,x_2,x_1$ of the inequality~(\ref{BWT}) is thus guaranteed by the no-enhancement condition~\cite{Clauser:1969:a,Clauser:1974:a,Ou:1988:a}: the detection probability with a polarization filter cannot exceed the measurement without a polarization filter. Following the usual assumptions of an LHV theory, we define the joint detection probability exactly as in Eq.~(\ref{pro8}). \begin{widetext}
Using this relation together with~(\ref{set5}), the inequality~(\ref{BWT}), after multiplying by $g(\lambda)$ and integrating over $\lambda$, reads \begin{eqnarray}\label{BWT2} T_N&:=&\left[p^N_{12}(\delta_1,\delta_4;\vartheta_1,\infty)-p^N_{12}(\delta_1,\delta_2;\vartheta_1,\vartheta_1)\right.\nonumber\\ &-&\left.p^N_{12}(\delta_1,\delta_3;\vartheta_1,\vartheta_2)+p^N_{12}(\delta_2,\delta_3;\vartheta_1,\vartheta_2)\right]/p^2_{12}(\delta_1,\delta_2;\infty,\infty)\geq0.\hspace{1cm} \end{eqnarray} In analogy to the foregoing Section and to provide a better comparability with the results obtained so far we normalized Eq.~(\ref{BWT2}) again by the factor $p^2_{12}(\delta_1,\delta_2;\infty,\infty)$ which is independent of $N$ (c.f.~Eq.~(\ref{pro7c})). In the following, we refer to the inequality~(\ref{BWT2}) as {\em homogeneous Bell-Wigner} (HBW) inequality. \end{widetext} \subsection{Violation of the HBW inequality for a system of multiple single-photon emitters\label{42}} In this subsection, we will test the HBW inequality~(\ref{BWT2}) for $N\geq2$ single-photon emitters by determining the minimum values of $T_N$. For this purpose, employing Eqs.~(\ref{e555}),~(\ref{e556}) and~(\ref{e557}), we make use of the joint detection probabilities Eqs.~(\ref{pro7a}) - (\ref{pro7c}) and search for the minima of $T_N$. However, as the analyses get involved and analytically intricate, we only provide numerical results, which we obtained by scanning through the complete parameter space of $\delta_1,\delta_2,\delta_3,\delta_4$ for each $N$ separately. \begin{figure} \caption{\label{f58} \label{f58} \end{figure} The results for the minima of $T_N$ ($min[T_N]$) for $N=2,...,10$ are displayed in Fig.~\ref{f58}. It shows that we have $min[T_N] < 0$ for $N=2,...,10$. For $N=3$ we obtain the lowest value of $min[T_3]\approx-0.254$ and for $N=10$ we have $min[T_{10}] \approx-0.118$. 
Even though the values of $min[T_N]$ increase monotonically for $N>2$, numerical calculations indicate that they approach zero only for $N\rightarrow\infty$. This suggests that a violation of the HBW inequalities~(\ref{BWT2}) can be obtained for any finite $N$. Note that for $N=2$ we obtain $min[T_2]=-0.125$ which sticks out of the overall behavior. We explain this outlier by the fact that $T_2$ depends only on three of the four parameters $\delta_1,\delta_2,\delta_3,\delta_4$ since, due to destructive interference, we have $p^2_{12}(\delta_1,\delta_4;\vartheta_1,\infty)=\frac{{\cal C}_0^2}{4}$, i.e., a constant independent of $\delta_1$ and $\delta_4$. In contrast, for $T_N$ with $N>2$ the term $p^N_{12}(\delta_1,\delta_4;\vartheta_1,\infty)$ is not a constant and thus can be employed to shift $min[T_N]$ towards smaller values. \subsection{Interrelationship between violation of the HBW inequality and visibility of the two-photon correlation signal\label{43}} Let us again consider the theoretically attainable visibility ${\cal V}_N$ of the two-photon correlation signal $G^{(2)}_N(\delta_1,\delta_2;\frac{\pi}{4},\frac{\pi}{4})$ (cf.~Fig.~\ref{f57}). From Eq.~(\ref{VN}) we know that it is given by ${\cal V}_N=\frac{N}{3\,N-4}$ which reaches 50\% for $N=4$ and approaches 33\% for $N\rightarrow\infty$. At the same time we can see from Fig.~\ref{f58} that the HBW inequality remains continuously violated when increasing the number of emitters $N$. This shows that the HBW inequality Eq.~(\ref{BWT2}) can be violated by an intensity correlation signal of second order $G^{(2)}_N(\delta_1,\delta_2;\frac{\pi}{4},\frac{\pi}{4})$ having a visibility of below 71\%. In fact, for finite $N$, our results show that a system of $N$ regularly arranged single photon emitters always displays spatial correlations among the scattered photons which violate the criterion of locality even though the visibility of the two-photon correlation signal approaches 33\%.
This clearly demonstrates that without indicating $N$ the magnitude of the visibility of the $G^{(2)}$-signal cannot be taken as a signature of non-locality. \section{Conclusion\label{5}} In conclusion, we investigated the non-local behavior of a system of $N \geq 2$ particles, i.e., photons emitted by a chain of $N$ independent single photon emitters. Path entanglement among the emitted photons is created in the process of detection due to the absence of which-way information when registering a photon in the far field of the source. Introducing a new homogeneous Bell-Wigner inequality and employing simple photon-photon correlation functions which are experimentally easily implementable in the laboratory we showed that this inequality can be violated for any finite number $N$ even though the visibility of the two-photon signal approaches 33\% in this case. The violation of the homogeneous Bell-Wigner inequality unambiguously proves the non-local correlations of the emitted particles. In contrast, using the well-known CH74 inequalities, it turned out that no such violation can be obtained for $N>2$. For this a visibility greater than 71\% is required which cannot be achieved for $N>2$. \section{Proof of inequality~(\ref{BWT})\label{app2}} In this appendix we prove the inequality~(\ref{BWT}) which is an extension of the Bell-Wigner inequality~(\ref{pit}) (see, e.g., \cite{Pitowsky:1989:a}). The Bell-Wigner inequality usually reads \begin{eqnarray} 0\leq x_1-x_1\,x_2-x_1\,x_3+x_2\,x_3, \end{eqnarray} which is valid under the condition $0\leq x_1,x_2,x_3\leq1$. As explained in Sec.~\ref{4}, it is advantageous to use the inequality~(\ref{BWT}) which reads \begin{eqnarray}\label{BWTA} 0\leq x_1\,x_4-x_1\,x_2-x_1\,x_3+x_2\,x_3, \end{eqnarray} consisting of products of the form $x_i\,x_j$ ($i,j=1,2,3,4$) only. Eq.~(\ref{BWTA}) holds if $1\geq x_4\geq x_1,x_2,x_3\geq0$ is fulfilled. For the proof we consider two cases: \\ First, we assume $x_2\geq x_1$.
In this case we can rewrite the inequality~(\ref{BWTA}) as \begin{eqnarray} 0\leq x_1\,(x_4-x_2)+x_3\,(x_2-x_1), \end{eqnarray} which is valid since both brackets are positive or zero due to the fact that $x_2\geq x_1$ and $x_4\geq x_2$ (note that $x_1,x_2,x_3,x_4\geq0$). Second, we assume $x_1>x_2$. Here, we make a further case differentiation: let us assume $x_1\geq x_3$ and rewrite the inequality~(\ref{BWTA}) as \begin{eqnarray} 0\leq x_1\,(x_4-x_2)-x_3\,(x_1-x_2). \end{eqnarray} This inequality is valid since $x_4\geq x_1$ implies $x_4-x_2\geq x_1-x_2\geq0$, which together with $x_1\geq x_3$ yields $x_1\,(x_4-x_2)\geq x_3\,(x_1-x_2)$. In contrast, if we assume $x_3>x_1$, we can rewrite the inequality~(\ref{BWTA}) as \begin{eqnarray} x_1\,(x_4-x_3)-x_2\,(x_1-x_3)\stackrel{x_3>x_1}{>}x_1\,(x_4-x_3)\geq0, \end{eqnarray} where the last inequality holds due to $x_4\geq x_3$. \end{document}
\begin{document} \title{A new kind of slant helix in Pseudo-Riemannian Manifolds} \author{Evren Z\i plar} \address{ Department of Mathematics, Faculty of Science, \c{C}ank\i r\i \ Karatekin University, \c{C}ank\i r\i , Turkey} \email{[email protected]} \urladdr{} \author{Yusuf Yayl\i } \address{Department of Mathematics, Faculty of Science, University of Ankara, Tando\u{g}an, Turkey} \email{[email protected]} \author{\.{I}smail G\"{o}k} \address{Department of Mathematics, Faculty of Science, University of Ankara, Tando\u{g}an, Turkey} \email{[email protected]} \urladdr{} \date{May 30, 2013.} \subjclass[2000]{14H45, 14H50, 53A04} \keywords{Eikonal slant helix, harmonic curvature.} \begin{abstract} In this paper, we define a new kind of slant helix called $f$-eikonal $V_{n}$-slant helix in Pseudo-Riemannian manifolds and give the definition of harmonic curvature functions related to the $f$-eikonal $V_{n}$-slant helix in Pseudo-Riemannian manifolds. Moreover, we give some characterizations of $f$-eikonal $V_{n}$-slant helix by making use of the harmonic curvature functions. \end{abstract} \maketitle \section{\textbf{Introduction}} Curves theory is an important framework in the differential geometry studies. Helix is one of the most fascinating curves because we see helical structure in nature, science and mechanical tools. Helices arise in the field of computer aided design, computer graphics, the simulation of kinematic motion or design of highways, the shape of DNA and carbon nanotubes. Also, we can see the helical structure in fractal geometry, for instance hyperhelices (\cite{jain, scarr, yin}). A curve of constant slope or general helix in Euclidean 3-space $E^{3},$ is defined by the property that its tangent vector field makes a constant angle with a fixed straight line (the axis of general helix).
A classical result stated by Lancret in 1802 and first proved by de Saint Venant in 1845 (\cite {Lancret} and \cite{Struik}) is: A necessary and sufficient condition that a curve be a general helix is that the ratio of curvature to torsion be constant. In \cite{haci}$,$ \"{O}zdamar and Hac\i saliho\u{g}lu defined harmonic curvature functions $H_{i}$ $\left( 1\leq i\leq n-2\right) $ of a curve $\alpha $ and generalized helices in $E^{3}$ to $n-$dimensional Euclidean space $E^{n}$. \ Moreover, they gave a characterization for the inclined curves in $E^{n}$ : \begin{equation} \text{\textquotedblleft A curve is an inclined curve if and only if }\dsum \limits_{i=1}^{n}H_{i}^{2}=\text{constant\textquotedblright } \end{equation} Izumiya and Takeuchi defined a new kind of helix (slant helix) and they gave a characterization of slant helices in Euclidean $3-$space $E^{3}$ \cite {izumiya}. In 2008, \"{O}nder \emph{et al}. defined a new kind of slant helix in Euclidean $4-$space $E^{4}$ which is called $B_{2}-$slant helix and they gave some characterizations of these slant helices in Euclidean $4-$space $E^{4}$ \cite{onder}. And then in 2009, G\"{o}k \emph{et al}. generalized $B_{2}-$slant helix in $E^{4}$ to $E^{n}$, $n>3$, called $V_{n}-$slant helix in Euclidean and Minkowski $n$-space (\cite{gok, gok1}). Lots of authors in their papers have investigated inclined curves and slant helices using the harmonic curvature functions in Euclidean and Minkowski $n$-space ( \cite{ali, ali1, klah, oz}). However, \c{S}enol et al. (\cite{zip}) observed for the first time that the characterization of inclined curves and slant helices in $(1.1)$ is true only as a necessary condition but not as a sufficient one in Euclidean $n$-space. Then, they reconsidered the previous characterizations of inclined curves and slant helices and restructured them as necessary and sufficient conditions. Let $M$ be a Riemannian manifold, where $\left \langle ,\right \rangle $ is the metric.
Let $f:M\rightarrow \mathbb{R} $ be a function and let $\nabla f$ be its gradient, i.e., $ df(X)=\left \langle \nabla f,X\right \rangle $. We say that $f$ is eikonal if it satisfies: $\left \Vert \nabla f\right \Vert $ is constant \cite{scala} . $\nabla f$ is used in many areas of science such as mathematical physics and geometry. So, $\nabla f$ is a very important subject. For example, the Riemannian condition $\left \Vert \nabla f\right \Vert ^{2}=1$ (for non-constant $f$ on connected $M$) is precisely the eikonal equation of geometrical optics. Thus on a connected $M$, a non-constant real-valued $f$ is Riemannian iff $f$ satisfies this eikonal equation. In the geometrical optical interpretation, the level sets of $f$ are interpreted as wave fronts. The characteristics of the eikonal equation (as a partial differential equation), are then the solutions of the gradient flow equation for $f$ (an ordinary differential equation), $x^{\prime }=\func{grad}f(x)$, which are geodesics of $M$ orthogonal to the level sets of $f$, and which are parametrized by arc length. These geodesics can be interpreted as light rays orthogonal to the wave fronts \cite{Fischer} . In this paper, we define $f$-eikonal $V_{n}$-slant helix in $n$-dimensional pseudo-Riemannian manifolds and give the definition of harmonic curvature functions related to $f$-eikonal $V_{n}$-slant helix in $n$-dimensional pseudo-Riemannian manifolds. Moreover, we give some characterizations of $f$ -eikonal $V_{n}$-slant helix by making use of the harmonic curvature functions. \section{\textbf{Preliminaries}} In this section, we give some basic definitions from differential geometry. \begin{definition} A metric tensor $g$ on a smooth manifold $M$ is a symmetric non-degenerate (0,2) tensor field on $M$.
In other words, $g\left( X,Y\right) =g\left( Y,X\right) $ for all $X,Y\in TM$ (tangent bundle) and at the each point $p$ of $M$ if $g\left( X_{p},Y_{p}\right) =0$ for all $Y_{p}\in T_{p}\left( M\right) $ , then $ X_{p}=0$ (non-degenerate), where $T_{p}\left( M\right) $ is the tangent space of $M$ at the point $p$ and $g:T_{p}\left( M\right) \times T_{p}\left( M\right) \rightarrow \mathbb{R} $ \cite{neill} . \end{definition} \begin{definition} A pseudo-Riemannian manifold (or semi-Riemannian manifold) is a smooth manifold $M$ furnished with a metric tensor $g$. That is, a pseudo-Riemannian manifold is an ordered pair $\left( M,g\right) $ \cite {neill} . \end{definition} \begin{definition} We shall recall the notion of a proper curve of order $n$ in a $n$ -dimensional pseudo-Riemannian manifold $M$ with the metric tensor $g$. Let $ \alpha :I\rightarrow M$ be a non-null curve in $M$ parametrized by the arclength $s$, where $I$ is an open interval of the real line $ \mathbb{R} $. We denote the tangent vector field of $\alpha $ by $V_{1}$. We assume that $\alpha $ satisfies the following Frenet formula: \begin{eqnarray*} \nabla _{V_{1}}V_{1} &=&k_{1}V_{2}, \\ \nabla _{V_{1}}V_{i} &=&-\varepsilon _{i-2}\varepsilon _{i-1}k_{i-1}V_{i-1}+k_{i}V_{i+1},\text{ }1<i<n \\ \nabla _{V_{1}}V_{n} &=&-\varepsilon _{n-2}\varepsilon _{n-1}k_{n-1}V_{n-1}, \end{eqnarray*} where \begin{eqnarray*} k_{1} &=&\left \Vert \nabla _{V_{1}}V_{1}\right \Vert >0 \\ k_{i} &=&\left \Vert \nabla _{V_{1}}V_{i}+\varepsilon _{i-2}\varepsilon _{i-1}k_{i-1}V_{i-1}\right \Vert >0,\text{ \ }2\leq i\leq n-1 \\ \varepsilon _{j-1} &=&g\left( V_{j},V_{j}\right) \text{ }\left( =\pm 1\right) \text{ },\text{ }1\leq j\leq n,\text{on }I\text{, } \end{eqnarray*} and $\nabla $ is Levi-Civita connection of $M$. We call such a curve a proper curve of order $n$, $k_{i}$ $\left( 1\leq i\leq n-1\right) $ its $i-th$ curvature and $V_{1},...,V_{n}$ its Frenet Frame field. 
Morever, let us recall that $\left \Vert X\right \Vert =\sqrt{\left \vert g\left( X,X\right) \right \vert }$ for $X\in TM$, where $TM$ is the tangent bundle of $M$ \cite{song} . \end{definition} \section{$f$\textbf{-eikonal }$V_{n}$\textbf{-slant helix curves in pseudo-Riemannian manifolds}} In this section, we define $f$-eikonal $V_{n}$-slant helix curves and we give characterizations for a $f$-eikonal $V_{n}$-slant helix curve in $n$ -dimensional pseudo-Riemannian manifold $M^{n}$ by using harmonic curvature functions in terms of $V_{n}$ of the curve. \begin{definition} \textbf{\ }Let $M$ be a $n$-dimensional pseudo-Riemannian manifold and let $ \alpha \left( s\right) $ be a proper curve of order $n$ (non-null) with the curvatures $k_{i}$ $\left( i=1,...,n-1\right) $ in $M$. Then, harmonic curvature functions of $\alpha $ are defined by \begin{equation*} H_{i}^{\ast }:I\subset \mathbb{R} \rightarrow \mathbb{R} \end{equation*} along $\alpha $ in $M$, where \begin{eqnarray*} H_{0}^{\ast } &=&0, \\ H_{1}^{\ast } &=&\varepsilon _{n-3}\varepsilon _{n-2}\frac{k_{n-1}}{k_{n-2}}, \\ H_{i}^{\ast } &=&\left( k_{n-i}H_{i-2}^{\ast }-\nabla _{V_{1}}H_{i-1}^{\ast }\right) \frac{\varepsilon _{n-\left( i+2\right) }\varepsilon _{n-\left( i+1\right) }}{k_{n-\left( i+1\right) }},\text{ }2\leq i\leq n-2\text{.} \end{eqnarray*} Note that $\nabla _{V_{1}}H_{i-1}^{\ast }=V_{1}\left( H_{i-1}^{\ast }\right) =H_{i-1}^{\ast \prime }$. \end{definition} \begin{definition} Let $\left( M,g\right) $ be a $n$-dimensional pseudo-Riemannian manifold . Let $f\in \digamma \left( M\right) $ and $\nabla f$ be its gradient, i.e. \begin{equation*} g\left( \nabla f,X\right) =df(X)=X\left( f\right) \end{equation*} for all $X\in TM$, where $\digamma \left( M\right) $ is the set of all smooth real-valued functions on $M$. 
Then, we say that $f$ is eikonal function if $f$ satisfies the eikonal equation \begin{equation*} g\left( \nabla f,\nabla f\right) =\text{constant.} \end{equation*} \end{definition} \begin{lemma} The Hessian $H^{f}$ of $f\in \digamma \left( M\right) $ is the symmetric (0,2) tensor field such that \begin{equation*} H^{f}\left( X,Y\right) =g\left( \nabla _{X}\left( \func{grad}f\right) ,Y\right) \text{,} \end{equation*} where $\left( M,g\right) $ is a pseudo-Riemannian manifold and $\nabla $ is Levi-Civita connection of $M$ \cite{neill} . The above Lemma has the following corollary. \end{lemma} \begin{corollary} $H^{f}=0$ iff $\nabla f=\func{grad}f$ is parallel in $M$. \end{corollary} \begin{proof} We assume that $H^{f}=0$. Since $g$ is non-degenerate metric, $\nabla _{X}\left( \func{grad}f\right) =0$ for all $X,Y\in TM$. In other words, $ \nabla f$ is parallel in $M$. Conversely, if $\nabla f$ is parallel in $M$, then $\nabla _{X}\left( \func{ grad}f\right) =0$ for all $X\in TM$. Hence, $H^{f}=0$. This completes the proof. \end{proof} \begin{definition} Let $\left( M,g\right) $ be a $n$-dimensional pseudo-Riemannian manifold and let $\alpha \left( s\right) $ be a proper curve of order $n$ (non-null) in $ M $. Let $f\in \digamma \left( M\right) $ be a eikonal function along curve $ \alpha $, i.e. $g\left( \nabla f,\nabla f\right) $ is constant along curve $ \alpha $. If the function \begin{equation*} g\left( \nabla f,V_{n}\right) \end{equation*} is non-zero constant function along $\alpha $, then $\alpha $ is called a $f$ -eikonal $V_{n}$-slant helix curve, where $V_{n}$ is $n$-th Frenet Frame field. And, $\nabla f$ is called the axis of the $f$-eikonal $V_{n}$-slant helix curve $\alpha $. \end{definition} \begin{theorem} Let $\left( M,g\right) $ be a $n$-dimensional pseudo-Riemannian manifold and let $\alpha \left( s\right) $ be a proper curve of order $n$ (non-null) in $ M $. 
Let us assume that $f\in \digamma \left( M\right) $ be a eikonal function along curve $\alpha $, i.e. $g\left( \nabla f,\nabla f\right) =$ constant along curve $\alpha $ and the Hessian $H^{f}$ $=0$. If $\alpha $ is a $f$-eikonal $V_{n}$-slant helix curve with the axis $\nabla f$, then the system \begin{equation} g\left( V_{n-(i+1)},\nabla f\right) =H_{i}^{\ast }g\left( V_{n},\nabla f\right) ,\text{ }i=1,...,n-2 \end{equation} holds, where $\left \{ V_{1},...,V_{n}\right \} $ and $\left \{ H_{1}^{\ast },...,H_{n-2}^{\ast }\right \} $ are the Frenet frame and the harmonic curvature functions of $\alpha $, respectively. \end{theorem} \begin{proof} Since $\left \{ V_{1},...,V_{n}\right \} $ is the orthonormal frame of the curve $\alpha $ in $M$, $\nabla f$ can be expressed in the form \begin{equation} \nabla f=\lambda _{1}V_{1}+...+\lambda _{n}V_{n}\text{.} \end{equation} By using the definition of $\ f$-eikonal $V_{n}$-slant helix curve and (3.2), we get \begin{equation} g\left( V_{n},\nabla f\right) =\lambda _{n}\varepsilon _{n-1}=\text{constant. } \end{equation} If we take the derivative in each part of (3.3) in the direction $V_{1}$ in $ M$, then we have \begin{equation} g\left( \nabla _{V_{1}}\nabla f,V_{n}\right) +g\left( \nabla f,\nabla _{V_{1}}V_{n}\right) =0\text{.} \end{equation} On the other hand, from Corollary 3.1, $\nabla f$ is parallel in $M$. That is,$\nabla _{X}\nabla f=0$ for all $X\in TM$. So, $\nabla _{V_{1}}\nabla f=0$ for $X=V_{1}$. 
Hence, by using (3.4) and Frenet formulas, we obtain \begin{equation} -\varepsilon _{n-2}\varepsilon _{n-1}k_{n-1}g\left( \nabla f,V_{n-1}\right) =0\text{.} \end{equation} And, since $\varepsilon _{n-2}\varepsilon _{n-1}k_{n-1}$ is different from zero, from (3.5), we get \begin{equation} g\left( \nabla f,V_{n-1}\right) =0\text{.} \end{equation} By taking the derivative in each part of (3.6) in the direction $V_{1}$ in $ M $, we can write the equality \begin{equation} g\left( \nabla _{V_{1}}\nabla f,V_{n-1}\right) +g\left( \nabla f,\nabla _{V_{1}}V_{n-1}\right) =0\text{.} \end{equation} And, since $\nabla _{V_{1}}\nabla f=0$, by using (3.7) and Frenet formulas, we obtain \begin{equation} -\varepsilon _{n-3}\varepsilon _{n-2}k_{n-2}g\left( \nabla f,V_{n-2}\right) +k_{n-1}g\left( \nabla f,V_{n}\right) =0\text{.} \end{equation} Therefore, from (3.8), we have \begin{eqnarray} g\left( \nabla f,V_{n-2}\right) &=&\frac{k_{n-1}}{\varepsilon _{n-3}\varepsilon _{n-2}k_{n-2}}g\left( \nabla f,V_{n}\right) \notag \\ g\left( \nabla f,V_{n-2}\right) &=&\frac{\varepsilon _{n-3}\varepsilon _{n-2} }{\left( \varepsilon _{n-3}\right) ^{2}\left( \varepsilon _{n-2}\right) ^{2}} \text{.}\frac{k_{n-1}}{k_{n-2}}g\left( \nabla f,V_{n}\right) \notag \\ g\left( \nabla f,V_{n-2}\right) &=&\varepsilon _{n-3}\varepsilon _{n-2}\frac{ k_{n-1}}{k_{n-2}}g\left( \nabla f,V_{n}\right) \text{.} \end{eqnarray} Moreover, since $H_{1}^{\ast }=\varepsilon _{n-3}\varepsilon _{n-2}\dfrac{ k_{n-1}}{k_{n-2}}$, from (3.9), we can write \begin{equation*} g\left( \nabla f,V_{n-2}\right) =H_{1}^{\ast }g\left( \nabla f,V_{n}\right) \text{.} \end{equation*} It follows that the equality (3.1) is true for $i=1$. According to the induction theory, let us assume that the equality (3.1) is true for all $k$, where $1\leq k\leq i$ for some positive integers $i$. Then, we will prove that the equality (3.1) is true for $i+1$. 
Since the equality (3.1) is true for some positive integers $i$, we can write \begin{equation} g\left( V_{n-\left( i+1\right) },\nabla f\right) =H_{i}^{\ast }g\left( \nabla f,V_{n}\right) \end{equation} for some positive integers $i$. If we take derivative in each part of (3.10) in the direction $V_{1}$ in $M$, we have \begin{equation} g\left( \nabla _{V_{1}}V_{n-(i+1)},\nabla f\right) +g\left( V_{n-(i+1)},\nabla _{V_{1}}\nabla f\right) =V_{1}\left( H_{i}^{\ast }g\left( \nabla f,V_{n}\right) \right) \text{.} \end{equation} And, by using (3.11) and Frenet formulas, we get the equality \begin{eqnarray} V_{1}\left( H_{i}^{\ast }g\left( \nabla f,V_{n}\right) \right) &=&-\varepsilon _{n-\left( i+3\right) }\varepsilon _{n-\left( i+2\right) }k_{n-(i+2)}g\left( V_{n-(i+2)},\nabla f\right) \\ &&+k_{n-(i+1)}g\left( V_{n-i},\nabla f\right) +g\left( V_{n-(i+1)},\nabla _{V_{1}}\nabla f\right) \text{.} \notag \end{eqnarray} Morever, $\nabla _{V_{1}}\nabla f=0$. Hence, from (3.12), we can write \begin{equation} -\varepsilon _{n-\left( i+3\right) }\varepsilon _{n-\left( i+2\right) }k_{n-(i+2)}g\left( V_{n-(i+2)},\nabla f\right) +k_{n-(i+1)}g\left( V_{n-i},\nabla f\right) =V_{1}\left( H_{i}^{\ast }g\left( \nabla f,V_{n}\right) \right) \text{.} \end{equation} And, from (3.13), we obtain \begin{eqnarray} g\left( V_{n-(i+2)},\nabla f\right) &=&\left \{ -V_{1}\left( H_{i}^{\ast }g\left( \nabla f,V_{n}\right) \right) +k_{n-(i+1)}g\left( V_{n-i},\nabla f\right) \right \} . 
\\ &&\frac{\varepsilon _{n-\left( i+3\right) }\varepsilon _{n-\left( i+2\right) }}{\left( \varepsilon _{n-\left( i+3\right) }\right) ^{2}\left( \varepsilon _{n-\left( i+2\right) }\right) ^{2}}\frac{1}{k_{n-(i+2)}} \notag \\ &=&\left \{ -V_{1}\left( H_{i}^{\ast }g\left( \nabla f,V_{n}\right) \right) +k_{n-(i+1)}g\left( V_{n-i},\nabla f\right) \right \} \varepsilon _{n-\left( i+3\right) }\varepsilon _{n-\left( i+2\right) }\frac{1}{k_{n-(i+2)}}\text{.} \notag \end{eqnarray} On the other hand, since the equality (3.1) is true for $i-1$ according to the induction hypothesis, we have \begin{equation} g\left( V_{n-i},\nabla f\right) =H_{i-1}^{\ast }g\left( \nabla f,V_{n}\right) \text{.} \end{equation} Therefore, by using (3.3), (3.14) and (3.15), we get \begin{equation} g\left( V_{n-(i+2)},\nabla f\right) =\left \{ -V_{1}\left( H_{i}^{\ast }\right) +k_{n-(i+1)}H_{i-1}^{\ast }\right \} \varepsilon _{n-\left( i+3\right) }\varepsilon _{n-\left( i+2\right) }\frac{1}{k_{n-(i+2)}}g\left( \nabla f,V_{n}\right) \end{equation} Moreover, we obtain \begin{equation} H_{i+1}^{\ast }=\left \{ k_{n-(i+1)}H_{i-1}^{\ast }-V_{1}\left( H_{i}^{\ast }\right) \right \} \varepsilon _{n-\left( i+3\right) }\varepsilon _{n-\left( i+2\right) }\frac{1}{k_{n-(i+2)}} \end{equation} for $i+1$ in the Definition 3.1. So, we have \begin{equation*} g\left( V_{n-(i+2)},\nabla f\right) =H_{i+1}^{\ast }g\left( \nabla f,V_{n}\right) \end{equation*} by using (3.16) and (3.17). It follows that the equality (3.1) is true for $ i+1$. Consequently, we get \begin{equation*} g\left( V_{n-(i+1)},\nabla f\right) =H_{i}^{\ast }g\left( \nabla f,V_{n}\right) \end{equation*} for all $i$ according to induction theory. This completes the proof. \end{proof} \begin{theorem} Let $\left( M,g\right) $ be a $n$-dimensional pseudo-Riemannian manifold and let $\alpha \left( s\right) $ be a proper curve of order $n$ (non-null) in $ M $. 
Let us assume that $f\in \digamma \left( M\right) $ be a eikonal function along curve $\alpha $, i.e. $g\left( \nabla f,\nabla f\right) =$ constant along curve $\alpha $ and the Hessian $H^{f}$ $=0$. If $\alpha $ is a $f$-eikonal $V_{n}$-slant helix curve with the axis $\nabla f$, then the axis of the curve $\alpha $ \begin{equation*} \nabla f=\left \{ \varepsilon _{0}H_{n-2}^{\ast }V_{1}+...+\varepsilon _{n-3}H_{1}^{\ast }V_{n-2}+\varepsilon _{n-1}V_{n}\right \} g\left( \nabla f,V_{n}\right) \text{,} \end{equation*} where $\left \{ V_{1},V_{2},...,V_{n}\right \} $ and $\left \{ H_{1}^{\ast },...,H_{n-2}^{\ast }\right \} $ are the Frenet frame and the harmonic curvatures of $\alpha $, respectively. \end{theorem} \begin{proof} Since $\alpha $ is a $f$-eikonal $V_{n}$-slant helix curve , we can write \begin{equation} g\left( \nabla f,V_{n}\right) =\text{constant.} \end{equation} If we take the derivative in each part of (3.18) in the direction $V_{1}$ in $M$, then we have \begin{equation} g\left( \nabla _{V_{1}}\nabla f,V_{n}\right) +g\left( \nabla f,\nabla _{V_{1}}V_{n}\right) =0\text{.} \end{equation} On the other hand, from Corollary 3.1, $\nabla f$ is parallel in $M$. That's why, $\nabla _{V_{1}}\nabla f=0$. Then, we obtain \begin{equation} -\varepsilon _{n-2}\varepsilon _{n-1}k_{n-1}g\left( \nabla f,V_{n-1}\right) =0 \end{equation} by using (3.19) and Frenet formulas. Since $\varepsilon _{n-2}\varepsilon _{n-1}k_{n-1}$ is positive function, (3.20) implies that \begin{equation*} g\left( \nabla f,V_{n-1}\right) =0\text{.} \end{equation*} Hence, we can write the axis of $\alpha $ as \begin{equation} \nabla f=\lambda _{1}V_{1}+\lambda _{2}V_{2}+...+\lambda _{n-2}V_{n-2}+\lambda _{n}V_{n}\text{.} \end{equation} Moreover, from (3.21), we get \begin{eqnarray*} \varepsilon _{0}\lambda _{1} &=&g\left( \nabla f,V_{1}\right) \\ \varepsilon _{1}\lambda _{2} &=&g\left( \nabla f,V_{2}\right) \\ &&. \\ &&. \\ &&. 
\\ \varepsilon _{n-3}\lambda _{n-2} &=&g\left( \nabla f,V_{n-2}\right) \\ \varepsilon _{n-1}\lambda _{n} &=&g\left( \nabla f,V_{n}\right) \end{eqnarray*} by using the metric $g$. On the other hand, from Theorem 3.1, we know that \begin{eqnarray} \lambda _{1} &=&g\left( \nabla f,V_{1}\right) =\varepsilon _{0}H_{n-2}^{\ast }g\left( \nabla f,V_{n}\right) \\ \lambda _{2} &=&g\left( \nabla f,V_{2}\right) =\varepsilon _{1}H_{n-3}^{\ast }g\left( \nabla f,V_{n}\right) \notag \\ &&. \notag \\ &&. \notag \\ &&. \notag \\ \lambda _{n-2} &=&g\left( \nabla f,V_{n-2}\right) =\varepsilon _{n-3}H_{1}^{\ast }g\left( \nabla f,V_{n}\right) \notag \\ \lambda _{n} &=&\frac{1}{\varepsilon _{n-1}}g\left( \nabla f,V_{n}\right) = \frac{\varepsilon _{n-1}}{\left( \varepsilon _{n-1}\right) ^{2}}g\left( \nabla f,V_{n}\right) \notag \\ &=&\varepsilon _{n-1}g\left( \nabla f,V_{n}\right) \text{.} \notag \end{eqnarray} Thus, it can be easily obtained the axis of the curve $\alpha $ as \begin{equation*} \nabla f=\left \{ \varepsilon _{0}H_{n-2}^{\ast }V_{1}+...+\varepsilon _{n-3}H_{1}^{\ast }V_{n-2}+\varepsilon _{n-1}V_{n}\right \} g\left( \nabla f,V_{n}\right) \text{,} \end{equation*} by making use of the equality (3.21) and the system (3.22). This completes the proof. \end{proof} \begin{theorem} Let $\left( M,g\right) $ be a $n$-dimensional pseudo-Riemannian manifold and let $\alpha \left( s\right) $ be a proper curve of order $n$ (non-null) in $ M $. Let us assume that $f\in \digamma \left( M\right) $ be a eikonal function along curve $\alpha $, i.e. $g\left( \nabla f,\nabla f\right) =$ constant along curve $\alpha $ and the Hessian $H^{f}$ $=0$. If $\alpha $ is a $f$-eikonal $V_{n}$-slant helix curve, then $H_{n-2}^{\ast }\neq 0$ and $ \varepsilon _{n-3}H_{1}^{\ast 2}+\varepsilon _{n-4}H_{2}^{\ast 2}+...+\varepsilon _{0}H_{n-2}^{\ast 2}$ is non-zero constant, where $ \left \{ H_{1}^{\ast },...,H_{n-2}^{\ast }\right \} $ is the harmonic curvatures of $\alpha $. 
\end{theorem} \begin{proof} Let $\alpha $ be a $f$-eikonal $V_{n}$-slant helix curve and $\left \{ V_{1},...,V_{n}\right \} $ be the Frenet frame of $\alpha $. Then, from Theorem 3.2, we know that \begin{equation} \nabla f=\left \{ \varepsilon _{0}H_{n-2}^{\ast }V_{1}+...+\varepsilon _{n-3}H_{1}^{\ast }V_{n-2}+\varepsilon _{n-1}V_{n}\right \} g\left( \nabla f,V_{n}\right) \text{.} \end{equation} Therefore, from (3.23), we can write \begin{equation} g\left( \nabla f,\nabla f\right) =\left( g\left( \nabla f,V_{n}\right) \right) ^{2}\left( \varepsilon _{0}^{3}H_{n-2}^{\ast 2}+...+\varepsilon _{n-3}^{3}H_{1}^{\ast 2}+\varepsilon _{n-1}^{3}\right) \text{.} \end{equation} Moreover, by the definition of metric tensor, we have \begin{equation*} \left \vert g\left( \nabla f,\nabla f\right) \right \vert =\left \Vert \nabla f\right \Vert ^{2}\text{.} \end{equation*} According to this Theorem, $\alpha $ is a $f$-eikonal $V_{n}$-slant helix curve. So, $\left \Vert \nabla f\right \Vert =$constant and $g\left( \nabla f,V_{n}\right) $ $=$ non-zero constant along $\alpha $. Hence, from (3.24), we obtain \begin{equation*} \varepsilon _{0}^{3}H_{n-2}^{\ast 2}+...+\varepsilon _{n-3}^{3}H_{1}^{\ast 2}+\varepsilon _{n-1}^{3}=\text{constant.} \end{equation*} In other words, \begin{equation*} \varepsilon _{0}H_{n-2}^{\ast 2}+...+\varepsilon _{n-3}H_{1}^{\ast 2}=\text{ constant.} \end{equation*} Now, we will show that $H_{n-2}^{\ast }\neq 0$ . We assume that $ H_{n-2}^{\ast }=0$. Then, for $i=n-2$ in (3.1), \begin{equation} g\left( V_{1},\nabla f\right) =H_{n-2}^{\ast }g\left( \nabla f,V_{n}\right) =0\text{.} \end{equation} If we take derivative in each part of (3.25) in the direction $V_{1}$ on $M$ , then we have \begin{equation} g\left( \nabla _{V_{1}}V_{1},\nabla f\right) +g\left( V_{1},\nabla _{V_{1}}\nabla f\right) =0\text{.} \end{equation} On the other hand, from Corollary 3.1, $\nabla f$ is parallel in $M$. That's why $\nabla _{V_{1}}\nabla f=0$. 
Then, from (3.26), we have $g\left( \nabla _{V_{1}}V_{1},\nabla f\right) =k_{1}g\left( V_{2},\nabla f\right) =0$ by using the Frenet formulas. Since $k_{1}$ is positive, $g\left( V_{2},\nabla f\right) =0$. Now, for $i=n-3$ in (3.1), \begin{equation*} g\left( V_{2},\nabla f\right) =H_{n-3}^{\ast }g\left( V_{n},\nabla f\right) \text{.} \end{equation*} And, since $g\left( V_{2},\nabla f\right) $ $=0$, $H_{n-3}^{\ast }=0$. Continuing this process, we get $H_{1}^{\ast }=0$. Let us recall that $ H_{1}^{\ast }=\varepsilon _{n-3}\varepsilon _{n-2}\frac{k_{n-1}}{k_{n-2}}$, thus we have a contradiction because all the curvatures are nowhere zero. Consequently, $H_{n-2}^{\ast }\neq 0$. This completes the proof. \end{proof} \begin{lemma} Let $\left( M,g\right) $ be a $n$-dimensional pseudo-Riemannian manifold and let $\alpha \left( s\right) $ be a proper curve of order $n$ (non-null) in $ M $. Let us assume that $H_{n-2}^{\ast }\neq 0$ for $i=n-2$. Then, $ \varepsilon _{n-3}H_{1}^{\ast 2}+\varepsilon _{n-4}H_{2}^{\ast 2}+...+\varepsilon _{0}H_{n-2}^{\ast 2}$ is non-zero constant if and only if $V_{1}\left( H_{n-2}^{\ast }\right) =H_{n-2}^{\ast \prime }=k_{1}H_{n-3}^{\ast }$, where $V_{1}$ and $\left \{ H_{1}^{\ast },...,H_{n-2}^{\ast }\right \} $ are the unit tangent vector field and the harmonic curvatures of $\alpha $, respectively. \end{lemma} \begin{proof} First,we assume that $\varepsilon _{n-3}H_{1}^{\ast 2}+\varepsilon _{n-4}H_{2}^{\ast 2}+...+\varepsilon _{0}H_{n-2}^{\ast 2}$ is non-zero constant. Consider the following functions given in Definition 3.1 \begin{equation*} H_{i}^{\ast }=\left( k_{n-i}H_{i-2}^{\ast }-H_{i-1}^{\ast \prime }\right) \frac{\varepsilon _{n-\left( i+2\right) }\varepsilon _{n-\left( i+1\right) } }{k_{n-\left( i+1\right) }} \end{equation*} for $3\leq i\leq n-2$. 
So, from the equality, we can write \begin{equation} k_{n-\left( i+1\right) }H_{i}^{\ast }=\varepsilon _{n-\left( i+2\right) }\varepsilon _{n-\left( i+1\right) }\left( k_{n-i}H_{i-2}^{\ast }-H_{i-1}^{\ast \prime }\right) \text{.} \end{equation} Hence, in (3.27), if we take $i+1$ instead of $i$, we get \begin{equation} \varepsilon _{n-\left( i+3\right) }\varepsilon _{n-\left( i+2\right) }H_{i}^{\ast \prime }=\varepsilon _{n-\left( i+3\right) }\varepsilon _{n-\left( i+2\right) }k_{n-\left( i+1\right) }H_{i-1}^{\ast }-k_{n-\left( i+2\right) }H_{i+1}^{\ast },\text{ }2\leq i\leq n-3 \end{equation} together with \begin{equation*} H_{1}^{\ast \prime }=-\frac{1}{\varepsilon _{n-4}\varepsilon _{n-3}} k_{n-3}H_{2}^{\ast } \end{equation*} or \begin{equation} H_{1}^{\ast \prime }=-\varepsilon _{n-4}\varepsilon _{n-3}k_{n-3}H_{2}^{\ast }\text{.} \end{equation} On the other hand, since $\varepsilon _{n-3}H_{1}^{\ast 2}+\varepsilon _{n-4}H_{2}^{\ast 2}+...+\varepsilon _{0}H_{n-2}^{\ast 2}$ is constant, we have \begin{equation*} \varepsilon _{n-3}H_{1}^{\ast }H_{1}^{\ast \prime }+\varepsilon _{n-4}H_{2}^{\ast }H_{2}^{\ast \prime }+...+\varepsilon _{0}H_{n-2}^{\ast }H_{n-2}^{\ast \prime }=0 \end{equation*} and so, \begin{equation} \varepsilon _{0}H_{n-2}^{\ast }H_{n-2}^{\ast \prime }=-\varepsilon _{n-3}H_{1}^{\ast }H_{1}^{\ast \prime }-\varepsilon _{n-4}H_{2}^{\ast }H_{2}^{\ast \prime }-...-\varepsilon _{1}H_{n-3}^{\ast }H_{n-3}^{\ast \prime }\text{.} \end{equation} By using (3.28) and (3.29), we obtain \begin{equation} H_{1}^{\ast }H_{1}^{\ast \prime }=-\varepsilon _{n-4}\varepsilon _{n-3}k_{n-3}H_{1}^{\ast }H_{2}^{\ast } \end{equation} and \begin{equation} \varepsilon _{n-\left( i+3\right) }\varepsilon _{n-\left( i+2\right) }H_{i}^{\ast }H_{i}^{\ast \prime }=\varepsilon _{n-\left( i+3\right) }\varepsilon _{n-\left( i+2\right) }k_{n-\left( i+1\right) }H_{i-1}^{\ast }H_{i}^{\ast }-k_{n-\left( i+2\right) }H_{i}^{\ast }H_{i+1}^{\ast },\text{ } 2\leq i\leq n-3\text{.} \end{equation} 
Therefore, by using (3.30), (3.31) and (3.32), an algebraic calculus shows that \begin{equation*} \varepsilon _{0}H_{n-2}^{\ast }H_{n-2}^{\ast \prime }=\varepsilon _{0}k_{1}H_{n-3}^{\ast }H_{n-2}^{\ast } \end{equation*} or \begin{equation*} H_{n-2}^{\ast }H_{n-2}^{\ast \prime }=k_{1}H_{n-3}^{\ast }H_{n-2}^{\ast } \text{.} \end{equation*} Since $H_{n-2}^{\ast }\neq 0$, we get the relation \begin{equation*} H_{n-2}^{\ast \prime }=k_{1}H_{n-3}^{\ast }\text{.} \end{equation*} Conversely, we assume that \begin{equation} H_{n-2}^{\ast \prime }=k_{1}H_{n-3}^{\ast }\text{.} \end{equation} By using (3.33) and $H_{n-2}^{\ast }\neq 0$, we can write \begin{equation} H_{n-2}^{\ast }H_{n-2}^{\ast \prime }=k_{1}H_{n-2}^{\ast }H_{n-3}^{\ast } \end{equation} From (3.32), we have the following equation system: \begin{eqnarray*} \text{for }i &=&n-3\text{, \ \ \ \ \ }\varepsilon _{1}H_{n-3}^{\ast }H_{n-3}^{\ast \prime }=\varepsilon _{1}k_{2}H_{n-4}^{\ast }H_{n-3}^{\ast }-\varepsilon _{0}k_{1}H_{n-3}^{\ast }H_{n-2}^{\ast }\text{,} \\ \text{for }i &=&n-4\text{, \ \ \ \ \ }\varepsilon _{2}H_{n-4}^{\ast }H_{n-4}^{\ast \prime }=\varepsilon _{2}k_{3}H_{n-5}^{\ast }H_{n-4}^{\ast }-\varepsilon _{1}k_{2}H_{n-4}^{\ast }H_{n-3}^{\ast }\text{,} \\ \text{for }i &=&n-5\text{, \ \ \ \ \ }\varepsilon _{3}H_{n-5}^{\ast }H_{n-5}^{\ast \prime }=\varepsilon _{3}k_{4}H_{n-6}^{\ast }H_{n-5}^{\ast }-\varepsilon _{2}k_{3}H_{n-5}^{\ast }H_{n-4}^{\ast }\text{,} \\ &&\cdot \\ &&\cdot \\ &&\cdot \\ \text{for }i &=&2\text{, \ \ \ \ \ \ \ \ \ }\varepsilon _{n-4}H_{2}^{\ast }H_{2}^{\ast \prime }=\varepsilon _{n-4}k_{n-3}H_{1}^{\ast }H_{2}^{\ast }-\varepsilon _{n-5}k_{n-4}H_{2}^{\ast }H_{3}^{\ast }\text{ .} \end{eqnarray*} Moreover, from (3.31) and (3.34), we obtain \begin{equation} \varepsilon _{n-3}H_{1}^{\ast }H_{1}^{\ast \prime }=-\varepsilon _{n-4}k_{n-3}H_{1}^{\ast }H_{2}^{\ast } \end{equation} and \begin{equation} \varepsilon _{0}H_{n-2}^{\ast }H_{n-2}^{\ast \prime }=\varepsilon _{0}k_{1}H_{n-2}^{\ast
}H_{n-3}^{\ast }\text{.} \end{equation} So, by using the above equation system, (3.35) and (3.36), an algebraic calculus shows that \begin{equation} \varepsilon _{n-3}H_{1}^{\ast }H_{1}^{\ast \prime }+\varepsilon _{n-4}H_{2}^{\ast }H_{2}^{\ast \prime }+...+\varepsilon _{0}H_{n-2}^{\ast }H_{n-2}^{\ast \prime }=0\text{.} \end{equation} And, by integrating (3.37), we can easily say that \begin{equation*} \varepsilon _{n-3}H_{1}^{\ast 2}+\varepsilon _{n-4}H_{2}^{\ast 2}+...+\varepsilon _{0}H_{n-2}^{\ast 2} \end{equation*} is a non-zero constant. This completes the proof. \end{proof} \begin{corollary} Let $\left( M,g\right) $ be a $n$-dimensional pseudo-Riemannian manifold and let $\alpha \left( s\right) $ be a proper curve of order $n$ (non-null) in $ M $. Let us assume that $f\in \digamma \left( M\right) $ be an eikonal function along curve $\alpha $, i.e. $g\left( \nabla f,\nabla f\right) =$ constant along curve $\alpha $ and the Hessian $H^{f}$ $=0$. If $\alpha $ is a $f$-eikonal $V_{n}$-slant helix curve, then $V_{1}\left( H_{n-2}^{\ast }\right) =H_{n-2}^{\ast \prime }=k_{1}H_{n-3}^{\ast }$. \end{corollary} \begin{proof} It is obvious by using Theorem 3.3 and Lemma 3.2. \end{proof} \begin{center} \textbf{4. Conclusions} \end{center} In this work, the $f$-eikonal $V_{n}$-slant helix is defined by the gradient vector field $\nabla f$, and $\nabla f$ is called the axis of the eikonal slant helix. Besides, new characterizations of eikonal slant helices are given by using the harmonic curvature functions in $n$-dimensional pseudo-Riemannian manifolds. On the other hand, we want to emphasize an important point. The axis $\nabla f$ defined in this work is non-constant. If the axis $\nabla f$ is considered as a constant vector field, then the eikonal slant helix defined in this paper coincides with $V_{n}$-slant helix which is introduced in \cite {gok1}.
Also, if $\nabla f$ is a Levi-Civita parallel vector field, then the eikonal slant helix is an LC-slant helix defined in \cite{oz, ali2}. \end{document}
\begin{document} \title{Electromagnetically induced transparency and four-wave mixing in a cold atomic ensemble with large optical depth} \author{J.~Geng$^{1,2}$, G.~T.~Campbell$^2$, J.~Bernu$^2$, D.~Higginbottom$^{2}$, B.~M.~Sparkes$^2$, S.~M.~Assad$^2$, W.~P.~Zhang$^1$, N.~P.~Robins$^3$} \author{P.~K.~Lam$^{2,4}$} \email{[email protected]} \author{B.~C.~Buchler$^2$} \email{[email protected]} \address{$^1$Department of Physics, State Key Laboratory of Precision Spectroscopy, East China Normal University, Shanghai 200062, P.~R.~China} \address{$^2$Centre for Quantum Computation and Communication Technology, Department of Quantum Science, The Australian National University, Canberra, ACT 0200, Australia} \address{$^3$Quantum Sensors Lab, Department of Quantum Science, The Australian National University, Canberra, ACT 0200, Australia} \address{$^4$College of Precision Instrument and Opto-electronics Engineering, Key Laboratory of Optoelectronics Information Technology of Ministry of Education, Tianjin University, Tianjin, 300072, P.~R.~China} \date{\today} \begin{abstract} We report on the delay of optical pulses using electromagnetically induced transparency in an ensemble of cold atoms with an optical depth exceeding 500. To identify the regimes in which four-wave mixing impacts on EIT behaviour, we conduct the experiment in both Rb$^{85}$ and Rb$^{87}$. Comparison with theory shows excellent agreement in both isotopes. In Rb$^{87}$, negligible four-wave mixing was observed and we obtained one pulse-width of delay with 50\% efficiency. In Rb$^{85}$ four-wave-mixing contributes to the output. In this regime we achieve a delay-bandwidth product of 3.7 at 50\% efficiency, allowing temporally multimode delay, which we demonstrate by compressing two pulses into the memory medium. 
\end{abstract} \maketitle \section{Introduction} Electromagnetically induced transparency (EIT) \cite{Marangos1998_EITReview,Fleischhauer:RevEIT:2005} is a coherent atom-optical effect that arises due to quantum interference of optical transitions. Since its first experimental observation in a strontium vapour in 1991 \cite{Boller:1991if}, it has been investigated in numerous atomic systems in a wide variety of settings. It is of fundamental interest for its ability to slow light by up to seven orders of magnitude \cite{Budker:1999hd, Hau:1999cz}. In turn, the increased interaction times afforded by slow light allow enhanced non-linear optical interactions \cite{Harris:1990ey,Jain:1996vk,Harris:1999vj}. EIT can also be used to stop and store light in an atomic spinwave leading to its application as a quantum memory \cite{PhysRevA.65.022314,Alex:PRL:2008:100,Honda:2008p4680,Kimble_nature_sph-QM1,Tittel2009_MemReview}. In order to preserve a quantum state, a quantum memory requires storage that is both efficient and noiseless. Ideally, EIT has the potential to be used as a high fidelity quantum memory \cite{Hetet:2008dm}. Considerable work has been done to improve the efficiency of EIT memories, primarily by increasing the optical depth of the slow-light medium. Recent results have demonstrated up to 69\% efficiency for storage and forward recall of light \cite{Chen2013}. In principle, the achievable storage efficiency using EIT can approach 100\% \cite{Gorshkov2007} as the optical depth is made sufficiently large. In this regime the delay of an optical pulse travelling through the EIT medium can be larger than the duration of the pulse \cite{PhysRevA.71.023801} such that the pulse is contained entirely within the medium. At large optical depths, however, nonlinear processes such as four-wave mixing (4WM) may become significant. 
In particular, these processes may introduce gain during slow-light propagation that may contribute noise to the output \cite{PhysRevA.78.023801,PhysRevA.88.013823}, thus diminishing the suitability of EIT as a quantum memory. Experimental work in warm atomic vapours has shown that EIT may have significant 4WM \cite{Phillips2009a,Phillips2011}. In principle, the unbroadened atomic states available in cold atomic ensembles allow high storage efficiency with little added noise due to having a lower 4WM strength than their warm counterparts. Moreover, it has also been reported that the long atomic coherence in cold systems also delivers high storage efficiencies \cite{Chen2013}. In this paper, we report on EIT in an ensemble of cold $^{85}Rb$ atoms with a very high optical depth. We investigate the role of 4WM in the experiment and demonstrate that 4WM can have a non-negligible contribution to the intensity of the signal field after EIT delay. By repeating the experiment in cold $^{87}Rb$ atoms, which have larger detuning between the two ground states, we show that the 4WM can be diminished to the point of having negligible contribution to the signal field at comparable optical depths. Good agreement between our results and theory indicates that the theoretical model used in a recent paper by Lauk \emph{et al.} \cite{PhysRevA.88.013823} is a realistic description of cold atom EIT. We also show that with sufficiently high optical depth, delays of more than two pulse-widths are possible. This is a step towards a temporally multimode quantum memory for increasing the success rate for quantum information protocols \cite{Simon:2007}. Previous experiments have demonstrated large fractional delays using the strong dispersion of dense optical vapours \cite{Camacho2007}. In this paper, we present EIT results with sufficient delay to store multimode pulses.
In the next section, we begin by reviewing the semi-classical theory of EIT-4WM presented in Ref.~\cite{PhysRevA.88.013823}. We then use this theory to model our system. In section \ref{sec-setup}, we present an overview of the cold atom trap and the timing sequence scheme used to prepare the atomic ensemble. We discuss the experimental results in section \ref{sec-results} before concluding in section \ref{sec-concl}. \section{Theory} \begin{figure} \caption{The atomic level structure used for modeling EIT. A strong control field $E$ (red) is used to introduce coupling between the metastable $|s\rangle$ and the ground state $|g\rangle$. The $\Lambda$-system for EIT is formed by the control field and the signal field, $a_s$ (blue). A second $\Lambda$-system is formed by the control field and the idler field, $a_i$ (green), via another excited level $| e^{\prime} \label{fig:tla} \end{figure} To model the EIT-4WM process, we assume a four-level atom coupled by three optical fields as our model of the system as shown in fig.~\ref{fig:tla}. A strong control field $E$, with Rabi frequency $\Omega$, couples a meta-stable state $\ket{s}$, to the excited state $\ket{e}$ and a weak signal field (blue) $a_s$ couples the ground state $\ket{g}$ to $\ket{e}$, completing a two-photon transition between $\ket{g}$ and $\ket{s}$. The same control field also couples $\ket{g}$ to an auxiliary excited state $\ket{e'}$ off-resonantly, completing a second Raman transition with an idler field (green) $a_i$ that is generated by the four-wave mixing process. 
Adiabatically eliminating $\ket{e'}$, the equations of motion for this system are \cite{PhysRevA.88.013823} \begin{align} i \partial_t \sigma_{ge} &= -i \gamma_{ge} \sigma_{ge} - g_s a_{s} - \Omega \sigma_{gs} \label{Maxwell_Bloch:1} \\ i \partial_t \sigma_{gs} &= -i \gamma_{gs} \sigma_{gs} - g_i (\Omega'/\Delta) a^\dagger_{i} - \Omega^* \sigma_{ge} \label{Maxwell_Bloch:2} \\ (\partial_t + c \partial_z) a_{s} &= i g_s N \sigma_{ge} \label{Maxwell_Bloch:3} \\ (\partial_t + c \partial_z) a^\dagger_{i} &= -i g_i N (\Omega'/\Delta) \sigma_{gs}, \label{Maxwell_Bloch:4} \end{align} where $\sigma_{ge}$ and $\sigma_{gs}$ are collective spin-polarisation operators corresponding to the $\ket{g} \rightarrow \ket{e}$ and $\ket{g} \rightarrow \ket{s}$ transitions respectively in an ensemble of $N$ atoms. $\Delta$ is the frequency difference between $\ket{g}$ and $\ket{s}$ minus the frequency difference between $\ket{e}$ and $\ket{e'}$. The coupling rate for $a_{s(i)}$ is $g_{s(i)} $ and, similarly, $\Omega(\Omega^{\prime})$ is the Rabi frequency for the control field driving $\ket{s} \rightarrow \ket{e}$ ($\ket{g} \rightarrow \ket{e^{\prime}}$). $\gamma_{ge}$ and $\gamma_{gs}$ are the decay rates from $\sigma_{ge}$ and $\sigma_{gs}$. The rate $g_s$ differs from $g_i$ only due to the different dipole transition strengths associated with each transition, as does $\Omega$ from $\Omega^{\prime}$. We solve equations (\ref{Maxwell_Bloch:1}-\ref{Maxwell_Bloch:4}) in the Fourier domain to obtain the expression for the transfer function of the signal field in the absence of an injected idler. 
The output signal field, $a_s(z=L,t)$, after propagating through the ensemble of length L, is determined in terms of the input spectrum $a_s(z=0,\omega)$ by \begin{widetext} \begin{equation} a_s(z=L,t) = \int T_s(\omega,z=L) a_s(z=0,\omega) e^{i\omega t} d\omega, \label{outputpulse} \end{equation} \end{widetext} with the transfer function $T_s(\omega)$ given by \begin{widetext} \begin{equation} T_s(\omega) = e^{-\frac{D \gamma_{ge} }{ 4 V(\omega)} \left(i \omega -i \omega \vert\epsilon\vert^2+\vert\epsilon\vert^2\gamma_{ge} -\gamma_{gs}\right)} \left(\frac{\left(\gamma_{ge} \vert\epsilon\vert^2-i \omega -i \vert\epsilon\vert^2\omega +\gamma_{gs}\right)}{ U(\omega)} \sinh\left[\frac{D\gamma_{ge} U(\omega)}{ 4 V(\omega)}\right] +\cosh\left[\frac{D\gamma_{ge} U(\omega)}{ 4 V(\omega)}\right]\right), \label{TransferF} \end{equation} \begin{align} U(\omega) &= \sqrt{\left(i \omega +(i \omega -\gamma_{ge} ) \vert\epsilon\vert^2-\gamma_{gs}\right)^2+4 \vert\epsilon\Omega\vert^2} \nonumber\\ V(\omega) &= (i \gamma_{gs}+\omega ) (\omega +i \gamma_{ge} )-\vert\Omega\vert^2, \nonumber \end{align} \end{widetext} with the definitions \begin{align} D = 2 \frac{g^2 N L}{\gamma_{ge}}; \qquad \epsilon &= \eta\frac{\Omega}{\Delta}; \qquad \eta = \frac{g_{i}\Omega'}{g_{s}\Omega} = \frac{d_{\ket{s} \rightarrow \ket{e'}} \cdot d_{\ket{g} \rightarrow \ket{e'}}}{d_{\ket{g} \rightarrow \ket{e}} \cdot d_{\ket{s} \rightarrow \ket{e}}}. \end{align} The parameter $\eta$ can be expressed in terms of the dipole matrix elements, $d_{\ket{j} \rightarrow \ket{k}}$, for the associated transition and the definition of optical depth, $D$, corresponds to an intensity attenuation of $e^{-D}$ when $\omega \rightarrow 0$ and $\Omega \rightarrow 0$. \begin{figure} \caption{The $^{85} \label{fig:Rb85Levels} \end{figure} The level structure of $^{85}$Rb atoms relevant for the EIT experiment is shown in Figure \ref{fig:Rb85Levels}, assuming $\sigma^+$ polarisations for all of the optical fields. 
We can identify five degenerate four-level structures coupling from each of the Zeeman sub-levels on the $F=2$ manifold. For each of the degenerate systems, both of the excited state manifolds ($\ket{F'=2}$ and $\ket{F'=3}$) are taken into account by summing the strengths of the off-resonant interaction with each excited state weighted by the relative detuning: \begin{equation} \eta_{m_F} = \frac{d_{3,2} \cdot d_{2,2}}{d_{2,3} \cdot d_{3,3}} + \left( \frac{\Delta_{F'=2}}{\Delta_{F'=3}}\right) \frac{d_{3,3}\cdot d_{2,3}}{d_{2,3} \cdot d_{3,3}} = \frac{d_{3,2} \cdot d_{2,2}}{d_{2,3} \cdot d_{3,3}} + \frac{\Delta_{F'=2}}{\Delta_{F'=3}}, \end{equation} where $d_{i,j} \equiv d_{\ket{F=i, m_{F}} \rightarrow \ket{F'=j, m_{F+1}}}$ and $\Delta_{F'=j}$ is the detuning of the idler from the $F'=j$ excited state. The degenerate EIT systems are reduced to a simple four-level model by the introduction of an effective interaction strength ratio $\eta_\mathrm{eff}$. We assume that the population is uniformly distributed across the ground-state manifold, in which case $\eta_\mathrm{eff}$ can be approximated as the mean of the values of $\eta_{m_F}$ for each EIT system. We find that for the D1 line of $^{85}Rb$ $\eta_\mathrm{eff}=1.62$. \section{Experimental setup} \label{sec-setup} \begin{figure*} \caption{a) The experimental setup for EIT in a transiently compressed ensemble of cold ${} \label{fig:setup} \end{figure*} The experiment was conducted using an elongated magneto-optical trap (MOT) for $^{85}$Rb ($^{87}$Rb) atoms \cite{Sparkes2013a}. Trapping was accomplished using two amplified external cavity diode lasers: one for cooling and one to optically pump the atoms back to the hyperfine state used for cooling. The cooling laser was 30 MHz red-detuned from the D$_2$ $F=3 \rightarrow F'=4$ ($F=2 \rightarrow F'=3$) transition and the repump was resonant with the D$_2$ $F=2 \rightarrow F'=3$ ($F=1 \rightarrow F'=2$) transition. 
We loaded atoms for 480 ms, after which the MOT was compressed in two dimensions by smoothly increasing the transverse magnetic field gradients while simultaneously ramping down both the trapping frequency and intensity and the repump intensity over 20 ms \cite{Sparkes2013a}. Once the MOT was compressed the magnetic fields and the repump field were turned off. The trapping beams were turned off $50\mu$s after the repump field so that the atoms were pumped to the $F = 2$ ($F = 3$) ground state. We imposed a wait time of 500 $\mu$s to allow eddy currents in the optical bench and other components to dissipate prior to turning on the signal field. Residual eddy currents continued to create a non-negligible magnetic field during the experimental window that varied at a rate of $\approx 3$ mG/$\mu$s. The experimental setup is schematically shown in Figure~\ref{fig:setup}. The control field was produced by a Ti:Sapphire laser that was locked on resonance with the $^{85}$Rb ($^{87}$Rb) $D_1$ transition from $ F=3$ to $F'=3$ ($F=2 \rightarrow F'=2$). The signal field was produced by sending a portion of the Ti:Sapphire light through an electro-optical modulator (EOM) to produce modulation sidebands that were separated by $\sim3.035$ GHz ($\sim6.835$GHz) \cite{Steck85,Steck87}, the hyperfine splitting of $^{85}$Rb ($^{87}$Rb). The higher frequency of these sidebands was isolated using a filtering cavity and used as the signal field. Both the control and signal fields were gated using AOMs and both were $\sigma^+$ polarised. The signal pulse was focused along the long axis of the atom cloud with a beam waist of 200 $\mu$m to match the signal beam diameter to the cross section of the compressed atom cloud. The control beam was collimated to a diameter of 7 mm to ensure coverage of the entire atom cloud with uniform intensity and aligned to propagate with a small angle relative to the signal beam, overlapping it at the location of the MOT. 
In order to measure the weak signal beam elimination of the control field was required. To accomplish this we employed two stages of spatial filtering. In the first stage, the control field was focused onto the edge of a razor blade which blocked the majority of the optical power. The second stage was a pinhole, through which the signal was focused, that served to eliminate most of the remaining scattered control field. The spatial filtering provided an extinction ratio of $\approx 45$ dB for the control while maintaining $\approx 80$\% signal detection efficiency. \section{Results} \label{sec-results} We examined the propagation of signal pulses through the atomic ensemble under the conditions of slow light. The signal pulse chosen as input had a square temporal profile with 6 $\mu$s width and was recorded after propagation through the ensemble for a variety of control field powers. A reference trace of the signal pulse shape was recorded by blocking the trapping beams so that the MOT was dispersed. The predicted output pulse shape was calculated by applying the transfer function derived from the four-level model, Eq.~(\ref{TransferF}), to the recorded reference trace. A sample of traces recorded at an optical depth of $(550 \pm 20)$ is shown in Figure~\ref{fig:setup} (b) along with the theoretical output pulses. The values for optical depths used in the model were obtained using independent off-resonance absorption measurements. The only free parameters are the decay rate, $\gamma_{gs} = (12.8 \pm 0.5)$ kHz, and the ratio between $\vert\Omega\vert^2$ and the measured control field intensity which are fitted globally and consistent across all of the data. Other parameters can be found in Ref.~\cite{Steck85} with $\gamma_{ge}=\pi \cdot 5.75$MHz and $\Delta=2\pi \cdot 3.035$GHz. We found that there is some departure between the model and experiment in terms of the output pulse shape but that the total output power is well-predicted. 
We attribute this to pulse distortion caused by a residual time-dependent magnetic field resultant from eddy currents in the optical bench that persist after the trapping magnetic fields are turned off. This is based on the observation that adjustments to the background compensation field can partially correct the effect. \begin{figure} \caption{The efficiencies (red), delay times normalised to pulse width (purple) and the product of delay and efficiency (green) for the slow pulses relative to the reference input. The solid lines show the predicted values based on Eq.~(\ref{TransferF} \label{fig:85efficiency} \end{figure} \begin{figure} \caption{The EIT efficiencies (blue), delay times normalised to pulse width (red) and the product of delay and efficiency (green) for the slow pulses relative to the reference input. The solid lines show the predicted values based on Eq.~(\ref{TransferF} \label{fig:87efficiency} \end{figure} The results demonstrate a gain in the signal after propagation through the slow-light medium, indicating that 4WM occurred. In the noise analysis of Ref.~\cite{PhysRevA.88.013823}, the parameter \begin{equation} x = \eta \frac{D}{2}\frac{\gamma_{ge}}{\Delta} \end{equation} is introduced to track the effective 4WM strength and we use it here to compare experiments in different Rb isotopes and at different optical depths. Values larger than 1 correspond to a regime where 4WM substantially impacts on the propagation dynamics. While the effect is less pronounced for values less than 1, the contribution of noise photons can still be significant. At the highest optical depth achieved in our experiment, where OD $= 550 \pm 20$, the 4WM strength value was $x = 0.34$, corresponding to a regime where additional noise photons would have a detrimental effect on the fidelity of a quantum memory. Figure \ref{fig:85efficiency} shows the integrated output pulse intensities and delay times for a variety of control field powers at different optical depths. 
The total EIT efficiency and delay times are in good agreement with the model across the entire parameter space that we explored. For the two highest optical depths, theoretical EIT efficiencies corresponding to zero 4WM strength are included. At lower optical depths, the theoretical predictions for both cases are indistinguishable at the resolution of the plot. The shaded region indicates the effect of 4WM on the efficiency of EIT. The observed gain is in good agreement with the simple four-level model, and contrasts with previous cold atom experiments that have speculated that four-wave mixing reduces the efficiency \cite{Zhang2011}. In contrast, an experiment conducted with $^{87}$Rb in the paper of Chen {\it et al.}~\cite{Chen2013} reported high storage efficiencies with negligible 4WM at an optical depth of 156. This discrepancy in observed 4WM is a result of the different effective 4WM strength in the experiment due to the lower optical depth and larger ground-state splitting in $^{87}$Rb relative to $^{85}$Rb. We verified that for $^{87}$Rb high delays are indeed possible with minimal 4WM by repeating the experiment with that isotope. While the maximum achieved optical depth of OD $= 350 \pm 30$ was lower than for $^{85}$Rb, high delay-bandwidth products were still obtained, as is shown in Figure~\ref{fig:87efficiency}. Here the effective interaction strength ratio was calculated to have a theoretical value of $\eta_\mathrm{eff} = 1.33$, giving an effective 4WM strength of $x=0.08$ for an optical depth of 350. At the resolution of the plot, a theoretical line that includes 4WM is indistinguishable from one that does not have 4WM. Our results are consistent with those reported in Ref.~\cite{Chen2013}. The multimodal capacity of EIT-based memory scales poorly with increasing optical depth; at best the modal capacity is $N \approx \sqrt{D}/3$ \cite{Nunn:2008}. 
In spite of this, the optical depth achieved in our experiment was sufficient to demonstrate enough delay that two pulses are contained entirely within the ensemble simultaneously. Figure \ref{fig:two_pulse} shows results taken at an optical depth of $(560 \pm 40)$ demonstrating a delay-bandwidth product of $\approx 3.7$, calculated by the ratio of the delay to transmitted pulse width at 50\% efficiency. \begin{figure*} \caption{Temporally multimode delay of two signal pulses. Here, the large optical depth is used to slow the input pulses enough that they are both contained entirely within the atomic ensemble. Part (a) shows the output from the delay medium as the control field power is varied. Individual traces from this data are shown in (b) with the associated traces highlighted in the same color in part (a). The top trace of (b) also shows the input pulse as a reference, with some 4WM gain being apparent in the output.} \label{fig:two_pulse} \end{figure*} \section{Conclusion} \label{sec-concl} We experimentally investigated slow light under the conditions of high optical-depth electromagnetically induced transparency in an ensemble of cold ${}^{85}Rb$ atoms. This is the first time that EIT has been observed at such large optical depths and we explored the role of four-wave-mixing in the EIT interaction. We found that a simple four-level model, one that has been used to theoretically predict the addition of 4WM noise \cite{PhysRevA.88.013823}, was in good agreement with the experiment over a wide range of optical depths and in two Rb isotopes. This provides a solid foundation for predicting the noise performance of EIT-based optical quantum memories. In a regime with negligible 4WM, we obtained about 50\% efficiency with one pulse width delay. We additionally demonstrated a delay-bandwidth product of $\approx 3.7$ with 50\% efficiency. 
Although this corresponded to a regime with some 4WM, it enabled us to perform the first demonstration of multimode delay of temporally separated pulses in a cold-atom ensemble. \section*{ACKNOWLEDGMENTS} We thank Liqing Chen for the useful discussions. This work is funded by the Australian Research Council Centre of Excellence Program (CE110001027). WPZ acknowledges financial support from the National Basic Research Program of China (973 Program Grant No. 2011CB921604) and the National Natural Science Foundation of China (Grant No. 11234003). JG is supported by the Chinese Scholarship Council overseas scholarship. \end{document}
\begin{document} \title{$p$-adic multiple zeta values at roots of unity \\ $\&$ $p$-adic pro-unipotent harmonic actions \\ \text{ } \\ IV : Around $p$-adic continuity and interpolation in $\mathbb{Z}_{p}^{\depth}$ \\ \text{ } \\ IV-1 : $p$-adic multiple zeta values at roots of unity extended to sequences of integers of any sign} \maketitle \noindent \begin{abstract} This work is a study of $p$-adic multiple zeta values at roots of unity ($p$MZV$\mu_{N}$'s), the $p$-adic periods of the crystalline pro-unipotent fundamental groupoid of $(\mathbb{P}^{1} - \{0,\mu_{N},\infty\})/ \mathbb{F}_{q}$. The main tool is new objects which we call $p$-adic pro-unipotent harmonic actions. In this part IV we define and study $p$-adic analogues of some elementary complex analytic functions which interpolate multiple zeta values at roots of unity such as the multiple zeta functions. The indices of $p$MZV$\mu_{N}$'s involve sequences of positive integers ; in this IV-1, by considering an operation which we call localization (inverting certain integration operators) in the pro-unipotent fundamental groupoid of $\mathbb{P}^{1} - \{0,\mu_{N},\infty\}$, and by using $p$-adic pro-unipotent harmonic actions, we extend the definition of $p$MZV$\mu_{N}$'s to indices for which these integers can be negative, and we study these generalized $p$MZV$\mu_{N}$'s. \end{abstract} \tableofcontents \section{Introduction} \subsection{Complex and $p$-adic multiple zeta values at roots of unity} This work is a study of $p$-adic multiple zeta values at roots of unity ($p$MZV$\mu_{N}$'s), the $p$-adic periods of the crystalline pro-unipotent fundamental groupoid (abbreviated $\pi_{1}^{\un,\crys}$) of $(\mathbb{P}^{1} - \{0,\mu_{N},\infty\})/ \mathbb{F}_{q}$, with $\mathbb{F}_{q}$ of characteristic $p$ prime to $N$ which contains a primitive $N$-th root of unity. They are $p$-adic analogues of the (complex) multiple zeta values at roots of unity (MZV$\mu_{N}$'s), which are the following numbers. 
Let $\tilde{\xi}_{N} \in \mathbb{C}$ be a primitive $N$-th root of unity, $n_{1},\ldots,n_{d} \in \mathbb{N}^{\ast}$, and $j_{1},\ldots,j_{d} \in \mathbb{Z}/N\mathbb{Z}$ such that $(n_{d},j_{d}) \not= (1,0)$, and $(z_{i_{1}},\ldots,z_{i_{n}})=(\xi^{j_{1}},\overbrace{0,\ldots,0}^{n_{1}-1},\ldots,\xi^{j_{d}},\overbrace{0,\ldots,0}^{n_{d}-1})$, the associated multiple zeta value at $N$-th roots of unity is : \begin{equation} \label{eq:multizetas} \zeta \big((n_{i});(\tilde{\xi}_{N}^{j_{i}})\big)_{d} = (-1)^{d}\int_{0}^{1} \frac{d t_{n}}{t_{n} - z_{i_{n}}} \int_{0}^{t_{n-1}} \ldots \int_{0}^{t_{2}} \frac{dt_{1}}{t_{1} - z_{i_{1}}} = \sum_{0<m_{1}<\ldots<m_{d}} \frac{\big( \frac{\tilde{\xi}_{N}^{j_{2}}}{\tilde{\xi}_{N}^{j_{1}}}\big)^{m_{1}} \ldots \big( \frac{1}{\tilde{\xi}_{N}^{j_{d}}}\big)^{m_{d}}}{m_{1}^{n_{1}} \ldots m_{d}^{n_{d}}} \in \mathbb{C} \end{equation} where $n=n_{d}+\ldots+n_{1}$ is called the weight of $\big((n_{i});(\tilde{\xi}_{N}^{j_{i}})\big)_{d}$, and $d$ is called its depth. \newline\indent The $p$MZV$\mu_{N}$'s (Definition \ref{MZV Deligne}) are defined abstractly, without explicit formulas, as $p$-adic analogues of the iterated path integrals in (\ref{eq:multizetas}). They are numbers $\zeta_{p,\alpha}\big((n_{i});(\xi_{N}^{j_{i}})\big)_{d} \in K = \Frac(W(\mathbb{F}_{q})) \subset \overline{\mathbb{Q}_{p}}$, where $\xi_{N} \in K$ is a primitive $N$-th root of unity, $\alpha \in (\mathbb{Z}-\{0\}) \cup \{\pm\infty\}$ represents the number of iterations of the Frobenius of $\pi_{1}^{\un,\crys}((\mathbb{P}^{1} - \{0,\mu_{N},\infty\})/ \mathbb{F}_{q})$, with $\mathbb{F}_{q}$, $d \in \mathbb{N}^{\ast}$ $n_{1},\ldots,n_{d} \in \mathbb{N}^{\ast}$, $j_{1},\ldots,j_{d} \in \mathbb{Z}/N\mathbb{Z}$. \newline\indent In the terminologies above, the term "at roots of unity" is usually omitted if $N=1$. 
\subsection{Summary of parts I, II, III} In part I \cite{I-1} \cite{I-2} \cite{I-3}, we have found a $p$-adic analogue of the series formula in (\ref{eq:multizetas}) which has two particular features : it is explicit and it keeps track of the motivic Galois action on $\pi_{1}^{\un,\crys}((\mathbb{P}^{1} - \{0,\mu_{N},\infty\})/ \mathbb{F}_{q})$. \newline\indent In part II \cite{II-1} \cite{II-2} \cite{II-3}, we have deduced from these formulas a version of the motivic Galois theory of $p$MZV$\mu_{N}$'s formulated in terms of series instead of being formulated as usual in terms of integrals. \newline\indent In part III \cite{III-1} \cite{III-2}, we have defined and studied a generalization of the notion of $p$MZV$\mu_{N}$'s at roots of unity of order divisible by $p$. \newline\indent In all the previous parts, our results led us to replace the $p$MZV$\mu_{N}$'s by some variants, equivalent to them in a certain sense, which we call the adjoint $p$-adic multiple zeta values at $N$-th roots of unity (Definition \ref{def adjoint MZV}), abbreviated Ad$p$MZV$\mu_{N}$'s. Indeed, the results of \cite{I-2} \cite{I-3} show that the Ad$p$MZV$\mu_{N}$'s are more directly adapted to explicit computations than the $p$MZV$\mu_{N}$'s. The main objects in the previous parts were some group actions called $p$-adic pro-unipotent harmonic actions found in \cite{I-2} and \cite{I-3}. 
\newline\indent The explicit formulas for $p$MZV$\mu_{N}$'s found in part I appear as relations between Ad$p$MZV$\mu_{N}$'s and the following numbers called weighted multiple harmonic sums, with $m \in \mathbb{N}^{\ast}$, $d \in \mathbb{N}^{\ast}$ $n_{1},\ldots,n_{d} \in \mathbb{N}^{\ast}$, $j_{1},\ldots,j_{d+1} \in \mathbb{Z}/N\mathbb{Z}$ \footnote{For complex or $p$-adic MZV$\mu_{N}$'s, $\big((n_{i});(\tilde{\xi}_{N}^{j_{i}})\big)_{d}$ is an abbreviation of $\big( \begin{array}{cc} \xi_{N}^{j_{1}},\ldots,\xi_{N}^{j_{d}} \\ n_{1},\ldots,n_{d} \end{array} \big)$, whereas for multiple harmonic sums, it is an abbreviation of $\big( \begin{array}{cc} \xi_{N}^{j_{1}},\ldots,\xi_{N}^{j_{d+1}} \\ n_{1},\ldots,n_{d} \end{array} \big)$} : $$ \har_{m} \big((n_{i});(\tilde{\xi}_{N}^{j_{i}})\big)_{d} = \sum_{0<m_{1}<\ldots<m_{d}<m} \frac{\big( \frac{\xi_{N}^{j_{2}}}{\xi_{N}^{j_{1}}}\big)^{m_{1}} \ldots \big( \frac{\xi_{N}^{j_{d+1}}}{\xi_{N}^{j_{d}}}\big)^{m_{d}}\big(\frac{1}{\xi_{N}^{j_{d+1}}}\big)^{m}}{m_{1}^{n_{1}} \ldots m_{d}^{n_{d}}} \in \mathbb{Q}(\xi) $$ \subsection{Motivation for part IV} The MZV's of depth one, i.e. the numbers (\ref{eq:multizetas}) with $N=1$ and $d=1$, are the values of Riemann's zeta function at positive integers, and special values of other classical functions appearing in analytic number theory. These functions have generalizations which depend on any $d\in \mathbb{N}^{\ast}$ variables, defined some iterated series as in (\ref{eq:multizetas}), as well as generalizations "at roots of unity" taking into account the numerators in the iterated series of (\ref{eq:multizetas}). \newline Since we constructed in part I, and studied in parts II and III, a $p$-adic analogue of the series expansions of MZV$\mu_{N}$'s, we want to know if $p$-adic analogues of such interpolating functions exist. 
\subsection{Motivation for parts IV-1} We take as starting point of this paper the most straightforward example of interpolation of the MZV$\mu_{N}$'s ; namely, the multiple zeta functions at $N$-th roots of unity (MZF$\mu_{N}$'s) : for any $j_{1},\ldots,j_{d} \in \mathbb{Z}/N\mathbb{Z}$ : $$ (s_{1},\ldots,s_{d} \big) \in U_{d} \mapsto \sum_{0<m_{1}<\ldots<m_{d}} \frac{\big( \frac{\tilde{\xi}_{N}^{j_{2}}}{\tilde{\xi}_{N}^{j_{1}}}\big)^{m_{1}} \ldots \big( \frac{1}{\tilde{\xi}_{N}^{j_{d}}}\big)^{m_{d}}}{m_{1}^{s_{1}} \ldots m_{d}^{s_{d}}} \in \mathbb{C} $$ \noindent where $U_{d}= \{(s_{1},\ldots,s_{d}) \in \mathbb{C}^{d} \text{ }|\text{ }\text{Re}(s_{d-r+1}+\ldots+s_{d})> r\text{ for all } r=1,\ldots,d\}$. We want to know whether these functions have natural $p$-adic analogues interpolating $p$MZV$\mu_{N}$'s and, if this is the case, we want to study them. \newline \indent The MZF$\mu_{N}$'s have a meromorphic continuation to $\mathbb{C}^{d}$, defined in \cite{Essouabri}, \cite{Matsumoto}, \cite{Zhao}, \cite{AET}, \cite{Go} for $N=1$, and in \cite{FKMT1} for any $N$. Their meromorphic continuation has singularities along certain hyperplanes, which are identified the most precisely in \cite{AET} and \cite{FKMT1}. \newline One can then define values of these functions at tuples of integers of any sign, i.e. $\zeta\big(\begin{array}{c} \tilde{\xi}_{N}^{j_{1}},\ldots,\tilde{\xi}_{N}^{j_{d}} \\ n_{1},\ldots,n_{d} \end{array} \big)$, for any $n_{1},\ldots,n_{d} \in \mathbb{Z}$ ; this requires to remove a singularity. This can be done by considering the limit at tuples of integers along a certain direction \cite{AET} \cite{AT} \cite{Komori} \cite{O} \cite{Sa1} \cite{Sa2}, or by a certain "renormalization" process \cite{GZ}, \cite{MP} \cite{GPZ}, or by a certain "desingularization" process \cite{FKMT1}. 
\newline We note that the question of defining numbers $\zeta\big((n_{i}),(\tilde{\xi}_{N}^{j_{i}})\big)$, for any $n_{1},\ldots,n_{d} \in \mathbb{Z}$ does not necessarily involve the meromorphic continuation of MZF$\mu_{N}$'s ; one can consider it only via the formulas of equation (\ref{eq:multizetas}) : we have to find a correct notion of regularizations, either for a generalization of the expression of MZV$\mu_{N}$'s as iterated series : \begin{equation} \label{eq: complex limit series} \zeta_{\text{reg},\Sigma}\big( (\pm n_{i}),(\tilde{\xi}_{N}^{j_{i}}) \big) = \reg \lim_{m \rightarrow \infty} \sum_{0<m_{1}<\ldots<m_{d}<m} \frac{\big( \frac{\tilde{\xi}_{N}^{j_{2}}}{\tilde{\xi}_{N}^{j_{1}}}\big)^{m_{1}} \ldots \big( \frac{1}{\tilde{\xi}_{N}^{j_{d}}}\big)^{m_{d}}}{m_{1}^{\pm n_{1}}\ldots m_{d}^{\pm n_{d}}} \end{equation} \noindent or for a generalization of the expression of MZV$\mu$s as iterated integrals, which amounts to : \begin{equation} \label{eq: complex limit integrals} \zeta_{\text{reg},\smallint}\big((\pm n_{i}),(\tilde{\xi}_{N}^{j_{i}})\big) = \reg\lim_{z \rightarrow 1} \sum_{0<m_{1}<\ldots<m_{d}} \frac{\big( \frac{\tilde{\xi}_{N}^{j_{2}}}{\tilde{\xi}_{N}^{j_{1}}}\big)^{m_{1}} \ldots \big( \frac{z}{\tilde{\xi}_{N}^{j_{d}}}\big)^{m_{d}}}{m_{1}^{\pm n_{1}}\ldots m_{d}^{\pm n_{d}}} \end{equation} \noindent In the end, there are several notions of $\zeta\big((n_{i});(\tilde{\xi}_{N}^{j_{i}})\big)_{d}$ for any $n_{1},\ldots,n_{d} \in \mathbb{Z}$, interrelated in various ways. We would like to know if there are natural $p$-adic analogues of these values. \newline\indent The $p$-adic zeta function of Kubota and Leopoldt, which we will denote by $L_{p}$, is defined as a $p$-adic interpolation of the desingularized values of the Riemann zeta function at positive integers, using Kummer's congruences. 
Coleman has proved in \cite{Coleman} that, for all $n \in \mathbb{N}^{\ast}$ such that $n \geq 2$, we have $\zeta_{p,1}(n) = p^{n} L_{p}(n,\omega^{1-n})$, where $\omega$ is Teichm\"{u}ller's character, and $\zeta_{p,1}$ refers to $p$MZV$\mu_{N}$'s as denoted in \S1.1. This implies that the map $n \in \mathbb{N}^{\ast} \subset \mathbb{Z}_{p} \mapsto p^{-n}\zeta_{p,1}(n) \in \mathbb{Q}_{p}$ is continuous with respect to $n$ on each class of congruence modulo $p-1$, and can be extended to a continuous function on $\mathbb{Z}_{p}$, except for the class of congruence $n \equiv 1 \mod p-1$, where this is true instead for $n \mapsto (n-1)\zeta_{p}(n)$. This property can be retrieved by the following formula, which is known, and is also a particular case of our formulas of part I for $p$MZV$\mu_{N}$'s : \begin{equation} \label{eq:series expansion depth one} \zeta_{p,1}(n) = \frac{1}{n-1} \sum_{l\geq -1}{-n \choose l} B_{l} \sum_{0<m<p}\frac{p^{n+l-1}}{m^{n+l-1}} \end{equation} This gives hope that $p$MZV$\mu_{N}$'s of higher depth might have some $p$-adic continuity properties and might be interpolated by a continuous function. However, the case of depth one is particular : the Frobenius of $\pi_{1}^{\un,\crys}(\mathbb{P}^{1} - \{0,\mu_{N},\infty\})$ can be described as a relation between certain $p$-adic series indexed as $\sum_{0<m}$ and their variants restricted to $\sum_{\substack{0<m \\ p\nmid m}}$ ; in higher depth, the Frobenius is much more complicated. \newline\indent In higher depth, actually, some generalizations of the Kubota-Leopoldt $L$-function, based on the desingularization of the meromorphic continuation of MZF$\mu_{N}$'s, have been defined \cite{FKMT2}. Some of their values at tuples of positive integers are expressed in terms of $p$-adic iterated integrals on $\mathbb{P}^{1} - \{0,\mu_{cp},\infty\}$, with $c \in \mathbb{N}^{\ast}$ prime to $p$ (\cite{FKMT2}, Theorem 3.41). 
However, studying the role of these functions in the question explained in \S1.3 goes beyond the scope of this paper. \newline\indent In \cite{FKMT3}, some $p$MZV$\mu_{N}$'s at tuples of integers of any sign are defined in certain particular cases : the indices are $\big((n_{i});(\xi_{N}^{j_{i}})_{d}\big)$ for any $n_{1},\ldots,n_{d} \in \mathbb{Z}$, but $\xi_{N}^{j_{1}}\not=1,\ldots,\xi_{N}^{j_{d}}\not=1$. In terms of our notation of \S1.1, they are generalizations of the values $\zeta_{p,-\infty}(w)$. Their definition relies on the theory of Coleman integration in the sense of \cite{Vologodsky}. \subsection{Main ideas} In the formula (\ref{eq:multizetas}), the exponent $n_{i}$ in the iterated series corresponds to the iteration $n_{i}-1$ times of the operator $f \mapsto \int f \frac{dz}{z}$ in the definition of the iterated integral. Replacing this integration operator by its inverse gives similar series (provided it converges) with $n_{i}$ possibly negative. This has been used in several papers, including in \cite{FKMT3}, and we also have used it in \cite{I-2}. We are going to use this idea again here, but more systematically. We will use the term ``localization of $\pi_{1}^{\un,\DR}(\mathbb{P}^{1} - \{0,\mu_{N},\infty\})$'' to refer to the inversion of all the operators $f \mapsto \int f \omega$ with $\omega$ a differential form $\frac{dz}{z-x}$, $x \in \{0,\xi^{1},\ldots,\xi^{N}\}$, on $\mathbb{P}^{1} - \{0,\mu_{N},\infty\}$. \newline\indent If we consider an iterated integral as in (\ref{eq:multizetas}) but on a variable path (in the sense of \cite{Chen}), instead of the path $[0,1] \rightarrow [0,1]$, $t \mapsto t$ which is implicit in (\ref{eq:multizetas}), we obtain functions called multiple polylogarithms \cite{Go}, characterized as solutions to a certain differential equation (Proposition-Definition \ref{prop connexion}). 
If we allow the inversion of integration operators, we will obtain "localized multiple polylogarithms", which will be $\mathbb{Q}(\xi)$-linear combinations of products of iterated integrals by algebraic functions. This phenomenon already appeared implicitly in \cite{I-2}, \S4-\S5, via the map "loc" which was used to define what we called the $p$-adic pro-unipotent $\Sigma$-harmonic action. Here, this phenomenon will be studied intrinsically, in particular its $p$-adic aspects. By this phenomenon, all the numbers obtained by considering localized iterated integrals at tangential base-points remain in the same algebra of periods : they are certain $\mathbb{Q}(\xi)$-linear combinations of $p$MZV$\mu_{N}$'s. \newline\indent The $p$MZV$\mu_{N}$'s are defined using the notion of Frobenius structure of a $p$-adic differential equation. Thus in order to look for a good meaning of a notion of $p$MZV$\mu_{N}$'s at sequences of integers of any sign, we should show a compatibility between the localization and the Frobenius structure. We will see that imposing this compatibility makes things actually simpler in the $p$-adic case than in the complex case. \newline\indent Our idea is to replace the Frobenius by what we called the harmonic Frobenius in \cite{I-2}, and to use $p$-adic pro-unipotent harmonic actions. Although it is possible to define localized $p$-adic multiple polylogarithms by Coleman integration, their regularization at $z\rightarrow 1$ is not well-defined in general because they have a pole which is not logarithmic. This is the difficulty observed in \cite{FKMT3}. We will see how to avoid it by replacing the Frobenius by the harmonic Frobenius. \newline\indent In part I, we have obtained formulas involving series, representing the $p$MZV$\mu_{N}$'s. However, both the domains of summation and the summands were functions of the indices $n_{d},\ldots,n_{1}$. 
Here, if we want to study these sums of series as functions of $n_{d},\ldots,n_{1}$, we will make some changes of variables giving domains of summation independent of $n_{d},\ldots,n_{1}$. \subsection{Outline} In \S2, we define the "localization" of $\pi_{1}^{\un,\DR}(X)$ for $X$ equal to a punctured projective line over a field of characteristic zero and the $\KZ$ connection associated with it, on a neighborhood of $0$ (Definition \ref{def of the localization}). This incorporates a notion of localized multiple polylogarithms (Definition \ref{localized Li}). \newline We define maps which encode different expressions of the "localized iterated integrals" in terms of algebraic functions and the iterated integrals (Proposition-Definition \ref{loc for Li}, Proposition-Definition \ref{loc for har n}). \newline Then we define the analytic continuation of the localized multiple polylogarithms, in the complex setting (Definition \ref{analytic continuation loc MPL}) and in the $p$-adic setting (Definition \ref{p-adic continuation loc MPL}). The $p$-adic setting is applied to $\pi_{1}^{\un,\crys}(\mathbb{P}^{1} - \{0,\mu_{N},\infty\}/\mathbb{F}_{q})$. \newline\indent In \S3 we review the notion of $p$MZV$\mu_{N}$'s (Definition \ref{MZV Deligne}, Definition \ref{MZV Coleman}, Definition \ref{MZV Coleman bis}), and the $p$-adic pro-unipotent $\Sigma$-harmonic action $\circ_{\har}^{\Sigma}$ of \cite{I-2}. We define some "localized" variants of $\circ_{\har}^{\Sigma}$ (Proposition-Definition \ref{loc action}) and the localized adjoint $p$MZV$\mu_{N}$'s (Definition \ref{def localized pMZV}). In this new setting, the localized version of $\circ_{\har}^{\Sigma}$ defined in \cite{I-2} will be now viewed as "localized at the source", and we now have two other "localized" variants of $\circ_{\har}^{\Sigma}$, called, respectively, "localized at the target" and "localized at the source and target". 
\newline\indent In \S4, we explain briefly why the properties from \cite{I-3} describing formulas for the iteration of the harmonic Frobenius can be generalized to the setting of \S3 (Proposition-Definition \ref{iter localized}). \newline\indent In \S5, we bring together the localization built in \S3 and the algebraic relations satisfied by $p$MZV$\mu_{N}$'s, which we studied in part II \cite{II-1} \cite{II-2} \cite{II-3} ; and we show that the localized adjoint $p$MZV$\mu_{N}$'s satisfy a variant of the adjoint double shuffle relations defined in \cite{II-1} (Proposition \ref{localized adjoint quasi shuffle}). \newline\indent The main theorem is a summary of the main properties of our localized $p$-adic multiple zeta values at roots of unity. \newline We refer to the notion of adjoint double shuffle relations defined in \cite{II-1}. \newline \newline \textbf{Theorem IV-1} \newline \emph{i) (Nature of localized Ad$p$MZV$\mu_{N}$'s) \newline The localized Ad $p$MZV$\mu_{N}$'s are in the $\mathbb{Q}(\xi_{N})$-algebra generated by $p$MZV$\mu_{N}$'s. In particular, they are periods of the crystalline pro-unipotent fundamental groupoid of $(\mathbb{P}^{1} - \{0,\mu_{N},\infty\})/\mathbb{F}_{q}$. \newline The totally negative Ad $p$MZV$\mu_{N}$'s (in the sense of Definition \ref{def totally negative}) are algebraic numbers, in $\mathbb{Q}(\xi)$; more precisely, they are in an algebra of functions defined explicitly in terms of polynomials of Bernoulli numbers, or, alternatively, in terms of prime multiple harmonic sums at negative indices. The vanishing of the odd Bernoulli numbers imply the vanishing of certain particular totally negative Ad$p$MZV$\mu_{N}$'s. 
\newline ii) (Formulas) \newline The formulas of \cite{I-2} and \cite{I-3} for Ad$p$MZV$\mu_{N}$'s can be extended into explicit formulas for the localized Ad $p$MZV$\mu_{N}$'s, involving extensions of the $p$-adic pro-unipotent harmonic action $\circ_{\har}^{\Sigma}$ and the map of iteration of the harmonic Frobenius $\iter_{\har}^{\Sigma}$. \newline iii) (Algebraic relations) \newline The localized Ad $p$MZV$\mu_{N}$'s satisfy an extension of the adjoint double shuffle relations} \newline \newline We consider these properties as a justification of our definition of the localized Ad$p$MZV$\mu_{N}$'s. \newline We note that this theorem can be extended in an obvious way to the $p$-adic multiple zeta values at roots of unity of order divisible by $p$ defined in \cite{III-1}. \newline\indent We will prove later that the formulas of \cite{I-2} can be modified to be formulas with domains of summation independent of $(l,(n_{i});(\xi_{N}^{j_{i}}))$. \newline We will use this later to deduce that the adjoint $p$MZV's $\zeta_{p}^{\Ad}(l;n_{d},\ldots,n_{1})$ (here $N=1$) have some continuity properties with respect to $n_{1}$ and $n_{d}$ viewed as $p$-adic integers. \newline In the next version of this paper, we will also define a notion of localized $p$MZV$\mu_{N}$'s, without the term adjoint. This will be done by using a generalization of the main equation found in \cite{AETbis}. \newline \newline \textbf{Acknowledgments.} I thank Hidekazu Furusho for discussions. \newline The idea of inverting integration operators being standard, they may be some references missing and I apologize if this is the case. \newline This work has been achieved at Universit\'{e} de Strasbourg, supported by Labex IRMIA, and at Universit\'{e} de Genève, supported by NCCR SwissMAP. 
\section{Localized complex and $p$-adic multiple polylogarithms} We formalize a notion of localization on a neighborhood of 0 of the De Rham pro-unipotent fundamental groupoid of a punctured projective line and of the KZ connection (\S2.1), we define "localization maps" enabling us to "compute" it (\S2.2) and we discuss the analytic continuation of the localized multiple polylogarithms (\S2.3) in the complex (\S2.3.1) and $p$-adic (\S2.3.2) settings. The $p$-adic aspects are restricted to the case of $\mathbb{P}^{1} - \{0,\mu_{N},\infty\}$. \subsection{The De Rham pro-unipotent fundamental groupoid of $\mathbb{P}^{1} - \{0=z_{0},z_{1},\ldots,z_{r},\infty\}$, the KZ connection, and its localization on a neighborhood of the origin} \subsubsection{Review on $\pi_{1}^{\un,\DR}(\mathbb{P}^{1} - \{0=z_{0},z_{1},\ldots,z_{r},\infty\})$ and $\nabla_{\KZ}$} Let $K$ be a field of characteristic zero, $z_{0},\ldots,z_{r} \in K$ with $z_{0}=0$ and $z_{r}=1$, and $X= (\mathbb{P}^{1} - \{0,z_{1},\ldots,z_{r},\infty\})/K$. \newline We review $\pi_{1}^{\un,\DR}(X)$ and the KZ connection (defined and described in \cite{Deligne}), and we define their "localized" version on a neighborhood of $0$. \newline\indent The De Rham pro-unipotent fundamental groupoid of $X$, denoted by $\pi_{1}^{\un,\DR}(X)$ is a groupoid in pro-affine schemes over $X$ ; its base-points are the points of $X$, the points of punctured tangent spaces $T_{x} - \{0\}$, $x \in \{0,\xi^{1},\ldots,\xi^{N},\infty\}$ called tangential base-points (\cite{Deligne}, \S15), and the canonical base-point $\omega_{\DR}$ (\cite{Deligne}, (12.4.1)). \newline\indent All pro-affine schemes $\pi_{1}^{\un,\DR}(X_{K},y,x)$ are canonically isomorphic as schemes to $\pi_{1}^{\un,\DR}(X_{K},\omega_{\DR})$, these isomorphisms being compatible with the groupoid structure (\cite{Deligne}, \S12). Thus, describing the groupoid $\pi_{1}^{\un,\DR}(X_{K})$ is reduced to describing $\pi_{1}^{\un,\DR}(X_{K},\omega_{\DR})$, which is done in the next statement.
\begin{Proposition-Definition} Let $\frak{e}$ be the alphabet $\{e_{0},e_{z_{1}},\ldots,e_{z_{r}}\}$, and let $\Wd(\frak{e})$ be the set of words over $\frak{e}$ (including the empty word). \newline i) The shuffle Hopf algebra over $\frak{e}$, denoted by $\mathcal{O}^{\mathcyr{sh},\frak{e}}$, is the $\mathbb{Q}$-vector space $\mathbb{Q}\langle \frak{e} \rangle = \mathbb{Q}\langle e_{z_{0}},e_{z_{1}},\ldots,e_{z_{r}}\rangle$, which admits $\Wd(\frak{e})$ as a basis, with the following operations : \newline a) the shuffle product $\mathcyr{sh}:\mathcal{O}^{\mathcyr{sh},\frak{e}}\otimes \mathcal{O}^{\mathcyr{sh},\frak{e}} \rightarrow \mathcal{O}^{\mathcyr{sh},\frak{e}}$ defined by, for all words : $(e_{z_{i_{n+n'}}}\ldots e_{z_{i_{n+1}}})\text{ }\mathcyr{sh}\text{ }(e_{z_{i_{n}}} \ldots e_{z_{i_{1}}}) = \sum_{\sigma} e_{z_{i_{\sigma^{-1}(n+n')}}} \ldots e_{z_{i_{\sigma^{-1}(1)}}}$ where the sum is over permutations $\sigma$ of $\{1,\ldots,n+n'\}$ such that $\sigma(n)<\ldots<\sigma(1)$ and $\sigma(n+n')<\ldots<\sigma(n+1)$. \newline b) the deconcatenation coproduct $\Delta_{\dec} : \mathcal{O}^{\mathcyr{sh},\frak{e}}\rightarrow \mathcal{O}^{\mathcyr{sh},\frak{e}} \otimes \mathcal{O}^{\mathcyr{sh},\frak{e}}$, defined by, for all words : $\Delta_{\dec}(e_{z_{i_{n}}}\ldots e_{z_{i_{1}}}) = \sum_{n'=0}^{n} e_{z_{i_{n}}}\ldots e_{z_{i_{n'+1}}} \otimes e_{z_{i_{n'}}} \ldots e_{z_{i_{1}}}$ \newline c) the counit $\epsilon : \mathcal{O}^{\mathcyr{sh},\frak{e}} \rightarrow \mathbb{Q}$ sending all non-empty words to $0$. \newline d) the antipode $S : \mathcal{O}^{\mathcyr{sh},\frak{e}} \rightarrow \mathcal{O}^{\mathcyr{sh},\frak{e}}$, defined by, for all words : $S(e_{z_{i_{n}}}\ldots e_{z_{i_{1}}}) = (-1)^{n} e_{z_{i_{1}}}\ldots e_{z_{i_{n}}}$. \newline ii) (\cite{Deligne}, \S12) The group scheme $\Spec(\mathcal{O}^{\mathcyr{sh},\frak{e}})$ is pro-unipotent and canonically isomorphic to $\pi_{1}^{\un,\DR}(X_{K},\omega_{\DR})$.
\end{Proposition-Definition} \noindent Since $\Spec(\mathcal{O}^{\mathcyr{sh},\frak{e}})$ is pro-unipotent, its points can be expressed in a canonical way in terms of the dual of the topological Hopf algebra $\mathcal{O}^{\mathcyr{sh},\frak{e}}$. This is written in the next statement, in which, following a common abuse of notation, we denote in the same way the letters $e_{z_{j}}$ and their duals. \begin{Proposition-Definition} \label{shuffle equation} i) Let $K\langle\langle \frak{e}\rangle\rangle = K \langle\langle e_{z_{0}},\ldots,e_{z_{r}} \rangle\rangle$ be the non-commutative $K$-algebra of power series over the variables $e_{z_{0}},\ldots,e_{z_{r}}$ with coefficients in $K$. It is the completion of the universal enveloping algebra of the complete free Lie algebra over the variables $e_{z_{0}},\ldots,e_{z_{r}}$. It thus has a canonical structure of topological Hopf algebra. \newline We write an element $f \in K\langle \langle \frak{e} \rangle\rangle$ as $f = \sum_{w \in \Wd(\frak{e})} f[w]w$, where $f[w] \in K$ for all $w$. We have \begin{multline} \label{eq:shuffle equation modulo products} \begin{array}{ll} \Lie(\mathcal{O}^{\mathcyr{sh},\frak{e}})^{\vee}\otimes_{\mathbb{Q}} K & = \{ f \in K \langle\langle \frak{e} \rangle\rangle \text{ }|\text{ }\forall w\not=\emptyset,w'\not=\emptyset \in \Wd(\frak{e}), f[w\text{ }\mathcyr{sh}\text{ }w']=0\} \\ & = \{\text{ primitive elements of } K \langle\langle \frak{e} \rangle\rangle \} \end{array} \end{multline} The equation above is called the shuffle equation modulo products. 
\newline ii) We have a canonical isomorphism of topological Hopf algebras $(\mathcal{O}^{\mathcyr{sh},\frak{e}} \otimes_{\mathbb{Q}} K)^{\vee} = K \langle \langle \frak{e} \rangle\rangle$ and \begin{multline} \label{eq:shuffle equation} \begin{array}{ll} \Spec(\mathcal{O}^{\mathcyr{sh},\frak{e}})(K) & = \{ f \in K \langle\langle \frak{e} \rangle\rangle \text{ }|\text{ }\forall w,w' \in \Wd(\frak{e}), f[w\text{ }\mathcyr{sh}\text{ }w']=f[w]f[w'],\text{ and }f[\emptyset] = 1 \} \\ & = \{\text{ grouplike elements of } K \langle\langle \frak{e} \rangle\rangle\} \end{array} \end{multline} The equation above is called the shuffle equation. \end{Proposition-Definition} \noindent We now review the connection $\nabla_{\KZ}$ associated with $\pi_{1}^{\un,\DR}(X_{K})$ and multiple polylogarithms, viewed first as power series. \begin{Proposition-Definition} (follows from \cite{Deligne}, \S7.30 and \S12) \label{prop connexion} \newline i) The connection associated with $\pi_{1}^{\un,\DR}(X)$, called the Knizhnik-Zamolodchikov (for short, KZ) connection of $X$, is the connection on $\pi_{1}^{\un,\DR}(X,\omega_{\DR}) \times X$ defined by $\nabla_{\KZ} : f \mapsto df - \big(\sum_{i=0}^{r} e_{z_{i}} \frac{dz}{z-z_{i}} \big)f$. \newline ii) The coefficients of its horizontal sections are iterated integrals of $\frac{dz}{z-z_{j}}$, $j=0,\ldots,r$ (in the sense of Chen \cite{Chen} if $K\hookrightarrow\mathbb{C}$, and in the sense of Coleman \cite{Coleman} if $K\hookrightarrow \mathbb{C}_{p}$ is unramified), and called multiple polylogarithms \cite{Go}. \newline Assume $K$ is embedded in $\mathbb{C}$ or $\mathbb{C}_{p}$ for $p$ a prime number.
For $d \in \mathbb{N}^{\ast}$, $n_{1},\ldots,n_{d} \in \mathbb{N}^{\ast}$, $j_{1},\ldots,j_{d} \in \{1,\ldots,r\}$, let $\Li^{0} \big((n_{i});(z_{j_{i}})\big)_{d} \in K[[z]]$ be the formal iterated integral of the sequence of differential forms $(\underbrace{\frac{dz}{z},\ldots,\frac{dz}{z}}_{n_{d}-1},\frac{dz}{z-z_{j_{d}}},\ldots,\underbrace{\frac{dz}{z},\ldots,\frac{dz}{z}}_{n_{1}-1},\frac{dz}{z-z_{j_{1}}})$. Then, for $z\in K$ such that $|z|<1$, we have : \begin{equation} \Li^{0} \big((n_{i});(z_{j_{i}})\big)_{d}(z) = \sum_{0<m_{1}<\ldots<m_{d}} \frac{\big( \frac{z_{j_{2}}}{z_{j_{1}}}\big)^{m_{1}} \ldots \big( \frac{z}{z_{j_{d}}}\big)^{m_{d}}}{m_{1}^{n_{1}} \ldots m_{d}^{n_{d}}} \in K \end{equation} \end{Proposition-Definition} \subsubsection{Localization of $(\pi_{1}^{\un,\DR}(\mathbb{P}^{1} - \{0=z_{0},z_{1},\ldots,z_{r},\infty\}),\nabla_{\KZ})$ on a neighborhood of zero} We now formalize the localization on a neighborhood of $0$ of $\pi_{1}^{\un,\DR}(X_{K}),\nabla_{\KZ}$ (Definition \ref{def of the localization}). \newline \indent Let $A$ be a ring, and $S$ a multiplicative subset of $A$. The localization of $A$ at $S$ is the ring $A S^{-1}$ representing the subfunctor of $\Hom(A,-)$ defined by the homomorphisms mapping $S$ to units. Explicitly, $A S^{-1}$ is the ring whose elements are sums of elements of the form $x_{1}y_{1}^{-1}x_{2}y_{2}^{-1}\ldots x_{i}y_{i}^{-1}$, with $x_{i} \in A$, $y_{i} \in S$. The representability of the functor above is granted because it is continuous and satisfies the solution set condition. This notion is mostly usual when $A$ is commutative, or if the pair $(A,S)$ satisfies Ore's conditions, which are a weak variant of the commutativity assumption.
\newline \indent For us, localizing $\pi_{1}^{\un,\DR}(X)$ will mean replacing $\mathcal{O}^{\mathcyr{sh},\frak{e}}$, the Hopf algebra of $\pi_{1}^{\un,\DR}(X_{K},\omega_{\DR})$, regarded as a ring whose multiplication is the concatenation of words, by its localization at the part of non-zero elements (which is multiplicative because it is an integral ring). We define a ring which will have a surjection onto the localization ; this will be practical for writing some results. \begin{Definition} Let $\frak{e}^{\inv}$ be the alphabet $\{e_{0}^{\inv},e^{\inv}_{z_{1}},\ldots,e^{\inv}_{z_{r}}\}$. \newline Let $\frak{e} \cup \frak{e}^{\inv}$ be the alphabet $\{e_{0},e_{z_{1}},\ldots,e_{z_{r}},e_{0}^{\inv},e^{\inv}_{z_{1}},\ldots,e^{\inv}_{z_{r}}\}$. \newline Let $\Wd(\frak{e} \cup \frak{e}^{\inv})$ be the set of words over $\frak{e} \cup \frak{e}^{\inv}$. \newline Let $K \langle\langle \frak{e} \cup \frak{e}^{\inv} \rangle\rangle$ the non-commutative $K$-algebra of formal power series over the variables equal to the letters of $\frak{e} \cup \frak{e}^{\inv}$. \end{Definition} \noindent It is convenient to reformulate the KZ equation $\nabla_{\KZ}(L)=0$ as a fixed-point equation : \begin{Definition} \label{definition int KZ power series}Let the integration operator $K[[z]][\log(z)]\langle\langle \frak{e} \rangle\rangle \rightarrow K[[z]][\log(z)]\langle\langle \frak{e} \rangle\rangle$ : $$ \Int_{\KZ} : L \mapsto \int_{\vec{1}_{0}}^{z} (\frac{dz'}{z'} e_{0} + \frac{dz'}{z'-1}e_{1})L $$ \end{Definition} \noindent Let $L \in K[[z]][\log(z)]\langle \langle \frak{e} \rangle\rangle$ whose coefficient of $z^{0}$ is $\exp(e_{0} \log(z))$. We have the equivalence $\nabla_{\KZ}(L) = 0 \Leftrightarrow \Int_{\KZ}(L) = L$. These conditions are also equivalent to saying that $L$ is the non-commutative generating series of multiple polylogarithms in the sense of Proposition-Definition \ref{prop connexion}. 
This way to formulate the KZ equation gives rise to the following definition of its localized variant : \begin{Definition} \label{nabla KZ localized} Let $$ \Int^{\loc}_{\KZ} : L \mapsto \bigg( e_{0}^{\inv} z \frac{d}{dz} + e_{1}^{\inv}(z-1) \frac{d}{dz} \bigg) L + \int_{\vec{1}_{0}}^{z} (\frac{dz'}{z'} e_{0} + \frac{dz'}{z'-1}e_{1}) L $$ \noindent We say that the equation $\Int^{\loc}_{\KZ}(L)=L$ is the localized version of the equation $\nabla_{\KZ}(L)=0$. \end{Definition} \begin{Definition} \label{def of the localization} The localization on a neighborhood of $0$ of $\pi_{1}^{\un,\DR}(X_{K}),\nabla_{\KZ}$ is the data of the $K$-algebra $K\langle\langle \frak{e} \cup \frak{e}^{\inv} \rangle\rangle$, the inclusion $\pi_{1}^{\un,\DR}(X,\omega_{\DR})(K) \subset K\langle\langle \frak{e} \cup \frak{e}^{\inv} \rangle\rangle$, and the operator $\Int^{\loc}_{\KZ}$. \end{Definition} \begin{Proposition-Definition} \label{localized Li} i) The localized KZ equation has a unique solution $\Li^{\loc}_{0} \in K[[z]][\log(z)]\langle \langle \frak{e} \cup \frak{e}^{\inv} \rangle\rangle$ whose coefficient of $z^{0}$ is $\exp(e_{0} \log(z))$. \newline We call localized multiple polylogarithms the coefficients $\Li_{0}^{\loc}[w] \in K[[z]][\log(z)]$. \newline ii) $\Li_{0}^{\loc}$ viewed as an element of $K \langle\langle \frak{e} \cup \frak{e}^{\inv} \rangle\rangle$ descends to an element of $(K\langle\langle \frak{e} \rangle\rangle - \{0\})^{-1}K\langle\langle \frak{e} \cup \frak{e}^{\inv} \rangle\rangle$, and further to $(K\langle\langle \frak{e} \rangle\rangle - \{0\})^{-1}K\langle\langle \frak{e} \cup \frak{e}^{\inv} \rangle\rangle/I_{\comm}$, where $I_{\comm}$ is the ideal generated by the relations $e_{z} e_{z}^{\inv} = e_{z}^{\inv}e_{z}=1$. \end{Proposition-Definition} \begin{proof} Clear.
\end{proof} \noindent The next statement is a generalization of the expression of the power series expansions of multiple polylogarithms in terms of multiple harmonic sums (\cite{Go}, equation (1)) : \begin{Proposition-Definition} \label{loc mhs} i) We call localized multiple harmonic sums the following numbers, for $d \in \mathbb{N}^{\ast}$, $n_{1},\ldots,n_{d} \in \mathbb{Z}$, $j_{1},\ldots,j_{d+1} \in \mathbb{Z}/N\mathbb{Z}$, $m \in \mathbb{N}^{\ast}$ : $$ \frak{h}_{m} \big((n_{i});(\tilde{\xi}_{N}^{j_{i}})\big)_{d} = \sum_{0<m_{1}<\ldots<m_{d}<m} \frac{\big( \frac{z_{j_{2}}}{z_{j_{1}}}\big)^{m_{1}} \ldots \big( \frac{z_{j_{d+1}}}{z_{j_{d}}}\big)^{m_{d}} \big(\frac{1}{z_{j_{d+1}}}\big)^{m} }{m_{1}^{n_{1}} \ldots m_{d}^{n_{d}}} $$ \noindent and weighted localized multiple harmonic sums the numbers $$ \har_{m} \big((n_{i});(z_{j_{i}})\big)_{d} = m^{n_{1}-\tilde{n}_{1}+\ldots+n_{d}-\tilde{n}_{d}} \frak{h}_{m} \big((n_{i});(\xi_{N}^{j_{i}})\big)_{d} $$ \noindent ii) Assume that $K$ is embedded in $\mathbb{C}$ or in $\mathbb{C}_{p}$. For all $n_{d},\tilde{n}_{d},\ldots,n_{1},\tilde{n}_{1} \in \mathbb{N}^{\ast}$, $j_{1},\ldots,j_{d} \in \mathbb{Z}/N\mathbb{Z}$, for all $z \in K$, $|z|<1$, the series below is absolutely convergent and we have : $$ \Li_{0}^{\loc}\big[ e_{0}^{n_{d}-1}(e_{0}^{\inv})^{\tilde{n}_{d}}e_{z_{j_{d}}} \ldots e_{0}^{n_{1}-1}(e_{0}^{\inv})^{\tilde{n}_{1}}e_{z_{j_{1}}} \big] = \sum_{0<m_{1}<\ldots<m_{d}} \frac{\big( \frac{z_{j_{2}}}{z_{j_{1}}}\big)^{m_{1}} \ldots \big( \frac{z}{z_{j_{d}}}\big)^{m_{d}}}{m_{1}^{n_{1}-\tilde{n}_{1}} \ldots m_{d}^{n_{d}-\tilde{n}_{d}}} $$ \noindent The coefficients of $\Li^{\loc}_{0}[w]$ with $w$ of the form $\tilde{w}e_{z}^{\inv}$, $z \in \{z_{0},z_{1},\ldots,z_{r}\}$, are equal to $0$.
\end{Proposition-Definition} \subsection{Computation of the localization} We define two "localization maps", expressing the localization of $(\pi_{1}^{\un,\DR}(\mathbb{P}^{1} - \{0=z_{0},z_{1},\ldots,z_{r},\infty\}),\nabla_{\KZ})$ on a neighborhood of zero, in terms of iterated integrals and algebraic functions. For certain statements, we restrict for simplicity the study to the localization at the multiplicative part generated by $e_{0}$. \subsubsection{The localization map for multiple polylogarithms $\loc^{\smallint}$} We write the coefficients of $\Li^{\KZ}_{\loc}$ as $\mathbb{Q}$-linear combinations of products of algebraic functions on $\mathbb{P}^{1} - \{0,\mu_{N},\infty\}$ by coefficients $\Li^{\KZ}_{\loc}[w]$ with $w \in \mathcal{W}(e_{X_{K}})$. In the next statement, we view $\Li_{\loc}$ as a map $\mathcal{O}^{\mathcyr{sh},e}_{\loc} \rightarrow \mathbb{C}[[z]]$, and we view $\Li$ as a map $\mathcal{O}^{\mathcyr{sh},e} \rightarrow \mathbb{C}[[z]]$. \begin{Proposition-Definition} \label{loc for Li} Assume $-1 \in \{z_{1},\ldots,z_{r}\}$ (otherwise replace $X$ by $X'=(\mathbb{P}^{1} - \{0,z_{1},\ldots,z_{r},-1,\infty\})/K$). There exists a map $$ \loc^{\smallint} : \mathcal{O}^{\mathcyr{sh},e}_{\loc} \rightarrow \Gamma(X,\mathcal{O}_{X}) \otimes \mathcal{O}^{\mathcyr{sh},e} $$ such that we have $$ \Li_{0}^{\loc} = (\Li^{0} \otimes \Gamma(X,\mathcal{O}_{X})) \circ \loc^{\smallint} $$ \end{Proposition-Definition} \begin{proof} Let us call weight the number of letters of a word $w$ over the alphabet $\frak{e} \cup \frak{e}^{\inv}$. By induction on the weight, we are reduced to proving that, for $x \in \{0,z_{1},\ldots,z_{r}\}$, for $w$ a word over $\frak{e}$, and for $n \in \mathbb{N}^{\ast}$, the integral $\int_{0}^{z} \frac{dz'}{(z'-x)^{n}}\Li[w](z')$ is a $\Gamma(X,\mathcal{O}_{X})$-linear combination of multiple polylogarithms.
\newline If $x \not=0$, we write $\frac{1}{(z'-x)^{n}} = \frac{1}{(-x)^{n}}(\frac{1}{1-\frac{z'}{x}})^{n} =\frac{1}{(-x)^{n}}\sum_{l\geq 0} {-n \choose l} \big(\frac{z'}{x}\big)^{l} = \frac{1}{(-x)^{n}}\sum_{l\geq 0} (-1)^{l} {l+n-1 \choose l} \big(\frac{z'}{x}\big)^{l}$. We use that ${l+n-1 \choose l}$ is a polynomial function of $l$. \newline If $x=0$, integration by parts the KZ equation and induction on the weight reduces us to the case of $w$ is of weight $1$, in which case $\Li[w](z')=\log(z'-x')$ with $x' \in \{0,z_{1},\ldots,z_{r}\}$ ; the result is then proved by another integration by parts. \end{proof} \subsubsection{The localization map for multiple harmonic sums $\loc^{\Sigma}$} \noindent We review from \cite{I-2} the map $\loc^{\Sigma}$ giving an expression of the localized multiple harmonic sums (in the sense of Definition \ref{loc mhs}) in terms of multiple harmonic sums and some polynomials of the upper bound of the domain of iterated summation. \begin{Definition} \label{definition of connected partition}Let $S$ be a subset of $\mathbb{N}$. \noindent\newline i) A connected partition of $S$ is a partition of $S$ into segments. \newline ii) An increasing connected partition of $S$ is a connected partition of $S$ with an order $<$ on the corresponding set of parts of $S$, such that if $C<C'$, we have $j<j'$ in $\mathbb{N}$ for all $j \in C$ and $j' \in C'$. \newline iii) For a part $P$ of $\{1,\ldots,d\}$ and an increasing connected partition of $P$ and the connected components of $\{1,\ldots,d\} - P$. \newline iv) Let $\partial S$ be the set of $x \in S$ such that $x+1 \not\in S$ or $x-1 \not\in S$. \end{Definition} \noindent We apply the previous definitions to define a way to represent localized words which is adapted to our purposes. \begin{Definition} \label{la definition du localise}Let $w =\big(t_{d},\ldots,t_{1}) \in \mathbb{Z}^{d}$. 
\newline i) Let $\Sign^{-}(w)= \{i \in \{1,\ldots,d\} \text{ | } t_{i}<0\}$, and $\Sign^{+}(w) = \{ i \in \{1,\ldots,d\} \text{ | } t_{i} \geq 0\}$. \newline ii) Let $r(w) \in \mathbb{N}$ be the number of connected components of $\Sign^{-}(w)$ in the sense of Definition \ref{definition of connected partition}, and we denote these connected components by $[I_{1}(w)+1,J_{1}(w)-1],\ldots,[I_{r(w)}(w)+1,J_{r(w)}(w)-1]$, with $I_{1}(w)<J_{1}(w)<I_{2}(w)<J_{2}(w)<\ldots < I_{r(w)}(w)<J_{r(w)}(w)$. We also write $J_{0} = 0$ and $I_{r(w)+1} = d+1$. \newline iii) Let us write $t_{i} = n_{i}$ if $t_{i} \geq 0$, and $t_{i} = -l_{i}$ if $t_{i} < 0$. \end{Definition} For technical reasons which will appear in \S3, we are actually going to replace the localized multiple harmonic sums (Proposition-Definition \ref{loc mhs}) by a variant whose domain of summation involves both strict and large inequalities. Indeed, the following variant of multiple harmonic sums appears in a natural way in the computation on $p$MZV$\mu_{N}$'s.
\begin{Definition} We take the notations of Proposition-Definition \ref{loc mhs} and Definition \ref{la definition du localise} ; let \begin{equation} \label{eq:def tilde multiple harmonic sums} \widetilde{\frak{h}}_{m}\big( (n_{i});(\xi^{j_{i}})\big)_{d}= \sum_{(m_{1},\ldots,m_{d}) \in \tilde{\Delta}_{w}} \frac{{\big( \frac{\xi^{j_{2}}}{\xi^{j_{1}}}\big)^{m_{1}} \ldots \big( \frac{1}{\xi^{j_{d}}}\big)^{m_{d}}}}{m_{1}^{n_{1}} \ldots m_{d}^{n_{d}}} \end{equation} where $w=\big( (n_{i});(\xi^{j_{i}})\big)_{d}$ and $$ \tilde{\Delta}_{w} = \{ (m_{1},\ldots,m_{d}) \in \mathbb{N}^{d}\text{ }|\text{ }0\leq m_{I_{1}(w)} < \ldots < m_{I_{2}(w)-1}\leq m_{I_{2}(w)}<m_{J_{2}(w)} <\ldots \leq m_{I_{r(w)}(w)} < \ldots < m \} $$ \end{Definition} \begin{Proposition-Definition} \label{numbers mathcal B}There exists a unique sequence $(\mathcal{B}^{\delta,\delta'}_{(m_{i});(\xi^{j_{i}})})$ of elements of $\mathbb{Q}(\xi)$ such that, for all $m,m'$, we have \begin{equation} \label{eq:recurrence} \sum_{m<m_{1}<\ldots<m_{d}<m'} \big( \frac{\tilde{\xi}_{N}^{j_{2}}}{\tilde{\xi}_{N}^{j_{1}}}\big)^{m_{1}} \ldots \big( \frac{1}{\tilde{\xi}_{N}^{j_{d}}}\big)^{m_{d}} m_{1}^{l_{1}}\ldots m_{d}^{l_{d}} = \sum_{\delta,\delta'=0}^{l_{1}+\ldots+l_{d}+d+1}\mathcal{B}^{\delta,\delta'}_{(m_{i});(\xi^{j_{i}})} m^{\delta}m'^{\delta'} \end{equation} \end{Proposition-Definition} \begin{proof}The existence of these coefficients as well as formulas for them can be obtained by induction on $d$, and by considering the two following equalities, valid for $l \in \mathbb{N}^{\ast}$ : $\sum_{m_{1}=0}^{m-1} m_{1}^{l} =\sum_{\delta=0}^{l+1} \frac{1}{l+1}{l+1 \choose \delta} B_{l+1-\delta}m^{\delta}$, and $\sum_{m_{1}=0}^{m-1} m_{1}^{l}T^{m_{1}}= (T\frac{d}{dT})^{l}(\sum_{m_{1}=0}^{m-1} T^{m_{1}}) = (T\frac{d}{dT})^{l}( \frac{T^{m}-1}{T-1})$.
\end{proof} \begin{Proposition-Definition} \label{loc for har n}Let the map $$ \loc^{\Sigma} : \mathcal{O}^{\mathcyr{sh},\frak{e}}_{\loc} \longrightarrow \mathcal{O}^{\mathcyr{sh},\frak{e}} \otimes \mathbb{Q}(\xi)[\frak{m}] $$ \noindent defined recursively as follows : let $[i_{C},j_{C}]$ be a connected component of $\Sign^{-}(w)$. Then applying equation (\ref{eq:recurrence}) gives an expression of the form $\widetilde{\frak{h}}_{m}(w) = \sum_{w'} \frak{h}_{m}(w') P_{w'}(m)$ with, for all $w'$, $depth(w')<depth(w)$. We define $\loc(w)$ as $\sum_{w'} \loc(w') (1 \otimes P_{w'})$. Then, we have : $$ \widetilde{\frak{h}}_{m}(w) = (\frak{h}(m) \times \eval_{m}) (\loc(w)) $$ \noindent where $\frak{h}(m) \times \eval_{m}$ is defined as $\frak{h}(m) \otimes \eval_{m}$ composed with the multiplication of tensor components. \end{Proposition-Definition} \noindent In other terms, a localized multiple harmonic sum $\widetilde{\frak{h}}_{m}(w)$ is a $\mathbb{Q}(\xi)$-linear combination of products of (non-localized) multiple harmonic sums by polynomials of $m$. \begin{Example} Below, $l_{1},l_{2} \in \mathbb{N}$, and $n_{1},n_{2} \in \mathbb{N}^{\ast}$. 
\newline i) Depth one and $N=1$ : $\tilde{\frak{h}}_{m}(-l_{1})= \sum_{\delta_{1}=1}^{l_{1}+1} \mathcal{B}_{\delta_{1}}^{l_{1}} m^{\delta_{1}} $ \newline ii) Depth two and $N=1$ : $\tilde{\frak{h}}_{m}(-l_{2},-l_{1}) = \sum_{\delta=1}^{l_{1}+l_{2}+2} \mathcal{B}_{\delta}^{l_{2},l_{1}} m^{\delta}$ \noindent\newline $\tilde{\frak{h}}_{m}(n_{2},-l_{1}) = \left\{ \begin{array}{ll} \sum_{\delta_{1}=1}^{l_{1}+1} \mathcal{B}_{\delta_{1}}^{l_{1}} \frak{h}_{m}(n_{2}-\delta_{1}) \text{ if } l_{1}+1 \leq n_{2} \\ \sum_{\delta_{1}=1}^{n_{2}-1} \mathcal{B}_{\delta_{1}}^{l_{1}} \frak{h}_{m}(n_{2}-\delta_{1}) + \sum_{\delta_{1}=0}^{l_{1}-n_{2}+1} \sum_{\delta_{2}=1}^{\delta_{1}-n_{2}+1} \mathcal{B}_{\delta_{1}}^{l_{1}}\mathcal{B}_{\delta_{2}}^{\delta_{1}-n_{2}} m^{\delta_{2}} \text{ if } l_{1}+1 > n_{2} \end{array} \right.$ \newline $\tilde{\frak{h}}_{m}(-l_{2},n_{1}) = \left\{\begin{array}{ll} \tilde{\frak{h}}_{m}(-l_{2})\frak{h}_{m}(n_{1}) - \sum_{\delta_{2}=1}^{l_{2}+1} \mathcal{B}_{\delta_{2}}^{l_{2}} \frak{h}_{m}(n_{1}-\delta_{2}) \text{ if } l_{2}+1 < n_{1} \\ \tilde{\frak{h}}_{m}(-l_{2})\frak{h}_{m}(n_{1}) - \sum_{\delta_{2}=1}^{n_{1}-1} \mathcal{B}_{\delta_{2}}^{l_{2}} \frak{h}_{m}(n_{1}-\delta_{2}) - \sum_{\tilde{\delta}_{2}=0}^{l_{2}-n_{1}+1}\sum_{\delta_{1}=1}^{\tilde{\delta}_{2}+1} \mathcal{B}_{\tilde{\delta}_{2}+n_{1}}^{(l_{2}-n_{1})+n_{1}} \mathcal{B}_{\delta_{1}}^{\tilde{\delta}_{2}} m^{\delta_{1}} \text{ if } \\ l_{2}+1 \geq n_{1} \end{array} \right.$ \end{Example} The next definitions can be used to give a closed formula for the map $\loc^{\Sigma}$. \begin{Definition} Let $\text{SignPart}^{-}$ be the set of couples $(\Sign(w),P^{-})$ where $w$ is as in Definition \ref{la definition du localise} and $P^{-}$ is a connected partition of $\Sign^{-}(w)$.
We define a map $T : \text{SignPart}^{-} \rightarrow \{\text{Finite trees}\}$ by sending $(w,P^{-})$ to the tree defined recursively by : \newline a) the root of the tree is labeled by $(S^{-}(w),S^{+}(w))$ \newline b) consider a vertex of the tree labeled by a couple of parts $(E^{-},E^{+})$ of $\{1,\ldots,d\}$. If $E^{-} \not= \emptyset$ and $E^{+} \not= \emptyset$, then, for each part $P \subset \partial S^{+}(w)$, we draw an arrow starting from $V$ to a new vertex $V'$, and we label $V'$ by the couple $(E^{+} - P,P)$. \end{Definition} \begin{Example} Below, we choose for all examples the connected partition of $\Sign^{-}(w)$ made of singletons. \newline i) In depth one, the two trivial trees $(1)^{-}$ and $(1)^{+}$ \newline ii) In depth two, we have the two trivial trees $(12)^{-}$ and $(12)^{+}$, as well as \begin{center} \begin{tikzpicture}[->,>=stealth',shorten >=1pt,auto,node distance=2cm, thick,main node/.style={font=\sffamily}] \node[main node] (1) {$(1)^{+}(2)^{-}$}; \node[main node] (2) [below left of=1] {$(1)^{+}$}; \node[main node] (3) [below right of=1] {$(1)^{-}$}; \path[every node/.style={font=\sffamily\small}] (1) edge node [left] {} (2) edge [right] node[left] {} (3) ; \end{tikzpicture} \begin{tikzpicture}[->,>=stealth',shorten >=1pt,auto,node distance=2cm, thick,main node/.style={font=\sffamily}] \node[main node] (1) {$(1)^{-}(2)^{+}$}; \node[main node] (2) [below left of=1] {$(2)^{+}$}; \node[main node] (3) [below right of=1] {$(2)^{-}$}; \path[every node/.style={font=\sffamily\small}] (1) edge node [left] {} (2) edge [right] node[left] {} (3) ; \end{tikzpicture} \end{center} \noindent iii) In depth three, we have the two trivial trees $(123)^{+}$ and $(123)^{-}$, as well as \begin{center} \begin{tikzpicture}[->,>=stealth',shorten >=1pt,auto,node distance=2cm, thick,main node/.style={font=\sffamily}] \node[main node] (1) {$(1)^{-}(23)^{+}$}; \node[main node] (2) [below left of=1] {$(23)^{+}$}; \node[main node] (3) [below right of=1] 
{$(2)^{-}(3)^{+}$}; \node[main node] (4) [below left of=3] {$(3)^{+}$}; \node[main node] (5) [below right of=3] {$(3)^{-}$}; \path[every node/.style={font=\sffamily\small}] (1) edge node [left] {} (2) edge [right] node[left] {} (3) (3) edge node [left] {} (4) edge [right] node[left] {} (5) ; \end{tikzpicture} \begin{tikzpicture}[->,>=stealth',shorten >=1pt,auto,node distance=2cm, thick,main node/.style={font=\sffamily}] \node[main node] (1) {$(12)^{+}(3)^{-}$}; \node[main node] (2) [below left of=1] {$(12)^{+}$}; \node[main node] (3) [below right of=1] {$(1)^{+}(2)^{-}$}; \node[main node] (4) [below left of=3] {$(1)^{+}$}; \node[main node] (5) [below right of=3] {$(1)^{-}$}; \path[every node/.style={font=\sffamily\small}] (1) edge node [left] {} (2) edge [right] node[left] {} (3) (3) edge node [left] {} (4) edge [right] node[left] {} (5) ; \end{tikzpicture} \noindent \newline \newline \begin{tikzpicture}[->,>=stealth',shorten >=1pt,auto,node distance=2cm, thick,main node/.style={font=\sffamily}] \node[main node] (1) {$(1)^{-}(2)^{+}(3)^{-}$}; \node[main node] (2) [left of=1] {$(13)^{+}$}; \node[main node] (3) [right of=1] {$(13)^{-}$}; \node[main node] (4) [below left of=1] {$(1)^{+}(3)^{-}$}; \node[main node] (5) [below right of=1] {$(1)^{-}(3)^{+}$}; \node[main node] (6) [left of=4] {$(1)^{+}$}; \node[main node] (7) [below left of=4] {$(1)^{-}$}; \node[main node] (8) [below right of=5] {$(3)^{+}$}; \node[main node] (9) [right of=5] {$(3)^{-}$}; ; \path[every node/.style={font=\sffamily\small}] (1) edge node [left] {} (2) edge [right] node[left] {} (3) edge node [left] {} (4) edge [right] node[left] {} (5) (4) edge node [left] {} (6) edge [right] node[left] {} (7) (5) edge node [left] {} (8) edge [right] node[left] {} (9) ; \end{tikzpicture} \begin{tikzpicture}[->,>=stealth',shorten >=1pt,auto,node distance=2cm, thick,main node/.style={font=\sffamily}] \node[main node] (1) {$(12)^{-}(3)^{+}$}; \node[main node] (2) [below left of=1] {$(3)^{+}$}; \node[main node] 
A closed formula for the map $\loc^{\Sigma}$ can be written as a sum over the set of paths from the root to the leaves (sequences of nodes $(N_{1},\ldots,N_{r})$ such that $N_{1}$ is the root, $N_{r}$ is a leaf and, for each $i$, $N_{i+1}$ is a son of $N_{i}$). \end{Proposition} \begin{proof} Induction on $d$. \end{proof} The explicit version of this formula will appear in the next version of this text, and in the next version of \cite{I-2}.
has been defined with three different points of view in \cite{Deligne} \S11, \cite{CLS}, and \cite{Shiho 1}, \cite{Shiho 2}.
In our simple example, the three points of view are equivalent and we follow \cite{Deligne} \S11. \newline We go back to the notations of \S1.1 : $p$ is a prime number, $N \in \mathbb{N}^{\ast}$ is prime to $p$, $\xi_{N}$ is a primitive $N$-th root of unity in $\overline{\mathbb{Q}_{p}}$. We apply \S2.1 and \S2.2 in the case where $K=\mathbb{Q}_{p}(\xi_{N})$, $r=N$, and $(z_{1},\ldots,z_{r})=(\xi_{N}^{1},\ldots,\xi_{N}^{N})$, thus $X=(\mathbb{P}^{1} - \{0,\mu_{N},\infty\})/ K$. According to \cite{Deligne}, $\pi_{1}^{\un,\crys}(\mathbb{P}^{1} - \{0,\mu_{N},\infty\}\text{ }/\text{ }\mathbb{F}_{q})$ is the data of $\pi_{1}^{\un,\DR}(\mathbb{P}^{1} - \{0,\mu_{N},\infty\}\text{ }/\text{ }K)$ plus the Frobenius structure of the KZ connection. The next definitions refer to Coleman integration as in \cite{Coleman}, \cite{Besser}, \cite{Vologodsky}. They depend on the choice of a determination of the $p$-adic logarithm. The alphabet $\frak{e}$ of the previous paragraphs is now $\{e_{0},e_{\xi^{1}},\ldots,e_{\xi^{N}}\}$ and we denote it by $e_{0 \cup \mu_{N}}$. \begin{Definition} \label{def Li coleman} (Furusho \cite{Furusho 1} for $N=1$, Yamashita \cite{Yamashita} for any $N$). \newline We fix a determination $\log_{p}$ of the $p$-adic logarithm. Let $\Li_{p,\KZ}$ be the non-commutative generating series of Coleman functions on $X$ which satisfies $\nabla_{\KZ}\Li_{p,\KZ} = 0$ and $\Li_{p,\KZ}(z) \underset{z \rightarrow 0}{\sim} e^{e_{0} \log_{p}(z)}$. \end{Definition} The next definition is a generalization of a definition in \cite{FKMT3}. \begin{Definition} \label{p-adic continuation loc MPL} Let $w$ be a localized word. We write $\loc^{\smallint}(w) = \sum_{w'} F_{w'} \otimes w'$. \newline Let $\Li^{\loc}_{p,\KZ}[w]=\sum_{w'}F_{w'}\Li_{p,\KZ}[w']$. 
and, for each $\alpha \in -\mathbb{N}^{\ast}$, the Frobenius iterated $\alpha$ times is $\phi^{-\alpha}$ in the sense of \cite{Deligne}, \S11.
$N=1$, $\alpha=-1$ \"{U}nver \cite{Unver MZV}, \S1 ;
\newline The numbers $\zeta_{p,\KZ} \big((n_{i});(\xi^{j_{i}})\big) = \Phi_{p,\KZ}[ e_{0}^{n_{d}-1}e_{\xi^{j_{d}}} \ldots e_{0}^{n_{1}-1}e_{\xi^{j_{1}}}] \in K$ are called $p$-adic multiple zeta values at roots of unity. \end{Definition} \indent In \cite{I-3}, we have denoted by $\Phi_{p,-\infty} = \Phi_{p,\KZ}$, $\zeta_{p,-\infty} = \zeta_{p,\KZ}$ and we also defined the following variant. Below, the group law $\circ^{\smallint_{0}^{1}}$ on $\Pi_{1,0}$ is the group law denoted by $\circ$ in \cite{Deligne Goncharov}, \S5.12. \begin{Definition} \cite{I-3}\label{MZV Coleman bis} Let $\Phi_{p,\infty}$ be the inverse of $\Phi_{p,\KZ}$ for the group law $\circ^{\smallint_{0}^{1}}$. The numbers $\zeta_{p,\infty} \big((n_{i});(\xi^{j_{i}})\big) = \Phi_{p,\infty}[ e_{0}^{n_{d}-1}e_{\xi^{j_{d}}} \ldots e_{0}^{n_{1}-1}e_{\xi^{j_{1}}}] \in K$ are called $p$-adic multiple zeta values at roots of unity. \end{Definition} In the Definitions \ref{MZV Deligne}, \ref{MZV Coleman} and \ref{MZV Coleman bis}, we are actually adopting a terminology which differs from the terminologies in other works : the $p$-adic multiple zeta values at roots of unity for $\alpha=-1$ are called cyclotomic $p$-adic multiple zeta values in \cite{Unver cyclotomic}, those for $\alpha = \frac{\log(q)}{\log(p)}$ or $\alpha=-\infty$ are called $p$-adic multiple $L$-values in \cite{Yamashita}. \newline For any $\alpha,\alpha' \in \mathbb{Z} \cup \{\pm \infty\} - \{0\}$, $\zeta_{p,\alpha}$ and $\zeta_{p,\alpha'}$ can be expressed in terms of each other : for certain particular $\alpha$, this is written in \cite{Furusho 2}, Theorem 2.14, and in \cite{Yamashita} ; and this is expressed in terms of $p$-adic pro-unipotent harmonic actions in \cite{I-3}. 
Let $K\langle\langle e_{0}^{\pm 1},e_{\xi^{1}},\ldots,e_{\xi^{N}}\rangle\rangle$ be the set of linear maps $\mathbb{Q}\langle e_{0}^{\pm 1},e_{\xi^{1}},\ldots,e_{\xi^{N}}\rangle \rightarrow K$ where $\mathbb{Q}\langle e_{0}^{\pm 1},e_{\xi^{1}},\ldots,e_{\xi^{N}}\rangle$ is the localization of the non-commutative ring $\mathbb{Q}\langle e_{0},e_{\xi^{1}},\ldots,e_{\xi^{N}}\rangle$ equipped with the concatenation product at the multiplicative part generated by $e_{0}$.
$$ \circ_{\har}^{\Sigma} : K \langle\langle e_{0\cup \mu_{N}} \rangle\rangle_{S} \times \Map(\mathbb{N}, K \langle\langle e_{0\cup \mu_{N}} \rangle\rangle_{\har}^{\Sigma}) \rightarrow \Map(\mathbb{N}, K \langle\langle e_{0\cup \mu_{N}} \rangle\rangle_{\har}^{\Sigma}) $$
$$ \har_{p^{\alpha}\mathbb{N}} = \har_{p^{\alpha}} (\circ_{\har}^{\Sigma})_{\loc} \har_{\mathbb{N},\loc}^{(p^{\alpha})} $$
Assume that $\xi_{N}^{j_{1}}\not=1,\ldots,\xi_{N}^{j_{d}}\not=1$.
We are going to see that this approach will give us a solution to the problem observed in \S3.3, by defining these numbers implicitly.
Let the $p$-adic pro-unipotent $\Sigma$-harmonic action localized at the target be the map $\circ_{\har}^{\Sigma,\ast,\loc} = \circ_{\har}^{\Sigma,\loc,\loc} \circ (\id \times \loc^{\vee})$.
The totally negative Ad$p$MZV$\mu_{N}$'s are the numbers $\zeta_{p,\alpha}^{\Ad}(l;(n_{i});(\xi^{j_{i}}))_{d}$ with $n_{i} \leq 0$ for all $i$.
\displaystyle (p^{\alpha})^{-n_{1}}\mathcal{B}_{l}^{n_{1}} = \frac{{n_{1}+1 \choose l}}{n_{1}+1} B_{n_{1}+1-l} \text{ if } 1 \leq l \leq n_{1}+1
(Definition \ref{MZV Coleman}, Definition \ref{MZV Coleman bis}).
Taking a multiple harmonic sum whose domain of summation is of the form $0<m_{1}<\ldots<m_{d}<q^{\tilde{\alpha}}$,
$\{(m'_{1},\ldots,m'_{d'}) \in \mathbb{N}^{d'}\text{ }|\text{ }0<m'_{1}<\ldots<m'_{d'}<m\}$
\{ m_{1} \in \mathbb{N}\text{ }|\text{ }0<m_{1}<m\} \times \{ m'_{1} \in \mathbb{N}\text{ }|\text{ }0<m'_{1}<m\} =
$\pi_{1}^{\un,\crys}(\mathbb{P}^{1} - \{0,\mu_{N},\infty\})$. \newline \indent In \cite{II-1} we defined a notion of adjoint double shuffle relations, satisfied by the Ad$p$MZV$\mu_{N}$'s of \ref{def adjoint MZV}. This includes a notion of adjoint quasi-shuffle relations. In \cite{II-2}, we have shown that the adjoint quasi-shuffle relations of Ad$p$MZV$\mu_{N}$'s can be retrieved by the formulas of part I involving the pro-unipotent harmonic actions. Here is a variant for the localized Ad$p$MZV$\mu_{N}$'s introduced in \ref{def localized pMZV}. \begin{Proposition} The localized Ad$p$MZV$\mu_{N}$'s satisfy a canonical family of polynomial equations which generalizes the adjoint quasi-shuffle relations of \cite{II-1}, and which we call the localized adjoint quasi-shuffle relations. \end{Proposition} \begin{proof} Same as the proof in \cite{II-2} that we can retrieve the fact that Ad$p$MZV$\mu_{N}$'s satisfy the adjoint quasi-shuffle relations from the fact that multiple harmonic sums satisfy the quasi-shuffle equation and the equation relating Ad$p$MZV$\mu_{N}$'s and multiple harmonic sums involving the $p$-adic pro-unipotent harmonic action $\circ_{\har}^{\smallint_{0}^{z<<1}}$. \newline Here, let us write the localized quasi-shuffle relation for the multiple harmonic sums $\har_{p^{\alpha}m}(w)\har_{p^{\alpha}m}(w')=\har_{p^{\alpha}m}(w\ast_{\loc}w')$; then, we use that $\har_{p^{\alpha}m}$'s have an expression in terms of $\har_{m}$'s and certain power series of variable $m$ whose coefficients are written in terms of localized Ad$p$MZV$\mu_{N}$'s: equation (\ref{eq: the new equation}). By the linear independence of the $\har_{m}$'s over the ring of overconvergent power series expansion of $m \in \mathbb{N} \subset \mathbb{Z}_{p}$, this implies a family of polynomial equations satisfied by the localized Ad$p$MZV$\mu_{N}$'s which we call as in the statement. \end{proof} \end{document}
\begin{document} \title{Weibel's conjecture for twisted K-theory} \begin{abstract} \noindent We prove Weibel's conjecture for twisted $K$-theory when twisting by a smooth proper connective dg-algebra. Our main contribution is showing we can kill a negative twisted $K$-theory class using a projective birational morphism (in the same twisted setting). We extend the vanishing result to relative twisted $K$-theory of a smooth affine morphism and describe counter examples to some similar extensions. \end{abstract} \section{Introduction} The so-called fundamental theorem for $K_1$ and $K_0$ states that for any ring $R$ there is an exact sequence \[ 0 \rightarrow K_1(R) \rightarrow K_1(R[t]) \oplus K_1(R[t^{-1}]) \rightarrow K_1(R[t^\pm]) \rightarrow K_0(R) \rightarrow 0. \] We see $K_0$ can be defined using $K_1$. There is an analogous exact sequence, truncated on the right, for $K_0$. Bass defines $K_{-1}(X)$ as the cokernel of the final morphism. He then iterates the construction to define a theory of negative K-groups \cite[Sections XII.7 and XII.8]{bass_algebraic_k-theory}. Weibel's conjecture, originally posed in \cite{weibel_conjecture}, asks if $K_{-i}(R) = 0$ for $i > \dim R$ when $R$ has finite Krull dimension. Kerz--Strunk--Tamme \cite{kerz_strunk_tamme} have proven Weibel's conjecture for any Noetherian scheme of finite Krull dimension (see the introduction for a historical summary of progress) by establishing pro cdh-descent for algebraic $K$-theory. Land--Tamme \cite{land_tamme} have shown that a general class of localizing invariants satisfy pro cdh-descent. With this improvement, we extend Weibel's vanishing to some cases of twisted $K$-theory. \begin{theorem}\label{main_theorem} Let $X$ be a Noetherian $d$-dimensional scheme and $\sheaffont{A}$ a sheaf of smooth proper connective quasi-coherent differential graded algebras over $X$, then $K_{-i}(\textnormal{Perf}(\sheaffont{A}))$ vanishes for $i > d$. 
\end{theorem} The original goal of this paper was to extend Weibel's conjecture to an Azumaya algebra over a scheme. To an Azumaya algebra $\sheaffont{A}$ of rank $r^2$ on $X$ we can associate a Severi-Brauer variety $P$ of relative dimension $r-1$ over $X$. Such a variety is \'etale-locally isomorphic over $X$ to $\mathbb{P}^{r-1}_X$. In Quillen's work \cite{quillen_higher_algebraic_k-theory}, he generalizes the projective bundle formula to Severi-Brauer varieties showing (for $i \geq 0$) \[ K_i(P) \cong \bigoplus_{n=0}^{r-1} K_i(\sheaffont{A}^{\otimes n}). \] At the root of this computation is a semi-orthogonal decomposition of $\textnormal{Perf}(P)$. Consequently, the computation lifts to the level of nonconnective $K$-theory spectra. Statements about the $K$-theory of Azumaya algebras can generally be extracted through this decomposition. In our case, the dimension of the Severi-Brauer variety jumps and so Weibel's conjecture (for our noncommutative dg-algebra) does not follow from the commutative setting. We could remedy this by characterizing a class of morphisms to $X$, which should include Severi-Brauer varieties, and then show the relative $K$-theory vanishes under $-d-1$. In Remark \ref{counter_example}, we show that smooth and proper morphisms (in fact, smooth and projective) are not sufficient. We warn the reader that we will use the overloaded words ``smooth and proper'' in both the scheme and dg-algebra settings. For dg-algebras and dg-categories, properness and smoothness are module and algebraic finiteness conditions, see To\"en--Vaqui\'e \cite[Definition 2.4]{toen_vaquie}. Together, the two conditions characterize the dualizable objects in $\textnormal{Mod}_{\textnormal{Mod}_R}(\textnormal{Pr}^{\textnormal{L}}_{\textnormal{st},\omega})$, whose objects are $\omega$-compactly generated $R$-linear stable presentable $\infty$-categories.
More surprisingly, the invertible objects of $\textnormal{Mod}_{\textnormal{Mod}_R}(\textnormal{Pr}^{\textnormal{L}}_{\textnormal{st},\omega})$ are exactly the module categories over derived Azumaya algebras, see Antieau--Gepner \cite[Theorem 3.15]{antieau_gepner}. So Theorem \ref{main_theorem} recovers the discrete Azumaya algebra case. However, any connective derived Azumaya algebra is discrete. After base-changing to a field $k$, $\sheaffont{A}_k \cong H_*\sheaffont{A}_k$ is a connective graded $k$-algebra and $H_*\sheaffont{A}_k \otimes_k (H_*\sheaffont{A}_k)^{op}$ is Morita equivalent to $k$. So $H_*\sheaffont{A}_k$ is discrete. The scope of Theorem \ref{main_theorem} is not wasted as smooth proper connective dg-algebras can be nondiscrete, see Raedschelders--Stevenson \cite[Section 4.3]{raedschelders_stevenson}. The proof of Theorem \ref{main_theorem} follows Kerz \cite{kerz}. In Section $2$, we define and study twisted $K$-theory. We kill a negative twisted $K$-theory class using a projective birational morphism in Section $3$. Lastly, Section $4$ holds the main theorem and we consider some extensions. \textbf{Conventions:} We make very little use of the language of $\infty$-categories. For a commutative ring $R$, there is an equivalence of $\infty$-categories between the $\mathbb{E}_1$-ring spectra over $HR$ and differential graded algebras over $R$ localized at the quasi-isomorphisms (see \cite[7.1.4.6]{lurie_ha}). For a dg-algebra (or $\mathbb{E}_1$-ring) $\sheaffont{A}$, we can consider the $\infty$-category $\mathrm{RMod}(\sheaffont{A})$ of spectra which have a right $\sheaffont{A}$-module structure. We will refer to this $\infty$-category as the derived category of $\sheaffont{A}$ and denote it by $D(\sheaffont{A})$. The subcategory $\textnormal{Perf}(\sheaffont{A})$ consists of all compact objects of $\mathrm{RMod}(\sheaffont{A})$, or the right $\sheaffont{A}$-modules which corepresent a functor that commutes with filtered colimits.
We shall refer to objects of $\textnormal{Perf}(\sheaffont{A})$ as perfect complexes over $\sheaffont{A}$. We use $K(-)$ undecorated as non-connective algebraic $K$-theory and consider it as a localizing invariant in the sense of Blumberg--Gepner--Tabuada \cite{blumberg_gepner_tabuada_universal}. In particular, it is an $\infty$-functor from $\textnormal{Cat}^{\textnormal{perf}}_{\infty}$, the $\infty$-category of idempotent complete small stable infinity categories with exact functors, taking values in $\textnormal{Sp}$, the $\infty$-category of spectra. For $X$ a quasi-compact quasi-separated scheme, $K(\textnormal{Perf}(X))$ is equivalent to the non-connective $K$-theory spectrum of Thomason--Trobaugh \cite{thomason_trobaugh}. The $\infty$-category $\textnormal{Cat}^{\textnormal{perf}}_{\infty}$ has a symmetric monoidal structure which we will denote by $\widehat{\otimes}$. For $R$ an $\mathbb{E}_\infty$-ring spectrum, $\textnormal{Perf}(R)$ is an $\mathbb{E}_\infty$ algebra in $\textnormal{Cat}^{\textnormal{perf}}_{\infty}$. We will restrict the domain of algebraic $K$-theory to $\textnormal{Mod}_{\textnormal{Perf} (R)}(\textnormal{Cat}^{\textnormal{perf}}_{\infty})$. \textbf{Acknowledgements:} The author is thankful to his advisor, Benjamin Antieau, for the suggested project, patience, and guidance. He also thanks Maximilien P\'{e}roux for helpful comments on an earlier draft. The author was partially supported by NSF Grant DMS-1552766 and NSF RTG grant DMS-1246844. \section{Twisted $K$-theory} In Grothendieck's original papers \cite{grothendieck_brauer_i} \cite{grothendieck_brauer_ii} \cite{grothendieck_brauer_iii}, he globalizes the notion of a central simple algebra over a field. \begin{defin} A locally free sheaf of $\mathscr{O}_X$-algebras $\sheaffont{A}$ is a \textit{sheaf of Azumaya algebras} if it is \'etale-locally isomorphic to $\mathcal{M}_n(\mathscr{O}_X)$ for some $n$. 
\end{defin} An Azumaya algebra is then a $PGL_n$-torsor over the \'etale topos of $X$ and so, by Giraud, isomorphism classes are in bijection with $H^1_{\tn{\'et}}(X, PGL_n)$. The central extension of sheaves of groups in the \'etale topology \[ 1 \rightarrow \mathbb{G}_m \rightarrow GL_n \rightarrow PGL_n \rightarrow 1 \] leads to an exact sequence of nonabelian cohomology \[ \begin{tikzcd} \cdots \ar[r] & H_{\tn{\'et}}^1(X, \mathbb{G}_m) \ar[r] & H_{\tn{\'et}}^1(X, GL_n) \ar[r] & H_{\tn{\'et}}^1(X, PGL_n) \ar[r] & H_{\tn{\'et}}^2(X, \mathbb{G}_m). \end{tikzcd} \] For $d\,|\,n$ we have a morphism of exact sequences \[ \begin{tikzcd} 1 \ar[r] & \mathbb{G}_m \ar[r] & GL_n \ar[r] & PGL_n \ar[r] & 1 \\ 1 \ar[r] & \mathbb{G}_m \ar[u, equal] \ar[r] & GL_d \ar[u] \ar[r] & PGL_d \ar[u] \ar[r] & 1 \end{tikzcd} \] with the two right arrows given by block-summing the matrix along the diagonal $n/d$ times. The Brauer group is the filtered colimit of cofibers \[ Br(X) := \colim(\textnormal{cofib}(H_{\tn{\'et}}^1(X, GL_n) \rightarrow H_{\tn{\'et}}^1(X, PGL_n))) \] along the partially-ordered set of the natural numbers under division. This is the group of Azumaya algebras modulo Morita equivalence with group operation given by tensor product (see \cite{grothendieck_brauer_i}). We have an injection $Br(X) \hookrightarrow H^2_{\tn{\'et}}(X, \mathbb{G}_m)$ and when $X$ is quasi-compact this injection factors through the torsion subgroup. We will call $Br'(X) := H^2_{\tn{\'et}}(X, \mathbb{G}_m)_{tor}$ the cohomological Brauer group. Grothendieck asked if the injection $Br(X) \hookrightarrow Br'(X)$ is an isomorphism. This map is not generally surjective. Edidin--Hassett--Kresch--Vistoli \cite{edidin_hassett_kresch_vistoli} give a non-separated counterexample by connecting the image of the Brauer group to quotient stacks. There are two ways to proceed in addressing the question. The first is to provide a class of schemes for when this holds.
In \cite{deJong_gabber}, de Jong publishes a proof of O. Gabber that $Br(X) \cong Br'(X)$ when $X$ is equipped with an ample line bundle. Along with reproving Gabber's result for affines, Lieblich \cite{lieblich_thesis} shows that for a regular scheme with dimension less than or equal to $2$ there are isomorphisms $Br(X) \cong Br'(X) \cong H^2_{\tn{\'et}}(X, \mathbb{G}_m)$. The second perspective is to enlarge the class of objects considered. The Morita equivalence classes of $\mathbb{G}_m$-gerbes over the \'etale topos of a scheme $X$ are in bijection with $H^2_{\tn{\'et}}(X, \mathbb{G}_m)$. In \cite{lieblich_thesis}, Lieblich associates to any Azumaya algebra $\sheaffont{A}$ a $\mathbb{G}_m$-gerbe of Morita-theoretic trivializations. Over an \'etale open $U \rightarrow X$, the gerbe gives a groupoid of Morita equivalences from $\sheaffont{A}$ to $\mathscr{O}_X$. The gerbe of trivializations represents the boundary class $\delta([\sheaffont{A}]) = \alpha \in H^2_{\tn{\'et}}(X, \mathbb{G}_m)$. Any class $\alpha \in H^2_{\tn{\'et}}(X, \mathbb{G}_m)$ is realizable on a \v{C}ech cover. We can use this data to build a well-defined category of sheaves of $\mathscr{O}_X$-modules which ``glue up to $\alpha$'', see C\u ald\u araru \cite[Chapter 1]{caldararu_thesis}. Let $\textnormal{Mod}_X^\alpha$ denote the corresponding derived $\infty$-category and $\textnormal{Perf}_X^\alpha$ the full subcategory of compact objects. $K(\textnormal{Perf}_X^{\alpha})$ is the classical definition of $\alpha$-twisted algebraic $K$-theory. Determining when the cohomology class $\alpha$ is represented by an Azumaya algebra reduces to finding a twisted locally-free sheaf with trivial determinant on a $\mathbb{G}_m$-gerbe associated to $\alpha$ \cite[Section 2.2.2]{lieblich_thesis}. The endomorphism algebra of the twisted locally-free sheaf gives the Azumaya algebra and the twisted module represents the tilt $\textnormal{Mod}_X^{\alpha} \simeq \textnormal{Mod}_\sheaffont{A}$. 
Lieblich also compactifies the moduli of Azumaya algebras. This necessarily includes developing a definition of a derived Azumaya algebra. \begin{defin} A \textit{derived Azumaya algebra} over a commutative ring $R$ is a proper dg-algebra $\sheaffont{A}$ such that the natural map of $R$-algebras \[ \sheaffont{A} \otimes_R^\mathbb{L} \sheaffont{A}^{op} \xrightarrow{\simeq} \mathbb{R}Hom_{D(R)}(\sheaffont{A}, \sheaffont{A}) \] is a quasi-isomorphism. \end{defin} After Lieblich, To\"en \cite{toen_azumaya} and (later) Antieau--Gepner \cite{antieau_gepner} consider the analogous problem posed by Grothendieck in the dg-algebra and $\mathbb{E}_\infty$-algebra settings, respectively. Antieau--Gepner construct an \'etale sheaf $\mathbf{Br}$ in the $\infty$-topos $\mathrm{Shv}^{\tn{\'et}}_R$. For any \'etale sheaf $X$, we can now associate a Brauer space $\mathbf{Br}(X)$. For $X$ a quasi-compact quasi-separated scheme, they show $\pi_0(\mathbf{Br}(X)) \cong H^1_{\tn{\'et}}(X, \mathbb{Z}) \times H^2_{\tn{\'et}}(X, \mathbb{G}_m)$ and every such Brauer class is algebraic. Now for any (possibly nontorsion) $\alpha \in H^2_{\acute{e}t}(X, \mathbb{G}_m)$ there is a derived Azumaya algebra $\sheaffont{A}$ and an equivalence $\textnormal{Mod}_X^\alpha \simeq \textnormal{Mod}_\sheaffont{A}$ of stable $\infty$-categories. This reframes classical twisted $K$-theory as $K$-theory with coefficients in a particularly special dg-algebra in $D(X)$. For our purposes, we work with a generalized definition of twisted $K$-theory which allows ``twisting'' by any dg-algebra. \begin{defin} Let $R$ be a commutative ring. For a dg-algebra $\sheaffont{A}$ over $R$, we define the $\sheaffont{A}$-\textit{twisted $K$-theory} $K^\sheaffont{A}: \textnormal{Mod}_{\textnormal{Perf}(R)}(\textnormal{Cat}^{\textnormal{perf}}_{\infty}) \rightarrow \tn{Sp}$ by $K^\sheaffont{A}(\mathcal{C}) := K( \mathcal{C} \widehat{\otimes}_{\textnormal{Perf}(R)} \textnormal{Perf}(\sheaffont{A}))$. 
\end{defin} When the dg-algebra ``$\sheaffont{A}$'' is clear, we just write twisted K-theory. If our input to $K^\sheaffont{A}$ is an $R$-algebra $S$ then \[ K^\sheaffont{A}(S) = K(\textnormal{Perf}(S) \widehat{\otimes}_{\textnormal{Perf}(R)} \textnormal{Perf}(\sheaffont{A})) \simeq K(\textnormal{Perf}(S\otimes_R \sheaffont{A})) \simeq K(S \otimes_R \sheaffont{A}). \] Our definition recovers the historical definition of twisted K-theory when $\sheaffont{A}$ is a derived Azumaya algebra and we evaluate on the base ring $R$. The same definition works for a scheme $X$ and $\sheaffont{A} \in \tn{Alg}_{\mathbb{E}_1}(D_{qc}(X))$. We will refer to such an $\sheaffont{A}$ as \textit{a sheaf of quasi-coherent dg-algebras over $X$}. By Theorem 9.36 of Blumberg--Gepner--Tabuada \cite{blumberg_gepner_tabuada_universal}, twisted $K$-theory is a localizing invariant. When $X$ is a quasi-compact quasi-separated scheme, Proposition A.15 of Clausen--Mathew--Naumann--Noel \cite{clausen_mathew_naumann_noel} establishes Nisnevich descent when $X$ is quasi-compact quasi-separated. \begin{defin} A dg-algebra $\sheaffont{A}$ over a ring $R$ is \textit{proper} if it is perfect as a complex over $R$ and \textit{smooth} if it is perfect over $\sheaffont{A}^{op} \otimes_{R} \sheaffont{A}$. \end{defin} The following is Lemma 2.8 of \cite{toen_vaquie} and is an essential property for our proof in Section 3. \begin{lemma}\label{perfection_transference} Let $\sheaffont{A}$ be a smooth proper dg-algebra over a ring $R$. Then a complex of $D(\sheaffont{A})$ is perfect over $\sheaffont{A}$ if and only if it is perfect as an object of $D(R)$. \end{lemma} The previous definition and lemma both generalize to a sheaf of quasi-coherent dg-algebras over a scheme as perfection is a local property. For the remainder of the section, we prove some basic properties of $\sheaffont{A}$-twisted K-theory, often assuming $\sheaffont{A}$ is connective. We will not use smooth and properness until the later sections. 
\begin{proposition} \label{pi_zero} Let $\sheaffont{A}$, $S$ be connective dg-algebras over $R$. Then the natural maps induce isomorphisms \[ K_i^\sheaffont{A}(S) \cong K_i^\sheaffont{A}(\pi_0(S)) \cong K_i^{\pi_0(\sheaffont{A})}(S) \cong K_i^{\pi_0(\sheaffont{A})}(\pi_0(S)) \] for $i \leq 0$. \end{proposition} \begin{proof} We have the following isomorphisms of discrete rings \[ \pi_0(\sheaffont{A} \otimes_R S) \cong \pi_0(\sheaffont{A} \otimes_R \pi_0(S)) \cong \pi_0(\pi_0(\sheaffont{A}) \otimes_R S) \cong \pi_0(\pi_0(\sheaffont{A}) \otimes_R \pi_0(S)). \] The lemma follows since $K_i(R) \cong K_i(\pi_0(R))$ for $i \leq 0$ (see Theorem 9.53 of \cite{blumberg_gepner_tabuada_universal}). \end{proof} The previous proposition suggests we can work discretely and then transfer the results to the derived setting. This is true to some extent. However, taking $\pi_0$ of a connective dg-algebra does not preserve smoothness, which is a necessary property for our proof of Proposition \ref{platification}. We will also need reduction invariance for low dimensional K-groups. \begin{proposition}\label{reduction_invariance} Let $R$ be a commutative ring and $\sheaffont{A}$ a connective dg-algebra over $R$. Let $S$ be a commutative ring under $R$ and Let $I$ be a nilpotent ideal of $S$. Then the induced morphism $K_i^\sheaffont{A}(S) \xrightarrow{\cong} K_i^\sheaffont{A}(S/I)$ is an isomorphism for $i \leq 0$. \end{proposition} \begin{proof} By naturality of the fundamental exact sequence of twisted $K$-theory (see (\ref{fundamental}) and the surrounding discussion at the beginning of Section 3), we can restrict the proof to $K_0^\sheaffont{A}$. By Proposition \ref{pi_zero}, we can assume $\sheaffont{A}$ is a discrete algebra. Let $\varphi: S \twoheadrightarrow S/I$ be the surjection. After $- \otimes_R \sheaffont{A}$ we have a surjection $(\ker \varphi) \otimes_R \sheaffont{A} \twoheadrightarrow \ker(\varphi \otimes_R \sheaffont{A})$. 
The nonunital ring $(\ker \varphi) \otimes_R \sheaffont{A}$ is nilpotent. So $\ker(\varphi \otimes_R \sheaffont{A})$ is nilpotent as well. The proposition follows from nil-invariance of $K_0$. \end{proof} A Zariski descent spectral sequence argument gives us a global result. \begin{corollary}\label{scheme_reduction_invariance} Let $X$ be a quasi-compact quasi-separated scheme of finite Krull dimension $d$ and $\sheaffont{A}$ a sheaf of connective quasi-coherent dg-algebras over $X$. The natural morphism $f: X_{red} \rightarrow X$ induces isomorphisms \[ K_{-i}^{f^*\sheaffont{A}}(X_{red}) \cong K_{-i}^\sheaffont{A}(X) \] for $i \geq d$. \end{corollary} \begin{proof} We have descent spectral sequences \begin{align*} E_2^{p,q} &= H^p_{Zar}(X, (\pi_qK^\sheaffont{A})^\sim) \Rightarrow \pi_{q-p}K^\sheaffont{A}(X) \tn{ and } \\ E_2^{p,q} &= H^p_{Zar}(X, f_*(\pi_qK^{f^*(\sheaffont{A})})^\sim) \Rightarrow \pi_{q-p}K^{f^*\sheaffont{A}}(X_{red}) \end{align*} both with differential $d_2 = (2, 1)$. We let $F^\sim$ denote the Zariski sheafification of the presheaf $F$. The spectral sequences agree for $q \leq 0$. By Corollary 3.27 of \cite{clausen_mathew}, the spectral sequences vanish for $p > d$. \end{proof} In Theorem \ref{relative_main_theorem}, we extend our main theorem across smooth affine morphisms. We will need reduction invariance in this setting. \begin{defin} For $f: S \rightarrow X$ a morphism of quasi-compact quasi-separated schemes and $\sheaffont{A}$ a sheaf of quasi-coherent dg-algebras over $X$, the \textit{relative $\sheaffont{A}$-twisted $K$-theory of $f$} is \[ K^\sheaffont{A}(f) := \textnormal{fib}(K^\sheaffont{A}(X) \xrightarrow{f^*} K^\sheaffont{A}(S)). \] \end{defin} As defined, $K^\sheaffont{A}(f)$ is a spectrum. There is an associated presheaf of spectra on the base scheme $X$ given by $U \mapsto K^\sheaffont{A}(f_{|_U})$.
This presheaf sits in a fiber sequence \[ K^\sheaffont{A}(f) \rightarrow K^\sheaffont{A} \rightarrow K^\sheaffont{A}_S \] where the presheaf $K^\sheaffont{A}_S$ is also defined by pullback along $f$. Both presheaves $K^\sheaffont{A}$ and $K^\sheaffont{A}_S$ satisfy Nisnevich descent and so $K^\sheaffont{A}(f)$ does as well. \begin{corollary} \label{relative_reduction_invariance} Let $f: S \rightarrow X$ be an affine morphism of quasi-compact quasi-separated schemes. Suppose $X$ has Krull dimension $d$ and let $\sheaffont{A}$ be a sheaf of connective quasi-coherent dg-algebras over $X$. Then the commutative diagram \[ \begin{tikzcd} S_{red} \ar[d] \ar[r, "f_{red}"] & X_{red} \ar[d, "g"] \\ S \ar[r, "f"] & X \end{tikzcd} \] induces an isomorphism of relative twisted $K$-theory groups \[ K_{-i}^{g^*\sheaffont{A}}(f_{red})\cong K_{-i}^{\sheaffont{A}}(f) \] for $i \geq d + 1$. \end{corollary} \begin{proof} We have two descent spectral sequences \begin{align*} E_2^{p,q} &= H^p_{Zar}(X, (\pi_qK^\sheaffont{A}(f))^\sim) \Rightarrow \pi_{q-p}K^\sheaffont{A}(f)(X) \tn{ and } \\ E_2^{p,q} &= H^p_{Zar}(X, g_*(\pi_qK^{g^*\sheaffont{A}}(f_{red}))^\sim) \Rightarrow \pi_{q-p}K^{g^*\sheaffont{A}}(f_{red})(X_{red}) \\ \end{align*} with differential of degree $d = (2, 1)$ and $F^\sim$ the sheafification of the presheaf $F$. 
For an open affine $\textnormal{Spec}\, R \rightarrow X$ with pullback $\textnormal{Spec}\, A \rightarrow S$ we examine the morphism of long exact sequences when $q \leq 0$ \[ \begin{tikzcd}[column sep=tiny] \cdots \ar[r] & \pi_{q}K^\sheaffont{A}(R) \ar[d, "\cong"] \ar[r] & \pi_{q}K^\sheaffont{A}(A) \ar[d, "\cong"] \ar[r] & \pi_{q-1}K^\sheaffont{A}(f) \ar[d] \ar[r] &\pi_{q-1}K^\sheaffont{A}(R) \ar[d, "\cong"] \ar[r] & \pi_{q-1}K^\sheaffont{A}(A) \ar[d, "\cong"] \ar[r] & \cdots \\ \cdots \ar[r] & \pi_{q}K^\sheaffont{A}(R_{red}) \ar[r] & \pi_{q}K^\sheaffont{A}(A_{red}) \ar[r] & \pi_{q-1}K^\sheaffont{A}(f_{red}) \ar[r] &\pi_{q-1}K^\sheaffont{A}(R_{red}) \ar[r] & \pi_{q-1}K^\sheaffont{A}(A_{red}) \ar[r] &\cdots \end{tikzcd} \] By the 5-lemma, this induces sheaf isomorphisms $g_*(\pi_qK^{g^*\sheaffont{A}}(f_{red}))^\sim \cong (\pi_qK^\sheaffont{A}(f))^\sim$ for $q < 0$ and, as in Corollary \ref{scheme_reduction_invariance}, cohomology vanishes for $p > d$. \end{proof} We will need pro-excision for abstract blow-up squares. Recall that an abstract blow-up square is a pullback square \begin{equation} \label{abs} \begin{tikzcd} D \ar[d] \ar[r] & \tilde{X} \ar[d] \\ Y \ar[r] & X \end{tikzcd} \tag{$*$} \end{equation} with $Y \rightarrow X$ a closed immersion and $\tilde{X} \rightarrow X$ a proper morphism which restricts to an isomorphism of open subschemes $\tilde{X} \setminus D \rightarrow X \setminus Y$. The theorem is stated using the $\infty$-category of pro-spectra $\cat{Pro}(\textnormal{Sp})$, where an object is a small cofiltered diagram, $E: \Lambda \rightarrow \textnormal{Sp}$, valued in spectra. We write $\{E_n\}$ for the corresponding pro-spectrum. If the brackets and index are omitted, then the pro-spectrum is considered constant. After adjusting equivalence class representatives, we may assume the cofiltered diagram is fixed when working with a finite set of pro-spectra. 
Any morphism can then be represented by a natural transformation of diagrams (also known as a level map). We will need no knowledge of the $\infty$-category beyond the following definition. \begin{defin} A square of pro-spectra \[ \begin{tikzcd} \{E_n\} \ar[r] \ar[d] & \{F_n\} \ar[d] \\ \{ X_n\} \ar[r] & \{ Y_n\} \end{tikzcd} \] is \textit{pro-cartesian} if and only if the induced map on the level-wise fiber pro-spectra is a weak equivalence (see Definition 2.27 of \cite{land_tamme}). \end{defin} The following is Theorem A.8 of Land--Tamme \cite{land_tamme}. The theorem holds much more generally for any $k$-connective localizing invariant (see Definition 2.5 of \cite{land_tamme}). Twisted $K$-theory is $1$-connective. \begin{theorem}[Land--Tamme \cite{land_tamme}]\label{land_tamme} Given an abstract blow-up square (\ref{abs}) of schemes and a sheaf of dg-algebras $\sheaffont{A}$ on $X$ then the square of pro-spectra \[ \begin{tikzcd} K^\sheaffont{A}(X) \ar[r] \ar[d] & K^\sheaffont{A}(\tilde{X}) \ar[d] \\ \{K^\sheaffont{A}(Y_n)\} \ar[r] & \{K^\sheaffont{A}(D_n)\} \end{tikzcd} \] is pro-cartesian (where $Y_n$ is the infinitesimal thickening of $Y$). \end{theorem} The pro-cartesian square of pro-spectra gives a long exact sequence of pro-groups \[ \begin{tikzcd}[column sep=small] \cdots \ar[r] & \{K_{-i+1}^\sheaffont{A}(D_n)\} \ar[r] & K_{-i}^\sheaffont{A}(X) \ar[r] & K_{-i}^\sheaffont{A}(\tilde{X}) \oplus \{ K_{-i}^\sheaffont{A}(Y_n)\} \ar[r]& \{ K_{-i}^\sheaffont{A}(D_n)\} \ar[r] & \cdots \end{tikzcd} \] which is the key to our induction argument. \section{Blowing-up negative twisted $K$-theory classes} We turn to our main contribution of the existence of a projective birational morphism which kills a given negative twisted $K$-theory class (when twisting by a smooth proper connective dg-algebra). Let $X$ be a quasi-compact quasi-separated scheme and $\sheaffont{A}$ a sheaf of quasi-coherent dg-algebras on $X$.
We first construct geometric cycles for negative twisted K-theory classes on $X$ using a classical argument of Bass (see XII.7 of \cite{bass_algebraic_k-theory}) which works for a general additive invariant. We have an open cover \[ \begin{tikzcd} X[t^{\pm}] \ar[r, "f"] \ar[d, "g"] & X[t^-] \ar[d, "j"] \\ X[t] \ar[r, "k"] & \mathbb{P}^1_X. \end{tikzcd} \] Since twisted $K$-theory satisfies Zariski descent, there is an associated Mayer-Vietoris sequence of homotopy groups \[ \begin{tikzcd} \cdots \ar[r] &K_{-n}^\sheaffont{A}(\mathbb{P}^1_X) \ar[r, "{(j^* k^*)}"] & K_{-n}^\sheaffont{A}(X[t]) \oplus K_{-n}^\sheaffont{A}(X[t^-]) \ar[r, "f^*-g^*"] & K^\sheaffont{A}_{-n}(X[t^{\pm}]) \ar[r, "\partial"] & K^\sheaffont{A}_{-n-1}(\mathbb{P}^1_X) \ar[r] & \cdots \end{tikzcd}. \] As an additive invariant, $K^\sheaffont{A}(\mathbb{P}^1_X) \simeq K^\sheaffont{A}(X) \oplus K^\sheaffont{A}(X)$ splits as a $K^\sheaffont{A}(X)$-module with generators \[ [\mathscr{O} \otimes_{\mathscr{O}_X} \sheaffont{A}] = [\sheaffont{A}] \text{ and } [\mathscr{O}(1) \otimes_{\mathscr{O}_X} \sheaffont{A}]=[\sheaffont{A}(1)] \] corresponding to the Beilinson semiorthogonal decomposition. Adjusting the generators to $[\sheaffont{A}]$ and $[\sheaffont{A}] - [\sheaffont{A}(1)]$, we can identify the map $(j^*, k^*)$ as it is a map of $K^\sheaffont{A}(X)$-modules. The second generator vanishes under each restriction. This identifies the map as \[ K^\sheaffont{A}(\mathbb{P}^1_X) \simeq K^\sheaffont{A}(X)[\sheaffont{A}] \oplus K^\sheaffont{A}(X)([\sheaffont{A}] - [\sheaffont{A}(1)]) \xrightarrow{\Delta \oplus 0} K^\sheaffont{A}(X[t]) \oplus K^\sheaffont{A}(X[t^-]) \] with $\Delta$ the diagonal map corresponding to pulling back along the projections $X[t] \rightarrow X$ and $X[t^-] \rightarrow X$. 
As $\Delta$ is an embedding the long exact sequence splits as \begin{equation}\label{fundamental} \begin{tikzcd} 0 \ar[r] & K_{-n}^\sheaffont{A}(X) \ar[r, "\Delta"] & K_{-n}^\sheaffont{A}(X[t]) \oplus K_{-n}^\sheaffont{A}(X[t^-]) \ar[r, "\pm"] & K^\sheaffont{A}_{-n}(X[t^{\pm}]) \ar[r, "\partial"] & K^\sheaffont{A}_{-n-1}(X)\ar[r] & 0 \end{tikzcd}. \tag{$\dagger$} \end{equation} After iterating the complex \[ K_{-n}^\sheaffont{A}(X[t]) \rightarrow K^\sheaffont{A}_{-n}(X[t^\pm]) \twoheadrightarrow K^\sheaffont{A}_{-n-1}(X), \] we can piece together a complex \[ K_0^\sheaffont{A}(\mathbb{A}^{n+1}_X) \rightarrow K^\sheaffont{A}_0(\mathbb{G}_{m,X}^{n+1}) \twoheadrightarrow K^\sheaffont{A}_{-n-1}(X). \] Negative twisted $K$-theory classes have geometric representations as twisted perfect complexes on $\mathbb{G}^i_{m, X}$. There is even a sufficient geometric criterion implying a given representative is $0$; it is the restriction of a twisted perfect complex on $\mathbb{A}^i_{X}$. Our proof of the main proposition of this section will use these representatives. We first need a lemma about extending finitely-generated discrete modules in a twisted setting. \begin{lemma} \label{twisted_extension} Let $j: U \rightarrow X$ be an open immersion of quasi-compact quasi-separated schemes. Let $\sheaffont{A}$ be a sheaf of proper connective quasi-coherent dg-algebras on $X$ and $j^*\sheaffont{A}$ its restriction. Let $\mathcal{N}$ be a discrete $j^*\sheaffont{A}$-module which is finitely generated as an $\mathscr{O}_U$-module. Then there exists a discrete $\sheaffont{A}$-module $\mathcal{M}$, finitely generated over $\mathscr{O}_X$, such that $j^*\mathcal{M} \cong \mathcal{N}$. \end{lemma} \begin{proof} Note that $H_{\geq 1}(j^*\sheaffont{A})$ necessarily acts trivially on $\mathcal{N}$. 
So the $j^*\sheaffont{A}$-module structure on $\mathcal{N}$ comes from forgetting along the map $j^*\sheaffont{A} \rightarrow H_0(j^*\sheaffont{A})$ and the natural $H_0(j^*\sheaffont{A})$-module structure. Under restriction, \[ j^*H_0(\sheaffont{A}) \cong H_0(j^*\sheaffont{A}). \] We reduce to when $\sheaffont{A}$ is a quasi-coherent sheaf of discrete $\mathscr{O}_X$-algebras, finite over the structure sheaf. We have an isomorphism $\mathcal{N} \cong j^*j_*\mathcal{N}$. Write $j_*\mathcal{N}$ as a filtered colimit of its finitely generated $\sheaffont{A}$-submodules $j_*\mathcal{N} \cong \underset{\lambda}\colim \mathcal{M}_\lambda$. The pullback is exact, so we can write $\mathcal{N} \cong \underset{\lambda}\colim j^*\mathcal{M}_\lambda$ as a filtered colimit of finitely generated submodules. As $\mathcal{N}$ is finitely generated itself, this isomorphism factors at some stage and $\mathcal{N} \cong j^*\mathcal{M}_\lambda$. \end{proof} \begin{proposition} \label{platification} Let $X$ be a reduced scheme which is quasi-projective over a Noetherian affine scheme. Let $\sheaffont{A}$ be a sheaf of smooth proper connective quasi-coherent dg-algebras on $X$. Let $\gamma \in K_{-i}^\sheaffont{A} (X)$ for $i > 0$. Then there is a projective birational morphism $\rho: \tilde{X} \rightarrow X$ so that $\rho^*\gamma = 0 \in K_{-i}^\sheaffont{A}(\tilde{X})$. \end{proposition} \begin{proof} We fix a diagram of schemes over $X$ \[ \begin{tikzcd} \mathbb{G}_{m, X}^i \ar[dr, "\pi_1"'] \ar[rr, "j"] & & \ar[ld, "\pi_2"] \mathbb{A}_X^i \\ & X & \end{tikzcd}. \] For any morphism $f: Y_1 \rightarrow Y_2$, we let $\tilde{f}: \mathbb{G}_{m, Y_1}^i \rightarrow \mathbb{G}_{m, Y_2}^i$ denote the pullback. Lift $\gamma$ to a $K_0^\sheaffont{A}(\mathbb{G}_{m,X}^i)$-class $[P_\bullet]$, with $P_\bullet$ some $\pi_1^*\sheaffont{A}$-twisted perfect complexes on $\mathbb{G}_{m,X}^i$. \\ \textit{The Induction Step}: \\ We induct on the range of homology of $P_\bullet$. 
As $\pi_1^*\sheaffont{A}$ is a sheaf of proper quasi-coherent dg-algebras, $P_\bullet$ is perfect on $\mathbb{G}_{m,X}^i$ by Lemma \ref{perfection_transference}. Since $\mathbb{G}_{m,X}^i$ has an ample family of line bundles, we may choose $P_\bullet$ to be strict perfect without changing the quasi-isomorphism class. After some (de)suspension, we may assume $P_\bullet$ is connective as this only alters the $K_0$-class by $\pm 1$. For the lowest nontrivial differential of $P_\bullet$, $d_1$, we utilize part (iv) of Lemma 6.5 of \cite{kerz_strunk_tamme} (with the morphism $\mathbb{G}_{m, X}^i \rightarrow X$) to construct a projective birational morphism $\rho: X_1 \rightarrow X$ so that $\textnormal{coker}\,(\tilde{\rho}^* d_1)$ ($=H_0(\tilde{\rho}^*P_\bullet)$) has tor-dimension $\leq 1$ over $X_1$. Consider the following distinguished triangle of $\tilde{\rho}^*\pi_1^*\sheaffont{A}$-complexes on $\mathbb{G}_{m, X_1}^i$ \[ F_\bullet \rightarrow \tilde{\rho}^*P_\bullet \rightarrow H_0(\tilde{\rho}^*P_\bullet) \cong \textnormal{coker}\, \tilde{\rho}^*d_1. \] In Lemma \ref{base_platification} below, we cover the base induction step, when the homology is concentrated in a single degree. Using this, construct a projective birational morphism $\phi: X_2\rightarrow X_1$ such that $L\tilde{\phi}^*H_0(\tilde{\rho}^*P_\bullet)$ is a perfect complex and is the restriction of a perfect complex from $\mathbb{A}^i_{X_2}$. By two out of three, $L\tilde{\phi}^*F_\bullet$ is perfect and $[\tilde{\phi}^*\tilde{\rho}^*P_\bullet] = [L\tilde{\phi}^*F_\bullet] + [L\tilde{\phi}^*H_0(\tilde{\rho}^*P_\bullet)]$ in $K_0^\sheaffont{A}(\mathbb{G}_{m, X_2}^i)$. We then repeat the entire induction step with $L\tilde{\phi}^*F_\bullet$. We need to ensure the induction terminates, which is the purpose of the first projective birational morphism of each step.
Since $\textnormal{coker}\,(\tilde{\rho}^* d_1)$ has tor-dimension $\leq 1$ over $X_1$, by \cite[Lemma 6.5]{kerz_strunk_tamme}, $L\tilde{\phi}^*\textnormal{coker}\, (\tilde{\rho}^*d_1) \cong \tilde{\phi}^* \textnormal{coker}\, (\tilde{\rho}^*d_1)$. This implies $L \tilde{\phi}^*F_\bullet$ will have no homology outside the original range of homology of $P_\bullet$. Since $\tilde{\phi}^* \textnormal{coker}\, (\tilde{\rho}^*d_1) \cong \textnormal{coker}\, (\tilde{\phi}^*\tilde{\rho}^*d_1)$, this guarantees $H_0(L\tilde{\phi}^*F_\bullet) = 0$, so the homology of $L\tilde{\phi}^*F_\bullet$ lies in a strictly smaller range than $\tilde{\phi}^*\tilde{\rho}^*P_\bullet$. Proposition \ref{platification} follows from the next lemma. \end{proof} \begin{lemma}\label{base_platification} Let $X$ be a reduced scheme which is quasi-projective over a Noetherian affine scheme. Let $\sheaffont{A}$ be a sheaf of smooth proper connective quasi-coherent dg-algebras on $X$. Let $\sheaffont{N}$ be a discrete $\pi_1^*\sheaffont{A}$-module which is coherent on $\mathbb{G}_{m, X}^i$. Then there exists a birational blow-up $\phi: \tilde{X} \rightarrow X$ so that $\tilde{\phi}^*\sheaffont{N}$ is perfect over $\tilde{\phi}^*\pi_1^*\sheaffont{A}$ on $\mathbb{G}_{m, \tilde{X}}$ and is the restriction of a perfect complex over the pullback of $\sheaffont{A}$ to $\mathbb{A}^i_{\tilde{X}}$. \end{lemma} \begin{proof} Using Lemma \ref{twisted_extension}, extend $\sheaffont{N}$ from $\mathbb{G}_{m, X}^i$ to a coherent $\pi_2^*\sheaffont{A}$-module $\sheaffont{M}$ on $\mathbb{A}_X^i$. Using the ample family, choose a resolution in $\mathscr{O}_{\mathbb{A}_X^i}$-modules of the form \[ 0 \rightarrow \sheaffont{K} \rightarrow \sheaffont{F} \rightarrow \sheaffont{M} \rightarrow 0 \] where $\sheaffont{F}$ is a vector bundle and $\sheaffont{K}$ is the coherent kernel. As $X$ is reduced, $\sheaffont{K}$ is flat over some dense open set $U$ of $X$.
By platification par \'eclatement (see Theorem 5.2.2 of Raynaud--Gruson \cite{raynaud_gruson}), there is a $U$-admissible blow-up $\phi: \tilde{X} \rightarrow X$ so that the strict transform of $\sheaffont{K}$ along the pullback morphism $p: \mathbb{A}_{\tilde{X}}^i \rightarrow \mathbb{A}_X^i$ is flat over $\tilde{X}$. We now show the pullback $p^*\sheaffont{M}$ is perfect as a $p^*\pi_2^*\sheaffont{A}$-module. Let $j: \mathbb{A}_U^i \rightarrow \mathbb{A}_{\tilde{X}}^i$ be the inclusion of the open set and $Z$ the closed complement. For any sheaf of modules $\sheaffont{G}$ on $\mathbb{A}_{\tilde{X}}^i$, we let $\sheaffont{G}_Z$ denote the subsheaf of sections supported on $Z$. We have a short exact sequence natural in $\sheaffont{G}$ \[ 0 \rightarrow \sheaffont{G}_Z \rightarrow \sheaffont{G} \rightarrow j^{st}\sheaffont{G} \rightarrow 0. \] We also obtain the following exact sequence of sheaves of abelian groups via pullback \[ 0 \rightarrow \mathscr{T}or_1^{p^{-1}\mathscr{O}_{\mathbb{A}^i_X}}(p^{-1}\sheaffont{M}, \mathscr{O}_{\mathbb{A}^i_{\tilde{X}}}) \rightarrow p^*\sheaffont{K} \rightarrow p^*\sheaffont{F} \rightarrow p^*\sheaffont{M} \rightarrow 0. \] To make our notation clearer, we set $\sheaffont{T} = \mathscr{T}or_1^{ p^{-1}\mathscr{O}_{\mathbb{A}^i_X}}(p^{-1}\sheaffont{M}, \mathscr{O}_{\mathbb{A}^i_{\tilde{X}}})$.
We flesh both these exact sequences out into a (nonexact) commutative diagram of $p^{-1}\mathscr{O}_{\mathbb{A}^i_X}$-modules \[ \begin{tikzcd} & 0 \ar[d] & 0 \ar[d] & 0 \ar[d] & \\ 0 \ar[r] & \sheaffont{T}_Z \ar[r]\ar[d] & \sheaffont{T} \ar[d]\ar[r] & \ar[d] j^{st}\sheaffont{T} \ar[r] & 0 \\ 0 \ar[r] & (p^*\sheaffont{K})_Z \ar[r] \ar[d] & p^*\sheaffont{K} \ar[r] \ar[d] & j^{st}p^*\sheaffont{K} \ar[r] \ar[d] & 0\\ 0 \ar[r] & (p^*\sheaffont{F})_Z\ar[r] \ar[d] & p^*\sheaffont{F} \ar[r] \ar[d] &j^{st}p^*\sheaffont{F} \ar[r] \ar[d] & 0\\ 0 \ar[r] &(p^*\sheaffont{M})_Z \ar[r] \ar[d] & p^*\sheaffont{M} \ar[r] \ar[d] & j^{st}p^*\sheaffont{M} \ar[r] \ar[d] & 0\\ & 0 & 0 & 0 & \end{tikzcd}. \] We observe that every row and the middle column is exact. The first map in the left column is an injection and the last map in the right column is a surjection. Since $p^*\sheaffont{F}$ is flat, we have $(p^*\sheaffont{F})_Z = 0$. This induces a lifting of the injection \[ \begin{tikzcd} \sheaffont{T}_Z \ar[d] \ar[r] & \sheaffont{T} \ar[d] \\ (p^*\sheaffont{K})_Z \ar[r] \ar[ur, dashed] & p^*\sheaffont{K} \end{tikzcd}. \] We finish the proof by showing $j^*\mathscr{T}or_1^{p^{-1}\mathscr{O}_{\mathbb{A}^i_X}}(p^{-1}\sheaffont{M}, \mathscr{O}_{\mathbb{A}^i_{\tilde{X}}}) = 0$. Since $j: \mathbb{A}^i_U \rightarrow \mathbb{A}^i_{\tilde{X}}$ is flat, the sheaf is isomorphic to $\mathscr{T}or_1^{\mathbb{A}^i_U}(j^*p^{-1}\sheaffont{M}, j^*\mathscr{O}_{\mathbb{A}^i_{\tilde{X}}})$ and $j^*\mathscr{O}_{\mathbb{A}^i_{\tilde{X}}} \cong \mathscr{O}_{\mathbb{A}^i_U}$. 
Our big diagram can be rewritten as \[ \begin{tikzcd} & 0 \ar[d] & 0 \ar[d] & 0 \ar[d] & \\ 0 \ar[r] & \sheaffont{T}_Z \ar[r, "\cong"]\ar[d, "\cong"] & \sheaffont{T} \ar[d]\ar[r] & \ar[d] 0 \ar[r] & 0 \\ 0 \ar[r] & (p^*\sheaffont{K})_Z \ar[r] \ar[d] & p^*\sheaffont{K} \ar[r] \ar[d] & j^{st}p^*\sheaffont{K} \ar[r] \ar[d] & 0\\ 0 \ar[r] & 0\ar[r] \ar[d] & p^*\sheaffont{F} \ar[r] \ar[d] &j^{st}p^*\sheaffont{F} \ar[r] \ar[d] & 0\\ 0 \ar[r] &(p^*\sheaffont{M})_Z \ar[r] \ar[d] & p^*\sheaffont{M} \ar[r] \ar[d] & j^{st}p^*\sheaffont{M} \ar[r] \ar[d] & 0\\ & 0 & 0 & 0 & \end{tikzcd} \] and we can glue together to get a flat resolution of $p^*\sheaffont{M}$ as an $\mathscr{O}_{\mathbb{A}^i_{\tilde{X}}}$-module \[ 0 \rightarrow j^{st}p^*\sheaffont{K} \rightarrow p^*\sheaffont{F} \rightarrow p^*\sheaffont{M} \rightarrow 0 \] implying globally finite Tor-amplitude. It remains to show the complex is pseudo-coherent. This follows since $\mathbb{A}^i_{\tilde{X}}$ is Noetherian and $p^*\sheaffont{M}$ is coherent. Since $p^*\pi_2^*\sheaffont{A}$ is a sheaf of smooth quasi-coherent dg-algebras over $\mathscr{O}_{\mathbb{A}^i_{\tilde{X}}}$, the complex $p^*\sheaffont{M}$ is perfect over $p^*\pi_2^*\sheaffont{A}$ by Lemma \ref{perfection_transference}. By commutativity, $p^*\sheaffont{M}$ restricts to $\tilde{\phi}^*\sheaffont{N}$ on $\mathbb{G}_{m, \tilde{X}}^i$. This completes the proof of Proposition \ref{platification}. \end{proof} We will need a relative version of Proposition \ref{platification}. \begin{corollary} \label{relative_platification} Let $f: S \rightarrow X$ be a smooth quasi-projective morphism of Noetherian schemes with $X$ reduced and quasi-projective over a Noetherian base ring. Let $\sheaffont{A}$ be a sheaf of smooth proper connective quasi-coherent dg-algebras over $X$ and consider a negative twisted $K$-theory class $\gamma \in K_{i}^\sheaffont{A}(S)$ for $i < 0$. 
Then there exists a projective birational morphism $\rho: \tilde{X} \rightarrow X$ such that, under the pullback of the pullback morphism, $\rho_S^*\gamma = 0$. \end{corollary} \begin{proof} We will briefly check that we can run the induction argument in the proof of Proposition \ref{platification}. The assumptions of this corollary are invariant under pullback along projective birational morphisms $\tilde{X} \rightarrow X$. We need to ensure we can select projective birational morphisms to our base $X$. Lemma 6.5 of Kerz--Strunk--Tamme \cite{kerz_strunk_tamme} is stated in a relative setting. The proof also relies on platification par \'{e}clatement. This can still be applied in our relative setting as $X$ is reduced (see Proposition 5 of Kerz--Strunk \cite{kerz_strunk}). \end{proof} \section{Twisted Weibel's conjecture} We now prove Theorem \ref{main_theorem} and an extension across a smooth affine morphism. We begin with the base induction step for both theorems. Kerz--Strunk \cite{kerz_strunk} use a sheaf cohomology result of Grothendieck along with a spectral sequence argument to show vanishing for a Zariski sheaf of spectra can be reduced to the setting of local ring. \begin{proposition}\label{base_case} Let $R$ be a regular Noetherian ring of Krull dimension $d$ over a local Artinian ring $k$. Let $\sheaffont{A}$ be a smooth proper connective dg-algebra over $R$, then $K^\sheaffont{A}_i(R) = 0$ for $i < 0$. \end{proposition} \begin{proof} By Proposition \ref{relative_reduction_invariance}, we may assume $k$ is a field. Proposition 5.4 of \cite{raedschelders_stevenson} shows that the t-structure on $D(\sheaffont{A})$ restricts to a t-structure on $\textnormal{Perf}(\sheaffont{A})$, which is observably bounded. The heart is the category of finitely-generated modules over $ H_0(\sheaffont{A})$. As $H_0(\sheaffont{A})$ is finite-dimensional over $k$, this is a Noetherian abelian category. 
By Theorem 1.2 of Antieau--Gepner--Heller \cite{antieau_gepner_heller}, the negative $K$-theory vanishes. \end{proof} \begin{customthm}{1.1} Let $X$ be a Noetherian scheme of Krull dimension $d$ and $\sheaffont{A}$ a sheaf of smooth proper connective quasi-coherent dg-algebras on $X$, then $K^\sheaffont{A}_{-i}(X)$ vanishes for $i > d$. \end{customthm} \begin{proof} Proposition \ref{base_case} covers the base case so assume $d > 0$. By the Kerz--Strunk spectral sequence argument and Corollary \ref{scheme_reduction_invariance}, we may assume $X$ is a Noetherian reduced affine scheme. Choose a negative $K^\sheaffont{A}$-theory class $\gamma \in K^\sheaffont{A}_{-i}(X)$ for $i \geq \dim X + 1$. Using Proposition \ref{platification}, construct a projective birational morphism that kills $\gamma$ and extend it to an abstract blow-up square \[ \begin{tikzcd} E \ar[d] \ar[r] & \tilde{X} \ar[d] \\ Y \ar[r] & X \end{tikzcd}. \] By \cite[Theorem A.8]{land_tamme}, there is a Mayer--Vietoris exact sequence of pro-groups \[ \begin{tikzcd}[column sep=small] \cdots \ar[r] & \{ K_{-i+1}^\sheaffont{A}(E_n)\} \ar[r] & K_{-i}^\sheaffont{A}(X) \ar[r] & K_{-i}^\sheaffont{A}(\tilde{X}) \oplus \{ K_{-i}^\sheaffont{A}(Y_n)\} \ar[r]& \{ K_{-i}^\sheaffont{A}(E_n)\} \ar[r] & \cdots \end{tikzcd}. \] When $i \geq \dim X + 1$, by induction every nonconstant pro-group vanishes and $K_{-i}^\sheaffont{A}(X) \cong K_{-i}^\sheaffont{A}(\tilde{X})$ showing $\gamma = 0$. \end{proof} By \cite[Theorem 3.15]{antieau_gepner}, we recover Weibel's vanishing for discrete Azumaya algebras. \begin{corollary} For $X$ a Noetherian $d$-dimensional scheme and $\sheaffont{A}$ a quasi-coherent sheaf of discrete Azumaya algebras, then $K_{-i}^\sheaffont{A}(X) = 0$ for $i > d$. \end{corollary} The next result nearly covers the K-regularity portion of Weibel's conjecture, but we are missing the boundary case $K_{-d}^\sheaffont{A}(X) \cong K_{-d}^\sheaffont{A}(\mathbb{A}^n_X)$.
\begin{theorem} \label{relative_main_theorem} Let $f: S \rightarrow X$ be a smooth affine morphism of Noetherian schemes and $\sheaffont{A}$ a sheaf of smooth proper connective quasi-coherent dg-algebras on $X$. Then $K_{-i}^\sheaffont{A}(f) = 0$ for $i > \dim X + 1$. \end{theorem} \begin{proof} The base case is covered by Proposition \ref{base_case} and our reductions are analogous to those in the proof of Theorem \ref{main_theorem}. So assume $X$ is a Noetherian reduced affine scheme of dimension $d$. Choose $\gamma \in K_{-i}^\sheaffont{A}(S)$ with $i > d$. Using Corollary \ref{relative_platification}, construct a projective birational morphism $\rho: \tilde{X} \rightarrow X$ that kills $\gamma$. We then build a morphism of abstract blow-up squares \[ \begin{tikzcd}[column sep=small, row sep=small] D \ar[dd] \ar[rr] \ar[rd] & & \tilde{S} \ar[dd]\ar[rd] & \\ & E \ar[rr] \ar[dd] & & \tilde{X} \ar[dd] \\ V \ar[rr] \ar[rd] & & S \ar[rd] &\\ & Y \ar[rr] & & X \end{tikzcd} \] By \cite[Theorem A.8]{land_tamme}, we again get a long exact sequence of pro-groups corresponding to the back square \[ \begin{tikzcd}[column sep=small] \cdots \ar[r] & \{ K_{-i+1}^\sheaffont{A}(D_n)\} \ar[r] & K_{-i}^\sheaffont{A}(S) \ar[r] & K_{-i}^\sheaffont{A}(\tilde{S}) \oplus \{ K_{-i}^\sheaffont{A}(V_n)\} \ar[r] & \{ K_{-i}^\sheaffont{A}(D_n)\} \ar[r] & \cdots \end{tikzcd}. \] When $i \geq \dim X +1$, every nonconstant pro-group vanishes by induction and we have an isomorphism $K_{-i}^\sheaffont{A}(S) \cong K_{-i}^\sheaffont{A}(\tilde{S})$ implying $\gamma = 0$. \end{proof} \begin{rem} \label{counter_example} The conditions on the morphism in Corollary \ref{relative_platification} are more general than those of Theorem \ref{relative_main_theorem}. We might hope to generalize Theorem \ref{relative_main_theorem} to a smooth quasi-projective or smooth projective map of Noetherian schemes. Although the induction step is present, both base cases fail.
Consider the descent spectral sequence \[ E_2^{p, q} := H^p(X, \tilde{K_{q}}) \Rightarrow K_{q-p}(X) \text{ with }d_2 = (2, 1) \] If $\dim X \leq 3$, then \[ E_3^{2,1} = E_{\infty}^{2, 1} = \textnormal{coker}\,(H^0(X, \mathbb{Z}) \xrightarrow{d_2} H^2(X, \mathscr{O}_X^*)) \] contributes to $K_{-1}(X)$. The differential is zero as the edge morphism \[ \begin{tikzcd} K_0(X) \ar[r, two heads, "rank"] & E_\infty^{0, 0} \end{tikzcd} \] identifies $E_\infty^{0,0}$ with the rank component of $K_0$, implying $E_2^{0,0} = E_\infty^{0, 0}$. We now construct a family of examples for schemes $X$ with nontrivial $H^2(X, \mathscr{O}_X^*)$. Let $X_{red}$ be quasi-projective smooth over a field $k$ and form the cartesian diagram \[ \begin{tikzcd} X \ar[r, "f"] \ar[d] & X_{red} \ar[d] \\ \textnormal{Spec}\, (k[t]/(t^2)) \ar[r] & \textnormal{Spec}\, k \end{tikzcd}. \] The pullback $X$ will be our counter-example. We have an isomorphism \[ \mathscr{O}_{X}^* \cong g_*(\mathscr{O}_{X_{red}}^*) \oplus g_*(\mathscr{O}_{X_{red}}) \] of sheaves of abelian groups on $X$ with $g: X_{red} \rightarrow X$ the pullback of the reduction morphism $\textnormal{Spec}\, k \rightarrow \textnormal{Spec}\, k[t]/(t^2)$. Locally, $(R[t]/(t^2))^\times$ consists of all elements of the form $u + v\cdot t$ where $u \in R^\times$ and $v \in R$. Sheaf cohomology commutes with coproducts so this turns into an isomorphism \[ H^2(X, \mathscr{O}_X^*) \cong H^2(X, g_*(\mathscr{O}_{X_{red}}^*)) \oplus H^2(X, g_*(\mathscr{O}_{X_{red}})) \cong H^2(X_{red}, \mathscr{O}_{X_{red}}^*) \oplus H^2(X_{red}, \mathscr{O}_{X_{red}}). \] Now the problem reduces to finding a surface or $3$-fold $X_{red}$ with nontrivial degree $2$ sheaf cohomology. Take a smooth quartic in $\mathbb{P}^3_k$ for a counter-example which is smooth and proper. Here is a counter-example which is smooth and quasi-affine. Let $(A, \mathfrak{m})$ be a 3-dimensional local ring which is smooth over a field $k$. 
Take $X = \textnormal{Spec}\, A \setminus \{\mathfrak{m}\}$ to be the punctured spectrum. Then $H^2(X, \mathscr{O}_X) \cong H^3_\mathfrak{m}(A)$, which is the injective hull of the residue field $A/\mathfrak{m}$. \end{rem} \end{document}
\begin{document} \pagestyle{empty} \title{Multiplet classification for SU(n,n)} \author{V.K.~Dobrev} \address{Institute of Nuclear Research and Nuclear Energy, Bulgarian Academy of Sciences, 72 Tsarigradsko Chaussee, 1784 Sofia, Bulgaria} \begin{abstract} In the present paper we review our project of systematic construction of invariant differential operators on the example of the non-compact algebras $su(n,n)$ for $n=2,3,4$. We give explicitly the main multiplets of indecomposable elementary representations and some reduced multiplets. We give explicitly the minimal representations. Due to the recently established parabolic relations the multiplet classification results are valid also for the algebras $sl(2n,\mathbb{R})$ and when $n=2k$ for the algebras $su^*(4k)$ with suitably chosen maximal parabolic subalgebras. \end{abstract} \section{Introduction} \blu{Invariant differential operators} play a very important role in the description of physical symmetries - starting from the early occurrences in the Maxwell, d'Alembert, Dirac, equations, to the latest applications of (super-)differential operators in conformal field theory, supergravity and string theory. Thus, it is important for the applications in physics to study systematically such operators. In a recent paper \cite{Dobinv} we started the systematic explicit construction of invariant differential operators. We gave an explicit description of the building blocks, namely, the \blu{parabolic subgroups and subalgebras} from which the necessary representations are induced. Thus we have set the stage for study of different non-compact groups. Since the study and description of detailed classification should be done group by group we had to decide which groups to study. Since the most widely used algebras are the \blu{conformal algebras} ~\red{so(n,2)}~ in $n$-dimensional Minkowski space-time we concentrated on a class that shares most of their properties.
This class consists of: $$ so(n,2), ~~sp(n,\mathbb{R}), ~~su(n,n), ~~so^*(4n), ~~E_{7(-25)} $$ the corresponding analogs of Minkowski space-time $V$ being: $$\mathbb{R}^{n-1,1}, ~~{\rm Sym}(n,\mathbb{R}), ~~{\rm Herm}(n,\mathbb{C}), ~~{\rm Herm}(n,\mathbb{Q}), ~~ {\rm Herm}(3,\mathbb{O})$$ involving the four division algebras ~$\mathbb{R},\mathbb{C},\mathbb{Q},\mathbb{O}$. In view of applications to physics, we proposed to call these algebras '\blu{conformal Lie algebras}' (or groups) \cite{Dobeseven}. We have started the study of the above class in the framework of the present approach in the cases: ~$so(n,2)$, ~$su(n,n)$, ~$sp(n,\mathbb{R})$, ~$E_{7(-25)}$, cf. \cite{Dobeseven,Dobsunn,Dobspn,Dobparab}. Lately, we discovered an efficient way to extend our considerations beyond this class introducing the notion of 'parabolically related non-compact semisimple Lie algebras' \cite{Dobparab}. \noindent $\bullet~$ {\it Definition:} ~~~Let ~${\cal G},{\cal G}'$~ be two non-compact semisimple Lie algebras with the same complexification ~${\cal G}^{\mathbb{C}} \cong {\cal G}'^{\mathbb{C}}$.
We call them ~\red{parabolically related}~ if they have parabolic subalgebras ~${\cal P} = {\cal M} \oplus {\cal A} \oplus {\cal N}$, ~${\cal P}' = {\cal M}' \oplus {\cal A}' \oplus {\cal N}'$, such that: ~${\cal M}^{\mathbb{C}} ~\cong~ {\cal M}'^{\mathbb{C}}$~ ($\Rightarrow {\cal P}^{\mathbb{C}} ~\cong~ {\cal P}'^{\mathbb{C}}$).{$\diamondsuit$} Certainly, there are many such parabolic relationships for any given algebra ~${\cal G}$. Furthermore, two algebras ~${\cal G},{\cal G}'$~ may be parabolically related with different parabolic subalgebras. In the present paper we review our results on the case of ~$su(n,n)$, cf. \cite{Dobsunn,Dobsutt,Dobsuff}. Due to the parabolic relationships these would be valid also for ~$sl(2n,\mathbb{R})$, and if ~$n=2k$~ also for ~$su^*(4k)$. \section{Preliminaries} Let $G$ be a semisimple non-compact Lie group, and $K$ a maximal compact subgroup of $G$. Then we have an {\it Iwasawa decomposition} ~$G=KA_0N_0$, where ~$A_0$~ is an Abelian simply connected vector subgroup of ~$G$, ~$N_0$~ is a nilpotent simply connected subgroup of ~$G$~ preserved by the action of ~$A_0$. Further, let $M_0$ be the centralizer of $A_0$ in $K$. Then the subgroup ~$P_0 ~=~ M_0 A_0 N_0$~ is a {\it minimal parabolic subgroup} of $G$. A {\it parabolic subgroup} ~$P ~=~ M' A' N'$~ is any subgroup of $G$ which contains a minimal parabolic subgroup.
Further, let ~${\cal G},{\cal K},{\cal P},{\cal M},{\cal A},{\cal N}$~ denote the Lie algebras of ~$G,K,P,M,A,N$, resp. For our purposes we need to restrict to ~{\it maximal ~ parabolic subgroups} ~$P=MAN$, i.e. ${\rm rank}\, A =1$, resp. to ~{\it maximal ~ parabolic subalgebras} ~${\cal P} = {\cal M} \oplus {\cal A} \oplus {\cal N}$~ with ~$\dim\, {\cal A}=1$. Let ~$\nu$~ be a (non-unitary) character of ~$A$, ~$\nu\in{\cal A}^*$, parameterized by a real number ~{\it $d$}, called the {\it conformal weight} or energy. Further, let ~ $\mu$ ~ fix a discrete series representation ~$D^\mu$~ of $M$ on the Hilbert space ~$V_\mu\,$, or the finite-dimensional (non-unitary) representation of $M$ with the same Casimirs. We call the induced representation ~$\chi =$ Ind$^G_{P}(\mu\otimes\nu \otimes 1)$~ an ~\blu{\it elementary representation} of $G$ \cite{DMPPT}. (These are called {\it generalized principal series representations} (or {\it limits thereof}) in \cite{Knapp}.) Their spaces of functions are: $$ {\cal C}_\chi ~=~ \{ {\cal F} \in C^\infty(G,V_\mu) ~ \vert ~ {\cal F} (gman) ~=~ e^{-\nu(H)} \cdot D^\mu(m^{-1})\, {\cal F} (g) \} $$ where ~$a= \exp(H)\in A'$, ~$H\in{\cal A}'\,$, ~$m\in M'$, ~$n\in N'$. The representation action is the \blu{left regular action}: $$ ({\cal T}^\chi(g){\cal F}) (g') ~=~ {\cal F} (g^{-1}g') ~, \quad g,g'\in G\ .$$ ERs are important due to the following fundamental result:\\ \noindent\blu{Theorem \cite{Lan,KnZu}:} Every irreducible admissible representation of G is equivalent to a subrepresentation of an ER.
\noindent $\bullet~$ An important ingredient in our considerations are the ~\blu{\it highest/lowest weight representations}~ of ~${\cal G}^{\mathbb{C}}$. These can be realized as (factor-modules of) Verma modules ~$V^\Lambda$~ over ~${\cal G}^{\mathbb{C}}$, where ~$\Lambda\in ({\cal H}^{\mathbb{C}})^*$, ~${\cal H}^{\mathbb{C}}$ is a Cartan subalgebra of ~${\cal G}^{\mathbb{C}}$, weight ~$\Lambda = \Lambda(\chi)$~ is determined uniquely from $\chi$ \cite{Dob}. Actually, since our ERs may be induced from finite-dimensional representations of ~${\cal M}$~ (or their limits) the Verma modules are always reducible. Thus, it is more convenient to use ~\blu{\it generalized Verma modules} ~$\tilde{V}^\Lambda$~ such that the role of the highest/lowest weight vector $v_0$ is taken by the (finite-dimensional) space ~$V_\mu\,v_0\,$. For the generalized Verma modules (GVMs) the reducibility is controlled only by the value of the conformal weight $d$. Relatedly, for the intertwining differential operators only the reducibility w.r.t. non-compact roots is essential. \noindent $\bullet~$ One main ingredient of our approach is as follows. We group the (reducible) ERs with the same Casimirs in sets called \red{~{\it multiplets}} \cite{Dobmul}. The multiplet corresponding to fixed values of the Casimirs may be depicted as a connected graph, the \blu{vertices} of which correspond to the reducible ERs and the \blu{lines (arrows)} between the vertices correspond to intertwining operators. The explicit parametrization of the multiplets and of their ERs is important for understanding of the situation. In fact, the multiplets contain explicitly all the data necessary to construct the intertwining differential operators.
Actually, the data for each intertwining differential operator consists of the pair ~$(\beta,m)$, where $\beta$ is a (non-compact) positive root of ~${\cal G}^{\mathbb{C}}$, ~$m\in\mathbb{N}$, such that the \blu{BGG Verma module reducibility condition} (for highest weight modules) is fulfilled \cite{BGG}: $$ (\Lambda+\rho, \beta^\vee ) ~=~ m \ , \quad \beta^\vee \equiv 2 \beta /(\beta,\beta) \ $$ $\rho$ is half the sum of the positive roots of ~${\cal G}^{\mathbb{C}}$. When the above holds then the Verma module with shifted weight ~$V^{\Lambda-m\beta}$ (or ~$\tilde{V}^{\Lambda-m\beta}$ ~ for GVM and $\beta$ non-compact) is embedded in the Verma module ~$V^{\Lambda}$ (or ~$\tilde{V}^{\Lambda}$). This embedding is realized by a singular vector ~$v_s$~ determined by a polynomial ~${\cal P}_{m,\beta}({\cal G}^-)$~ in the universal enveloping algebra ~$(U({\cal G}_-))\ v_0\,$, ~${\cal G}^-$~ is the subalgebra of ~${\cal G}^{\mathbb{C}}$ generated by the negative root generators \cite{Dix}. More explicitly, \cite{Dob}, ~$v^s_{m,\beta} = {\cal P}_{m,\beta}\, v_0$ (or ~$v^s_{m,\beta} = {\cal P}_{m,\beta}\, V_\mu\,v_0$ for GVMs).
Then there exists \cite{Dob} an \red{intertwining differential operator} $$ {\cal D}_{m,\beta} ~:~ {\cal C}_{\chi(\Lambda)} ~\longrightarrow ~ {\cal C}_{\chi(\Lambda-m\beta)} $$ given explicitly by: $$ {\cal D}_{m,\beta} ~=~ {\cal P}_{m,\beta}(\widehat{{\cal G}^-}) $$ where ~$\widehat{{\cal G}^-}$~ denotes the \blu{right action} on the functions ~${\cal F}$. In most of these situations the invariant operator ~${\cal D}_{m,\beta}$~ has a non-trivial invariant kernel in which a subrepresentation of ${\cal G}$ is realized. Thus, studying the equations with trivial RHS: $$ {\cal D}_{m,\beta}\ f ~=~ 0 \ , \qquad f \in {\cal C}_{\chi(\Lambda)} \ ,$$ is also very important. For example, in many physical applications in the case of first order differential operators, i.e., for ~$m=m_\beta = 1$, these equations are called ~\blu{conservation laws}, and the elements ~$f\in \ker {\cal D}_{m,\beta}$~ are called ~\blu{conserved currents}. Below in our exposition we shall use the so-called Dynkin labels: $$ m_i ~\equiv~ (\Lambda+\rho,\alpha^\vee_i) \ , \quad i=1,\ldots,n, $$ where ~$\Lambda = \Lambda(\chi)$, ~$\rho$ is half the sum of the positive roots of ~${\cal G}^{\mathbb{C}}$. We shall use also the so-called Harish-Chandra parameters \cite{Har}: $$ m_\beta \equiv (\Lambda+\rho, \beta )\ , $$ where $\beta$ is any positive root of ${\cal G}^{\mathbb{C}}$. These parameters are redundant, since they are expressed in terms of the Dynkin labels, however, some statements are best formulated in their terms.
(Clearly, both the Dynkin labels and Harish-Chandra parameters have their origin in the BGG reducibility condition.) \section{The Lie algebra \blu{$su(n,n)$} and parabolically related} \noindent Let ~${\cal G} ~=~ su(n,n)$, ~$n\geq 2$. The maximal compact subgroup is ~${\cal K} \cong u(1)\oplus su(n)\oplus su(n)$, while ~${\cal M} = sl(n,\mathbb{C})_\mathbb{R}\,$. The number of ERs in the main multiplets is equal to \cite{Dobparab} $$\vert W({\cal G}^{\mathbb{C}},{\cal H}^{\mathbb{C}})\vert\, /\, \vert W({\cal M}^{\mathbb{C}},{\cal H}_m^{\mathbb{C}})\vert = \binom{2n}{n}$$ The signature of the ERs of ${\cal G}$ is: \begin{align} \chi &= \{ n_1 , \ldots, n_{n-1} , n_{n+1}, \ldots, n_{2n-1} ;\ c\, \} \ , \quad n_j \in \mathbb{N}\ , \quad c = d - {\textstyle{\frac{1}{2}}} n^2 \end{align} The restricted Weyl reflection is given by the Knapp--Stein integral operators \cite{KnSt}: \begin{align}\label{knast} & G_{KS} : {\cal C}_\chi \longrightarrow {\cal C}_{\chi'} \ ,\qquad \chi' ~=~ \{ (n_1,\ldots,n_{n-1},n_{n+1},\ldots,n_{2n-1})^* ; \ -c \, \} \ , \\ & (n_1,\ldots,n_{n-1},n_{n+1},\ldots,n_{2n-1})^* \doteq (n_{n+1},\ldots,n_{2n-1},n_1,\ldots,n_{n-1}) \nonumber\end{align} \noindent{\bf Multiplets} Below we give the multiplets for $su(n,n)$ for $n=2,3,4$.
They are valid also for ~$sl(2n,\mathbb{R})$~ with ~${\cal M}$-factor ~$sl(n,\mathbb{R}) \oplus sl(n,\mathbb{R})$, and when $n=2k$ these are multiplets also for the parabolically related algebra ~$su^*(4k)$~ with ~${\cal M}$-factor $su^*(2k) \oplus su^*(2k)$. There are several types of multiplets: the main type, which contains maximal number of ERs/GVMs, the finite-dimensional and the discrete series representations, and many reduced types of multiplets. The multiplets of the main type are in 1-to-1 correspondence with the finite-dimensional irreps of $su(n,n)$, i.e., they will be labelled by the $2n-1$ positive Dynkin labels $m_i\in\mathbb{N}$. \section{Multiplets of ~SU(2,2), SL(4,$\mathbb{R}$) and SU$^*$(4)} The main multiplet contains six ERs whose signatures can be given in the following pair-wise manner: \begin{align}\label{tabl} \chi_0^\pm &= \{ ( m_1, m_3)^\pm ; \pm \frac{(m_1 + 2m_2 + m_3)}{2} \} \\ \chi'^\pm &= \{ ( m_{12}, m_{23})^\pm ; \pm {\textstyle{\frac{1}{2}}} (m_1 + m_3) \} \nonumber\\ \chi''^\pm &= \{ ( m_{2}, m_{13})^\pm ; \pm {\textstyle{\frac{1}{2}}} (m_3 - m_1) \} \nonumber \end{align} where we have used for the numbers $m_\beta = (\Lambda(\chi)+\rho,\beta)$ the same compact notation as for the roots $\beta$, and \begin{align}\label{conutw} & (n_1,n_{3})^- = (n_1,n_{3})\ , \qquad (n_1,n_{3})^+ = (n_1,n_{3})^* = (n_3,n_{1}) \end{align} These multiplets were given first for ~$su^*(4)$~ \cite{DoPe}. Obviously, the pairs in \eqref{tabl} are related by Knapp--Stein integral operators, i.e., \begin{equation}\label{ackin} G_{KS} : {\cal C}_{\chi^\mp} \longrightarrow {\cal C}_{\chi^\pm} \end{equation} The multiplets are given explicitly in Fig. 1, where we use the notation: $\Lambda^\pm = \Lambda(\chi^\pm)$.
Each intertwining differential operator is represented by an arrow accompanied by a symbol $i_{jk}$ encoding the root $\alpha_{jk}$ and the number $m_{\alpha_{jk}}$ which is involved in the BGG criterion. This notation is used to save space, but it can be used due to the fact that only intertwining differential operators which are non-composite are displayed, and that the data $\beta,m_\beta$, which is involved in the embedding $V^\Lambda \longrightarrow V^{\Lambda-m_\beta\beta}$, turns out to involve only the $m_i$ corresponding to simple roots, i.e., for each $\beta,m_\beta$ there exists $i = i(\beta,m_\beta,\Lambda)\in \{ 1,\ldots,2n-1\}$, such that $m_\beta=m_i$. Hence the data $\alpha_{jk}$, $m_{\alpha_{jk}}$ is represented by $i_{jk}$ on the arrows.

\vskip 5mm
\fig{}{diag-su22.eps}{10cm}

The pairs $\Lambda^\pm$ are symmetric w.r.t. the bullet in the middle of the figure --- this represents the Weyl symmetry realized by the Knapp--Stein operators. Matters are arranged so that in every multiplet only the ER with signature $\chi_0^-$ contains a finite-dimen\-sional nonunitary subrepresentation in a finite-dimen\-sional subspace $\mathcal{E}$. The latter corresponds to the finite-dimensional irrep of $\mathcal{G}$ with signature $\{ m_1 , m_2 , m_3 \}$ of dimension: $m_1m_2m_3 m_{12}m_{23}m_{13}/6$. The subspace $\mathcal{E}$ is annihilated by the operator $G^+$, and is the image of the operator $G^-$. The subspace $\mathcal{E}$ is annihilated also by the intertwining differential operator acting from $\chi^-$ to $\chi'^-$. When all $m_i=1$ then $\dim \mathcal{E} = 1$, and in that case $\mathcal{E}$ is also the trivial one-dimensional UIR of the whole algebra $\mathcal{G}$. Furthermore in that case the conformal weight is zero: $d=2+c=2-{\textstyle{\frac{1}{2}}}(m_1+2m_2+m_3)\vert_{m_i=1}=0$.
In the conjugate ER ~$\chi_0^+$~ there is a unitary subrepresentation in an infinite-dimen\-sional subspace $\mathcal{D}$. It is annihilated by the operator $G^-$, and is the image of the operator $G^+$. All the above is valid also for the algebras ~$sl(4,\mathbb{R}) \cong so(3,3)$~ and ~$su^*(4) \cong so(5,1)$. However, the latter two do not have discrete series representations. On the other hand the algebra ~$su(2,2) \cong so(4,2)$~ has discrete series representations and furthermore highest/lowest weight series representations. Thus, in the case of ~$su(2,2)$~ the ER $\chi_0^+$ contains both the holomorphic discrete series representation and the conjugate anti-holomorphic discrete series. The direct sum of the latter two is realized in the invariant subspace $\mathcal{D}$ of the ER $\chi_0^+$. Note that the corresponding lowest weight GVM is infinitesimally equivalent only to the holomorphic discrete series, while the conjugate highest weight GVM is infinitesimally equivalent to the anti-holomorphic discrete series.

The conformal weight of the ER $\chi_0^+$ has the restriction $d = 2+c = 2 + {\textstyle{\frac{1}{2}}}(m_1+2m_2+m_3) \geq 4$.

\noindent\textbf{Remark on SU(1,1).}
As we mentioned, the case $su(1,1)$ is well known --- it was studied 60 years ago in the isomorphic form $sl(2,\mathbb{R})$ by Gelfand et al.\ \cite{GeNa} and by Bargmann \cite{Barg}. In the current setting it was given in \cite{Dobpeds}. Here we shall only mention that the multiplets contain two ERs/GVMs (cf. $\binom{2n}{n}\vert_{n=1}=2$), and we can take as their representatives the pair $\Lambda^\pm$ and all statements that fit the setting are true.
In fact, the old results are prototypical for these pairs, which appear once for each algebra of the conformal type.~$\diamondsuit$

\noindent\textbf{Reduced multiplets.}
There are three types of reduced multiplets, $R_1$, $R_2$, $R_3$. Each of them contains two ERs/GVMs and may be obtained from the main multiplet by setting formally $m_1=0$, $m_2=0$, $m_3=0$, resp. The signatures are
\begin{eqnarray}\label{redu}
_1\chi^\pm &=& \{ ( m_{2}, m_{23})^\pm ; \pm {\textstyle{\frac{1}{2}}} m_3 \} \\
_2\chi^\pm &=& \{ ( m_{1}, m_{3})^\pm ; \pm {\textstyle{\frac{1}{2}}} (m_1 + m_3) \} \nonumber\\
_3\chi^\pm &=& \{ ( m_{12}, m_{2})^\pm ; \pm {\textstyle{\frac{1}{2}}} m_1 \} \nonumber
\end{eqnarray}
The above is valid for the parabolically related algebras ~$su(2,2), su^*(4), sl(4,\mathbb{R})$. For ~$su(2,2)$~ the ER $_2\chi^+$ contains the limits of the (anti)holomorphic discrete series representations. Its conformal weight has the restriction $d = 2 + {\textstyle{\frac{1}{2}}} (m_1 + m_3) \geq 3$. Actually, types $R_1$, $R_3$ are conjugated under the $^*$ operation (that is not the Weyl symmetry since the sign of $c$ is not changed). Finally, there is the reduced multiplet $R_{13}$ containing a single representation
\begin{equation}\label{redus}
\chi^s = \{ ( m, m) ; 0 \}
\end{equation}
This multiplet may be omitted from this classification since it contains no operators, but its importance was understood in the framework of conformal supersymmetry, i.e., in the multiplet classification for the superconformal algebra $su(2,2/N)$ given in \cite{DPm}. It turns out that the infinite multiplets of $su(2,2/N)$ have as building blocks all the multiplets of $su(2,2)$ mentioned above --- sextets, doublets and singlets.
\sigmaection{Multiplets of ~SU(3,3) and SL(6,$\muathbb{R}$)} {\nu}oindent The main multiplet contains 20 ERs/GVMs whose signatures can be given in the following pair-wise manner: \eqnn{tabltri} \chi_0^\pm &=& \{ ( m_1, m_2, m_4, m_5)^\pm ; \pm m_\rho \} \\ \chi_a^\pm &=& \{ ( m_1, m_{23}, m_{34}, m_5)^\pm ; \pm (m_\rho - m_3) \} {\nu}onumber\\ \chi_b^\pm &=& \{ ( m_{12}, m_{3}, m_{24}, m_5)^\pm ; \pm (m_\rho - m_{23}) \} {\nu}onumber\\ \chi_{b'}^\pm &=& \{ ( m_{1}, m_{24}, m_{3}, m_{45})^\pm ; \pm (m_\rho - m_{34}) \} {\nu}onumber\\ \chi_c^\pm &=& \{ ( m_{2}, m_{3}, m_{14}, m_5)^\pm ; \pm (m_\rho - m_{13}) \} {\nu}onumber\\ \chi_{c'}^\pm &=& \{ ( m_{12}, m_{34}, m_{23}, m_{45})^\pm ; \pm (m_\rho - m_{24}) \} {\nu}onumber\\ \chi_{c''}^\pm &=& \{ ( m_{1}, m_{25}, m_{3}, m_{4})^\pm ; \pm (m_\rho - m_{35}) \} {\nu}onumber\\ \chi_d^\pm &=& \{ ( m_{2}, m_{34}, m_{13}, m_{45})^\pm ; \pm (m_\rho - m_{14}) \} {\nu}onumber\\ \chi_{d'}^\pm &=& \{ ( m_{12}, m_{35}, m_{23}, m_{4})^\pm ; \pm (m_\rho - m_{25}) \} {\nu}onumber\\ \chi_e^\pm &=& \{ ( m_{2}, m_{35}, m_{13}, m_{4})^\pm ; \pm (m_\rho - m_{15}) \} {\nu}onumber \end{equation}a where ~$m_\rho = {\textstyle{\frac{1}{2}}}( m_1+ 2m_{2} + 3m_3 + 2m_{4} + m_{5})$. They are given in Fig. 2. \fig{}{diag-su33.eps}{9cm} All general facts that were stated in the $SU(2,2)$ case are valid also here, in particular, the special role of the pair $\chi^\pm_0 $. The finite-dimensional irreps $\ce$ of $su(3,3)$ or $sl(6,\muathbb{R})$ are sitting in the ERs $\chi^-_0 $ and have dimension as the UIRs of $SU(6)$. \betaigskip {\nu}oindent{\betaf Reduced multiplets.} \betareak There are five types of reduced multiplets, $R^3_a $, $a=1,\lambdadots,5$, which may be obtained from the main multiplet by setting formally $m_a=0$. Multiplets of type $R^3_4 $, $R^3_5 $, are conjugate to the multiplets of type $R^3_2 $, $R^3_1 $, resp., and are not shown. 
The reduced multiplets of type $R^3_3$ contain 14 ERs/GVMs with signatures:\eqnn{tabltrtr} \chi_0^\pm ~&=&~ \{\, ( m_1, m_2, m_4, m_5)^\pm\,;\,\pm m_\rho \,\} \\ \chi_b^\pm ~&=&~ \{\, ( m_{12}, 0, m_{24}, m_5)^\pm\,;\,\pm (m_\rho - m_{2}) \,\} \cr \chi_{b'}^\pm ~&=&~ \{\, ( m_{1}, m_{24}, 0, m_{45})^\pm\,;\,\pm (m_\rho - m_{4}) \,\} \cr \chi_c^\pm ~&=&~ \{\, ( m_{2}, 0, m_{14}, m_5)^\pm\,;\,\pm (m_\rho - m_{12}) \,\} \cr \chi_{c''}^\pm ~&=&~ \{\, ( m_{1}, m_{25}, 0, m_{4})^\pm\,;\,\pm (m_\rho - m_{45}) \,\} \cr \chi_d^\pm ~&=&~ \{\, ( m_{2}, m_{4}, m_{12}, m_{45})^\pm\,;\,\pm (m_\rho - m_{12,4}) = \mup (m_\rho - m_{2,45}) \,\} \cr \chi_e^\pm ~&=&~ \{\, ( m_{2}, m_{45}, m_{12}, m_{4})^\pm\,;\,\pm (m_\rho - m_{2,4}) = \pm {\textstyle{\frac{1}{2}}} (m_1+m_5) \,\} \ ,{\nu}onumber\end{equation}a here ~$m_\rho = {\textstyle{\frac{1}{2}}} (m_1 + 2m_2 + 2m_4 + m_5)$. These multiplets are given in Fig. 3. They may be called the main type of reduced multiplets since for ~$su(3,3)$~ in ~$\chi_0^+$~ are contained the limits of the (anti)holomorphic discrete series. \fig{}{diag-su33-3.eps}{9cm} The reduced multiplets of type $R^3_2$, resp., $R^3_1$, contain 14 ERs/GVMs each. These multiplets are given in Fig. 4, resp., Fig. 5~: \vskip 5mm \fig{}{diag-su33-2.eps}{8cm} \fig{}{diag-su33-1.eps}{8cm} {\nu}oindent{\betaf Further reduction of multiplets} There are further reductions of the multiplets denoted by ~$R^3_{ab}\,$, $a,b=1,\lambdadots,5$, $a< b$, which may be obtained from the main multiplet by setting formally ~$m_a=m_b=0$. From these ten reductions four (for $(a,b)=(1,2),(2,3),(3,4),(4,5)$) do not contain representations of physical interest, i.e., induced from finite-dimensional irreps of the ~${\cal M}} \def\cn{{\cal N}} \def\co{{\cal O}$~ subalgebra. From the others ~$R^3_{35}$~ and ~$R^3_{25}$~ are conjugated to ~$R^3_{13}$~ and ~$R^3_{14}\,$, resp., as explained above. Thus, we present only four types of multiplets. 
The reduced multiplets of type $R^3_{13}$ contain 10 ERs/GVMs with signatures: \eqnn{tabltrona} \chi_a^\pm ~&=&~ \{\, ( 0, m_2, m_4, m_5)^\pm\,;\,\pm m_\rho \,\} \\ \chi_b^\pm ~&=&~ \{\, ( m_{2}, 0, m_{2,4}, m_5)^\pm\,;\,\pm (m_\rho - m_{2}) \,\} \cr \chi_{b'}^\pm ~&=&~ \{\, ( 0, m_{2,4}, 0, m_{45})^\pm\,;\,\pm (m_\rho - m_{4})\,\} \cr \chi_{c}^\pm ~&=&~ \{\, ( 0, m_{2,45}, 0, m_{4})^\pm\,;\,\pm (m_\rho - m_{45}) \,\} \cr \chi_d^\pm ~&=&~ \{\, ( m_{2}, m_{4}, m_{2}, m_{45})^\pm\,;\,\pm (m_\rho - m_{2,4}) = \pm{\textstyle{\frac{1}{2}}} m_5\,\} \ ,{\nu}onumber \end{equation}a here ~$m_\rho = m_2 + m_4 + {\textstyle{\frac{1}{2}}} m_5$. The multiplets are given in Fig. 6. \vskip 5mm \fig{}{diag-su33-13.eps}{10cm} Note that the differential operator (of order $m_5$) from ~$\chi_d^-$~ to ~$\chi_d^+$~ is a degeneration of an integral Knapp-Stein operator. The reduced multiplets of type $R^3_{15}$ contain 10 ERs/GVMs with signatures: \eqnn{tabltronc} \chi_0^\pm ~&=&~ \{\, ( 0, m_2, m_4, 0)^\pm\,;\,\pm m_\rho \,\} \\ \chi_a^\pm ~&=&~ \{\, ( 0, m_{23}, m_{34}, 0)^\pm\,;\,\pm (m_\rho - m_3 )\,\} \cr \chi_b^\pm ~&=&~ \{\, ( m_{2}, m_{3}, m_{24}, 0)^\pm\,;\,\pm (m_\rho - m_{23}) \,\} \cr \chi_{b'}^\pm ~&=&~ \{\, ( 0, m_{24}, m_{3}, m_{4})^\pm\,;\,\pm (m_\rho - m_{34})\,\} \cr \chi_d^\pm ~&=&~ \{\, ( m_{2}, m_{34}, m_{23}, m_{4})^\pm\,;\,\pm (m_\rho - m_{24})=\pm {\textstyle{\frac{1}{2}}} m_3 \,\} \ , {\nu}onumber\end{equation}a here ~$m_\rho = m_2 + {\textstyle{3\over2}} m_3 + m_4$. The multiplets are given in Fig. 7. Here the differential operator (of order $m_3$) from ~$\chi_d^-$~ to ~$\chi_d^+$~ is a degeneration of an integral Knapp-Stein operator. 
\vskip 5mm
\fig{}{diag-su33-15.eps}{10cm}

The reduced multiplets of type $R^3_{14}$, $R^3_{24}$, contain 10 ERs/GVMs each, the corresponding multiplets being given below:

\fig{}{diag-su33-14.eps}{10cm}
\fig{}{diag-su33-24.eps}{10cm}

\noindent\textbf{Last reduction of multiplets.}
There are further reductions of the multiplets --- triple and quadruple, but only one triple reduction contains representations of physical interest. Namely, this is the multiplet $R^3_{135}\,$, which may be obtained from the main multiplet by setting formally $m_1=m_3=m_5=0$. It contains 7 ERs/GVMs with signatures:
\begin{eqnarray}\label{tabltrone}
&&\chi_a^\pm = \{\, ( 0, m_2, m_4, 0)^\pm\,;\,\pm m_\rho = \pm m_{2,4} \,\} \\
&&\chi_{b}^\pm = \{\, ( 0, m_{2,4}, 0, m_{4})^\pm\,;\,\pm m_{2}\,\} \nonumber\\
&&\chi_{b'}^\pm = \{\, ( m_{2}, 0, m_{2,4}, 0)^\pm\,;\,\pm m_{4}\,\} \nonumber\\
&&\chi_d = \{\, ( m_{2}, m_{4}, m_{2}, m_{4})\,;\, 0 \,\} \nonumber
\end{eqnarray}
The multiplets are given below:

\fig{}{diag-su33-135.eps}{10cm}

\noindent The representation $\chi_d$ is a singlet, not in a pair, since it has zero weight $c$, and the $\mathcal{M}$ entries are self-conjugate. It is placed in the middle of the figure as the bullet. That ER contains the ~\emph{minimal irreps}~ characterized by two positive integers which are denoted in this context as $m_2\,,m_4\,$. Each such irrep is the kernel of the two invariant differential operators $\mathcal{D}^{m_2}_{14}$ and $\mathcal{D}^{m_4}_{25}$, which are of order $m_2\,$, $m_4\,$, resp., corresponding to the noncompact roots $\alpha_{14}\,$, $\alpha_{25}\,$, resp.
\sigmaection{Multiplets of ~SU(4,4), SL(8,$\muathbb{R}$) and SU$^*$(8)} The main multiplet ~$R^4$~ contains 70 ERs/GVMs whose signatures can be given in the following pair-wise manner: \eqnn{tablf} &&\chi_0^\pm ~=~ \{\, ( m_1, m_2, m_3, m_5, m_6, m_7)^\pm\,;\,\pm m_\rho \,\} \\ &&\chi_{00}^\pm ~=~ \{\, ( m_1, m_2, m_{34}, m_{45}, m_6, m_7)^\pm\,;\,\pm ( m_\rho - m_{4}) \,\} \cr &&\chi_{10}^\pm ~=~ \{\, ( m_1, m_{23}, m_{4}, m_{35}, m_6, m_7)^\pm\,;\,\pm ( m_\rho - m_{34}) \,\} \cr &&\chi_{01}^\pm ~=~ \{\, ( m_1, m_{2}, m_{35}, m_{4}, m_{56}, m_7)^\pm\,;\,\pm ( m_\rho - m_{45}) \,\} \cr &&\chi_{20}^\pm ~=~ \{\, ( m_{12}, m_{3}, m_{4}, m_{25}, m_6, m_7)^\pm\,;\,\pm (m_\rho-m_{24})\,\} \cr &&\chi_{11}^\pm ~=~ \{\, ( m_1, m_{23}, m_{45}, m_{34}, m_{56}, m_7)^\pm\,;\,\pm (m_\rho -m_{35})\,\} \cr &&\chi_{02}^\pm ~=~ \{\, ( m_1, m_{2}, m_{36}, m_{4}, m_{5}, m_{67})^\pm\,;\,\pm (m_\rho-m_{46})\,\} \cr &&\chi_{30}^\pm ~=~ \{\, ( m_{2}, m_{3}, m_{4}, m_{15}, m_6, m_7)^\pm\,;\,\pm (m_\rho -m_{14} )\,\} \cr &&\chi_{21}^\pm ~=~ \{\, ( m_{12}, m_{3}, m_{45}, m_{24}, m_{56}, m_7)^\pm\,;\,\pm (m_\rho-m_{25})\,\} \cr &&\chi_{12}^\pm ~=~ \{\, ( m_1, m_{23}, m_{46}, m_{34}, m_{5}, m_{67})^\pm\,;\,\pm (m_\rho-m_{36}) \,\} \cr &&\chi_{03}^\pm ~=~ \{\, ( m_1, m_{2}, m_{37}, m_{4}, m_{5}, m_{6})^\pm\,;\,\pm (m_\rho-m_{47}) \,\} \cr &&\chi_{31}^\pm ~=~ \{\, ( m_{2}, m_{3}, m_{45}, m_{14}, m_{56}, m_7)^\pm\,;\,\pm (m_\rho -m_{15} ) \,\} \cr &&\chi_{22}^\pm ~=~ \{\, ( m_{12}, m_{3}, m_{46}, m_{24}, m_{5}, m_{67})^\pm\,;\,\pm (m_\rho-m_{26}) \,\} \cr &&\chi_{13}^\pm ~=~ \{\, ( m_1, m_{23}, m_{47}, m_{34}, m_{5}, m_{6})^\pm\,;\,\pm (m_\rho-m_{37})\,\} \cr &&\chi_{32}^\pm ~=~ \{\, ( m_{2}, m_{3}, m_{46}, m_{14}, m_{5}, m_{67})^\pm\,;\,\pm (m_\rho -m_{16}) \,\} \cr &&\chi_{23}^\pm ~=~ \{\, ( m_{12}, m_{3}, m_{47}, m_{24}, m_{5}, m_{6})^\pm\,;\,\pm (m_\rho-m_{27}) \,\} \cr &&\chi_{33}^\pm ~=~ \{\, ( m_{2}, m_{3}, m_{47}, m_{14}, m_{5}, m_{6})^\pm\,;\, \pm (m_\rho-m_{17}) \,\} \cr 
&&\chi_{00}'^\pm ~=~ \{\, ( m_1, m_{24}, m_{5}, m_{3}, m_{46}, m_7)^\pm\,;\,\pm (m_\rho-m_{35}-m_{4}) \,\} \cr &&\chi_{10}'^\pm ~=~ \{\, ( m_{12}, m_{34}, m_{5}, m_{23}, m_{46}, m_7)^\pm\,;\,\pm (m_\rho-m_{25}-m_{4}) \,\} \cr &&\chi_{01}'^\pm ~=~ \{\, ( m_1, m_{24}, m_{56}, m_{3}, m_{45}, m_{67})^\pm\,;\,\pm (m_\rho-m_{36}-m_{4}) \,\} \cr &&\chi_{20}'^\pm ~=~ \{\, ( m_{2}, m_{34}, m_{5}, m_{13}, m_{46}, m_7)^\pm\,;\,\pm (m_\rho -m_{15}-m_{4}) \,\} \cr &&\chi_{11}'^\pm ~=~ \{\, ( m_{12}, m_{34}, m_{56}, m_{23}, m_{45}, m_{67})^\pm\,;\,\pm (m_\rho-m_{26}-m_{4}) \,\} \cr &&\chi_{02}'^\pm ~=~ \{\, ( m_1, m_{24}, m_{57}, m_{3}, m_{45}, m_{6})^\pm\,;\,\pm (m_\rho-m_{37}-m_{4}) \,\} \cr &&\chi_{20}''^\pm ~=~ \{\, ( m_{13}, m_{4}, m_{5}, m_{2}, m_{36}, m_7)^\pm\,;\,\pm (m_\rho-m_{25}-m_{34}) \,\} \cr &&\chi_{21}''^\pm ~=~ \{\, ( m_{13}, m_{4}, m_{56}, m_{2}, m_{35}, m_{67})^\pm\,;\,\pm (m_\rho-m_{26}-m_{34}) \,\} \cr &&\chi_{12}''^\pm ~=~ \{\, ( m_{12}, m_{35}, m_{6}, m_{23}, m_{4}, m_{57})^\pm\,;\,\pm (m_\rho-m_{26}-m_{45}) \,\} \cr &&\chi_{02}''^\pm ~=~ \{\, ( m_{1}, m_{25}, m_{6}, m_{3}, m_{4}, m_{57})^\pm\,;\,\pm (m_\rho-m_{36}-m_{45}) \,\} \cr &&\chi_{30}'^\pm ~=~ \{\, ( m_{23}, m_{4}, m_{5}, m_{12}, m_{36}, m_7)^\pm\,;\,\pm (m_\rho -m_{15}-m_{34}) \,\} \cr &&\chi_{21}'^\pm ~=~ \{\, ( m_{2}, m_{34}, m_{56}, m_{13}, m_{45}, m_{67})^\pm\,;\,\pm (m_\rho -m_{16} - m_{4}) \,\} \cr &&\chi_{12}'^\pm ~=~ \{\, ( m_{12}, m_{34}, m_{57}, m_{23}, m_{45}, m_{6})^\pm\,;\,\pm (m_\rho-m_{27}-m_{4}) \,\} \cr &&\chi_{03}'^\pm ~=~ \{\, ( m_{1}, m_{25}, m_{67}, m_{3}, m_{4}, m_{56})^\pm\,;\,\pm (m_\rho -m_{37}-m_{45}) \,\} \cr &&\chi_{40}'^\pm ~=~ \{\, ( m_{3}, m_{4}, m_{5}, m_{1}, m_{26}, m_7)^\pm\,;\,\pm (m_\rho -m_{15}-m_{24}) \,\} \cr &&\chi_{31}'^\pm ~=~ \{\, ( m_{23}, m_{4}, m_{56}, m_{12}, m_{35}, m_{67})^\pm\,;\,\pm (m_\rho -m_{16}-m_{34}) \,\} \cr &&\chi_{22}'^\pm ~=~ \{\, ( m_2, m_{34}, m_{57}, m_{13}, m_{45}, m_{6})^\pm\,;\,\pm (m_\rho-m_{17}-m_{4}) \,\} \cr &&\chi_{22}''^\pm 
~=~ \{\, ( m_{13}, m_{4}, m_{57}, m_{2}, m_{35}, m_{6})^\pm\,;\,\pm (m_\rho-m_{27}-m_{34}) \,\} \ , {\nu}onumber\end{equation}a where ~$m_\rho = {\textstyle{\frac{1}{2}}}( m_1+ 2m_{2} + 3m_3 + 4m_{4} + 3m_{5}+ 2m_{6}+ m_{7})$. The multiplets are given explicitly in Fig. 11 (first in \cite{Dobsunn}). \fig{}{diag-su44.eps}{17cm} {\nu}oindent{\betaf Main reduced multiplets} There are nine physically relevant and essentially different reductions of multiplets denoted by $R^4_3\,$, $R^4_3\,$, $R^4_2\,$, $R^4_1\,$. Each of them contains 50 ERs/GVMs \cite{Dobsuff}. Here we present only the reduced multiplets ~$R^4_4$. Their 50 ERs/GVMs has signatures that can be given in the following pair-wise manner: \eqnn{tablff} &&\chi_0^\pm ~=~ \{\, ( m_1, m_2, m_3, m_5, m_6, m_7)^\pm\,;\,\pm m_\rho \,\}\ \\ &&\chi_{10}^\pm ~=~ \{\, ( m_1, m_{23}, 0, m_{3,5}, m_6, m_7)^\pm\,;\,\pm ( m_\rho - m_{3}) \,\} \cr &&\chi_{01}^\pm ~=~ \{\, ( m_1, m_{2}, m_{3,5}, 0, m_{56}, m_7)^\pm\,;\,\pm ( m_\rho - m_{5}) \,\} \cr &&\chi_{20}^\pm ~=~ \{\, ( m_{12}, m_{3}, 0, m_{23,5}, m_6, m_7)^\pm\,;\,\pm (m_\rho-m_{23})\,\} \cr &&\chi_{11}^\pm ~=~ \{\, ( m_1, m_{23}, m_{5}, m_{3}, m_{56}, m_7)^\pm\,;\,\pm (m_\rho -m_{3,5})\,\} \cr &&\chi_{02}^\pm ~=~ \{\, ( m_1, m_{2}, m_{3,56}, 0, m_{5}, m_{67})^\pm\,;\,\pm (m_\rho-m_{56})\,\} \cr &&\chi_{30}^\pm ~=~ \{\, ( m_{2}, m_{3}, 0, m_{13,5}, m_6, m_7)^\pm\,;\,\pm (m_\rho -m_{13} )\,\} \cr &&\chi_{21}^\pm ~=~ \{\, ( m_{12}, m_{3}, m_{5}, m_{23}, m_{56}, m_7)^\pm\,;\,\pm (m_\rho-m_{23,5})\,\} \cr &&\chi_{12}^\pm ~=~ \{\, ( m_1, m_{23}, m_{56}, m_{3}, m_{5}, m_{67})^\pm\,;\,\pm (m_\rho-m_{3,56}) \,\} \cr &&\chi_{03}^\pm ~=~ \{\, ( m_1, m_{2}, m_{3,57}, 0, m_{5}, m_{6})^\pm\,;\,\pm (m_\rho-m_{57}) \,\} \cr &&\chi_{31}^\pm ~=~ \{\, ( m_{2}, m_{3}, m_{5}, m_{13}, m_{56}, m_7)^\pm\,;\,\pm (m_\rho -m_{13,5} ) \,\} \cr &&\chi_{22}^\pm ~=~ \{\, ( m_{12}, m_{3}, m_{56}, m_{23}, m_{5}, m_{67})^\pm\,;\,\pm (m_\rho-m_{23,56}) \,\} \cr &&\chi_{13}^\pm ~=~ \{\, ( m_1, m_{23}, 
m_{57}, m_{3}, m_{5}, m_{6})^\pm\,;\,\pm (m_\rho-m_{3,57})\,\} \cr &&\chi_{32}^\pm ~=~ \{\, ( m_{2}, m_{3}, m_{56}, m_{13}, m_{5}, m_{67})^\pm\,;\,\pm (m_\rho -m_{13,56}) \,\} \cr &&\chi_{23}^\pm ~=~ \{\, ( m_{12}, m_{3}, m_{57}, m_{23}, m_{5}, m_{6})^\pm\,;\,\pm (m_\rho-m_{23,57}) \,\} \cr &&\chi_{33}^\pm ~=~ \{\, ( m_{2}, m_{3}, m_{57}, m_{13}, m_{5}, m_{6})^\pm\,;\, \pm (m_\rho-m_{13,57}) \,\} \cr &&\chi_{20}''^\pm ~=~ \{\, ( m_{13}, 0, m_{5}, m_{2}, m_{3,56}, m_7)^\pm\,;\,\pm (m_\rho-m_{23,5}-m_{3}) \,\} \cr &&\chi_{21}''^\pm ~=~ \{\, ( m_{13}, 0, m_{56}, m_{2}, m_{3,5}, m_{67})^\pm\,;\,\pm (m_\rho-m_{23,56}-m_{3}) \,\} \cr &&\chi_{12}''^\pm ~=~ \{\, ( m_{12}, m_{3,5}, m_{6}, m_{23}, 0, m_{57})^\pm\,;\,\pm (m_\rho-m_{23,56}-m_{5}) \,\} \cr &&\chi_{02}''^\pm ~=~ \{\, ( m_{1}, m_{23,5}, m_{6}, m_{3}, 0, m_{57})^\pm\,;\,\pm (m_\rho-m_{3,56}-m_{5}) \,\} \cr &&\chi_{30}'^\pm ~=~ \{\, ( m_{23}, 0, m_{5}, m_{12}, m_{3,56}, m_7)^\pm\,;\,\pm (m_\rho -m_{13,5}-m_{3}) \,\} \cr &&\chi_{03}'^\pm ~=~ \{\, ( m_{1}, m_{23,5}, m_{67}, m_{3}, 0, m_{56})^\pm\,;\,\pm (m_\rho -m_{3,57}-m_{5}) \,\} \cr &&\chi_{40}'^\pm ~=~ \{\, ( m_{3}, 0, m_{5}, m_{1}, m_{23,56}, m_7)^\pm\,;\,\pm (m_\rho -m_{13,5}-m_{23}) \,\} \cr &&\chi_{31}'^\pm ~=~ \{\, ( m_{23}, 0, m_{56}, m_{12}, m_{3,5}, m_{67})^\pm\,;\,\pm (m_\rho -m_{13,56}-m_{3}) \,\} \cr &&\chi_{22}''^\pm ~=~ \{\, ( m_{13}, 0, m_{57}, m_{2}, m_{3,5}, m_{6})^\pm\,;\,\pm (m_\rho-m_{23,57}-m_{3}) \,\}\ , {\nu}onumber\end{equation}a here ~$m_\rho = {\textstyle{\frac{1}{2}}}( m_1+ 2m_{2} + 3m_3 + 3m_{5}+ 2m_{6}+ m_{7})$. This is a very important type of reduced multiplets since for ~$su(4,4)$~ in ~$\chi_0^+$~ are contained the limits of the (anti)holomorphic discrete series. The multiplets are given in Fig. 12. 
\fig{}{diag-su44-4.eps}{16cm} {\nu}oindent{\betaf Further reduction of multiplets} There are nine physically relevant and essentially different further reductions of multiplets denoted by ~$R^3_{ab}\,$, $(a,b)=(13),(14),(15),(16),(17),(24),(25),(26),(35)$. They contain 36 ERs/GVMs each and were given in \cite{Dobsuff}. Here we give only type $R^4_{13}$~: \eqnn{tablfot} &&\chi_0^\pm ~=~ \{\, ( 0, m_2, 0, m_5, m_6, m_7)^\pm\,;\,\pm m_\rho )\,\} \\ &&\chi_{10}^\pm ~=~ \{\, ( 0, m_{2}, m_{4}, m_{45}, m_6, m_7)^\pm\,;\,\pm ( m_\rho - m_{4}) \,\} \cr &&\chi_{20}^\pm ~=~ \{\, ( m_{2}, 0, m_{4}, m_{2,45}, m_6, m_7)^\pm\,;\,\pm (m_\rho-m_{2,4}) \,\} \cr &&\chi_{11}^\pm ~=~ \{\, ( 0, m_{2}, m_{45}, m_{4}, m_{56}, m_7)^\pm\,;\,\pm (m_\rho -m_{45}) \,\} \cr &&\chi_{21}^\pm ~=~ \{\, ( m_{2}, 0, m_{45}, m_{2,4}, m_{56}, m_7)^\pm\,;\,\pm (m_\rho-m_{2,45}) \,\} \cr &&\chi_{12}^\pm ~=~ \{\, ( 0, m_{2}, m_{46}, m_{4}, m_{5}, m_{67})^\pm\,;\,\pm (m_\rho-m_{46}) \,\} \cr &&\chi_{22}^\pm ~=~ \{\, ( m_{2}, 0, m_{46}, m_{2,4}, m_{5}, m_{67})^\pm\,;\,\pm (m_\rho-m_{2,46}) \,\} \cr &&\chi_{13}^\pm ~=~ \{\, ( 0, m_{2}, m_{47}, m_{4}, m_{5}, m_{6})^\pm\,;\,\pm (m_\rho-m_{47}) \,\} \cr &&\chi_{23}^\pm ~=~ \{\, ( m_{2}, 0, m_{47}, m_{2,4}, m_{5}, m_{6})^\pm\,;\,\pm (m_\rho-m_{2,47}) \,\} \cr &&\chi_{00}'^\pm ~=~ \{\, ( 0, m_{2,4}, m_{5}, 0, m_{46}, m_7)^\pm\,;\,\pm (m_\rho-m_{5}-2m_{4}) \,\} \cr &&\chi_{10}'^\pm ~=~ \{\, ( m_{2}, m_{4}, m_{5}, m_{2}, m_{46}, m_7)^\pm\,;\,\pm (m_\rho-m_{2,5}-2m_{4}) \,\} \cr &&\chi_{01}'^\pm ~=~ \{\, ( 0, m_{2,4}, m_{56}, 0, m_{45}, m_{67})^\pm\,;\,\pm (m_\rho-m_{56}-2m_{4}) \,\} \cr &&\chi_{11}'^\pm ~=~ \{\, ( m_{2}, m_{4}, m_{56}, m_{2}, m_{45}, m_{67})^\pm\,;\,\pm (m_\rho-m_{2,56}-2m_{4}) \,\} \cr &&\chi_{02}'^\pm ~=~ \{\, ( 0, m_{2,4}, m_{57}, 0, m_{45}, m_{6})^\pm\,;\,\pm (m_\rho-m_{57}-2m_{4}) \,\} \cr &&\chi_{02}''^\pm ~=~ \{\, ( 0, m_{2,45}, m_{6}, 0, m_{4}, m_{57})^\pm\,;\,\pm (m_\rho-m_{6}-2m_{45}) \,\} \cr &&\chi_{12}'^\pm ~=~ \{\, ( m_{2}, m_{4}, 
m_{57}, m_{2}, m_{45}, m_{6})^\pm\,;\,\pm (m_\rho-m_{2,57}-2m_{4}) \,\} \cr &&\chi_{03}'^\pm ~=~ \{\, ( 0, m_{2,45}, m_{67}, 0, m_{4}, m_{56})^\pm\,;\,\pm (m_\rho -m_{67}-2m_{45}) \,\} \cr &&\chi_{40}'^\pm ~=~ \{\, ( 0, m_{4}, m_{5}, 0, m_{2,46}, m_7)^\pm\,;\,\pm (m_\rho -m_{5}-2m_{2,4}) \,\}\ ,{\nu}onumber \end{equation}a here ~$m_\rho = {\textstyle{\frac{1}{2}}}( 2m_{2} + 4m_4 + 3m_{5}+ 2m_{6}+ m_{7})$. Their diagram is given in Fig. 13. \vskip 5mm \fig{}{diag-su44-13.eps}{14cm} {\nu}oindent{\betaf Yet further reduction of multiplets} There are six physically relevant and essentially different further reductions of multiplets denoted by ~$R^3_{abc}\,$, $(a,b,c)=(1,3,5),(1,3,6),(1,3,7),(1,4,6),(1,4,7),(2,4,6)$. They contain 26 ERs/GVMs each and were given in \cite{Dobsuff}. Here we give only two types. First we give $R^3_{135}$~: \eqnn{tablfofit} &&\chi_0^\pm ~=~ \{\, ( 0, m_2, 0, 0, m_6, m_7)^\pm\,;\,\pm m_\rho \,\} \\ &&\chi_{11}^\pm ~=~ \{\, ( 0, m_{2}, m_{4}, m_{4}, m_{6}, m_7)^\pm\,;\,\pm (m_\rho -m_{4}) \,\} \cr &&\chi_{21}^\pm ~=~ \{\, ( m_{2}, 0, m_{4}, m_{2,4}, m_{6}, m_7)^\pm\,;\,\pm (m_\rho-m_{2,4}) \,\} \cr &&\chi_{12}^\pm ~=~ \{\, ( 0, m_{2}, m_{4,6}, m_{4}, 0, m_{67})^\pm\,;\,\pm (m_\rho-m_{4,6}) \,\} \cr &&\chi_{22}^\pm ~=~ \{\, ( m_{2}, 0, m_{4,6}, m_{2,4}, 0, m_{67})^\pm\,;\,\pm (m_\rho-m_{2,4,6}) \,\} \cr &&\chi_{13}^\pm ~=~ \{\, ( 0, m_{2}, m_{4,67}, m_{4}, 0, m_{6})^\pm\,;\,\pm (m_\rho-m_{4,67}) \,\} \cr &&\chi_{23}^\pm ~=~ \{\, ( m_{2}, 0, m_{4,67}, m_{2,4}, 0, m_{6})^\pm\,;\,\pm (m_\rho-m_{2,4,67}) \,\} \cr &&\chi_{00}'^\pm ~=~ \{\, ( 0, m_{2,4}, 0, 0, m_{4,6}, m_7)^\pm\,;\,\pm (m_\rho -2m_{4}) \,\} \cr &&\chi_{01}'^\pm ~=~ \{\, ( 0, m_{2,4}, m_{6}, 0, m_{4}, m_{67})^\pm\,;\,\pm (m_\rho-m_{6}-2m_{4}) \,\} \cr &&\chi_{02}'^\pm ~=~ \{\, ( 0, m_{2,4}, m_{67}, 0, m_{4}, m_{6})^\pm\,;\,\pm (m_\rho-m_{67}-2m_{4}) \,\} \cr &&\chi_{30}'^\pm ~=~ \{\, ( m_{2}, m_{4}, 0, m_{2}, m_{4,6}, m_7)^\pm\,;\,\pm (m_\rho -m_{2}-2m_{4}) \,\} \cr &&\chi_{40}'^\pm 
~=~ \{\, ( 0, m_{4}, 0, 0, m_{26}, m_7)^\pm\,;\,\pm (m_\rho -2m_{2,4}) \,\} \cr &&\chi_{31}'^\pm ~=~ \{\, ( m_{2}, m_{4}, m_{6}, m_{2}, m_{4}, m_{67})^\pm\,;\,\pm (m_\rho -m_{2,6}-2m_{4}) = \pm{\textstyle{\frac{1}{2}}} m_7 \,\} \ ,{\nu}onumber \end{equation}a here ~$m_\rho = m_{2} + 2m_4 + m_{6}+ {\textstyle{\frac{1}{2}}} m_{7}\,$. The multiplets are given in Fig. 3-135. Note that the differential operator (of order $m_7$) from ~$\chi_{31}^-$~ to ~$\chi_{31}^+$~ is a degeneration of an integral Knapp-Stein operator. \vskip 5mm \fig{}{diag-su44-135.eps}{14cm} Then we give type ~$R^4_{137}$~: \eqnn{tablfosetr} &&\chi_0^\pm ~=~ \{\, ( 0, m_2, 0, m_5, m_6, 0)^\pm\,;\,\pm m_\rho \,\} \\ &&\chi_{10}^\pm ~=~ \{\, ( 0, m_{2}, m_{4}, m_{45}, m_6, 0)^\pm\,;\,\pm ( m_\rho - m_{4}) \,\} \cr &&\chi_{20}^\pm ~=~ \{\, ( m_{2}, 0, m_{4}, m_{2,45}, m_6, 0)^\pm\,;\,\pm (m_\rho-m_{2,4}) \,\} \cr &&\chi_{11}^\pm ~=~ \{\, ( 0, m_{2}, m_{45}, m_{4}, m_{56}, 0)^\pm\,;\,\pm (m_\rho -m_{45}) \,\} \cr &&\chi_{21}^\pm ~=~ \{\, ( m_{2}, 0, m_{45}, m_{2,4}, m_{56}, 0)^\pm\,;\,\pm (m_\rho-m_{2,45}) \,\} \cr &&\chi_{12}^\pm ~=~ \{\, ( 0, m_{2}, m_{46}, m_{4}, m_{5}, m_{6})^\pm\,;\,\pm (m_\rho-m_{46}) \,\} \cr &&\chi_{22}^\pm ~=~ \{\, ( m_{2}, 0, m_{46}, m_{2,4}, m_{5}, m_{6})^\pm\,;\, (m_\rho-m_{2,46}) \,\} \cr &&\chi_{00}'^\pm ~=~ \{\, ( 0, m_{2,4}, m_{5}, 0, m_{46}, 0)^\pm\,;\,\pm (m_\rho-m_{5}-2m_{4}) \,\} \cr &&\chi_{01}'^\pm ~=~ \{\, ( 0, m_{2,4}, m_{56}, 0, m_{45}, m_{6})^\pm\,;\,\pm (m_\rho-m_{56}-2m_{4}) \,\} \cr &&\chi_{30}'^\pm ~=~ \{\, ( m_{2}, m_{4}, m_{5}, m_{2}, m_{46}, 0)^\pm\,;\,\pm (m_\rho -m_{2,5}-2m_{4}) \,\} \cr &&\chi_{03}'^\pm ~=~ \{\, ( 0, m_{2,45}, m_{6}, 0, m_{4}, m_{56})^\pm\,;\,\pm (m_\rho -m_{6}-2m_{45}) \,\} \cr &&\chi_{40}'^\pm ~=~ \{\, ( 0, m_{4}, m_{5}, 0, m_{2,46}, 0)^\pm\,;\,\pm (m_\rho -m_{5}-2m_{2,4}) \,\} \cr &&\chi_{31}'^\pm ~=~ \{\, ( m_{2}, m_{4}, m_{56}, m_{2}, m_{45}, m_{6})^\pm\,;\, (m_\rho -m_{2,56}-2m_{4})= \pm{\textstyle{\frac{1}{2}}} m_5 \,\} \ 
,{\nu}onumber \end{equation}a here ~$m_\rho = m_{2} + 2m_4 + {\textstyle{3\over2}} m_{5}+ m_{6}$. The multiplets are given in Fig. 15. Note that the differential operator (of order $m_5$) from ~$\chi_{31}^-$~ to ~$\chi_{31}^+$~ is a degeneration of an integral Knapp-Stein operator. \vskip 5mm \fig{}{diag-su44-137.eps}{12cm} {\nu}oindent{\betaf Last reduction of multiplets} There are further reductions of the multiplets - quadruple, etc., but only one quadruple reduction contains representations of physical interest. Namely, this is the multiplet ~$R^4_{1357}\,$, which may be obtained from the main multiplet by setting formally ~$m_1=m_3=m_5=m_7=0$. These multiplets contain 19 ERs/GVMs whose signatures can be given in the following manner: \eqnn{tablfosetrfi} &&\chi_0^\pm ~=~ \{\, ( 0, m_2, 0, 0, m_6, 0)^\pm\,;\,\pm m_\rho = \pm (m_{2} + 2m_4 + m_{6})\,\} \\ &&\chi_{11}^\pm ~=~ \{\, ( 0, m_{2}, m_{4}, m_{4}, m_{6}, 0)^\pm\,;\,\pm (m_\rho -m_{4}) = \pm m_{2,4,6} \,\} \cr &&\chi_{21}^\pm ~=~ \{\, ( m_{2}, 0, m_{4}, m_{2,4}, m_{6}, 0)^\pm\,;\,\pm (m_\rho-m_{2,4})= \pm m_{4,6} \,\} \cr &&\chi_{12}^\pm ~=~ \{\, ( 0, m_{2}, m_{46}, m_{4}, 0, m_{6})^\pm\,;\,\pm (m_\rho-m_{4,6})= \pm m_{2,4} \,\} \cr &&\chi_{22}^\pm ~=~ \{\, ( m_{2}, 0, m_{46}, m_{2,4}, 0, m_{6})^\pm\,;\, \pm (m_\rho-m_{2,4,6}) = \pm m_4 \,\} \cr &&\chi_{00}'^\pm ~=~ \{\, ( 0, m_{2,4}, 0, 0, m_{46}, 0)^\pm\,;\,\pm (m_\rho -2m_{4})= \pm m_{2,6}\,\} \cr &&\chi_{01}'^\pm ~=~ \{\, ( 0, m_{2,4}, m_{6}, 0, m_{4}, m_{6})^\pm\,;\, \pm (m_\rho-m_{6}-2m_{4}) = \pm m_{2}\,\} \cr &&\chi_{30}'^\pm ~=~ \{\, ( m_{2}, m_{4}, 0, m_{2}, m_{46}, 0)^\pm\,;\, \pm (m_\rho -m_{2}-2m_{4})=\pm m_{6}\,\} \cr &&\chi_{40}'^\pm ~=~ \{\, ( 0, m_{4}, 0, 0, m_{2,46}, 0)^\pm\,;\,\pm (m_\rho -2m_{2,4})= \pm (m_{6} -m_{2} )\,\} \cr &&\chi_{31} ~=~ \{\, ( m_{2}, m_{4}, m_{6}, m_{2}, m_{4}, m_{6})\,;\, 0 \,\} {\nu}onumber \end{equation}a The multiplets are given in Fig. 
3-1357:

\vskip 5mm
\fig{}{diag-su44-1357.eps}{10cm}

Note that the ER $\chi_{31}$ is not in a pair --- it has $c=0$ and its ~$\mathcal{M}$~ signature is self-conjugate. It is placed in the middle of the figure as the bullet. That ER contains the ~\emph{minimal irreps}~ characterized by three positive integers which are denoted in this context as $m_2, m_4, m_6$. Each such irrep is the kernel of the three invariant differential operators $\mathcal{D}^{m_2}_{15}$, $\mathcal{D}^{m_4}_{26}$, $\mathcal{D}^{m_6}_{37}$, which are of order $m_2$, $m_4$, $m_6$, resp., and correspond to the noncompact roots $\alpha_{15}$, $\alpha_{26}$, $\alpha_{37}$, resp.

\section*{Acknowledgments}

The author thanks the Organizers for the kind invitation to give a plenary talk at the International Conference on Integrable Systems and Quantum Symmetries, Prague, June 2014.

\section*{References}

\begin{thebibliography}{9}

\bibitem{Dobinv} V.K. Dobrev, Rev. Math. Phys. {\bf 20} (2008) 407-449.

\bibitem{Dobeseven} V.K. Dobrev, J. Phys. A: Math. Theor. {\bf 42} (2009) 285203.

\bibitem{Dobsunn} V.K. Dobrev, Physics of Atomic Nuclei, {\bf 76}, No 8, 983-990 (2013).

\bibitem{Dobspn} V.K. Dobrev, Invariant Differential Operators for Non-Compact Lie Groups: the Sp(n,R) Case, in: Springer Proceedings in Mathematics and Statistics, Vol. 36 (ISBN 978-4-431-54269-8), (Springer, Tokyo-Heidelberg, 2013) pp. 311-335.

\bibitem{Dobparab} V.K. Dobrev, J. High Energy Phys. 02 (2013) 015.

\bibitem{Dobsutt} V.K. Dobrev, Invariant Differential Operators for Non-Compact Lie Groups: the Reduced SU(3,3) Multiplets, arXiv:1312.5998, plenary talk at the International Workshop ``Supersymmetries and Quantum Symmetries'', Dubna, July 29 -- August 3, 2013.

\bibitem{Dobsuff} V.K.
Dobrev, Invariant Differential Operators for Non-Compact Lie Groups: the Reduced SU(4,4) Multiplets, arXiv:1402.0190, lectures at the ``Third International School on Symmetry and Integrable Systems'' and at the ``Humboldt Kolleg on Symmetry and Integrable Systems'', Tsakhkadzor, Armenia, July 2013.

\bibitem{DMPPT} V.K. Dobrev, G. Mack, V.B. Petkova, S.G. Petrova and I.T. Todorov, {\it Harmonic Analysis on the $n$-Dimensional Lorentz Group and Its Applications to Conformal Quantum Field Theory}, Lecture Notes in Physics, Vol. 63 (Springer, Berlin, 1977).

\bibitem{Knapp} A.W. Knapp, {\it Representation Theory of Semisimple Groups (An Overview Based on Examples)}, (Princeton Univ. Press, 1986).

\bibitem{Lan} R.P. Langlands, {\it On the classification of irreducible representations of real algebraic groups}, Math. Surveys and Monographs, Vol. 31 (AMS, 1988), first as IAS Princeton preprint (1973).

\bibitem{KnZu} A.W. Knapp and G.J. Zuckerman, ``Classification theorems for representations of semisimple groups'', in: Lecture Notes in Math., Vol. 587 (Springer, Berlin, 1977) pp. 138-159; Ann. Math. {\bf 116} (1982) 389-501.

\bibitem{Dob} V.K. Dobrev, Rept. Math. Phys. {\bf 25}, 159-181 (1988); first as ICTP Trieste preprint IC/86/393 (1986).

\bibitem{Dobmul} V.K. Dobrev, Lett. Math. Phys. {\bf 9}, 205-211 (1985).

\bibitem{BGG} I.N. Bernstein, I.M. Gel'fand and S.I. Gel'fand, Funkts. Anal. Prilozh. {\bf 5} (1) (1971) 1-9; English translation: Funct. Anal. Appl. {\bf 5} (1971) 1-8.

\bibitem{Dix} J. Dixmier, {\it Enveloping Algebras}, (North Holland, New York, 1977).

\bibitem{Har} Harish-Chandra, Ann. Math. {\bf 116} (1966) 1-111.

\bibitem{KnSt} A.W. Knapp and E.M. Stein, Ann. Math. {\bf 93} (1971) 489-578; Inv. Math. {\bf 60} (1980) 9-84.

\bibitem{DoPe} V.K. Dobrev and V.B. Petkova, Rept. Math. Phys. {\bf 13}, 233-277 (1978).

\bibitem{GeNa} I.M. Gelfand and M.A. Naimark, Acad. Sci. USSR. J. Phys. {\bf 10} (1946) 93-94.
\bibitem{Barg}V. Bargmann, Annals Math. {\bf 48}, (1947) 568-640. \bibitem{Dobpeds}V.K. Dobrev, J. Phys. A: Math. Theor. {\bf 41} (2008) 425206. \bibitem{DPm} V.K. Dobrev and V.B. Petkova, Lett. Math. Phys. {\bf 9} (1985) 287-298. \end{thebibliography} \end{document}
\begin{document} \title[Basic trigonometric power sums with applications]{Basic trigonometric power sums with applications} \author{Carlos M. da Fonseca} \address{Department of Mathematics, Kuwait University, Safat 13060, Kuwait} \email{[email protected]} \author{M. Lawrence Glasser} \address{Department of Physics, Clarkson University, Potsdam, NY 13699-5820, USA} \email{[email protected]} \author{Victor Kowalenko} \address{ARC Centre of Excellence for Mathematics and Statistics of Complex Systems, Department of Mathematics and Statistics, The University of Melbourne, Victoria 3010, Australia} \email{[email protected]} \subjclass[2000]{33B10, 05A15, 11B65} \keywords{basic trigonometric power sum, binomial coefficient, closed walk, cosine, cycle, generating function, graph, path, sine} \begin{abstract} We present the transformation of several sums of positive integer powers of the sine and cosine into non-trigonometric combinatorial forms. The results are applied to the derivation of generating functions and to the number of the closed walks on a path and in a cycle. \end{abstract} \maketitle \section{Introduction} \label{sec1} Over the last half-century there has been widespread interest in finite sums involving powers of trigonometric functions. In $1966$ Quoniam posed an open problem in which the following result was conjectured \begin{equation}\label{eq1} \sum_{k=1}^{\lfloor n/2\rfloor} 2^{2 m} \cos^{2m} \left( \frac{k\pi}{n+1}\right)= (n+1)\binom{2m-1}{m-1}-2^{2m-1}\, , \end{equation} where $m$ is a positive integer and subject to $m<n+1$ \cite{Q1966}. In this equation $\lfloor n/2 \rfloor$ denotes the floor function of $n/2$ or the greatest integer less than or equal to $n/2$. A solution to the above problem was presented shortly after by Greening et al. in \cite{G1968}. 
Soon afterwards, there appeared a problem involving powers of the secant proposed by Gardner \cite{Ga1969}, which was solved partially by Fisher \cite{Fi1971,Kl1990} and completely, only recently, in \cite{dF2015,K2011}. The activity subsided somewhat until such series arose in string theory in the early 1990's with the work of Dowker \cite{Do1992}. Subsequently, a surge occurred with studies of related sums and identities as evidenced by the work of: (1) Berndt and Yeap \cite{BY2002} on reciprocity theorems and (2) Cvijovi\'c and Srivastava \cite{Cv2012,Cv2007} on Dowker and related sums, while \cite{C2003,CM1999,WZ2007} were motivated by the intrinsic fascination these sums possess and derived formulas where the summand was a power of the secant, e.g., $$ \sum_{k=0}^{n-1} \sec^{2p} \left( \frac{k\pi}{n}\right)= n\sum_{k=1}^{2p-1}(-1)^{p+k}\binom{p-1+kn}{2p-1} \sum_{j=k}^{2p-1}\binom{2p}{j+1}\, . $$ By far, the most interesting sums have been those trigonometric power sums with inverse powers of the sine or cosine, since their evaluation invariably involves the zeta function directly or through related numbers such as the Bernoulli and Euler numbers. A typical example is the finite sum of powers of the cotangent studied by Berndt and Yeap \cite{BY2002}, \begin{equation} \label{eq1a} \frac{1}{k} \, \sum_{r=1}^{k-1} \cot^{2n} \left( \frac{r\pi}{k}\right)= (-1)^n-(-1)^n 2^{2n}\sum_{\substack{ j_0,j_1,\ldots,j_{2n} > 0 \\j_0+j_1+\cdots+j_{2n}=n}} k^{2j_0-1}\prod_{r=0}^{2n}\frac{B_{2j_r}}{(2j_r)!}\, . \end{equation} Here, $B_j$ denotes the Bernoulli number with index $j\geqslant 0$. As described in Appendix A, Berndt and Yeap use contour integration to derive this result, although more recently it has been studied with the aid of sampling theorems \cite{AA2011}. Unfortunately, \eqref{eq1a} is, if not incorrect, confusing or misleading because it states that the $j_i$ cannot equal zero. 
Yet, some of them are required to be zero in order to evaluate the polynomials on the rhs. In addition, it does not matter if any of the $j_i$ are zero since $B_0$ is equal to unity anyway. A more precise statement of \eqref{eq1a} is \begin{equation} \label{eq1b} \frac{1}{k} \, \sum_{r=1}^{k-1} \cot^{2n} \left( \frac{r\pi}{k}\right)= (-1)^n -(-1)^n 2^{2n} \sum_{j_1,j_2,\ldots,j_{2n} =0}^{n,n-j_1,\ldots, n-j_s+j_{2n}}k^{2j_{2n}-1} \prod_{r=1}^{2n} \frac{B_{2j_{r}}}{(2j_r)!}\, \frac{B_{2(n-j_s)}}{(2(n-j_s))!} \;\;, \end{equation} where $j_s = \sum_{i=1}^{2n}j_i$. The main difference between the two equations is that there is now an upper limit for each sum over the $j_i$, which is dependent on the number of sums preceding it. As a consequence, there is no requirement for the $j_0$ index to appear as a sum as in \eqref{eq1a}. Instead, it has been replaced by $n-j_s$ in \eqref{eq1b}. Although the above material is incidental to the material presented in this paper, because of its importance, the derivation of \eqref{eq1b} is presented in Appendix A together with a description of how it is to be implemented when determining specific powers of the sum, which is another issue overlooked in \cite{BY2002}. In doing so, we give the $n=3$ and $n=4$ forms for the sum, thereby demonstrating to the reader just how intricate the evaluation of trigonometric power sums can be. It should also be mentioned that although the arguments inside the trigonometric power of the finite sums discussed above are composed of rational numbers multiplied by $\pi$, the actual sequence of numbers has a profound effect on the final value for the trigonometric power sum. 
For example, by using a recursive approach, Byrne and Smith \cite{BS1997} have derived the following result for the same finite sum over powers of cotangent: $$ \sum_{r=1}^k \cot^{2n}\left( \frac{(r-1/2) \pi}{2 k}\right)= (-1)^k k+ \sum_{j=1}^n b_{n,j}\, k^{2j}\, , $$ where the coefficients are given by $$ b_{n,j}=\frac{1}{2^{2(n-j)-1}}\sum_{\ell=1}^{n-j} (-1)^\ell \binom{2n}{\ell} b_{n-\ell,j} \quad \mbox{and}\quad \sum_{j=1}^n b_{n,j}=1+(-1)^{n-1}\, ,\quad \mbox{for $j<n$.} $$ Moreover, Byrne and Smith express the $b_{n,j}$ in terms of the odd-indexed Euler numbers as opposed to the Bernoulli numbers appearing in \eqref{eq1a} and \eqref{eq1b}. Hence we see that the sum yields completely different results when the argument inside the cotangent is altered to $(r - 1/2)\pi/2k$ as opposed to $r \pi/k$ in \eqref{eq1a} and \eqref{eq1b}. Although there has been a greater interest in sums with inverse powers of sine or cosine (including powers of cotangent and tangent), there are still many basic trigonometric power sums that have not been solved. By a basic trigonometric power sum, we mean a finite sum involving positive powers of a cosine or sine whose arguments are rational multiples of $\pi$. Such series can be as difficult to evaluate as their ``inverse power'' counterparts even though they tend to yield combinatorial solutions directly rather than involve the Riemann zeta function or related quantities (Bernoulli numbers), as in \eqref{eq1a} or \eqref{eq1b}, before ultimately reducing to the simple polynomial solutions in Appendix A. As we shall see, just like their inverse power counterparts, the closed form expressions for basic trigonometric power sums depend greatly, not only on the power of the trigonometric function, but also on their limits and the values of the rationals multiplying $\pi$ in the argument. 
In response to the situation concerning basic trigonometric power sums, Merca \cite{M2012} recently derived formulas for various basic cosine power sums including \begin{equation} \label{merca1} \sum_{k=1}^{\lfloor (n-1)/2\rfloor} \cos^{2p} \left( \frac{k\pi}{n}\right)= -\frac 12 +\frac{n}{2^{2p+1}}\sum_{k=-\lfloor p/n\rfloor}^{\lfloor p/n\rfloor}\binom{2p}{p+kn} \end{equation} and \begin{equation} \label{merca2} \sum_{k=1}^{\lfloor n/2\rfloor} \cos^{2p} \left(\frac{(k-1/2) \pi}{n}\right)=\frac{n}{2^{2p+1}} \sum_{k=-\lfloor p/n\rfloor}^{\lfloor p/n\rfloor}(-1)^k\binom{2p}{p+kn} \;, \end{equation} where $n$ and $p$ represent positive integers. By using these results he was able to derive in a series of corollaries several new combinatorial identities involving finite sums over $k$ of the binomial coefficient $\binom{2m}{n-rk}$, where $r$ is an integer. Following this work, two of us \cite{FK2013} derived a formula for the basic trigonometric power sum with the alternating phase factor $(-1)^k$ in the summand and an extra factor of 2 in the denominator of the argument. The formula was also found to be combinatorial in nature, but its actual form was different when both the power of the sine or cosine and its upper limit were varied. Consequently, a computer program was required to evaluate the series in rational form for each set of these values. In a more recent work using Dickson and Chebyshev polynomials Barbero \cite{B2014} initially obtained the following result: \begin{equation} \label{barb1} R_{m,n} =2^{2m} \sum_{k=1}^{n+1} \cos^{2m} \Bigl( \frac{k \pi}{2n+3} \Bigr)= \Bigl( n+ \frac{3}{2} \Bigr) \binom{2m}{m}- 2^{2m-1}\;, \quad m \geqslant 1\,, \end{equation} with $R_{n,0}=n+1$ and $R_{0,m}=1$. Though elegant, this result was found not to be entirely correct. E.g., for $m=12$ and $n=3$, the representation in terms of the trigonometric power sum for $R_{m,n}$ gives a value of $3\,798\,310$, while its combinatorial form on the rhs gives a value of $3\,780\,094$. 
Yet, if one replaces $n$ and $p$, respectively, by $2n+3$ and $m$ in \eqref{merca1}, then after multiplying by $2^{2m}$ one finds that the combinatorial form on the rhs yields the correct value of $3\,798\,310$. Apparently, the discrepancy between the lhs and rhs of \eqref{barb1} has been brought to Barbero's attention, since he has amended the original result to \begin{equation} \label{barb2} 2^{2m} \sum_{k=1}^{n+1} \cos^{2m}\Bigl( \frac{k \pi}{2n+3}\Bigr) = \begin{cases} \displaystyle \Bigl( n+ \frac{3}{2} \Bigr) \binom{2m}{m}- 2^{2m-1}\;, \quad 1 \leqslant m < (2n+3)\,, \cr \displaystyle \Bigl( n+ \frac{3}{2} \Bigr) \binom{2m}{m}- 2^{2m-1} \cr \displaystyle + (2n+3) \sum_{i=1}^{\lfloor m/(2n+3) \rfloor} \binom{2m}{m-(2n+3)i}, \quad m \geqslant 2n+3\;\;, \end{cases} \end{equation} with $R_{n,0}$ and $R_{0,m}$ as given above. Consequently, we find that the extra sum on the rhs of the second result in \eqref{barb2} yields the discrepancy of $18\,216$ on the rhs of \eqref{barb1} when $m=12$ and $n=3$. This highlights the necessity for conducting numerical checks on results rather than solely relying on proofs, where small terms such as the extra sum in the second result of \eqref{barb2} can often be neglected. In this paper we aim to continue with the derivation of combinatorial forms for basic trigonometric power sums possessing different arguments and limits than those calculated previously. Typically, the basic trigonometric power sums studied here will be of the form: $$ S= \sum_{k=0}^{g(n)} (\pm 1)^k \, f(k) \left\{ \begin{matrix} \cos^{2 m} \\ \sin^{2m} \end{matrix} \right\} \left( \frac{qk \pi}{n} \right) \;\;, $$ where $m$, $q$ and $n$ are positive integers, $g(n)$ depends upon $n$, e.g., $n - 1$ or $\lfloor m/n\rfloor$, and $f(k)$ is a simple function of $k$, e.g., unity or $\cos(k \pi/p)$ with $p$, an integer. 
Surprisingly, the results presented here will be required when we study more complicated sums with inverse powers of trigonometric functions, such as the general or twisted Dowker \cite{Do1992} and related sums \cite{Cv2012}, in the future. Furthermore, we shall apply the results of Section \ref{sec2} in the derivation of generating functions and finally consider an application to spectral graph theory by determining the number of closed walks of a specific length on a path and in a cycle. \section{Main result} \label{sec2} The main result in this paper is presented in the following theorem: \begin{theorem} \label{main} Let $m$ and $n$ be positive integers in the basic trigonometric power sums $$ C(m,n):= \sum_{k=0}^{n-1} \cos^{2m} \left( \frac{k\pi}{n}\right) \quad \mbox{and} \quad S(m,n):= \sum_{k=0}^{n-1} \sin^{2m} \left( \frac{k\pi}{n}\right)\, . $$ Then it can be shown that \begin{equation}\label{eq2a} C(m,n) = \begin{cases} \displaystyle 2^{1-2m} \, n \Bigl(\binom{2m-1}{m-1} +\sum_{p=1}^{\lfloor m/n\rfloor}\binom{2m}{m-pn} \Bigr) \, , & m\geqslant n\,, \cr \displaystyle 2^{1-2m} \, n \binom{2m-1}{m-1}\;, & m<n\,, \end{cases} \end{equation} and \begin{equation}\label{eq2b} S(m,n)= \begin{cases} \displaystyle 2^{1-2m} \,n \Bigl(\binom{2m-1}{m-1}+\sum_{p=1}^{\lfloor m/n\rfloor}(-1)^{pn}\binom{2m}{m-pn} \Bigr)\,, & m \geqslant n \,, \cr \displaystyle 2^{1-2m} \,n \binom{2m-1}{m-1} \,, & m<n. \end{cases} \end{equation} \end{theorem} \begin{remark} After preparing the manuscript, it came to our attention that \eqref{eq2a} appears in a more unwieldy form as (18.1.5) in \cite{Ha1975}, while the second result of \eqref{eq2a} appears as No. 4.4.2.11 in \cite{PBM2003}. This suggests our proof is entirely different from these references. Moreover, we shall adapt the proof to determine other results not given in these references. \end{remark} \begin{proof} We begin by stating well-known trigonometric power sums, which appear as No. 4.4.2.1 in \cite{PBM2003}. 
These are \begin{equation} \label{eq3a} \sum_{k=1}^{n} \cos^{2m} (kx) = 2^{1-2m} \sum_{k=1}^{m}\binom{2m}{m-k}\frac{\sin (nkx)}{\sin (kx)} \; \cos\bigl( (n+1)kx \bigr) + 2^{-2m} \,n \binom{2m}{m} \end{equation} and \begin{equation}\label{eq3b} \sum_{k=1}^{n} \sin^{2m} (kx) = 2^{1-2m}\sum_{k=1}^{m}(-1)^k \binom{2m}{m-k}\frac{\sin (nkx)}{\sin (kx)}\; \cos \bigl( (n+1) kx \bigr) + 2^{-2m}\, n \binom{2m}{m}\, . \end{equation} For $x=\pi /n$, \eqref{eq3a} becomes \begin{equation}\label{eq4} C(m,n)=2^{-2m}\, n \binom{2m}{m}+ 2^{1-2m} \sum_{k=1}^{m} (-1)^k \,\binom{2m}{m-k}\, R(k) \cos(k \pi/n) \, , \end{equation} where \begin{equation*}\label{eq4a} R(k) =\lim_{y \to \pi} \left\{ \frac{\sin(ky)}{\sin(ky/n)} \right\} = \lim_{\epsilon \to 0} \left\{ \frac{\sin(k(\pi +\epsilon))}{\sin(k(\pi +\epsilon)/n)} \right\} \;. \end{equation*} The quotient of sines given above by $R(k)$ vanishes for all values of $k$ except when $k$ is a multiple of $n$, i.e., when $k = pn$, where $p=1,2,\ldots,\lfloor m/n \rfloor$. For these values of $k$, we find that $R(k) = (-1)^{(n-1)p} n$. The phase factor in $R(k)$ cancels $(-1)^k \cos(k\pi/n)$ in the summand of $C(m,n)$. Moreover, since $ \binom{2m}{m}=2\binom{2m-1}{m}$, \eqref{eq4} becomes \begin{equation*}\label{eq4b} C(m,n)= 2^{1-2m}\,n \binom{2m-1}{m}+ 2^{1-2m}\,n \sum_{p=1}^{\lfloor m/n \rfloor} \binom{2m}{m-pn} \;. \end{equation*} Re-arranging the terms on the rhs of the above result then yields the first result in \eqref{eq2a}. For $m<n$, the sum over $p$ vanishes and we are left with the second result in \eqref{eq2a}. In addition, for $m=0$, the second term not only vanishes, but also the first term yields $n$. Finally, adopting the same approach to \eqref{eq3b} yields both results in \eqref{eq2b}. This completes the proof. 
\end{proof} From Theorem \ref{main}, we can obtain further results beginning with the following corollary: \begin{corollary} For $q\equiv 0 \, ({\rm mod}\, n)$, where $n$ is a positive integer, the following generalizations of the above basic trigonometric power sums are given by \begin{equation}\label{eq4c} \sum_{k=0}^{q-1} \cos^{2m} \Bigl( \frac{k \pi}{n} \Bigr) = \frac{q}{n}\, \,C(m,n)\,, \end{equation} and \begin{equation}\label{eq4d} \sum_{k=0}^{q-1} \sin^{2m} \Bigl( \frac{k \pi}{n} \Bigr) = \frac{q}{n} \, S(m,n) \,, \end{equation} where $C(m,n)$ and $S(m,n)$ are obtained from \eqref{eq2a} and \eqref{eq2b} respectively. \end{corollary} \begin{proof} From the condition on $q$, we let $q=sn$, where $s$ is a positive integer. Then we note that the basic trigonometric power sum in \eqref{eq4c} can be subdivided according to \begin{equation}\label{eq4e} \sum_{k=1}^{q-1} \cos^{2m} \Bigl( \frac{k \pi}{n} \Bigr) = \sum_{j=0}^{s-1} \sum_{k=jn +1}^{(j+1)n-1} \cos^{2m} \Bigl(\frac{k \pi}{n} \Bigr) + \sum_{j=1}^{s-1} \cos^{2m}(j \pi) \,, \end{equation} while \eqref{eq4d} can be expressed as \begin{equation}\label{eq4f} \sum_{k=0}^{q-1} \sin^{2m} \Bigl( \frac{k \pi}{n} \Bigr) = \sum_{j=0}^{s-1} \sum_{k=jn +1}^{(j+1)n-1} \sin^{2m} \Bigl(\frac{k \pi}{n} \Bigr) + \sum_{j=1}^{s-1} \sin^{2m}(j \pi) \,. \end{equation} The second sum on the rhs of \eqref{eq4e} represents a sum over unity and hence, yields $s -1$, while the second sum on the rhs of \eqref{eq4f} vanishes. In the first sum on the rhs of both equations we replace $k$ by $k + nj$, where $k$ now ranges from unity to $n - 1$. Then \eqref{eq4e} and \eqref{eq4f} become \begin{equation}\label{eq4g} \sum_{k=0}^{q-1} \cos^{2m} \Bigl(\frac{k \pi}{n} \Bigr) = s \sum_{k=1}^{n-1} \cos^{2m} \Bigl(\frac{k \pi}{n} \Bigr) + s \,, \end{equation} and \begin{equation}\label{eq4h} \sum_{k=0}^{q-1} \sin^{2m} \Bigl( \frac{k \pi}{n} \Bigr) = s \sum_{k=0}^{n-1} \sin^{2m} \Bigl(\frac{k \pi}{n} \Bigr) \,. 
\end{equation} From the definitions in Theorem \ref{main}, the sums on the rhs of \eqref{eq4g} and \eqref{eq4h} are $C(m,n)$ and $S(m,n)$, respectively. Moreover, by replacing $s$ by $q/n$, we arrive at the results in the corollary, which completes the proof. \end{proof} \begin{corollary} \label{cor2} If we define the basic power sums $$ C(m,n,q) := \sum_{k=0}^{n-1} \cos^{2m} \left( \frac{qk\pi}{n}\right)\;, \quad \mbox{and} \quad S(m,n,q):= \sum_{k=0}^{n-1} \sin^{2m} \left( \frac{qk\pi}{n}\right)\, , $$ where $n$ and $q$ are co-prime, then \begin{equation}\label{eq5a} C(m,n,q) = C(m,n) \, , \end{equation} while \begin{equation}\label{eq5b} S(m,n,q)= S(m,n) \end{equation} \end{corollary} \begin{proof} Returning to the proof of Theorem \ref{main}, we now introduce $x = q \pi/n$, where $q$ is co-prime to $n$, into \eqref{eq3a}. (If $q$ is a negative integer, then we take its absolute value in what follows.) Hence \eqref{eq3a} becomes \begin{equation}\label{eq5c} C(m,n,q)=2^{-2m}\, n \binom{2m}{m}-1+ 2^{1-2m} \sum_{k=1}^{m} (-1)^{qk} \,\binom{2m}{m-k}\, R(kq) \cos(qk \pi/n) \, , \end{equation} where \begin{equation*}\label{eq5d} R(kq) = \lim_{\epsilon \to 0} \left\{ \frac{\sin(k(q\pi +\epsilon))}{\sin(k(q \pi +\epsilon)/n)} \right\} \;. \end{equation*} That is, the argument of $R(k)$ has been replaced by $kq$, while the cosine is now dependent upon $qk \pi/n$ instead of $k \pi/n$. This means that the sum on the rhs of \eqref{eq5c} is, once again, non-zero for all those integer values, where $k=pn$ with $p$ ranging from unity to $\lfloor m/n \rfloor$, provided $n$ and $q$ are co-prime. If $m < n$, the sum of the rhs of \eqref{eq5c} vanishes and we are left with the second result in \eqref{eq2a}. Hence we find that \begin{equation} \label{eq5e} (-1)^{npq} R(kq) \cos(qp \pi/n )= (-1)^{npq} (-1)^{pq(n+1)} \,n \,(-1)^{qp}= n \,. \end{equation} Introducing the above result into \eqref{eq5c} yields the first result in the corollary for $C(m,n,q)$. 
To obtain \eqref{eq5b}, we put $x = q \pi/n$ in \eqref{eq3b}. Then we arrive at \begin{equation}\label{eq5f} S(m,n,q)=2^{-2m}\, n \binom{2m}{m}+ 2^{1-2m} \sum_{k=1}^{m} (-1)^{k(q+1)}\binom{2m}{m-k}\, R(kq) \cos(qk \pi/n) \, . \end{equation} As indicated above, the sum will only contribute when $R(kq)$ is non-zero, which occurs when $k$ is an integer multiple of $n$. By multiplying \eqref{eq5e} throughout with the phase factor of $(-1)^{npq}$, we obtain the value of $R(kq) \cos(qk \pi/n)$, which equals $(-1)^{qnp} n$. Introducing this value into \eqref{eq5f} yields \eqref{eq5b}. This completes the proof. \end{proof} Although it was stated that $n$ and $q$ need to be co-prime for \eqref{eq5a} to hold, let us now assume that they are both even numbers, but are co-prime once the factor of 2 has been removed. If we let $q = 2s$ and $n = 2\ell$, then we have \begin{equation} C(m,n,q)= \sum_{k=0}^{n-1} \cos^{2m} \Bigl( \frac{sk \pi}{\ell} \Bigr)=2 \sum_{k=0}^{\ell-1} \cos^{2m} \Bigl( \frac{sk \pi}{\ell} \Bigr). \label{eq5g}\end{equation} Since $s$ and $\ell$ are co-prime, we can apply Corollary\ 2.3. If, however, there was another factor of 2 before $s$ and $\ell$ became co-prime, i.e.\ $n=4 \ell$ and $q=4s$, then we find that \begin{equation}\label{eq5h} C(m,n,q)= 4 \sum_{k=0}^{\ell-1} \cos^{2m} \Bigl( \frac{s k \pi}{\ell} \Bigr)\;. 
\end{equation} Moreover, if $r$ represents the product of all the common factors of $n$ and $q$, then we find that \begin{equation}\label{eq5i} \sum_{k=0}^{n-1}\left\{ \begin{matrix} \cos^{2 m} \\ \sin^{2m} \end{matrix} \right\} \left( \frac{qk \pi}{n} \right)= r \sum_{k=0}^{\ell-1} \left\{ \begin{matrix} \cos^{2 m} \\ \sin^{2m} \end{matrix} \right\} \Bigl( \frac{s k \pi}{\ell} \Bigr), \end{equation} where the curly brackets have been introduced to signify that the above results apply to either a cosine or sine power. Therefore, for $n = r \ell$ and $q = r s$, $C(m,n,q) = r C(m,\ell,s) =r C(m,\ell)$ and $S(m,n,q)=r S(m,\ell,s)=rS(m,\ell)$ according to Corollary\ 2.3. \section{Extensions} \label{sec3} In this section we shall use the results of the previous section to derive solutions for more advanced basic trigonometric power sums. As stated in the introduction, Merca \cite{M2012} has evaluated several basic trigonometric power sums by deriving \eqref{merca1} and \eqref{merca2} via the multisection series method. However, these results can also be derived via Theorem \ref{main}. To demonstrate this, we express $C(p,n)$ as \begin{equation*}\label{eq6} C(p,n)= 2\sum_{k=1}^{\lfloor(n-1)/2 \rfloor} \cos^{2p}(k \pi/n) + 1 \;, \end{equation*} where for the terms between $k = \lfloor (n-1)/2 \rfloor$ and $k = n - 1$, we have replaced $k$ by $n-k$, thereby obtaining twice the first sum via symmetry. In other words, the finite sum studied by Merca in \cite{M2012} is just half of $C(p,n)$. Dividing through by 2 yields Merca's result when one realises that: (1) the sum over negative values of $k$ in \eqref{merca1} is identical to that over positive values of $k$, and (2) the $k = 0$ term produces the combinatorial term on the rhs of \eqref{eq2a}. We can determine formulas for other basic trigonometric power sums by manipulating \eqref{eq2a} and \eqref{eq2b}. 
First we express $C(m,n)$ as $$ C(m,n) = \sum_{k=0}^{n-1} \cos^{2m}( 2k \pi/2n)= \sum_{k=0,2,4,\ldots}^{2n-2} \cos^{2m}\left(k \pi/2n \right)\;. $$ This result can be written alternatively as \begin{equation}\label{eq6a} C(m,n) = \frac{1}{2} \sum_{k=0}^{2n-1} \left( 1+ (-1)^k \right) \cos^{2m} (k \pi/2n) \,. \end{equation} The first sum on the rhs is simply $C(m,2n)$. Therefore, \eqref{eq6a} becomes \begin{equation}\label{eq7} \sum_{k=0}^{2n-1} (-1)^k \cos^{2m}(k \pi/2n) = 2 C(m,n) - C(m,2n) \;. \end{equation} In fact, the above result can be extended to yield \begin{equation*}\label{eq7a} \sum_{k=0}^{2pn-1} (-1)^k \cos^{2m}(k \pi/2pn) = 2 C(m,pn) - C(m,2pn) \;, \end{equation*} where $p$ is a positive integer. Hence the alternating form of $C(m,n)$ is given by \begin{equation}\label{eq7b} \sum_{k=0}^{n-1} (-1)^k \cos^{2m}(k \pi/n) = 2 C(m,n/2) - C(m,n) \;, \end{equation} which is only valid for even values of $n$. If we introduce \eqref{eq2a} into \eqref{eq7}, then we obtain three distinct cases depending on whether $m<n$, $n \leqslant m <2n$ and $m\geqslant 2n$. Hence we arrive at \begin{equation}\label{eq8} \sum_{k=0}^{2n-1} (-1)^k \cos^{2m} \Bigl( \frac{k \pi}{2n} \Bigr) = \begin{cases} \displaystyle 2^{2-2m} \, n \Bigl( \sum_{p=1}^{\lfloor m/n \rfloor}\binom{2m}{m-pn} -\sum_{p=1}^{\lfloor m/2n \rfloor}\binom{2m}{m-2pn} \Bigr)\,, & m\geqslant 2 n \, , \cr \displaystyle 2^{2-2m} \sum_{p=1}^{\lfloor m/n\rfloor}\binom{2m}{m-pn}\,, & n \leqslant m<2n \,, \cr \displaystyle 0 \, , & m<n\, . \end{cases} \end{equation} In similar fashion, we can obtain the corresponding result when $\cos^{2m}(k \pi/2n)$ is replaced by $\sin^{2m}(k \pi/2n)$. 
Therefore, repeating the above calculation, we obtain \begin{equation*}\label{eq9} \sum_{k=0}^{2n-1} (-1)^k \sin^{2m}(k \pi/2n) = 2 S(m,n) - S(m,2n) \;, \end{equation*} which yields after the introduction of \eqref{eq2b} \begin{equation} \sum_{k=0}^{2n-1} (-1)^k \sin^{2m} \Bigl( \frac{k \pi}{2n} \Bigr) = \begin{cases} \displaystyle 2^{2-2m} \, n \Bigl( \sum_{p=1}^{\lfloor m/n \rfloor} (-1)^{pn} \binom{2m}{m-pn} -\sum_{p=1}^{\lfloor m/2n \rfloor}\binom{2m}{m-2pn} \Bigr)\,, & m\geqslant 2 n \, , \cr \displaystyle 2^{2-2m} \sum_{p=1}^{\lfloor m/n\rfloor}\binom{2m}{m-pn} \,, & n \leqslant m<2n \,, \cr \displaystyle 0\, , & m<n\, . \label{eq10} \end{cases} \end{equation} We can also express \eqref{eq7} as \begin{equation}\label{eq10a} \sum_{k=0,2,4,\ldots}^{2n-2} \cos^{2m}(k \pi/2n) -\sum_{k=1,3,5,\ldots}^{2n-1} \cos^{2m}(k \pi/2n) = 2 C(m,n) - C(m,2n) \;. \end{equation} Alternatively, the above can be written as \begin{equation*}\label{eq10b} \sum_{k=0}^{n-1} \cos^{2m}(k \pi/n) -\sum_{k=0}^{n-1} \cos^{2m}((k+ 1/2) \pi/n) = 2 C(m,n) - C(m,2n) \;. \end{equation*} The above result can be simplified further to yield \begin{equation}\label{eq10c} \sum_{k=0}^{n-1} \cos^{2m}((k+ 1/2) \pi/n) = C(m,2n) - C(m,n) \;. \end{equation} Before we can combine the terms on the rhs, we need to relate the upper limit in the sum for $C(m,2n)$ in \eqref{eq10c}, viz.\ $\lfloor m/2n \rfloor$, with that for $C(m,n)$, which is $\lfloor m/n \rfloor$. If we let $m= rn+b$, where $0<b<n$, then $\lfloor m/n \rfloor= r$, while $\lfloor m/2n \rfloor= \lfloor r/2+b/2n\rfloor$. If $r$ is even, then we find that $\lfloor m/n \rfloor= 2 \lfloor m/2n \rfloor$, but if it is odd, then we find that $\lfloor m/n \rfloor = 2 \lfloor m/2n \rfloor +1$. In other words, we require the following identity: $$ \lfloor m/n \rfloor = 2 \lfloor m/2n \rfloor + \left(1- (-1)^{\lfloor m/n \rfloor} \right)/2 \;. 
$$ We now introduce \eqref{eq2a} into \eqref{eq10c}, which yields \begin{equation*}\label{eq10d} \sum_{k=0}^{n-1} \cos^{2m}((k+ 1/2) \pi/n) = 2^{1-2m} \, n \biggl(\binom{2m-1}{m-1} + 2 \sum_{p=2,4, \ldots}^{2\lfloor m/2n\rfloor} \binom{2m}{m-pn} - \sum_{p=1}^{\lfloor m/n\rfloor} \binom{2m}{m-pn} \biggr) \,. \end{equation*} At this stage we require the identity given above. Then we arrive at \begin{equation}\label{eq10e} \sum_{k=0}^{n-1} \cos^{2m} \bigl((k+ 1/2) \pi/n \bigr) = 2^{1-2m} \, n \biggl(\binom{2m-1}{m-1} + \sum_{p=1}^{\lfloor m/n\rfloor} (-1)^p \binom{2m}{m-pn} \biggr) \,. \end{equation} This is basically twice Merca's result, which has been given here as \eqref{merca2}. By carrying out a similar calculation with the cosine power in \eqref{eq7} replaced by a sine power and using \eqref{eq2b} instead, one finds that \begin{equation}\label{eq10f} \sum_{k=0}^{n-1} \sin^{2m} \bigl( (k+ 1/2) \pi/n \bigr) = 2^{1-2m} \, n \biggl(\binom{2m-1}{m-1} + \sum_{p=1}^{\lfloor m/n\rfloor} \Bigl( 1 +(-1)^p - (-1)^{np} \Bigr) \binom{2m}{m-pn} \biggr) \,. \end{equation} For odd values of $n$, \eqref{eq10f} reduces to \begin{equation*}\label{eq10fa} \sum_{k=0}^{n-1} \sin^{2m} \bigl((k+ 1/2) \pi/n \bigr) = 2^{1-2m} \, n \biggl(\binom{2m-1}{m-1} + \sum_{p=1}^{\lfloor m/n\rfloor} \binom{2m}{m-pn} \biggr) \,, \end{equation*} while for even values of $n$, it becomes \begin{equation*}\label{eq10fb} \sum_{k=0}^{n-1} \sin^{2m} \bigl( (k+ 1/2) \pi/n \bigr) = 2^{1-2m} \, n \biggl(\binom{2m-1}{m-1} + \sum_{p=1}^{\lfloor m/n\rfloor} (-1)^p \binom{2m}{m-pn} \biggr) \,. \end{equation*} It was mentioned that Merca was able to evaluate finite sums involving the binomial coefficient in a few corollaries by fixing $n$ to small values ranging from unity to 5 or 6 in \eqref{merca1} and \eqref{merca2} and directly evaluating the sums. These results can be verified by carrying out the same procedure with the results in Theorem \ref{main} and by using \eqref{eq10e}. 
The preceding results, given by \eqref{eq8} and \eqref{eq10}, have had a factor $\ell = 2$ introduced into the denominators of the cosine and sine powers in the basic trigonometric power sums of Theorem \ref{main}. We can develop other interesting results by multiplying and dividing the argument in the trigonometric power by integers. For example, if we multiply and divide the argument in the cosine power by 3, then $C(m,n)$ can be expressed as \begin{equation} \label{eq10g} C(m,n) =\sum_{k=0,3,6,\ldots}^{3n-3} \cos^{2m} \Bigl( \frac{k \pi}{3 n} \Bigr) \;. \end{equation} The same applies to $S(m,n)$ when we multiply and divide the argument by 3. To obtain a sum over all values of $k$ from 1 to $3n-1$, we need to write the above sum as \begin{equation} C(m,n)= \frac{1}{3} \sum_{k=0}^{3n-1} \Bigl( 2 \cos \Bigl( \frac{2k \pi}{3} \Bigr)+1 \Bigr) \cos^{2m} \Bigl( \frac{k \pi}{3 n} \Bigr) \;. \end{equation} Consequently, we arrive at the following interesting result \begin{equation}\label{eq11} \sum_{k=0}^{3n-1} \cos \Bigl( \frac{2k \pi}{3} \Bigr) \cos^{2m} \Bigl( \frac{k \pi}{3 n} \Bigr) = \frac{1}{2} \Bigl(3\, C(m,n) -C(m,3n) \Bigr) \,. \end{equation} The corresponding result for $S(m,n)$ is \begin{equation} \label{eq12} \sum_{k=0}^{3n-1} \cos \Bigl( \frac{2k \pi}{3} \Bigr) \sin^{2m} \Bigl( \frac{k \pi}{3 n} \Bigr)= \frac{1}{2} \Bigl(3\, S(m,n) -S(m,3n) \Bigr) \,. 
\end{equation} Next, by introducing the results in Theorem \ref{main} we obtain explicit expressions for both sums, which are \begin{equation*} \small \sum_{k=0}^{3n-1} \cos \Bigl( \frac{2k \pi}{3} \Bigr) \cos^{2m} \Bigl( \frac{k \pi}{3 n} \Bigr)= \begin{cases} \displaystyle \frac{3n}{2^{2m}} \Bigl( \sum_{p=1}^{\lfloor m/n\rfloor}\binom{2m}{m-pn} -\sum_{p=1}^{\lfloor m/3n\rfloor}\binom{2m}{m-3pn} \Bigr) \,,& m \geqslant 3n\,, \cr \displaystyle \frac{3n}{2^{2m}}\sum_{p=1}^{\lfloor m/n\rfloor}\binom{2m}{m-pn} \,,& n \leqslant m< 3n\,, \cr \displaystyle 0\;, & m<n\,, \end{cases} \end{equation*} and \begin{equation*} \small \sum_{k=0}^{3n-1} \cos \Bigl( \frac{2k \pi}{3} \Bigr) \sin^{2m} \Bigl( \frac{k \pi}{3 n} \Bigr)= \begin{cases} \displaystyle \frac{3n}{2^{2m}} \Bigl( \sum_{p=1}^{\lfloor m/n\rfloor} (-1)^{pn} \binom{2m}{m-pn} -\sum_{p=1}^{\lfloor m/3n\rfloor} (-1)^{3pn} & \cr \displaystyle \times \;\; \binom{2m}{m-3pn} \Bigr) \,,& m \geqslant 3n\,, \cr \displaystyle \frac{3n}{2^{2m}}\sum_{p=1}^{\lfloor m/n\rfloor} (-1)^{pn} \binom{2m}{m-pn} \,,& n \leqslant m< 3n\,, \cr \displaystyle 0\;, & m<n\,. \end{cases} \end{equation*} In the above results we see that the final expressions for the sums are now dependent on whether $m$ is greater or less than either $n$ or $3n$, rather than $n$ and $2n$ in the previous example. Furthermore, the series on the lhs can be written as \begin{equation} \sum_{k=0}^{3n-1} \cos \Bigl(\frac{2 k \pi}{3} \Bigr) \left\{ \begin{matrix} \cos^{2 m} \\ \sin^{2m} \end{matrix} \right\} \left( \frac{k \pi}{3n}\right) = \sum_{k=0}^{3n-1} (-1)^k \cos \Bigl(\frac{k \pi}{3} \Bigr) \left\{ \begin{matrix} \cos^{2 m} \\ \sin^{2m} \end{matrix} \right\} \left( \frac{k \pi}{3n}\right)\, . \end{equation} Nevertheless, we are unable to obtain the corresponding sum with $\cos (k \pi/3)$ in the summand instead of $\cos(2 k \pi/3)$. To accomplish that, we need to examine the situation when we multiply and divide by $\ell=4$ inside the trigonometric power. 
For the $\ell=4$ situation the corresponding form of \eqref{eq10a} becomes \begin{equation*} \label{eq12a} C(m,n) =\sum_{k=0,4,8,\ldots}^{4n-4} \cos^{2m} \Bigl( \frac{k \pi}{4 n} \Bigr) \;. \end{equation*} In this case the sum over all values of $k$ becomes \begin{equation*}\label{eq13} C(m,n)= \frac{1}{4} \sum_{k=1}^{4n-1} \Bigl( 2 \cos \Bigl( \frac{k \pi}{2} \Bigr) +1 +(-1)^k \Bigr) \cos^{2m} \Bigl( \frac{k \pi}{4 n} \Bigr) \;. \end{equation*} The term involving unity in the above equation yields $C(m,4n)$, while the term with the oscillating phase is simply \eqref{eq7} with $n$ replaced by $2n$ or $2 C(m,2n)-C(m,4n)$. Therefore, we find that the $C(m,4n)$ contributions cancel each other and we are left with \begin{equation*}\label{eq14} \sum_{k=0}^{4n-1} \cos \Bigl( \frac{k \pi}{2} \Bigr) \cos^{2m} \Bigl( \frac{k \pi}{4n} \Bigr)= 2 \,C(m,n) - C(m,2n) \,, \end{equation*} which is just another derivation of \eqref{eq7}. That is, we do not obtain a new basic trigonometric power sum as we did when we divided and multiplied by 3 inside the cosine power. In fact, the same reducibility arises when we divide and multiply by 8. Therefore, we conjecture that multiplying and dividing by $2^n$ in either $C(m,n)$ or $S(m,n)$ will not produce new formulas. So let us now turn our attention to when we multiply and divide by 6 in inside the cosine power of $C(m,n)$. Since we are dealing with an even number, we expect some reducibility to occur. Then $C(m,n)$ becomes \begin{equation*} \label{eq15} C(m,n) =\sum_{k=0,6,12,\ldots}^{6n-6} \cos^{2m} \Bigl( \frac{k \pi}{6 n} \Bigr) \;. 
\end{equation*} With the aid of the identity \begin{equation} \label{eq15a} 2 \cos \Bigl( \frac{k \pi}{3} \Bigr) + 2 \cos \Bigl( \frac{2 k \pi}{3} \Bigr) +1 -(-1)^{k+1} = \begin{cases} 6\;, & k \equiv 0 \; ({\rm mod}\, 6)\, , \\ 0\;,& {\rm otherwise}\,, \end{cases} \end{equation} the above equation can be written alternatively as \begin{equation}\label{eq16} C(m,n)= \frac{1}{6} \sum_{k=0}^{6n-1} \Bigl( 2 \cos \Bigl( \frac{k \pi}{3} \Bigr) +2 \cos \Bigl( \frac{2k \pi}{3} \Bigr) + (-1)^k +1 \Bigr) \cos^{2m} \Bigl( \frac{k \pi}{6 n} \Bigr) \;. \end{equation} Expressing the sum of cosines as a product, we find that \eqref{eq16} becomes \begin{equation*}\label{eq17} C(m,n)= \frac{1}{6} \sum_{k=0,2,4,\ldots}^{6n-2} \Bigl( 4 \cos \Bigl( \frac{k \pi}{2} \Bigr) \,\cos \Bigl( \frac{k \pi}{6} \Bigr) + (-1)^k +1 \Bigr) \cos^{2m} \Bigl( \frac{k \pi}{6 n} \Bigr) \;. \end{equation*} Replacing $2k$ by $k$ in the above result yields \begin{equation*}\label{eq18} C(m,n)= \frac{1}{3} \sum_{k=1}^{3n-1} \Bigl( 2 (-1)^k \, \cos \Bigl( \frac{k \pi}{3} \Bigr) +1 \Bigr) \cos^{2m} \Bigl( \frac{k \pi}{3 n} \Bigr) \;. \end{equation*} Hence we arrive at \begin{equation*} \sum_{k=0}^{3n-1} (-1)^k \,\cos \Bigl( \frac{k \pi}{3} \Bigr)\, \cos^{2m} \Bigl( \frac{k \pi}{3 n} \Bigr) =\frac{1}{2} \, \Bigl( 3\, C(m,n) -C(m,3n) + 3 \Bigr) \,. \end{equation*} This is just \eqref{eq11} again except for the term of $3/2$. Even though we have demonstrated the reducible nature of basic trigonometric power sums, we have not been able to produce a result with $\cos(k \pi/3)$ in the summand instead of $\cos(2 k \pi/3)$. However, it can be seen that $\cos(k \pi/3)$ does appear in the identity given by \eqref{eq15a}.
Therefore, let us construct a situation involving the identity and the cosine power together, viz.\ \begin{equation}\label{eq19} \frac{1}{3} \sum_{k=0}^{3n-1} \Biggl( \cos \Bigl( \frac{\pi k}{3} \Bigr) + \cos \Bigl( \frac{2 \pi k}{3} \Bigr) + \frac{1-(-1)^{k+1}}{2} \Biggr) \cos^{2m} \Bigl( \frac{ k \pi}{3 n} \Bigr) = \sum_{k=0,6,12,\ldots}^{6 \lfloor(3n-1)/6 \rfloor}\cos^{2m} \Bigl( \frac{k \pi}{3 n} \Bigr)\,. \end{equation} The first series on the lhs of the above equation is the result we wish to determine, while the second series is given by \eqref{eq11}. The next term with $1/2$ is simply $C(m,3n)/2$. Thus, we are left with two series to evaluate. The first of these can be determined by replacing $n$ with $3n/2$ in \eqref{eq7}, which yields \begin{equation} \label{eq20} \sum_{k=0}^{3n-1} (-1)^k \cos^{2m} \Bigl( \frac{k \pi}{3 n} \Bigr)= 2 C(m,3n/2) -C(m,3n) \,. \end{equation} Inserting the results for $C(m,n)$ in Theorem \ref{main} yields \begin{equation} \label{eq20a} \small \sum_{k=0}^{3n-1} (-1)^k \cos^{2m} \Bigl( \frac{k \pi}{3 n} \Bigr)= \begin{cases} \displaystyle \frac{6n}{2^{2m}} \Biggl( \sum_{p=1}^{\lfloor 2m/3n\rfloor}\binom{2m}{m-3pn/2} -\sum_{p=1}^{\lfloor m/3n\rfloor}\binom{2m}{m-3pn} \Biggr) \,,& m \geqslant 3n\,, \cr \displaystyle \frac{6n}{2^{2m}}\sum_{p=1}^{\lfloor 2m/3n\rfloor}\binom{2m}{m-3pn/2} \,,& 3n/2 \leqslant m< 3n\,, \cr \displaystyle 0\;, & m<3 n/2 \,. \end{cases} \end{equation} For the above result to be valid, $n$ must also be even. Since $n$ is even, $\lfloor (3n-1)/6 \rfloor = n/2-1$. Then the series on the rhs of \eqref{eq19} can be expressed as \begin{equation} \label{eq21} \sum_{k=0,6,12,\ldots}^{6 \lfloor (3n-1)/6 \rfloor} \cos^{2m} \Bigl( \frac{k \pi}{3 n} \Bigr)= \sum_{k=0}^{n/2-1} \cos^{2m} \Bigl(\frac{k \pi}{n/2} \Bigr)\,. \end{equation} In other words, the above sum is equal to $C(m,n/2)$ provided $n$ is even. 
If we introduce \eqref{eq20} and \eqref{eq21} into \eqref{eq19} together with the other previously mentioned results, then after a little algebra we find that $$ \sum_{k=0}^{3n-1} \cos \Bigl( \frac{k \pi}{3} \Bigr)\, \cos^{2m} \Bigl( \frac{k \pi}{3 n} \Bigr)= 3\, C(m,n/2) -3\, C(m,n)/2 + C(m,3n)/2-C(m,3n/2) \, , $$ where $n$ is an even positive integer. Introducing the results from Theorem \ref{main} into the above equation yields \begin{equation*} \small \sum_{k=0}^{3n-1} \cos \Bigl( \frac{k \pi}{3} \Bigr)\, \cos^{2m} \Bigl( \frac{k \pi}{3 n} \Bigr)= \begin{cases} \displaystyle \frac{3n}{2^{2m}} \Biggl( \sum_{p=1}^{\lfloor 2m/n\rfloor}\binom{2m}{m-pn/2} - \sum_{p=1}^{\lfloor m/n\rfloor}\binom{2m}{m-pn} & \cr \displaystyle - \sum_{p=1}^{\lfloor 2m/3n\rfloor} \binom{2m}{m-3pn/2} + \sum_{p=1}^{\lfloor m/3n \rfloor} \binom{2m}{m-3pn}\Biggr) \,,& m \geq 3n\,, \cr \displaystyle \frac{3n}{2^{2m}} \Biggl( \sum_{p=1}^{\lfloor 2m/n\rfloor}\binom{2m}{m-pn/2} - \sum_{p=1}^{\lfloor m/n\rfloor}\binom{2m}{m-pn} & \cr \displaystyle - \sum_{p=1}^{\lfloor 2m/3n\rfloor} \binom{2m}{m-3pn/2}\Biggr) \,,& 3n/2 \leqslant m < 3n\,, \cr \displaystyle \frac{3n}{2^{2m}} \Biggl( \sum_{p=1}^{\lfloor 2m/n\rfloor}\binom{2m}{m-pn/2} - \sum_{p=1}^{\lfloor m/n\rfloor}\binom{2m}{m-pn} \Biggr) \,,& n \leqslant m < 3n/2\,, \cr \displaystyle \frac{3n}{2^{2m}}\sum_{p=1}^{\lfloor 2m/n\rfloor}\binom{2m}{m-pn/2} \,,& n/2 \leqslant m< n\,, \cr \displaystyle 0\;, & m< n/2\,. \end{cases} \end{equation*} The sine version of the above basic trigonometric power sum can be obtained in a similar manner with the various $C(m,n)$ terms replaced by their corresponding $S(m,n)$ terms. In Appendix B we examine the case when the argument of the cosine power is multiplied and divided by 5. In this case two different prefactors of the cosine powers, viz., $\cos(2 \pi k/5)$ and $\cos(4 \pi k /5)$, arise, which are not easily decoupled from one another.
This implies that it is not possible without additional information to obtain elegant results such as those above when we multiply and divide by numbers that possess prime number factors greater than or equal to 5. \section{Generating functions} \label{sec4} In this section we use the results of Section \ref{sec2} to determine several generating functions. We begin by defining the exponential generating function \begin{equation} \label{gf1} G_1(n;z):=\sum_{m=0}^{\infty} \frac{z^m}{m!}\, C(m/2,n)\, . \end{equation} After some algebra we eventually arrive at \begin{align}\label{gf2} G_1(n;z) & = \sum_{k=0}^{n-1} e^{z\cos(k\pi /n)} = \sum_{k=0}^{n-1} \cosh \Bigl( z \cos \Bigl( \frac{k\pi}n \Bigr) \Bigr) + \sinh z \nonumber \\ & = 2n\sum_{k=0}^{\infty}\Bigl( \frac{z}{2} \Bigr)^{2k} \sigma_k(n) +n I_0(z) + \sinh z \, , \end{align} where \begin{equation}\label{gf2a} \sigma_k(n):= \frac 1{(2k)!}\sum_{p=1}^{\lfloor k/n \rfloor}\binom{2k}{k+pn}\, . \end{equation} and $I_0(z)$ represents the modified Bessel function of zeroth order. Note that for $k < n$, $\sigma_k(n) =0$. The result given by \eqref{gf2} can also be regarded as the generating function for $\sigma_k(n)$. For fixed small values of $n$, the $\sigma_k(n)$ can be determined by the direct evaluation of the series $C(m,n)$ in Theorem \ref{main} or its variants in the corollaries. Moreover, it was mentioned in the introduction that Merca has evaluated several combinatorial identities involving the binomial coefficient in a series of corollaries in \cite{M2012}. In fact, Corollary 6 of this reference presents specific values of the $\sigma_k(n)$ for $n$ ranging from unity to six. These results have been determined via \eqref{merca1}, which we have seen follows from Theorem \ref{main}. We can extend $G_1(n;z)$ to $G_1(n,q;z)$ by introducing the result for $C(m/2,n,q)$ as given in Corollary\ \ref{cor2} into \eqref{gf1}. 
Then for odd values of $q$, the generating function $G_1(n,q;z)$ becomes \begin{align}\label{gf3} G_1(n,q;z) & = \sum_{k=0}^{n-1} e^{z\cos(q k\pi /n)} = \sum_{k=0}^{n-1} \cosh \Bigl( z \cos \Bigl( \frac{qk\pi}n \Bigr) \Bigr) + \sinh z \nonumber \\ & = 2n\sum_{k=0}^{\infty}\Bigl( \frac{z}{2} \Bigr)^{2k} \sigma_k(n) +n I_0(z) + \sinh z \,. \end{align} The intermediate member involving the summation over the hyperbolic cosine has been obtained by: (1) expanding the exponential as a series in the first sum, (2) splitting the resultant sum into two equal parts, (3) substituting $\cos^m(qk \pi/n)$ by $(-1)^{qm} \cos^m(\pi q(n-k)/n)$ in one of the parts and (4) replacing $n-k$ by $k$. Moreover, the intermediate member does not apply for even values of $q$, although the first and third members are still equal to one another. We can also derive the generating function for the case when $C(m/2,n,q)$ is replaced by $S(m/2,n,q)$ in the preceding analysis. That is, by defining the exponential generating function, $H_1(n,q;z)$, as \begin{equation*} \label{gf4} H_1(n,q;z):=\sum_{m=0}^{\infty} \frac{z^m}{m!}\, S(m/2,n,q)= \sum_{k=0}^{n-1} e^{z \sin(qk \pi/n)} \, , \end{equation*} we can obtain a similar closed-form solution to \eqref{gf3}, provided $q$ is an even integer. In this instance we introduce the result in Corollary \ref{cor2} and split the resulting sum. Then we replace $\sin^m(k\pi/n)$ by $\sin^m(\pi-k\pi/n)$ and proceed by combining the summations. Hence we find that \begin{align}\label{gf4a} H_1(n,q;z) & = n I_0(z) + 2n\sum_{k=0}^{\infty}\Bigl( \frac{z}{2} \Bigr)^{2k} \sigma^{-}_k(n) \, , \end{align} where $\sigma^{-}_k(n)$ has replaced $\sigma_k(n)$ and is defined as \begin{equation*}\label{gf4b} \sigma^{-}_k(n):= \frac{1}{(2k)!} \, \sum_{p=1}^{\lfloor k/n \rfloor} (-1)^{pn} \binom{2k}{k+pn} \,.
\end{equation*} \label{gf4c} In obtaining \eqref{gf4a} we have split the basic sine power sum and replaced $\sin^{2m}(\pi k/n)$ in one of the resulting sums by $\sin^{2m}(\pi +\pi(n-k)/n)$. If $n$ is even, then $\sigma^{-}_k(n)$ reduces to $\sigma_k(n)$, while if it is odd, then $\sigma^{-}_k(n)$ becomes \begin{equation*} \sigma^{-}_k(n)= \frac{1}{(2k)!} \, \sum_{p=1}^{\lfloor k/n \rfloor} (-1)^{p} \binom{2k}{k+pn} \,. \end{equation*} As mentioned in the introduction, Merca \cite{M2012} obtains identities for the above result by fixing $n$ and evaluating the series in \eqref{merca2} directly. The above results are not the only examples where the results of Section \ref{sec2} can be used to obtain generating functions. For example, when $|z|<1$, we can expand the denominator in the sum $\sum_{k=1}^{n-1} 1/(1-z \cos^2(k \pi/n))$ and introduce \eqref{eq2a}, thereby obtaining \begin{align*}\label{gf5} \frac{1}{n}\sum_{k=1}^{n-1}\frac 1{1-z\cos^2(k \pi/n)} = \sum_{k=0}^{\infty} \Bigl(\frac{z}{4} \Bigr)^k \left( \frac{(2k)!}{(k!)^2} +2 \sum_{p=1}^{\lfloor k/n \rfloor} \binom{2k}{k-pn} \right) - \frac{1}{n(1-z)}\, . \end{align*} The last term on the rhs, which arises from removing the $k=0$ term in $C(m,n)$, can be incorporated as the $k=0$ term in the sum on the lhs. In addition, by introducing the duplication formula for the gamma function, viz., No. 8.335(1) in \cite{GR1994}, we find that the first term on the rhs reduces to the binomial series for $1/\sqrt{1-z}$, while we introduce the definition for $\sigma_k(n)$ or \eqref{gf2a} into the second term on the rhs. Consequently, we arrive at $$ \frac{1}{n}\sum_{k=0}^{n-1}\frac 1{1-z\cos^2(k \pi/n)}=\frac{1}{\sqrt{1-z}}+2\sum_{k=0}^{\infty}\left( \frac{z}{4} \right)^k (2k)!\,\sigma_k(n)\, . $$ Similarly, one can consider the analogous sum where $\cos(k \pi/n)$ is replaced by $\sin(k \pi/n)$.
In this instance one employs \eqref{eq2b} in the analysis, which finally yields $$ \frac{1}{n}\sum_{k=1}^{n-1}\frac 1{1-z\sin^2(k \pi/n)}=\frac{1}{\sqrt{1-z}}+2\sum_{k=0}^{\infty}\left( \frac{z}{4} \right)^k (2k)!\,\sigma^{-}_k(n)\, . $$ \section{Closed walks} \label{sec5} Here we demonstrate that the main result of Section \ref{sec2} can be used in calculating closed walks on a path and also in a cycle. We begin by recalling that the adjacency matrix $A$ of a graph $G$ is the binary matrix with rows and columns indexed by the vertices of $G$, such that the $(i,j)$-entry is equal to $1$ if $i$ and $j$ are adjacent, and zero otherwise. Since loops are not allowed in the graphs under consideration, the diagonal entries of $A$ are all zero. A walk of length $r$ on $G$ represents a sequence along $r+1$ adjacent vertices (not necessarily different) and hence, possesses $r$ edges. A walk is said to be closed if the first and terminal vertices or endpoints are the same. A closed walk is known as a circuit when it has no repeating edges, while a closed walk with no repeating vertices other than its endpoints is referred to as a cycle. Evaluating the number of closed walks on a graph has been an active topic of research that spans across combinatorics, graph theory, and linear algebra (cf. \cite{CRS2010, HS1979, KN2013, S2013, TWKHM2013}). Although our result for the number of closed walks will be general, when we turn to cycles, we will need to restrict the closed walks to even length and the cycles to odd order.
With the aid of \eqref{eq2a} we can now prove the following theorem: \begin{theorem} \label{thm51} The number of closed walks of length $2m$ on a path $P_{n-1}$ is given by $$ p(2m) = \begin{cases} \displaystyle 2n\left(\binom{2m-1}{m-1}+\sum_{k=1}^{\lfloor m/n\rfloor}\binom{2m}{m-kn}\right)-2^{2m} \, , & m\geqslant n\,, \cr \displaystyle 2n \binom{2m-1}{m-1}-2^{2m}\;, & m<n\,, \end{cases} $$ \end{theorem} \begin{proof} It is well-known that the $(i,j)$ entry of $A^k$ represents the number of walks on $G$ of length $k$ with endpoints $i$ and $j$. Furthermore, if $\lambda$ is an eigenvalue of $A$, then $\lambda^k$ is an eigenvalue of $A^k$. Hence the trace of $A^k$ is equal to the sum of the $k$th powers of the eigenvalues of $A$, which, in turn, equals the total number of closed walks of length $k$ on $G$, which we denote here by $p(k)$ (cf. \cite[p.14]{CRS2010}). Because the adjacency matrix can be represented by a tridiagonal matrix with ones on the sub- and super-diagonals and zeros elsewhere, its eigenvalues for a path $P_{n-1}$ with $n-1$ vertices are $2\cos(\ell\pi/n)$, where $\ell=1,\ldots,n-1$, (cf., e.g., \cite{dF2006}). The result in the theorem follows by summing over all values of $\ell$ and then by applying \eqref{eq2a}. This completes the proof. \end{proof} It is interesting to notice that with Theorem \ref{thm51} we get the sequence A$198632$ in \cite{L2011}. We now turn our attention to closed walks in a cycle by presenting the following theorem. \begin{theorem} The number of closed walks of length $2m$ on the cycle $C_{n}$, where $n$ is odd, is given by $$ p(2m) = \begin{cases} \displaystyle 2n \Bigl(\binom{2m-1}{m-1} +\sum_{r=1}^{\lfloor m/n\rfloor} \binom{2m}{m-rn} \Bigr)\, , & m\geqslant n\;, \cr \displaystyle 2 n \binom{2m-1}{m-1}\;, & m<n\;. \end{cases} $$ \end{theorem} \begin{proof} In this instance the eigenvalues of an $n$-cycle are equal to $2\cos(2\ell\pi/n)$, for $\ell=0,1,\ldots,n-1$. 
Because of this, we can follow the previous proof except that we use \eqref{eq5a} instead of \eqref{eq2a}. Consequently, we arrive at the result in the theorem. This completes the proof. \end{proof} \section{Conclusion} In this paper we have presented combinatorial forms for the two main basic trigonometric power sums $C(m,n)$ and $S(m,n)$ in Theorem \ref{main}. We have been able to extend these results to derive combinatorial forms for other basic trigonometric power sums, where either the arguments in the trigonometric power and/or their limits have been altered. Where possible we have been able to relate our results to existing solutions such as those appearing in \cite{M2012}. In addition, we have demonstrated that our main results can be applied to generating functions, but even more interesting, is that they were used to determine the number of closed walks on a path and in a cycle. In the future we intend to apply the results presented here when we study the general or twisted Dowker \cite{Do1992} and related sums \cite{Cv2012}. \section{Appendix A} In the introduction it was stated that Berndt and Yeap's result for the sum over even powers of cotangent, viz., \eqref{eq1a}, was imprecise and that a better formulation was given by \eqref{eq1b}. Here, we prove this by referring to \cite{BY2002}. To enable the reader to develop an understanding of how polynomials in $k$ arise when evaluating finite sums of powers of the cotangent, we shall also show how the formula is implemented for specific values of $n$, which is also lacking in \cite{BY2002}. Broadly speaking, Berndt and Yeap derive \eqref{eq1b} via the third case considered in \cite[Theorem 2.1]{BY2002}. The other two cases will be discussed in a future work. 
The theorem deals with the contour integration of the function $$f(z)=\cot^m(\pi z) \cot(\pi(hz-a))\cot(\pi(kz-b))$$ over a positively oriented rectangle with vertices at $\pm i R$ and $1 \pm iR$, where $R>\epsilon$, and possessing semi-circular indentations at 0 and 1 of radius $\epsilon$, where $\epsilon< \min\{ (h-1+a)/h,(k-1+b)/k \}$. The third case is represented by $a=b=0$ and hence $f(z)$ becomes \begin{equation}\label{ap1} f(z) = (hk)^{-1} (\pi z)^{-m-2} \Bigl(\sum_{j=0}^{\infty} a(j) x^j \Bigr)^m \sum_{\mu=0}^{\infty} a(\mu) (h' x)^{\mu} \sum_{\nu=0}^{\infty} a(\nu) (k' x)^{\nu} \;\;, \end{equation} where $a_j = (-1)^j 2^{2j} B_{2j}/(2j)!$, $x = (\pi z)^2$, $h' = h^2$ and $k' = k^2$. From \eqref{ap1} we see that there is a pole of order $m+2$ at $z = 0$. Therefore, the aim is to evaluate the residue of $f(z)$ at $z = 0$, which is given by \begin{equation}\label{ap2} {\rm Res} \,f(z) \Bigl{|}_{z=0}= \frac{1}{(m+1)!} \, \frac{d^{m+1}}{dz^{m+1}}\, \Bigl( z^{m+2} f(z) \Bigr) \Bigl{|}_{z=0}\;. \end{equation} Before we can evaluate this, we need to evaluate the product of the infinite series on the rhs of \eqref{ap1}. Berndt and Yeap proceed by introducing coefficients $C(j_1,\ldots,j_m,\mu,\nu)$, which are not given explicitly, but that they arise when a sum over all $(m+2)$-tuples $(j_1,\ldots, j_m,\mu,\nu)$ is evaluated under the condition that $2\left(\sum_{i=1}^m j_i +\mu+ \right. $ $\left.\nu \right)= m+1$. This, however, leads to the imprecision in \eqref{eq1b}. Here we adopt a different approach based on extending the Cauchy product formula \cite{Wei2015}. We begin by multiplying one of the series in the power by the penultimate series in \eqref{ap1}. If we denote this product as $P_1$, then by the Cauchy product formula, it can be expressed as \begin{equation*} \label{ap3} P_1= \sum_{j=0}^{\infty} x^j A_1(j) \;, \end{equation*} where $A_1(j) = \sum_{\ell_1=0}^{j} a(\ell_1) h^{'\,\ell_1} a(j-\ell_1)$. 
Now we multiply $P_1$ by the final series in \eqref{ap1}, which yields \begin{equation*}\label{ap4} P_2= P_1 \; \sum_{\nu=0}^{\infty} a(\nu) k^{'\,\nu} x^{\nu}= \sum_{j=0}^{\infty} x^j \sum_{\ell_1=0}^{j}a(\ell_1) k^{'\, \ell_1} A_1(j-\ell_1) = \sum_{j=0}^{\infty} x^j A_2(j) \;, \end{equation*} and \begin{equation*} \label{ap5} A_2(j) = \sum_{\ell_1=0}^{j} \sum_{\ell_2=0}^{j-\ell_1} a(\ell_1) \, a(\ell_2)\, k^{'\,\ell_1} h^{'\,\ell_2} \, a(j-\ell_1-\ell_2) \, . \end{equation*} Next we multiply $P_2$ by another series in the power, obtaining \begin{equation*}\label{ap6} P_3= P_2 \sum_{j=0}^{\infty} a(j) x^{j}= \sum_{j=0}^{\infty} x^j \sum_{\ell_1=0}^{j} a(\ell_1) A_2(j-\ell_1)= \sum_{j=0}^{\infty} A_3(j) x^j\;\;, \end{equation*} where \begin{equation*}\label{ap7} A_3(j) =\sum_{\ell_1=0}^{j}\sum_{\ell_2=0}^{j-\ell_1} \sum_{\ell_3=0}^{j-\ell_1-\ell_2} h^{'\,\ell_2}k^{'\, \ell_3}\, a(\ell_1)\, a(\ell_2)\, a(\ell_3) \, a(j-\ell_1-\ell_2-\ell_3) \;. \end{equation*} Continuing this process until all the series have been multiplied out, we eventually arrive at \begin{equation*}\label{ap8} \Bigl(\sum_{j=0}^{\infty} a(j) x^j \Bigr)^m \sum_{\mu=0}^{\infty} a(\mu) (h' x)^{\mu} \sum_{\nu=0}^{\infty} a(\nu) (k' x)^{\nu} = \sum_{j=0}^{\infty} A_{m+1}(j) \,x^j \;\;, \end{equation*} where the coefficients are given by \begin{align}\label{ap9} A_{m+1}(j) =& \sum_{\ell_1=0}^{j}\sum_{\ell_2=0}^{j-\ell_1} \sum_{\ell_3=0}^{j-\ell_1-\ell_2} \cdots \sum_{\ell_{m+1}=0}^{j-\ell_1-\ell_2-\cdots-\ell_{m}} h^{'\,\ell_m}k^{'\,\ell_{m+1}}\, \nonumber\\ & \times \;\; \prod_{i=1}^{m+1} a(\ell_i) \, a(j-\ell_1-\ell_2-\dots -\ell_m-\ell_{m+1}) \;.
\end{align} If we let $\ell_s=\sum_{i=1}^{m+1} \ell_{i}$ and replace the various terms in \eqref{ap9} by their values in $f(z)$, then we find that \begin{align}\label{ap10} f(z) & = (\pi z)^{-m-2}\sum_{j=0}^{\infty} (-1)^j (2\pi z)^{2j} \sum_{\ell_1,\ell_2,\ell_3, \ldots,\ell_{m+1}=0}^{j,j-\ell_1,j-\ell_1-\ell_2,\ldots,j-\ell_s+\ell_{m+1}} h^{'\,2\ell_m-1}\,k^{'\,2\ell_{m+1}-1} \nonumber\\ & \times \;\; \prod_{i=1}^{m+1} \frac{B_{2\ell_i}}{(2\ell_i)!} \, \frac{B_{2(j-\ell_s)}}{(2(j-\ell_s))!}\;. \end{align} Hence there is a pole of order $m+2$ at $z=0$. Introducing the above result into \eqref{ap2}, we see that there is only a residue when $m+1$ is equal to one of the even powers of $z$ inside the summation over $j$. Therefore, $m$ must be odd for $f(z)$ to yield a residue. Introducing \eqref{ap10} into \eqref{ap2}, with $m$ replaced by $2n-1$, where $n$ is a positive integer, yields \begin{equation*}\label{ap11} {\rm Res} \,f(z) \Bigl{|}_{z=0}= \frac{(-1)^n \,2^{2n} }{\pi} \sum_{\ell_1=0,\ell_2=0, \ldots,\ell_{2n}=0}^{n, n-\ell_1,\ldots,n-\ell_s+\ell_{2n}} h^{'\,2\ell_{2n-1}-1} k^{'\,2\ell_{2n}-1} \prod_{i=1}^{2n} \frac{B_{2\ell_i}}{(2\ell_i)!} \, \frac{B_{2n-2\ell_s}}{(2n-2\ell_s)!}\;, \end{equation*} where $\ell_s =\sum_{i=1}^{2n} \ell_i$. To obtain a finite sum over powers of the cotangent, we need to consider the entire contour around $f(z)$. This means that there are simple poles at $z = (j+a)/h$ and $z = (r+b)/k$, where $j$ and $r$ are non-negative integers such that $0<j+a<h$ and $0<r+b<k$. Since this is the third case, where $a=b=0$, these become $z=j/h$ and $z=r/k$. Moreover, because $h=1$, we can disregard the poles at $z=j$ , while $r$ ranges from 1 to $k - 1$. In addition, by noting that $\lim_{y \to \infty} \cot(c(x\pm iy)+d)= \pm i$, for $c>0$ and $d$ real, Berndt and Yeap are able to evaluate the contour integral directly, whereby obtaining \begin{equation*} \label{ap12} \frac{1}{2\pi \, i}\int_C f(z) \, dz= \frac{(-1)^n}{\pi} \;. 
\end{equation*} By applying Cauchy's residue theorem, we finally arrive at \eqref{eq1b}. To conclude this appendix, let us now discuss the implementation of \eqref{eq1b} for $n = 2$. Then the $j_i$ range from $j_1$ to $j_4$. For $n-j_s$ to be non-zero, we require some of the $j_i$ to be zero, whereas according to the Berndt-Yeap result given by \eqref{eq1a}, they should be greater than zero. For $n =2$, $j_0$ can be equal to 0, 1, or 2. When $j_0=2$, all the other $j_i$'s must vanish and the sum in \eqref{eq1b} contributes the value $-(-1)^2 (2^4) k^3 B_4/4!$, which in turn equals $-k^3/45$ since $B_4 = -1/30$. When $j_0=1$, either the remaining $j_i$ equal unity or $2-j_s$ equals unity. Hence there are four possibilities, each yielding the same contribution. The total contribution for $j_0 =1$ becomes $4 k ((-1) (2^2) B_2/2!)^2$ or $4k/9$ since $B_2 = 1/6$. When $j_0=0$, we have two separate cases. In the first of these cases either one of the remaining $j_i$ or $2-j_s$ equals 2. Since there are four possibilities, we obtain a contribution of $4 \cdot 2^4 B_4/(4!\, k)$ for this case. For the second case one of the remaining $j_i$ or $2-j_s$ is equal to unity and another one must also be equal to unity. Since there are effectively four variables including $2-j_s$, this means there are $\binom{4}{2}$ or 6 combinations. Therefore, the contribution from the second case is $6 (-2^2 B_2/2!)^2/k$. Combining the two cases yields the total contribution for $j_0=0$, which is $(-4/45 +2/3)k^{-1}$. Thus, \eqref{eq1b} for $n=2$ gives \begin{equation*}\label{ap13} \frac{1}{k} \sum_{r=1}^{k-1} \cot^4 \Bigl( \frac{\pi r}{k} \Bigr) =1- (-k^3/45 +4 k/9 +26/45 k) \;. \end{equation*} After a little algebra, one eventually obtains \begin{equation*}\label{ap14} \sum_{r=1}^{k-1} \cot^4 \Bigl( \frac{\pi r}{k} \Bigr) = \frac{1}{45} \, (k-1) (k-2) (k^2 +3k-13)\;, \end{equation*} which appears as Corollary\ 2.6a in Berndt and Yeap \cite{BY2002}. 
In a similar fashion one can calculate the results for $n = 3$ and $n = 4$, the details of which are not presented here. After a little algebra, one finds that \begin{equation}\label{ap15} \sum_{r=1}^{k-1} \cot^6 \Bigl( \frac{\pi r}{k} \Bigr) = \frac{1}{945} \, (k-1) (k-2) \bigl( 2k^4+6 k^3-28 k^2 -96 k +251 \bigr) \;\;, \end{equation} and \begin{eqnarray*} \sum_{r=1}^{k-1} \cot^8 \Bigl( \frac{\pi r}{k} \Bigr) &= & \frac{1}{14175} \, (k-1) (k-2) \bigl( 3k^6+9 k^5-59 k^4 \\ & &- \;195 k^3+ 457 k^2 +1761 k -3551 \bigr) \;. \end{eqnarray*} By using a different method, Gessel has obtained \eqref{ap15}, which, aside from a phase factor, appears as $q_6(n)$ in \cite{G1997}. It should also be mentioned that beyond $n = 4$, the calculations become cumbersome due to the rapidly increasing number of combinations when the $j_i$ are summed to $n$. For these values of $n$, a computer program will be needed to evaluate \eqref{eq1b}. \section{Appendix B} In this appendix we consider multiplying and dividing the argument in the trigonometric powers of the sums $C(m,n)$ and $S(m,n)$ by 5, or what is referred to as the $\ell =5$ case according to the terminology of Section \ref{sec4}. In so doing, the material presented here should enable the reader to consider other values of $\ell$, although we shall see that higher values of $\ell$ are not as tractable as the cases studied in Section \ref{sec3}. To investigate the $\ell=5$ case, we require the following general identity: \begin{equation}\label{b1} \sum_{j=1}^{\ell} e^{2 \pi ij k/\ell} = \begin{cases} \ell\,, & \quad k \equiv 0\;\; ({\rm mod}\; \ell)\, , \cr 0 \,, & \quad \;\; {\rm otherwise}. \end{cases} \end{equation} Multiplying and dividing the argument in the cosine power of $C(m,n)$ as defined in Section \ref{sec4} by $5$, we obtain $$ C(m,n) = \sum_{k=0,5,10,\ldots}^{5n-5} \cos^{2m} \Bigl( \frac{k \pi}{5 n} \Bigr)\,. $$ Next we put $\ell = 5$ in \eqref{b1} and introduce it into the above equation.
After a little algebra, we arrive at \begin{equation} \label{b2} C(m,n)= \frac{1}{5}\sum_{k=0}^{5n-1} \Bigl( 2\, \cos \Bigl( \frac{2 \pi k}{5} \Bigr) + 2 \, \cos \Bigl( \frac{4 \pi k}{5} \Bigr) +1 \Bigr) \cos^{2m} \Bigl( \frac{k \pi}{5 n} \Bigr) \,. \end{equation} The last sum on the rhs of \eqref{b2} is $C(m,5n)$. Hence we are left with two distinct sums. To isolate these sums, we need to consider an even multiple of $5$, e.g., $\ell = 10$, since we observed that the basic trigonometric sums in Section \ref{sec4} turned out to be reducible when $\ell$ was even. By multiplying and dividing the argument of the cosine power in $C(m,n)$ by $10$, we find that $$ C(m,n) = \sum_{k=0,10,20,\ldots}^{10n-10} \cos^{2m} \Bigl( \frac{k \pi}{10 n} \Bigr)\,. $$ Now we introduce the $\ell= 10$ version of \eqref{b1} into the above result, which after a little algebra yields \begin{eqnarray*} C(m,n)& = & \frac{1}{10} \sum_{k=0}^{10n-1} \Bigl( 2\, \cos \Bigl( \frac{\pi k}{5} \Bigr) + 2\, \cos \Bigl( \frac{2 \pi k}{5} \Bigr) + 2\, \cos \Bigl( \frac{3 \pi k}{5} \Bigr) \\ & & + \; 2 \, \cos \Bigl( \frac{4 \pi k}{5} \Bigr) +1 +(-1)^k \Bigr) \cos^{2m} \Bigl( \frac{k \pi}{10 n} \Bigr) \,. \end{eqnarray*} The above result can be simplified by introducing the trigonometric identity for the sum of two cosines, which is given as No. 1.314(3) in \cite{GR1994}. In this instance we sum the first and fourth cosines on the rhs and then the second and third cosines. Then we obtain \begin{eqnarray} 10\, C(m,n)& = & \sum_{k=0}^{10n-1} \Bigl( 4\, \cos \Bigl( \frac{\pi k}{2} \Bigr) \cos \Bigl( \frac{3 \pi k}{10} \Bigr) + 4\, \cos \Bigl( \frac{\pi k}{2} \Bigr) \cos \Bigl( \frac{\pi k}{10} \Bigr) \nonumber\\ & & + \; 1 +(-1)^k \Bigr) \cos^{2m} \Bigl( \frac{k \pi}{10 n} \Bigr) \,. 
\label{b4} \end{eqnarray} In \eqref{b4} all terms with odd values of $k$ vanish, so we can replace $k$ by $2k$, which leads to \begin{equation*} \label{b5} 10\, C(m,n)-2\, C(m,5n) = \sum_{k=0}^{5n-1} (-1)^k \Bigl( 4\, \cos \Bigl( \frac{3 \pi k}{5} \Bigr) + 4\, \cos \Bigl( \frac{\pi k}{5} \Bigr) \Bigr) \cos^{2m} \Bigl( \frac{k \pi}{5 n} \Bigr) \,. \end{equation*} Alternatively, the above result can be written as \begin{equation} \label{b6} 10 \,C(m,n)-2\, C(m,5n) = \sum_{k=0}^{5n-1} \Bigl( 4\, \cos \Bigl( \frac{2 \pi k}{5} \Bigr) + 4\, \cos \Bigl( \frac{4 \pi k}{5} \Bigr) \Bigr) \cos^{2m} \Bigl( \frac{k \pi}{5 n} \Bigr) \,. \end{equation} This, however, is twice \eqref{b2}. Therefore, the $\ell = 10$ case reduces to the $\ell = 5$ case, just as we observed in the $\ell = 3$ and $\ell = 6$ cases. Worse still, the two series involving $\cos(2\pi k/5)$ and $\cos(4\pi k /5)$ cannot be decoupled. That is, extra information is required before each of these series can be evaluated separately. However, we can express \eqref{b2} as \begin{equation} \label{b7} C(m,n)= \frac{1}{5}\sum_{k=0}^{5n-1} \Bigl( 2\, \cos \Bigl( \frac{2 \pi k}{5} \Bigr) + 2 \, \cos \Bigl( \frac{6 \pi k}{5} \Bigr) +1 \Bigr) \cos^{2m} \Bigl( \frac{k \pi}{5 n} \Bigr) \,. \end{equation} By applying the identity for the sum of two cosines, we finally arrive at the following basic cosine power sum: \begin{equation}\label{b8} \sum_{k=0}^{5n-1} \cos \Bigl( \frac{ 2\pi k}{5} \Bigr) \cos \Bigl( \frac{4 \pi k}{5} \Bigr) \cos^{2m} \Bigl( \frac{k \pi}{5 n} \Bigr)= \frac{1}{4} \, \left( 5 C(m,n) -C(m,5n) \right) \,. \end{equation} In actual fact the above result is not very surprising because the product of the cosines external to the cosine power is given by \begin{equation*} \label{b9} \cos \Bigl( \frac{ 2\pi k}{5} \Bigr) \cos \Bigl( \frac{4 \pi k}{5} \Bigr) = \begin{cases} 1\,, & k \equiv 0\;,\; ({\rm mod} \; 5)\;, \cr -1/4 \,, & {\rm otherwise}. 
\end{cases} \end{equation*} It is also interesting to note that we cannot use the alternating version of $C(m,n)$ to decouple the sums in \eqref{b7}. From \eqref{eq7b} we have $$ \sum_{k=0}^{n-1} (-1)^k \cos^{2m} \Bigl( \frac{5k \pi}{5n} \Bigr)= \sum_{k=0,5,\ldots}^{5n-5} \cos \Bigl( \frac{ \pi k}{5}\Bigr) \cos^{2m} \Bigl(\frac{k \pi}{5n} \Bigr) =2C(m,n/2) - C(m,n)\,, $$ where $n$ can only be even. By following \eqref{b2} we can express the above result as $$ \frac{1}{5} \sum_{k=0}^{5n-1} \cos \Bigl( \frac{\pi k}{5}\Bigr) \Bigl( 2 \cos \Bigl( \frac{2 \pi k}{5} \Bigr) + 2 \cos \Bigl( \frac{4 \pi k}{5} \Bigr) +1 \Bigr) \cos^{2m} \Bigl(\frac{k \pi}{5n} \Bigr) =2C(m,n/2) - C(m,n)\,. $$ After a little algebra we arrive at \begin{align*} & \sum_{k=0}^{5n-1} \Bigl( 2 \cos \Bigl( \frac{3 \pi k}{5} \Bigr) + 2 \cos \Bigl( \frac{ \pi k}{5} \Bigr) \Bigr) \cos^{2m} \Bigl(\frac{k \pi}{5n} \Bigr) \\ & = \;\; 10C(m,n/2) - 2 C(m,5n/2) + C(m,5n)- 5 C(m,n) \,. \end{align*} Once again, we are unable to decouple the component sums. In fact, as one goes to higher primes, there will be more component sums appearing in the final result, which makes the task of isolating them on their own even more difficult to accomplish. Nevertheless, we can combine the cosines on the lhs, thereby obtaining \begin{align*}\label{b11} & \sum_{k=0}^{5n-1} \cos \Bigl( \frac{\pi k}{5} \Bigr) \cos \Bigl( \frac{ 2\pi k}{5} \Bigr) \cos^{2m} \Bigl(\frac{k \pi}{5n} \Bigr) \nonumber\\ & = \;\;\frac{1}{4} \Bigl( 10 C(m,n/2) - 2 C(m,5n/2) + C(m,5n)- 5 C(m,n) \Bigr) \,. \end{align*} Comparing the above result with \eqref{b8}, we see that they are the alternating versions of one another. We can, however, derive a result for the first sum on the rhs of \eqref{b6}, although it may not be regarded as very elegant. 
First, we express the sum as \begin{equation}\label{b12} \sum_{k=0}^{5n-1} \cos \Bigl( \frac{2 k \pi}{5} \Bigr) \cos^{2m} \Bigl(\frac{k \pi}{5n} \Bigr)= \sum_{k=0}^{5n-1} \cos \Bigl(\frac{2kn \pi}{5n} \Bigr) \cos^{2m} \Bigl(\frac{k \pi}{5n} \Bigr)\,. \end{equation} From \cite[No. I.1.10]{PBM2003}, we have \begin{equation}\label{b13} \cos \Bigl(\frac{2n k\pi}{5n} \Bigr) = 2^{2n-1} \cos^{2n} \Bigl(\frac{k \pi}{5n} \Bigr) + n \sum_{j=0}^{n-1} \frac{(-1)^{j+1}}{j+1}\, \binom{2n-j-2}{j} 2^{2n-2j-2}\,\cos^{2n-2j-2} \Bigl(\frac{k \pi}{5n} \Bigr)\,. \end{equation} Introducing \eqref{b13} into \eqref{b12} yields \begin{align*} \sum_{k=0}^{5n-1} \cos \Bigl( \frac{2 k \pi}{5} \Bigr) \cos^{2m} \Bigl(\frac{k \pi}{5n} \Bigr) = & \sum_{k=0}^{5n-1} \Bigl( 2^{2n-1} \cos^{2m+2n} \Bigl(\frac{k \pi}{5n} \Bigr)+ n \sum_{j=0}^{n-1} \frac{(-1)^{j+1}}{j+1}\, 2^{2n-2j-2} \nonumber\\ & \times \binom{2n-j-2}{j} \cos^{2m+2n-2j-2} \Bigl(\frac{k \pi}{5n} \Bigr) \Bigr) \,. \end{align*} Recognizing that the sum over $k$ is the basic cosine power sum defined in Section \ref{sec2}, we finally arrive at \begin{align}\label{b16} \sum_{k=0}^{5n-1} \cos \Bigl( \frac{2 k \pi}{5} \Bigr) \cos^{2m} \Bigl(\frac{k \pi}{5n} \Bigr) = & \; 2^{2n-1}\, C(m+n,5n) +n \sum_{j=0}^{n-1} \frac{(-1)^{j+1}}{j+1}\; 2^{2n-2j-2} \nonumber\\ & \times \;\; \binom{2n-j-2}{j}\, C(m+n-j-1,5n) \,. \end{align} The other sum involving $\cos(4 k \pi /5)$ instead of $\cos(2 k \pi/5)$ can be directly obtained from \eqref{b6}. Although \eqref{b16} is cumbersome, it does nevertheless demonstrate that the basic cosine power sum given above is combinatorial in nature or rational as a consequence of Theorem \ref{main}. \end{document}
\begin{document} \title{The Maximum Principle for Global Solutions of Stochastic Stackelberg Differential Games\thanks{The first author is supported by WCU (World Class University) program through the National Research Foundation of Korea funded by the Ministry of Education, Science and Technology (R31 - 20007) and by the Research Grants Council of HKSAR (PolyU 5001/11P). The second author is supported by NNSF of China (Grant No.11101140).}} \author{Alain Bensoussan\footnote{Naveen Jindal School of Management, The University of Texas at Dallas, Richardson, TX, USA.} \footnote{Graduate School of Business, The Hong Kong Polytechnic University, Hong Kong, and Graduate Department of Financial Engineering, Ajou University, Suwon, South Korea.}\ , Shaokuan Chen$^\dag$ and Suresh P. Sethi$^\dag$} \maketitle \noindent \textbf{Abstract:} This paper obtains the maximum principle for both stochastic (global) open-loop and stochastic (global) closed-loop Stackelberg differential games. For the closed-loop case, we use the theory of controlled forward-backward stochastic differential equations to derive the maximum principle for the leader's optimal strategy. In the special case of the open-loop linear quadratic Stackelberg game, we consider the follower's Hamiltonian system as the leader's state equation, derive the related stochastic Riccati equation, and show the existence and uniqueness of the solution to the Riccati equation under appropriate assumptions. However, for the closed-loop linear quadratic Stackelberg game, we can write the related Riccati equation consisting of forward-backward stochastic differential equations, while leaving the existence of its solution as an open problem.\\ \textbf{Keywords:} Stackelberg differential game, maximum principle, forward-backward stochastic differential equation, Riccati equation. \section{Introduction}\label{sec1} In 1934, H. 
von Stackelberg introduced a concept of a hierarchical solution for markets where some firms have power of domination over others \cite{Stackelberg34}. This solution concept is now known as the Stackelberg equilibrium or the Stackelberg solution which, in the context of two-person nonzero-sum static games, involves players with asymmetric roles, one leading (called the leader) and the other following (called the follower). A Stackelberg game proceeds with the leader announcing his policy prior to the start of the game. With the knowledge of the leader's strategy, the follower chooses a policy so as to optimize his own performance index. The leader, anticipating the follower's optimal response, picks the policy which optimizes his performance index on the rational reaction curve of the follower, which together with the corresponding policy of the follower is known as the Stackelberg solution. In dynamic Stackelberg games, it becomes important to know the player's information sets at any given time. In this paper, we will consider two different information structures: i) open-loop for both players and ii) closed-loop perfect state (CLPS) for both players. Moreover, we will only treat global solution where the leader announces his entire strategy at the start of the game and the follower reacts to the entire strategy. The solutions of games with the first information structure will be termed (global) open-loop Stackelberg solutions, whereas the solutions of the games with the second information structure will be termed (global) closed-loop Stackelberg solutions. It is known that both these solutions suffer from time inconsistency, which results from the functional dependence of the follower's optimal response strategy on the leader's entire strategy on the duration of the game. 
In addition to these concepts, there is another concept of feedback Stackelberg solution, where the Stackelberg property is retained at every stage (in the discrete-time setting) with the leader having only stagewise advantage over the follower. Since the continuous-time problem can be viewed as the number of stages becomes unbounded in any finite interval, stagewise advantage of the leader over the follower turns into instantaneous advantage. A good aspect of this solution is that it is time consistent. Readers interested in the theory and applications of this solution can refer to \cite{BasarHaurie84}, \cite{Bensoussanetal12}, \cite{Dockner et al00}, \cite{HePrasadSethi09}, \cite{Heetal07} and \cite{KoganTapiero07}. In an open-loop or closed-loop Stackelberg differential game, the follower aims at minimizing his cost functional in accordance with the leader's strategy on the whole duration of the game. Anticipating the follower's optimal response depending on his entire strategy, the leader chooses an optimal one in advance to minimize his own cost functional, based on the Hamiltonian system satisfied by the follower's optimal response. The difference between the two kinds of games is whether the information sets of the players involve the history of the state. The introduction of the history of the state in the closed-loop Stackelberg game, even in the deterministic case, makes it difficult to tackle, as the follower may not obtain his optimal response if the leader's announced strategy incorporates the memory of the state. Two approaches to circumvent this difficulty are introduced: the team approach and the maximum principle. For the former, one can refer to \cite{Basar79a}, \cite{BasarSelbuz79b} in the discrete-time setting and \cite{Papavassi79}, \cite{PapavassiCruz79b}, \cite{PapavassiCruz80} and \cite{BarsarOlsder80} in the continuous-time setting. 
For the latter, one can refer to \cite{PapavassiCruz79} for nonclassical control problems arising from Stackelberg games. The idea of team approach is as follows: the leader first minimizes his cost functional over the controls of both the leader and the follower, yielding a lower bound on his cost functional and the team strategies for both players. Then the leader makes an effort to find a closed-loop strategy such that the follower's optimal response and the state trajectory will coincide with his team strategy and the team optimal trajectory, which leads to the lower bound on the leader's cost functional. The maximum principle approach restricts the leader's strategy to depend only on the initial state and the current state (memoryless perfect state information structure) and a nonclassical control problem faced by the leader is solved. It is worth noting that in this case, the follower's adjoint equation involves the derivative of the leader's strategy with respect to the state. Therefore, after incorporating the follower's adjoint variable as an augmented state, the leader encounters a nonclassical control problem with the feature that both the control and its derivative with respect to the state appear in the controlled forward-backward ordinary differential equation system. The authors provide two approaches to tackle this problem and give the necessary conditions satisfied by the leader's optimal strategy. One is to directly apply the variational technique to the state system with mixed-boundary conditions (the adjoint equation of the follower with a terminal condition). The other is to establish an equivalent relationship between such a nonclassical control problem and a classical control problem, which yields that the optimal strategy could be found in the space of affine functions. The phenomenon of time inconsistency is also analyzed by the authors. 
We will elaborate on the technical details and generalize their result to the stochastic setting in section \ref{sec3}. For the stochastic formulation of Stackelberg games involving white noise terms, Yong \cite{Yong02} studies the open-loop linear quadratic case, with control variables appearing in diffusion term of the state. To give a state feedback representation of the open-loop Stackelberg solution (in a non-anticipating way), the related Riccati equation is derived and sufficient conditions for the existence of its solution with deterministic coefficients are discussed. More recently, {\O}ksendal et al \cite{Oksendaletal11} have considered a general stochastic open-loop Stackelberg differential game, proved a sufficient maximum principle, and applied the theory to continuous-time newsvendor problems. In this paper, we study stochastic global Stackelberg differential games with open-loop and closed-loop information structures. As we shall see, the problems confronted by the leader in both cases, from the current point of view, are control problems with the state equations being forward-backward stochastic differential equations (FBSDEs). The theories for nonlinear backward stochastic differential equations (BSDEs) and FBSDEs have been extensively studied over the last two decades following the initial work by Pardoux and Peng \cite{PardouxPeng90}. One can refer to, among others, \cite{Maetal94}, \cite{MaYong99}, \cite{PardouxTang99}, \cite{PengWu99}, \cite{Yong10a}, and the references therein, for the development of the theory of FBSDEs and their applications. With the help of the results in optimization problems for controlled FBSDEs (see, e.g., \cite{ShiWu06} and \cite{Yong10b}), we obtain the maximum principle for the leader's optimal strategies in stochastic global Stackelberg games, and discuss linear quadratic problems as well as the corresponding Riccati equations. This paper is organized as follows. 
In section 2 we formulate a stochastic Stackelberg game and give three types of concepts of equilibria. In section 3 we present the maximum principle for a stochastic open-loop Stackelberg game. In section 4 we focus on a stochastic closed-loop Stackelberg game and derive a maximum principle for the leader's optimal strategy. As examples, linear quadratic stochastic open-loop and closed-loop Stackelberg games are studied in section 5. For the open-loop linear quadratic case, we show the existence and uniqueness of the solution to the associated stochastic Riccati equation under some assumptions. For the closed-loop case, we simply derive a new Riccati equation consisting of FBSDEs, without investigating the issue of the existence of its solution. \section{Problem formulation and definition of equilibria}\label{sec2} Let $(\Omega,\mathcal {F},P)$ be a complete probability space on which is defined a $d$-dimensional standard Brownian motion $\{W(t),0\leq t\leq T\}$. $\{\mathcal {F}_t\}_{0\leq t\leq T}$ is the natural filtration generated by $W$ and augmented by all the $P$-null sets in $\mathcal {F}$ and $\mathcal {P}$ is the predictable sub-$\sigma$-field of $\mathcal {B}([0,T])\times\mathcal {F}$. We consider a stochastic differential system \begin{equation}\label{o1} \left\{ \begin{split} dx(t)&=f(t,x(t),u(t),v(t))dt+\sigma(t,x(t))dW(t),\\ x(0)&=x_0, \end{split}\right. \end{equation} where $$f:\Omega\times[0,T]\times\mathbb{R}^n\times\mathbb{R}^{m_1}\times\mathbb{R}^{m_2}\rightarrow\mathbb{R}^n,$$ $$\sigma:\Omega\times[0,T]\times\mathbb{R}^n\rightarrow\mathbb{R}^{n\times d},$$ are $\mathcal {P}\times\mathcal {B}(\mathbb{R}^{n+m_1+m_2})/\mathcal {B}(\mathbb{R}^{n})$ and $\mathcal {P}\times\mathcal {B}(\mathbb{R}^{n})/\mathcal {B}(\mathbb{R}^{n\times d})$ measurable, respectively, and $(u(\cdot),v(\cdot))$ are the decision variables of the leader and the follower, respectively. 
The cost functionals for the leader and the follower to minimize are described as follows \begin{equation*}\label{o2} \begin{split} J_1(u,v)&=E[\int_0^Tg_1(t,x(t),u(t),v(t))dt+G_1(X(T))],\\ J_2(u,v)&=E[\int_0^Tg_2(t,x(t),u(t),v(t))dt+G_2(x(T))], \end{split} \end{equation*} with $$g_i:\Omega\times[0,T]\times\mathbb{R}^{n}\times U\times V\rightarrow\mathbb{R},$$ $$G_i:\Omega\times\mathbb{R}^n\rightarrow\mathbb{R},$$ $i=1,2$, being $\mathcal {P}\times\mathcal {B}(\mathbb{R}^{n})\times\mathcal {B}(U)\times\mathcal {B}(V)/\mathcal {B}(\mathbb{R})$ and $\mathcal {F}_T\times\mathcal {B}(\mathbb{R}^n)/\mathcal {B}(\mathbb{R})$ measurable, respectively. According to the player's information sets at any given time, there are three types of Stackelberg games: (global) open-loop, (global) closed-loop, and feedback Stackelberg games. \textbf{Open-loop games:} In an open-loop Stackelberg game, the leader's information set at time $t$ is $\{x_0,\mathcal {F}_t\}$. Therefore, the strategy $u$ announced by the leader is an $\mathcal {F}_t$-adapted process. The follower aims at minimizing his cost functional $J_2(u,v)$ in accordance with the leader's strategy $u$ on the whole duration of the game. His optimal response $\Phi(u)$ will be an adapted process such that $$J_2(u,\Phi(u))\leq J_2(u,v),\ \ \forall\ u,v.$$ The leader, anticipating the follower's optimal response $\Phi$, picks the policy $u^*$ which optimizes his performance index on the rational reaction curve of the follower, i.e., $$J_1(u^*,\Phi(u^*))\leq J_1(u,\Phi(u)),\ \forall\ u.$$ $(u^*,\Phi(u^*))$ is a Stackelberg solution for an open-loop game. \textbf{Closed-loop games:} In a closed-loop Stackelberg game, the information set for the leader at time $t$ is $\{\mathcal {F}_t,x_s,s\in[0,t]\}$ (closed-loop perfect state information). The strategy that the leader adopts now can incorporate the history information of the state. 
Since in general it is difficult for the follower to obtain his optimal response if the leader's announced strategy incorporates the whole history of the state, we only consider the closed-loop case under the memoryless perfect state information pattern, i.e., the information set of the leader at time $t$ is $\{x_0,x_t,\mathcal {F}_t\}$. For leader's each strategy $u(t,x_0,x)$, which is now a stochastic field, the follower tries to find his optimal response $\Psi(u)$ such that $$J_2(u,\Psi(u))\leq J_2(u,v),\ \forall\ u,v.$$ Taking into account the follower's optimal response, the leader should choose $u^*$ such that $$J_1(u^*,\Psi(u^*)\leq J_1(u,\Psi(u)),\ \forall\ u.$$ $(u^*,\Psi(u^*))$ is a Stackelberg solution for a closed-loop game. \textbf{Feedback games:} In a feedback Stackelberg game, the information set for the leader at time $t$ is $\{x_t,\mathcal {F}_t\}$ (feedback pattern). The significant mechanism difference between feedback games and the former two types of games is that the advantage of the leader over the follower in a feedback Stackelberg game is instantaneous not global, as the differential game could be viewed as the limit of the discrete-time game as the number of stages becomes unbounded (see \cite{BasarHaurie84}). Therefore, corresponding to the leader's instantaneous strategy $u(t,x)$, the follower will make an instantaneous response of the form $v(t,x,u(t,x))$, which depends on the current state and the leader's current action. A feedback solution is a pair of strategies $(u^*,v^*)$ such that \begin{align*} &J_1(u^*,v^*(u^*))\leq J_1(u,v^*(u)),\ \forall\ u,\\ &J_2(u^*,v^*(u^*))\leq J_2(u^*,v(u^*)).\ \forall\ v. \end{align*} From the definition we can see that the feedback Stackelberg solution has some equilibrium feature, whereas the open-loop or closed-loop solution involves a sequential optimization at the level of the follower and the leader. 
\section{Stochastic open-loop Stackelberg differential games}\label{open} We first introduce some notations. For two vectors $x$ and $y$ in $\mathbb{R}^n$, $\langle x,y\rangle$ means the inner product $\sum_{i=1}^nx_iy_i$. For a function $f$ defined on $\mathbb{R}^n$, $Df$ or $\partial f$ means the gradient of $f$. Here we specify that throughout this paper all the vectors are column vectors and the gradient of a scalar function $f$ is $\frac{\partial f}{\partial x}=(\frac{\partial f}{\partial x_1},\cdots,\frac{\partial f}{\partial x_n})^\top$, while the gradient of a vector function $f=(f_1,\cdots,f_m)^\top$ is a matrix \begin{equation*} \frac{\partial f}{\partial x}=\left( \begin{array}{ccc} \frac{\partial f_1}{\partial x_1}&\cdots&\frac{\partial f_1}{\partial x_n}\\ \vdots&\vdots&\vdots\\ \frac{\partial f_m}{\partial x_1}&\cdots&\frac{\partial f_m}{\partial x_n}\\ \end{array}\right). \end{equation*} We further introduce two spaces of adapted processes to be used in the definition of the solution to a FBSDE, \begin{align*} \mathcal{S}^2(0,T;\mathbb{R}^n):=\{&\psi|\ \psi:\Omega\times[0,T]\rightarrow\mathbb{R}^n\ \textrm{is a continous adapted process such that}\\ &E\sup_{0\leq t\leq T}|\psi(t)|^2<\infty\},\\ \mathcal{M}^2(0,T;\mathbb{R}^n):=\{&\psi|\ \psi:\Omega\times[0,T]\rightarrow\mathbb{R}^n\ \textrm{is an adapted process such that}\\ &E\int_0^T|\psi(t)|^2dt<\infty\}. \end{align*} And the above two spaces will be simply written as $\mathcal{S}^2$ and $\mathcal{M}^2$, respectively, if no confusion arises. 
The admissible strategy spaces for the leader and the follower are denoted by \begin{equation*}\label{f1} \begin{split} \mathcal {U}&=\{u|u: \Omega\times[0,T]\rightarrow U\ \textrm{is}\ \mathcal {F}_t\textrm{-adapted and}\ E\int_0^T|u(t)|^2dt<+\infty\},\\ \mathcal {V}&=\{v|v: \Omega\times[0,T]\rightarrow V\ \textrm{is}\ \mathcal {F}_t\textrm{-adapted and}\ E\int_0^T|v(t)|^2dt<+\infty\}, \end{split} \end{equation*} where $U$ and $V$ are subsets of $\mathbb{R}^{m_1}$ and $\mathbb{R}^{m_2}$. For the completeness of this paper, we state the formulation of general stochastic open-loop Stackelberg games and the corresponding maximum principle. From the definition in section \ref{sec2}, given the leader's strategy $u\in\mathcal {U}$, the follower is faced the stochastic control problem $$\min_{v\in\mathcal {V}} J_2(u,v)=E[\int_0^Tg_2(t,x(t),u(t),v(t))dt+G_2(x(T))]$$ subject to \begin{equation*} \left\{\begin{aligned} dx(t)&=f(t,x(t),u(t),v(t))dt+\sigma(t,x(t))dW(t),\\ x(0)&=x_0. \end{aligned}\right. \end{equation*} Suppose there exists a unique solution $v^*(u(\cdot))\in\mathcal {V}$ to the above problem for each $u\in\mathcal {U}$. If we define $$H_2(t,x,u,v,p_2,q_2):=\langle p_2, f(t,x,u,v)\rangle+\langle q_2,\sigma(t,x)\rangle+g_2(t,x,u,v),$$ then the maximum principle (see \cite{YongZhou99}) yields that there exists a pair of adapted processes $(p_2,q_2)\in\mathcal{S}^2\times\mathcal{M}^2$ such that \begin{equation}\label{o3} \left\{ \begin{split} dx(t)=&f(t,x(t),u(t),v^*(t))dt+\sigma(t,x(t))dW(t),\\ -dp_2(t)=&\big\{(\frac{\partial f}{\partial x})^\top(t,x(t),u(t),v^*(t))p_2(t)+(\frac{\partial \sigma}{\partial x})^\top(t,x(t))q_2(t)\\ &+\frac{\partial g_2}{\partial x}(t,x(t),u(t),v^*(t))\big\}dt-q_2(t)dW(t),\\ x(0)=&x_0,\ \ p_2(T)=\frac{\partial G_2}{\partial x}(x(T)),\\ v^*(t)=&arg \min_{v\in V} H_2(t,x(t),u(t),v,p_2(t),q_2(t)). \end{split}\right. 
\end{equation} We assume that by the last equation in \eqref{o3} a function $v=v^*(t,x,u,p_2)$ is implicitly and uniquely defined. After substituting $v=v^*(t,x,u,p_2)$ into the follower's maximum principle, we get the control problem faced by the leader $$\min_{u\in\mathcal {U}}\ J_1(u)=E[\int_0^Tg_1(t,x(t),u(t),v^*(t,x(t),u(t),p_2(t)))dt+G_1(X(T))]$$ subject to \begin{equation}\label{04} \left\{ \begin{split} dx(t)=&f(t,x(t),u(t),v^*(t,x(t),u(t),p_2(t)))dt+\sigma(t,x(t))dW(t),\\ -dp_2(t)=&\big\{(\frac{\partial f}{\partial x})^\top(t,x(t),u(t),v^*(t,x(t),u(t),p_2(t)))p_2(t)+(\frac{\partial \sigma}{\partial x})^\top(t,x(t))q_2(t)\\ &+\frac{\partial g_2}{\partial x}(t,x(t),u(t),v^*(t,x(t),u(t),p_2(t)))\big\}dt-q_2(t)dW(t),\\ x(0)=&x_0,\ \ p_2(T)=\frac{\partial G_2}{\partial x}(x(T)). \end{split}\right. \end{equation} We denote \begin{equation}\label{05} \begin{split} &H_1(t,u,x,y,p_1,p_2,q_1,q_2)\\ =&\langle p_1, f(t,x,u,v^*(t,x,u,p_2))\rangle+\langle q_1,\sigma(t,x)\rangle+g_1(t,x,u,v^*(t,x,u,p_2))\\ &-\langle y,(\frac{\partial f}{\partial x})^\top(t,x,u,v^*(t,x,u,p_2))p_2+(\frac{\partial \sigma}{\partial x})^\top(t,x)q_2+\frac{\partial g_2}{\partial x}(t,x,u,v^*(t,x,u,p_2))\rangle. \end{split} \end{equation} Suppose $u^*$ is an optimal strategy for the leader. 
Then the maximum principle for controlled forward-backward stochastic differential equations (see, e.g., \cite{ShiWu06} or \cite{Yong10b}) yields that there exists a triple of adapted processes $(p_1,q_1,y)$ such that \begin{equation}\label{oo6} u^*(t)=\arg\min H_1(t,u,x(t),y(t),p_1(t),p_2(t),q_1(t),q_2(t)), \end{equation} and \begin{equation}\label{o6} \left\{ \begin{split} dy(t)=&-\frac{\partial H_1}{\partial p_2}dt-\frac{\partial H_1}{\partial q_2}dW(t),\\ =&-\{(\frac{\partial f}{\partial v}\frac{\partial v^*}{\partial p_2})^\top p_1-\frac{\partial f}{\partial x}y-\sum_{i=1}^{n}y_i(\frac{\partial v^*}{\partial p_2})^\top\frac{\partial}{\partial v}(\frac{\partial f}{\partial x_i})^\top p_2\\ &-(\frac{\partial^2 g_2}{\partial x\partial v}\frac{\partial v^*}{\partial p_2})^\top y+(\frac{\partial v^*}{\partial p_2})^\top\frac{\partial g_1}{\partial v}\}dt-\frac{\partial \sigma}{\partial x}ydW(t),\\ dp_1(t)=&-\frac{\partial H_1}{\partial x}dt+q_1dW(t)\\ =&-\{\frac{\partial f}{\partial x}+\frac{\partial f}{\partial v}\frac{\partial v^*}{\partial x}+(\frac{\partial\sigma}{\partial x})^\top q_1+\frac{\partial g_1}{\partial x}+(\frac{\partial v^*}{\partial x})^\top\frac{\partial g_1}{\partial v}\\ &-\sum_iy_i[\frac{\partial}{\partial x}(\frac{\partial f}{\partial x_i})^\top+(\frac{\partial v^*}{\partial x})^\top\frac{\partial}{\partial v}(\frac{\partial f}{\partial x_i})^\top]p_2\\ &-\sum_iy_i\frac{\partial}{\partial x}(\frac{\partial \sigma}{\partial x_i})^\top q_2-(\frac{\partial^2g_2}{\partial x^2}+\frac{\partial^2g_2}{\partial x\partial v}\frac{\partial v^*}{\partial x})^\top y\}dt+q_1dW(t),\\ y(0)=&0,\ \ p_1(T)=-\frac{\partial^2G_2}{\partial x^2}(x(T))y(T)+\frac{\partial G_1}{\partial x}(x(T)). \end{split}\right. \end{equation} \section{Stochastic closed-loop Stackelberg games}\label{sec3} In this section, we consider a stochastic closed-loop Stackelberg game which is a stochastic version of the paper \cite{PapavassiCruz79}. 
The difference between open-loop Stackelberg games and closed-loop Stackelberg games is that in the former case the leader's information set is the $\sigma$-field $\mathcal {F}_t$ generated by the Brownian motion $W$, whereas in the latter case the leader's information set involves both the $\sigma$-field $\mathcal {F}_t$ and the history of the state $x$. As stated in the introduction, the difficulty of studying closed-loop Stackelberg games arises from the fact that the reaction of the follower can not be determined explicitly if the leader's strategy depends on the whole history of the state (CLPS information structure). However, if the leader's strategy is restricted to be memoryless, i.e., only the current state is involved in the strategy, Papavassilopoulos and Cruz \cite{PapavassiCruz79} provide an efficient way to solve such a problem. As demonstrated in \cite{PapavassiCruz79}, the derivative $\frac{\partial u}{\partial x}$ of the leader's strategy $u$ will appear in the follower's adjoint equation and further in the leader's augmented state equation, which makes the leader's control problem a nonclassical one. \subsection{The deterministic case revisited} Since we apply the approach in Papavassilopoulos and Cruz \cite{PapavassiCruz79} to solve the stochastic version of closed-loop Stackelberg games, we fist elaborate their techniques in this subsection. The state and the cost functionals for the leader and the follower are as follows \begin{equation}\label{gg3} \left\{ \begin{split} \dot{x}(t)&=f(t,x(t),u(t),v(t)),\\ x(0)&=x_0, \end{split}\right. \end{equation} \begin{equation}\label{gg4} \begin{split} J_1(u,v)&=\int_0^T g_1(t,x(t),u(t),v(t))dt+G_1(x_T),\\ J_2(u,v)&=\int_0^T g_2(t,x(t),u(t),v(t))dt+G_2(x_T). 
\end{split} \end{equation} Given the leader's strategy $u(t,x)_{t\in[0,T]}$ (we omit to write the dependence on the initial state $x_0$) which is continuously differentiable in $x$, if the follower's optimal response is $v^*$, then according to the deterministic maximum principle, there exists a function $p$ such that \begin{equation}\label{gg5} \left\{ \begin{split} &\dot{x}=f(t,x,u,v^*),\\ &-\dot{p}=(\frac{\partial f}{\partial x}+\frac{\partial f}{\partial u}\frac{\partial u}{\partial x})^\top p+\frac{\partial g_2}{\partial x}+(\frac{\partial u}{\partial x})^\top\frac{\partial g_2}{\partial u},\\ &\frac{\partial g_2}{\partial v}+\frac{\partial f}{\partial v}p=0,\\ &x(0)=x_0,\ p(T)=\frac{\partial G_2(x(T))}{\partial x}. \end{split}\right. \end{equation} Suppose we can get the unique solution \begin{equation}\label{gg6} v=\varphi(t,x,p,u) \end{equation} from solving $$\frac{\partial g_2}{\partial v}+\frac{\partial f}{\partial v}p=0.$$ Then, after substituting the expression \eqref{gg6} into \eqref{gg5} and $J_1$, the leader will be faced with the following problem \begin{equation}\label{gg7} \min_{u} J_1(u)=\int_0^T g_1(t,x,u,\varphi(t,x,p,u))dt+G_1(x_T) \end{equation} subject to \begin{equation}\label{gg8} \left\{ \begin{split} \dot{x}&=f(t,x,u,\varphi(t,x,p,u)),\\ -\dot{p}&=[\frac{\partial f}{\partial x}+\frac{\partial f}{\partial u}\frac{\partial u}{\partial x}]^\top p+\frac{\partial g_2}{\partial x}+(\frac{\partial u}{\partial x})^\top\frac{\partial g_2}{\partial u},\\ x(0)&=x_0,\ p(T)=\frac{\partial G_2(x(T))}{\partial x}. \end{split}\right. \end{equation} Since the derivative $\frac{\partial u}{\partial x}$ of the control variable $u$ is involved in the adjoint equation \eqref{gg8}, the above problem is a nonclassical one. The authors provide two approaches to overcome this difficulty. One is the direct application of variational techniques. 
The other one is more interesting, which reveals the relative independence of $u$ and $\frac{\partial u}{\partial x}$ and the time inconsistency property. To be more precise, with $\frac{\partial u}{\partial x}$ replaced by another new control variable $\tilde{u}$, they construct a new classical problem \begin{equation}\label{gg9} \min_{u,\tilde{u}} \tilde{J}_1(u)=\int_0^T g_1(t,x,u,\varphi(t,x,p,u))dt+G_1(x_T) \end{equation} subject to \begin{equation}\label{gg10} \left\{ \begin{split} \dot{x}&=f(t,x,u,\varphi(t,x,p,u)),\\ -\dot{p}&=(\frac{\partial f}{\partial x}+\frac{\partial f}{\partial u}\tilde{u})^\top p+\frac{\partial g_2}{\partial x}+(\tilde{u})^\top\frac{\partial g_2}{\partial u},\\ x(0)&=x_0,\ p(T)=\frac{\partial G_2(x(T))}{\partial x}, \end{split}\right. \end{equation} and prove the equivalence of the above nonclassical problem \eqref{gg7}-\eqref{gg8} and the constructed classical problem \eqref{gg9}-\eqref{gg10} in the sense that they have the same optimal trajectory and costs. Indeed, if we denote by $J_1^*$ and $J_2^*$ the optimal values of problems \eqref{gg7}-\eqref{gg8} and \eqref{gg9}-\eqref{gg10}, respectively, then $J_1^*\geq J_2^*$. On the other hand, suppose that $(u^*,\tilde{u}^*)$ is an optimal control for problem \eqref{gg9}-\eqref{gg10} and $x^*$ is the corresponding trajectory, then control \begin{equation}\label{gg11} \hat{u}(t,x):=\tilde{u}^*(t)x+u^*(t)-\tilde{u}^*(t)x^*(t) \end{equation} yields the same trajectory $x^*$ and thus the same cost in problem \eqref{gg7}-\eqref{gg8}. Consequently, $J_1^*=J_2^*$ and $\hat{u}$ is an optimal control for the nonclassical problem \eqref{gg7}-\eqref{gg8}. Therefore, one can substitute $\frac{\partial u}{\partial x}$ for $\tilde{u}$ in the maximum principle for the problem \eqref{gg9}-\eqref{gg10} and finally get the maximum principle for the nonclassical problem \eqref{gg7}-\eqref{gg8} faced by the leader. 
\begin{rem} Given the leader's strategy $u(t,x)_{t\in[0,T]}$, the follower can also solve the following Hamilton-Jacobi-Bellman equation \begin{equation}\label{gg12} \left\{ \begin{split} &\frac{\partial V_2}{\partial t}+\inf_{v\in\mathbb{R}^n}\{\langle\frac{\partial V_2}{\partial x}, f(t,x,u(t,x),v)\rangle+g_2(t,x,u(t,x),v)\}=0,\\ &V_2(T,x)=G_2(x), \end{split}\right. \end{equation} and obtain the optimal feedback strategy $$v^*(t,x)=arg\inf_{v\in\mathbb{R}^n}\{\langle\frac{\partial V_2}{\partial x}, f(t,x,u(t,x),v)\rangle+g_2(t,x,u(t,x),v)\}.$$ However, since $V_2$ depends on the whole function $u(\cdot)$, it is impossible for the leader to employ dynamic programming to depict his optimal strategy. The maximum principle approach turns out to be more appropriate for closed-loop Stackelberg games. \end{rem} \subsection{The stochastic case}\label{sgs} In this subsection we tackle closed-loop Stackelberg games in the stochastic context, with the same idea as \cite{PapavassiCruz79}. After introducing a stochastic disturbance term in the state equation \eqref{gg3}, the adjoint equation for the follower, which also acts as the state equation in the leader's problem, will be a BSDE rather than an ODE with a terminal condition. Therefore, the leader will end up with a control problem in which the state equation consists of a SDE and a BSDE, with the feature that both the control $u$ and its derivative $\frac{\partial u}{\partial x}$ are introduced in the controlled system. With the results on the maximum principle for control problems of FBSDEs, we present the necessary conditions for the leader's optimal strategy to satisfy in a closed-loop Stackelberg game. 
We first introduce the admissible strategy spaces for the leader and the follower \begin{equation*} \begin{split} \mathcal {U}&:=\{u:u:\Omega\times[0,T]\times\mathbb{R}^n\rightarrow U\ \textrm{is}\ \mathcal {F}_t\textrm{-adapted for any}\ x\in\mathbb{R}^n, u(t,x)\ \textrm{is continuously}\\ &\ \ \textrm{differentible in}\ x\ \textrm{for any}\ (\omega,t)\in\Omega\times[0,T],\ \textrm{and the derivative}\ \frac{\partial u}{\partial x}\ \textrm{is bounded}\},\\ \mathcal {V}&:=\{v:v:\Omega\times[0,T]\times\mathbb{R}^n\rightarrow V\ \textrm{is}\ \mathcal {F}_t\textrm{-adapted for any}\ x\in\mathbb{R}^n\}. \end{split} \end{equation*} Then, given the leader's strategy $u(t,x)$, the follower's optimal response strategy $v^*(t,x)$ is a solution to the following classical optimal control problem, \begin{equation}\label{g1} \min_{v\in\mathcal {V}} J_2=E\int_0^Tg_2(t,x(t),u(t,x(t)),v(t))dt+EG_2(X(T)), \end{equation} subject to \begin{equation} \left\{ \begin{split}\label{g2} dx(t)&~=f(t,x(t),u(t,x(t)),v(t))dt+\sigma(t,x(t))dW(t),\\ x(0)&~=x_0. \end{split}\right. \end{equation} According to the maximum principle, there exists a pair of adapted processes $(p_2, q_2)\in\mathcal{S}^2\times\mathcal{M}^2$ such that \begin{equation}\label{g3} v^*(t,x(t))=arg \min_{v\in V}\{\langle p_2(t),f(t,x(t),u(t,x(t)),v)\rangle+\langle q_2,\sigma(t,x)\rangle+g_2(t,x(t),u(t,x(t)),v)\}, \end{equation} and \begin{equation}\label{g4} \left\{ \begin{split} dp_2(t)=&-[(\frac{\partial f}{\partial x}+\frac{\partial f}{\partial u}\frac{\partial u}{\partial x})^\top p_2+(\frac{\partial \sigma}{\partial x})^\top q_2\\ &+\frac{\partial g_2}{\partial x}+(\frac{\partial u}{\partial x})^\top\frac{\partial g_2}{\partial u}]dt+q_2(t)dW(t),\\ p_2(T)=&\frac{\partial G_2}{\partial x}(x(T)), \end{split}\right. \end{equation} where $x(\cdot)$ is the solution of \eqref{g2} with policies $u(t,x)$ and $v^*(t,x)$. 
Suppose for any leader's strategy $u(t,x)$, there exists a unique strategy $v^*(t,x)$ for the follower that minimizes his cost functional $J_2$. We also suppose that \eqref{g3} yields $v^*=\varphi(t,x,u,p_2)$. Then, taking into account the follower's optimal response, the leader will be confronted with the optimal control problem \begin{equation}\label{g5} \min_{u\in\mathcal {U}} J_1=E\int_0^Tg_1(t,x(t),u(t,x(t)),\varphi(t,x(t),u(t,x(t)),p_2(t)))dt+EG_1(x(T)) \end{equation} subject to \begin{equation}\label{g6} \left\{ \begin{split} dx(t)=&f(t,x(t),u(t,x(t)),\varphi(t,x(t),u(t,x(t)),p_2(t)))dt+\sigma(t,x(t))dW(t),\\ dp_2(t)=&-[(\frac{\partial f}{\partial x}+\frac{\partial f}{\partial u}\frac{\partial u}{\partial x})^\top p_2+(\frac{\partial \sigma}{\partial x})^\top q_2\\ &+\frac{\partial g_2}{\partial x}+(\frac{\partial u}{\partial x})^\top\frac{\partial g_2}{\partial u}]dt+q_2(t)dW(t),\\ x(0)=&x_0,\ \ p_2(T)=\frac{\partial G_2}{\partial x}(x(T)). \end{split}\right. \end{equation} It can be seen that, after incorporating the follower's adjoint variable as an augmented state, the leader encounters a controlled FBSDE, which is the counterpart of \eqref{gg8} in the deterministic context. For the solvability of FBSDEs, one can refer to \cite{Maetal94}, \cite{PengWu99}, \cite{PardouxTang99}, \cite{Yong10a}, and the references therein. Here we assume that the leader's problem is well-posed, i.e., for each $u(\cdot)\in\mathcal {U}$, there exists a unique triple $(x,p_2,q_2)\in\mathcal{S}^2\times\mathcal{S}^2\times\mathcal{M}^2$ solving FBSDE \eqref{g6}. Since the derivative $\frac{\partial u}{\partial x}$ of the control variable $u$ is involved in the BSDE in \eqref{g6}, we apply the techniques in the deterministic case to relate the above nonclassical control problem to a classical one. 
Consider the optimization problem of a controlled FBSDE \begin{equation}\label{g7} \min_{u_1,u_2} J(u_1(\cdot),u_2(\cdot))=E\int_0^Tg_1(t,x(t),u_1(t),\varphi(t,x(t),u_1(t),p_2(t)))dt+EG_1(x(T)), \end{equation} subject to \begin{equation}\label{g8} \left\{ \begin{split} dx(t)~=&f(t,x(t),u_1(t),\varphi(t,x(t),u_1(t),p_2(t)))dt+\sigma(t,x(t))dW(t),\\ dp_2(t)~=&-[(\frac{\partial f}{\partial x}+\frac{\partial f}{\partial u}u_2)^\top p_2+(\frac{\partial \sigma}{\partial x})^\top q_2\\ &+\frac{\partial g_2}{\partial x}+(u_2)^\top\frac{\partial g_2}{\partial u}]dt+q_2(t)dW(t),\\ x(0)~=&x_0,\ p_2(T)=\frac{\partial G_2}{\partial x}(x(T)), \end{split}\right. \end{equation} where $u_1$ and $u_2$ are adapted control variables with values in $U$ and some bounded subset in $\mathbb{R}^{m_1\times n}$, respectively. Again we assume the above problem is well-posed. Obviously, if we denote by $J_1^*$ and $J^*$ the optimal values of problems \eqref{g5}-\eqref{g6} and \eqref{g7}-\eqref{g8}, respectively, then $J_1^*\geq J^*$. On the other hand, if $(u_1^*,u_2^*)$ is a solution to problem \eqref{g7}-\eqref{g8} and $x^*$ is the corresponding optimal state trajectory, then we can construct an optimal control $u^*$ for problem \eqref{g5}-\eqref{g6} as follows \begin{equation}\label{g9} u^*(t,x):=u_2^*(t)x+u_1^*(t)-u_2^*(t)x^*(t). \end{equation} Therefore, $J_1^*=J^*$, which implies that if $u^*(t,x)$ is a solution to problem \eqref{g5}-\eqref{g6} and $x^*$ is the corresponding optimal state trajectory, then $(u^*(t,x^*(t)),\frac{\partial u^*}{\partial x}(t,x^*(t)))$ is an optimal control for problem \eqref{g7}-\eqref{g8} and leads to the same optimal state trajectory $x^*$. Thus we can obtain the maximum principle for problem \eqref{g5}-\eqref{g6} faced by the leader by means of the necessary conditions satisfied by the optimal control for problem \eqref{g7}-\eqref{g8} (see, e.g., \cite{ShiWu06} or \cite{Yong10b}). 
To this end, we define \begin{equation}\label{g10} \begin{split} &H_1(t,u_1,u_2,x,y,p_1,p_2,q_1,q_2)\\ =~&\langle p_1, f(t,x,u_1,\varphi(t,x,u_1,p_2))\rangle+\langle q_1,\sigma(t,x)\rangle-\langle y,(\frac{\partial f}{\partial x}+\frac{\partial f}{\partial u}u_2)^\top p_2\\ &+(\frac{\partial \sigma}{\partial x})^\top q_2+\frac{\partial g_2}{\partial x}+(u_2)^\top\frac{\partial g_2}{\partial u}\rangle+g_1(t,x,u_1,\varphi(t,x,u_1,p_2)). \end{split} \end{equation} \begin{thm} Suppose $u^*(t,x)$ is a solution to the leader's problem \eqref{g5}-\eqref{g6}. Then there exists a triple $(y,p_1,q_1)$ such that \begin{equation}\label{g11} \begin{split} &(u^*(t,x(t)),\frac{\partial u^*}{\partial x}(t,x(t)))\\ =&\arg\min_{(u^1,u^2)} H_1(t,u^1,u^2,x(t),y(t),p_1(t),p_2(t),q_1(t),q_2(t)) \end{split} \end{equation} and \begin{equation}\label{g12} \left\{ \begin{split} dy(t)=&-\frac{\partial H_1}{\partial p_2}dt-\frac{\partial H_1}{\partial q_2}dW(t),\\ dp_1(t)=&-\frac{\partial H_1}{\partial x}dt+q_1(t)dW(t),\\ y(0)=&~0,\ \ p_1(T)=-\frac{\partial^2 G_2}{\partial x^2}(x(T))y(T)+\frac{\partial G_1}{\partial x}(x(T)), \end{split}\right. \end{equation} where $(x,p_2,q_2)$ is the solution of state equation \eqref{g6} with control $u^*(t,x)$, and $\frac{\partial H_1}{\partial p_2}$, $\frac{\partial H_1}{\partial q_2}$ and $\frac{\partial H_1}{\partial x}$ in \eqref{g12} are evaluated at $$(t,u^*(t,x(t)),\frac{\partial u^*}{\partial x}(t,x(t)),x(t),y(t),p_1(t),p_2(t),q_1(t),q_2(t)).$$ \end{thm} \begin{rem} If $u$ is independent of $x$, we conclude in comparison with the arguments in Section~\ref{open} that the closed-loop Stackelberg solution is reduced to the open-loop Stackelberg solution and the maximum principles for both cases are identical. \end{rem} \section{The linear quadratic Stackelberg games} In this section we consider linear quadratic open-loop and closed-loop Stackelberg games. 
Yong derives the Riccati equation for the open-loop Stackelberg game in \cite{Yong02} where the weighting matrices of the state and controls in the cost functionals are assumed not necessarily positive definite, and controls are allowed to appear in the diffusion term. For the follower's problem, the author uses the solutions of the follower's Riccati equation and a BSDE to give the state feedback representation of the follower's optimal strategy (one can also refer to \cite[Page 313]{YongZhou99} for a similar derivation of the state feedback representation for a linear quadratic stochastic control problem with deterministic coefficients). To be precise, the author assumes that the follower's adjoint variable $p_2$ in \eqref{o9} has the affine form $$p_2=Px+\phi.$$ Applying It\^{o}'s formula to $p_2$ and taking into account \eqref{o7} and \eqref{o9}, one can get the follower's Riccati equation with respect to $P$ and a BSDE for $\phi$. Then the author views the above BSDE for $\phi$, which contains the solution of the follower's Riccati equation and the leader's adopted strategy, and the original state equation as the leader's controlled system and further derives the leader's Riccati equation. Under some assumptions the author also discusses the solvability of the Riccati equations for the case of deterministic coefficients. Here we consider the follower's Hamiltonian system \eqref{o10} as the leader's controlled state equation and hence the state feedback representation of the Stackelberg solution can be obtained at the same time for the leader and the follower. As a result, the corresponding Riccati equation here is of different form from the one in \cite{Yong02}. 
Since we deal with the case without decision variables in the diffusion term, we also show, under some appropriate assumptions, the existence and uniqueness of the solution to the derived Riccati equation with stochastic coefficients by means of a linear transformation to the standard stochastic Riccati equation. For the linear quadratic closed-loop Stackelberg game, we will see that the Hamiltonian system for the leader is no longer linear, which prevents us from getting an exogenous Riccati equation if we proceed the same way as in the open-loop case. Instead, we assume that the forward variable $y$ is linear with respect to the original state $x$ and derive an exogenous FBSDE which plays the same role as the Riccati equation in open-loop case. Throughout this section we assume the coefficients $A,B_i,C,Q_i,R_i,G_i$ are adapted bounded matrices, $Q_i,R_i,G_i$ are symmetric and nonnegative, and $R_i$ are uniformly positive, $i=1,2$. \subsection{The open-loop case} The state equation and cost functionals are given as follows. \begin{equation}\label{o7} \left\{ \begin{split} dx(t)&=(Ax+B_1u+B_2v)dt+CxdW(t),\\ x(0)&=x_0, \end{split}\right. \end{equation} \begin{equation}\label{o8} \begin{split} J_1(u,v)&=\frac{1}{2}E[\int_0^T(\langle Q_1x(t),x(t)\rangle+\langle R_1u(t),u(t)\rangle)dt+\langle G_1x(T),x(T)\rangle],\\ J_2(u,v)&=\frac{1}{2}E[\int_0^T(\langle Q_2x(t),x(t)\rangle+\langle R_2v(t),v(t)\rangle)dt+\langle G_2x(T),x(T)\rangle]. \end{split} \end{equation} Given leader's strategy $u\in\mathcal {U}$, it is well known that the follower's problem $$\min_{v\in\mathcal {V}}\ J_2(u,v)=\frac{1}{2}E[\int_0^T(\langle Q_2x(t),x(t)\rangle+\langle R_2v(t),v(t)\rangle)dt+\langle G_2x(T),x(T)\rangle]$$ subject to \begin{equation*} \left\{ \begin{split} dx(t)&=(Ax+B_1u+B_2v)dt+CxdW(t),\\ x(0)&=x_0, \end{split}\right. 
\end{equation*} is a standard linear quadratic optimal control problem and the unique solution is $$v^*(t)=-R_2^{-1}B_2^\top p_2,$$ where $p_2$ is the first part of the solution $(p_2,q_2)\in\mathcal{S}^2\times\mathcal{M}^2$ to the adjoint equation \begin{equation}\label{o9} \left\{ \begin{split} -dp_2(t)&=(A^\top p_2+C^\top q_2+Q_2x)dt-q_2dW(t),\\ p_2(T)&=G_2x(T). \end{split}\right. \end{equation} Then, the leader's problem is $$\min_{u\in\mathcal {U}}\ J_1(u)=\frac{1}{2}E[\int_0^T(\langle Q_1x(t),x(t)\rangle+\langle R_1u(t),u(t)\rangle)dt+\langle G_1x(T),x(T)\rangle]$$ subject to (the Hamiltonian system of the follower) \begin{equation}\label{o10} \left\{ \begin{split} dx(t)&=(Ax+B_1u-B_2R_2^{-1}B_2^\top p_2)dt+CxdW(t),\\ -dp_2(t)&=(A^\top p_2+C^\top q_2+Q_2x)dt-q_2dW(t),\\ x(0)&=x_0,\ p_2(T)=G_2x(T). \end{split}\right. \end{equation} The leader's problem is well-posed since for every $u\in\mathcal {U}$, the coefficients of the system \eqref{o10} satisfy the monotonicity condition proposed by Peng and Wu \cite{PengWu99}, which yields the existence and uniqueness of the solution $(x,p_2,q_2)$ to the system \eqref{o10}. Moreover, by similar arguments of Tang \cite{Tang03}, we can get the following estimate \begin{equation}\label{o11} E\sup_{0\leq t\leq T}|p_2(t)|^2+E\sup_{0\leq t\leq T}|x(t)|^2+E\int_0^T|q_2(t)|^2dt\leq L(|x_0|^2+E\int_0^T|u(t)|^2dt), \end{equation} where $L$ is a positive constant. With this estimate, we can adopt relevant arguments for standard linear quadratic optimal control problems in \cite{Meng11} and get the fact that the leader's objective functional $J_1(u)$ is convex in $u$, $$\lim_{\|u\|\rightarrow\infty}J_1(u)=\infty,$$ and $J_1(u)$ is Fr\'{e}chet differentiable over $\mathcal {U}$ with the representation \begin{equation} \begin{split} \langle J_1'(u),w\rangle=&E\int_0^T(\langle Q_1(t)x(t;x_0,u),x(t;0,w)\rangle+\langle R_1(t)u(t),w(t)\rangle)dt\\ &+\langle G_1x(T;x_0,u),x(T;0,w)\rangle. 
\end{split} \end{equation} Here we use $x(\cdot;x_0,u)$ to represent the solution of \eqref{o10} with initial state $x(0)=x_0$ and control $u$. As a conclusion of Proposition 2.1.2 in \cite{EkelandTeman76}, we know that the leader has a unique optimal strategy $u^*\in\mathcal {U}$ which satisfies $J_1'(u^*)=0$. Now we use dual representation to characterize the optimal strategy $u^*$. \begin{thm} For each $u\in\mathcal {U}$, there exists a unique solution $(x,y,p_1,q_1,p_2,q_2)$ to the FBSDE \begin{equation}\label{o12} \left\{ \begin{split} dx(t)&=(Ax+B_1u-B_2R_2^{-1}B_2^\top p_2)dt+CxdW(t),\\ -dp_2(t)&=(A^\top p_2+C^\top q_2+Q_2x)dt-q_2dW(t),\\ dy(t)&=(Ay+B_2R_2^{-1}B_2^\top p_1)dt+CydW(t),\\ -dp_1(t)&=(A^\top p_1+C^\top q_1-Q_2y+Q_1x)dt-q_1dW(t),\\ x(0)&=x_0,\ y(0)=0,\ p_1(T)=-G_2y(T)+G_1x(T),\ p_2(T)=G_2x(T). \end{split}\right. \end{equation} The necessary and sufficient condition for $u$ to be the leader's optimal strategy is $$u(t)=-R_1^{-1}B_1^\top p_1(t).$$ \end{thm} \begin{proof} It can be seen that the FBSDEs consisting of $(x,p_2,q_2)$ and $(y,p_1,q_1)$ are two decoupled systems. Therefore, for given $u\in\mathcal {U}$, we can first get the unique solution $(x,p_2,q_2)$ to the equation \begin{equation}\label{o13} \left\{ \begin{split} dx(t)&=(Ax+B_1u-B_2R_2^{-1}B_2^\top p_2)dt+CxdW(t),\\ -dp_2(t)&=(A^\top p_2+C^\top q_2+Q_2x)dt-q_2dW(t),\\ x(0)&=x_0,\ p_2(T)=G_2x(T). \end{split}\right. \end{equation} Let $\tilde{y}:=-y$. Then the FBSDE consisting of $(y,p_1,q_1)$ in \eqref{o12} can be converted into the following one \begin{equation}\label{o14} \left\{ \begin{split} d\tilde{y}(t)&=(A\tilde{y}-B_2R_2^{-1}B_2^\top p_1)dt+C\tilde{y}dW(t),\\ -dp_1(t)&=(A^\top p_1+C^\top q_1+Q_2\tilde{y}+Q_1x)dt-q_1dW(t),\\ \tilde{y}(0)&=0,\ p_1(T)=G_2\tilde{y}(T)+G_1x(T). \end{split}\right. \end{equation} The coefficients in the above system also satisfy the monotonicity condition in \cite{PengWu99}. 
So there exists a unique solution to \eqref{o14}, which also implies the existence and uniqueness of the solution $(x,y,p_1,q_1,p_2,q_2)$ to FBSDE \eqref{o12}. The necessary part comes directly from the maximum principle \eqref{oo6} and \eqref{o6}. Now we prove the sufficient part. Denote by $$(x(\cdot;x_0,u),y(\cdot;x_0,u),p_1(\cdot;x_0,u),q_1(\cdot;x_0,u),p_2(\cdot;x_0,u),q_2(\cdot;x_0,u))$$ and $$(x(\cdot;0,w),y(\cdot;0,w),p_1(\cdot;0,w),q_1(\cdot;0,w),p_2(\cdot;0,w),q_2(\cdot;0,w))$$ the solutions to the system of FBSDEs \eqref{o12} with initial states and controls as $(x_0,u)$ and $(0,w)$, respectively. Using It\^{o}'s formula to compute $$\langle p_1(t;x_0,u),x(t;0,w)\rangle+\langle p_2(t;0,w),y(t;x_0,u)\rangle$$ and taking the expectation, we can get \begin{equation}\label{o15} \begin{split} \langle J_1'(u),w\rangle =&E\langle G_1x(T;x_0,u),x(T;0,w)\rangle\\ &+E\int_0^T\langle Q_1(t)x(t;x_0,u),x(t;0,w)\rangle +\langle R_1(t)u(t),w(t)\rangle dt\\ =&E\int_0^T\langle R_1(t)u(t)+B_1^\top(t)p_1(t;x_0,u),w(t)\rangle dt. \end{split} \end{equation} Obviously $u=-R_1^{-1}B_1^\top p_1$ makes $J_1'(u)$ equal to zero, so it is an optimal strategy for the leader. \end{proof} From the uniqueness of the optimal strategy, we also know that FBSDE \begin{equation}\label{o16} \left\{ \begin{split} dx(t)&=(Ax-B_1R_1^{-1}B_1^\top p_1-B_2R_2^{-1}B_2^\top p_2)dt+CxdW(t),\\ -dp_2(t)&=(A^\top p_2+C^\top q_2+Q_2x)dt-q_2dW(t),\\ dy(t)&=(Ay+B_2R_2^{-1}B_2^\top p_1)dt+CydW(t),\\ -dp_1(t)&=(A^\top p_1+C^\top q_1-Q_2y+Q_1x)dt-q_1dW(t),\\ x(0)&=x_0,\ y(0)=0,\ p_1(T)=-G_2y(T)+G_1x(T),\ p_2(T)=G_2x(T), \end{split}\right. \end{equation} has a unique solution $(x,y,p_1,q_1,p_2,q_2)$. And the Stackelberg solution $(u^*,v^*)$ can be written as \begin{equation}\label{oo16} u^*=-R_1^{-1}B_1^\top p_1,\ \ v^*=-R_2^{-1}B_2^\top p_2. \end{equation} In what follows we see $(x,y)$ as the state and derive the feedback representation of the Stackelberg solution $(u^*,v^*)$ in terms of $(x,y)$. 
We denote \begin{equation*} \hat{x}=\left( \begin{array}{c} x\\ y\\ \end{array}\right), \hat{p}=\left( \begin{array}{c} p_1\\ p_2\\ \end{array}\right), \hat{q}=\left( \begin{array}{c} q_1\\ q_2\\ \end{array}\right), \end{equation*} and \begin{equation*} \begin{split} \hat{A}=&\left( \begin{array}{cc} A&0\\ 0&A\\ \end{array}\right), \hat{B}=\left( \begin{array}{cc} B_1R_1^{-1}B_1^\top&B_2R_2^{-1}B_2^\top\\ -B_2R_2^{-1}B_2^\top&0\\ \end{array}\right), \hat{C}=\left( \begin{array}{cc} C&0\\ 0&C\\ \end{array}\right),\\ \hat{Q}=&\left( \begin{array}{cc} Q_1&-Q_2\\ Q_2&0\\ \end{array}\right), \hat{G}=\left( \begin{array}{cc} G_1&-G_2\\ G_2&0\\ \end{array}\right). \end{split} \end{equation*} Then FBSDE \eqref{o16} can be rewritten as \begin{equation}\label{o17} \left\{ \begin{split} d\hat{x}(t)&=(\hat{A}\hat{x}(t)-\hat{B}\hat{p}(t))dt+\hat{C}\hat{x}dW(t),\\ d\hat{p}(t)&=-(\hat{A}^\top\hat{p}+\hat{C}^\top\hat{q}+\hat{Q}\hat{x})dt+\hat{q}dW(t),\\ \hat{x}(0)&=0, \ \hat{p}(T)=\hat{G}\hat{x}(T). \end{split}\right. \end{equation} Suppose there is a matrix-valued process $K$ such that \begin{equation}\label{oo18} \hat{p}=K\hat{x}, \end{equation} and $K$ has a stochastic differential form \begin{equation}\label{o18} dK(t)=M(t)dt+L(t)dW(t). \end{equation} Applying It\^{o}'s formula to $K\hat{x}$, we get \begin{equation}\label{o19} \begin{split} &M\hat{x}dt+L\hat{x}dW(t)+K(\hat{A}\hat{x}-\hat{B}K\hat{x}(t))dt+K\hat{C}\hat{x}dW(t)+L\hat{C}\hat{x}dt\\ =&d\hat{p}(t)\\ =&-(\hat{A}^\top K\hat{x}+\hat{C}^\top\hat{q}+\hat{Q}\hat{x})dt+\hat{q}dW(t). \end{split} \end{equation} Comparing the diffusion terms in \eqref{o19}, we have \begin{equation}\label{o20} \hat{q}=L\hat{x}+K\hat{C}\hat{x}. 
\end{equation} Substituting the expression into \eqref{o19} and comparing the drift terms, we get \begin{equation}\label{o21} \begin{split} &M\hat{x}+K(\hat{A}\hat{x}-\hat{B}K\hat{x}(t))+L\hat{C}\hat{x}\\ =&-\hat{A}^\top K\hat{x}-\hat{C}^\top(L\hat{x}+K\hat{C}\hat{x})-\hat{Q}\hat{x}, \end{split} \end{equation} which yields $$M=-K\hat{A}-\hat{A}^\top K+K\hat{B}K-L\hat{C}-\hat{C}^\top L-\hat{C}^\top K\hat{C}-\hat{Q}.$$ Therefore, we get the Riccati equation \begin{equation}\label{o22} \left\{ \begin{split} dK(t)&=-(K\hat{A}+\hat{A}^\top K-K\hat{B}K+L\hat{C}+\hat{C}^\top L+\hat{C}^\top K\hat{C}+\hat{Q})dt+LdW(t),\\ K(T)&=\hat{G}. \end{split}\right. \end{equation} The difference between the above Riccati equation and the standard one from stochastic LQ problems without control in diffusion terms (see, e.g., \cite{Peng92}) is that $\hat{B}$, $\hat{Q}$ and $\hat{G}$ here are not symmetric matrices. For $n=1$ and under some appropriate assumptions on the coefficient matrices, we show in the following proposition that Riccati equation \eqref{o22} can be connected to a standard one through a linear transformation for FBSDE \eqref{o17}. \begin{prop} Suppose that $n=1$ and $\alpha$ and $\beta$ are two positive constants such that $$\frac{Q_2}{Q_1}=\frac{G_2}{G_1}=\alpha,\ \ \frac{B_2R_2^{-1}B_2^\top}{B_1R_1^{-1}B_1^\top}=\beta.$$ Then, the Riccati equation \eqref{o22} has a unique solution. 
\end{prop} \begin{proof} We make the transformation \begin{equation}\label{o23} \hat{x}=\tilde{x},\ \hat{p}=\Phi\tilde{p},\ \hat{q}=\Phi\tilde{q}, \end{equation} where $$\Phi=\left(\begin{array}{cc} 1&-2\beta\\ 2\alpha&1\\ \end{array}\right).$$ Then FBSDE \eqref{o17} can be converted into the following one \begin{equation}\label{o24} \left\{ \begin{split} d\tilde{x}(t)&=(\tilde{A}\tilde{x}(t)-\tilde{B}\tilde{p}(t))dt+\tilde{C}\tilde{x}dW(t),\\ d\tilde{p}(t)&=-(\tilde{A}^\top\tilde{p}+\tilde{C}^\top\tilde{q}+\tilde{Q}\tilde{x})dt+\tilde{q}dW(t),\\ \tilde{x}(0)&=0, \ \tilde{p}(T)=\tilde{G}\tilde{x}(T), \end{split}\right. \end{equation} where $$\tilde{A}=\hat{A},\ \tilde{C}=\hat{C},$$ $$\tilde{B}=\left( \begin{array}{cc} B_1R_1^{-1}B_1^\top+2\alpha B_2R_2^{-1}B_2^\top&-B_2R_2^{-1}B_2^\top\\ -B_2R_2^{-1}B_2^\top&2\beta B_2R_2^{-1}B_2^\top\\ \end{array}\right),$$ $$\tilde{Q}=\frac{1}{4\alpha\beta}\left( \begin{array}{cc} Q_1+2\beta Q_2&-Q_2\\ -Q_2&2\alpha Q_2\\ \end{array}\right),$$ $$\tilde{G}=\left( \begin{array}{cc} G_1+2\beta G_2&-G_2\\ -G_2&2\alpha G_2\\ \end{array}\right).$$ Now the matrices $\tilde{B}$, $\tilde{Q}$ and $\tilde{G}$ are symmetric and positive definite. Suppose $$\tilde{p}=\tilde{K}\tilde{x},$$ and $$d\tilde{K}=\tilde{K}_1dt+\tilde{L}dW(t).$$ With the same procedure to derive Riccati equation \eqref{o22}, we can get a standard Riccati equation for $(\tilde{K},\tilde{L})$ \begin{equation}\label{o25} \left\{ \begin{split} d\tilde{K}(t)&=-(\tilde{K}\tilde{A}+\tilde{A}^\top \tilde{K}-\tilde{K}\tilde{B}\tilde{K}+\tilde{L}\tilde{C}+\tilde{C}^\top \tilde{L}+\tilde{C}^\top \tilde{K}\tilde{C}+\tilde{Q})dt+\tilde{L}dW(t),\\ \tilde{K}(T)&=\tilde{G}. \end{split}\right. 
\end{equation} According to the results in \cite{Bismut78} or \cite{Peng92}, or the more general case in \cite{Tang03}, we know that Riccati equation \eqref{o25} has a unique solution $(\tilde{K},\tilde{L})$ and $$\tilde{p}=\tilde{K}\tilde{x},\ \tilde{q}=(\tilde{L}+\tilde{K}\tilde{C})\tilde{x}.$$ Consequently, \begin{equation}\label{o26} \begin{split} \hat{p}&=\Phi\tilde{p}=\Phi\tilde{K}\tilde{x}=\Phi\tilde{K}\hat{x},\\ \hat{q}&=\Phi\tilde{q}=\Phi(\tilde{L}+\tilde{K}\tilde{C})\tilde{x}=\Phi(\tilde{L}+\tilde{K}\tilde{C})\hat{x}. \end{split} \end{equation} Comparing \eqref{o26} with \eqref{oo18} and \eqref{o20}, we finally get $$K=\Phi\tilde{K},\ L=\Phi\tilde{L}.$$ From \eqref{oo16} we obtain that the Stackelberg solution $(u^*,v^*)$ has a feedback representation in terms of the state $(x,y)$. \end{proof} \subsection{The closed-loop case} As pointed out in the deterministic case \cite{PapavassiCruz79}, the relative independence of the leader's strategy $u$ and its derivative $\frac{\partial u}{\partial x}$ in a closed-loop Stackelberg game makes the leader so powerful that his Hamiltonian $H$ is likely to achieve $-\infty$ if there is no restriction on the derivative $\frac{\partial u}{\partial x}$. One way to restrict the leader's strength is to add a penalty term $\frac{\partial u}{\partial x}$ in his cost functional in order that $H$ is convex with respect to $(u,\frac{\partial u}{\partial x})$. The other way is to impose a priori bounds on $\frac{\partial u}{\partial x}$ to keep $H$ finite. In this section we will adopt the latter way and assume $\frac{\partial u}{\partial x}$ to be bounded, since it will appear as the coefficient of the unknowns in adjoint equations and the boundedness of the derivative $\frac{\partial u}{\partial x}$ implies the well-posedness of the leader's problem when affine strategies are adopted. 
For simplicity, we consider a one-dimensional linear quadratic game, with the state equation and cost functionals of the two players as follows \begin{equation*} \left\{ \begin{split} dx(t)&=[Ax(t)+B_1u(t)+B_2v(t)]dt+Cx(t)dW(t),\\ x(0)&=x_0, \end{split}\right. \end{equation*} and \begin{align*} J_1 &= \frac{1}{2}E [\int_{0}^T(Q_1x^2(t)+R_1u^{2}(t))dt+G_1x^2(T)],\\ J_2 &= \frac{1}{2}E [\int_{0}^T(Q_2x^2(t)+R_2v^{2}(t))dt+G_2x^2(T)]. \end{align*} The admissible strategy spaces from which the leader and the follower choose their strategies are given by \begin{equation*} \begin{split} \mathcal {U}&:=\{u|u:\Omega\times[0,T]\times\mathbb{R}\rightarrow U\ \textrm{is}\ \mathcal {F}_t\textrm{-adapted for any}\ x\in\mathbb{R},\ u(t,x)\ \textrm{is continuously}\\ &\ \ \ \ \ \ \textrm{differentiable in}\ x\ \textrm{for any}\ (\omega,t)\in\Omega\times[0,T],\ \textrm{and the derivative}\ |\frac{\partial u}{\partial x}|\leq K\\ &\ \ \ \ \ \ \textrm{for some positive constant $K$}\},\\ \mathcal {V}&:=\{v|v:\Omega\times[0,T]\times\mathbb{R}\rightarrow V\ \textrm{is }\ \mathcal {F}_t\textrm{-adapted for any}\ x\in\mathbb{R}\}. \end{split} \end{equation*} Suppose that for each of the leader's strategies $u\in\mathcal {U}$, the follower has a unique optimal response $v^*\in\mathcal {V}$. From \eqref{g3} we know $$v^*=-R_2^{-1}B_2p_2,$$ with $p_2$ satisfying \begin{equation*} \left\{ \begin{split} dp_2(t)&=-[(A+B_1\frac{\partial u}{\partial x})p_2+Cq_2+Q_2x]dt+q_2dW(t),\\ p_2(T)&=G_2x(T). \end{split}\right. \end{equation*} Therefore the leader's problem is \begin{equation}\label{lq2} \min_{u\in\mathcal{U}}J_1= \frac{1}{2}E [\int_{0}^T(Q_1x^2(t)+R_1u^{2}(t))dt+G_1x^2(T)] \end{equation} subject to \begin{equation}\label{lq3} \left\{ \begin{split} dx(t)&=[Ax(t)+B_1u(t,x(t))-R_2^{-1}B^2_2p_2(t)]dt+Cx(t)dW(t),\\ dp_2(t)&=-[(A+B_1\frac{\partial u}{\partial x})p_2+Cq_2+Q_2x]dt+q_2dW(t),\\ x(0)&=x_0,\ p_2(T)=G_2x(T). \end{split}\right. 
\end{equation} Suppose that for every $u(t,x)\in\mathcal {U}$, there is a unique solution $(x,p_2,q_2)$ to FBSDE \eqref{lq3}. According to the discussions in section \ref{sgs}, we know that the leader will lose nothing if he chooses his strategy among affine functions $$u(t,x)=u_2(t)x+u_1(t),$$ with $u_1$ and $u_2$ being adapted processes and $|u_2|\leq K$. Then the leader's equivalent problem can be written as \begin{equation}\label{lq4} \min_{u_1,u_2}\ J_1= \frac{1}{2}E \{\int_{0}^T[Q_1x^2(t)+R_1(u_2(t)x(t)+u_1(t))^2]dt+G_1x^2(T)\} \end{equation} subject to \begin{equation}\label{lq5} \left\{ \begin{split} dx(t)&=[(A+B_1u_2)x+B_1u_1-R_2^{-1}B^2_2p_2]dt+Cx(t)dW(t),\\ dp_2(t)&=-[(A+B_1u_2)p_2+Cq_2+Q_2x]dt+q_2dW(t),\\ x(0)&=x_0,\ p_2(T)=G_2x(T). \end{split}\right. \end{equation} For every pair $(u_1,u_2)$, the monotonicity condition guarantees the existence and uniqueness of the solution to \eqref{lq5}. Therefore, the leader's problem with strategies restricted being of affine form is well-posed. In what follows we use the maximum principle to get the Hamiltonian system and related Riccati equation for leader's problem \eqref{lq4}-\eqref{lq5}. Denote \begin{equation}\label{lq6} \begin{split} &H_1(t,u_1,u_2,x,y,p_1,p_2,q_1,q_2)\\ =&p_1[(A+B_1u_2)x+B_1u_1-R_2^{-1}B^2_2p_2]+Cxq_1\\ &-y[(A+B_1u_2)p_2+Cq_2+Q_2x]+\frac{1}{2}[Q_1x^2+R_1(u_2x+u_1)^2]. \end{split} \end{equation} To obtain $(u_1^*,u_2^*)$ that minimizes $H_1(t,u_1,u_2,x,y,p_1,p_2,q_1,q_2)$, we first fix $u_2$ and minimize $H_1$ with respect to $u_1$. By computation, \begin{equation}\label{lq7} u_1^*=-u_2x-R_1^{-1}B_1p_1. \end{equation} Substituting \eqref{lq7} into the expression \eqref{lq6} of $H$, we can see the only term containing $u_2$ is \begin{equation}\label{lq9} -B_1yp_2u_2. 
\end{equation} Therefore, the optimal $u_2^*$ is \begin{equation}\label{lq10} u_2^*=\left\{ \begin{array}{ccc} -K, &\ \mbox{if $\Delta>0$},\\ K, &\ \mbox{if $\Delta<0$},\\ \mbox{undefined,} &\ \mbox{if $\Delta=0$}, \end{array}\right. \end{equation} where $$\Delta:=-B_1yp_2.$$ To find a candidate for the optimal pair $(u_1^*,u_2^*)$, we set \begin{equation*} \begin{split} u_2^*:=&bang(K,-K;\Delta)\\ :=&sgn(B_1yp_2)K\\ =&sgn(y)sgn(B_1p_2)K\\ =&sgn(p_2)sgn(B_1y)K, \end{split} \end{equation*} where $sgn$ is the sign function defined by \begin{equation*} sgn(x)=\left\{ \begin{array}{ccc} 1&\ \mbox{if $x>0$,}\\ 0&\ \mbox{if $x=0$,}\\ -1&\ \mbox{if $x<0$.} \end{array}\right. \end{equation*} From \eqref{lq7} we get \begin{equation}\label{lq11a} u_1^*=-bang(K,-K;\Delta)x-R_1^{-1}B_1p_1. \end{equation} If $(u_1^*,u_2^*)\in\mathcal {U}\times\mathcal {V}$ is a solution to the leader's problem \eqref{lq4}-\eqref{lq5}, then the maximum principle yields that there exist adapted processes $y$, $p_1$, and $q_1$ such that \begin{equation}\label{lq11} \left\{ \begin{split} dx(t)&=[(A+B_1u_2^*)x+B_1u_1^*-R_2^{-1}B^2_2p_2]dt+Cx(t)dW(t),\\ dy(t)&=[(A+B_1u_2^*)y+R_2^{-1}B^2_2p_1]dt+CydW(t),\\ dp_1(t)&=-[(A+B_1u_2^*)p_1+Cq_1-Q_2y+Q_1x+R_1u_2^*(u_2^*x+u_1^*)]dt+q_1dW(t),\\ dp_2(t)&=-[(A+B_1u_2^*)p_2+Cq_2+Q_2x]dt+q_2dW(t),\\ x(0)&=x_0,\ y(0)=0,\ p_1(T)=-G_2y(T)+G_1x(T),\ p_2(T)=G_2x(T),\\ u_1^*&=-bang(K,-K;\Delta)x-R_1^{-1}B_1p_1,\ u_2^*:=bang(K,-K;\Delta). \end{split}\right. \end{equation} As in the open-loop case, we proceed to express the optimal strategy $(u_1^*,u_2^*)$ in a non-anticipating way by means of the state feedback representation. 
Substituting the expressions of $u_1^*$ and $u_2^*$ into the FBSDE in \eqref{lq11}, we get \begin{equation}\label{glq12} \left\{ \begin{split} dx(t)&=[Ax-R_1^{-1}B_1^2p_1-R_2^{-1}B^2_2p_2]dt+Cx(t)dW(t),\\ dy(t)&=[(A+B_1bang(K,-K;\Delta))y+R_2^{-1}B^2_2p_1]dt+CydW(t)\\ &=[Ay+sgn(p_2)K|B_1y|+R_2^{-1}B^2_2p_1]dt+CydW(t),\\ dp_1(t)&=-[Ap_1+Cq_1-Q_2y+Q_1x]dt+q_1dW(t),\\ dp_2(t)&=-[(A+B_1bang(K,-K;\Delta))p_2+Cq_2+Q_2x]dt+q_2dW(t)\\ &=-[Ap_2+sgn(y)K|B_1p_2|+Cq_2+Q_2x]dt+q_2dW(t),\\ x(0)&=x_0,\ y(0)=0,\ p_1(T)=-G_2y(T)+G_1x(T),\ p_2(T)=G_2x(T). \end{split}\right. \end{equation} In contrast to FBSDE \eqref{o16} in the open-loop case, the presence of the additional nonlinear term $bang(K,-K;\Delta)$ in FBSDE \eqref{glq12} makes it a nonlinear system. Moreover, the Lipschitz continuity assumption usually made for the coefficients in the literature does not hold here. Therefore, the existence and uniqueness of the solution to \eqref{glq12}, as far as we know, is still not available. On the other hand, if we still view $(x,y)$ as the ``state'' and represent $(p_1,p_2)$ in terms of $(x,y)$ as in the open-loop case, we can not derive an exogenous Riccati equation. Instead, we only see $x$ as the state and suppose \begin{equation}\label{lq12} y(t)=\xi(t)x(t),\ \ p_1(t)=\eta(t)x(t),\ \ p_2(t)=\zeta(t)x(t), \end{equation} and \begin{equation}\label{lq13} \begin{split} d\xi(t)&=\xi_1(t)dt+\xi_2(t)dW(t),\\ d\eta(t)&=\eta_1(t)dt+\eta_2(t)dW(t),\\ d\zeta(t)&=\zeta_1(t)dt+\zeta_2(t)dW(t). \end{split} \end{equation} By It\^{o}'s formula and in view of \eqref{lq12} \begin{equation}\label{lq14} \begin{split} dy(t)=&\xi(t)dx(t)+x(t)d\xi(t)+Cx(t)\xi_2(t)dt\\ =&\xi(t)[Ax-R_1^{-1}B_1^2p_1-R_2^{-1}B^2_2p_2]dt+C\xi(t)x(t)dW(t)\\ &+\xi_1(t)x(t)dt+\xi_2(t)x(t)dW(t)+C\xi_2(t)x(t)dt\\ =&\{[A-R_1^{-1}B_1^2\eta(t)-R_2^{-1}B^2_2\zeta(t)]\xi(t)\\ &+\xi_1(t)+C\xi_2(t)\}x(t)dt+[C\xi(t)+\xi_2(t)]x(t)dW(t). 
\end{split} \end{equation} On the other hand, \begin{equation}\label{lq15} \begin{split} dy(t)=&[(A+B_1bang(K,-K;\Delta))y+R_2^{-1}B^2_2p_1]dt+CydW(t)\\ =&[(A+B_1bang(K,-K;\tilde{\Delta}))\xi(t)+R_2^{-1}B^2_2\eta(t)]x(t)dt+C\xi(t)x(t)dW(t), \end{split} \end{equation} where $$\tilde{\Delta}:=-B_1\xi(t)\zeta(t).$$ Comparing \eqref{lq14} and \eqref{lq15}, we have \begin{equation*} \begin{split} \xi_2(t)=&0,\\ \xi_1(t)=&[R_1^{-1}B_1^2\eta(t)+R_2^{-1}B^2_2\zeta(t)+B_1bang(K,-K;\tilde{\Delta})]\xi(t)+R_2^{-1}B^2_2\eta(t). \end{split} \end{equation*} With the same procedure, we can get \begin{equation*} \left\{ \begin{split} \eta_1(t)=&[R_1^{-1}B_1^2\eta(t)+R_2^{-1}B^2_2\zeta(t)-2A-C^2]\eta(t)+Q_2\xi(t)-2C\eta_2(t)-Q_1,\\ \zeta_1(t)=&[R_1^{-1}B_1^2\eta(t)+R_2^{-1}B^2_2\zeta(t)-2A-C^2-B_1bang(K,-K;\tilde{\Delta})]\zeta(t)-2C\zeta_2(t)-Q_2. \end{split}\right. \end{equation*} Therefore, we derive the related Riccati equation for problem \eqref{lq4}-\eqref{lq5} \begin{equation*} \left\{ \begin{split} d\xi(t)=&\{[R_1^{-1}B_1^2\eta(t)+R_2^{-1}B^2_2\zeta(t)+B_1bang(K,-K;\tilde{\Delta})]\xi(t)+R_2^{-1}B^2_2\eta(t)\}dt\\ =&\{[R_1^{-1}B_1^2\eta(t)+R_2^{-1}B^2_2\zeta(t)]\xi(t)+sgn(\zeta(t))|B_1\xi(t)|+R_2^{-1}B^2_2\eta(t)\}dt,\\ d\eta(t)=&\{[R_1^{-1}B_1^2\eta(t)+R_2^{-1}B^2_2\zeta(t)-2A-C^2]\eta(t)+Q_2\xi(t)-2C\eta_2(t)\\ &-Q_1\}dt+\eta_2(t)dW(t),\\ d\zeta(t)=&\{[R_1^{-1}B_1^2\eta(t)+R_2^{-1}B^2_2\zeta(t)-2A-C^2-B_1bang(K,-K;\tilde{\Delta})]\zeta(t)\\ &-2C\zeta_2(t)-Q_2\}dt+\zeta_2(t)dW(t)\\ =&\{[R_1^{-1}B_1^2\eta(t)+R_2^{-1}B^2_2\zeta(t)-2A-C^2]\zeta(t)-sgn(\xi(t))|B_1\zeta(t)|\\ &-2C\zeta_2(t)-Q_2\}dt+\zeta_2(t)dW(t),\\ \xi(0)=&0,\ \eta(T)=-G_2\xi(T)+G_1,\ \zeta(T)=G_2. \end{split}\right. \end{equation*} Suppose $(\xi,\eta,\zeta,\eta_2,\zeta_2)$ is a solution to the above FBSDE and $x^*$ solves the linear SDE \begin{equation*} \left\{ \begin{split} dx(t)&=[A-R_1^{-1}B_1^2\eta-R_2^{-1}B^2_2\zeta]x(t)dt+Cx(t)dW(t),\\ x(0)&=x_0. \end{split}\right. 
\end{equation*} Then we can use It\^{o}'s formula to verify that \begin{equation*} \begin{split} y(t):=&\xi(t)x^*(t),\ p_1(t):=\eta(t)x^*(t),\ p_2(t):=\zeta(t)x^*(t),\\ q_1(t):=&[C\eta(t)+\eta_2(t)]x^*(t),\ q_2(t):=[C\zeta(t)+\zeta_2(t)]x^*(t), \end{split} \end{equation*} together with $x^*$ solve the leader's Hamiltonian system \eqref{glq12}. Therefore, $$u(t,x)=bang(K,-K;\tilde{\Delta})x-bang(K,-K;\tilde{\Delta})x^*(t)-R_1^{-1}B_1\eta(t)x^*(t)$$ with $\tilde{\Delta}=-B_1\xi(t)\zeta(t)$ is a candidate of the leader's optimal strategy. \end{document}
\begin{document} \title{Canonical Representations for Circular-Arc Graphs Using Flip Sets} \begin{abstract} We show that computing canonical representations for circular-arc (CA) graphs reduces to computing certain subsets of vertices called flip sets. For a broad class of CA graphs, which we call uniform, it suffices to compute a CA representation to find such flip sets. As a consequence canonical representations for uniform CA graphs can be obtained in polynomial-time. We then investigate what kind of CA graphs pose a challenge to this approach. This leads us to introduce the notion of restricted CA matrices and show that the canonical representation problem for CA graphs is logspace-reducible to that of restricted CA matrices. As a byproduct, we obtain the result that CA graphs without induced 4-cycles can be canonized in logspace. \end{abstract} \section{Introduction} We consider an arc to be a connected set of points on the unit circle including the endpoints. A CA graph is a graph whose vertices can be assigned arcs such that two vertices are adjacent iff their corresponding arcs intersect. More formally, given a graph $G$ we call it a CA graph if there exists a function $\rho$ which maps every vertex $u$ of $G$ to an arc $\rho(u)$ such that $u$ and $v$ are adjacent iff their arcs $\rho(u)$ and $\rho(v)$ have non-empty intersection. We call such a mapping $\rho$ a CA representation of $G$. CA graphs are a form of geometrical intersection graphs. Let $\mathcal{X}$ be a family of sets over some ground set. Any subset $Y$ of $\mathcal{X}$ defines a graph $G_Y$ which has $Y$ as its vertex set and two vertices are adjacent if they have non-empty intersection. The graph $G_Y$ is called intersection graph of $Y$. We say a (finite) graph $G$ is an intersection graph of $\mathcal{X}$ if it is isomorphic to the intersection graph of $Y$ for some $Y \subseteq \mathcal{X}$. In this language CA graphs are intersection graphs of arcs. 
The intersection graphs of intervals on the real line are called interval graphs. In this sense any set of geometrical objects defines a (geometrical intersection) graph class. CA graphs are a generalization of interval graphs since every set of intervals on the real line can be `bent' into arcs while preserving the intersection relation. Therefore every interval graph is a CA graph. Being a generalization of interval graphs---the archetype of geometrical intersection graphs---CA graphs are quite prominent as well and have been known for decades. Since then structural properties and algorithmic problems for this class have been thoroughly investigated with \cite{gav} and \cite{tuc} being two of the earliest works in this regard. In particular, finding characterizations of CA graphs and constructing a CA representation for a given CA graph have received a great deal of attention. Remarkably, finding a forbidden induced subgraph characterization of CA graphs is still an open problem. See \cite{lin2} for a survey on this line of research and \cite{cao} for one of the most recent results in that direction. It should also be mentioned that CA graphs are of practical relevance with applications arising in disciplines such as genetics and operations research. An explanation of the connection between genetics and interval graphs in layman's terms can be found in \cite{wat}. For a specialized account on this connection emphasizing circularity see \cite{stahl}. An example of how CA graphs can be used to model the problem of phasing traffic lights is given in \cite{gol}. In this work we consider the canonical representation problem for CA graphs. The representation problem for CA graphs is as follows. Given a CA graph $G$ as input we want to output a CA representation $\rho_G$ of $G$. 
The canonical variant of this problem imposes the additional requirement that for every pair of isomorphic CA graphs $G$ and $H$ their representations $\rho_G$ and $\rho_H$ should have identical underlying sets of arcs, i.e.~$\set{\rho_G(v)}{ v \in V(G)} = \set{\rho_H(v)}{ v \in V(H)}$. Notice that solving the representation problem for CA graphs implies solving the recognition problem for CA graphs, i.e.~the question given a graph $G$ is it a CA graph. Likewise, solving the canonical representation problem for CA graphs implies solving the isomorphism problem for CA graphs, i.e.~deciding whether two given CA graphs are isomorphic. Consider the following generalization of interval graphs: 2-interval graphs are intersection graphs of two intervals on the real line. It is easy to see that this class contains CA graphs because given a set of arcs one can cut the circle at some point and straighten the arcs. The arcs which are cut can be modeled as two intervals. It is interesting to note that the isomorphism problem for interval graphs is logspace-complete \cite{kob:intv} while the one for 2-interval graphs is already GI-complete and CA graphs lie in between these two classes. The GI-completeness for 2-interval graphs follows from the fact that every line graph is a 2-interval graph and line graphs are already GI-complete. To see why this inclusion holds consider a graph $G$ and its line graph $L(G)$. Assign every vertex $v$ of $G$ an interval $I_v$ on the real line such that no two intervals $I_u$ and $I_v$ intersect for every pair of distinct vertices $u$ and $v$ of $G$. The 2-interval model for $L(G)$ is obtained by mapping every edge $\{u,v\}$ of $G$ to the two intervals $I_u$ and $I_v$. While a polynomial-time algorithm for deciding isomorphism of interval graphs has been known since 1976 due to Booth and Lueker\nocite{lue} this question still remains open for CA graphs.
There have been two claimed polynomial-time algorithms for deciding isomorphism of CA graphs in \cite{wu} and \cite{hsu} which were shown to be incorrect in \cite{esc} and \cite{cur} respectively. For interval graphs even a linear-time algorithm for isomorphism is known \cite{lue}. A more recent result is that canonical interval representations for interval graphs can be computed in logspace and that this is optimal in the sense that recognition and deciding isomorphism for interval graphs is logspace-complete \cite{kob:intv}. These two hardness results also carry over to the class of CA graphs. Furthermore, the isomorphism problems for proper CA graphs \cite{kob:pca} and Helly CA graphs \cite{kob:hca} have been shown to be decidable in logspace. It is also shown how to obtain canonical representations for these subclasses in logspace. In this article we explain how the method used in \cite{kob:hca} to obtain canonical representations for Helly CA graphs can be adapted to CA graphs in general. Following this approach, canonical representations for CA graphs can be found by computing certain subsets of vertices called flip sets in an isomorphism-invariant manner. We introduce the class of uniform CA graphs for which this method yields canonical representations in polynomial-time. We then aim to isolate the instances of CA graphs which are difficult to handle with this method. We try to capture these hard instances by what we call restricted CA matrices and show that the canonical representation problem for CA graphs is logspace-reducible to that of restricted CA matrices. During this isolation process we find a subset of uniform CA graphs, namely $\Delta$-uniform CA graphs, for which canonical representations can be computed in logspace. The $\Delta$-uniform CA graphs contain Helly CA graphs and every CA graph without an induced 4-cycle. This generalizes the canonization result for Helly CA graphs given in \cite{kob:hca}.
A preliminary version of this work appeared in \cite{cha1}. The paper is organized as follows. In the third section we formalize the idea of computing invariant flip sets in order to obtain canonical representations for CA graphs. This leads to the definition of invariant flip set functions. In the fourth section we investigate for what CA graphs a particular invariant flip set function is easy to compute. This leads to the class of uniform CA graphs. We also provide an alternative characterization of uniform CA graphs in terms of whether certain triangles in a CA graph have an unambiguous representation. The main result of this section is that the representation problem for uniform CA graphs, the canonical representation problem for uniform CA graphs and the non-Helly triangle representability problem (introduced in section 4) for uniform CA graphs are all logspace-equivalent. In the fifth section we consider the structure of non-uniform CA graphs, introduce restricted CA matrices and show how the canonical representation problem for CA graphs can be reduced to that of restricted CA matrices. In the process of proving this reduction the class of $\Delta$-uniform CA graphs is defined and it is shown that this class can be canonized in logspace. \section{Preliminaries} For a number $n \in \mathbb{N}$ we write $[n]$ for $\{1,\dots,n \}$. Given two sets $A,B$ we say $A$ and $B$ intersect if $A \cap B \neq \emptyset$. We say $A$ and $B$ overlap, in symbols $A \between B$, if $A \cap B, A \setminus B$ and $B \setminus A$ are non-empty. We consider graphs without self-loops which sometimes have colored vertices and colored edges. They can be seen as relational structures with the vertex set as universe and vertex colors encoded as unary relations and edge colors as binary relations. The standard notion of isomorphism for relational structures applies. 
We describe a graph with vertex colors as tuple $(G,c)$ where $c$ is a function that maps the vertices of $G$ to the colors. We talk about a graph with edge colors as a square matrix whose entries represent the edge colors and identify the indices of the matrix and the vertices of the graph. Consequently, we identify a square matrix with the graph that it represents and talk about it in graph-theoretical terms. By a class of (relational) structures we mean a set of such structures which is closed under isomorphism. We call a bijective function $\tau$ which maps the vertices of a graph $G$ to some set $V'$ a relabeling of $G$ and $\tau(G)$ denotes the graph obtained after relabeling the vertices of $G$ according to $\tau$. Let $G$ and $H$ be two graphs and let $X \subseteq V(G)$ and $Y \subseteq V(H)$. We say $X$ and $Y$ are in the same orbit, in symbols $X \sim_{\mathrm{orb}} Y$, if there exists an isomorphism $\pi$ from $G$ to $H$ such that $\pi(X) = Y$. Let $f$ be a function which maps a graph along with a subset of its vertex set to a binary string, i.e.~$f(G,X) \in \{0,1\}^*$ and $X \subseteq V(G)$. We call $f$ an invariant for a graph class $\mathcal{C}$ if $f(G,X) = f(H,Y)$ whenever $X \sim_{\mathrm{orb}} Y$ and $G,H \in \mathcal{C}$. Let us call a function $f$ which maps a graph $G$ to a family of subsets of its vertex set, i.e.~$f(G) \subseteq \mathcal{P}(V(G))$, a vertex set selector. For example, the function that maps a graph to the set of its cliques is a vertex set selector. The characteristic function $\chi_f$ of a vertex set selector $f$ is defined as $\chi_f(G,X) = 1 \Leftrightarrow X \in f(G)$. We say a vertex set selector $f$ is invariant for a graph class $\mathcal{C}$ if its characteristic function $\chi_f$ is an invariant for $\mathcal{C}$. We call $f$ globally invariant if $\chi_f$ is an invariant for all graphs. 
Intuitively, a vertex set selector $f$ is invariant for $\mathcal{C}$ if a graph $G \in \mathcal{C}$ can be arbitrarily relabeled and $f$ still returns the `same' vertex sets as before w.r.t.~$\sim_{\mathrm{orb}}$. The following definitions are with respect to a graph $G$. Throughout the paper it will be always clear from context with respect to what graph these expressions are to be interpreted. For a vertex $v$ we define its open neighborhood $N(v)$ as the set of vertices which are adjacent to $v$ and its closed neighborhood $N[v] = N(v) \cup \{v\}$. A vertex $v$ is called universal if $N[v] = V(G)$. For two vertices $u, v$ we say that $u$ and $v$ are twins if $N[u] = N[v]$. A graph $G$ is twin-free if for every pair of distinct vertices $u \neq v$ it holds that $N[u] \neq N[v]$. A twin class is an inclusion-maximal set of vertices $X$ such that for all $u,v \in X$ it holds that $u$ and $v$ are twins. For two subsets of vertices $S,S'$ with $S' \subseteq S$ we define the exclusive neighborhood $N_{S}(S')$ as all vertices $v \in V(G) \setminus S$ such that $v$ is connected to all vertices in $S'$ and to none in $S \setminus S'$. Let $A$ be a square matrix with entries from a set $\mathcal{E}$. For a vertex $u$ of $A$ and $x \in \mathcal{E}$ we define $N^x(u) = \set{v \in V}{ A_{u,v} = x}$. \subparagraph*{Logspace Transducers and Reductions.} We assume deterministic Turing machines as default model of computation. A logspace transducer is a deterministic Turing machine $M$ with a read-only input tape, a work tape and a write-only output tape. The work tape is only allowed to use at most $\mathcal{O}(\log n)$ cells where $n$ denotes the input length. To write onto the output tape $M$ has a designated state called output state with the following semantic. If $M$ enters the output state then the symbol in the current cell of the work tape is written to the current cell of the output tape and the head on the output tape is moved one cell to the right. 
Other than that, $M$ cannot write or move the head on the output tape. This means as soon as something is written to the output tape it cannot be modified afterwards. Let $\Sigma$ and $\Gamma$ be the input and work alphabet of $M$ respectively. Then $M$ computes a function $f_M \colon \Sigma^* \rightarrow \Gamma^*$. We say a (partial) function $f$ is computed by a logspace transducer $M$ if $f(x) = f_M(x)$ whenever $f(x)$ is defined. We call $f$ logspace-computable if there exists a logspace transducer $M$ which computes $f$. The class of logspace-computable functions is closed under composition. Let $f$ be a function which maps words over some alphabet to words over some other alphabet. We say that the length of $f$ is polynomially bounded if $|f(x)|$ is polynomially bounded by $|x|$. Only functions whose length is polynomially bounded can be logspace-computable since the runtime of a logspace transducer is polynomially bounded. A language $L \subseteq \Sigma^*$ is in logspace if its characteristic function is logspace-computable. Given two functions $f$ and $g$ we say $f$ is logspace-reducible to $g$ if there exists $l \in \mathbb{N}$ and logspace-computable functions $r_1,\dots,r_l$ such that $f$ can be expressed as composition of $g$ and $r_1,\dots,r_l$. Intuitively, this means that an oracle which can compute $g$ can be queried a constant number of times when constructing a logspace transducer for $f$. For two functions $f$ and $g$ we say that they are logspace-equivalent if $f$ is logspace-reducible to $g$ and vice versa. Analogously, given three functions $f,g_1,g_2$ we say $f$ is logspace-reducible to $g_1$ and $g_2$ if there exists $l \in \mathbb{N}$ and logspace-computable functions $r_1,\dots,r_l$ such that $f$ can be expressed as composition of $g_1,g_2$ and $r_1,\dots,r_l$. \subparagraph*{Circular-Arc Graphs and Representations.} A CA model is a set of arcs $\mathcal{A} = \{A_1,\dots,A_n\}$ on the circle. Let $p \neq p'$ be two points on the circle. 
Then the arc $A$ specified by $[p,p']$ is given by the part of the circle that is traversed when starting from $p$ going in clockwise direction until $p'$ is reached. We say that $p$ is the left and $p'$ the right endpoint of $A$ and write $l(\cdot),r(\cdot)$ to denote the left and right endpoint of an arc in general. If $A = [p,p']$ then the arc obtained by swapping the endpoints $\overline{A} = [p',p]$ covers exactly the opposite part of the circle plus the endpoints. We say $\overline{A}$ is obtained by flipping $A$. In our context, we are only interested in the intersection structure of a CA model and thus only the relative positions of the endpoints to each other matter. All endpoints can w.l.o.g.~be assumed to be pairwise different and no arc covers the full circle. Under these assumptions, a CA model $\mathcal{A}$ with $n$ arcs can be described as a unique string as follows. Pick an arbitrary arc $A \in \mathcal{A}$ and relabel the arcs with $1, \dots, n$ in order of appearance of their left endpoints when traversing the circle clockwise starting from the left endpoint of $A$. Then write down the endpoints in order of appearance when traversing the circle clockwise starting from the left endpoint of $A$. Do this for every arc and pick the lexicographically smallest resulting string as representation for $\mathcal{A}$. For example, the smallest such string for the CA model in Figure~\ref{fig:ca_intro} would result from choosing $A_1$: ($l(1),r(1),l(2),r(5),l(3),r(2),\dots$). Let $\mathrm{str}(\mathcal{A})$ denote this smallest string representation. For a CA model $\mathcal{A}$ let $\mathcal{A}^{\mathrm{r}}$ be the CA model obtained after reversing the order of its endpoints. Observe that reversing the endpoints does not affect the intersection structure of a CA model.
Therefore we consider two CA models $\mathcal{A}$ and $\mathcal{B}$ to be equal if $\mathrm{str}(\mathcal{A}) = \mathrm{str}(\mathcal{B})$ or $\mathrm{str}(\mathcal{A}^{\mathrm{r}}) = \mathrm{str}(\mathcal{B})$. Let $G$ be a graph and let $\rho = (\mathcal{A},f)$ consist of a CA model $\mathcal{A}$ and a bijective mapping $f$ from the vertices of $G$ to the arcs in $\mathcal{A}$. Then $\rho$ is called a CA representation of $G$ if for all $u \neq v \in V(G)$ it holds that $\{u,v\} \in E(G) \Leftrightarrow f(u) \cap f(v) \neq \emptyset$. We write $\rho(x)$ to mean the arc $f(x)$ corresponding to the vertex $x$, $\rho(G)$ for the CA model $\mathcal{A}$ and for a subset $V' \subseteq V(G)$ let $\rho[V'] = \left\{ \rho(v) \mid v \in V' \right\}$. A graph is a CA graph if it has a CA representation. \begin{figure} \caption{A CA graph and a representation of it} \label{fig:ca_intro} \end{figure} We say a CA model $\mathcal{A}$ has a hole if there exists a point on the circle which is not contained in any arc in $\mathcal{A}$. Every such CA model can be understood as interval model (a set of intervals on the real line) by straightening the arcs. Conversely, every interval model can be seen as CA model by bending the intervals. Therefore a graph is an interval graph iff it admits a CA representation with a hole. A family of sets $\mathcal{F}$ over some ground set is called Helly if for all subsets $\mathcal{F}'$ of $\mathcal{F}$ such that all elements in $\mathcal{F}'$ intersect pairwise it holds that $\cap_{A \in \mathcal{F}'} A$ is non-empty. A CA graph $G$ is called Helly (HCA graph) if it has a CA representation $\rho$ with a Helly CA model $\rho(G)$. This is the case iff for all inclusion-maximal cliques $C$ in $G$ it holds that the overall intersection of $C$ in $\rho$ is non-empty, i.e.~$\bigcap_{v \in C} \rho(v) \neq \emptyset$. Every interval model has the Helly property and therefore every interval graph is a Helly CA graph.
The intersection type of two circular arcs $A$ and $B$ can be one of the following five types: \begin{itemize} \item $\ensuremath{\mathtt{di}}$: $A$ and $B$ are disjoint --- $A \cap B = \emptyset$ \item $\ensuremath{\mathtt{cs}}$: $A$ contains $B$ --- $B \subset A$ \item $\ensuremath{\mathtt{cd}}$: $A$ is contained by $B$ --- $A \subset B$ \item $\ensuremath{\mathtt{cc}}$: $A$ and $B$ jointly cover the circle (circle cover) --- $A \between B$ and $A \cup B = $ whole circle \item $\ensuremath{\mathtt{ov}}$: $A$ and $B$ overlap --- $A \between B$ and $A \cup B \neq $ whole circle \end{itemize} \begin{figure} \caption{A CA model $\mathcal{A}$ and the CA matrix which it induces.} \label{fig:imatrix} \end{figure} Using these types we can associate a matrix with every CA model. An intersection matrix is a square matrix with entries $\ensuremath{\{ \circlecover, \contained, \contains, \disjoint, \overlap \}}$. Given a CA model $\mathcal{A}$ we define its intersection matrix $\mu_\mathcal{A}$ such that $(\mu_{\mathcal{A}})_{A,B} \in \ensuremath{\{ \circlecover, \contained, \contains, \disjoint, \overlap \}}$ reflects the intersection type of the arcs $A \neq B \in \mathcal{A}$. An intersection matrix $\mu$ is called a CA (interval) matrix if it is the intersection matrix of some CA model (with a hole). See Figure \ref{fig:imatrix} for an example of a CA model and the CA matrix which it induces. Given an intersection matrix $\mu$ and two distinct vertices $u, v$ of $\mu$ we sometimes write $u \: \alpha \: v$ instead of $\mu_{u,v} = \alpha$ if $\mu$ is clear from the context. Also, we sometimes talk about an intersection matrix $\mu$ as if it were a graph. In that case we consider two vertices $u,v$ of $\mu$ to be adjacent if they do not have a $\ensuremath{\mathtt{di}}$-entry in $\mu$. When trying to construct a CA representation for a CA graph $G$ it is clear that whenever two vertices are non-adjacent their corresponding arcs must be disjoint in every CA representation of $G$.
For two adjacent vertices the intersection type of their corresponding arcs might depend on the particular CA representation of $G$ that one considers. Hsu has shown that this ambiguity can be removed as follows \cite{hsu}. We adopt the notation of \cite{kob:hca}. \begin{definition} For a graph $G$ we define its neighborhood matrix $\lambda_G$ which is an intersection matrix as \[ (\lambda_G)_{u,v} = \begin{cases} \ensuremath{\mathtt{di}} &, \text{if } \{ u,v \} \notin E(G) \\ \ensuremath{\mathtt{cd}} &, \text{if } N[u] \subsetneq N[v] \\ \ensuremath{\mathtt{cs}} &, \text{if } N[v] \subsetneq N[u] \\ \ensuremath{\mathtt{cc}} &, \text{if } N[u] \between N[v] \text{ and } N[u] \cup N[v] = V(G) \\ & \text{ and } \forall w \in N[u]\setminus N[v]: N[w] \subset N[u] \\ & \text{ and } \forall w \in N[v]\setminus N[u]: N[w] \subset N[v] \\ \ensuremath{\mathtt{ov}} &, \text{otherwise} \end{cases} \] for all $u \neq v \in V(G)$. \end{definition} Let $\mu$ be an intersection matrix with vertex set $V$ and let $\rho = (\mathcal{A},f)$ where $\mathcal{A}$ is a CA model and $f$ is a bijective mapping from $V$ to $\mathcal{A}$. We say $\rho$ is a CA representation of $\mu$ if $f$ is an isomorphism from $\mu$ to the intersection matrix $\mu_{\mathcal{A}}$ of $\mathcal{A}$. We denote the set of such CA representations for $\mu$ with $\mathcal{N}(\mu)$. The representation problem for CA matrices is to compute a CA representation for a given CA matrix $\mu$. The canonical representation problem for CA matrices is defined analogously to the canonical representation problem for CA graphs. We say $\rho$ is a normalized CA representation for a graph $G$ if $\rho$ is a CA representation for the neighborhood matrix $\lambda_G$ of $G$. An example of a normalized representation can be seen in Figure~\ref{fig:normrepr}. Let us denote the set of all normalized CA representations for $G$ with $\mathcal{N}(G) = \mathcal{N}(\lambda_G)$. 
\begin{figure} \caption{A CA graph and a normalized representation thereof. Every non-labeled edge corresponds to an $\ensuremath{\mathtt{ov}}$-entry.} \label{fig:normrepr} \end{figure} \begin{lemma}[Corollary 2.3.~\cite{hsu}] Every twin-free CA graph $G$ without a universal vertex has a normalized CA representation, that is $\mathcal{N}(G) \neq \emptyset$. \end{lemma} \begin{lemma} The canonical representation problem for CA graphs is logspace-reducible to the canonical representation problem for vertex-colored twin-free CA graphs without a universal vertex. \end{lemma} \begin{proof} For a graph $G$ let $G_0$ denote the induced subgraph of $G$ that is obtained by removing all universal vertices from $G$ and only taking one vertex from each twin-class and deleting the rest. Let $c_0$ be a coloring of $G_0$ which assigns each vertex the cardinality of its twin class in $G$. It holds that $(G_0,c_0)$ and the number of universal vertices in $G$ suffice to reconstruct $G$. Let $G$ be a CA graph. Compute the graph $(G_0,c_0)$. Since $(G_0,c_0)$ is twin-free and without universal vertices we can compute a canonical representation $\rho_0$ for it. For a vertex $v$ of $G$ let $v_0$ denote the twin of $v$ that occurs in $G_0$. A canonical representation of $G$ is given by $v \mapsto \rho_0(v_0)$ for every non-universal vertex $v$ of $G$ and every universal vertex of $G$ is represented by an arc which intersects with all other arcs. \end{proof} Therefore for our purposes it suffices to consider only twin-free graphs without universal vertices and a vertex-coloring. \begin{proviso} From this point on we assume every graph to be twin-free and without a universal vertex unless explicitly stated otherwise. As a consequence we view CA graphs as a set of CA matrices in the sense that the neighborhood matrix of every CA graph is a CA matrix.
\end{proviso} \subparagraph*{Flips in Intersection Matrices.} \begin{table}[b] \caption{Algebraic flip functions $Z_{xy} \colon \ensuremath{\{ \circlecover, \contained, \contains, \disjoint, \overlap \}} \rightarrow \ensuremath{\{ \circlecover, \contained, \contains, \disjoint, \overlap \}}$} \label{tab:flip} \begin{center} \begin{tabular}{l | c c c c c} $Z_{xy}(\alpha)$ & \ensuremath{\mathtt{cc}} & \ensuremath{\mathtt{cd}} & \ensuremath{\mathtt{cs}} & \ensuremath{\mathtt{di}} & \ensuremath{\mathtt{ov}} \\ \hline $Z_{00}$ & \ensuremath{\mathtt{cc}} & \ensuremath{\mathtt{cd}} & \ensuremath{\mathtt{cs}} & \ensuremath{\mathtt{di}} & \ensuremath{\mathtt{ov}} \\ $Z_{01}$ & \ensuremath{\mathtt{cs}} & \ensuremath{\mathtt{di}} & \ensuremath{\mathtt{cc}} & \ensuremath{\mathtt{cd}} & \ensuremath{\mathtt{ov}} \\ $Z_{10}$ & \ensuremath{\mathtt{cd}} & \ensuremath{\mathtt{cc}} & \ensuremath{\mathtt{di}} & \ensuremath{\mathtt{cs}} & \ensuremath{\mathtt{ov}} \\ $Z_{11}$ & \ensuremath{\mathtt{di}} & \ensuremath{\mathtt{cs}} & \ensuremath{\mathtt{cd}} & \ensuremath{\mathtt{cc}} & \ensuremath{\mathtt{ov}} \end{tabular} \end{center} \end{table} McConnell \cite{mcc} observed that the operation of flipping arcs in CA models has a counterpart in intersection matrices. He called this counterpart operation algebraic flips. Note that for two arcs $A,B$ with intersection type $\alpha \in \ensuremath{\{ \circlecover, \contained, \contains, \disjoint, \overlap \}}$ the intersection type of $\overline{A}$ and $B$ is solely determined by $\alpha$. More precisely, the intersection type of $\overline{A}$ and $B$ is $Z_{10}(\alpha)$ where $Z_{10}$ is the function defined in Table~\ref{tab:flip}. Similarly, the intersection type of $A$ and $\overline{B}$ is given by $Z_{01}(\alpha)$. Using the functions $Z_{ij}$ we can define the operation of flipping a set of vertices in an intersection matrix. \begin{definition} Let $\mu$ be an intersection matrix with vertex set $V$ and $X \subseteq V$. 
We define the intersection matrix $\mu^{(X)}$ obtained after flipping the vertices $X$ in $\mu$ as $$ \mu^{(X)}_{u,v} = Z_{ij}(\mu_{u,v}) \text{ with } i = 1 \text{ iff } u \in X \text{ and } j = 1 \text{ iff } v \in X $$ for all $u \neq v$ in $V$. \end{definition} Since flipping the same set of arcs twice is an involution it follows that $(\mu^{(X)})^{(X)} = \mu$. \begin{definition} Let $V$ be a set of vertices, let $\mathcal{A}$ be a set of arcs and let $\rho$ be a function that maps $V$ to $\mathcal{A}$. Then $\rho^{(X)} \colon V \rightarrow \mathcal{A}$ for $X \subseteq V$ is defined as follows: $$ \rho^{(X)}(v) = \begin{cases} \overline{\rho(v)} & \text{, if $v \in X$} \\ \rho(v) & \text{, if $v \notin X$} \end{cases} $$ \end{definition} Notice that flipping vertices in an intersection matrix is equivalent to flipping arcs in a CA representation in the following sense. Given an intersection matrix $\lambda$ and a subset of its vertices $X$ it holds that $\rho \in \mathcal{N}(\lambda) \Leftrightarrow \rho^{(X)} \in \mathcal{N}(\lambda^{(X)})$. Also, it is not difficult to observe that flipping is an isomorphism-invariant operation in the sense that flipping sets of vertices which are in the same orbit lead to isomorphic intersection matrices. \section{Flip Trick} In this section we generalize the idea used by Köbler, Kuhnert and Verbitsky in \cite{kob:hca} to compute canonical representations for Helly CA graphs. They showed that finding canonical representations for Helly CA graphs can be reduced to finding canonical representations for vertex-colored interval matrices. We show that the idea behind this reduction also works for CA matrices in general. Recall that CA graphs can be seen as special case of CA matrices since the neighborhood matrix of every CA graph is a CA matrix. 
The converse does not hold, i.e.~there exist CA matrices which are not expressible as the neighborhood matrix of a CA graph (for instance any CA matrix with only two vertices that are not disjoint). The key result here, which is used in the subsequent sections, is that finding canonical representations for CA matrices is logspace-reducible to the task of computing what we call an invariant flip set function. McConnell showed in \cite{mcc} that CA representations for CA graphs can be computed as follows. Given a CA graph $G$ with neighborhood matrix $\lambda$ one can compute a set of vertices $X$ of $G$ such that $\lambda^{(X)}$ is an interval matrix. We call such a set $X$ a flip set. Then by computing an interval representation $\rho$ for $\lambda^{(X)}$ and flipping back the arcs $X$ in $\rho$ one obtains a CA representation for $\lambda$ and therefore for $G$ as well \cite{mcc}. We essentially use the same argument to obtain canonical CA representations. \begin{definition} Let $\lambda$ be a CA matrix. A subset of vertices $X$ of $\lambda$ is called a flip set if there exists a representation $\rho \in \mathcal{N}(\lambda)$ and a point $x$ on the circle such that $v \in X$ iff $\rho(v)$ contains the point $x$. \end{definition} The concept of flip sets has already been implicitly defined and used in both \cite{mcc} and \cite{kob:hca}. They observed that $\lambda^{(X)}$ is an interval matrix whenever $X$ is a flip set of a CA matrix $\lambda$. In fact, the other direction holds as well leading to the following characterization. \begin{lemma} Let $\lambda$ be a CA matrix and let $X$ be a subset of vertices of $\lambda$. It holds that $X$ is a flip set iff $\lambda^{(X)}$ is an interval matrix. \label{lem:fsim} \end{lemma} \begin{proof} ``$\Rightarrow$'': Let $X$ be a flip set of $\lambda$.
Let $\rho \in \mathcal{N}(\lambda)$ be a witnessing representation of the fact that $X$ is a flip set, i.e.~there exists a point $x$ on the circle such that every arc $\rho(v)$ with $v \in X$ contains $x$ and every arc $\rho(v)$ with $v \notin X$ does not contain $x$. Consider the representation $\rho^{(X)} \in \mathcal{N}(\lambda^{(X)})$. It holds that no arc $\rho^{(X)}(v)$ with $v \in V(\lambda)$ contains the point $x$ which implies that there is a hole in $\rho^{(X)}$ and thus $\lambda^{(X)}$ is an interval matrix. ``$\Leftarrow$'': Let $X$ be a subset of vertices of $\lambda$ such that $\lambda^{(X)}$ is an interval matrix. We argue that $X$ must be a flip set. Let $\rho \in \mathcal{N}(\lambda^{(X)})$ be a CA representation of $\lambda^{(X)}$ containing a hole at point $x$ on the circle. Such a representation must exist since $\lambda^{(X)}$ is an interval matrix. This means the arc $\rho(v)$ does not contain the point $x$ for every vertex $v \in V(\lambda)$. Consider the representation $\rho^{(X)} \in \mathcal{N}({(\lambda^{(X)})}^{(X)}) = \mathcal{N}(\lambda)$. Then it can be checked that $\rho^{(X)}(v)$ contains the point $x$ iff $v$ is in $X$ and therefore $X$ is a flip set with respect to $\lambda$. \end{proof} We already mentioned that the canonical representation problem for vertex-colored interval matrices can be solved in logspace due to \cite{kob:hca}. However, since the theorem that we reference just states this result for uncolored interval matrices we shortly explain how to modify the proof to incorporate the coloring, which is a straightforward task for anyone familiar with the proof. \begin{theorem}[{\cite[Thm.~5.5]{kob:hca}}] The canonical representation problem for vertex-colored interval matrices can be solved in logspace. \label{thm:vcim_canon} \end{theorem} \begin{proof} In Theorem 5.5 of \cite{kob:hca} it is stated that a canonical interval representation for an interval matrix can be found in logspace. 
To prove this they convert the input interval matrix $\lambda$ into a colored tree $\mathbb{T}(\lambda)$ called $\Delta$ tree which is a complete invariant for interval matrices. The leaves of this tree correspond to the vertices of $\lambda$. By appending the color of a vertex from our vertex-colored interval matrix $\lambda$ to the existing color of its corresponding leaf node in the colored $\Delta$ tree $\mathbb{T}(\lambda)$ one obtains a complete invariant for vertex-colored interval matrices. Then by applying the same argument given in the proof of Theorem 5.5 one can also compute a canonical representation for a vertex-colored interval matrix using this slightly modified colored $\Delta$ tree. \end{proof} A consequence of Lemma~\ref{lem:fsim} and Theorem~\ref{thm:vcim_canon} is that flip sets can be recognized in logspace. Given an intersection matrix $\lambda$ and a subset of vertices $X$ of $\lambda$ it suffices to check whether $\lambda^{(X)}$ is an interval matrix by trying to compute an interval representation. \begin{definition} Let $\mathcal{C}$ be a class of CA matrices and let $f$ be a vertex set selector. The function $f$ is called an invariant flip set function for $\mathcal{C}$ if the following conditions hold: \begin{enumerate} \item For every $\lambda \in \mathcal{C}$ there exists an $X \in f(\lambda)$ such that $X$ is a flip set of $\lambda$ \item $f$ is invariant for $\mathcal{C}$ \end{enumerate} Recall that $f$ is globally invariant if $f$ is invariant for all intersection matrices. \label{def:cfsf} \end{definition} \begin{theorem} Let $\mathcal{C}$ be a class of CA matrices. The canonical representation problem for vertex-colored $\mathcal{C}$ is logspace-reducible to the problem of computing an invariant flip set function for $\mathcal{C}$. \label{thm:cfsf_cr} \end{theorem} \begin{proof} Let $f$ be an invariant flip set function for $\mathcal{C}$.
Given a vertex-colored CA matrix $(\lambda,c)$ with $ \lambda \in \mathcal{C}$ a canonical representation can be computed as follows. For every flip set $X \in f(\lambda)$ we associate it with the colored interval matrix $I_X = (\lambda^{(X)},c_X)$ where $c_X(v) = (c(v),\mathrm{red})$ if $v$ is in $X$ and $(c(v),\mathrm{blue})$ if $v$ is not in $X$ for all $v \in V(\lambda)$. For a colored interval matrix $I$ let $\hat{\rho}_I$ denote a canonical representation of $I$. Such a canonical representation can be computed in logspace due to Theorem~\ref{thm:vcim_canon}. Let $\hat{X}$ denote a flip set in $f(\lambda)$ such that the interval model of $\hat{\rho}_{I_{\hat{X}}}$ is lexicographically minimal, i.e.~for all flip sets $X$ in $f(\lambda)$ it holds that the model of $\hat{\rho}_{I_{X}}$ is not smaller than the model of $\hat{\rho}_{I_{\hat{X}}}$. Let $\hat{\rho}$ denote the CA representation that is obtained after flipping the red arcs in $\hat{\rho}_{I_{\hat{X}}}$. Since these are the arcs that were flipped to convert $\lambda$ into $I_{\hat{X}}$ it holds that $\hat{\rho}$ is a representation of $\lambda$. To see that this leads to a canonical representation consider two isomorphic vertex-colored CA matrices $(\lambda,c)$ and $(\mu,d)$ with $\lambda,\mu \in \mathcal{C}$ such that $V(\lambda)$ and $V(\mu)$ are disjoint. Let $\mathcal{I}_\lambda$ be the set of colored interval matrices $I_X$ for all flip sets $X \in f(\lambda)$, and the set $\mathcal{I}_\mu$ is defined analogously. Let $\mathcal{M}_\lambda$ be the set of interval models $M$ such that there exists an $I \in \mathcal{I}_\lambda$ and $M$ is the model underlying the canonical representation $\hat{\rho}_I$ of $I$. The set $\mathcal{M}_\mu$ is defined analogously. Since $f$ is invariant it follows that for every $I \in \mathcal{I}_\lambda$ there exists an $I' \in \mathcal{I}_{\mu}$ such that $I$ and $I'$ are isomorphic, and vice versa.
Since the models in $\mathcal{M}_\lambda$ and $\mathcal{M}_\mu$ only depend on the isomorphism type of the matrices in $\mathcal{I}_\lambda$ and $\mathcal{I}_\mu$ it follows that $\mathcal{M}_\lambda = \mathcal{M}_\mu$. The CA models which underlie the canonical representations of $\lambda$ and $\mu$ are both derived from the smallest element in $\mathcal{M}_\lambda = \mathcal{M}_\mu$ and thus are identical. \end{proof} Suppose that there is a partition of the set of CA graphs into two classes $\mathcal{C}$ and $\mathcal{D}$ such that you can efficiently compute invariant flip set functions for both classes. One might be misled into thinking that this implies canonical representations for all CA graphs can be found efficiently. However, this is not the case unless the class $\mathcal{C}$ (or $\mathcal{D}$) can be efficiently recognized, or one of the two invariant flip set functions is globally invariant. \begin{lemma} Let $\mathcal{C}$ and $\mathcal{D}$ be classes of CA matrices. The canonical representation problem for $\mathcal{C} \cup \mathcal{D}$ is logspace-reducible to the canonical representation problem for $\mathcal{C}$ and the problem of computing a globally invariant flip set function for $\mathcal{D}$. \label{lem:gir} \end{lemma} \begin{proof} Let $f$ be a globally invariant flip set function for $\mathcal{D}$. Let $\mathcal{D}'$ be the set of CA matrices $\lambda$ such that $f(\lambda)$ contains a flip set. Clearly, $\mathcal{D}$ is a subset of $\mathcal{D}'$. It holds that $f(\lambda)$ contains a flip set iff $\lambda \in \mathcal{D}'$. Since $f$ is globally invariant it follows that $f$ is an invariant flip set function for $\mathcal{D}'$. To obtain a canonical representation for a matrix $\lambda \in \mathcal{C} \cup \mathcal{D}$ first compute $f(\lambda)$. If $f(\lambda)$ contains a flip set it holds that $\lambda \in \mathcal{D}'$ and therefore the output of $f$ can be used to find a canonical representation for $\lambda$. 
If $f(\lambda)$ contains no flip set it must be the case that $\lambda \in \mathcal{C}$ and therefore the canonization algorithm for $\mathcal{C}$ can be applied. \end{proof} We conclude this section by restating the invariant flip set function that was used in \cite{kob:hca} to compute canonical representations for Helly CA graphs and explain why it is correct: $$f_{\mathrm{HCA}}(G) = \big\{ N[u] \cap N[v] \mid u,v \in V(G) \big\}$$ In a Helly CA graph $G$ every inclusion-maximal clique $C$ of $G$ is a flip set. To see why this holds let $\rho$ be a representation of $G$ with the Helly property. Since $C$ is a clique this means every pair of arcs $\rho(u)$ and $\rho(v)$ with $u,v \in C$ intersects. By the Helly property it follows that the overall intersection $\bigcap_{v \in C} \rho(v)$ is non-empty. This means there exists a point $x$ on the circle such that every arc $\rho(v)$ with $v \in C$ contains $x$. Assume there exists a vertex $w \in V(G) \setminus C$ such that $\rho(w)$ contains $x$. This means $w$ must be adjacent to every vertex in $C$, which contradicts that $C$ is inclusion-maximal. Hence $C$ is a flip set. In \cite[Thm.~3.2]{kob:hca} it is shown that every Helly CA graph contains at least one inclusion-maximal clique which can be expressed as the common neighborhood of two vertices. Therefore $f_{\mathrm{HCA}}(G)$ returns at least one flip set for every Helly CA graph $G$. Also, it is trivial to see that $f_{\mathrm{HCA}}$ is globally invariant. \section{Uniform Circular-Arc Graphs} We define the class of uniform CA graphs for which computing a particular invariant flip set function reduces to computing a representation. As a consequence, canonical representations for this class of CA graphs can be computed in polynomial-time. This is an interesting class for two reasons. First, it seems to capture the instances where it is easy to apply the flip trick. 
Secondly, its complement (within the CA graphs) is a rather exotic class of CA graphs with a quite particular structure. While the initial definition of uniformity makes it apparent why it suffices to find an arbitrary representation in order to obtain a canonical one, it is rather impractical when trying to understand what constitutes a uniform CA graph. We provide a more pleasant characterization of uniform CA graphs in terms of how certain triangles in a CA graph can be represented. This alternative characterization also reveals that every Helly CA graph is uniform. Additionally, we show that the canonical representation problem for uniform CA graphs is logspace-equivalent to what we call the non-Helly triangle representability problem. This problem is: given a CA graph $G$ and a set $T$ of three pairwise overlapping vertices as input, does there exist a representation $\rho$ of $G$ such that $T$ covers the whole circle in $\rho$? The following kind of flip set will lead us to uniform CA graphs when trying to compute canonical representations. Given a CA matrix $\lambda$ recall that $X$ is a flip set of $\lambda$ if there exists a representation $\rho \in \mathcal{N}(\lambda)$ and a point $x$ on the circle such that $x \in \rho(v)$ iff $v \in X$ for all vertices $v$ of $\lambda$. We impose the additional restriction that $x$ is not allowed to be an arbitrary point on the circle but instead has to be one of the endpoints in $\rho$. \begin{definition} Let $\lambda$ be a CA matrix and $u \in V(\lambda)$. A flip set $X$ of $\lambda$ is a $u$-flip set if there exists a representation $\rho \in \mathcal{N}(\lambda)$ and an endpoint $x$ of $\rho(u)$ such that $v \in X$ iff $\rho(v)$ contains the point $x$. \end{definition} \begin{figure}\caption{Schematic overview of a $u$-flip set and a $u$-$\ensuremath{\mathtt{ov}}$-partition}\label{fig:uniform_flipset} \label{fig:uovpart} \end{figure} Clearly, every CA graph has a $u$-flip set for every vertex $u$. On the other hand, there are CA graphs that have flip sets which are not $u$-flip sets for any vertex $u$. 
For example, consider the cycle graph with $n \geq 4$ vertices. Every flip set that consists of exactly one vertex is not a $u$-flip set for any vertex $u$ of the cycle graph. Consider the following task: given a CA graph $G$ and a vertex $u$, find a $u$-flip set of $G$. Clearly, no vertex $v$ which is disjoint from $u$ or contained by $u$ belongs to such a $u$-flip set $X$ since in every representation the arc of $v$ does not contain any of the two endpoints of the arc of $u$. Similarly, if a vertex $v$ contains $u$ or forms a circle cover with $u$ then in every representation the arc of $v$ contains both endpoints of $u$ and therefore must be included in $X$. See Figure \ref{fig:uniform_flipset} for a schematic overview. It remains to decide for the set of vertices $N^{\ensuremath{\mathtt{ov}}}(u)$ that overlap with $u$ whether they should be included in $X$. A vertex $v$ which overlaps with $u$ contains exactly one of the endpoints of $u$ in any representation. Let $x,y$ be two vertices that overlap with $u$. We say $x$ and $y$ overlap from the same side with $u$ in $\rho$ if $\rho(x)$ and $\rho(y)$ contain the same endpoint of $\rho(u)$. Evidently, this is an equivalence relation with respect to $u$ and $\rho$ which partitions $N^{\ensuremath{\mathtt{ov}}}(u)$ into two parts, namely the part which contains the left endpoint and the one which contains the right endpoint. If $X$ is a $u$-flip set then $X \cap N^{\ensuremath{\mathtt{ov}}}(u)$ must be an equivalence class of the `overlap from the same side with $u$ in $\rho$'-relation for some $\rho \in \mathcal{N}(G)$. 
\begin{definition} For a CA matrix $\lambda$ and a vertex $u$ of $\lambda$ we say a partition $Y$ of $N^{\ensuremath{\mathtt{ov}}}(u)$ into two parts is a $u$-$\ensuremath{\mathtt{ov}}$-partition if there exists a representation $\rho \in \mathcal{N}(\lambda)$ such that two vertices $x,y \in N^{\ensuremath{\mathtt{ov}}}(u)$ are in the same part of $Y$ iff $\rho(x)$ and $\rho(y)$ overlap from the same side with $\rho(u)$. We say $\ensuremath{\mathtt{ov}}$-partition to mean a $u$-$\ensuremath{\mathtt{ov}}$-partition for an arbitrary $u \in V(\lambda)$. \end{definition} In general, for a vertex $u$ of a CA graph $G$ there can be multiple $u$-$\ensuremath{\mathtt{ov}}$-partitions. In fact, there are instances with exponentially many $u$-$\ensuremath{\mathtt{ov}}$-partitions with respect to $|N^{\ensuremath{\mathtt{ov}}}(u)|$. A trivial way of obtaining at least one $u$-$\ensuremath{\mathtt{ov}}$-partition for every vertex $u$ of a CA graph $G$ is to compute an arbitrary representation $\rho \in \mathcal{N}(G)$. But the $\ensuremath{\mathtt{ov}}$-partitions obtained by this method are not invariant and thus do not yield canonical representations. However, if one considers CA graphs where there is only one $u$-$\ensuremath{\mathtt{ov}}$-partition for every vertex $u$ then an arbitrary representation suffices. \begin{definition}[Uniform CA Graphs] A CA graph $G$ is uniform if for every vertex $u$ in $G$ there exists exactly one $u$-$\ensuremath{\mathtt{ov}}$-partition. This partition is denoted by $P_u = \{P_{u,1}, P_{u,2}\}$. \label{fact:uovpart} \end{definition} \begin{lemma} The following mapping is an invariant flip set function for uniform CA graphs. Let $G$ be a uniform CA graph. 
$$ F_{\mathrm{uniform}}(G) = \bigcup_{\substack{u \in V(G) \\ i \in \{1,2\}}} \big\{ \{ u \} \cup N^{\ensuremath{\mathtt{cd}}}(u) \cup N^{\ensuremath{\mathtt{cc}}}(u) \cup P_{u,i} \big\} $$ \label{lem:funiform} \end{lemma} \begin{proof} Let $G$ be a uniform CA graph and let $X$ be in $F_{\mathrm{uniform}}(G)$ with $X = \{ u \} \cup N^{\ensuremath{\mathtt{cd}}}(u) \cup N^{\ensuremath{\mathtt{cc}}}(u) \cup P_{u,i}$ for some $u \in V(G)$ and $i \in \{1,2\}$. It follows from Figure \ref{fig:uniform_flipset} and the definition of $\ensuremath{\mathtt{ov}}$-partitions that $X$ is a $u$-flip set. The invariance of $F_{\mathrm{uniform}}(G)$ follows from the fact that the intersection type of two vertices as well as the property of being an $\ensuremath{\mathtt{ov}}$-partition is independent of the vertex labels. \end{proof} We remark that the function $F_{\mathrm{uniform}}$ is undefined for non-uniform CA graphs since the sets $P_{u,1}$ and $P_{u,2}$ are not well-defined in that context. \begin{theorem} Canonical representations for uniform CA graphs can be computed in polynomial-time. \label{thm:uca_canon} \end{theorem} \begin{proof} Let $G$ be a uniform CA graph. Compute a normalized representation $\rho$ of $G$ and extract the $u$-$\ensuremath{\mathtt{ov}}$-partition for each vertex $u$ from $\rho$. Then compute $F_{\mathrm{uniform}}(G)$ from Lemma~\ref{lem:funiform} and apply Theorem~\ref{thm:cfsf_cr} to obtain a canonical CA representation for $G$. Since CA representations can be computed in polynomial-time (see for instance \cite{mcc}) it follows that this procedure also works in polynomial-time. \end{proof} Considering that our definition of uniform CA graphs arose from the desire to compute invariant $u$-flip sets one might expect that these graphs are only a small special case of CA graphs. Surprisingly, quite the opposite is the case as we will see. We give an alternative definition of uniform CA graphs which gives a better intuition as to why many CA graphs are uniform. 
\begin{definition} Let $\lambda$ be a CA matrix. An $\ensuremath{\mathtt{ov}}$-triangle $T$ of $\lambda$ is a set of three vertices that overlap pairwise, i.e.~for all $u \neq v$ in $T$ it holds that $u \: \ensuremath{\mathtt{ov}} \: v$. An $\ensuremath{\mathtt{ov}}$-triangle $T$ is representable as non-Helly triangle (interval triangle) if there exists a representation $\rho \in \mathcal{N}(\lambda)$ such that the set of arcs $\set{\rho(x)}{x \in T}$ does (not) cover the whole circle. Let $\ensuremath{\mathcal{T}}nht(\lambda)$ and $\ensuremath{\mathcal{T}}it(\lambda)$ denote the sets of $\ensuremath{\mathtt{ov}}$-triangles representable as non-Helly triangles and interval triangles respectively. \label{def:ovtriangle} \end{definition} This definition also applies to CA graphs via their neighborhood matrix, i.e.~$\ensuremath{\mathcal{T}}it(G) = \ensuremath{\mathcal{T}}it(\lambda)$ and $\ensuremath{\mathcal{T}}nht(G) = \ensuremath{\mathcal{T}}nht(\lambda)$ where $\lambda$ is the neighborhood matrix of $G$. See Figure \ref{fig:ucathm} for an example where the vertices $u,x,z$ are represented as non-Helly triangle on the left and interval triangle on the right. Recall that a set of arcs which intersect pairwise but have overall empty intersection is called non-Helly. Since three pairwise overlapping arcs that cover the whole circle have overall empty intersection we call such a set a non-Helly triangle. In fact, one can verify that this is the only non-Helly arrangement of three arcs. A complete list of inclusion-minimal non-Helly CA models can be found in \cite[Corollary~3.1]{joe}. \begin{figure} \caption{``$\Leftarrow$''-direction in the proof of Theorem \ref{thm:uca_char}} \label{fig:ucathm} \end{figure} \begin{theorem} A CA graph $G$ is uniform iff $\ensuremath{\mathcal{T}}it(G) \cap \ensuremath{\mathcal{T}}nht(G) = \emptyset$. 
\label{thm:uca_char} \end{theorem} \begin{proof} ``$\Rightarrow$'': Assume there exists a uniform CA graph $G$ with $\ensuremath{\mathcal{T}}it(G) \cap \ensuremath{\mathcal{T}}nht(G) \neq \emptyset$. Let $T$ be an $\ensuremath{\mathtt{ov}}$-triangle in $\ensuremath{\mathcal{T}}it(G) \cap \ensuremath{\mathcal{T}}nht(G)$ and $T = \{x,y,z\}$. This means there exist two representations $\rho_I,\rho_N \in \mathcal{N}(G)$ such that $T$ is represented as interval triangle in $\rho_I$ and as non-Helly triangle in $\rho_N$. We assume w.l.o.g.~that $\rho_I(y) \subset \rho_I(x) \cup \rho_I(z)$, i.e.~$y$ is placed in-between $x$ and $z$ in $\rho_I$. This means $y$ and $z$ must be in the same part of the unique $x$-$\ensuremath{\mathtt{ov}}$-partition $P_x$. However, $y$ and $z$ do not contain the same endpoint of $x$ in the representation $\rho_N$, which contradicts that $G$ is uniform. ``$\Leftarrow$'': Assume there exists a CA graph $G$ with $\ensuremath{\mathcal{T}}it(G) \cap \ensuremath{\mathcal{T}}nht(G) = \emptyset$ that is not uniform. This means there exist a vertex $u$, two vertices $x,y \in N^\ensuremath{\mathtt{ov}}(u)$ and two representations $\rho,\rho' \in \mathcal{N}(G)$ such that $x$ and $y$ overlap from the same side with $u$ in $\rho$ but not in $\rho'$. This implies that $x$ and $y$ must overlap and therefore $T=\{u,x,y\}$ is an $\ensuremath{\mathtt{ov}}$-triangle. Notice that $T$ must be represented as interval triangle in $\rho$ because $x$ and $y$ both contain the same endpoint of $u$. It holds that $T$ is represented as interval triangle in $\rho'$ as well since otherwise $T \in \ensuremath{\mathcal{T}}it(G) \cap \ensuremath{\mathcal{T}}nht(G)$. Also, we assume w.l.o.g.~that $\rho(y) \subset \rho(x) \cup \rho(u)$. Since $u$ and $y$ overlap it holds that $N[u] \setminus N[y] \neq \emptyset$. Due to $\rho'$ it follows that $N[u] \setminus N[y] \subseteq N[u] \cap N[x]$. 
For a vertex $z \in N[u] \setminus N[y]$ to intersect with both $u$ and $x$ it is necessary that $z$ overlaps with $u$ and $x$ due to the representation $\rho$. It follows that $\{u,x,z\}$ is represented as non-Helly triangle in $\rho$. On the other hand, $\{u,x,z\}$ must be represented as interval triangle in $\rho'$ and therefore $\ensuremath{\mathcal{T}}it(G) \cap \ensuremath{\mathcal{T}}nht(G) \neq \emptyset$, contradiction. See Figure \ref{fig:ucathm} for a schematic overview of $\rho$ and $\rho'$. \end{proof} Observe that if an $\ensuremath{\mathtt{ov}}$-triangle $T$ of $G$ is representable as non-Helly triangle then this implies that $T$ must have certain structural properties in $G$. For example, every vertex of $G$ must be adjacent to at least one of the vertices in $T$ since $T$ covers the whole circle in some representation. Similarly, if $T$ is representable as interval triangle this also implies some structural properties. For instance, there must be an $x \in T$ such that every vertex that is adjacent to $x$ must also be adjacent to at least one other vertex in $T$. If an $\ensuremath{\mathtt{ov}}$-triangle is representable as both non-Helly triangle and interval triangle then it must satisfy all of these structural properties at once. As a consequence such an $\ensuremath{\mathtt{ov}}$-triangle must have a very particular structure which extends to the whole graph as we will see in the next section. A CA graph is Helly if it has a Helly CA representation. In \cite[Theorem~4.1]{joe} it is shown that every `stable' representation of a Helly CA graph is Helly. Since every normalized representation has the `stable' property it follows that a CA graph is Helly iff every normalized representation of it is Helly. If a CA graph $G$ is Helly this implies that $\ensuremath{\mathcal{T}}nht(G)$ is empty, and therefore every Helly CA graph is uniform. 
A natural question to consider is the computational complexity of deciding whether an $\ensuremath{\mathtt{ov}}$-triangle is representable as non-Helly triangle or interval triangle. Given a CA graph $G$ and an $\ensuremath{\mathtt{ov}}$-triangle $T$ of $G$ let us call the problem of deciding whether $T$ is in $\ensuremath{\mathcal{T}}nht(G)$ the non-Helly triangle representability problem. Analogously, deciding whether $T$ is in $\ensuremath{\mathcal{T}}it(G)$ is called the interval triangle representability problem. In the case of uniform CA graphs these two problems are complementary, i.e.~an $\ensuremath{\mathtt{ov}}$-triangle $T$ is in $\ensuremath{\mathcal{T}}nht(G)$ iff $T$ is not in $\ensuremath{\mathcal{T}}it(G)$. In the following, we show that solving either of these two problems for uniform CA graphs is logspace-equivalent to computing a canonical representation for uniform CA graphs. \begin{definition} Let $G$ be a CA graph and let $T=\{u,v,w\}$ be an $\ensuremath{\mathtt{ov}}$-triangle of $G$. We say $v$ is amidst $u$ and $w$ if one of the following conditions holds: \begin{enumerate} \item $N_T(u)$ and $N_T(w)$ are non-empty \item there exists a $z \in N_T(u,w)$ such that $\{u,w,z\} \in \ensuremath{\mathcal{T}}nht(G)$ \end{enumerate} \label{def:amidst} \end{definition} \begin{lemma} \label{lem:inbtw} Let $G$ be a uniform CA graph and let $T=\{u,v,w\}$ be an $\ensuremath{\mathtt{ov}}$-triangle of $G$ with $T \notin \ensuremath{\mathcal{T}}nht(G)$. Then the following statements are equivalent: \begin{enumerate} \item $v$ is amidst $u$ and $w$ \item $\exists \rho \in \mathcal{N}(G): \rho(v) \subset \rho(u) \cup \rho(w) $ \item $\forall \rho \in \mathcal{N}(G): \rho(v) \subset \rho(u) \cup \rho(w) $ \end{enumerate} \label{lem:amidst} \end{lemma} \begin{proof} ``2 $\Rightarrow$ 1'': Let $\rho$ be in $\mathcal{N}(G)$ such that $\rho(v) \subset \rho(u) \cup \rho(w)$ and assume that $v$ is not amidst $u,w$. 
Since $v$ overlaps with $u$ and $w$ it holds that $N[u] \setminus N[v]$ and $N[w] \setminus N[v]$ are non-empty. Because $N_T(u) = N_T(w) = \emptyset$ it must hold that $N_T(u,w) \neq \emptyset$. Let $z \in N_T(u,w)$. For $z$ to intersect with $u$ and $w$ in $\rho$ it must hold that $\{u,w,z\}$ is represented as non-Helly triangle in $\rho$. This contradicts the assumption that $v$ is not amidst $u,w$. ``1 $\Rightarrow$ 3'': Let $v$ be amidst $u$ and $w$ and assume that there exists a $\rho \in \mathcal{N}(G)$ such that $\rho(v) \not\subset \rho(u) \cup \rho(w)$. Since $T \notin \ensuremath{\mathcal{T}}nht(G)$ and $G$ is uniform it follows by Theorem \ref{thm:uca_char} that $T$ must be represented as interval triangle in every representation, which includes $\rho$. We assume w.l.o.g.~that $\rho(w) \subset \rho(u) \cup \rho(v)$. From that it follows that $N_T(w)$ is empty and therefore there must be a $z \in N_T(u,w)$ such that $\{u,w,z\}$ is a non-Helly triangle in $\rho$, which is impossible. ``3 $\Rightarrow$ 2'': clear. \end{proof} \begin{definition} Let $G$ be a CA graph and $u \in V(G)$. Let the binary relation $\sim_u$ on $N^{\ensuremath{\mathtt{ov}}}(u)$ be defined such that $x \sim_u y$ holds if one of the following holds: \begin{enumerate} \item $x = y$ \item $x \: \ensuremath{\mathtt{cd}} \: y$ or $x \: \ensuremath{\mathtt{cs}} \: y$ \item $x \: \ensuremath{\mathtt{ov}} \: y$, $\{u,x,y\} \notin \ensuremath{\mathcal{T}}nht(G)$ and $u$ is not amidst $x$ and $y$ \end{enumerate} \label{def:simu} \end{definition} \begin{lemma} For every uniform CA graph $G$ and $u \in V(G)$ it holds that the partition induced by $\sim_u$ equals the unique $u$-$\ensuremath{\mathtt{ov}}$-partition $P_u$. Stated differently, $x \sim_u y$ iff $x$ and $y$ are in the same part of $P_u$. 
\label{lem:simu} \end{lemma} \begin{proof} ``$\Rightarrow$'': Let $x \sim_u y$ and assume for the sake of contradiction that $x$ and $y$ are not in the same part of the $u$-$\ensuremath{\mathtt{ov}}$-partition. This means there exists a representation $\rho \in \mathcal{N}(G)$ such that $\rho(x)$ and $\rho(y)$ contain different endpoints of $\rho(u)$. This is only possible if $x$ and $y$ overlap. Since $\{u,x,y\} \notin \ensuremath{\mathcal{T}}nht(G)$ this means $\{u,x,y\}$ must be represented as interval triangle in $\rho$. In order for $\rho(x)$ and $\rho(y)$ to contain different endpoints of $\rho(u)$ it must hold that $\rho(u) \subset \rho(x) \cup \rho(y)$, which implies that $u$ is amidst $x$ and $y$ by Lemma \ref{lem:amidst}. This contradicts $x \sim_u y$. ``$\Leftarrow$'': Let $x$ and $y$ be in the same part of the $u$-$\ensuremath{\mathtt{ov}}$-partition and assume that $x \sim_u y$ does not hold. This implies that $x$ and $y$ must overlap and therefore $\{u,x,y\}$ form an $\ensuremath{\mathtt{ov}}$-triangle. For $x \sim_u y$ to not hold it must be either the case that $\{u,x,y\}$ is only representable as non-Helly triangle or $u$ is amidst $x$ and $y$. In both cases this contradicts $x$ and $y$ being in the same part of the $u$-$\ensuremath{\mathtt{ov}}$-partition. \end{proof} \begin{theorem} The representation, canonical representation, non-Helly triangle representability and interval triangle representability problem for uniform CA graphs are logspace-equivalent. \label{thm:eq_problem} \end{theorem} \begin{proof} The non-Helly triangle representability and interval triangle representability problem for uniform CA graphs are logspace-equivalent because they are complementary in the sense that an $\ensuremath{\mathtt{ov}}$-triangle is representable as non-Helly triangle iff it is not representable as interval triangle. 
This follows from the fact that an $\ensuremath{\mathtt{ov}}$-triangle can only be either represented as non-Helly triangle or interval triangle and these two possibilities are mutually exclusive in the case of uniform CA graphs. As a consequence these two problems are trivially reducible to the representation problem for uniform CA graphs. Given a uniform CA graph $G$, an $\ensuremath{\mathtt{ov}}$-triangle $T$ of $G$ and a representation $\rho \in \mathcal{N}(G)$ it holds that $T \in \ensuremath{\mathcal{T}}nht(G)$ iff $T \notin \ensuremath{\mathcal{T}}it(G)$ iff $T$ is represented as non-Helly triangle in $\rho$. The representation problem is obviously reducible to the canonical representation problem. Therefore it remains to show that the canonical representation problem for uniform CA graphs is reducible to the non-Helly triangle representability problem. To obtain a canonical representation for a uniform CA graph we can use the invariant flip set function given in Lemma \ref{lem:funiform}. To compute this function we need to figure out the unique $\ensuremath{\mathtt{ov}}$-partitions for each vertex. By Lemma \ref{lem:simu} this can be done by computing the equivalence relation $\sim_u$ for each vertex $u$. It can be verified that this relation is computable in logspace using queries of the form $T \in \ensuremath{\mathcal{T}}nht(G)$. \end{proof} The isomorphism problem for CA graphs can be reduced to the one for non-uniform CA graphs in polynomial-time due to Theorem~\ref{thm:uca_canon}. However, a reduction from the canonical representation problem for CA graphs to the one for non-uniform CA graphs does not immediately follow from Theorem~\ref{thm:uca_canon} unless uniform CA graphs can be recognized in polynomial-time. An alternative approach to construct such a reduction is to solve the non-Helly triangle representability problem for uniform CA graphs with an additional requirement. 
\begin{definition} The globally invariant non-Helly triangle representability problem for uniform CA graphs is defined as follows. Let $A$ be an algorithm that correctly decides the non-Helly triangle representability problem for uniform CA graphs. Let $f_A$ be the function computed by $A$, i.e.~for a graph $G$ and an $\ensuremath{\mathtt{ov}}$-triangle $T$ of $G$ it holds that $f_A(G,T) = 1$ iff $A$ accepts $(G,T)$. We say $A$ decides the globally invariant non-Helly triangle representability problem for uniform CA graphs if $f_A$ is an invariant \emph{for all graphs}. Stated differently, the output of $A$ must be independent of the vertex labels. \end{definition} \begin{lemma} The canonical representation problem for CA graphs is logspace-reducible to the globally invariant non-Helly triangle representability problem for uniform CA graphs and the canonical representation problem for vertex-colored non-uniform CA graphs. \label{lem:ginhtp} \end{lemma} \begin{proof} Suppose we are given an algorithm $A$ which solves the globally invariant non-Helly triangle representability problem for uniform CA graphs. We argue that $A$ can be used to compute a globally invariant flip set function for uniform CA graphs. From Lemma \ref{lem:gir} it then follows that the canonical representation problem for CA graphs reduces to that for vertex-colored non-uniform CA graphs. Given a CA graph $G$ let $\Delta(G,A)$ be the set of $\ensuremath{\mathtt{ov}}$-triangles $T$ of $G$ such that $A$ accepts $(G,T)$. If $G$ is a uniform CA graph then $\Delta(G,A) = \ensuremath{\mathcal{T}}nht(G)$. Consider Definition \ref{def:amidst} and \ref{def:simu} and suppose that each occurrence of $\ensuremath{\mathcal{T}}nht(G)$ is replaced by $\Delta(G,A)$. Let us call the new relation $\sim_u^A$. Clearly, in the case of uniform CA graphs $\sim_u$ and $\sim_u^A$ coincide. 
Next, consider the following variant of $F_{\mathrm{uniform}}$: $$ F_{\mathrm{uniform}}^A(G) = \bigcup_{\substack{u \in V(G) \\ X \in (N^{\ensuremath{\mathtt{ov}}}(u) / \sim_u^A)}} \big\{ \{ u \} \cup N^{\ensuremath{\mathtt{cd}}}(u) \cup N^{\ensuremath{\mathtt{cc}}}(u) \cup X \big\} $$ where $(N^{\ensuremath{\mathtt{ov}}}(u) / \sim_u^A)$ denotes the equivalence classes of $\sim_u^A$. If $\sim_u^A$ is not an equivalence relation let $(N^{\ensuremath{\mathtt{ov}}}(u) / \sim_u^A) = \emptyset$. If $G$ is a uniform CA graph then it follows from Lemma~\ref{lem:simu} that $F_{\mathrm{uniform}}(G) = F_{\mathrm{uniform}}^A(G)$. Therefore $F_{\mathrm{uniform}}^A$ is an invariant flip set function for uniform CA graphs. Additionally, it can be verified that $F_{\mathrm{uniform}}^A$ is globally invariant due to the fact that the answer of $A$ is independent of the vertex labels. Also, the function $F_{\mathrm{uniform}}^A$ can be computed in logspace using queries of the form $T \in \Delta(G,A)$. Observe that $\Delta(G,A)$ only provides $n^3$ bits of information with $n = |V(G)|$ and therefore can be computed `in a single query' by a functional oracle which outputs the $n^3$ bits of information. \end{proof} \section{Non-Uniform CA Graphs and Restricted CA Matrices} In the first part of this section we examine the structure of non-uniform CA graphs. Every such graph must have two $\ensuremath{\mathtt{ov}}$-triangles which have exactly one vertex in common and both are representable as interval triangle and as non-Helly triangle. This pair of $\ensuremath{\mathtt{ov}}$-triangles enforces a particular structure in non-uniform CA graphs. In the second part we introduce restricted CA matrices, which try to partly capture this structure. Roughly speaking, restricted CA matrices can be seen as a generalization of the neighborhood matrices of non-uniform CA graphs. 
We pay the price of considering this more general class of structures in order to provide a logspace reduction from the canonical representation problem for CA graphs to that of restricted CA matrices. \begin{definition} Let $G$ be a CA graph, let $C = (u,w,w',u')$ be an induced 4-cycle of $G$ and let $v \in V(G) \setminus C$. We say $(C,v)$ is a non-uniformity witness of $G$ if $\{u,v,w \}, \{ u',v,w' \} \in \ensuremath{\mathcal{T}}it(G) \cap \ensuremath{\mathcal{T}}nht(G)$. We also simply call $(C,v)$ a witness of $G$. \end{definition} \begin{theorem} A CA graph $G$ is non-uniform iff $G$ has a non-uniformity witness. \label{thm:nuwc} \end{theorem} \begin{proof} ``$\Rightarrow$'': Let $G$ be a non-uniform CA graph. Due to Theorem \ref{thm:uca_char} there exists an $\ensuremath{\mathtt{ov}}$-triangle $T$ of $G$ with $T \in \ensuremath{\mathcal{T}}it(G) \cap \ensuremath{\mathcal{T}}nht(G)$. Let $T= \{u,v,w\}$ and $\rho_{\text{I}} \in \mathcal{N}(G)$ such that $v$ is in-between $u$ and $w$, i.e.~$\rho_{\text{I}}(v) \subset \rho_{\text{I}}(u) \cup \rho_{\text{I}}(w)$. First, we show that there exists an induced 4-cycle $C = (u,w,w',u')$ in $G$. From the non-Helly triangle representation of $T$ it follows that $N[u] \cup N[v] \cup N[w] = V(G)$. Since $v$ is in-between $u$ and $w$ this means $N[u] \cup N[w] = V(G)$. It holds that $u$ and $w$ overlap. Therefore one of the conditions in the definition of the neighborhood matrix for $u$ and $w$ to form a circle cover must be violated. Let us assume w.l.o.g.~that the violated condition is that there exists a $u' \in N[u] \setminus N[w]$ such that $N[u'] \not\subseteq N[u]$. This means $u'$ must overlap with $u$ and there exists a $w' \in N[u'] \setminus N[u]$. Since $w' \notin N[u]$ it follows from $N[u] \cup N[w] = V(G)$ that $w' \in N[w]$. Because $w$ is disjoint from $u'$ and $w'$ intersects with both $u'$ and $w$, it follows that $w'$ overlaps with $u'$ and $w$. Therefore $C = (u,w,w',u')$ is an induced 4-cycle in $G$. 
It remains to show that $\{u',v,w'\}$ is an $\ensuremath{\mathtt{ov}}$-triangle and that it is in both $\ensuremath{\mathcal{T}}it(G)$ and $\ensuremath{\mathcal{T}}nht(G)$. Consider the representation $\rho_{\text{I}}$ from before. Assume for the sake of contradiction that $v$ does not overlap with $u'$. Then due to $\rho_{\text{I}}$ it must be the case that $u'$ is disjoint from $v$ and thus $u' \in N_T(u)$. However, due to the fact that $T$ is representable as non-Helly triangle this would imply that $u'$ is contained by $u$, which is not the case. Therefore $u'$ overlaps with $v$ as the other intersection types are out of question. For the same reason $w'$ overlaps with $v$ and hence $T'= \{u',v,w'\}$ is an $\ensuremath{\mathtt{ov}}$-triangle. Now, it can be verified that in every representation of $G$ where $T$ is a non-Helly triangle it follows that $T'$ must be an interval triangle and vice versa. We conclude that $T'$ is in $\ensuremath{\mathcal{T}}it(G) \cap \ensuremath{\mathcal{T}}nht(G)$. ``$\Leftarrow$'': Follows directly from Theorem \ref{thm:uca_char}. \end{proof} \begin{figure} \caption{Examples of non-uniform CA graphs and one uniform CA graph $X_4$} \label{fig:ex_nuca} \end{figure} In Figure~\ref{fig:ex_nuca} five non-uniform CA graphs and one uniform CA graph ($X_4$) are given by their CA models. We explain how to verify this claim. First, we have to check that every CA model is normalized. This means the graphs which are induced by these models must be twin-free and without a universal vertex. Additionally, the intersection types of the arcs must match the intersection types in the induced graph (or more precisely its neighborhood matrix). A quick way to determine whether two overlapping arcs also overlap in the graph is to check if they jointly occur in an induced $n$-cycle for some $n \geq 4$. 
To see that the first five CA graphs are non-uniform we have to find an $\ensuremath{\mathtt{ov}}$-triangle that is representable as both interval and non-Helly triangle. In the case of $\overline{3K_2}$ this $\ensuremath{\mathtt{ov}}$-triangle can be $\{u,v,w\}$. In the given representation $\{u,v,w\}$ is represented as interval triangle. Observe that $v$ and $v'$ are in the same orbit and therefore the labels $v$ and $v'$ can be swapped in the representation. After swapping $v$ and $v'$ the $\ensuremath{\mathtt{ov}}$-triangle $\{u,v,w\}$ is represented as non-Helly triangle. For the graph $X_0$ we can also choose the $\ensuremath{\mathtt{ov}}$-triangle $\{u,v,w\}$. In this case there is an automorphism which swaps $u$ with $u'$ and $w$ with $w'$ and has the other vertices as fix-points. After changing the labels in the representation according to this automorphism it holds that $\{u,v,w\}$ is represented as non-Helly triangle. We remark that $\overline{3K_2}$ and $X_0$ are minimal in the sense that no induced subgraph of them is a non-uniform CA graph. Next, let us consider the graphs $X_1$ to $X_3$. Observe that the black arcs in each of these graphs form an induced $\overline{3K_2}$ subgraph. We assume that the black arcs are labeled with $u,u',v,v',w,w'$ in the same way that the representation of $\overline{3K_2}$ is labeled. It holds that $v$ and $v'$ are in the same orbit in all of these three graphs because they have the same open neighborhood. Therefore $\{u,v,w\}$ is representable as both interval and non-Helly triangle due to the same argument that we made for $\overline{3K_2}$. To show that $X_4$ is uniform we argue that it has a unique normalized representation, i.e.~$|\mathcal{N}(X_4)| = 1$. Observe that this graph has a unique CA model. Additionally, it has no non-trivial automorphism (it is rigid). Therefore $X_4$ has a unique CA representation. \begin{fact} Every non-uniform CA graph contains $\overline{3K_2}$ or $X_0$ as induced subgraph. 
\end{fact} \begin{proof} Let $G$ be a non-uniform CA graph. Due to Theorem \ref{thm:nuwc} there exists a witness $(C,v)$ of $G$ with $C=(u,w,w',u')$. Since $G$ does not contain a universal vertex it holds that $V(G) \setminus N[v]$ is non-empty. Due to the fact that $\{u,v,w \}$ and $\{u',v,w' \}$ can be represented as interval triangles it follows that $N_C(C \setminus \{x\}) \subseteq N[v]$ for all $x \in C$. Therefore $V(G) \setminus N[v] \subseteq N_C(C) \cup N_C(u,u') \cup N_C(w,w')$. Suppose there is a $v' \in N_C(C) \setminus N[v]$. Then the vertices of $C$ along with $v$ and $v'$ form an induced $\overline{3K_2}$-subgraph of $G$. Assume that this is not the case, i.e.~$N_C(C) \subseteq N[v]$. Since $u$ and $v$ overlap it must hold that $N[u] \setminus N[v] \neq \emptyset$. The only vertices that can be adjacent to $N[u]$ but not to $N[v]$ must be in $N_C(u,u')$ since $N_C(C) \subseteq N[v]$. Therefore there exists a vertex $x \in N_C(u,u')$ that is not adjacent to $v$. For the same reason there must be a vertex $y \in N_C(w,w')$ not adjacent to $v$ because $N[w] \setminus N[v] \neq \emptyset$. The vertices of $C$ along with $v$, $x$ and $y$ form an induced $X_0$-subgraph. \end{proof} \begin{definition}[Restricted CA Matrix] Let $\lambda$ be a CA matrix. We say $\lambda$ is a restricted CA matrix if it contains an induced 4-cycle $C=(u,w,w',u')$ called witness cycle such that: \begin{enumerate} \item $N_C(u,w)$, $N_C(u',w')$ and $N_C(x)$ are empty for every $x \in C$ \item For all $x \in N_C(C)$ it holds that $x$ overlaps with all vertices in $C$ \end{enumerate} \label{def:rca} \end{definition} Observe that the intersection matrix of every CA model that is shown in Figure~\ref*{fig:ex_nuca} is a restricted CA matrix. 
\begin{table} \begin{tabular}{ l | l l l l | l l l l | l l | l l | l l | l l | l } & \multicolumn{4}{c|}{1 } & \multicolumn{4}{c|}{2 } & \multicolumn{2}{c|}{3 } & \multicolumn{2}{c|}{4 } & \multicolumn{2}{c|}{5 } & \multicolumn{2}{c|}{6 } & \multicolumn{1}{c}{7 } \\ \hline $u$&$\ensuremath{\mathtt{cs}}$&$\ensuremath{\mathtt{ov}}$&$\ensuremath{\mathtt{cs}}$&$\ensuremath{\mathtt{ov}}$&\cellcolor{gray!25}$\ensuremath{\mathtt{di}}$&\cellcolor{gray!25}$\ensuremath{\mathtt{di}}$&\cellcolor{gray!25}$\ensuremath{\mathtt{di}}$&\cellcolor{gray!25}$\ensuremath{\mathtt{di}}$&$\ensuremath{\mathtt{ov}}$&$\ensuremath{\mathtt{ov}}$&$\ensuremath{\mathtt{ov}}$&$\ensuremath{\mathtt{cs}}$&$\ensuremath{\mathtt{ov}}$&$\ensuremath{\mathtt{ov}}$&\cellcolor{gray!25}$\ensuremath{\mathtt{di}}$&\cellcolor{gray!25}$\ensuremath{\mathtt{di}}$&$\ensuremath{\mathtt{ov}}$\\ $w$&\cellcolor{gray!25}$\ensuremath{\mathtt{di}}$&\cellcolor{gray!25}$\ensuremath{\mathtt{di}}$&\cellcolor{gray!25}$\ensuremath{\mathtt{di}}$&\cellcolor{gray!25}$\ensuremath{\mathtt{di}}$&$\ensuremath{\mathtt{cs}}$&$\ensuremath{\mathtt{ov}}$&$\ensuremath{\mathtt{cs}}$&$\ensuremath{\mathtt{ov}}$&\cellcolor{gray!25}$\ensuremath{\mathtt{di}}$&\cellcolor{gray!25}$\ensuremath{\mathtt{di}}$&$\ensuremath{\mathtt{ov}}$&$\ensuremath{\mathtt{ov}}$&$\ensuremath{\mathtt{ov}}$&$\ensuremath{\mathtt{cs}}$&$\ensuremath{\mathtt{ov}}$&$\ensuremath{\mathtt{ov}}$&$\ensuremath{\mathtt{ov}}$\\ 
$w'$&\cellcolor{gray!25}$\ensuremath{\mathtt{di}}$&\cellcolor{gray!25}$\ensuremath{\mathtt{di}}$&\cellcolor{gray!25}$\ensuremath{\mathtt{di}}$&\cellcolor{gray!25}$\ensuremath{\mathtt{di}}$&$\ensuremath{\mathtt{cs}}$&$\ensuremath{\mathtt{cs}}$&$\ensuremath{\mathtt{ov}}$&$\ensuremath{\mathtt{ov}}$&$\ensuremath{\mathtt{ov}}$&$\ensuremath{\mathtt{ov}}$&\cellcolor{gray!25}$\ensuremath{\mathtt{di}}$&\cellcolor{gray!25}$\ensuremath{\mathtt{di}}$&$\ensuremath{\mathtt{ov}}$&$\ensuremath{\mathtt{ov}}$&$\ensuremath{\mathtt{ov}}$&$\ensuremath{\mathtt{cs}}$&$\ensuremath{\mathtt{ov}}$\\ $u'$&$\ensuremath{\mathtt{cs}}$&$\ensuremath{\mathtt{cs}}$&$\ensuremath{\mathtt{ov}}$&$\ensuremath{\mathtt{ov}}$&\cellcolor{gray!25}$\ensuremath{\mathtt{di}}$&\cellcolor{gray!25}$\ensuremath{\mathtt{di}}$&\cellcolor{gray!25}$\ensuremath{\mathtt{di}}$&\cellcolor{gray!25}$\ensuremath{\mathtt{di}}$&$\ensuremath{\mathtt{ov}}$&$\ensuremath{\mathtt{cs}}$&$\ensuremath{\mathtt{ov}}$&$\ensuremath{\mathtt{ov}}$&\cellcolor{gray!25}$\ensuremath{\mathtt{di}}$&\cellcolor{gray!25}$\ensuremath{\mathtt{di}}$&$\ensuremath{\mathtt{ov}}$&$\ensuremath{\mathtt{ov}}$&$\ensuremath{\mathtt{ov}}$ \end{tabular} \caption{Intersection types of restricted CA matrices with witness cycle $(u,w,w',u')$} \label{tab:rca} \end{table} \begin{fact} Given an intersection matrix $\lambda$, vertices $x,y_1,\dots,y_k$ of $\lambda$ and intersection types $\alpha_1,\dots,\alpha_k$, we say $x$ is an $(\alpha_1,\dots,\alpha_k)$-neighbor of $(y_1,\dots,y_k)$ if $\lambda_{x,y_i} = \alpha_i$ for all $i \in [k]$. A CA matrix $\lambda$ is restricted iff $\lambda$ contains an induced 4-cycle $C=(u,w,w',u')$ such that for all vertices $x \in V(\lambda) \setminus C$ there exists a column $\overline{\alpha}$ in Table \ref{tab:rca} such that $x$ is an $\overline{\alpha}$-neighbor of $C$. \label{fact:rcait} \end{fact} \begin{proof} We use the numbers in the table headline to refer to the different columns. 
For example, 2.3 refers to the third column from left in the second part of the table: $(\ensuremath{\mathtt{di}},\ensuremath{\mathtt{cs}},\ensuremath{\mathtt{ov}},\ensuremath{\mathtt{di}})$. ``$\Rightarrow$'': Let $\lambda$ be a restricted CA matrix with witness cycle $C=(u,w,w',u')$. We need to show for every $x \in V(\lambda) \setminus C$ there exists a column $\overline{\alpha}$ in Table \ref{tab:rca} such that $x$ is an $\overline{\alpha}$-neighbor of $C$. Due to the definition of restricted CA matrices it must hold that $x$ is in (exactly) one of the following seven sets: $N_C(C), N_C(u,u'), N_C(w,w')$ or $N_C(C \setminus \{z\})$ for a $z \in C$. If $x$ is in $N_C(C)$ then $x$ overlaps with every vertex of $C$ by definition. This corresponds to the last column 7.1 of the table. If $x \in N_C(u,u')$ then $x$ is disjoint from $w$ and $w'$. In that case $x$ is an $\overline{\alpha}$-neighbor of $C$ where $\overline{\alpha}$ must be one of the four columns in part one of the table. For the same reason if $x \in N_C(w,w')$ then it is an $\overline{\alpha}$-neighbor of $C$ where $\overline{\alpha}$ corresponds to one of the four columns in the second part of the table. If $x$ is in $N_C(C \setminus \{w\})$ then $x$ is disjoint from $w$ and $x$ overlaps with both $u$ and $w'$. The intersection type between $x$ and $u'$ can be one of the following: $x$ overlaps with $u'$ or $x$ is contained by $u'$ or $x$ contains $u'$. The first two cases are covered by the third part of the table. However, if $x$ contains $u'$ then there exists no corresponding column in the table since it does not have any $\ensuremath{\mathtt{cd}}$-entries. This can be resolved by using the following observation: if $x$ is in $N_C(C \setminus \{w\})$ and contains $u'$ then $(u,w,w',x)$ is a witness cycle of $\lambda$ as well. 
As a consequence we can assume without loss of generality that a witness cycle $C$ of $\lambda$ can be chosen such that there exists no $x \in N_C(C \setminus \{w\})$ which contains $u'$. The same argument applies to the remaining three cases $x \in N_C(C \setminus \{z\})$ with $z \in \{u,u',w'\}$. ``$\Leftarrow$'': clear. \end{proof} In the remainder of this section we prove that the canonical representation problem for CA graphs is logspace-reducible to the canonical representation problem for vertex-colored restricted CA matrices. The proof outline looks as follows. First, we define a subset of uniform CA graphs, namely $\Delta$-uniform CA graphs, for which the globally invariant non-Helly triangle representability problem can be solved in logspace. Therefore the canonical representation problem for CA graphs is logspace-reducible to that of CA graphs which are not $\Delta$-uniform. This reduction follows from a slightly modified version of Lemma~\ref{lem:ginhtp}. Then we show that the neighborhood matrix of a non-$\Delta$-uniform CA graph can be converted into a vertex-colored restricted CA matrix by flipping `long' arcs. By coloring the flipped arcs the isomorphism type is preserved. \begin{definition} For a graph $G$ we define $\Delta_G$ as the following set of $\ensuremath{\mathtt{ov}}$-triangles (see Definition~\ref{def:ovtriangle}). 
An $\ensuremath{\mathtt{ov}}$-triangle $T$ of $G$ is in $\Delta_G$ if there exist three pairwise different vertices $u,v,w$ in $T$ such that the following holds: \begin{enumerate} \item $N[u] \cup N[v] \cup N[w] = V(G)$ \item For all $z \in T$ it holds that if a vertex $x \in N_T(z)$ then $x \: \ensuremath{\mathtt{cd}} \: z$ \item If there exist $u',w'$ such that $(u,w,w',u')$ is an induced 4-cycle and $v$ overlaps with $u'$ and $w'$ then $N[v] \subseteq N[u'] \cup N[w']$ \end{enumerate} \label{def:deltag} \end{definition} \begin{definition} A CA graph $G$ is $\Delta$-uniform if $\Delta_G \cap \ensuremath{\mathcal{T}}it(G) = \emptyset$. \end{definition} Let us explain the intuition behind these two definitions. The set $\Delta_G$ approximates $\ensuremath{\mathcal{T}}nht(G)$. More precisely, whenever an $\ensuremath{\mathtt{ov}}$-triangle $T = \{u,v,w\}$ is in $\ensuremath{\mathcal{T}}nht(G)$ this implies that $T$ satisfies certain constraints such as for example $N[u] \cup N[v] \cup N[w] = V(G)$. The set $\Delta_G$ consists of three such constraints. Therefore if an $\ensuremath{\mathtt{ov}}$-triangle is representable as non-Helly triangle it must also be in $\Delta_G$, i.e.~$\ensuremath{\mathcal{T}}nht(G) \subseteq \Delta_G$. The $\Delta$-uniform CA graphs can be alternatively seen as the subset of uniform CA graphs where the constraints of $\Delta_G$ suffice to characterize $\ensuremath{\mathcal{T}}nht(G)$, i.e.~$\Delta_G = \ensuremath{\mathcal{T}}nht(G)$. \begin{lemma} For every graph $G$ it holds that $\ensuremath{\mathcal{T}}nht(G) \subseteq \Delta_G$. If $G$ is a $\Delta$-uniform CA graph then $\ensuremath{\mathcal{T}}nht(G) = \Delta_G$. \label{lem:nht_deltag} \end{lemma} \begin{proof} For the first claim consider a graph $G$. If $G$ is not a CA graph then $\ensuremath{\mathcal{T}}nht(G) = \emptyset$. Therefore we can assume that $G$ is a CA graph. 
Given an $\ensuremath{\mathtt{ov}}$-triangle $T \in \ensuremath{\mathcal{T}}nht(G)$ we show that it must be in $\Delta_G$. Let $\rho \in \mathcal{N}(G)$ be a representation such that $T = \{u,v,w\}$ is represented as non-Helly triangle in it. Since $\rho(u) \cup \rho(v) \cup \rho(w)$ covers the whole circle it follows that $N[u] \cup N[v] \cup N[w] = V(G)$, which is the first condition of Definition \ref{def:deltag}. To see that the second condition holds we consider a vertex $x \in N_T(u)$ without loss of generality. Since $x$ is not adjacent to $v$ and $w$ it holds that $\rho(x) \subseteq \mathbb{C} \setminus \left( \rho(v) \cup \rho(w) \right) $ where $\mathbb{C}$ denotes the whole circle. Since $\mathbb{C} \setminus \left( \rho(v) \cup \rho(w) \right) \subset \rho(u)$ it follows that $\rho(x) \subset \rho(u)$. Due to the fact that $\rho$ is a normalized representation this implies that $x$ is contained by $u$. To see that the third condition of $\Delta_G$ holds let $u',w'$ be vertices such that $(u,w,w',u')$ is an induced 4-cycle of $G$. Since $T$ is represented as non-Helly triangle in $\rho$ it must hold that $\{ u',v,w' \}$ is an interval triangle in $\rho$ with $\rho(v) \subset \rho(u') \cup \rho(w')$ and therefore $N[v] \subseteq N[u'] \cup N[w']$. For the second claim let $G$ be a $\Delta$-uniform CA graph. From the previous claim we know that $\ensuremath{\mathcal{T}}nht(G) \subseteq \Delta_G$. Since every $\ensuremath{\mathtt{ov}}$-triangle must be in $\ensuremath{\mathcal{T}}nht(G) \cup \ensuremath{\mathcal{T}}it(G)$ it follows that $\Delta_G \subseteq \ensuremath{\mathcal{T}}nht(G) \cup \ensuremath{\mathcal{T}}it(G)$. The definition of $\Delta$-uniform requires $\Delta_G \cap \ensuremath{\mathcal{T}}it(G) = \emptyset$ and thus $\Delta_G \subseteq \ensuremath{\mathcal{T}}nht(G)$. \end{proof} \begin{fact} $\Delta$-uniform CA graphs are a strict subset of uniform CA graphs. 
\end{fact} \begin{proof} Assume there exists a $\Delta$-uniform CA graph $G$ which is not uniform. This means there exists an $\ensuremath{\mathtt{ov}}$-triangle $T \in \ensuremath{\mathcal{T}}nht(G) \cap \ensuremath{\mathcal{T}}it(G)$. Due to the previous lemma it holds that $\ensuremath{\mathcal{T}}nht(G) \subseteq \Delta_G$. This implies that $T \in \Delta_G \cap \ensuremath{\mathcal{T}}it(G)$ which contradicts that $G$ is $\Delta$-uniform. Therefore every $\Delta$-uniform CA graph is uniform. An example of a uniform CA graph that is not $\Delta$-uniform is the graph $X_4$ in Figure~\ref{fig:ex_nuca}. In the third paragraph after Theorem~\ref{thm:nuwc} we argued that $X_4$ is a uniform CA graph because it has a unique normalized representation. Assume that the black arcs of $X_4$ are labeled with $u,u',v,v',w,w'$ in the same way that the representation of $\overline{3K_2}$ is labeled in Figure~\ref{fig:ex_nuca}. To see that $X_4$ is not $\Delta$-uniform it suffices to check that the $\ensuremath{\mathtt{ov}}$-triangle $\{u,v,w\}$ is in $\Delta_{X_4}$ and represented as interval triangle. \end{proof} \begin{corollary} The globally invariant non-Helly triangle representability problem for $\Delta$-uniform CA graphs can be solved in logspace. \label{corol:deltag_ginhtr} \end{corollary} \begin{proof} Given a CA graph $G$ and an $\ensuremath{\mathtt{ov}}$-triangle $T$ output yes iff $T \in \Delta_G$. This is correct because in the case of a $\Delta$-uniform CA graph $G$ it holds that $\Delta_G = \ensuremath{\mathcal{T}}nht(G)$ (Lemma~\ref{lem:nht_deltag}). Clearly, $\Delta_G$ is computable in logspace and an invariant. \end{proof} \begin{lemma} Let $G$ be a CA graph that is not $\Delta$-uniform. Then there exists an induced 4-cycle $C = (u,w,w',u')$ such that $N[u] \cup N[w] = N[u'] \cup N[w'] = V(G)$ and a vertex $v$ that overlaps with every vertex in $C$. 
\label{lem:nduwc} \end{lemma} \begin{proof} The argument is essentially the same as the one made for the ``$\Rightarrow$''-direction in the proof of Theorem \ref{thm:nuwc}. The difference is that instead of the stronger assumption that $T \in \ensuremath{\mathcal{T}}nht(G)$ we only require that $T \in \Delta_G$. Since $G$ is not $\Delta$-uniform there exists an $\ensuremath{\mathtt{ov}}$-triangle $T=\{u,v,w\}$ of $G$ such that $T \in \Delta_G$ and there is a representation $\rho \in \mathcal{N}(G)$ such that $T$ is represented as interval triangle in $\rho$. Furthermore, let us assume w.l.o.g.~that $\rho(v) \subset \rho(u) \cup \rho(w)$. Since $T \in \Delta_G$ it holds that $N[u] \cup N[v] \cup N[w] = V(G)$. Due to the interval representation of $T$ in $\rho$ it follows that $N[u] \cup N[w] = V(G)$. Since $u$ and $w$ do not form a circle cover it must hold that there exists a vertex $u' \in N[u] \setminus N[w]$ such that $N[u'] \setminus N[u]$ is non-empty. If $u'$ is disjoint from $v$ it follows that $u'$ must be contained by $u$ from the second condition in Definition~\ref{def:deltag} of $\Delta_G$. This cannot be the case and therefore $u' \in N_T(u,v)$. For $u'$ to have a neighbor which is not adjacent to $u$ it must hold that $\rho(u') \not\subseteq \rho(u)$. Therefore $u'$ overlaps with $u$ and $v$. Let $w' \in N[u'] \setminus N[u]$. If $w' \in N_T(w)$ then $w'$ would be contained by $w$ due to the second condition of $\Delta_G$. Again, this cannot be the case and therefore $w' \in N_T(v,w)$. From the representation $\rho$ it follows that $w'$ must overlap with $u'$, $v$ and $w$. Then $C=(u,w,w',u')$ is an induced 4-cycle of $G$ such that $v$ overlaps with every vertex of $C$. It remains to show that $N[u'] \cup N[w'] = V(G)$. Due to the third condition of $\Delta_G$ it holds that $N[v] \subseteq N[u'] \cup N[w']$. Additionally, it holds that $\rho(u) \setminus \rho(v) \subset \rho(u')$ and $\rho(w) \setminus \rho(v) \subset \rho(w')$. 
As a consequence $N[u'] \cup N[w'] = V(G)$. \end{proof} \begin{corollary} Canonical representations for CA graphs without induced 4-cycle can be computed in logspace. \end{corollary} \begin{proof} By Lemma \ref{lem:nduwc} the class of CA graphs without induced 4-cycle is a subset of $\Delta$-uniform CA graphs and due to Corollary \ref{corol:deltag_ginhtr} and Theorem \ref{thm:eq_problem} a canonical representation for such graphs can be computed in logspace. \end{proof} \begin{corollary} Helly CA graphs are a strict subset of $\Delta$-uniform CA graphs. \end{corollary} \begin{proof} Assume $G$ is a Helly CA graph which is not $\Delta$-uniform. Then due to Lemma \ref{lem:nduwc} there exists an induced 4-cycle $C$ and a vertex $v$ not in $C$ which overlaps with every vertex in $C$. In any normalized representation of $G$ it must hold that $v$ forms a non-Helly triangle with two vertices from $C$. This contradicts that $G$ is Helly. The graph \input{figs/netgraph} is a $\Delta$-uniform CA graph which is not Helly. \end{proof} \begin{theorem} The canonical representation problem for CA graphs is logspace-reducible to the canonical representation problem for vertex-colored restricted CA matrices. \label{thm:rca_matrix} \end{theorem} \begin{proof} For brevity let $\mathcal{Z}$ denote the set of all CA graphs which are not $\Delta$-uniform. Since the globally invariant non-Helly triangle representability problem for $\Delta$-uniform CA graphs can be solved in logspace (see Corollary \ref{corol:deltag_ginhtr}) it follows from a modified version of Lemma~\ref{lem:ginhtp} that the canonical representation problem for CA graphs is logspace-reducible to the canonical representation problem for vertex-colored $\mathcal{Z}$. To see this replace `uniform' with `$\Delta$-uniform' and `non-uniform' with `non-$\Delta$-uniform' in the statement (and proof) of Lemma~\ref{lem:ginhtp}. 
For a CA graph $G$ let us say a subset of vertices $X$ of $G$ is an R-flip set if $\lambda_G^{(X)}$ is a restricted CA matrix. To find canonical representations for $\mathcal{Z}$ we construct an invariant vertex set selector $f$ such that $f(G)$ contains at least one R-flip set for every $G \in \mathcal{Z}$. Then to obtain a canonical representation for $G \in \mathcal{Z}$ let $\hat{X}$ denote the R-flip set in $f(G)$ such that $\mathrm{canon}(\lambda_G^{(\hat{X})},c_{\hat{X}})$ is lexicographically minimal with $c_X$ being the coloring which assigns every vertex $v \in X$ the color red and the other vertices are blue. Let $\rho$ be a canonical normalized representation for $(\lambda_G^{(\hat{X})},c_{\hat{X}})$. Then $\rho^{(\hat{X})}$ is a canonical representation for $G$. Notice, that $\rho^{(\hat{X})}$ can be computed in logspace by computing canonical representations for vertex-colored restricted CA matrices. The correctness of this approach follows from the same argument made in the proof of Theorem \ref{thm:cfsf_cr} in the flip trick section. The analogy is straightforward. The R-flip sets in this context correspond to flip sets and the invariant vertex set selector $f$ takes the place of the invariant flip set function. Given a CA graph $G$ and $X \subseteq V(G)$ it can be easily checked in logspace whether $\lambda_G^{(X)}$ is a restricted CA matrix. For a CA graph $G$ let $C(G)$ denote the set of all ordered induced 4-cycles in $G$. Now, we claim that the following logspace-computable function $f$ is an invariant vertex set selector with the desired property: $$ f(G) = \bigcup_{C \in C(G)} \big\{ \set{ x \in V(G) \setminus C }{ \exists y \in C : x \: \ensuremath{\mathtt{cs}} \: y } \big\} $$ It is not difficult to check that $f$ is invariant. It remains to argue why $f(G)$ contains at least one R-flip set for every $G \in \mathcal{Z}$. 
Let $G \in \mathcal{Z}$ and let $C =(u,w,w',u')$ be an induced 4-cycle in $G$ such that $N[u] \cup N[w] = N[u'] \cup N[w'] = V(G)$. The existence of such an induced 4-cycle is guaranteed by Lemma \ref{lem:nduwc}. Observe that if there exists a $u_1 \in N_C(u,w,u')$ with $u_1 \: \ensuremath{\mathtt{cs}} \: u$ then $C_1 = (u_1,w,w',u')$ also satisfies the previous condition $N[u_1] \cup N[w] = V(G)$. Therefore we can assume that there exists no $z \in C$ and $z_1 \in N_C(N[z] \cap C)$ such that $z_1 \: \ensuremath{\mathtt{cs}} \: z$. From $N[u] \cup N[w] = N[u'] \cup N[w'] = V(G)$ it immediately follows that $N_C(u,w)$, $N_C(u',w')$ and $N_C(x)$ are empty for every $x \in C$. We prove that $\lambda^{(X)}$ is a restricted CA matrix with witness cycle $C$ where $\lambda$ is the neighborhood matrix of $G$ and $X = \set{ x \in V(G) \setminus C }{ \exists y \in C : x \: \ensuremath{\mathtt{cs}} \: y }$. Note that $X \in f(G)$ via $C$. To reference the neighborhoods of $G$ (which are the same as the ones of $\lambda$) or $\lambda^{(X)}$ we write $N^G$ and $N^{\lambda^{(X)}}$ to distinguish between them. First, we show that $N_C^{\lambda^{(X)}}(u,w) = \emptyset$. Assume the opposite, i.e.~there exists $x \in N_C^{\lambda^{(X)}}(u,w)$. If $x$ was not flipped, i.e.~$x \notin X$, then it also holds that $x \in N^G_C(u,w)$, which contradicts that $N^G_C(u,w)$ is empty. If $x$ was flipped, i.e.~$x \in X$, then it must be the case that $x$ contains $u'$ and $w'$ in $\lambda$. This means $N^G[u'] \cup N^G[w'] \subseteq N^G[x]$ which implies that $x$ is a universal vertex in $G$ since $N^G[u'] \cup N^G[w'] = V(G)$, contradiction. For the same reason it holds that $N_C^{\lambda^{(X)}}(u',w')$ and $N_C^{\lambda^{(X)}}(z)$ are empty for all $z \in C$. It remains to show that for all $x \in N_C^{\lambda^{(X)}}(C)$ it holds that $x$ overlaps with all vertices of $C$ in $\lambda^{(X)}$. 
Notice that $\lambda^{(X)}_{x,z} \in \{\ensuremath{\mathtt{ov}}, \ensuremath{\mathtt{cs}}, \ensuremath{\mathtt{cc}} \}$ for every $z \in C$. Otherwise $x$ would not be in $N_C(C)$. We consider the following two cases: in the first one we assume that $x$ contains one vertex of $C$ in $\lambda^{(X)}$ and in the second one we assume that $x$ forms a circle cover with one vertex of $C$ in $\lambda^{(X)}$. We prove that neither of these cases can occur and therefore $x$ must overlap with all vertices of $C$ in $\lambda^{(X)}$. For the first case assume that w.l.o.g.~$x$ contains $u$ in $\lambda^{(X)}$ and intersects with the other vertices of $C$ in $\lambda^{(X)}$. If $x \in X$ then it was flipped. It follows that $x$ was disjoint from $u$ in $\lambda$ and therefore $x \in N_C^G(w,w',u')$. Since $x \in X$ it also must hold that $x$ contains at least one of the vertices $w,w',u'$ in $G$. It follows that $x$ contains $w'$ since it cannot contain the other two in $\lambda$. However, this contradicts our choice of $C$ which says that there exists no $w'_1 \in N_C^G(w,w',u')$ such that $w'_1$ contains $w'$ in $\lambda$. If $x \notin X$ then it must hold that $x$ already contained $u$ in $\lambda$. But then $x$ should be in $X$, contradiction. For the second case assume $x$ forms a circle cover with $u$ in $\lambda^{(X)}$. If $x$ forms a circle cover with $u$ then this implies that $x$ contains $w'$ in $\lambda^{(X)}$ and therefore this reduces to the first case. We conclude that both conditions of Definition \ref{def:rca} are satisfied and hence $\lambda^{(X)}$ is a restricted CA matrix. \end{proof} \section{Further Research} Finding a polynomial-time isomorphism test for CA graphs remains an open problem. We have shown that it suffices to consider only non-uniform CA graphs for this problem. This particular class of CA graphs offers quite a lot of structure, which is caused by what we named non-uniformity witnesses. 
It seems plausible that such witnesses can be exploited to devise an isomorphism test. Additionally, we proved that the canonical representation problem for CA graphs is logspace-reducible to that of restricted CA matrices. The central question with regard to the flip trick is how invariant flip sets for restricted CA matrices or non-uniform CA graphs can be computed. Also, we remark that CA representations for CA graphs can be computed in logspace if flip sets for restricted CA matrices can be found in logspace. Another interesting problem is to extend Definition~\ref{def:deltag} of $\Delta_G$ such that it captures $\ensuremath{\mathcal{T}}nht(G)$ on uniform CA graphs, i.e.~$\Delta_G = \ensuremath{\mathcal{T}}nht(G)$ for all uniform CA graphs $G$. If this can be done in such a way that $\Delta_G$ remains an invariant and computable in logspace then everything that is said about $\Delta$-uniform CA graphs in section 5 also applies to uniform CA graphs. \subparagraph*{Acknowledgements.} We thank the anonymous reviewers for their insightful comments and suggestions that helped us to improve the quality of this work. \end{document}
\begin{document} \title{Geometric realization and its variants} \begin{abstract} In this paper, we present a unified approach using model category theory and an associative law to compare some classic variants of the geometric realization functor. \end{abstract} \section{Introduction} Given an internal category $\mathcal{C}$ in $\mathfrak{Top}$, the category of weakly Hausdorff $k$-spaces, there are at least three different internal categories in $\mathfrak{Top}$ associated to it: \[\mathcal{C}^{\mathbb{N}},\hspace*{.5em} \mathcal{C}^{\operatorname{fat}},\hspace*{.3em} \text{ and } \hspace*{.3em}\mathcal{C}^{\operatorname{simp}}.\] The category $\mathcal{C}^{\mathbb{N}}$ is Segal's unraveled category defined as the subcategory of the product category $\mathcal{C}\times \mathbb{N}$ given by deleting the morphisms $(f,i\leq i)$ with $f\neq \operatorname{id}$, where $\mathbb{N}$ is the linearly ordered set of the natural numbers \cite{Se1}. The geometric realization of the nerve $\vert\operatorname{Ner}_{\cdot}\mathcal{C}^{\mathbb{N}}\vert$ generalizes Milnor's classifying space of a topological group \cite{Mil2}, \cite{Mil3}, \cite{Ha}, \cite{Mo1}, \cite{Mo3}. To define the internal category $\mathcal{C}^{\operatorname{fat}}$ in $\mathfrak{Top}$, we let $\mathcal{OR}$ be the one-object category with morphisms consisting of two elements $0,1$ and composition of morphisms given by the truth table for the operator $\mathsf{or}$; meaning, the composition $a\circ b$ is $1$ when $a$ or $b$ is $1$, and otherwise, it is $0$. Then $\mathcal{C}^{\operatorname{fat}}$ is the subcategory of $\mathcal{C}\times \mathcal{OR}$ given by deleting those morphisms $(f,a)$ with $a=0$ and $f\neq \operatorname{id}$. This construction is functorial, and the geometric realization $\vert\operatorname{Ner}_{\cdot}\mathcal{C}^{\operatorname{fat}}\vert$ is canonically homeomorphic to the fat realization $\vert\vert\operatorname{Ner}_{\cdot}\mathcal{C}\vert\vert$. 
The category $\mathcal{C}^{\operatorname{simp}}$ is the simplex category of the nerve $\operatorname{Ner}_{\cdot}\mathcal{C}$ \cite{Se3}. Its spaces of objects and morphisms are the disjoint unions \[\coprod_{[n]}\operatorname{Ner}_{n}\mathcal{C}\hspace*{1em}\text{and}\hspace*{1em}\coprod\limits_{\mathclap{[n]\rightarrow [m]}}\operatorname{Ner}_{m}\mathcal{C},\hspace*{1em}\text{respectively}.\] The geometric realization $\vert\operatorname{Ner}_{\cdot}\mathcal{C}^{\operatorname{simp}}\vert$ computes the homotopy colimit of the simplicial space $\operatorname{Ner}_{\cdot}\mathcal{C}$ \cite{Du1}, \cite{Hi2}. Each realization has its own advantage and plays a part in the development of topology. The geometric realization is an important construction in algebraic $K$-theory and delooping theory, and also, it connects the category of simplicial sets $s\operatorname{Sets}$ with the category $\mathfrak{Top}$, making combinatorial methods available in topology. On the other hand, the Segal unraveling construction bridges the gaps between geometry and homotopy theory as, given any topological groupoid $\mathcal{G}$, Segal's construction always gives us the classifying space of numerable $\mathcal{G}$-structures. In fact, by the construction, the space $\vert\operatorname{Ner}\mathcal{G}^{\mathbb{N}}\vert$ admits a numerable universal $\mathcal{G}$-structure \cite[Appendix]{Bo}. On the contrary, the geometric realization of the nerve of a topological groupoid does not always give us the right homotopy type of the classifying space, for example, a topological group that is homeomorphic to the Cantor set; we learn this example from A. Henriques on MathOverflow. However, if replacing geometric realization with fat realization, we get the right homotopy type of the classifying space \cite{HG}. 
The third construction $\mathcal{C}^{\operatorname{simp}}$ is of importance in model category theory as it computes homotopy colimit \cite{Hi2}, with respect to the projective model structure on the category of simplicial spaces. Also, it is a useful tool in proving theorems (e.g. \cite{Wa3}). Comparison theorems between these constructions allow us to choose models appropriate to different problems and help us understand the geometric meanings of homotopy-theoretic constructions---for instance, the geometric meaning of the algebraic $K$-theory space of a structured category, e.g. an exact category or a Waldhausen category internal in $\mathfrak{Top}$. These constructions can be generalized to simplicial spaces or even simplicial objects in a topologically (simplicially) enriched model category $\mathcal{M}$. Furthermore, given a simplicial object $X_{\cdot}$ in $\mathcal{M}$, they are connected by the natural morphisms \begin{equation}\label{Intro:fivespaces} X^{\mathbb{N}}_{\cdot}\xrightarrow{\pi} X^{\operatorname{fat}}_{\cdot}\xrightarrow{q} X_{\cdot}\xleftarrow{\mathfrak{l}} X_{\cdot}^{\operatorname{simp}}. \end{equation} The map $\mathfrak{l}$ is the Bousfield-Kan map (or the last vertex map); in the case of simplicial spaces, it has been studied in \cite{BK}, \cite{Se3}, and in the case of simplicial objects in $\mathcal{M}$, \cite{Hi2}, \cite{Du1}. The map $q$, the canonical quotient map from the fat realization to the geometric realization, is rather well understood; \cite{Se3} and \cite{tD} treat the case of simplicial spaces and \cite{Du1} the case of simplicial objects in $\mathcal{M}$. On the contrary, the map $\pi$ is less studied, and \cite{tD} is the only reference dealing with the map $\pi$ that we can find in the literature. 
\cite[Proposition $2$]{tD} asserts that the map $\pi$ is a homotopy equivalence in the case of simplicial spaces, but it appears that the proof contains some gaps (see Remark \ref{themaprho}); nevertheless, it remains a very promising assertion. In this paper, we present a unified approach, due to Segal, that allows us to compare these constructions simultaneously; the approach is based on an associative law implicitly used in \cite[Appendix $A$]{Se3} and the Reedy model structure on $s\mathcal{M}$, the category of simplicial objects in $\mathcal{M}$. With this approach, we obtain two comparison theorems that recover and generalize most of the known results concerning the relation between these four realization functors, and in particular, we obtain a complete proof of a generalized tom Dieck theorem. Our approach relies heavily on a generalized Segal lemma (\cite[Lemma $A.5$]{Se3}) for $\mathfrak{Top}$-enriched model categories. \begin{theorem}\label{Intro:theSegaltheorem} Let $s\mathcal{M}$ and $c\mathfrak{Top}$ be the Reedy model categories of simplicial objects in $\mathcal{M}$ and cosimplicial spaces, respectively, and suppose the morphisms $f_{\cdot}:X_{\cdot}\rightarrow Y_{\cdot}\in s\mathcal{M}$ and $g^{\cdot}:I^{\cdot}\rightarrow J^{\cdot}\in c\mathfrak{Top}$ are level-wise weak equivalences between cofibrant objects. Then the induced map between the associated coends \[\mathclap{\int^{\triangle}}f_{\cdot} \square g^{\cdot}\hspace*{-.1em}: \hspace*{1em}\mathclap{\int^{\triangle}} X_{\cdot} \square I^{\cdot} \rightarrow\hspace*{.5em} \mathclap{\int^{\triangle}} Y_{\cdot}\square J^{\cdot} \] is a weak equivalence in $\mathcal{M}$, where $\square$ is the tensor product functor from $\mathcal{M}\times\mathfrak{Top}$ to $\mathcal{M}$, and \hspace*{.7em}$\mathclap{\int^{\triangle}}\hspace*{.5em}(-)\square(-)\in \mathcal{M}$ denotes the coend of a simplicial object in $\mathcal{M}$ and a cosimplicial space. $\triangle$ is the simplex category. 
\end{theorem} The category $\mathfrak{Top}$ admits at least three different model structures \cite{MP} and the theorem applies to all of them. However, for our purpose, we are primarily concerned with the Str\o m model structure. The simplicial version of Theorem \ref{Intro:theSegaltheorem} is discussed in details in \cite[$18.4$]{Hi2}; our proof is different from \cite{Hi2} and based on the decomposition of latching objects in \cite[VII]{GJ}. $\mathfrak{Top}$-($s\operatorname{Sets}$-)enriched model categories of interest to us are the category $\mathpzc{Top}$ \cite{MP}, the category of simplicial sets $s\operatorname{Sets}$ \cite{GJ}, the category of (simplicial) spectra or $\Gamma$-spaces \cite{BF}, and the category of chain complexes \cite{MP}. \subsection{Main Theorems} \begin{theorem}\label{Intro:Thm1} For any cofibrant object $X_{\cdot}$ in $s\mathcal{M}$, the natural morphisms \[\hspace*{.5em}\mathclap{\int^{\triangle}} X^{\mathbb{N}}_{\cdot}\square \triangle^{\cdot} \xrightarrow{\pi}\hspace*{.5em}\mathclap{\int^{\triangle}} X^{\operatorname{fat}}_{\cdot}\square \triangle^{\cdot} \xrightarrow{q}\hspace*{.5em} \mathclap{\int^{\triangle}} X_{\cdot} \square \triangle^{\cdot} \xleftarrow{\mathfrak{l}}\hspace*{.5em} \mathclap{\int^{\triangle}} X_{\cdot}^{\operatorname{simp}} \square \triangle^{\cdot}\] are weak equivalences in $\mathcal{M}$, where the cosimplicial space $\triangle^{\cdot}$ is given by the geometric realization of the standard $n$-simplex $\triangle^{n}_{\cdot}$ in $s\operatorname{Sets}$. \end{theorem} The theorem implies that the geometric realizations of the simplicial spaces in \eqref{Intro:fivespaces} are homotopy equivalent when $X_{\cdot}$ is a proper simplicial space. 
\begin{theorem}\label{Intro:Thm2} Given a level-wise cofibrant object $X_{\cdot}$ in $s\mathcal{M}$, the following coends \[\mathclap{\int^{\triangle}} X_{\cdot}^{\mathbb{N}}\square \triangle^{\cdot} \xrightarrow[\simeq]{\pi} \hspace*{.5em} \mathclap{\int^{\triangle}} X_{\cdot}^{\operatorname{fat}}\square \triangle^{\cdot} \simeq\hspace*{.5em} \mathclap{\int^{\triangle}} X_{\cdot}^{\operatorname{simp}} \square \triangle^{\cdot}\] are weakly homotopy equivalent---connected by a zig-zag of weak equivalences. \end{theorem} Applying the theorem to the Str\o m model category $\mathpzc{Top}$, we see the projection \begin{equation}\label{tomDieckobservation} \pi:\vert\vert X_{\cdot}\times S_{\cdot}\vert\vert=\vert X_{\cdot}^{\mathbb{N}}\vert\rightarrow \vert X_{\cdot}^{\operatorname{fat}}\vert=\vert\vert X_{\cdot}\vert\vert \end{equation} is a homotopy equivalence, for every simplicial space $X_{\cdot}$, and hence recover \cite[Proposition $2$]{tD}, where $S_{\cdot}$ is the semi-simplicial set defined by $S_{n}:=\{i_{0}<...<i_{n}\mid i_{j}\in\mathbb{N}\}$. The idea of the proof comes from \cite[p309-310]{Se3}, where a kind of associativity is implicitly employed---we interpret it as an associative law in infinite-dimensional linear algebra, namely \[(v^{T}A)w=v^{T}(Aw),\] for any $\infty$-by-$\infty$ matrix $A$ and column vectors $v$ and $w$. In the last section, we define a map $\tau: \int^{\triangle} X_{\cdot}^{\operatorname{fat}}\square \triangle^{\cdot}\rightarrow \int^{\triangle} X_{\cdot}^{\mathbb{N}}\square \triangle^{\cdot}$ to replace the map $\rho$ constructed in \cite[p.47]{tD}\footnote{The construction of $\rho$ appears not to give a well-defined map (see Remark \ref{themaprho}).} and prove that, under the same condition of Theorem \ref{Intro:Thm2}, the map $\tau$ is a homotopy inverse to the map $\pi$. The author wishes to thank Sebastian Goette for suggesting the construction of the map $\tau$. 
He gratefully acknowledges use of facilities at and the financial support from Mathematical Research Institute of Oberwolfach. He thanks God, who gives him life and sustains him. \section{Left Kan extension} Let $\triangle_{+}$ denote the subcategory of the simplex category $\triangle$ consisting of injective morphisms. Then a semi-simplicial object in $\mathcal{M}$ is a functor $X_{\cdot}:\triangle_{+}^{\operatorname{op}}\rightarrow \mathcal{M}$ and its left Kan extension $\mathfrak{L}X_{\cdot}$, with respect to the inclusion $\triangle_{+}\hookrightarrow \triangle$, is given by \[\mathfrak{L}X_{n}:=\coprod_{\mathclap{[n]\overset{v}{\twoheadrightarrow }[k]}}X_{k},\] where $\twoheadrightarrow$ (resp. $\rightarrowtail$) stands for a surjective (resp. injective) morphism \cite[Chapter $X$]{Mac2}, \cite[p.42]{tD}. The simplicial structure of $\mathfrak{L}X_{\cdot}$ can be described as follows: Given a morphism $[n^{\prime}]\xrightarrow{u} [n]$, we let $[n^{\prime}]\overset{s_{u,v}}{\twoheadrightarrow} [k^{\prime}]\overset{i_{u,v}}\rightarrowtail [k]$ be the unique factorization of the composition $[n^{\prime}]\xrightarrow{u} [n]\overset{v}{\twoheadrightarrow} [k]$. Then $u^{\ast}:\mathfrak{L}X_{n}\rightarrow \mathfrak{L}X_{n^{\prime}}$ is given by \[\coprod_{\mathclap{[n]\overset{v}{\twoheadrightarrow} [k]}}X_{k}\xrightarrow{\coprod\limits_{v}i_{u,v}^{\ast}} \coprod_{\mathclap{[n^{\prime}]\overset{s_{u,v}}{\twoheadrightarrow} [k^{\prime}]}}X_{k^{\prime}}.\] The following generalizes \cite[Lemma $1$]{tD}. 
\begin{lemma}\label{GentomDieckLemma} Given a semi-simplicial object $X_{\cdot}$, there is a canonical isomorphism \[\mathclap{\int^{\triangle_{+}}}\hspace*{.5em} X_{\cdot}\square \triangle^{\cdot} \xrightarrow{\cong}\hspace*{.5em} \mathclap{\int^{\triangle}}\hspace*{.5em} \mathfrak{L}X_{\cdot}\square \triangle^{\cdot}.\] \end{lemma} \begin{proof} The isomorphism is given by the inclusion \[ \coprod_{[n]} X_{n}\square \triangle^{n}\xrightarrow{\coprod\operatorname{id}} \coprod_{\mathclap{\substack{[n]\\ [n]\xrightarrow{=}[n]}}} X_{n}\square \triangle^{n}\subset \coprod_{\mathclap{\substack{[n]\\ [n]\twoheadrightarrow [m]}}} X_{m}\square \triangle^{n}.\] To define its inverse, we observe that the following morphisms \begin{align*} \coprod_{\mathclap{\substack{[n]\\ [n]\overset{v}{\twoheadrightarrow} [s]}}}X_{s}\square \triangle^{n}& \xrightarrow{\coprod\limits_{\tiny \mathclap{ [n]; v} }\operatorname{id}\square v_{\ast}} \coprod_{[s]} X_{s}\square \triangle^{s};\\ \coprod_{\mathclap{\substack{[n]\xrightarrow{u} [m]\\ [m]\overset{v}{\twoheadrightarrow} [s]}}}X_{s}\square \triangle^{n}&\xrightarrow{ \coprod\limits_{\mathclap{\tiny u,v}}\operatorname{id}\square s_{u,v,\ast}} \coprod_{\mathclap{[s^{\prime}]\overset{i_{u,v}}{\rightarrowtail}[s]}} X_{s}\square \triangle^{s^{\prime}}\\ \end{align*} respect face and degeneracy maps in $\mathfrak{L}X_{\cdot}$ and face maps in $X_{\cdot}$, and hence they induce a morphism \[ \mathclap{\int^{\triangle}}\hspace*{.5em}\mathfrak{L}X_{\cdot}\square\triangle^{\cdot} \rightarrow \hspace*{.7em} \mathclap{\int^{\triangle_{+}}}\hspace*{.5em}X_{\cdot}\square \triangle^{\cdot}.\] \end{proof} \noindent \textbf{Example $1$:} Let $X_{\cdot}$ be a simplicial object in $\mathcal{M}$. 
Regarding it as a semi-simplicial object by the inclusion $\triangle_{+}\hookrightarrow \triangle$, we denote its left Kan extension by $X_{\cdot}^{\operatorname{fat}}$, and there is a canonical projection $X_{\cdot}^{\operatorname{fat}}\rightarrow X_{\cdot}$ given by the assignment \[\coprod_{\mathclap{[n]\overset{u}{\twoheadrightarrow} [m]}}X_{m}\xrightarrow{\coprod\limits_{\tiny u}u^{\ast}} \coprod_{n}X_{n}.\] \noindent \textbf{Example $2$:} Given an object $X_{\cdot}$ in $s\mathcal{M}$, we denote the left Kan extension of the semi-simplicial object $X_{\cdot}\times S_{\cdot}$ \eqref{tomDieckobservation} by $X_{\cdot}^{\mathbb{N}}$, whose $n$-th component $X_{n}^{\mathbb{N}}$ can be described as follows: \[\coprod_{\mathclap{[n]\twoheadrightarrow [k]\rightarrowtail \mathbb{N}}}X_{k}.\] There is a canonical projection from $X_{\cdot}^{\mathbb{N}}\rightarrow X_{\cdot}^{\operatorname{fat}}$ given by \[\coprod_{\mathclap{[n]\twoheadrightarrow [k]\rightarrowtail \mathbb{N}}}X_{k}\xrightarrow{\coprod\operatorname{id}} \coprod_{\mathclap{[n]\twoheadrightarrow [k]}}X_{k}\] The constructions $(-)^{\mathbb{N}}$ and $(-)^{\operatorname{fat}}$ can be viewed as functors from $s\mathcal{M}$ to itself, and they generalize Segal's unraveling construction and the fat construction defined in the introduction; namely, the following diagrams are commutative \begin{center} \begin{equation}\label{unravellingconstr} \begin{tikzpicture}[baseline=(current bounding box.center)] \node(Lu) at (0,2) {$\operatorname{Cat}^{\mathcal{M}}$}; \node(Ll) at (0,0) {$s\mathcal{M}$}; \node(Ru) at (2,2) {$\operatorname{Cat}^{\mathcal{M}}$}; \node(Rl) at (2,0) {$s\mathcal{M}$}; \path[->, font=\scriptsize,>=angle 90] (Lu) edge node [above]{$(-)^{\mathbb{N}}$}(Ru) (Lu) edge node [right]{$\operatorname{Ner}_{\cdot}$}(Ll) (Ll) edge node [above]{$(-)^{\mathbb{N}}$}(Rl) (Ru) edge node [right]{$\operatorname{Ner}_{\cdot}$}(Rl); \node(Luf) at (4,2) {$\operatorname{Cat}^{\mathcal{M}}$}; \node(Llf) at (4,0) 
{$s\mathcal{M}$}; \node(Ruf) at (6,2) {$\operatorname{Cat}^{\mathcal{M}}$}; \node(Rlf) at (6,0) {$s\mathcal{M}$}; \path[->, font=\scriptsize,>=angle 90] (Luf) edge node [above]{$(-)^{\operatorname{fat}}$}(Ruf) (Luf) edge node [right]{$\operatorname{Ner}_{\cdot}$}(Llf) (Llf) edge node [above]{$(-)^{\operatorname{fat}}$}(Rlf) (Ruf) edge node [right]{$\operatorname{Ner}_{\cdot}$}(Rlf); \end{tikzpicture} \end{equation} \end{center} where $\operatorname{Cat}^{\mathcal{M}}$ is the category of internal categories in $\mathcal{M}$. \begin{lemma}\label{lemmaofhtyequivalences} The canonical maps of simplicial sets \[\triangle^{n,\mathbb{N}}_{\cdot}\xrightarrow{\pi}\triangle^{n,\operatorname{fat}}_{\cdot}\xrightarrow{q}\triangle^{n}_{\cdot}\xleftarrow{\mathfrak{l}} \triangle^{n,\operatorname{simp}}_{\cdot}\] induce homotopy equivalences \[\triangle^{n,\mathbb{N}}\xrightarrow{\pi} \triangle^{n,\operatorname{fat}}\xrightarrow{q} \triangle^{n}\xleftarrow{\mathfrak{l}} \triangle^{n,\operatorname{simp}} \] after geometric realization. \end{lemma} \begin{proof} Since the standard $n$-simplex $\triangle^{n}_{\cdot}$ is the nerve of the linearly ordered set $[n]:=\{0\leq 1\leq ...\leq n\}$, it suffices to show that the following functors \[[n]^{\mathbb{N}}\xrightarrow{\pi}[n]^{\operatorname{fat}}\xrightarrow{q}[n]\xleftarrow{\mathfrak{l}}[n]^{\operatorname{simp}}\] induce homotopy equivalences, where $[n]^{\operatorname{simp}}$ is the over category $\triangle\downarrow [n]$. It is clear that $[n]^{\operatorname{simp}}$ has the terminal object $[n]\xrightarrow{=}[n]$, and the object $n$ is the terminal object in $[n]^{\operatorname{fat}}$ and $[n]$. Hence, if we can show that the composition $q\circ \pi:[n]^{\mathbb{N}}\rightarrow [n]$ induces a homotopy equivalence, then the lemma follows. 
To see this, we define an intermediate category $[n]^{\mathbb{N},\prime}$ of $[n]^{\mathbb{N}}$ which consists of objects $(k,l)$ with $k\leq l$ and observe that $q\circ\pi$ can be decomposed into two natural projections \begin{align*} \pi_{1}:[n]^{\mathbb{N}}&\rightarrow [n]^{\mathbb{N},\prime},\\ (k,l)&\mapsto (k,k)& k\geq l,\\ (k,l)&\mapsto (k,l)& k\leq l;\\ \pi_{2}:[n]^{\mathbb{N},\prime}&\rightarrow [n]\\ (k,l)&\mapsto k. \end{align*} It is clear that the slice category $\pi_{1}\downarrow (k,l)$ has a terminal object $(k,l)$ and the slice category $\pi_{2}\downarrow k$ has an initial object $(0,0)$. By Quillen's theorem $A$, the functors $\pi_{1}$, $\pi_{2}$ induce homotopy equivalences, and hence the proof is complete. \end{proof} \section{An associative law} \begin{lemma}\label{associativitylemma} There are canonical isomorphisms \begin{align*} \mathclap{\int^{\triangle}}X_{\cdot} \square \triangle^{\cdot,\mathbb{N}} &\xrightarrow{\cong}\hspace*{.5em}\mathclap{\int^{\triangle}}X^{\mathbb{N}}_{\cdot} \square \triangle^{\cdot} \\ \mathclap{\int^{\triangle}} X_{\cdot}\square \triangle^{\cdot,\operatorname{fat}} &\xrightarrow{\cong}\hspace*{.5em}\mathclap{\int^{\triangle}} X_{\cdot}^{\operatorname{fat}}\square\triangle^{\cdot}\\ \mathclap{\int^{\triangle}}X_{\cdot} \square \triangle^{\cdot,\operatorname{simp}} &\xrightarrow{\cong}\hspace*{.5em}\mathclap{\int^{\triangle}} X_{\cdot}^{\operatorname{simp}} \square \triangle^{\cdot} \end{align*} \end{lemma} In the case of simplicial spaces, the last two isomorphisms have been implicitly used in \cite[p.309]{Se3} and \cite[p.359]{Wa3}, and a detailed explanation of the second isomorphism using the universal property of Kan extension is given in \cite[Lemma $1$]{tD}. Here, we present a unified approach to such isomorphisms, viewing them as a consequence of an associative law in infinite-dimensional linear algebra. 
\begin{proof} Firstly, we observe that the assignments \begin{align*} X_{n} \square \triangle^{n,\mathbb{N}}_{k} =\coprod_{\mathclap{\substack{[r]\xrightarrow{u} [n]\\ [k]\twoheadrightarrow [r]\hookrightarrow \mathbb{N}}}}X_{n}&\xrightarrow{\coprod u^{\ast}} \coprod_{\mathclap{[k]\twoheadrightarrow [r]\hookrightarrow \mathbb{N}}}X_{r}=X^{\mathbb{N}}_{k},\\ X_{n} \square \triangle^{n,\operatorname{fat}}_{k} = \coprod_{\mathclap{\substack{[r]\xrightarrow{u} [n] \\ [k]\twoheadrightarrow [r]}}}X_{n}&\xrightarrow{\coprod u^{\ast}} \coprod_{\mathclap{[k]\twoheadrightarrow [r]}}X_{r}=X^{\operatorname{fat}}_{k},\\ X_{n} \square \triangle^{n,\operatorname{simp}}_{k} =\coprod_{\mathclap{\substack{\tiny [r_{0}]\rightarrow...\\ \rightarrow [r_{k}]\xrightarrow{u} [n]}}} X_{n}&\xrightarrow{\coprod u^{\ast}} \coprod_{\mathclap{\tiny [r_{0}]\rightarrow... \rightarrow [r_{k}]}}X_{r_{k}}=: X^{\operatorname{simp}}_{k} \end{align*} induce the isomorphisms, whose inverses are given by the obvious inclusions, \begin{align} \mathclap{\int^{\triangle}}X_{\cdot} \square \triangle^{\cdot,\mathbb{N}}_{\cdot} &\xrightarrow{\cong} X^{\mathbb{N}}_{\cdot}\nonumber\\ \mathclap{\int^{\triangle}} X_{\cdot}\square \triangle^{\cdot,\operatorname{fat}}_{\cdot}&\xrightarrow{\cong} X_{\cdot}^{\operatorname{fat}}\label{eq:pf:associativity:Nconstr}\\ \mathclap{\int^{\triangle}} X_{\cdot} \square \triangle^{\cdot,\operatorname{simp}}_{\cdot} &\xrightarrow{\cong} X_{\cdot}^{\operatorname{simp}}.\nonumber \end{align} Then, as different ways of computing colimits yield the same result, there is an isomorphism \begin{equation}\label{eq:associativity:colimit} \mathclap{\int^{\triangle}}\hspace*{.4em} X_{\cdot}\square(\hspace*{.7em}\mathclap{\int^{\triangle}}\hspace*{.5em}\triangle^{\cdot,-}_{\cdot}\square\triangle^{\cdot})\cong \hspace*{.5em}\mathclap{\int^{\triangle}}\hspace*{.5em} (\hspace*{.7em}\mathclap{\int^{\triangle}}\hspace*{.5em}X_{\cdot}\square \triangle^{\cdot,-}_{\cdot} )\square\triangle^{\cdot}, 
\end{equation} where $-$ can be $\mathbb{N}$, $\operatorname{fat}$ or $\operatorname{simp}$. By \eqref{eq:pf:associativity:Nconstr}, isomorphism \eqref{eq:associativity:colimit} gives the isomorphism \[\mathclap{\int^{\triangle}} X_{\cdot}\square \triangle^{\cdot,-}\cong \hspace*{.5em}\mathclap{\int^{\triangle}} X_{\cdot}^{-} \square \triangle^{\cdot}.\] \end{proof} \begin{remark} If we view $(\triangle^{n,\mathbb{N}}_{k})$, $(\triangle^{n,\operatorname{fat}}_{k})$, and $(\triangle^{n,\operatorname{simp}}_{k})$ as matrices and $(X_{n})$ and $(\triangle^{k})$ column vectors, then \eqref{eq:associativity:colimit} resembles an associative law in linear algebra. \end{remark} \section{Comparison theorems} This section discusses a generalized version of Segal's lemma \cite[Lemma $A.5$]{Se3} for a $\mathpzc{Top}$-enriched model category $\mathcal{M}$; an analogous version for simplicially enriched model categories can be found in \cite[Corollary $19.4.13$-$14$]{Hi2}. \begin{lemma}\label{theSegallemma} Let $f_{\cdot}:X_{\cdot}\rightarrow Y_{\cdot}$ and $g^{\cdot}:I^{\cdot}\rightarrow J^{\cdot}$ be level-wise weak equivalences between cofibrant objects in $s\mathcal{M}$ and $c\mathpzc{Top}$, respectively. Then the induced map between the associated coends \[\mathclap{\int^{\triangle}} f_{\cdot}\square g^{\cdot}\hspace*{-.1em}:\hspace*{1em}\mathclap{\int^{\triangle}} X_{\cdot}\square I^{\cdot} \rightarrow\hspace*{.5em} \mathclap{\int^{\triangle}} Y_{\cdot}\square J^{\cdot} \] is a weak equivalence in $\mathcal{M}$. \end{lemma} \begin{proof} The idea has been sketched in \cite[Appendix]{Se3} (see also \cite[p.43]{tD}, \cite[p.11]{Du1}, \cite[p.375]{GJ}). For the sake of completeness, we give a detailed proof here. Firstly, we claim that the latching object of any cofibrant object in $s\mathcal{M}$ (resp. $c\mathpzc{Top}$) is cofibrant. 
Following \cite[p.362-6]{GJ}, we consider the category $\mathcal{O}_{n}$ whose objects are surjective morphisms $[n]\twoheadrightarrow [m]$ with $n>m$ and morphisms from $[n]\twoheadrightarrow [m]$ to $[n]\twoheadrightarrow [m^{\prime}]$ are those morphisms $[m]\rightarrow [m^{\prime}]$ satisfying the commutative diagram \begin{center} \begin{tikzpicture} \node(Ll) at (0,0){$[m]$}; \node(Rl) at (2,0){$[m^{\prime}]$}; \node(Mu) at (1,1) {$[n]$}; \path[->, font=\scriptsize,>=angle 90] (Mu) edge (Rl) (Mu) edge (Ll) (Ll) edge (Rl); \end{tikzpicture} \end{center} The $n$-th latching object of a (co)simplicial object $X_{\cdot}$, denoted by $L_{n}X_{\cdot}$, is then given by the colimit \[\operatorname*{colim}\limits_{\tiny\mathclap{[n]\twoheadrightarrow [m] \in \mathcal{O}^{\operatorname{op}}_{n}}}X_{m}.\] Now, for each $1\leq k\leq n$, we can define the subcategories of $\mathcal{O}_{n}$ \begin{align*} \mathcal{M}_{n,k}&:=\{\phi:[n]\twoheadrightarrow [m]\mid \phi(k)\leq k\}\\ \mathcal{M}(k-1)&:=\{\phi:[n]\twoheadrightarrow [m]\mid \phi(k-1)=\phi(k)\}. 
\end{align*} Let $L_{n,k}X_{\cdot}$ be the colimit \[\operatorname*{colim}\limits_{\tiny\mathclap{[n]\twoheadrightarrow [m]\in\mathcal{M}_{n,k}}}X_{m}.\] Then there is a filtration of $L_{n}X_{\cdot}$ given by \begin{equation}\label{pf:segallemma:filtration} X_{n-1}=L_{n,1}X_{\cdot}\subset L_{n,2}X_{\cdot}\subset...\subset L_{n,n}X_{\cdot}=L_{n}X_{\cdot} \end{equation} and a pushout diagram \begin{center} \begin{equation}\label{pf:segallemma:pushoutforlatching} \begin{tikzpicture}[baseline=(current bounding box.center)] \node(Lu) at (0,1.5) {$L_{n-1,k}X_{\cdot}$}; \node(Ll) at (0,0) {$L_{n,k}X_{\cdot}$}; \node(Ru) at (5,1.4) {$\operatorname*{colim}\limits_{\tiny\mathclap{[n]\twoheadrightarrow [m]\in\mathcal{M}(k)}}X_{m}=X_{n-1}$}; \node(Rl) at (4,0) {$L_{n,k+1}X_{\cdot}$}; \draw[->] (Lu) to node [above]{\scriptsize $s_{k}^{\ast}$}(3.3,1.5); \draw[->] (Lu) to node [right]{\scriptsize $s_{k}^{\ast}$}(Ll); \draw[->] (Ll) to (Rl); \draw[->] (4,1) to (Rl); \end{tikzpicture} \end{equation} \end{center} where $s_{k}:[n]\twoheadrightarrow [n-1]$ is the degeneracy map with $s_{k}(k)=s_{k}(k+1)$. By induction and pushout diagram \eqref{pf:segallemma:pushoutforlatching}, we see filtration \eqref{pf:segallemma:filtration} is a sequence of cofibrations and the object $L_{n,k}X_{\cdot}$ is cofibrant, for every $n,k$. In particular, the objects $L^{n}I^{\cdot}$, $L^{n}J^{\cdot}$, $L_{n}X_{\cdot}$, and $L_{n}Y_{\cdot}$ are cofibrant, for every $n$. Now, since $\mathcal{M}$ is a $\mathpzc{Top}$-enriched category, given a cofibrant object $Z$ in $\mathcal{M}$ and a cofibrant object $A$ in $\mathpzc{Top}$, the functors below \begin{align*} A\square -:\mathcal{M}&\mapsto \mathcal{M}\\ -\square Z:\mathpzc{Top}&\mapsto \mathcal{M} \end{align*} preserve cofibrations and weak equivalences between cofibrant objects \cite[Lemmas $14.2.9$; $16.4.5$]{MP}. 
Thus, we have the following weak equivalences between two cospan of cofibrations \begin{equation}\label{pf:segallemma:pushout1} (X_{n}\square L^{n}I^{\cdot}\leftarrow L_{n}X_{\cdot}\square L^{n}I^{\cdot}\rightarrow L_{n}X_{\cdot}\square I^{n})\rightarrow (Y_{n}\square L^{n}J^{\cdot}\leftarrow L_{n}Y_{\cdot}\square L^{n}J^{\cdot}\rightarrow L_{n}Y_{\cdot}\square J^{n}). \end{equation} Since the subcategory of cofibrant objects in a model category is always proper, \eqref{pf:segallemma:pushout1} induces a weak equivalence between the pushouts \begin{equation}\label{pf:segallemma:weakeq} X_{n}\square L^{n}I^{\cdot}\cup_{L_{n}X_{\cdot}\square L^{n}I^{\cdot}}L_{n}X_{\cdot}\square I^{n}\rightarrow Y_{n}\square L^{n}J^{\cdot}\cup_{L_{n}Y_{\cdot}\square L^{n}J^{\cdot}}L_{n}Y_{\cdot}\square J^{n}. \end{equation} Now, recall that the $n$-skeleton object $(\operatorname{sk}_{n}Z_{\cdot})_{\cdot}$ of a simplicial object is defined by first truncating $Z_{\cdot}$ at the $n$-th degree, denoted by $\bar{Z}_{\cdot}$ and then freely throwing the degeneracies \cite[p.354]{GJ}, namely \[[m]\mapsto (\operatorname{sk}_{n}Z_{\cdot})_{m}:=\operatorname*{colim}\limits_{\tiny \mathclap{\substack{[m]\twoheadrightarrow [k]\\ k\leq n}}}\bar{Z}_{k}.\] By the definition, we have $\operatorname*{colim}\limits_{n}(\operatorname{sk}_{n}Z_{\cdot})_{\cdot}=Z_{\cdot}$. Since the $n$-skeleton \hspace*{.7em}$\mathclap{\int^{\triangle}}\hspace*{.5em}(\operatorname{sk}_{n}X_{\cdot})_{\cdot}\square I^{\cdot}$ (resp. \hspace*{.7em}$\mathclap{\int^{\triangle}}\hspace*{.5em}(\operatorname{sk}_{n}Y_{\cdot})_{\cdot}\square J^{\cdot}$) is the pushout of the cospan \[ \mathclap{\int^{\triangle}}\hspace*{.5em}(\operatorname{sk}_{n-1}X_{\cdot})_{\cdot}\square I^{\cdot}\leftarrow X_{n}\square L^{n}I^{\cdot}\cup_{L_{n}X_{\cdot}\square L^{n}I^{\cdot}}L_{n}X_{\cdot}\square I^{n}\rightarrow X_{n}\square I^{n}\] \[(\text{resp. 
}\hspace*{.7em}\mathclap{\int^{\triangle}}\hspace*{.5em}(\operatorname{sk}_{n-1}Y_{\cdot})_{\cdot}\square J^{\cdot}\leftarrow Y_{n}\square L^{n}J^{\cdot}\cup_{L_{n}Y_{\cdot}\square L^{n}J^{\cdot}}L_{n}Y_{\cdot}\square J^{n}\rightarrow Y_{n}\square J^{n}),\] and the second arrow in each span is a cofibration---$\mathcal{M}$ is $\mathfrak{Top}$-enriched \cite[Lemma $16.4.5$]{MP}, by induction, we get the weak equivalence \[\mathclap{\int^{\triangle}}\hspace*{.5em}\operatorname{sk}_{n}X_{\cdot}\square I^{\cdot}\rightarrow \hspace*{.5em}\mathclap{\int^{\triangle}}\hspace*{.5em}\operatorname{sk}_{n}Y_{\cdot}\square J^{\cdot},\] for every $n$. The theorem then follows from the fact that the functor \hspace*{.7em}$\mathclap{\int^{\triangle}}\hspace*{.5em}(-)\square I^{\cdot}$ (resp. \hspace*{.7em}$\mathclap{\int^{\triangle}}\hspace*{.5em}(-)\square J^{\cdot}$) is a left adjoint and commutes with colimits. \end{proof} As a corollary of Lemmas \ref{lemmaofhtyequivalences}, \ref{associativitylemma}, and \ref{theSegallemma}, we have the following theorem (compare with \cite[Theorem 18.7.4]{Hi2}, \cite[Proposition $1$]{Se3}, \cite[Proposition $1$]{tD}). \begin{theorem}\label{mainthm1} Given a cofibrant object $X_{\cdot}$ in $s\mathcal{M}$, the canonical maps \[ \mathclap{\int^{\triangle}}\hspace*{.5em}X_{\cdot}^{\mathbb{N}} \square\triangle^{\cdot}\xrightarrow{\pi}\hspace*{.5em}\mathclap{\int^{\triangle}}\hspace*{.5em}X_{\cdot}^{\operatorname{fat}}\square\triangle^{\cdot}\xrightarrow{q}\hspace*{.5em} \mathclap{\int^{\triangle}}\hspace*{.5em}X_{\cdot} \square\triangle^{\cdot}\xleftarrow{\mathfrak{l}} \hspace*{.5em} \mathclap{\int^{\triangle}}\hspace*{.5em} X_{\cdot}^{\operatorname{simp}}\square\triangle^{\cdot}\] are weak homotopy equivalences. \end{theorem} The following can be deduced from Theorem \ref{mainthm1}. \begin{theorem}\label{mainthm2} Let $X_{\cdot}$ be a level-wise cofibrant object in $s\mathcal{M}$. 
Then the objects below are weakly equivalent \[\mathclap{\int^{\triangle}}\hspace*{.5em}X_{\cdot}^{\mathbb{N}}\square\triangle^{\cdot}\xrightarrow[\simeq]{\pi}\hspace*{.5em}\mathclap{\int^{\triangle}}\hspace*{.5em}X_{\cdot}^{\operatorname{fat}}\square\triangle^{\cdot}\simeq \hspace*{.5em} \mathclap{\int^{\triangle}}\hspace*{.5em} X_{\cdot}^{\operatorname{simp}} \square\triangle^{\cdot}.\] \end{theorem} \begin{proof} Let $Y_{\cdot}$ be a cofibrant replacement of $X_{\cdot}$. Then the theorem ensues from the following commutative diagram of weak equivalences \begin{center} \begin{tikzpicture} \node(Lu) at (0,1.5) {$ \int^{\triangle}Y_{\cdot}^{\mathbb{N}} \square\triangle^{\cdot}$}; \node(Ll) at (0,0) {$ \int^{\triangle}X_{\cdot}^{\mathbb{N}}\square\triangle^{\cdot}$}; \node(Mu1) at (2.7,1.5) {$ \int^{\triangle}Y_{\cdot}^{\operatorname{fat}}\square\triangle^{\cdot}$}; \node(Ml1) at (2.7,0) {$ \int^{\triangle}X_{\cdot}^{\operatorname{fat}}\square\triangle^{\cdot}$}; \node(Mu2) at (5.5,1.5) {$ \int^{\triangle}Y_{\cdot}\square\triangle^{\cdot}$}; \node(Ru) at (9,1.5) {$ \int^{\triangle} Y_{\cdot}^{\operatorname{simp}} \square\triangle^{\cdot}$}; \node(Rl) at (9,0) {$\int^{\triangle} X_{\cdot}^{\operatorname{simp}} \square\triangle^{\cdot}$}; \path[->, font=\scriptsize,>=angle 90] (Ll) edge (Ml1) (Lu) edge (Mu1) (Mu1) edge (Mu2) (Lu) edge (Ll) (Mu1) edge (Ml1) (Ru) edge (Mu2) (Ru) edge (Rl); \end{tikzpicture} \end{center} The horizontal arrows above are weak equivalences by Theorem \ref{mainthm1}. The vertical arrows are weak equivalences by Lemma \ref{theSegallemma} because $Y^{\mathbb{N}}_{\cdot}\rightarrow X^{\mathbb{N}}_{\cdot}$ (resp. $Y^{\operatorname{fat}}_{\cdot}\rightarrow X^{\operatorname{fat}}_{\cdot}$ and $Y_{\cdot}^{\operatorname{simp}}\rightarrow X_{\cdot}^{\operatorname{simp}}$) is a level-wise weak equivalence between cofibrant objects in $s\mathcal{M}$---the assumption that $X_{\cdot}$ is level-wise cofibrant is used here. 
\end{proof} \begin{remark} The coend \hspace*{.7em}$\mathclap{\int^{\triangle}}\hspace*{.5em} X_{\cdot}^{\operatorname{simp}} \square\triangle^{\cdot}$ computes the homotopy colimit \cite[Part $I.4$]{Du1}, \cite[Chapter $18.1$]{Hi2} of the diagram $X_{\cdot}$, and hence, all these variants of geometric realization in Theorem \ref{mainthm2} compute the homotopy colimit of a level-wise cofibrant object in $s\mathcal{M}$ (compare with \cite[Section $17.4$]{Du1}). However, when viewing the homotopy colimit functor as the left Kan extension of the colimit functor, we do not use the Reedy model structure on $s\mathcal{M}$ but the projective one \cite[Section $11.6$]{Hi2}, \cite[Section $5.8$]{Du1}. \end{remark} \section{The homotopy inverse to $\pi$} Let $\operatorname{Sd}_{\cdot}\triangle^{n}$ be the semi-simplicial set given by \[\operatorname{Sd}_{k}\triangle^{n}:=\{[l_{0}]\rightarrowtail...\rightarrowtail [l_{k}]\rightarrowtail [n]\mid l_{i}<l_{i+1}\leq n \text{ for }i=0...k-1\}.\] Then the fat realization of $\operatorname{Sd}_{\cdot}\triangle^{n}$\hspace*{.7em} \[\mathclap{\int^{\triangle_{+}}}\operatorname{Sd}_{\cdot}\triangle^{n}\times \triangle^{\cdot}\] is the barycentric subdivision of $\triangle^{n}$ and canonically homeomorphic to $\triangle^{n}$. 
Now, consider the assignment below \begin{align*} \bar{\tau}^{n}:\coprod_{m,k}\triangle^{n}_{m}\times \operatorname{Sd}_{k}\triangle^{m}\times\triangle^{k}&\rightarrow \coprod_{k}\triangle^{n}_{k}\times S_{k}\times \triangle^{k}\\ (x,[l_{0}]\rightarrowtail [l_{1}]\rightarrowtail...\rightarrowtail [l_{k}]\rightarrowtail [m],t)&\mapsto (u^{\ast}x,l_{0} < l_{1} <...<l_{k} ,t), \end{align*} where $u:[k]\rightarrow [m]$ (the last vertex map) is defined by letting $u(i)$ be the image of $l_{i}$ under the composition \[[l_{i}]\rightarrowtail...\rightarrowtail [l_{k}]\rightarrowtail [m],\] and observe that the assignment descends to a map of coends \[\tau^{n}:\triangle^{n,\operatorname{fat}}=\hspace*{.7em}\mathclap{\int^{\triangle_{+}}}\hspace*{.5em}\triangle^{n}_{\cdot}\times\hspace*{.8em}\mathclap{\int^{\triangle_{+}}}\hspace*{.5em}\operatorname{Sd}_{\cdot}\triangle^{\cdot}\times\triangle^{\cdot}\rightarrow\hspace*{.7em}\mathclap{\int^{\triangle_{+}}}(\triangle^{n}_{\cdot}\times S_{\cdot})\times\triangle^{\cdot}=\triangle^{n,\mathbb{N}}, \] where the second identity follows from Lemma \ref{GentomDieckLemma}. Observe also that the composition \[\triangle^{n,\operatorname{fat}}\xrightarrow{\tau^{n}}\triangle^{n,\mathbb{N}}\xrightarrow{\pi}\triangle^{n,\operatorname{fat}}\] is induced by the standard map from the barycentric subdivision of a simplex to itself, and hence the linear homotopy gives a homotopy of cosimplicial spaces \[H:\triangle^{\cdot,\operatorname{fat}}\times I\rightarrow \triangle^{\cdot,\operatorname{fat}}\] between the identity and $\pi\circ\tau^{n}$. Now, let $X_{\cdot}$ be a simplicial object in $\mathcal{M}$. 
Then, by Lemma \ref{associativitylemma}, the cosimplicial map $\tau^{\cdot}$ induces a morphism \[\tau:\hspace*{.7em}\mathclap{\int^{\triangle}}\hspace{.5em}X_{\cdot}^{\operatorname{fat}}\square\triangle^{\cdot}=\hspace*{.7em}\mathclap{\int^{\triangle}}\hspace{.5em}X_{\cdot}\square\triangle^{\cdot,\operatorname{fat}}\rightarrow\hspace*{.7em}\mathclap{\int^{\triangle}}\hspace{.5em}X_{\cdot}\square\triangle^{\cdot,\mathbb{N}}=\hspace*{.7em}\mathclap{\int^{\triangle}}\hspace{.5em}X_{\cdot}^{\mathbb{N}}\square\triangle^{\cdot},\] and the homotopy $H$ shows that the morphism $\tau$ is the homotopy inverse to $\pi$ when $X_{\cdot}$ is level-wise cofibrant (Theorem \ref{mainthm2}). Furthermore, the morphism $\tau$ is functorial with respect to $X_{\cdot}$ as we have the commutative diagram below, for any morphism $X_{\cdot}\rightarrow Y_{\cdot}$ in $s\mathcal{M}$, \begin{center} \begin{tikzpicture} \node(Lu) at (0,1.5) {$ \int^{\triangle}X_{\cdot}\square\triangle^{\cdot,\operatorname{fat}}$}; \node(Ll) at (0,0) {$ \int^{\triangle} Y_{\cdot}\square\triangle^{\cdot,\operatorname{fat}}$}; \node(Ru) at (5,1.5) {$ \int^{\triangle}X_{\cdot}\square\triangle^{\cdot,\mathbb{N}}$}; \node(Rl) at (5,0) {$ \int^{\triangle}Y_{\cdot}\square\triangle^{\cdot,\mathbb{N}}$}; \path[->, font=\scriptsize,>=angle 90] (Lu) edge node [above] {$\tau$}(Ru) (Lu) edge (Ll) (Ll) edge node [above] {$\tau$}(Rl) (Ru) edge (Rl); \end{tikzpicture} \end{center} Hence, we have proved the following theorem. \begin{theorem} Given a simplicial object $X_{\cdot}$ in $\mathcal{M}$, there is a well-defined morphism \[\tau:\hspace*{.7em}\mathclap{\int^{\triangle}}\hspace{.5em}X_{\cdot}^{\operatorname{fat}}\square\triangle^{\cdot}\rightarrow\hspace*{.7em}\mathclap{\int^{\triangle}}\hspace{.5em}X_{\cdot}^{\mathbb{N}}\square\triangle^{\cdot}\] such that the composition $\pi\circ\tau$ is left homotopic to $\operatorname{id}$ and $\tau$ is functorial with respect to $X_{\cdot}$. 
If, in addition, $X_{\cdot}$ is level-wise cofibrant, then the morphism $\tau$ is a homotopy inverse to the morphism $\pi$. \end{theorem} \begin{remark}\label{themaprho} To define a homotopy inverse to the map $\pi$ in the case of simplicial spaces, \cite[p.45-7]{tD} considers the following assignment \begin{align}\label{tomDieckmap} X_{n}\times \triangle^{n}&\rightarrow X_{n}\times S_{n}\times \triangle^{n}\\ (y;t_{0},...,t_{n})&\mapsto (y,1<...<n;s_{1,n}(t_{0},...,t_{n}),...,s_{n,n}(t_{0},...,t_{n}))\nonumber, \end{align} where \[s_{j,n}(t_{0},...,t_{n}):=(j+1)\sum_{E}\max(0,\min_{i\in E}t_{i}-\max_{i\not\in E}t_{i})\] and $E$ runs through all subsets of $[n]$ with $j+1$ elements---the map $s_{j,n}$ is a kind of folding map sending all the $n$-simplices in the barycentric subdivision of an $n$-simplex to the $n$-simplex. However, the assignment does not respect face maps---the first and second components in assignment \eqref{tomDieckmap} should depend on $(t_{0},...,t_{n})$, the coordinates of points in $\triangle^{n}$. The map $\rho$ is used in cohomology theories of spaces with two topologies \cite[Theorem $7.1$]{Mo1}. \end{remark} \addcontentsline{toc}{section}{\hspace*{2.2em} References} \end{document}
\begin{document} \maketitle \begin{abstract} Let $D$ be a weighted oriented graph and $I(D)$ be its edge ideal. If $D$ contains an induced odd cycle of length $2n+1$, under a certain condition, we show that $ {I(D)}^{(n+1)} \neq {I(D)}^{n+1}$. We give a necessary and sufficient condition for the equality of ordinary and symbolic powers of the edge ideal of a weighted oriented graph having each edge in some induced odd cycle of it. We characterize the weighted naturally oriented unicyclic graphs with unique odd cycles and weighted naturally oriented even cycles for the equality of ordinary and symbolic powers of their edge ideals. Let $ D^{\prime} $ be the weighted oriented graph obtained from $D$ after replacing the weights of vertices with non-trivial weights which are sinks, by trivial weights. We show that the symbolic powers of $I(D)$ and $I(D^{\prime})$ behave in a similar way. Finally, if $D$ is any weighted oriented star graph, we prove that $ {I(D)}^{(s)} = {I(D)}^s $ for all $s \geq 2.$ \noindent Keywords: Weighted oriented graph, sink vertex, edge ideal, symbolic power, induced odd cycle, even cycle, star graph. \end{abstract} \section{Introduction} Let $k$ be a field and $R=k[x_1,\ldots ,x_n]$ be a polynomial ring in $n$ variables. Let $I$ be a homogeneous ideal of $R$. Then for $s\geq 1$, the $s$-th symbolic power of $I$ is defined as $I^{(s)}=\displaystyle{\bigcap_{P\in \operatorname{Ass}(I)}(I^sR_P\cap R)}$. Geometrically, the symbolic powers are important since they capture all the polynomials that vanish with a given multiplicity. We refer the reader to \cite{huneke} for the background results on symbolic powers of ideals. By definition it is clear that $I^s\subseteq I^{(s)}$ for all $s\geq 1$, but the reverse containment may fail. It is always an interesting problem to find a necessary and sufficient condition for the reverse containment to hold. 
No criterion is known for when the equality $I^s = I^{(s)}$ holds for an arbitrary ideal. But for certain classes of ideals such as prime or radical ideals, there are equivalent conditions given by Hochster in \cite{hochster} and by Li and Swanson in \cite{li}. In this paper we compare the ordinary and symbolic powers of edge ideals of weighted oriented graphs. \vspace*{0.1cm}\\ Let $D = (V (D), E(D), w)$ be a weighted oriented graph with the vertex set $V(D)=\{x_1,\ldots,x_n\},$ the edge set $E(D)$ consisting of ordered pairs of the form $(x_i,x_j)$, which represents a directed edge from the vertex $x_i$ to the vertex $x_j$, and the weight function $ w : V (D) \longrightarrow \mathbb{N}$. The weight of a vertex $x_i\in V(D)$ is $w(x_i)$, denoted by $w_i$ or $ w_{x_i}.$ If a vertex $x_i$ of $D$ is a source (i.e., has only arrows leaving $x_i$), we set $w_i = 1$. The edge ideal of $D$ is denoted by $I(D)$ and is defined as $I(D)=(x_ix_j^{w_j}|(x_i,x_j)\in E(D)).$ Let $G=(V(G),E(G))$ be the underlying graph of $ D $ whose vertex set is $V(G) = V(D) $ and edge set is $E(G)= \{ \{x_i, x_j \}~|~(x_i, x_j) \in E(D) \} .$ The edge ideal of $ G $ is $I(G) = ( x_ix_j ~|~ \{x_i, x_j\} \in E(G) ).$ \vspace*{0.1cm}\\ In general, even for monomial ideals, the comparison of ordinary and symbolic powers is a difficult problem. For simple graphs, by \cite[Theorem 5.9]{simis}, we know that all the ordinary and symbolic powers of the edge ideal coincide if and only if the graph is bipartite. But there is no such result for weighted oriented graphs. As the edge ideal of a weighted oriented graph depends upon both the orientation of edges and the weights on vertices, it is actually difficult to get a necessary and sufficient condition, even for a particular class of weighted oriented graphs.
Recently in \cite{kanoy}, the authors characterize the weighted oriented complete graphs and weighted oriented complete bipartite graphs for the equality of all the ordinary and symbolic powers of their edge ideals. If a simple graph $G$ contains an induced odd cycle of length $2n+1$, we know that $ {I(G)}^{(n+1)} \neq {I(G)}^{n+1} $. We prove that the same is true for weighted oriented graphs, if $D$ contains an induced odd cycle $ D^{\prime} $ of length $2n+1$ with the condition `` $ V (D^{\prime}) \setminus N_D^+(V^{+}(D))$ contains one vertex which is not a source in $D^{\prime}$, otherwise, it contains a vertex which is a source in $D^{\prime}$ with trivial weight in $D$'', then $ {I(D)}^{(n+1)} \neq {I(D)}^{n+1}$ (see Proposition \ref{oddcycle}). As one of its applications, we characterize the weighted oriented graph having each edge in some induced odd cycle of it, for the equality of ordinary and symbolic powers of its edge ideal in Theorem \ref{oddcycle2}. Also we characterize the weighted naturally oriented unicyclic graphs with unique odd cycles and weighted naturally oriented even cycles for the equality of ordinary and symbolic powers of their edge ideals (see Theorem \ref{unicyclic}, Corollary \ref{evencycle1} and Proposition \ref{evencycle2}, respectively). \vspace*{0.1cm}\\ Let $ D^{\prime} $ be the weighted oriented graph obtained from $D$ after replacing the weights of vertices with non-trivial weights which are sinks, by trivial weights. If we assume all vertices of $D$ with non-trivial weights are sinks, then $ D^{\prime} = G $ and in \cite{mandal1}, we proved that the symbolic powers of $I(D)$ and $I(G)$ behave in a similar way.
In this paper, even if we do not assume all vertices of $D$ with non-trivial weights are sinks, we show that the symbolic powers of $I(D)$ and $I(D^{\prime})$ behave in a similar way (see Theorem \ref{sym.theorem.1}). We prove that the symbolic defects of the edge ideals of $D$ and $D^{\prime}$ are the same in Proposition \ref{sdefect} and so we have ${{I(D^{\prime})}}^{(s)} = {I(D^{\prime})}^s $ if and only if $ {I(D)}^{(s)} = {I(D)}^s $ for each $s \geq 1.$ As an instant application of this result, we characterize the weighted oriented even cycles of length $ 4 $ which are not naturally oriented for the equality of all the ordinary and symbolic powers of their edge ideals (see Proposition \ref{evencycle4}). Finally in Theorem \ref{stargraph}, we prove the equality of ordinary and symbolic powers of the edge ideal of any weighted oriented star graph. \section{Preliminaries} In this section, we recall some definitions and results for weighted oriented graphs. \begin{definition} A vertex cover $ C $ of $ D $ is a subset of $ V(D) $ such that if $ (x, y) \in E(D) ,$ then $ x \in C $ or $ y \in C . $ A vertex cover $ C $ of $ D $ is minimal if each proper subset of $ C $ is not a vertex cover of $ D. $ We set $(C)$ to be the ideal generated by the vertices of $C.$ \end{definition} \begin{definition} Let $ x $ be a vertex of a weighted oriented graph $ D, $ then the sets $ N_D^+ (x) = \{y ~|~ (x, y) \in E(D)\} $ and $ N_D^- (x) = \{y ~|~ (y, x) \in E(D)\} $ are called the out-neighbourhood and the in-neighbourhood of $ x ,$ respectively. Moreover, the neighbourhood of $ x $ is the set $ N_D(x) = N_D^+ (x)\cup N_D^- (x) .$ For a subset of the vertices $ W \subseteq V (D), $ we define $ N_D^+(W) $, $ N_D^-(W) $ and $ N_D(W) $ similarly.
For $T \subset V(D) ,$ we define the induced subgraph $\mathcal{D} = (V( \mathcal{D}), E(\mathcal{D}), w)$ of $D$ on $T$ to be the weighted oriented graph such that $V (\mathcal{D}) = T$ and for any $ u, v \in V (\mathcal{D}), $ $ (u,v) \in E(\mathcal{D})$ if and only if $(u,v) \in E(D)$. Here $ \mathcal{D} = (V (\mathcal{D}), E(\mathcal{D}), w) $ is a weighted oriented graph with the same orientation as in $D$ and for any $ u \in V (\mathcal{D}), $ if $ u $ is not a source in $ \mathcal{D}, $ then its weight equals the weight of $ u $ in $D,$ otherwise, its weight in $ \mathcal{D} $ is $ 1. $ For a subset $W \subset V(D) $ of the vertices in $ D, $ define $ D \setminus W $ to be the induced subgraph of $ D $ with the vertices in $ W $ (and their incident edges) deleted. Define $\deg_D(x) = |N_D(x)|$ for $ x \in V(D) $. A vertex $ x \in V(D) $ is called a source vertex if $N_D (x)= N_D^+ (x) .$ A vertex $ x \in V(D) $ is called a sink vertex if $N_D (x)= N_D^- (x) .$ We set $V^+(D)$ as the set of vertices of $D$ with non-trivial weights. \end{definition} \begin{definition}\cite[Definition 4]{pitones} Let $ C $ be a vertex cover of a weighted oriented graph $ D.$ We define \vspace*{0.2cm}\\ \hspace*{3cm}$ L_1^D(C) = \{x \in C ~|~ N_D^+ (x) \cap C^c \neq \phi \}, $ \vspace*{0.2cm}\\ \hspace*{2.85cm} $L_2^D(C) = \{x \in C ~|~x\notin L_1^D(C) ~\mbox{and}~ N_D^-(x) \cap C^c \neq \phi \}$ and \vspace*{0.2cm}\\ \hspace*{2.8cm} $ L_3^D(C) = C \setminus (L_1^D(C) \cup L_2^D(C))$ \vspace*{0.2cm}\\ where $ C^c $ is the complement of $ C ,$ i.e., $ C^c = V(D) \setminus C. 
$ \end{definition} \begin{lemma}\cite[Proposition 6]{pitones}\label{s.v.0} Let $ C $ be a vertex cover of $ D. $ Then $ L_3^D(C) =\phi $ if and only if $ C $ is a minimal vertex cover of $ D .$ \end{lemma} \begin{lemma}\cite[Proposition 5]{pitones}\label{L3} If $ C $ is a vertex cover of $ D, $ then $ L_3^D(C) =\{x\in C~|~N_D(x) \subset C\}. $ \end{lemma} \begin{definition}\cite[Definition 7]{pitones} A vertex cover $ C $ of $ D $ is strong if for each $ x \in L_3^D(C) $ there is $ (y, x) \in E(D) $ such that $ y \in L_2^D(C) \cup L_3^D(C)$ with $ y \in V^+(D)$ (i.e., $ w(y) \neq 1 $). \end{definition} \begin{remark}\cite[Remark 8, Proposition 5]{pitones}\label{s.v.1} A vertex cover $ C $ of $D$ is strong if and only if for each $ x \in L_3^D(C),$ we have $N_D^{-}(x) \cap V^+(D) \cap [C\setminus {L_1^D{(C)}} ] \neq \phi.$ \end{remark} \begin{lemma}\cite[Corollary 9]{pitones}\label{minimal to strong} If $ C $ is a minimal vertex cover of $ D, $ then $ C $ is strong. \end{lemma} \begin{definition} A strong vertex cover $C$ of $D$ is said to be a maximal strong vertex cover of $D$ if it is not contained in any other strong vertex cover of $D.$ \end{definition} \begin{definition}\cite[Definition 32]{pitones} A weighted oriented graph $ D $ has the minimal-strong property if each strong vertex cover is a minimal vertex cover. \end{definition} \begin{lemma}\cite[Lemma 47]{pitones}\label{s.v.4} If the vertices of $ V^+(D) $ are sinks, then $ D $ has the minimal-strong property. \end{lemma} \begin{definition}\cite[ Definition 19]{pitones}\label{definition19} Let $ C $ be a vertex cover of $ D. $ The irreducible ideal associated to $ C $ is the ideal $ I_C := (L_1^D(C)\cup \{x_j^{w(x_j)} |x_j \in L_2^D(C) \cup L_3^D(C) \}).$ \end{definition} \begin{lemma}\cite[Lemma 20]{pitones}\label{edge} Let $ D $ be a weighted oriented graph. Then $I(D) \subseteq I_C$, for each vertex cover $ C $ of $ D $. 
\end{lemma} The next lemma describes the irreducible decomposition of the edge ideal of a weighted oriented graph $ D $. \begin{lemma}\cite[Theorem 25, Remark 26]{pitones}\label{s.v.2} Let $ D $ be a weighted oriented graph and $C_1,\ldots, C_s$ be the strong vertex covers of $ D ,$ then the irredundant irreducible decomposition of $ I(D) $ is $$I(D) = I_{C_1} \cap\cdots\cap I_{C_s} $$ where each $ I_{C_i} = ( L_1^D(C_i) \cup \{x_j^{w(x_j)}~|~x_j \in L_2^D(C_i) \cup L_3^D(C_i)\} ) ,$ $ \operatorname{rad}(I_{C_i})=P_i = (C_i)$. \end{lemma} \begin{corollary}\cite[Remark 26]{pitones}\label{s.v.3} Let $ D $ be a weighted oriented graph. Then $ P $ is an associated prime of $ I(D) $ if and only if $ P = (C) $ for some strong vertex cover $ C $ of $ D. $ \end{corollary} Let $ I \subset R$ and $ I = Q_1\cap \cdots \cap Q_m $ be the primary decomposition of the ideal $I$. For $ P \in \operatorname{Ass}(R/I), $ we denote $ Q_{\subseteq P} $ to be the intersection of all $ Q_i $ with $ \sqrt{Q_i} \subseteq P. $ If $C$ is a strong vertex cover of a weighted oriented graph $ D $, then $(C) \in \operatorname{Ass}(R/I(D))$. We denote $ I_{\subseteq {C}} $ as $ I_{\subseteq {(C)}} $. In the following lemma, we write \cite[Theorem 3.7]{cooper} for edge ideals of weighted oriented graphs. \begin{lemma}\cite[Theorem 3.7]{cooper}\label{cooper} Let $I$ be the edge ideal of a weighted oriented graph $ D $ and $C_1,\ldots,C_r$ be the maximal strong vertex covers of $D.$ Then $$I^{(s)}=(I_{\subseteq {C_1}})^s \cap\cdots\cap (I_{\subseteq {C_r}})^s.$$ \end{lemma} \begin{lemma}\cite[Lemma 3.1]{mandal1}\label{mandal} Let $ D $ be a weighted oriented graph. If $ V(D) $ is a strong vertex cover of $ D ,$ then $ I(D)^{(s)}=I(D)^s $ for all $ s \geq 2.$ \end{lemma} \begin{lemma}\cite{kanoy}\label{kanoy} Let $ D $ be a weighted oriented graph. Then $ V (D) $ is a strong vertex cover if and only if $ N^+_D (V^+(D)) = V (D). 
$ \end{lemma} \begin{lemma}\cite[Corollary 3.8]{mandal1}\label{DG} Let $ D $ be a weighted oriented bipartite graph where the vertices of $V^{+}(D)$ are sinks. Then $ {{I(D)}^{(s)}} = {{I(D)}^{s}} $ for all $ s \geq 2. $ \end{lemma} \begin{definition} Let $ I \subset R $ be a monomial ideal. Let $ \mathcal{G}(I) $ be the set of minimal generators of the ideal $ I $. Let $ J $ be the ideal we need to add to $ I^s $ to achieve $ I^{(s)}, $ i.e., $ I^{(s)} = I^s + J $. We set $ \operatorname{sdefect}(I,s) $ to be the number of elements of $ \mathcal{G}(J) $. \end{definition} The following lemma is based on the extension of ideals. \begin{lemma}\cite{atiyah}\label{atiyah} Let $ f: A \longrightarrow B $ be a ring homomorphism. Let $I$ be an ideal of $A.$ The extension $I^e$ of $I$ is the ideal $Bf(I)$ generated by $f(I)$ in $B$. If $I_1$ and $I_2$ are ideals of $ A $, then \begin{enumerate} \item[(a)] $(I_1 \cap I_2)^{e} \subseteq (I_1)^{e} \cap (I_2)^{e}$, \item[(b)] $(I_1 I_2)^{e} = (I_1)^{e} (I_2)^{e}$. \end{enumerate} \end{lemma} \begin{notation} Let $g \in k[x_1,\ldots,x_n]$ be a monomial. We define the support of $g$ as $\{x_i : x_i \mid g\} $ and we denote it by $\operatorname{supp}(g).$ \end{notation} \section{Comparing ordinary and symbolic powers of weighted oriented graphs} In this section, we compare the ordinary and symbolic powers of edge ideals of weighted oriented graphs containing induced odd cycles and weighted naturally oriented even cycles. In \cite[Proposition 4.10]{huneke}, if a simple graph contains an induced odd cycle $ C_{2n+1} = (x_1,\ldots,x_{2n+1}) $, the authors have shown that the $(n+1)$-th ordinary and symbolic powers of its edge ideal are different. In this paper we extend this result to weighted oriented graphs under a certain condition. \begin{proposition}\label{oddcycle} Let $ D $ be a weighted oriented graph. 
Let $ D^{\prime} $ be an induced odd cycle with underlying graph $ C_{2n+1} = (x_1,\ldots,x_{2n+1}) $ where $ V (C_{2n+1}) \nsubseteq N_D^+(V^{+}(D))$ and it satisfies the condition `` $ V (C_{2n+1}) \setminus N_D^+(V^{+}(D))$ contains one vertex which is not a source in $D^{\prime}$, otherwise, it contains a vertex which is a source in $D^{\prime}$ with trivial weight in $D$''. Then $ I(D)^{(n+1)} \neq I(D)^{n+1}. $ \end{proposition} \begin{proof} Let $w_i = w(x_i)$ for $x_i \in V(C_{2n+1})$. Let $f={x_1}^{a_1}\cdots{x_{2n+1}}^{a_{2n+1}}$ where each $a_i=w_i$ if $ N^-_{D^{\prime}}(x_i) \neq \phi$ (i.e., $x_i$ is not a source in $D^{\prime}$) and $a_i=1$ if $ N^-_{D^{\prime}}(x_i) = \phi$ (i.e., $x_i$ is a source in $D^{\prime}$). We claim that $ f \in I(D)^{(n+1)} \setminus I(D)^{n+1}. $ We set $ m_{\{x_i,x_j\}} $ as the minimal generator of $ I(D^{\prime})$ corresponding to the edge $ \{x_i, x_j\} \in E(C_{2n+1}) $. Note that $ m_{\{x_i,x_j\}} $ can be $ x_ix_j^{w_j} $ or $ x_jx_i^{w_i} $ and $x_i^{a_i} x_j^{a_j}$ is a multiple of $ m_{\{x_i,x_j\}} $. Let $ u \in V (C_{2n+1}) \setminus N_D^+(V^+(D))$ be that vertex which is not a source in $D^{\prime}$ and if $u$ is a source in $D^{\prime}$, then its weight is $1$ in $D$. Without loss of generality we can assume that $ u = x_1$. Here $N_{D^{\prime}}(x_1)= \{x_2, x_{2n+1}\}.$ Let $ C $ be a maximal strong vertex cover of $ D $. Suppose $x_1 \notin C$. Then for any strong vertex cover $C^{\prime} \subseteq C,$ $x_1 \notin C^{\prime}$ and so $x_2$ and $x_{2n+1} \in C^{\prime}$. 
If $(x_2,x_1) \in E(D^{\prime}),$ then for each strong vertex cover $C^{\prime} \subseteq C,$ $ x_1 \in N^+_{D}(x_2) \cap {C^{\prime}}^c$ and hence $x_2 \in L_1^D(C^{\prime})$. This implies $x_2 \in I_{\subseteq C}.$ If $(x_1,x_2) \in E(D^{\prime}),$ then $x_2^{a_2} = x_2^{w_2} \in I_{\subseteq C}.$ In both cases $x_2^{a_2} \in I_{\subseteq C}.$ By the same argument we can show $x_{2n+1}^{a_{2n+1}} \in I_{\subseteq C}.$ By Lemma \ref{edge}, $m_{\{x_3,x_4\}},\ldots,m_{\{x_{2n-1},x_{2n}\}} \in I_{\subseteq C}$. So $x_{2}^{a_2}\cdot x_{2n+1}^{a_{2n+1}}\cdot m_{\{x_3,x_4\}}\cdots m_{\{x_{2n-1},x_{2n}\}} \in ({I_{\subseteq C}})^{n+1}.$ Hence $f={x_1}^{a_1}\cdots{x_{2n+1}}^{a_{2n+1}}\in ({I_{\subseteq C}})^{n+1}$. Suppose $x_1 \in C$. By Remark \ref{s.v.1}, we have $x_1 \notin L^{D}_3(C)$ and by Lemma \ref{L3}, at least one element of $N_D(x_1)$ does not belong to $C$. Then for any strong vertex cover $C^{\prime} \subseteq C,$ at least one element of $N_D(x_1)$ does not belong to $C^{\prime}$ and so $x_1 \in C^{\prime}$. If $x_1$ is not a source in $D^{\prime}$, $x_1^{a_1} = x_1^{w_1} \in I_{\subseteq C}.$ If $x_1$ is a source in $D^{\prime}$, by our assumption $w_1 = 1 $ in $D$ and hence $x_1 \in I_{\subseteq C}.$ In both cases $x_1^{a_1} \in I_{\subseteq C}.$ By Lemma \ref{edge}, $m_{\{x_2,x_3\}},\ldots,m_{\{x_{2n},x_{2n+1}\}} \in I_{\subseteq C}$. So $x_1^{a_1}\cdot m_{\{x_2,x_3\}}\cdots m_{\{x_{2n},x_{2n+1}\}} \in ({I_{\subseteq C}})^{n+1}.$ Hence $f={x_1}^{a_1}\cdots{x_{2n+1}}^{a_{2n+1}}\in ({I_{\subseteq C}})^{n+1}$. Similarly for any maximal strong vertex cover $ C $ of $ D $, we can show $f={x_1}^{a_1}\cdots{x_{2n+1}}^{a_{2n+1}}\in ({I_{\subseteq C}})^{n+1}$. Hence $f \in I(D)^{(n+1)}$. 
It remains to show that $f \notin I(D)^{n+1}.$ Since $ \operatorname{supp}(f) = V(D^{\prime})$ and $D^{\prime}$ is an induced subgraph of $D$, it is enough to show $f \notin I(D^{\prime})^{n+1}.$ Here $|\operatorname{supp}(f)| = 2n+1.$ Thus if we want to express $f$ as a multiple of a product of some $n+1$ minimal generators of $I(D^{\prime}),$ then one $ x_i^{a_i} $ of $f$ must be involved in two minimal generators of $I(D^{\prime})$. But by the definition of $a_i,$ no $ x_i^{a_i} $ of $f$ can be involved in two minimal generators of $I(D^{\prime})$. Therefore $f={x_1}^{a_1}\cdots{x_{2n+1}}^{a_{2n+1}} \notin I(D^{\prime})^{n+1}.$ Thus $f \in I(D)^{(n+1)} \setminus I(D)^{n+1}$. Hence the proof follows. \end{proof} \begin{remark} The above proposition may not be true if we remove the given condition. \begin{figure} \caption{A weighted oriented graph $D$ containing an induced odd cycle $D^{\prime}$.} \label{fig.3} \end{figure} \hspace*{0.5cm}For example consider the weighted oriented graph $D$ as in Figure \ref{fig.3}. Then $I(D) = (x_1x_2,x_2x_3^3,x_1x_3,x_4x_1^3)$. Here $D$ contains an induced cycle $D^{\prime}$ of length $3$ where $ x_2\in N_D^+(V^+(D)) $, $x_3 \in N_D^+(V^+(D))$ and $ x_1 \in V (D^{\prime}) \setminus N_D^+(V^+(D))$. Note that $x_1$ is a source in $D^{\prime}$ but $w(x_1) \neq 1$ in $D$. Using Macaulay $ 2 $, we see that $I(D)^{(2)} = I(D)^2$. \end{remark} Now we see some applications of Proposition \ref{oddcycle} to weighted oriented graphs containing induced odd cycles. \begin{theorem}\label{oddcycle2} Let $ D $ be a weighted oriented graph such that each edge of $ D $ lies in some induced odd cycle of it. Then $ V(D) $ is a strong vertex cover of $ D $ if and only if $ {I(D)}^{(s)} = {I(D)}^s $ for all $ s \geq 2. 
$ \end{theorem} \begin{proof} If $ V(D) $ is a strong vertex cover of $ D $, then by Lemma \ref{mandal}, $ {I(D)}^{(s)} = {I(D)}^s $ for all $ s \geq 2. $ Assume that $ {I(D)}^{(s)} = {I(D)}^s $ for all $ s \geq 2. $ Suppose $ V (D) $ is not a strong vertex cover. By Lemma \ref{kanoy}, we have $ N^+_D (V^+(D)) \neq V (D). $ Let $u \in V(D) \setminus N^+_D (V^+(D)).$ \textbf{Case (1)} Suppose $u$ is not a source in $D$. Since $u$ is not a source in $D$, there exists some induced odd cycle $D^{\prime}$ of $D$ such that $u $ is not a source in $D^{\prime}$. Here $u \in V(D^{\prime}) \setminus N^+_D (V^+(D))$. If $|V(D^{\prime})| = 2m+1$ for some $m$, then by Proposition \ref{oddcycle}, we get $ I(D)^{(m+1)} \neq I(D)^{m+1},$ which is a contradiction. \textbf{Case (2)} Suppose $u$ is a source in $D$. Then consider any induced odd cycle $D^{\prime\prime}$ of $D$ containing the vertex $u$. Here $u$ is a source in $D^{\prime\prime}$ and $w(u)=1$ in $D$. Here $u \in V(D^{\prime\prime}) \setminus N^+_D (V^+(D))$. If $|V(D^{\prime\prime})| = 2k+1$ for some $k$, then by Proposition \ref{oddcycle}, we get $ I(D)^{(k+1)} \neq I(D)^{k+1},$ which is a contradiction. \end{proof} The following result is an immediate consequence of the above result. \begin{corollary}\label{oddcycle3} Let $ D $ be a weighted oriented odd cycle. Then $ V(D) $ is a strong vertex cover of $ D $ if and only if $ {I(D)}^{(s)} = {I(D)}^s $ for all $ s \geq 2. $ \end{corollary} \begin{corollary}\label{clique.oddcycle.completegraph} Let $ D $ be a weighted oriented graph whose underlying graph $G$ is a clique sum of finitely many odd cycles and complete graphs. Then $ V(D) $ is a strong vertex cover of $ D $ if and only if $ {I(D)}^{(s)} = {I(D)}^s $ for all $ s \geq 2. 
$ \end{corollary} \begin{proof} In a complete graph, each edge lies in some induced odd cycle of length $3$. Thus each edge of $ D $ lies in some induced odd cycle of it. Hence the proof follows from Theorem \ref{oddcycle2}. \end{proof} \begin{corollary}\label{m-partite graph} Let $ D $ be a weighted oriented graph whose underlying graph $G$ is a complete $m$-partite graph for some $m \geq 3$. Then $ V(D) $ is a strong vertex cover of $ D $ if and only if $ {I(D)}^{(s)} = {I(D)}^s $ for all $ s \geq 2. $ \end{corollary} \begin{proof} Note that each edge of $G$ lies in some induced odd cycle of length $3$. Therefore each edge of $ D $ lies in some induced odd cycle of it. Hence the proof follows from Theorem \ref{oddcycle2}. \end{proof} In the next result we see that the presence of a certain induced weighted naturally oriented path guarantees the failure of the equality of the $3$rd ordinary and symbolic powers of the edge ideal of a weighted oriented graph. \begin{definition} A path is naturally oriented if all edges of the path are oriented in one direction. \end{definition} \begin{lemma}\label{atmost} Let $ D $ be a weighted oriented graph such that at most one edge is oriented into each vertex. Let $D^{\prime}$ be an induced weighted naturally oriented path of length $3$ of $D$ with $V(D^{\prime}) = \{x_{i-1},x_i,x_{i+1},x_{i+2}\}$, $E(D^{\prime}) = \{ (x_{j},x_{j+1}) ~|~ i-1 \leq j \leq i+1 \}$, $w(x_i) \geq 2$ and $w(x_{i+1}) = 1$. Then $ {I(D)}^{(3)} \neq {I(D)}^3 $. \end{lemma} \begin{proof} Let $w_j = w(x_j)$ for $x_j \in V(D^{\prime})$. We claim $g= x_{i-1}x_i^{w_i}x_{i+1}^2x_{i+2}^{w_{i+2}} \in {I(D)}^{(3)}$. Let $ C $ be a maximal strong vertex cover of $ D $. Suppose $x_{i+2} \notin C$. Then for any strong vertex cover $C^{\prime} \subseteq C,$ $x_{i+2} \notin C^{\prime}$ and so $x_{i+1}$ $ \in C^{\prime}$. 
Since $(x_{i+1},x_{i+2}) \in E(D),$ for each strong vertex cover $C^{\prime} \subseteq C,$ $ x_{i+2} \in N^+_{D}(x_{i+1}) \cap {C^{\prime}}^c$ and hence $x_{i+1} \in L_1^D(C^{\prime})$. This implies $x_{i+1} \in I_{\subseteq C}.$ By Lemma \ref{edge}, $x_{i-1}x_i^{w_i} \in I_{\subseteq C}$. So $ x_{i-1}x_i^{w_i}\cdot(x_{i+1})^2 \in ({I_{\subseteq C}})^{3}$. Hence $g \in ({I_{\subseteq C}})^{3}$. Suppose $x_{i+2} \in C$. By the definition of $D,$ $|N^-_{D}(x_{i+2})|=1$. Since $N^-_{D}(x_{i+2}) = \{x_{i+1}\} \nsubseteq V^+(D),$ we have $x_{i+2} \notin L^{D}_3(C)$ and by Lemma \ref{L3}, at least one element of $N_D(x_{i+2})$ does not belong to $C$. Then for any strong vertex cover $C^{\prime} \subseteq C,$ at least one element of $N_D(x_{i+2})$ does not belong to $C^{\prime}$ and so $x_{i+2} \in C^{\prime}$. This implies $x_{i+2}^{w_{i+2}} \in I_{\subseteq C}.$ By Lemma \ref{edge}, $ x_ix_{i+1} \in I_{\subseteq C}$. So $(x_ix_{i+1})^2\cdot x_{i+2}^{w_{i+2}} \in ({I_{\subseteq C}})^{3}.$ Hence $g \in ({I_{\subseteq C}})^{3}$. Similarly for any maximal strong vertex cover $ C $ of $ D $, we can prove that $g \in ({I_{\subseteq C}})^{3}$. Therefore $g \in I(D)^{(3)}$. Notice that $g \notin I(D^{\prime})^{3}.$ Since $ \operatorname{supp}(g) = V(D^{\prime})$ and $D^{\prime}$ is an induced subgraph of $D$, $g \notin I(D)^{3}.$ Hence the result follows. \end{proof} Next we see some applications of Lemma \ref{atmost} to some weighted oriented graphs. \begin{definition} A cycle is naturally oriented if all edges of the cycle are oriented in the clockwise direction. In a naturally oriented unicyclic graph, the cycle is naturally oriented and each edge of the tree connected with the cycle is oriented away from the cycle. \end{definition} \begin{remark}\label{svc1} Let $D$ be a weighted naturally oriented unicyclic graph. 
By \cite[Proposition 15]{pitones}, $V(D)$ is a strong vertex cover of $D$ if and only if $D$ is naturally oriented and $w(x) \geq 2$ when $\deg_D(x)\geq 2 $ for all $x \in V(D)$. \end{remark} In the next result, we characterize the weighted naturally oriented unicyclic graphs with unique odd cycles for the equality of ordinary and symbolic powers of their edge ideals. \begin{theorem}\label{unicyclic} Let $D$ be a weighted naturally oriented unicyclic graph with a unique odd cycle $C_{2n+1} = (x_1,\ldots,x_{2n+1})$. Then $ I(D)^{(s)} = I(D)^s $ for all $s \geq 2$ if and only if $w(x) \geq 2$ when $\deg_D(x)\geq 2 $ for all $x \in V(D)$. \end{theorem} \begin{proof} If $w(x) \geq 2$ when $\deg_D(x)\geq 2 $ for all $x \in V(D)$, then by Remark \ref{svc1} and Lemma \ref{mandal}, we have ${I(D)}^{(s)}={I(D)}^s $ for all $ s \geq 2.$ Now we assume that ${I(D)}^{(s)}={I(D)}^s $ for all $ s \geq 2.$ First we claim $w(x) \neq 1$ for all $x \in V(C_{2n+1})$. Suppose it is not true. Without loss of generality we can assume that $w(x_1)=1.$ Here $N_D^-(x_2) = \{x_1\} \nsubseteq V^+(D)$. Thus $ x_2 \in V (C_{2n+1}) \setminus N_D^+(V^+(D))$. Hence by Proposition \ref{oddcycle}, $ I(D)^{(n+1)} \neq I(D)^{n+1} $, which is a contradiction. So our claim follows. Now we claim $w(x) \geq 2$ when $\deg_D(x)\geq 2 $ for all $x \in V(D)\setminus V(C_{2n+1})$. Suppose it is not true. Then there exists some $x_{i_t} \in V(D)\setminus V(C_{2n+1}) $ such that $\deg_D(x_{i_t}) \geq 2$ and $w(x_{i_t} ) = 1 $. Without loss of generality we can assume that $x_{i_t} $ is in some tree $T$ connected with $x_1$. As $T$ is a tree, there is only one path $P$ from $x_1 $ to $ x_{i_t} $. Let $P = x_1x_{i_1} x_{i_2}\ldots x_{i_t}$ be that path whose length is $t$ and since $\deg_D(x_{i_t}) \geq 2$, there exists some $y_j \in N_D^+(x_{i_t})$. 
Without loss of generality we can assume that $t$ is the least integer such that $\deg_D(x_{i_t}) \geq 2$ and $w(x_{i_t} ) = 1 $. That means $w(x_{i_1} ) \geq 2,\ldots,w(x_{i_{t-1}} ) \geq 2 $ and $ w(x_{i_{t}} ) = 1 $. Note that $t \geq 1$. \textbf{Case (1)} Suppose $t =1 $. There exists an induced weighted naturally oriented path $D^{\prime}$ of $D$ with $V(D^{\prime}) = \{x_{2n+1},x_1,x_{i_1},y_j\}$, $E(D^{\prime}) = \{ (x_{2n+1},x_1),(x_{1},x_{i_1}),(x_{i_1},y_j) \}$, $w(x_1) \geq 2$ and $w(x_{i_1}) = 1$. \textbf{Case (2)} Suppose $t =2 $. There exists an induced weighted naturally oriented path $D^{\prime}$ of $D$ with $V(D^{\prime}) = \{x_1,x_{i_1},x_{i_2},y_j\}$, $E(D^{\prime}) = \{ (x_{1},x_{i_1}),(x_{i_1},x_{i_2}),(x_{i_2},y_j) \}$, $w(x_{i_1}) \geq 2$ and $w(x_{i_2}) = 1$. \textbf{Case (3)} Suppose $t \geq 3 $. There exists an induced weighted naturally oriented path $D^{\prime}$ of $D$ with $V(D^{\prime}) = \{x_{i_{t-2}},x_{i_{t-1}},x_{i_t},y_j\}$, $E(D^{\prime}) = \{(x_{i_{t-2}},x_{i_{t-1}}),(x_{i_{t-1}},x_{i_t} ), (x_{i_t},y_j) \}$, $w(x_{i_{t-1}}) \geq 2$ and $w(x_{i_t}) = 1$. Thus by Lemma \ref{atmost}, for any $t \geq 1$, we have $ {I(D)}^{(3)} \neq {I(D)}^3 $, which is a contradiction. Hence the claim follows. \end{proof} Now we compare the ordinary and symbolic powers of edge ideals of weighted oriented even cycles. We observe that \cite[Lemma 48]{pitones} is true for the weighted oriented even cycle of length $4$. Hence we get the following result. \begin{lemma}\cite[Lemma 48]{pitones}\label{C6} Let $ D $ be a weighted oriented cycle whose underlying graph is $ C_n = (x_1,\ldots,x_n) $ where $ n \geq 4 $ and $ n \neq 5 .$ Then $ D $ has the minimal-strong property if and only if the vertices of $ V^+(D) $ are sinks. \end{lemma} \begin{theorem}\label{evencycle} Let $ D $ be a weighted oriented even cycle. 
If $ V(D) $ is a strong vertex cover of $ D $ or $ D $ has the minimal-strong property, then $ {I(D)}^{(s)} = {I(D)}^s $ for all $s \geq 2.$ \end{theorem} \begin{proof} If $ V(D) $ is a strong vertex cover of $ D ,$ then by Lemma \ref{kanoy}, we get $ {I(D)}^{(s)} = {I(D)}^s $ for all $s \geq 2.$ If $ D $ has the minimal-strong property, then by Lemma \ref{C6} and Lemma \ref{DG}, we have $ {I(D)}^{(s)} = {I(D)}^s $ for all $s \geq 2.$ \end{proof} \begin{remark}\label{one.weight} The converse of Theorem \ref{evencycle} need not be true.\\ \hspace*{0.5cm}For example consider the weighted oriented even cycle $D$ as in Figure \ref{fig.1}. Let $I= I(D)$. Then $ I = ( x_1x_2^{w_2},x_2x_3,x_3x_4,x_4x_1 )$. \begin{figure} \caption{A weighted naturally oriented even cycle $D$ of length $4$.} \label{fig.1} \end{figure} Since $N_D^-(x_2) \cap V^+(D) = \phi,$ by Remark \ref{s.v.1}, $ V(D) $ is not a strong vertex cover of $D.$ The vertex covers of $D$ except $V(D)$ are $C_1= \{x_1, x_3\}$, $C_2 = \{x_2, x_4\},$ $C_3 = \{x_2, x_3, x_4 \},$ $C_4 =\{x_1, x_2, x_4 \},$ $C_5 =\{x_1, x_3, x_4 \}$ and $C_6 =\{x_1, x_2, x_3 \} .$ Consider $C_1 =\{ x_1,x_3 \}$. Here $ x_2 \in N_D^+(x_1) \cap C_{1}^c$ and $ x_4 \in N_D^+(x_3) \cap C_{1}^c$. So $L_1^D(C_1) = \{x_1,x_3\} .$ Note that $C_1$ is minimal and by Lemma \ref{minimal to strong}, $C_1$ is strong. Hence $I_{C_{1}} =( x_1,x_3).$ By the same argument we can prove that $C_2 $ is strong and $I_{C_{2}} =(x_2,x_4).$ Consider $C_3 = \{x_2, x_3, x_4 \}.$ Here $ x_1 \in N_D^+(x_4) \cap C_{3}^c$, $ x_3 \notin N_D^+(x_2) \cap C_3^c$ and $ x_4 \notin N_D^+(x_3) \cap C_3^c$. So $L_1^D(C_3) = \{x_4\} .$ Here $ x_1 \in N_D^-(x_2) \cap C_3^c$ and $ x_2 \notin N_D^-(x_3) \cap C_3^c.$ Thus $L_2^D(C_3) = \{x_2\}$ and $L_3^D(C_3) = \{x_3\}.$ Since $ x_2 \in N_D^-(x_3) \cap V^+(D) \cap L_2^D(C_{3}),$ $C_{3}$ is strong. 
Hence we have $I_{C_{3}} =( x_2^{w_2},x_3^{w_3}, x_{4} )=( x_2^{w_2},x_3, x_{4} )$. Consider $C_4 = \{x_1, x_2, x_4 \}.$ By Lemma \ref{L3}, $L_3^D(C_4) = \{x_1\}.$ Since $N_D^-(x_1) = \{x_4\} \nsubseteq V^+(D),$ by Remark \ref{s.v.1}, $C_{4}$ is not strong. By the same argument we can prove that $C_5 $ and $C_6 $ are not strong. Thus $C_1,C_2$ and $C_3$ are the only strong vertex covers of $D.$ Note that $C_3$ is not a minimal vertex cover of $ D .$ So $ V(D) $ is neither a strong vertex cover of $ D $ nor does $ D $ have the minimal-strong property. We claim that $ I^{(s)} = I^s $ for all $s \geq 2.$ By Lemma \ref{cooper}, we have $I^{(s)} = ((x_2^{w_2},x_3,x_4)\cap(x_2, x_4) )^s \bigcap (x_1, x_3)^s = (x_2^{w_2},x_2x_3,x_4)^s \cap (x_1, x_3)^s.$ Let $\bar{m} \in \mathcal{G}(I^{(s)} )$. Then $\bar{m} = \operatorname{lcm}(m_1,m_2)$ for some $m_1 \in \mathcal{G}((x_2^{w_2},x_2x_3,x_4)^s )$ and $m_2 \in \mathcal{G}((x_1, x_3)^s ).$ Thus $ m_1 = (x_2^{w_2})^{a_1 } (x_2x_3)^{a_2}(x_4)^{a_3} $ and $ m_2 = (x_1)^{b_1 } (x_3)^{b_2} $ for some $a_i, b_i \geq 0$ with $a_1 + a_2 +a_3 = s$ and $b_1 + b_2 = s.$ \textbf{Case (1)} Assume that $b_2 \leq a_2 .$ Then $b_1 \geq a_1 + a_3.$\\ Thus $\bar{m} = \operatorname{lcm}(m_1,m_2) = (x_2^{w_2})^{a_1 } (x_2x_3)^{a_2}(x_4)^{a_3} (x_1)^{b_1 }= (x_1x_2^{w_2})^{a_1 } (x_2x_3)^{a_2}(x_4x_1)^{a_3} (x_1)^{b_1 - (a_1 + a_3)}.$ Hence $\bar{m} \in I^{a_1 +a_2 +a_3} = I^s.$\\ \textbf{Case (2)} Assume that $b_2 > a_2 .$\\ Thus $\bar{m} = \operatorname{lcm}(m_1,m_2) = (x_2^{w_2})^{a_1 } (x_2x_3)^{a_2}(x_4)^{a_3}(x_3)^{b_2 -a_2} (x_1)^{b_1 }$. Here $b_2 - a_2 + b_1 = s- a_2 =a_1 + a_3.$ Note that $x_2^{w_2}$ can pair up with $x_1$ to get some element of $\mathcal{G}(I)$. Also $x_2^{w_2}$ can pair up with $x_3$ to get a multiple of some element of $\mathcal{G}(I)$. Similarly $x_4$ can pair up with $x_3$ or $x_1$ to get some element of $\mathcal{G}(I)$. 
Thus $(x_2^{w_2})^{a_1 } (x_4)^{a_3}(x_3)^{b_2 -a_2} (x_1)^{b_1 }$ can be expressed as a multiple of a product of $a_1 + a_3$ elements of $\mathcal{G}(I)$. Therefore $\bar{m} \in I^{ (a_1 + a_3)+a_2} = I^s.$ Hence the claim follows. \end{remark} In this paper we characterize the weighted naturally oriented even cycles for the equality of all the ordinary and symbolic powers of their edge ideals. \begin{remark}\label{svc} Let $D$ be a weighted oriented cycle. By Remark \ref{svc1}, $V(D)$ is a strong vertex cover of $D$ if and only if $D$ is naturally oriented and all vertices of $D$ have non-trivial weights. \end{remark} By the above remark and Corollary \ref{oddcycle3}, we see that, if $ D $ is a weighted naturally oriented odd cycle, then all vertices of $D$ have non-trivial weights if and only if $ I(D)^{(s)} = I(D)^s $ for all $s \geq 2$. \begin{lemma}\label{all.weights} Let $ D $ be a weighted naturally oriented cycle. If all vertices of $D$ have non-trivial weights, then $ {I(D)}^{(s)}={I(D)}^s $ for all $ s \geq 2.$ \end{lemma} \begin{proof} It follows from Remark \ref{svc} and Lemma \ref{mandal}. \end{proof} In the next result, if $ D $ is a weighted naturally oriented cycle of length $ n \neq 4 $ where at least one vertex has non-trivial weight, we see that only the equality of the $3$rd ordinary and symbolic powers ensures the non-trivial weight of each vertex. \begin{theorem}\label{cycle1} Let $ D $ be a weighted naturally oriented cycle whose underlying graph is $ C_n=(x_1,x_2,\ldots,x_n) ,$ where $n \neq 4$ and at least one vertex of $D$ has non-trivial weight. Then all vertices of $D$ have non-trivial weights if and only if $ I(D)^{(3)} = I(D)^3 .$ \end{theorem} \begin{proof} Here $V(D) = \{x_1,\ldots,x_{n} \}$. 
Let $w_i = w(x_i)$ for $x_i \in V(D).$ If all vertices of $D$ have non-trivial weights, then by Lemma \ref{all.weights}, we have $ {I(D)}^{(3)} = {I(D)}^3.$ Now we assume that ${I(D)}^{(3)} = {I(D)}^3.$ Suppose all vertices of $D$ do not have non-trivial weights. We know that at least one vertex of $D$ has non-trivial weight. Without loss of generality we can assume that $w(x_2) \geq 2$ and $w(x_3)=1.$ \textbf{Case (1)} Assume that $n \geq 5.$ Then there exists an induced weighted naturally oriented path $D^{\mathcal{P}rime}$ of $D$ with $V(D^{\mathcal{P}rime}) = \{x_{1},x_{2},x_{3},x_{4}\}$, $E(D^{\mathcal{P}rime}) = \{(x_{1},x_{2}),(x_{2},x_{3}), (x_{3},x_{4}) \}$, $w(x_{2}) \geq 2$ and $w(x_{3}) = 1$. By Lemma \ref{atmost}, we have $x_1x_2^{w_2}x_3^2x_4^{w_4} \in {I(D)}^{(3)} \setminus {I(D)}^3 $, which is a contradiction. \textbf{Case (2)} Assume that $n =3.$ Note that $ x_1 \in V (C_{3}) \setminus N_D^+(V^+(D))$. By Proposition \ref{oddcycle}, $x_1^{w_1}x_2^{w_2}x_3 \in {I(D)}^{(2)} \setminus {I(D)}^2$. Then by Lemma \ref{edge} and Lemma \ref{cooper}, we have $(x_1^{w_1}x_2^{w_2}x_3)(x_1x_2^{w_2}) \in {I(D)}^{(3)} \setminus {I(D)}^3$, which is a contradiction. \end{proof} \begin{remark}\label{C6.} Let $ D $ be a weighted naturally oriented cycle of length $n$ with underlying graph $G$ is $ C_n=(x_1,x_2,\ldots,x_n) ,$ where $ n \mathfrak{m} athfrak{n} eq 4,6 $ and at least one vertex of $D$ has non-trivial weight. If we assume $w(x_2) \geq 2$ and $w(x_3)=1$, then by the similar argument as in Theorem \ref{cycle1}, we find that $x_1x_2^{w_2}x_3x_6^{w_6} \in I(D)^{(2)} \setminus I(D)^2$ for $ n \geq 7 $ and $x_1^{w_1}x_2^{w_2}x_3 \in I(D)^{(2)} \setminus I(D)^2$ for $ n = 3 $ and $ 5 $. Hence $I(D)^{(2)} = I(D)^2$ implies all vertices of $ D $ have non-trivial weights and it ensures the equality of all the ordinary and symbolic powers. But it is not true for weighted naturally oriented even cycles of length $ 6. 
$ For example consider the weighted naturally oriented cycle $ D $ whose underlying graph is $ C_6 = (x_1, x_2, \ldots, x_6) $ and only $ x_2 $ has non-trivial weight $2$. Then $ I(D) = (x_1x_2^{2}, x_2x_3, x_3x_4, x_4x_5, x_5x_6, x_6x_1).$ Using Macaulay 2, we observe $I(D)^{(2)} = I(D)^2$ but $I(D)^{(3)} \neq I(D)^3$ and in this case all the vertices except $ x_2 $ have trivial weights. In Theorem \ref{cycle1}, $I(D)^{(3)} = I(D)^3$ guarantees that all vertices of $ D $ have non-trivial weights and it ensures the equality of all the ordinary and symbolic powers. But if $ D $ is a weighted naturally oriented cycle whose underlying graph is $ C_4 = (x_1, x_2, x_3, x_4) $ and at least one vertex of $ D $ has non-trivial weight, we do not even need each vertex to be of non-trivial weight to ensure the equality of all the ordinary and symbolic powers (see Proposition \ref{evencycle2}). \end{remark} In the next two results, we give a necessary and sufficient condition for the equality of ordinary and symbolic powers of edge ideals of weighted naturally oriented even cycles. \begin{corollary}\label{evencycle1} Let $ D $ be a weighted naturally oriented even cycle with underlying graph $C_{n} = (x_1,\ldots,x_{n}),$ where $n \neq 4$ and at least one vertex of $D$ has non-trivial weight. Then $ I(D)^{(s)} = I(D)^s $ for all $s \geq 2$ if and only if all vertices of $D$ have non-trivial weights. \end{corollary} \begin{proof} If $ I(D)^{(s)} = I(D)^s $ for all $s \geq 2$, then by Theorem \ref{cycle1}, all vertices of $D$ have non-trivial weights. If all vertices of $D$ have non-trivial weights, then by Lemma \ref{all.weights}, we have $ I(D)^{(s)} = I(D)^s $ for all $s \geq 2$. \end{proof} In the next result, we characterize the weighted naturally oriented even cycles of length $ 4 $ for the equality of ordinary and symbolic powers of their edge ideals. 
\begin{proposition}\label{evencycle2} Let $ D $ be a weighted naturally oriented even cycle with underlying graph $C_{4} = (x_1,x_2,x_3,x_{4})$ and at least one vertex of $D$ has non-trivial weight. Then $ I(D)^{(s)} = I(D)^s $ for all $s \geq 2$ if and only if $D$ satisfies one of the following conditions: \begin{enumerate} \item all vertices of $D$ have non-trivial weights, \item one vertex of $D$ has non-trivial weight, \item only two non-consecutive vertices of $D$ have non-trivial weights. \end{enumerate} \end{proposition} \begin{proof} Let $I = I(D)$. If $D$ satisfies $ (1), $ then by Lemma \ref{all.weights}, we have $ I^{(s)} = I^s $ for all $s \geq 2$. If $D$ satisfies $ (2), $ then by Remark \ref{one.weight}, we get $ I^{(s)} = I^s $ for all $s \geq 2$. Now assume $D$ satisfies $ (3). $ Here two non-consecutive vertices of $D$ have non-trivial weights. Without loss of generality we can assume that $w(x_2) \mathfrak{m} athfrak{n} eq 1$ and $w(x_4) \mathfrak{m} athfrak{n} eq 1.$ Then $I=(x_1x_2^{w_2},x_2x_3,x_3x_4^{w_4},x_4x_1)$ and by the similar argument as in Remark \ref{one.weight}, we find $I^{(s)} = (x_1,x_3 )^s \bigcap ((x_2^{w_2},x_3,x_4)\cap(x_2, x_4) )^s \bigcap ((x_1,x_2,x_4^{w_4})\cap(x_2, x_4) )^s = (x_1,x_3 )^s \cap (x_2^{w_2},x_2x_3,x_4)^s \cap (x_2, x_1x_4, x_4^{w_4})^s.$ We claim that $I^{(s)} \subseteq I^s.$\\ We prove this by induction on $s.$ The case for $s=1$ is trivial. Let $\bar{m} \in \mathfrak{m} athcal{G}(I^{(s)} )$. 
Then $\bar{m} = \operatorname{lcm}(m_1,m_2,m_3)$ for some $m_1 \in \mathfrak{m} athcal{G}((x_1,x_3)^s )$, $m_2 \in \mathfrak{m} athcal{G}((x_2^{w_2},x_2x_3,x_4)^s )$ and $m_3 \in \mathfrak{m} athcal{G}((x_2, x_1x_4, x_4^{w_4})^s ).$ Thus $ m_1 = (x_1)^{a_1 } (x_3)^{a_2} $, $ m_2 = (x_2^{w_2})^{b_1 } (x_2x_3)^{b_2}(x_4)^{b_3} $ and $ m_3 = (x_2)^{c_1 } (x_1x_4)^{c_2}(x_4^{w_4})^{c_3} $ for some $a_i, b_i,c_i \geq 0$ with $a_1 + a_2 = s,$ $b_1 + b_2 + b_3 = s$ and $c_1 + c_2 +c_3 = s.$ \vspace*{2mm}pace*{0.1cm}\\ \textbf{Case (1)} Assume that $a_1 \mathfrak{m} athfrak{n} eq 0.$ If $b_3 \mathfrak{m} athfrak{n} eq 0,$ then $\bar{m}$ is divisible by $x_1x_4$ and observe that $\frac{\bar{m}}{x_1x_4}$ $ \in (x_1,x_3 )^{s-1} \cap (x_2^{w_2},x_2x_3,x_4)^{s-1}\\ \cap (x_2, x_1x_4, x_4^{w_4})^{s-1}=I^{(s-1)}.$ Hence by induction hypothesis $\frac{\bar{m}}{x_1x_4}$ $ \in I^{s-1}$ and so $\bar{m} \in I^s.$ If $b_2 \mathfrak{m} athfrak{n} eq 0,$ then $\bar{m}$ is divisible by $x_2x_3$ and notice that $\frac{\bar{m}}{x_2x_3}$ $ \in (x_1,x_3 )^{s-1} \cap (x_2^{w_2},x_2x_3,x_4)^{s-1} \cap (x_2, x_1x_4, x_4^{w_4})^{s-1}=I^{(s-1)}.$ Hence by induction hypothesis $\frac{\bar{m}}{x_2x_3}$ $ \in I^{s-1}$ and so $\bar{m} \in I^s.$ Now we assume $b_3=b_2=0.$ Then $b_1=s$ and $\operatorname{lcm}(m_1,m_2) \in I^s.$ So $\bar{m} \in I^s.$ \textbf{Case (2)} Assume that $a_1 = 0.$ Then $a_2=s$ and $\operatorname{lcm}(m_1,m_3) \in I^s.$ Hence $\bar{m} \in I^s.$ \vspace*{2mm}pace*{0.1cm}\\ Next we prove the converse part. Let us assume that $ I^{(s)} = I^s $ for all $s \geq 2$. Suppose none of $ (1), $ $ (2) $ and $(3)$ is true. Then $D$ must satisfy one of the following conditions: \item [(a)] only two consecutive vertices of $D$ have non-trivial weights, \item [(b)] only three vertices of $D$ have non-trivial weights. \item [(a)] Assume that $D$ has only two consecutive vertices with non-trivial weights. 
Without loss of generality we can assume that $w(x_2) \mathfrak{m} athfrak{n} eq 1$ and $w(x_3) \mathfrak{m} athfrak{n} eq 1.$ Then $I=(x_1x_2^{w_2},x_2x_3^{w_3},\\x_3x_4,x_4x_1)$ and by the similar argument as in Remark \ref{one.weight}, we find that $I^{(3)} = ((x_1,x_3^{w_3},x_4)\\\cap(x_1, x_3) )^3 \bigcap ((x_2^{w_2},x_3^{w_3},x_4)\cap(x_2, x_4) )^3 = (x_1,x_3^{w_3},x_3x_4)^3 \cap (x_2^{w_2},x_2x_3^{w_3},x_4)^3.$ Observe that $x_1x_2x_3^{w_3}x_4^2 \in I^{(3)} \setminus I^3$, which is a contradiction. \item [(b)] Assume that $D$ has only three vertices with non-trivial weights. Without loss of generality we can assume that $w(x_2) \mathfrak{m} athfrak{n} eq 1,$ $w(x_3) \mathfrak{m} athfrak{n} eq 1$ and $w(x_4) \mathfrak{m} athfrak{n} eq 1.$ Then $I=(x_1x_2^{w_2},x_2x_3^{w_3},x_3x_4^{w_4},\\x_4x_1)$ and by the similar argument as in Remark \ref{one.weight}, we find that $I^{(2)} = ((x_1,x_3^{w_3},x_4^{w_4})\cap(x_1, x_3) )^2 \bigcap ((x_1,x_2,x_4^{w_4})\cap(x_2, x_4) )^2 \bigcap ((x_2^{w_2},x_3^{w_3},x_4)\cap(x_2, x_4) )^2 = (x_1,x_3^{w_3},x_3x_4^{w_4})^2 \cap (x_2,x_1x_4,x_4^{w_4})^2 \cap (x_2^{w_2},x_2x_3^{w_3},x_4)^2.$ Notice that $x_1x_2x_3x_4^{w_4} \in I^{(2)} \setminus I^2$, which is a contradiction. Hence the proof follows. \end{proof} \section{Comparing symbolic powers of weighted oriented graphs} In this section, we show that the symbolic powers of edge ideals of a weighted oriented graph $ D $ and the new weighted oriented graph $ D^{\mathcal{P}rime} $ obtained from $D$ after replacing the weights of vertices with non-trivial weights which are sink, by trivial weights, behave in a similar way. We see that using the symbolic powers of edge ideals of one class of weighted oriented graphs, we can compute the symbolic powers of edge ideals of another class of weighted oriented graphs. 
\begin{notation} \label{phi.} Let $ D $ be a weighted oriented graph, where $ U \subseteq V^{+}(D) $ is the set of vertices which are sinks and $ w_j =w(x_j) $ if $ x_j \in V^{+}(D) .$ Let $ D' $ be the weighted oriented graph obtained from $D$ after replacing $ w_j $ by $ w_j = 1 $ if $ x_j \in U .$ Let $ V(D) = V(D') = V = \{x_1,\ldots,x_n\}. $ Let $R=k[x_1,\ldots,x_n]=$ $\displaystyle{{\bigoplus_{d=0}^{\infty}}R_d}$ be the standard graded polynomial ring. Consider the map \begin{align*} \Phi : R \longrightarrow R ~ \mbox{where} ~ x_j \longrightarrow x_j ~ \mbox{if} ~ x_j \notin U ~\mbox{and} ~ x_j \longrightarrow x_j^{w_j}~ \mbox{if} ~ x_j \in U . \end{align*} \end{notation} Here $ \Phi $ is an injective homomorphism of $ k $-algebras. By \cite[Corollary 5]{gimenez}, $I(D)$ is Cohen--Macaulay if and only if $I(D')$ is Cohen--Macaulay. We want to investigate the relationship between the symbolic powers of $I(D)$ and $I(D').$ The next two lemmas are very important to prove our result in Theorem \ref{sym.theorem.1}. \begin{lemma}\label{strong} Let $D,$ $D'$ and $\Phi$ be the same as defined in Notation \ref{phi.}. Then $C$ is a strong vertex cover of $D$ if and only if $C$ is a strong vertex cover of $D'$. \end{lemma} \begin{proof} Since $D$ and $D'$ have the same underlying graph, $C$ is a vertex cover of $D$ if and only if $C$ is a vertex cover of $D'$. Now consider $C$ to be a vertex cover of both $D$ and $D'.$ Here $D$ and $D'$ have the same orientation on edges. Thus $L_i^D(C)$ $=$ $L_i^{D'}(C)$ for $1 \leq i \leq 3$. This implies $C \setminus L_1^D(C)$ $=$ $C \setminus L_1^{D'}(C)$. 
Since each element of $U$ is a sink, $N_{D}^-(x) \cap U = \emptyset$ for $x \in V \setminus U.$ Since two adjacent vertices cannot both be sink vertices, $N_{D}^-(x) \cap U = \emptyset$ for $x \in U.$ Hence for each $ x \in C $, $N_{D}^-(x) \cap U = \emptyset$. Note that $V^+(D)\setminus U = V^+(D').$ Since $D$ and $D'$ have the same orientation on edges, $N_{D}^-(x) = N_{D'}^-(x)$ for each $x \in C.$ Thus for each $ x \in C $, $N_{D}^-(x) \cap V^+(D) =N_{D}^-(x) \cap [V^+(D)\setminus U] = N_{D'}^-(x) \cap [V^+(D)\setminus U] = N_{D'}^-(x) \cap V^+(D').$ Therefore for each $ x \in $ $L_3^D(C) =$ $L_3^{D'}(C),$ we have $N_{D}^-(x) \cap V^+(D)\cap [C\setminus {L_1^D{(C)}} ] = N_{D'}^-(x) \cap V^+(D')\cap [C\setminus {L_1^{D'}{(C)}} ]$ and $N_{D}^-(x) \cap V^+(D)\cap [C\setminus {L_1^{D}{(C)}} ] \neq \emptyset $ if and only if $ N_{D'}^-(x) \cap V^+(D')\cap [C\setminus {L_1^{D'}{(C)}} ]\neq \emptyset$. Hence by Remark \ref{s.v.1}, $C$ is a strong vertex cover of $D$ if and only if $C$ is a strong vertex cover of $D'$. \end{proof} \begin{lemma}\label{intersections} Let $D,$ $D'$ and $\Phi$ be the same as defined in Notation \ref{phi.}. Let ${I}$ and $\tilde{I}$ be the edge ideals of $D$ and $D',$ respectively. Let $C_{1_1},\ldots,C_{{r}_1}$ be the maximal strong vertex covers of both $ D $ and $ D'.$ Let $C_{i_2},\ldots,C_{i_{t_i}}$ be the strong vertex covers of both $ D $ and $ D' $ such that $C_{i_j} \subset C_{i_1} $ for $2 \leq j \leq t_i$ and $1 \leq i \leq r.$ Let ${I}_{C_{i_j}}$ and $\tilde{I}_{C_{k_l}} $ be the irreducible ideals associated to $ C_{i_j} $ and $C_{k_l} ,$ respectively. 
Then $$\Phi(\tilde{I}_{C_{i_1}} \cap \tilde{I}_{C_{i_2}} \cap \operatorname{codim}ots \cap \tilde{I}_{C_{i_{t_i}}} ) = {I}_{C_{i_1}} \cap {I}_{C_{i_2}} \cap \operatorname{codim}ots \cap {I}_{C_{i_{t_i}}} ~\mathfrak{m} box{for} ~ 1 \leq i \leq r.$$ Moreover, every element of $ \mathfrak{m} athcal{G}( ({I}_{C_{1_1}} \cap {I}_{C_{1_2}} \cap \operatorname{codim}ots \cap {I}_{C_{1_{t_1}}})^s )$ is of the form $$\displaystyle{ (\mathcal{P}rod_{x_j} x_j^{d_{j_1}} | x_j \in V \setminus U)(\mathcal{P}rod_{x_k} (x_k^{w_k})^{e_{k_1}} | x_k \in U)} \mathfrak{m} box{~for some $d_{j_1},e_{k_1} \geq 0.$}$$ \end{lemma} \begin{proof} First we claim that $ \Phi(\tilde{I}_{C_{1_1}} \cap \tilde{I}_{C_{1_2}} \cap \operatorname{codim}ots \cap \tilde{I}_{C_{1_{t_1}}}) = {I}_{C_{1_1}} \cap {I}_{C_{1_2}} \cap \operatorname{codim}ots \cap {I}_{C_{1_{t_1}}}. $ Consider any strong vertex cover $C_{1_i}$ of both $D$ and $D^{\mathcal{P}rime}.$ From the proof of Lemma \ref{strong}, $L_p^D(C_{1_i})$ $=$ $L_p^{D^{\mathcal{P}rime}}(C_{1_i})$ for $1 \leq p \leq 3$. Notice that $N_{D}^+(x) = N_{D^{\mathcal{P}rime}}^+(x) = \mathcal{P}hi $ for each $x \in U$. So $L_1^{D}(C_{1_i}) \cap U = L_1^{D^{\mathcal{P}rime}}(C_{1_i}) \cap U = \mathcal{P}hi$. This implies $ U \subseteq L_2^{D}(C_{1_i}) \cup L_3^{D}(C_{1_i}) = L_2^{D^\mathcal{P}rime}(C_{1_i}) \cup L_3^{D^\mathcal{P}rime}(C_{1_i}) $. By definition $ \tilde{I}_{C_{1_i}} = ( L_1^{D^\mathcal{P}rime}(C_{1_i}) \cup \{x_j^{w_j}~|~x_j \in [L_2^{D^\mathcal{P}rime}(C_{1_i}) \cup L_3^{D^\mathcal{P}rime}(C_{1_i})] \setminus U )\} \cup \{x_k~|~x_k \in U )\} ) $ and $ {I}_{C_{1_i}} = ( L_1^{D}(C_{1_i}) \cup \{x_j^{w_j}~|~x_j \in [L_2^{D}(C_{1_i}) \cup L_3^{D} (C_{1_i})] \setminus U )\} \cup \{x_k^{w_k}~|~x_k \in U )\} ) $. 
Note that $ \Phi(\tilde{I}_{C_{1_i}}) = {I}_{C_{1_i}} $ for $1 \leq i \leq t_1.$ By Lemma \ref{atiyah}, we have $\Phi(\tilde{I}_{C_{1_1}} \cap \tilde{I}_{C_{1_2}} \cap \operatorname{codim}ots \cap \tilde{I}_{C_{1_{t_1}}} ) \subseteq \Phi(\tilde{I}_{C_{1_1}}) \cap \Phi(\tilde{I}_{C_{1_2}}) \cap \operatorname{codim}ots \cap \Phi(\tilde{I}_{C_{1_{t_1}}} ) = {I}_{C_{1_1}} \cap {I}_{C_{1_2}} \cap \operatorname{codim}ots \cap {I}_{C_{1_{t_1}}} $. \vspace*{2mm}pace*{0.2cm}\\ Let $ f \in \mathfrak{m} athcal{G}({I}_{C_{1_1}} \cap {I}_{C_{1_2}} \cap \operatorname{codim}ots \cap {I}_{C_{1_{t_1}}}).$ Then $f= \operatorname{lcm} (f_1,f_2,\ldots,f_{t_1})$ for some $f_i \in \mathfrak{m} athcal{G}({I}_{C_{1_i}} )$ where $1 \leq i \leq t_1.$ Here each $ f_i $ involves only one variable. Fix any $i \in [t_1]$. If $f_i$ involves the variable $x_l,$ then \begin{equation*} f_i= \begin{cases} &x_l \vspace*{2mm}pace*{0.2cm}~\mathfrak{m} box{if}~ x_l \in L_1^{D}(C_{1_i}) \\ &x_l^{w_l} ~\mathfrak{m} box{if}~ x_l \in [L_2^{D}(C_{1_i}) \cup L_3^{D} (C_{1_i})] \setminus U\vspace*{2mm}pace*{0.2cm}\\ &x_l^{w_l} ~\mathfrak{m} box{if}~ x_l \in U \end{cases} \end{equation*} Thus we can express $f$ as \begin{equation}\label{f} f = \displaystyle{(\mathcal{P}rod_{x_j} (x_j^{a_j})^{b_{j}} | x_j \in V \setminus U)(\mathcal{P}rod_{x_k} (x_k^{w_k})^{c_{k}} | x_k \in U)} \end{equation} for some $a_{j} = 1 $ or $w_{j},$ $b_{j} = 0 $ or $1$ and $c_k = 0 $ or $1.$ We want to find some $g \in \tilde{I}_{C_{1_1}} \cap \tilde{I}_{C_{1_2}} \cap \operatorname{codim}ots \cap \tilde{I}_{C_{1_{t_1}}} $ such that $\Phi(g) = f.$ We set $g_i=f_i$ if $f_i$ involves variable from $V\setminus U$ and $g_i=x_l$ if $f_i = x_l^{w_l}$ where $x_l \in U$ for $1 \leq i \leq t_1.$ Therefore \begin{equation*} g_i= \begin{cases} &x_l \vspace*{2mm}pace*{0.2cm}~\mathfrak{m} box{if}~ x_l \in L_1^{D^\mathcal{P}rime}(C_{1_i}) \\ &x_l^{w_l} ~\mathfrak{m} box{if}~ x_l \in [L_2^{D^\mathcal{P}rime}(C_{1_i}) \cup L_3^{D^\mathcal{P}rime} 
(C_{1_i})] \setminus U\vspace*{2mm}pace*{0.2cm}\\ &x_l ~\mathfrak{m} box{if}~ x_l \in U \end{cases} \end{equation*} Here each $g_i \in \mathfrak{m} athcal{G}({\tilde{I}}_{C_{1_i}} )$ and $\displaystyle{ \operatorname{lcm} (g_1,g_2,\ldots,g_{t_1}) = (\mathcal{P}rod_{x_j} (x_j^{a_j})^{b_{j}} | x_j \in V \setminus U)(\mathcal{P}rod_{x_k} (x_k)^{c_{k}} }| x_k \in U).$ Let $g=\operatorname{lcm} (g_1,g_2,\ldots,g_{t_1}).$ Notice that $\Phi(g) = f.$ Thus $f = \Phi(g)= \Phi(\operatorname{lcm} (g_1,g_2,\ldots,g_{t_1}))$$ \in \Phi(\tilde{I}_{C_{1_1}} \cap \tilde{I}_{C_{1_2}} \cap \operatorname{codim}ots \cap \tilde{I}_{C_{1_{t_1}}} ) .$ Hence $ \Phi(\tilde{I}_{C_{1_1}} \cap \tilde{I}_{C_{1_2}} \cap \operatorname{codim}ots \cap \tilde{I}_{C_{1_{t_1}}} ) = {I}_{C_{1_1}} \cap {I}_{C_{1_2}} \cap \operatorname{codim}ots \cap {I}_{C_{1_{t_1}}} $. By the similar argument, for $2 \leq i \leq r,$ we can prove that $$ \Phi(\tilde{I}_{C_{i_1}} \cap \tilde{I}_{C_{i_2}} \cap \operatorname{codim}ots \cap \tilde{I}_{C_{i_{t_i}}} ) = {I}_{C_{i_1}} \cap {I}_{C_{i_2}} \cap \operatorname{codim}ots \cap {I}_{C_{i_{t_i}}}. 
$$ Let $h \in \mathfrak{m} athcal{G}( ({I}_{C_{1_1}} \cap {I}_{C_{1_2}} \cap \operatorname{codim}ots \cap {I}_{C_{1_{t_1}}})^s ).$ Then $h = h_1\operatorname{codim}ots h_s$ for some $h_i$'s $ \in \mathfrak{m} athcal{G}({I}_{C_{1_1}} \cap {I}_{C_{1_2}} \cap \operatorname{codim}ots \cap {I}_{C_{1_{t_1}}}).$ By (\ref{f}), for $1 \leq i \leq s,$ $\displaystyle{h_i = (\mathcal{P}rod_{x_j} (x_j^{a_{j_i}})^{b_{j_i}} | x_j \in V \setminus U)(\mathcal{P}rod_{x_k} (x_k^{w_k})^{c_{k_i}} | x_k \in U)}$\\ \operatorname{ht}space*{6cm} for some $a_{j_i} = 1 $ or $w_{j},$ $b_{j_i} = 0 $ or $1$ and $c_{k_i} = 0 $ or $1.$ Hence $\displaystyle{h = (\mathcal{P}rod_{x_j} x_j^{d_{j_1}} | x_j \in V \setminus U)(\mathcal{P}rod_{x_k} (x_k^{w_k})^{e_{k_1}} | x_k \in U)}$ where $d_{j_1} = {a_{j_1}}{b_{j_1}} + \operatorname{codim}ots + {a_{j_s}}{b_{j_s}} $ and $e_{k_1} = {c_{k_1}} + \operatorname{codim}ots + {c_{k_s}}.$ \end{proof} In the following theorem, we show that the symbolic powers of edge ideals of $D$ and $D^{\mathcal{P}rime}$ behave in a similar way. \begin{theorem}\label{sym.theorem.1} Let ${I}$ and $\tilde{I}$ be the edge ideals of $D$ and $D^{\mathcal{P}rime},$ respectively. Then $ \Phi(\tilde{I}^s) = {I}^s $ and $ \Phi(\tilde{I}^{(s)}) = {I}^{(s)}$ for all $ s \geq 1.$ \end{theorem} \begin{proof} By the definitions of ${I}$ and $\tilde{I}$, $ \Phi(\tilde{I}) = {I} $. Thus by Lemma \ref{atiyah}, we have $ \Phi(\tilde{I}^s) = (\Phi(\tilde{I}) )^s = {I}^s $ for all $s \geq 1.$ Now we claim that $ \Phi(\tilde{I}^{(s)}) = {I}^{(s)}.$ Let $C_{1_1},\ldots,C_{{r}_1}$ are the maximal strong vertex covers of both $ D $ and $ D^{\mathcal{P}rime} $. 
Let $C_{i_2},\ldots,C_{i_{t_i}}$ are the strong vertex covers of both $ D $ and $ D^{\mathcal{P}rime} $ such that $C_{i_j} \subset C_{i_1} $ for $2 \leq j \leq t_i$ and $1 \leq i \leq r.$ Then $\Phi(\tilde{I}^{(s)}) = \Phi((\tilde{I}_{C_{1_1}} \cap \tilde{I}_{C_{1_2}} \cap \operatorname{codim}ots \cap \tilde{I}_{C_{1_{t_1}}} )^s \cap \operatorname{codim}ots \cap (\tilde{I}_{C_{r_1}} \cap \tilde{I}_{C_{r_2}} \cap \operatorname{codim}ots \cap \tilde{I}_{C_{r_{t_r}}} )^s)\operatorname{ht}space*{0.5cm}$ (by Lemma \ref{cooper})\\ $\operatorname{ht}space*{1.35cm}\subseteq \Phi((\tilde{I}_{C_{1_1}} \cap \tilde{I}_{C_{1_2}} \cap \operatorname{codim}ots \cap \tilde{I}_{C_{1_{t_1}}} )^s) \cap \operatorname{codim}ots \cap \Phi((\tilde{I}_{C_{r_1}} \cap \tilde{I}_{C_{r_2}} \cap \operatorname{codim}ots \cap \tilde{I}_{C_{r_{t_r}}} )^s)\\\operatorname{ht}space*{11.9cm}$ (by Lemma \ref{atiyah})\\ $\operatorname{ht}space*{1.35cm}= ({I}_{C_{1_1}} \cap {I}_{C_{1_2}} \cap \operatorname{codim}ots \cap {I}_{C_{1_{t_1}}} )^s \cap \operatorname{codim}ots \cap ({I}_{C_{r_1}} \cap {I}_{C_{r_2}} \cap \operatorname{codim}ots \cap {I}_{C_{r_{t_r}}} )^s$\\\operatorname{ht}space*{9.3cm} (by Lemma \ref{atiyah} and Lemma \ref{intersections})\\ $\operatorname{ht}space*{1.35cm}= {I}^{(s)} $ \operatorname{ht}space*{6.85cm} (by Lemma \ref{cooper}). Hence $ \Phi(\tilde{I}^{(s)}) \subseteq {I}^{(s)}$ for all $ s \geq 1. $ It remains to show that $ {I}^{(s)} \subseteq \Phi(\tilde{I}^{(s)})$ for all $ s \geq 1. $ Let $ q \in \mathfrak{m} athcal{G}({I}^{(s)}) = \mathfrak{m} athcal{G}(({I}_{C_{1_1}} \cap {I}_{C_{1_2}} \cap \operatorname{codim}ots \cap {I}_{C_{1_{t_1}}} )^s \cap \operatorname{codim}ots \cap ({I}_{C_{r_1}} \cap {I}_{C_{r_2}} \cap \operatorname{codim}ots \cap {I}_{C_{r_{t_r}}} )^s ) $. 
Then $q = \operatorname{lcm}(q_1,q_2,\ldots,q_r)$ for some $q_i \in \mathfrak{m} athcal{G}( ({I}_{C_{i_1}} \cap {I}_{C_{i_2}} \cap \operatorname{codim}ots \cap {I}_{C_{i_{t_i}}})^s)$ where $1 \leq i \leq r.$ By Lemma \ref{intersections}, for $1 \leq i \leq r,$ we have\\ $\operatorname{ht}space*{1cm}q_i = \displaystyle{(\mathcal{P}rod_{x_j} x_j^{d_{j_i}} | x_j \in V \setminus U)(\mathcal{P}rod_{x_k} (x_k^{w_k})^{e_{k_i}} | x_k \in U)}$ for some $d_{j_i},e_{k_i} \geq 0 .$ Hence $\displaystyle{ q = \operatorname{lcm}(q_1,q_2,\ldots,q_r) = (\mathcal{P}rod_{x_j} x_j^{u_{j}} | x_j \in V \setminus U)(\mathcal{P}rod_{x_k} (x_k^{w_k})^{v_{k}} | x_k \in U) }$ where each $u_j = \mathfrak{m} ax\\ \{d_{j_1},\ldots,d_{j_r}\}$ and $v_k = \mathfrak{m} ax \{e_{k_1},\ldots,e_{k_r}\}$. Let $p_i = \displaystyle{(\mathcal{P}rod_{x_j} x_j^{d_{j_i}} | x_j \in V \setminus U)(\mathcal{P}rod_{x_k} (x_k)^{e_{k_i}} | x_k \in U)}$ for $1 \leq i \leq r.$ Then $ \operatorname{lcm}(p_1,p_2,\ldots,p_r) = \displaystyle{ (\mathcal{P}rod_{x_j} x_j^{u_{j}} | x_j \in V \setminus U)(\mathcal{P}rod_{x_k} (x_k)^{v_{k}} | x_k \in U) }$ because each $u_j = \mathfrak{m} ax \{d_{j_1},\ldots,d_{j_r}\}$ and $v_k = \mathfrak{m} ax \{e_{k_1},\ldots,e_{k_r}\}$. 
Since $\Phi(p_i) = q_i\in ({I}_{C_{i_1}} \cap {I}_{C_{i_2}} \cap \operatorname{codim}ots \cap {I}_{C_{i_{t_i}}})^s=\Phi((\tilde{I}_{C_{i_1}} \cap \tilde{I}_{C_{i_2}} \cap \operatorname{codim}ots \cap \tilde{I}_{C_{i_{t_i}}} )^s) $ and $ \Phi $ is an injective, $p_i \in (\tilde{I}_{C_{i_1}} \cap \tilde{I}_{C_{i_2}} \cap \operatorname{codim}ots \cap \tilde{I}_{C_{i_{t_i}}} )^s$ for $1 \leq i \leq r.$ Let $p = \operatorname{lcm}(p_1,p_2,\ldots,p_r).$ Then $ \Phi(p) = q $ where $p = \operatorname{lcm}(p_1,p_2,\ldots,p_r) \in (\tilde{I}_{C_{1_1}} \cap \tilde{I}_{C_{1_2}} \cap \operatorname{codim}ots \cap \tilde{I}_{C_{1_{t_1}}} )^s \cap \operatorname{codim}ots \cap (\tilde{I}_{C_{r_1}} \cap \tilde{I}_{C_{r_2}} \cap \operatorname{codim}ots \cap \tilde{I}_{C_{r_{t_r}}} )^s =\ \tilde{I}^{(s)}$. Thus $q \in \Phi(\tilde{I}^{(s)}) $. Hence $ {I}^{(s)} = \Phi(\tilde{I}^{(s)}) $ for all $ s \geq 1.$ \end{proof} In the next result, we prove that the symbolic defects of edge ideals of $D$ and $D^{\mathcal{P}rime}$ are same. \begin{proposition}\label{sdefect} Let $D$, $D^{\mathcal{P}rime}$ and $\Phi$ are same as defined in Notation \ref{phi.}. Let ${I}$ and $\tilde{I}$ be the edge ideals of $D$ and $D^{\mathcal{P}rime},$ respectively. Then for each $s \geq 1,$ $$\operatorname{sdefect}(\tilde{I},s) = \operatorname{sdefect}(I,s).$$ \end{proposition} \begin{proof} Fix any $s \geq 1.$ Let $X = \mathfrak{m} athcal{G}({I}^{(s)}) \setminus {I}^s$ and $X^{\mathcal{P}rime} = \mathfrak{m} athcal{G}(\tilde{I}^{(s)}) \setminus \tilde{I}^s$. First we claim that $\Phi( X^{\mathcal{P}rime} ) \subseteq X.$ Suppose $ p $ $\in \mathfrak{m} athcal{G}(\tilde{I}^{(s)}) \setminus \tilde{I}^s $. By the similar argument used to get the general form of any element of $ \mathfrak{m} athcal{G}({I}^{(s)}) $ in Theorem \ref{sym.theorem.1}, we can write $\displaystyle{ p = (\mathcal{P}rod_{x_j} x_j^{u_{j}} | x_j \in V \setminus U)(\mathcal{P}rod_{x_k} (x_k)^{v_{k}} | x_k \in U) }$ for some $ u_j , v_k \geq 0. 
$ By Theorem \ref{sym.theorem.1}, $\displaystyle{ \Phi(p) = (\mathcal{P}rod_{x_j} x_j^{u_{j}} | x_j \in V \setminus U)(\mathcal{P}rod_{x_k} (x_k^{w_k})^{v_{k}} | x_k \in U) } \in {I}^{(s)}$. Let $q = \Phi(p).$ Suppose $q \mathfrak{m} athfrak{n} otin $ $ \mathfrak{m} athcal{G}({I}^{(s)}) $. Then $q$ must be multiple of some element of $ \mathfrak{m} athcal{G}({{I}}^{(s)})$ (say $q^{\mathcal{P}rime}$). By Theorem \ref{sym.theorem.1}, we can write $ q^{\mathcal{P}rime}=$$\displaystyle{ (\mathcal{P}rod_{x_j} x_j^{{u_{j}}^{\mathcal{P}rime}} | x_j \in V \setminus U)(\mathcal{P}rod_{x_k} (x_k^{w_k})^{{v_{k}}^{\mathcal{P}rime}} | x_k \in U) }$ where each $ {{u_{j}}^{\mathcal{P}rime}} \leq u_j , {{v_{k}}^{\mathcal{P}rime}} \leq v_k $ and at least one $ {{u_{j}}^{\mathcal{P}rime}} < u_j $ or $ {{v_{k}}^{\mathcal{P}rime}} < v_k $. Let $\displaystyle{ p^{\mathcal{P}rime} = (\mathcal{P}rod_{x_j} x_j^{{u_{j}}^{\mathcal{P}rime}} | x_j \in V \setminus U)}\\{(\mathcal{P}rod_{x_k} (x_k)^{{v_{k}}^{\mathcal{P}rime}} | x_k \in U) }$. By Theorem \ref{sym.theorem.1}, we have ${I}^{(s)} = \Phi(\tilde{I}^{(s)})$. Since $ \Phi(p^{\mathcal{P}rime}) = q^{\mathcal{P}rime} \in {I}^{(s)} = \Phi(\tilde{I}^{(s)})$ and $ \Phi $ is an injective, $p^{\mathcal{P}rime} \in \tilde{I}^{(s)}$. Thus $ p $ is multiple of $p^{\mathcal{P}rime}\in \tilde{I}^{(s)},$ which is a contradiction because $p \in \mathfrak{m} athcal{G}(\tilde{I}^{(s)}) $. Therefore $q \in \mathfrak{m} athcal{G}({I}^{(s)})$. Since $ p $ $\mathfrak{m} athfrak{n} otin \tilde{I}^s ,$ it is easy to see that $ q $ $\mathfrak{m} athfrak{n} otin {I}^s$. So $ q $ $\in \mathfrak{m} athcal{G}({I}^{(s)}) \setminus {I}^s $ and hence $\Phi( X^{\mathcal{P}rime} ) \subseteq X.$ Now consider the map $ \Phi|_{X^{\mathcal{P}rime}} : X^{\mathcal{P}rime} \longrightarrow X. $ It is enough to show $\Phi|_{X^{\mathcal{P}rime}}$ is bijective. We know $\Phi|_{X^{\mathcal{P}rime}}$ is injective. 
Suppose $ g \in \mathcal{G}({I}^{(s)}) \setminus {I}^s $. Then by Theorem \ref{sym.theorem.1}, there exists $ f \in \mathcal{G}(\tilde{I}^{(s)}) \setminus \tilde{I}^s $ such that $ \Phi|_{X'}(f) = g.$ So $\Phi|_{X'}$ is surjective and hence $\Phi|_{X'}$ is bijective. \end{proof} \begin{corollary}\label{cor} Let $D$, $D'$ and $\Phi$ be the same as defined in Notation \ref{phi.}. Let ${I}$ and $\tilde{I}$ be the edge ideals of $D$ and $D',$ respectively. Then $ {\tilde{I}}^{(s)} = {\tilde{I}}^s $ if and only if $ {I}^{(s)} = {I}^s $ for each $s \geq 1.$ \end{corollary} \begin{proof} By Proposition \ref{sdefect}, $\operatorname{sdefect}(\tilde{I},s) =0$ if and only if $ \operatorname{sdefect}(I,s)=0$ for each $s \geq 1.$ Hence the proof follows. \end{proof} If all the vertices of $V^{+}(D)$ are sinks, we get the following two results. \begin{corollary}\label{cor2} Let $D$ be a weighted oriented graph where the vertices of $V^{+}(D)$ are sinks and its underlying graph is $G$. Then $ {{I(G)}}^{(s)} = {{I(G)}}^s $ if and only if $ {I(D)}^{(s)} = {I(D)}^s $ for each $s \geq 1.$ \end{corollary} \begin{proof} Let $D'$ be as defined in Notation \ref{phi.}. Then $D' = G$ and the proof follows from Corollary \ref{cor}. \end{proof} \begin{corollary}\label{cor3} Let $D$ be a weighted oriented graph where the vertices of $V^{+}(D)$ are sinks and its underlying graph is $G$. Then $G$ is bipartite if and only if $ {{I(D)}^{(s)}} = {{I(D)}^{s}} $ for all $ s \geq 2 $. \end{corollary} \begin{proof} It follows from \cite[Theorem 5.9]{simis} and Corollary \ref{cor2}. 
\end{proof} \begin{remark} As an application of Theorem \ref{sym.theorem.1} and Corollary \ref{cor}, by studying the symbolic powers of edge ideals of one class of weighted oriented graphs, we can get information about the symbolic powers of edge ideals of another class of weighted oriented graphs. When we try to find a necessary and sufficient condition for the equality of ordinary and symbolic powers of edge ideals of a certain class of weighted oriented graphs, as an application of Corollary \ref{cor}, we can omit checking the equality of ordinary and symbolic powers of the edge ideals of those weighted oriented graphs in which some vertex with non-trivial weight is a sink. Hence we need to check the equality only for a smaller class of graphs. \end{remark} In the next result, as an application of Corollary \ref{cor}, we give a necessary and sufficient condition for the equality of ordinary and symbolic powers of edge ideals of weighted oriented even cycles of length $ 4 $ which are not naturally oriented. \begin{proposition}\label{evencycle4} Let $D$ be a weighted oriented even cycle which is not naturally oriented with underlying graph $C_{4} = (x_1,x_2,x_3,x_{4})$ and at least one vertex of $D$ has non-trivial weight. Then $ I(D)^{(s)} = I(D)^s $ for all $s \geq 2$ if and only if $D$ is not of class $ (7) $ (see Figure \ref{fig.2}). \end{proposition} \begin{proof} If all vertices of $ D $ have non-trivial weights, then $ D $ is naturally oriented. So $ D $ has at most three vertices with non-trivial weights. Let $D'$ be as defined in Notation \ref{phi.}. Let $I= I(D)$ and $\tilde{I}= I(D')$. We check the equality of ordinary and symbolic powers by considering different cases depending upon the number of vertices with non-trivial weights. We do not consider any weighted oriented even cycle which can be regarded as some weighted naturally oriented even cycle by changing the orientation of edges. 
\textbf{Case (1)} $ D $ has only one vertex with non-trivial weight. Then $ D $ is of class $ (1) $ (see Figure \ref{fig.2}). Here we can think $D^{\mathcal{P}rime} $ as a simple bipartite graph. Hence by Corollary \ref{cor3}, we get $ I^{(s)} = I^s $ for all $s \geq 2$. \begin{figure} \caption{All classes of weighted oriented even cycles of length $ 4 $ which are not naturally oriented.} \label{fig.2} \end{figure} \textbf{Case (2)} $ D $ has only two vertices with non-trivial weights. Then $ D $ is one of the classes $ (2) , (3), (4)$ and $ (5) $ (see Figure \ref{fig.2}). If $ D $ is of class (2), then we can think $D^{\mathcal{P}rime} $ as a weighted naturally oriented even cycle where only one vertex has non-trivial weight. Using Proposition \ref{evencycle2}, we get $ \tilde{I}^{(s)} = \tilde{I}^s $ for all $s \geq 2$. Hence by Corollary \ref{cor}, we have $ I^{(s)} = I^s $ for all $s \geq 2$. If $ D $ is of class $ (3) $, then by the similar argument as in class $ (2) $, $ I^{(s)} = I^s $ for all $s \geq 2$. If $ D $ is of class $ (4) $, then by the similar argument as in class $ (1) $, $ I^{(s)} = I^s $ for all $s \geq 2$. Now assume $ D $ is of class $ (5) $. There is no vertex with non-trivial weight which is sink. Without loss of generality we can assume that $ w(x_2) \mathfrak{m} athfrak{n} eq 1$ and $ w(x_4) \mathfrak{m} athfrak{n} eq 1 $. Then $ I = (x_1x_2^{w_2}, x_2x_3, x_3x_4, x_1x_4^{w_4}).$ By the similar argument as in Remark \ref{one.weight}, we find $ I^{(s)} = (x_1, x_3)^s \bigcap ((x_2^{w_2} , x_3, x_4^{w_4}) \cap (x_2, x_4))^s = (x_1, x_3)^s \cap (x_2^{w_2} , x_2x_3, x_3x_4, x_4^{w_4})^s $. Let $ \bar{m} \in \mathfrak{m} athcal{G}(I^{(s)}). $ Then $ \bar{m} = \operatorname{lcm}(m_1,m_2) $ for some $ m_1 \in \mathfrak{m} athcal{G}((x_1, x_3)^s) $ and $ m_2 \in \mathfrak{m} athcal{G}((x_2^{w_2} , x_2x_3, x_3x_4, x_4^{w_4})^s) $. 
Thus $ m_1 = x_1^{a_1}x_3^{a_2} $ and $ m_2 = (x_2^{w_2})^{b_1}(x_2x_3)^{b_2}(x_3x_4)^{b_3}(x_4^{w_4})^{b_4} $ for some $ a_i, b_i \geq 0 $ with $ a_1 + a_2 = s $ and $ b_1 + b_2 + b_3 + b_4 = s. $ Assume that $ a_1 \geq b_1 + b_4.$ Then $ x_1^{a_1}(x_2^{w_2})^{b_1}(x_2x_3)^{b_2}(x_3x_4)^{b_3}(x_4^{w_4})^{b_4} \mid \operatorname{lcm}(m_1,m_2) = \bar{m}.$ Since $ x_1x_2^{w_2} $ and $ x_1x_4^{w_4} \in \mathcal{G}(I), $ $ x_1^{a_1}(x_2^{w_2})^{b_1}(x_4^{w_4})^{b_4}$ can be expressed as a multiple of a product of $ b_1 + b_4 $ elements of $ \mathcal{G}(I). $ So $ \bar{m} \in I^{(b_2+b_3)+(b_1+b_4) }= I^s.$ Now assume that $ a_1 < b_1 + b_4.$ Then $ a_2 > b_2 + b_3.$ Thus $ \bar{m} = \operatorname{lcm}(m_1,m_2) = x_1^{a_1}x_3^{a_2-(b_2+b_3)}(x_2^{w_2})^{b_1}(x_2x_3)^{b_2}(x_3x_4)^{b_3}(x_4^{w_4})^{b_4} $. Here $ a_1 + a_2 - (b_2 + b_3) = s - (b_2 + b_3) = b_1 + b_4.$ Thus $x_1^{a_1}x_3^{a_2-(b_2+b_3)}(x_2^{w_2})^{b_1}(x_4^{w_4})^{b_4}$ can be expressed as a multiple of a product of $ b_1 + b_4 $ elements of $ \mathcal{G}(I). $ So $ \bar{m} \in I^{(b_2+b_3)+(b_1+b_4) }= I^s.$ Hence $ I^{(s)} = I^s $ for all $s \geq 2$. \textbf{Case (3)} $ D $ has only three vertices with non-trivial weights. Then $ D $ is one of the classes $(6)$ and $ (7) $ (see Figure \ref{fig.2}). If $ D $ is of class $ (6) $, then $D^{\prime}$ is of class $ (5) $. We know $ \tilde{I}^{(s)} = \tilde{I}^s $ for all $s \geq 2$. Hence by Corollary \ref{cor}, we have $ I^{(s)} = I^s $ for all $s \geq 2$. If $ D $ is of class $ (7) $, then we can think of $D^{\prime}$ as a weighted naturally oriented even cycle where only two consecutive vertices have non-trivial weights. By Proposition \ref{evencycle2}, we have $ \tilde{I}^{(s)} \neq \tilde{I}^s $ for some $s \geq 2$. Then by Corollary \ref{cor}, we get $ I^{(s)} \neq I^s $. Hence the proof follows.
\end{proof} Next we see another application of Corollary \ref{cor} to weighted oriented star graphs. \begin{definition} A star graph $ S_n $ of order $ n $ is a tree on $n+1$ vertices with one vertex having degree $n$ and the other $ n $ vertices having degree $1.$ \end{definition} In the next theorem, we show that the ordinary and symbolic powers of the edge ideal of any weighted oriented star graph are equal. \begin{theorem}\label{stargraph} Let $D$ be a weighted oriented star graph whose underlying graph is $S_n$ for some $n \geq 2$. Then $ I(D)^{(s)} = I(D)^s $ for all $s \geq 2.$ \end{theorem} \begin{proof} Let $V(D) = \{x_0,x_1,\ldots,x_n \}$ with $\deg_D(x_0) = n$ and $\deg_D(x_i)=1$ if $i \neq 0$. Here $E(S_n) = \{ \{x_0,x_1 \},\{x_0,x_2 \},\ldots,\{x_0,x_n\} \}$. \textbf{Case (1)} Assume that $w(x_0) = 1.$ If $w(x_i) \neq 1$ for some $i \neq 0$, then $(x_0,x_i)$ $\in E(D).$ This implies $x_i$ is a sink vertex. So all vertices of $V^+(D)$ are sinks. Thus by Corollary \ref{cor3}, we have $ I(D)^{(s)} = I(D)^s $ for all $s \geq 2.$ \textbf{Case (2)} Assume that $w(x_0) \neq 1,$ i.e., $x_0 \in V^+(D).$ Then $x_0$ is not a source vertex, i.e., $ N_D^-(x_0) \neq \phi. $ $\textbf{\underline{\mbox{Case (2.a)}}}$ Suppose $ N_D^+(x_0) = \phi. $ Then $x_0$ is a sink vertex. This implies that each $x_i$ for $i \neq 0$ is a source vertex. So $w_i = 1 $ for each $i \neq 0$. Hence the only vertex of $V^+(D)$ is $x_0$ and it is a sink. Then by Corollary \ref{cor3}, we have $ I(D)^{(s)} = I(D)^s $ for all $s \geq 2.$ $\textbf{\underline{\mbox{Case (2.b)}}}$ Suppose $ N_D^+(x_0) \neq \phi. $ Without loss of generality we can assume that $ N_D^+(x_0) = \{ x_1,x_2,\ldots,x_r \} $ and $ N_D^-(x_0) = \{ x_{r+1},x_{r+2},\ldots,x_n\}$ for some $r \geq 1$.
If $ x_i \in V^+(D) $ for some $ i \in [r], $ then $ x_i $ is a sink. Let $D^{\prime}$ be the same as defined in Notation \ref{phi.}. Then $w_i=1$ for $1 \leq i \leq n$ in $D^{\prime}$. Let $\tilde{I}= I(D^{\prime})$. By Corollary \ref{cor}, it is enough to show that $\tilde{I}^{(s)} = \tilde{I}^s $ for all $s \geq 2$. Note that the two minimal vertex covers of $D^{\prime}$ are $\{x_0\}$ and $\{x_1,\ldots,x_n \}$. By Lemma \ref{minimal to strong}, these are strong vertex covers of $D^{\prime}.$ Consider a vertex cover $C= \{x_0,x_1,x_2,\ldots,x_r\}$. Then $C^c = \{ x_{r+1},x_{r+2},\ldots,x_n\}$. Here $ N_{D^{\prime}}^+(x_0) \cap C^c = \phi $ and $ N_{D^{\prime}}^-(x_0) \cap C^c \neq \phi $. So $x_0 \in L_2^{D^{\prime}}(C)$. By Lemma \ref{L3}, $ L_3^{D^{\prime}}(C) = \{ x_1,x_2,\ldots,x_r \}.$ Since $ x_0 \in N_{D^{\prime}}^-(x_i) \cap V^+({D^{\prime}}) \cap L_2^{D^{\prime}}(C)$ for $1 \leq i \leq r,$ $C$ is a strong vertex cover of ${D^{\prime}}$. Consider $C^{\prime} \subsetneq C$ as a vertex cover of ${D^{\prime}}.$ Since $x_{r+1} \notin C^{\prime},$ $x_0 \in C^{\prime}$. Thus there exist $j$ and $k \in [r]$ such that $x_j\in C^{\prime}$ and $x_k \notin C^{\prime}.$ Without loss of generality $x_j = x_1$ and $x_k = x_2.$ By Lemma \ref{L3}, $x_{1} \in L_3^{D^{\prime}}(C^{\prime})$. Here $N_{D^{\prime}}^-(x_1) = \{x_0\} \subseteq L_1^{D^{\prime}}(C^{\prime})$ because $ x_2 \in N_{D^{\prime}}^+(x_0) \cap {C^{\prime}}^c.$ Hence by Remark \ref{s.v.1}, $C^{\prime}$ is not strong. Let $C_1= \{x_0\}$, $C_2 = \{x_1,\ldots,x_n \}$ and $C_3 = \{x_0,x_1,\ldots,x_r \}$.
Suppose there exists a strong vertex cover $C_4$ of ${D^{\prime}}$ other than $C_1,C_2$ and $C_3$. We know that any vertex cover which is a proper subset of $C_3$ cannot be strong. Thus $C_4$ must contain $x_0$ and some vertex $x_i \in \{ x_{r+1},x_{r+2},\ldots,x_n\}$. Without loss of generality we can assume that $C_4$ contains $x_0$ and $x_{r+1}$. By Lemma \ref{L3}, $x_{r+1} \in L_3^{D^{\prime}}(C_4) $. Since $N_{D^{\prime}}^-(x_{r+1}) = \phi ,$ by Remark \ref{s.v.1}, $C_4$ is not a strong vertex cover of ${D^{\prime}}$. Hence $C_1,C_2$ and $C_3$ are the only strong vertex covers of ${D^{\prime}}.$ Consider $C_1= \{x_0\}.$ Here $ x_1 \in N_{D^{\prime}}^+(x_0) \cap C_1^c $. So $ L_1^{D^{\prime}}(C_1) = \{x_0\}$. Hence $\tilde{I}_{C_1} =(x_0).$ Consider $C_2 = \{x_1,\ldots,x_n \}.$ Then $\tilde{I}_{C_2} =( x_1,\ldots, x_r, x_{r+1},\ldots,x_n ).$ Consider $C_3 = \{x_0,x_1,\ldots,x_r \}.$ We know $ L_2^{D^{\prime}}(C_3) = \{ x_0 \} $. This implies $\tilde{I}_{C_3} =( x_0^{w_0}, x_1,\ldots, x_r ).$ Hence by Lemma \ref{cooper}, we have \begin{align*} \tilde{I}^{(s)} &= ( ( x_0^{w_0}, x_1,\ldots, x_r )\cap(x_0) )^s \bigcap ( x_1,\ldots, x_r, x_{r+1},\ldots,x_n )^s\\ &= ( x_0^{w_0}, x_0x_1,\ldots, x_0x_r )^s \bigcap ( x_1,\ldots, x_r, x_{r+1},\ldots,x_n )^s. \end{align*} Let $\bar{m} \in \mathcal{G}(\tilde{I}^{(s)} )$.
Then $\bar{m} = \operatorname{lcm}(m_1,m_2)$ for some $m_1 \in \mathcal{G}(( x_0^{w_0}, x_0x_1,\ldots , x_0x_r )^s )$ and\\ $m_2 \in \mathcal{G}(( x_1,\ldots , x_r, x_{r+1},\ldots,x_n )^s ).$ Thus $ m_1 = (x_0^{w_0})^{a_0} (x_0x_1)^{a_1}\cdots (x_0x_r)^{a_r}$ and $ m_2 = x_1^{b_1} \cdots x_r^{b_r} x_{r+1}^{b_{r+1}} \cdots x_n^{b_n} $ for some $a_i, b_i \geq 0$ with $\displaystyle{ \sum_{i=0}^{r}a_i = s}$ and $\displaystyle{ \sum_{i=1}^{n}b_i = s}.$ If $a_0 = 0,$ then $m_1 \in \tilde{I}^s$ and so $\bar{m} \in \tilde{I}^s$. Now we assume that $a_0 \neq 0$ and $b_{r+1}+\cdots+b_n \geq a_0.$ Here $m_1x_{r+1}^{b_{r+1}} \cdots x_n^{b_n} \mid \operatorname{lcm}(m_1,m_2) =\bar{m}$. Then we can express $ m_1x_{r+1}^{b_{r+1}} \cdots x_n^{b_n}= (x_0x_1)^{a_1}\cdots (x_0x_r)^{a_r} [(x_0^{w_0})^{a_0}x_{r+1}^{b_{r+1}} \cdots x_n^{b_n}]. $ Since $x_{i}x_0^{w_0} \in \mathcal{G}(\tilde{I})$ for $r+1 \leq i \leq n,$ $(x_0^{w_0})^{a_0}x_{r+1}^{b_{r+1}} \cdots x_n^{b_n}$ can be expressed as a multiple of a product of $a_0$ elements of $\mathcal{G}(\tilde{I})$. So $m_1x_{r+1}^{b_{r+1}} \cdots x_n^{b_n} \in \tilde{I}^{(a_1+a_2+\cdots+a_r)+a_0} = \tilde{I}^s$. Hence $\bar{m} \in \tilde{I}^s$. Finally, we assume that $a_0 \neq 0$ and $b_{r+1}+\cdots+b_n < a_0.$ Here $ (x_0^{w_0})^{a_0}x_0^{a_1 +\cdots+a_r} m_2 \mid \operatorname{lcm}(m_1,m_2) = \bar{m}$.
Then we can express \begin{align*} \hspace*{0.5cm} &\hspace*{0.43cm}(x_0^{w_0})^{a_0}x_0^{a_1 +\cdots+a_r} m_2\\ &=(x_0^{w_0})^{a_0}x_0^{a_1 +\cdots+a_r} x_1^{b_1} \cdots x_r^{b_r} x_{r+1}^{b_{r+1}} \cdots x_n^{b_n}\\ &= (x_0^{w_0})^{a_0 -(b_{r+1}+\cdots+b_n ) }x_0^{a_1 +\cdots+a_r} x_1^{b_1} \cdots x_r^{b_r} [x_{r+1}^{b_{r+1}} \cdots x_n^{b_n}(x_0^{w_0})^{(b_{r+1}+\cdots+b_n ) }] \\ &= (x_0^{w_0-1})^{a_0 -(b_{r+1}+\cdots+b_n ) }[x_0^{a_0-(b_{r+1}+\cdots+b_n ) +a_1 +\cdots+a_r} x_1^{b_1} \cdots x_r^{b_r}] [(x_{r+1}x_0^{w_0})^{b_{r+1}} \cdots (x_nx_0^{w_0})^{b_n}]\\ &= (x_0^{w_0-1})^{a_0 -(b_{r+1}+\cdots+b_n ) }[x_0^{b_1 +\cdots+b_r} x_1^{b_1} \cdots x_r^{b_r}] [(x_{r+1}x_0^{w_0})^{b_{r+1}} \cdots (x_nx_0^{w_0})^{b_n}]\\ &= (x_0^{w_0-1})^{a_0 -(b_{r+1}+\cdots+b_n ) } [(x_0x_1)^{b_1} \cdots (x_0 x_r)^{b_r}] [(x_{r+1}x_0^{w_0})^{b_{r+1}} \cdots (x_nx_0^{w_0})^{b_n}]. \end{align*} So $ (x_0^{w_0})^{a_0}x_0^{a_1 +\cdots+a_r} m_2 \in \tilde{I}^{(b_1 +\cdots+b_r) + (b_{r+1} + \cdots+ b_n)} = \tilde{I}^s$. Therefore $\bar{m} \in \tilde{I}^s$. Hence the proof follows. \end{proof} \end{document}
\begin{document} \maketitle \begin{abstract} Richard P. Stanley defined the chromatic symmetric function of a simple graph and has conjectured that every tree is determined by its chromatic symmetric function. Recently, Takahiro Hasebe and the author proved that the order quasisymmetric functions, which are analogs of the chromatic symmetric functions, distinguish rooted trees. In this paper, using a similar method, we prove that the chromatic symmetric functions distinguish trivially perfect graphs. Moreover, we also prove that claw-free cographs, that is, $ \{K_{1,3},P_{4}\} $-free graphs belong to a known class of $ e $-positive graphs. \end{abstract} {\footnotesize {\textit{Keywords:}} chromatic symmetric function, threshold graph, trivially perfect graph, cograph, claw-free, $ e $-positive } {\footnotesize {\textit{2010 MSC:}} 05C15, 05C25, 05C31, 05C60, 05E05, } \section{Introduction}\label{Sec:introduction} Let $ G=(V_{G},E_{G}) $ be a finite simple graph. A \textbf{proper coloring} of $ G $ is a function $ \kappa \colon V_{G} \to \mathbb{N} = \{1, 2, \dots\} $ such that $ \{u,v\} \in E_{G} $ implies $ \kappa(u) \neq \kappa(v) $. Every proper coloring of $ G $ can be regarded as a graph homomorphism from $ G $ to $ K_{\mathbb{N}} $, the complete graph on $ \mathbb{N} $. Let $ \Hom(G,K_{\mathbb{N}}) $ denote the set of proper colorings of $ G $. Stanley \cite{stanley1995symmetric-aim} defined the \textbf{chromatic symmetric function} of $ G $ as follows: \begin{align*} X(G,\boldsymbol{x}) \coloneqq \sum_{\kappa \in \Hom(G,K_{\mathbb{N}})} \prod_{v \in V_{G}}x_{\kappa(v)}, \end{align*} where $ \boldsymbol{x} $ denotes infinitely many indeterminates $ (x_{1}, x_{2}, \dots) $. By definition, the chromatic symmetric function is homogeneous of degree $ |V_{G}| $. Stanley conjectured in \cite{stanley1995symmetric-aim} that the chromatic symmetric function distinguishes trees. 
Namely, if two trees $ T_{1}, T_{2} $ have the same chromatic symmetric function, then $ T_{1} $ and $ T_{2} $ are isomorphic. A finite poset $ P $ admits the order quasisymmetric functions, which are kinds of $ P $-partition generating functions studied by Gessel \cite{gessel1984multipartite-cm}. The order quasisymmetric functions are considered to be analogs of the chromatic symmetric function. A recent study \cite{hasebe2017order-joac} by Hasebe and the author showed that the order quasisymmetric functions distinguish rooted trees (with the natural poset structures). The proof is based on algebraic structures of the ring of quasisymmetric functions. In this paper, we will focus on algebraic structures of the ring of symmetric functions and consider the similar problem for trivially perfect graphs. We will define classes of graphs which are treated in this paper. Let $ G,H $ be simple graphs. The \textbf{disjoint union} $ G \sqcup H $ is defined by $ V_{G \sqcup H} \coloneqq V_{G} \sqcup V_{H} $ and $ E_{G\sqcup H} \coloneqq E_{G} \sqcup E_{H} $ (the set theoretical disjoint unions). The \textbf{join} $ G + H $ is defined by $ V_{G + H} \coloneqq V_{G} \sqcup V_{H} $ and $ E_{G+H} \coloneqq E_{G} \sqcup E_{H} \sqcup \Set{\{u,v\} | u \in V_{G}, v \in V_{H}} $. Note that some authors use the symbol $ ``+" $ for disjoint unions. See Figure \ref{Fig:disjoint union and join} for examples. \begin{figure} \caption{Examples of the disjoint union and the join} \label{Fig:disjoint union and join} \end{figure} Some classes $ \mathcal{C} $ of simple graphs can be generated by graph operations. We consider the following rules. \begin{enumerate}[(1)] \item \label{rule 1} $ K_{1} \in \mathcal{C} $. \item \label{rule 2} If $ G \in \mathcal{C} $, then $ G \sqcup K_{1} \in \mathcal{C} $. \item \label{rule 3} If $ G \in \mathcal{C} $, then $ G + K_{1} \in \mathcal{C} $. \item \label{rule 4} If $ G,H \in \mathcal{C} $, then $ G \sqcup H \in \mathcal{C} $. 
\item \label{rule 5} If $ G,H \in \mathcal{C} $, then $ G+H \in \mathcal{C} $. \item \label{rule 6} If $ G \in \mathcal{C} $, then $ \overline{G} \in \mathcal{C} $. \end{enumerate} Note that $ K_{n} $ denotes the complete graph on $ n $ vertices and $ \overline{G} $ denotes the complement of $ G $. A member of the class generated by rules (\ref{rule 1},\ref{rule 2},\ref{rule 3}) is called a \textbf{threshold graph}. Threshold graphs were introduced by Chv{\'a}tal and Hammer \cite{chvatal1977aggregation-aodm} by a different definition and they gave several characterizations. Our definition of threshold graphs is equivalent to the original definition by \cite[Theorem 1]{chvatal1977aggregation-aodm}. A member of the class generated by rules (\ref{rule 1},\ref{rule 3},\ref{rule 4}) is called a \textbf{trivially perfect graph} (or a \textbf{quasi-threshold graph}). Trivially perfect graphs were introduced by Wolk \cite{wolk1962comparability-potams,wolk1965note-potams} as a comparability graph of an order-theoretic tree. A number of characterizations for trivially perfect graphs are known. Our definition of trivially perfect graphs is equivalent to the original definition by \cite[Theorem 3]{jing-ho1996quasi-threshold-dam}. A member of the class generated by rules (\ref{rule 1},\ref{rule 4},\ref{rule 6}) is called a \textbf{cograph} (short for \textbf{complement reducible graph}). Cographs were discovered independently by several researchers and many characterizations are known. In the definition, we can replace the rule (\ref{rule 6}) by (\ref{rule 5}) since we have the formula $ G + H = \overline{\overline{G} \sqcup \overline{H}} $. Obviously, we have the inclusions \begin{align*} \{\text{threshold graphs}\} \subseteq \{\text{trivially perfect graphs}\} \subseteq \{\text{cographs}\}. 
\end{align*} For a class $ \mathcal{F} $ of simple graphs, a simple graph is said to be \textbf{$ \mathcal{F} $-free} if it has no induced subgraphs isomorphic to a member of $ \mathcal{F} $. The three classes above have forbidden induced subgraph characterizations. \begin{theorem}[{\cite[Theorem 3]{chvatal1977aggregation-aodm}},\ {\cite[Theorem 2]{golumbic1978trivially-dm}}, \ {\cite[Theorem 2]{corneil1981complement-dam}}]\label{FISC} Let $ G $ be a simple graph. \begin{enumerate}[(1)] \item \label{FISC1} $ G $ is threshold if and only if $ G $ is $ \{2K_{2},C_{4},P_{4}\} $-free. \item \label{FISC2} $ G $ is trivially perfect if and only if $ G $ is $ \{C_{4}, P_{4}\} $-free. \item \label{FISC3} $ G $ is a cograph if and only if $ G $ is $ P_{4} $-free. \end{enumerate} Here, $ 2K_{2} = K_{2} \sqcup K_{2} $, $ C_{4} $ is a cycle of length four, and $ P_{4} $ is a path on four vertices (see Figure \ref{Fig:forbidden}). \end{theorem} \begin{figure} \caption{The forbidden graphs} \label{Fig:forbidden} \end{figure} One of the two main theorems of this paper is as follows. \begin{theorem}\label{main theorem 1} The chromatic symmetric function distinguishes trivially perfect graphs. Namely, if two trivially perfect graphs $ G,H $ have the same chromatic symmetric function, then $ G $ and $ H $ are isomorphic. \end{theorem} \begin{corollary} The chromatic symmetric function distinguishes threshold graphs. \end{corollary} However, the chromatic symmetric function cannot distinguish cographs. We will give the smallest counterexample (see Subsection \ref{Subsec:CSF cannot distinguish cographs}). To state the other main theorem, we will define $ e $-positivity of graphs. An \textbf{integer partition} $ \lambda $ is a finite multiset consisting of positive integers. We write an integer partition as $ \langle 1^{r_{1}}, 2^{r_{2}}, \dots \rangle $, where $ r_{i} $ is the multiplicity of $ i $.
If $ \lambda \neq \varnothing $ (the empty set), we may write $ \lambda $ as a non-increasing sequence $ (\lambda_{1}, \dots, \lambda_{\ell}) $ of positive integers. We call $ \ell $ the \textbf{length} of $ \lambda $. For a positive integer $ k $, we define the elementary symmetric function $ e_{k} $ to be \begin{align*} e_{k} \coloneqq \sum_{i_{1}< \dots < i_{k}}x_{i_{1}} \cdots x_{i_{k}}. \end{align*} Moreover, given an integer partition $ \lambda = (\lambda_{1}, \dots, \lambda_{\ell}) $, define $ e_{\lambda} $ to be \begin{align*} e_{\lambda} \coloneqq e_{\lambda_{1}} \cdots e_{\lambda_{\ell}} \end{align*} and $ e_{\varnothing} \coloneqq 1 $. It is well known that $ \{e_{\lambda}\}_{\lambda} $ forms a basis for the vector space of symmetric functions over $ \mathbb{Q} $. There is another well-known basis $ \{s_{\lambda}\}_{\lambda} $, where $ s_{\lambda} $ denotes the Schur function (we omit the definition in this paper). A simple graph is called \textbf{$ e $-positive} (resp. \textbf{$ s $-positive}) if its chromatic symmetric function can be written as a non-negative linear combination of elementary symmetric functions (resp. Schur functions). It is known that $ e $-positivity implies $ s $-positivity. Stanley and Stembridge (\cite[Conjecture 5.5]{stanley1993immanants-joctsa} and \cite[Conjecture 5.1]{stanley1995symmetric-aim}) have conjectured that the incomparability graph of a $ (\boldsymbol{3}+\boldsymbol{1}) $-free poset is $ e $-positive. Gasharov \cite[Theorem 2]{gasharov1996incomparability-dm} gave a weaker result: the incomparability graph of a $ (\boldsymbol{3}+\boldsymbol{1}) $-free poset is $ s $-positive. The \textbf{claw graph} is a complete bipartite graph $ K_{1,3} $ (see Figure \ref{Fig:claw}). \begin{figure} \caption{The claw graph $ K_{1,3} $} \label{Fig:claw} \end{figure} A $ K_{1,3} $-free graph is called \textbf{claw-free}. Note that every incomparability graph of a $ (\boldsymbol{3}+\boldsymbol{1}) $-free poset is claw-free.
Gasharov has conjectured that every claw-free graph is $ s $-positive, which is stated in Stanley's paper \cite[Conjecture 1.4]{stanley1998graph-dm}. The complete graph $ K_{n} $ is $ e $-positive since $ X(K_{n},\boldsymbol{x})=n!e_{n} $. The edgeless graph $ \overline{K}_{n} $ is also $ e $-positive since $ X(\overline{K}_{n},\boldsymbol{x})=e_{1}^{n} $. Path graphs and cycle graphs are also known to be $ e $-positive (\cite[Proposition 5.3, Proposition 5.4]{stanley1995symmetric-aim}). To prove our second theorem, we need the following lemma. \begin{lemma}[{\cite[Exercise 7.47j]{stanley1999enumerative}}] If the complement of a simple graph $ G $ is $ K_{3} $-free, then $ G $ is $ e $-positive. \end{lemma} Our second main theorem is as follows. \begin{theorem}\label{main theorem 2} Let $ G $ be a claw-free cograph, that is, a $ \{K_{1,3},P_{4}\} $-free graph. Then the complement $ \overline{G} $ is $ K_{3} $-free and hence $ G $ is $ e $-positive. \end{theorem} This paper is organized as follows. In Section \ref{Sec:preliminaries}, we review a few basic concepts of the ring of symmetric functions and investigate properties of chromatic symmetric functions. In Section \ref{Sec:discrimination}, we give a proof of Theorem \ref{main theorem 1} and the counterexample for cographs. In Section \ref{Sec:e-positivity}, we prove Theorem \ref{main theorem 2}. \section{Preliminaries}\label{Sec:preliminaries} \subsection{The ring of symmetric functions} In this subsection, we review some basic concepts on the theory of symmetric functions. Our standard reference is \cite{macdonald1995symmetric}. Recall that $ \boldsymbol{x} = (x_{1},x_{2}, \dots) $ denotes infinitely many indeterminates. A formal series $ f \in \mathbb{Q}[[\boldsymbol{x}]] $ is called a \textbf{symmetric function} if the following conditions are satisfied. \begin{enumerate}[(i)] \item The degrees of the monomials of $ f $ are bounded. \item $ f $ is invariant under any permutation of the indeterminates.
\end{enumerate} Let $ \Sym_{\mathbb{Q}} $ denote the subset of the symmetric functions. It is well known that $ \Sym_{\mathbb{Q}} $ is a subring of $ \mathbb{Q}[[\boldsymbol{x}]] $, which is called the \textbf{ring of symmetric functions}. For every integer partition $ \lambda $, we associate it with the \textbf{monomial symmetric function} $ m_{\lambda} $, defined by \begin{align*} m_{\lambda} \coloneqq \sum_{\alpha} \prod_{i=1}^{\infty}x_{i}^{\alpha_{i}}, \end{align*} where $ \alpha=(\alpha_{1},\alpha_{2}, \dots) $ runs over all distinct rearrangements of $ \lambda $ considered as a sequence $ (\lambda_{1}, \dots, \lambda_{\ell}, 0, \dots) $ of non-negative integers. Moreover, we define the \textbf{augmented monomial symmetric function} $ \tilde{m}_{\lambda} $ to be \begin{align*} \tilde{m}_{\lambda} \coloneqq \left(\prod_{i=1}^{\infty}r_{i}!\right)m_{\lambda}, \end{align*} where $ r_{i} $ denotes the multiplicity of $ i $ in $ \lambda $, that is $ \lambda=\langle 1^{r_{1}}, 2^{r_{2}}, \dots \rangle $. For the empty partition, define $ \tilde{m}_{\varnothing} \coloneqq 1 $. It is easy to show that the set $ \{\tilde{m}_{\lambda}\}_{\lambda} $ forms a linear basis for $ \Sym_{\mathbb{Q}} $ over $ \mathbb{Q} $. As with the case of symmetric polynomials (in finite indeterminates), the ring of symmetric function $ \Sym_{\mathbb{Q}} $ is a free commutative algebra, that is, there exists a system of symmetric functions $ \{f_{k}\}_{k \in \mathbb{N}} $ which is algebraically independent over $ \mathbb{Q} $ such that $ \Sym_{\mathbb{Q}}=\mathbb{Q}[f_{k} \mid k \in \mathbb{N}] $. One of those systems is the system $ \{e_{k}\}_{k \in \mathbb{N}} $ of the elementary symmetric functions. Another well-known system is the system $ \{p_{k}\}_{k \in \mathbb{N}} $ of \textbf{power sum symmetric functions}, defined by \begin{align*} p_{k} \coloneqq \tilde{m}_{k} = \sum_{i=1}^{\infty}x_{i}^{k}. 
\end{align*} We also define $ p_{\lambda} \coloneqq p_{\lambda_{1}} \cdots p_{\lambda_{\ell}} $ for an integer partition $ \lambda=(\lambda_{1},\dots,\lambda_{\ell}) $ and $ p_{\varnothing} \coloneqq 1 $. Note that the set $ \{p_{\lambda}\}_{\lambda} $ forms a $ \mathbb{Q} $-basis for $ \Sym_{\mathbb{Q}} $. \subsection{Chromatic symmetric functions} In this subsection, we review some properties of chromatic symmetric functions and prepare to prove our main theorem. For each simple graph $ G $, it is well known that there exists a polynomial $ \chi(G,t) \in \mathbb{Z}[t] $ such that \begin{align*} \chi(G,n) = |\Hom(G,K_{n})| \text{ for all } n \in \mathbb{N}. \end{align*} The polynomial $ \chi(G,t) $ is called the \textbf{chromatic polynomial} of $ G $. From the definition of the chromatic symmetric function, we have \begin{align*} X(G,\boldsymbol{1}^{n}) = \chi(G,n) \text{ for all } n \in \mathbb{N}, \text{ where } \boldsymbol{1}^{n} \coloneqq (\underbrace{1, \dots, 1}_{n}, 0, \dots). \end{align*} Recall that every symmetric function is represented by a polynomial in the power sum symmetric functions. Define a ring homomorphism $ \varepsilon_{p} \colon \Sym_{\mathbb{Q}} \to \mathbb{Q}[t] $ by the extension of $ \varepsilon_{p}(p_{k}) \coloneqq t $. \begin{proposition} Given a simple graph $ G $, we have \begin{align*} \varepsilon_{p}(X(G,\boldsymbol{x})) = \chi(G,t). \end{align*} \end{proposition} \begin{proof} This follows by $ p_{k}(\boldsymbol{1}^{n})=n $ and the discussion above. \end{proof} Every simple graph $ G $ has a decomposition $ G=G_{1}\sqcup \dots \sqcup G_{s} $ into the connected components. The chromatic symmetric function $ X(G,\boldsymbol{x}) $ is determined by the connected components of $ G $. \begin{proposition}[{\cite[Proposition 2.3]{stanley1995symmetric-aim}}]\label{Stanley CSF disjoint} Let $ G,H $ be simple graphs. Then \begin{align*} X(G \sqcup H, \boldsymbol{x}) = X(G, \boldsymbol{x})X(H, \boldsymbol{x}). 
\end{align*} \end{proposition} Cho and van Willigenburg constructed generators of $ \Sym_{\mathbb{Q}} $ consisting of chromatic symmetric functions. \begin{theorem}[{\cite[Theorem 5]{cho2016chromatic-tejoc}}]\label{CvW} Let $ \{G_{k}\}_{k \in \mathbb{N}} $ be a set of connected simple graphs $ G_{k} $ on $ k $ vertices. Then $ \Sym_{\mathbb{Q}}=\mathbb{Q}[X(G_{k},\boldsymbol{x}) \mid k \in \mathbb{N}] $ and $ \{X(G_{k},\boldsymbol{x})\}_{k \in \mathbb{N}} $ is algebraically independent over $ \mathbb{Q} $. \end{theorem} In this paper, the following corollary is required. \begin{corollary}\label{CvW cor} Let $ G $ be a simple graph. Then $ G $ is connected if and only if $ X(G,\boldsymbol{x}) $ is irreducible in $ \Sym_{\mathbb{Q}} $. \end{corollary} \begin{proof} If $ X(G,\boldsymbol{x}) $ is irreducible, then $ G $ is connected by Proposition \ref{Stanley CSF disjoint}. To show the converse, suppose that $ G $ is a connected graph on $ n $ vertices. Define a collection of graphs $ \{G_{k}\}_{k \in \mathbb{N}} $ by $ G_{n} \coloneqq G $ and $ G_{k} \coloneqq K_{k} $ for any $ k \neq n $. By Theorem \ref{CvW}, the set $ \{X(G_{k},\boldsymbol{x})\}_{k \in \mathbb{N}} $ is algebraically independent over $ \mathbb{Q} $ and generates $ \Sym_{\mathbb{Q}} $. Assume that $ X(G,\boldsymbol{x}) $ is reducible. Then $ X(G,\boldsymbol{x}) $ can be represented as a polynomial in $ \{X(G_{k},\boldsymbol{x})\}_{k < n} $, which is a contradiction. Therefore $ X(G,\boldsymbol{x}) $ is irreducible. \end{proof} A set partition of the vertex set $ V_{G} $ of a simple graph $ G $ is a collection $ \pi = \{B_{1}, \dots, B_{\ell}\} $ of non-empty subsets of $ V_{G} $ such that $ B_{1} \sqcup \dots \sqcup B_{\ell} = V_{G} $. Every $ B_{i} $ is called a block. The \textbf{type} of a partition $ \pi $ is the integer partition $ \{|B_{1}|, \dots, |B_{\ell}|\} $, denoted by $ \type(\pi) $. A set partition is called \textbf{stable} if every block induces an edgeless subgraph of $ G $.
Let $ \St_{\lambda}(G) $ denote the set of stable partitions of $ G $ whose type is the integer partition $ \lambda $. The chromatic symmetric function can be represented in terms of stable partitions. \begin{proposition}[{\cite[Proposition 2.4]{stanley1995symmetric-aim}}]\label{Stanley CSF stable partition} Given a simple graph $ G $, we have \begin{align*} X(G,\boldsymbol{x}) = \sum_{\lambda} |\St_{\lambda}(G)| \tilde{m}_{\lambda}, \end{align*} where $ \lambda $ runs over all integer partitions. \end{proposition} This proposition may be considered as a generalization of the following proposition. \begin{proposition}[{\cite[Theorem 15]{read1968introduction-joct}}]\label{Read coeff falling factorial} Given a simple graph $ G $, we have \begin{align*} \chi(G,t) = \sum_{\ell=1}^{|V_{G}|}|\St_{\ell}(G)|(t)_{\ell}, \end{align*} where $ \St_{\ell}(G) $ denotes the set of stable partitions of $ G $ consisting of $ \ell $ blocks and $ (t)_{\ell} \in \mathbb{Q}[t] $ denotes the falling factorial. Namely, $ (t)_{\ell} \coloneqq t(t-1)\cdots(t-\ell+1) $. \end{proposition} Define a map $ \varepsilon_{\tilde{m}} \colon \Sym_{\mathbb{Q}} \to \mathbb{Q}[t] $ by the linear extension of $ \varepsilon_{\tilde{m}}(\tilde{m}_{\lambda}) \coloneqq (t)_{\ell} $, where $ \ell $ is the length of $ \lambda $. \begin{proposition}\label{map m_tilde} Given a simple graph $ G $, we have \begin{align*} \varepsilon_{\tilde{m}}(X(G,\boldsymbol{x})) = \chi(G,t). \end{align*} \end{proposition} \begin{proof} This follows immediately by Propositions \ref{Stanley CSF stable partition} and \ref{Read coeff falling factorial}. \end{proof} Note that the maps $ \varepsilon_{p} $ and $ \varepsilon_{\tilde{m}} $ are different since $ \varepsilon_{\tilde{m}} $ is not a ring homomorphism from $ \Sym_{\mathbb{Q}} $ to $ \mathbb{Q}[t] $. However, if we restrict the domain to the set of chromatic symmetric functions, then $ \varepsilon_{p} $ and $ \varepsilon_{\tilde{m}} $ coincide.
We will introduce multiplications on $ \Sym_{\mathbb{Q}} $ and $ \mathbb{Q}[t] $ such that the map $ \varepsilon_{\tilde{m}} $ becomes a ring homomorphism. For integer partitions $ \lambda $ and $ \mu $, let $ \lambda \uplus \mu $ denote the union as multisets. For example, $ (3,2,2,1) \uplus (4,2,1) = (4,3,2,2,2,1) $. Define a multiplication $ \odot $ on $ \Sym_{\mathbb{Q}} $ by the linear extension of $ \tilde{m}_{\lambda} \odot \tilde{m}_{\mu} \coloneqq \tilde{m}_{\lambda \uplus \mu} $. Let $ (\Sym_{\mathbb{Q}}, \odot) $ denote the $ \mathbb{Q} $-algebra equipped with the usual addition and the multiplication $ \odot $. Since $ \{\tilde{m}_{\lambda}\}_{\lambda} $ is a $ \mathbb{Q} $-basis for $ \Sym_{\mathbb{Q}} $, the algebra $ (\Sym_{\mathbb{Q}}, \odot) $ is a free commutative algebra generated by $ \{\tilde{m}_{k}\}_{k \in \mathbb{N}} $. Moreover, define a multiplication $ \odot $ on $ \mathbb{Q}[t] $ by the linear extension of $ (t)_{\ell} \odot (t)_{m} \coloneqq (t)_{\ell+m} $. Let $ (\mathbb{Q}[t],\odot) $ be the $ \mathbb{Q} $-algebra equipped with the usual addition and the multiplication $ \odot $. Then $ (\mathbb{Q}[t],\odot) $ is a free commutative algebra generated by $ (t)_{1} $. It is easy to verify that the map $ \varepsilon_{\tilde{m}} $ is a ring homomorphism from $ (\Sym_{\mathbb{Q}}, \odot) $ to $ (\mathbb{Q}[t], \odot) $. We will see that the chromatic symmetric function of the join $ G+H $ is a product of the chromatic symmetric functions of $ G $ and $ H $ with respect to the multiplication $ \odot $. The following proposition is required, which is an analogy of \cite[Proposition 3.11]{hasebe2017order-joac}. \begin{proposition}\label{stable partitions of join} Let $ G $ and $ H $ be simple graphs. For every integer partition $ \lambda $, there exists a bijection \begin{align*} \St_{\lambda}(G+H) \simeq \bigsqcup_{\mu \uplus \nu = \lambda} \left(\St_{\mu}(G) \times \St_{\nu}(H)\right). 
\end{align*} \end{proposition} \begin{proof} Every block of a stable partition $ \pi \in \St_{\lambda}(G+H) $ consists of either vertices in $ G $ or vertices in $ H $ since each vertex of $ G $ is adjacent to the vertices of $ H $. Let $ \pi_{G}, \pi_{H} $ denote the collection of blocks consisting of vertices in $ G, H $, respectively. Then we have that $ \pi = \pi_{G} \sqcup \pi_{H} $. Hence the mapping $ \pi \mapsto (\pi_{G},\pi_{H}) $ is a desired bijection. \end{proof} The following proposition is an analogy of \cite[Proposition 3.12]{hasebe2017order-joac}. \begin{lemma}\label{CSF join} Let $ G $ and $ H $ be simple graphs. Then \begin{align*} X(G+H,\boldsymbol{x}) = X(G,\boldsymbol{x}) \odot X(H,\boldsymbol{x}). \end{align*} \end{lemma} \begin{proof} By Propositions \ref{Stanley CSF stable partition} and \ref{stable partitions of join}, we have \begin{align*} X(G+H,\boldsymbol{x}) &= \sum_{\lambda} |\St_{\lambda}(G+H)| \tilde{m}_{\lambda} \\ &= \sum_{\lambda} \sum_{\mu \uplus \nu = \lambda}|\St_{\mu}(G)||\St_{\nu}(H)|\tilde{m}_{\mu \uplus \nu} \\ &= \sum_{\mu,\nu} |\St_{\mu}(G)||\St_{\nu}(H)|\tilde{m}_{\mu} \odot \tilde{m}_{\nu} \\ &= \left(\sum_{\mu}|\St_{\mu}(G)|\tilde{m}_{\mu}\right)\odot\left(\sum_{\nu}|\St_{\nu}(H)|\tilde{m}_{\nu}\right) \\ &= X(G,\boldsymbol{x})\odot X(H,\boldsymbol{x}). \end{align*} \end{proof} Using Proposition \ref{map m_tilde} and Lemma \ref{CSF join}, we can recover the following result of Read. \begin{proposition}[{\cite[Theorem 4]{read1968introduction-joct}}] Let $ G,H $ be simple graphs. Then \begin{align*} \chi(G + H, t) = \chi(G,t) \odot \chi(H,t). \end{align*} \end{proposition} \begin{remark} There is no unary operation on $ \Sym_{\mathbb{Q}} $ which is compatible with taking the complement. 
Stanley's example shows that the graphs $ G $ and $ H $ in Figure \ref{Fig:stanley's example} have the same chromatic symmetric function: \begin{align*} X(G,\boldsymbol{x}) = X(H,\boldsymbol{x}) = \tilde{m}_{11111}+4\tilde{m}_{2111}+2\tilde{m}_{221}. \end{align*} However, the chromatic symmetric functions of their complements are distinct: \begin{align*} X(\overline{G},\boldsymbol{x}) &= \tilde{m}_{11111}+6\tilde{m}_{2111}+5\tilde{m}_{221}+2\tilde{m}_{311}+2\tilde{m}_{32}, \\ X(\overline{H},\boldsymbol{x}) &= \tilde{m}_{11111}+6\tilde{m}_{2111}+5\tilde{m}_{221}+2\tilde{m}_{311}+\tilde{m}_{32}. \end{align*} \end{remark} \begin{figure} \caption{Stanley's examples and their complements} \label{Fig:stanley's example} \end{figure} \section{Discrimination}\label{Sec:discrimination} \subsection{Discrimination for trivially perfect graphs} We are now ready to prove Theorem \ref{main theorem 1}. The following proof is almost the same as the proof of \cite[Theorem 1.3]{hasebe2017order-joac}. \begin{proof}[Proof of Theorem \ref{main theorem 1}] We proceed by induction on $ |V_{G}| $. When $ |V_{G}|=1 $, we have $ G=H=K_{1} $. Suppose that $ |V_{G}| \geq 2 $. Decompose $ G $ and $ H $ into their connected components: \begin{align*} G=\bigsqcup_{i=1}^{n}G_{i}, \qquad H=\bigsqcup_{i=1}^{m}H_{i}. \end{align*} By the assumption $ X(G,\boldsymbol{x}) = X(H,\boldsymbol{x}) $ and Proposition \ref{Stanley CSF disjoint}, we have \begin{align*} \prod_{i=1}^{n}X(G_{i},\boldsymbol{x}) = \prod_{i=1}^{m}X(H_{i},\boldsymbol{x}). \end{align*} The ring of symmetric functions $ \Sym_{\mathbb{Q}} $ is a free commutative algebra and hence it is a unique factorization domain. Using Corollary \ref{CvW cor}, we have that $ n=m $ and $ X(G_{i},\boldsymbol{x}) = X(H_{i}, \boldsymbol{x}) $ for each $ i $ after a suitable renumbering. Assume that $ n \geq 2 $.
The induced subgraphs $ G_{i},H_{i} $ are also trivially perfect by Theorem \ref{FISC}(\ref{FISC2}) and the number of vertices of $ G_{i} $ is less than $ |V_{G}| $. Therefore, by our induction hypothesis, we have that $ G_{i} $ is isomorphic to $ H_{i} $. Hence $ G $ and $ H $ are isomorphic. Now consider the case $ n=1 $, that is, $ G $ and $ H $ are connected. By the definition of trivially perfect graphs, there are trivially perfect graphs $ G^{\prime}, H^{\prime} $ such that $ G=G^{\prime}+K_{1} $ and $ H=H^{\prime}+K_{1} $. Since $ X(K_{1},\boldsymbol{x}) = \tilde{m}_{1} $, using Lemma \ref{CSF join}, we have \begin{align*} X(G^{\prime}, \boldsymbol{x}) \odot \tilde{m}_{1} = X(H^{\prime}, \boldsymbol{x}) \odot \tilde{m}_{1}. \end{align*} Since the algebra $ (\Sym_{\mathbb{Q}},\odot) $ is an integral domain, we have $ X(G^{\prime},\boldsymbol{x})=X(H^{\prime},\boldsymbol{x}) $. Our induction hypothesis forces that $ G^{\prime} $ is isomorphic to $ H^{\prime} $. Thus $ G $ and $ H $ are isomorphic. \end{proof} \subsection{Discrimination for cographs}\label{Subsec:CSF cannot distinguish cographs} As mentioned in Section \ref{Sec:introduction}, the chromatic symmetric function cannot distinguish cographs. We now give an example. A simple graph is called \textbf{coconnected} if its complement is connected. Consider a simple graph $ G $ and a decomposition $ \overline{G} = \overline{G}_{1} \sqcup \dots \sqcup \overline{G}_{n} $, where $ \overline{G}_{i} $ is a connected component of $ \overline{G} $. Taking complements of both sides, we obtain $ G = G_{1} + \dots + G_{n} $. Every $ G_{i} $ is called a \textbf{coconnected component}. Since the connected components of a simple graph are uniquely determined, the coconnected components are also uniquely determined. The isomorphism classes of cographs are closed under taking the disjoint union $ \sqcup $ and taking the join $ + $.
Let $ \Cograph $ denote the algebraic system equipped with two commutative and associative operations $ \sqcup $ and $ + $ whose underlying set consists of the isomorphism classes of cographs. \begin{proposition}\label{cograph free} The algebraic system $ \Cograph $ is free and generated by $ K_{1} $. \end{proposition} \begin{proof} Let $ G $ be a cograph. We proceed by induction on $ |V_{G}| $. If $ |V_{G}|=1 $, then $ G=K_{1} $ and there are no other representations. Assume that $ |V_{G}| \geq 2 $. By the definition of cographs, $ G $ is either a disjoint union or a join of some cographs. By the induction hypothesis, the connected components or the coconnected components of $ G $ are represented uniquely by using $ K_{1} $. Therefore $ G $ also has a unique representation by using $ K_{1} $. Thus $ \Cograph $ is a free algebraic system. \end{proof} \begin{remark} One can construct an algebraic system called a commutative De Morgan bisemigroup from $ \Cograph $. A generalized result of Proposition \ref{cograph free} was proven in \cite{esik2003free-ac}. In \cite{corneil1981complement-dam}, it was shown that every cograph admits a unique cotree representation, which is equivalent to Proposition \ref{cograph free}. \end{remark} For the proof of Theorem \ref{main theorem 1}, it plays an important role that a simple graph is connected if and only if its chromatic symmetric function is irreducible in $ \Sym_{\mathbb{Q}} $ (Corollary \ref{CvW cor}). However, there is no reason why the chromatic symmetric function of a coconnected cograph is irreducible in $ (\Sym_{\mathbb{Q}}, \odot) $. In fact, we have the following equalities by using Proposition \ref{Stanley CSF stable partition}.
\begin{align*} X(K_{2} \sqcup K_{1}, \boldsymbol{x}) &= \tilde{m}_{111}+2\tilde{m}_{21} = \tilde{m}_{1} \odot (\tilde{m}_{11}+2\tilde{m}_{2}), \\ X(K_{6}\sqcup K_{1}, \boldsymbol{x}) &= \tilde{m}_{1111111}+6\tilde{m}_{211111} = \tilde{m}_{11111}\odot(\tilde{m}_{11}+6\tilde{m}_{2}), \\ X(K_{4}\sqcup K_{2},\boldsymbol{x}) &= \tilde{m}_{111111}+8\tilde{m}_{21111}+12\tilde{m}_{2211} = \tilde{m}_{11}\odot(\tilde{m}_{11}+2\tilde{m}_{2})\odot(\tilde{m}_{11}+6\tilde{m}_{2}), \\ X(K_{4},\boldsymbol{x}) &= \tilde{m}_{1111}. \end{align*} By Lemma \ref{CSF join}, these equalities yield that both of the cographs $ (K_{2}\sqcup K_{1})+(K_{6}\sqcup K_{1}) $ and $ (K_{4}\sqcup K_{2})+K_{4} $ have the same chromatic symmetric function \begin{align*} \tilde{m}_{111111}\odot(\tilde{m}_{11}+2\tilde{m}_{2})\odot(\tilde{m}_{11}+6\tilde{m}_{2}). \end{align*} Furthermore, by Proposition \ref{cograph free}, we have that these graphs are not isomorphic (Figure \ref{Fig:example cographs}). \begin{figure} \caption{The smallest example of two non-isomorphic cographs which have the same chromatic symmetric function} \label{Fig:example cographs} \end{figure} \section{$ e $-positivity of claw-free cographs}\label{Sec:e-positivity} In this section, we will prove Theorem \ref{main theorem 2} and conclude that every claw-free cograph is $ e $-positive. \begin{lemma}\label{coconnected components} Every coconnected component of a connected claw-free cograph is $ K_{1} $ or a disjoint union of two complete graphs. \end{lemma} \begin{proof} Let $ G $ be a connected claw-free cograph. If $ G $ is complete, then the assertion holds since $ G $ is the join of some single-vertex graphs. Suppose that $ G $ is non-complete. The connectivity of $ G $ shows that $ G $ has at least two coconnected components. Assume that there is a coconnected component $ G_{1} $ that consists of at least three connected components.
Take vertices $ a,b,c $ from distinct connected components of $ G_{1} $ and take a vertex $ d $ from a coconnected component distinct from $ G_{1} $. Then the subgraph of $ G $ induced by $ \{a,b,c,d\} $ is isomorphic to the claw graph, which is a contradiction. Therefore the number of connected components of every coconnected component of $ G $ is at most two. \end{proof} Now we are ready to prove Theorem \ref{main theorem 2}. \begin{proof}[Proof of Theorem \ref{main theorem 2}] Let $ G $ be a claw-free cograph. Without loss of generality we may assume that $ G $ is connected and non-complete. By Lemma \ref{coconnected components}, our graph $ G $ is of one of the following forms: \begin{align*} &(G_{1} \sqcup G_{1}^{\prime}) + \dots + (G_{m} \sqcup G_{m}^{\prime}), \\ &(G_{1} \sqcup G_{1}^{\prime}) + \dots + (G_{m} \sqcup G_{m}^{\prime}) + G_{m+1}, \end{align*} where $ G_{i},G_{i}^{\prime} $ are complete graphs on some vertices. In order to show that $ \overline{G} $ is $ K_{3} $-free, it suffices to show that any subgraph of $ G $ induced by three vertices $ \{a,b,c\} $ has at least one edge. If $ a $ belongs to $ G_{m+1} $, then $ a $ is adjacent to all other vertices. In particular, we obtain edges $ \{a,b\} $ and $ \{a,c\} $. Suppose that two of $ \{a,b,c\} $ belong to distinct coconnected components. Then there is an edge connecting these two vertices. Hence we may assume that $ a,b,c $ belong to $ G_{i} \sqcup G_{i}^{\prime} $ for some $ i $. In this case, at least two of $ \{a,b,c\} $ belong to the same component and hence we have an edge. \end{proof} \end{document}
\begin{document} \begin{abstract} We construct small models of number fields and deduce a better bound for the number of number fields of given degree and bounded discriminant. \end{abstract} \title{Enumerating number fields} \author{Jean-Marc Couveignes} \address{Jean-Marc Couveignes, Univ. Bordeaux, CNRS, Bordeaux-INP, IMB, UMR 5251, F-33400 Talence, France.} \address{Jean-Marc Couveignes, INRIA, F-33400 Talence, France.} \email{[email protected]} \date{\today} \maketitle \setcounter{tocdepth}{2} \tableofcontents \section{Introduction} We prove the two theorems below. \begin{theorem}[Number fields have small models]\label{th:sm} There exists a positive constant ${\mathcal O}$ such that the following is true. Let ${\mathbf K}$ be a number field of degree $n\geqslant {\mathcal O}$ and discriminant $d_{\mathbf K}$ over ${\mathbf Q}$. There exist integers $r\leqslant {\mathcal O} \log n$ and $d\leqslant {\mathcal O} \log n$ such that ${d+r\choose r}\leqslant {\mathcal O} n\log n$ and there exist $r$ polynomials $E_1$, $E_2$, \dots , $E_r$ of degree $\leqslant d$ in ${\mathbf Z}[x_1, \ldots, x_r]$ all having coefficients bounded in absolute value by $n^{{\mathcal O}\log n}d_{\mathbf K}^{{\mathcal O}\frac{\log n}{n}}$ such that the (smooth and zero-dimensional affine) scheme with equations \[E_1 = E_2 = \dots = E_r=0 \text{ and } \det \left( \partial E_i/\partial x_j \right)_{1\leqslant i, \, j\leqslant r} \not = 0\] contains $\mathop{\rm{Spec}}\nolimits {\mathbf K}$ as one of its irreducible components. \end{theorem} \begin{theorem}[Number fields with bounded discriminant]\label{th:nf} There exists a positive constant ${\mathcal O}$ such that the following is true. Let $n\geqslant {\mathcal O}$ be an integer. Let $H\geqslant 1$ be an integer. The number of isomorphism classes of number fields with degree $n$ and discriminant $\leqslant H$ is $\leqslant n^{{\mathcal O} n\log^3n}H^{{\mathcal O} \log^3 n}$.
\end{theorem} The meaning of Theorem \ref{th:sm} is that we can describe a number field using few parameters in some sense. We have a short description of it as a quotient of a finite algebra: the smooth zero-dimensional part of a complete intersection of small degree and small height in a projective space of small dimension. Theorem \ref{th:nf} improves on previous results by Schmidt \cite{Sch} and Ellenberg-Venkatesh \cite{EV}. Schmidt obtains a bound $H^{\frac{n+2}{4}}$ times a function of $n$. Ellenberg and Venkatesh obtain a bound $H^{\exp ({\mathcal O} \sqrt{\log n})}$ times a function of $n$. We combine techniques from geometry of numbers and interpolation theory to produce small projective models of $\mathop{\rm{Spec}}\nolimits {\mathbf K}$ and lower the exponent of $H$ down to ${\mathcal O} \log^3 n$. A key point is to look for local equations rather than a full set of generators of the ideal of these models. Our estimate is not sharp of course. Indeed for $n=1$ the exponent of $H$ can be taken to be $0$. For $2\leqslant n\leqslant 5$ the exponent of $H$ can be taken to be $1$ according to work by Davenport and Heilbronn \cite{DH} for $n=3$, and Bhargava \cite{Bh4, Bh5} for $n=4, \, 5$. It is a bit delicate to infer a general conjecture from these results because the techniques used for these small values of $n$ seem to be quite specific. Cohen, Diaz and Olivier have collected experimental data e.g. in \cite{Co1, Co2, CDO} suggesting that the number of isomorphism classes of number fields of degree $n$ and discriminant $\leqslant H$ should grow linearly in $H$ for fixed $n\geqslant 2$. Malle has stated in \cite{Malle} a more general and accurate conjecture on the distribution of Galois groups of number fields that would confirm this intuition. In Section \ref{sec:short} we recall notation, definitions and elementary results from the geometry of numbers.
In Section \ref{sec:small} we construct models for number fields as irreducible components of complete intersections with small height in low dimensional projective spaces. The last section is devoted to the proof of Theorem \ref{th:sm} and Theorem \ref{th:nf}. The author thanks Pascal Autissier, Karim Belabas, Georges Gras and Christian Maire for their comments and suggestions. \section{Short integers}\label{sec:short} Let ${\mathbf K}$ be a number field and let $n$ be the degree of ${\mathbf K}$ over ${\mathbf Q}$. Let ${\mathbf {O}}$ be the ring of integers of ${\mathbf K}$. Let $(\rho_i)_{1\leqslant i\leqslant r}$ be the $r$ real embeddings of ${\mathbf K}$. Let $(\sigma_j, \bar\sigma_j)_{1\leqslant j\leqslant s}$ be the $2s$ complex embeddings of ${\mathbf K}$. We also denote by $(\tau_k)_{1\leqslant k\leqslant n}$ the $n=r+2s$ embeddings of ${\mathbf K}$. Let \[{\mathbf K}_{\mathbf R} = {\mathbf K}\otimes_{\mathbf Q}{\mathbf R} = {\mathbf R} ^r \times {\mathbf C}^s\] be the Minkowski space. We follow the presentation in \cite[Chapitre 1, \S 5]{Neukirch}. An element $x$ of ${\mathbf K}_{\mathbf R}$ can be given by $r$ real components $(x_\rho)_\rho$ and $s$ complex components $(x_\sigma)_\sigma$. So we write $x = ((x_\rho)_\rho , (x_\sigma)_\sigma)$. For such an $x$ in ${\mathbf K}_{\mathbf R}$ we denote by $||x||$ the maximum of the absolute values of its $r+s$ components. The canonical metric on ${\mathbf K}_{\mathbf R}$ is defined by \[<x,y> = \sum_{1\leqslant i\leqslant r}x_iy_i+ \sum_{1\leqslant j\leqslant s}x_j\bar y_j+\bar x_jy_j.\] In particular the contribution of complex embeddings is counted twice \[<x,x> = \sum_{1\leqslant i\leqslant r}x_i^2+ 2\sum_{1\leqslant j\leqslant s}|x_j|^2.\] The corresponding Haar measure is said to be canonical also. The canonical measure of the convex body $\{x, ||x||\leqslant 1\}$ is \[2^r(2\pi )^{s}\geqslant 2^n.\] The map $a\mapsto a\otimes 1$ injects ${\mathbf K}$ and ${\mathbf {O}}$ into ${\mathbf K}_{\mathbf R}$. 
For every non-zero $x$ in ${\mathbf {O}}$ we have \[||x||\geqslant 1.\] Let $(\alpha_i)_{1\leqslant i\leqslant n}$ be any ${\mathbf Z}$-basis of ${\mathbf {O}}$. Set $A = (\tau_j (\alpha_i))_{1\leqslant i, j \leqslant n}$. The product $A\bar A^t$ is the Gram matrix $B = (<\alpha_i, \alpha_j>)_{1\leqslant i, j \leqslant n}$ of the canonical form in the basis $(\alpha_i)_i$. This is a real symmetric positive matrix. The volume of ${\mathbf {O}}$ according to the canonical Haar measure is \[{{v}}_{\mathbf {O}} = \sqrt{\det (B)} = |\det (A)|.\] The square of the volume of ${\mathbf {O}}$ is the discriminant of ${\mathbf K}$ \[d_{\mathbf K} = \det (B) = |\det (A)|^2={{v}}_{\mathbf {O}}^2.\] Applying Minkowski's second theorem \cite[Lecture III, \S 4, Theorem 16]{Siegel} to the gauge function $x\mapsto ||x||$ we find that ${\mathbf {O}}$ contains $n$ linearly independent elements $\omega_1$, $\omega_2$, \ldots, $\omega_n$ such that \[\prod_{1\leqslant i\leqslant n}||\omega_i||\leqslant {{v}}_{\mathbf {O}}=d_{\mathbf K}^{1/2}.\] We assume that the sequence $i\mapsto ||\omega_i||$ is non-decreasing and deduce that \[||\omega_i||\leqslant v_{\mathbf {O}}^{1/(n+1-i)}\]for every $1\leqslant i\leqslant n$. This inequality is a bit unsatisfactory because it provides little information on the largest $\omega_i$. To improve on this estimate we use the fact that ${\mathbf {O}}$ is an integral domain. We let $m = \lceil (n+1)/2\rceil$ be the smallest integer bigger than $n/2$. On the one hand \[||\omega_i||\leqslant d_{\mathbf K}^{1/n}\]for every $1\leqslant i\leqslant m$. On the other hand the products \[(\omega_i\omega_j)_{1\leqslant i, j\leqslant m}\] generate a ${\mathbf Z}$-module of rank $n$. Otherwise there would exist a non-zero linear form $f : {\mathbf {O}} \rightarrow {\mathbf Z}$ vanishing on these products. So the $m$ forms $f\circ \omega_i$ would be orthogonal to the $m$ vectors $\omega_j$. Then $m+m\leqslant n$. A contradiction.
We deduce that all the successive minima of ${\mathbf {O}}$ are \[\leqslant d_{\mathbf K}^{\, 2/n}.\] In other words ${\mathbf {O}}$ is well balanced. \begin{proposition}[Number fields have small integers]\label{prop:balanced} The ring of integers ${\mathbf {O}}$ of a number field ${\mathbf K}$ with degree $n$ and discriminant $d_{\mathbf K}$ contains $n$ linearly independent elements $(\alpha_i)_{1\leqslant i\leqslant n}$ over ${\mathbf Z}$ such that all the absolute values of all the $\alpha_i$ are $\leqslant d_{\mathbf K}^{\, 2/n}$. \end{proposition} Bhargava, Shankar, Taniguchi, Thorne, Tsimerman, and Zhao prove in \cite[Theorem 3.1]{Bha} a similar statement which is somewhat stronger but less accurate. \section{Small models}\label{sec:small} Let \[{\mathbf K}_{\mathbf C} = {\mathbf K}\otimes_{\mathbf Q}{\mathbf C} = {\mathbf C} ^n.\] Let $d\geqslant 5$ and $r\geqslant 1$ be two integers. We assume that \[n(r+1)\leqslant {d+r\choose d}.\] Let $M$ be the set of monomials of total degree $\leqslant d$ in the $r$ variables $x_1$, \ldots, $x_r$. We have \[{\mathbf A}^r_{\mathbf C} = \mathop{\rm{Spec}}\nolimits {\mathbf C}[x_1, \ldots, x_r] \subset \mathop{\rm{Proj}}\nolimits {\mathbf C}[x_0, x_1, \ldots, x_r] = \mathbf P^r_{\mathbf C}.\] Let $V_{\mathbf C}$ be the ${\mathbf C}$-linear space generated by $M$. We may associate to every element in $M$ the corresponding degree $d$ monomial in the $r+1$ variables $x_0$, $x_1$, \ldots, $x_r$. We thus identify $V_{\mathbf C}$ with $H^0({\mathcal O}_{\mathbf P^r_{\mathbf C}}(d))$, the space of homogeneous polynomials of degree $d$. Let $(P_\tau)_\tau$ be $n$ pairwise distinct points in \[{\mathbf C}^r = {\mathbf A}^r({\mathbf C}).\] The $P_\tau$ are indexed by the $n$ embeddings of ${\mathbf K}$. These $n$ points form a set (a reduced zero-dimensional subscheme of $\mathbf P^r_{\mathbf C}$) called $P$. We call ${\mathcal I}$ the corresponding ideal sheaf on $\mathbf P^r_{\mathbf C}$.
We denote by $2P$ the scheme associated with ${\mathcal I}^2$. It consists of $n$ double points. We say that the scheme $2P$ is well poised (or non-special) in degree $d$ if it imposes $n(r+1)$ independent conditions on degree $d$ homogeneous polynomials. Equivalently, the map \[H^0({\mathcal O}_{\mathbf P^r}(d))\rightarrow H^0({\mathcal O}_{2P}(d))\] is surjective. This is the case if and only if the $n(r+1) \times {d+r\choose d}$ matrix \[{\mathcal M}_P^1 = [(m(P_\tau))_{ \tau , \, m\in M }, (\partial m/\partial x_1 (P_\tau))_{ \tau ,\, m\in M}, (\partial m/\partial x_2 (P_\tau))_{\tau , \, m\in M }, \dots, (\partial m/\partial x_r (P_\tau))_{\tau , \, m\in M}] \] has maximal rank $n(r+1)$. We note that ${\mathcal M}_P^1$ consists of $r+1$ blocks of size $n\times {d+r\choose d}$ piled vertically. It has maximal rank for a generic $P$ when $d\geqslant 5$, according to a theorem of Alexander \cite{Alexander}, generalized by Alexander and Hirschowitz \cite{AH}. Chandler \cite[Theorem 1]{Chandler} provides a simpler statement and proof. The recent exposition and simplification by Brambilla and Ottaviani \cite{Bram} is very useful also. We now let $(\alpha_i)_{1\leqslant i\leqslant n}$ be $n$ linearly independent short elements in ${\mathbf {O}}$ as in Proposition \ref{prop:balanced}. We pick $rn$ rational integers $(u_{i,j})_{1\leqslant i\leqslant n, \, 1\leqslant j \leqslant r}$ and we set \[\kappa_j = \sum_{1\leqslant i\leqslant n}u_{i,j}\alpha_{i}\] for $1\leqslant j\leqslant r$. Let \[\epsilon_{\mathbf Q} : {\mathbf Q}[x_1, \ldots, x_r] \rightarrow {\mathbf K}\] be the homomorphism of ${\mathbf Q}$-algebras sending $x_j$ to $\kappa_j$ for $1\leqslant j \leqslant r$. Let \[e_{\mathbf Q} : \mathop{\rm{Spec}}\nolimits {\mathbf K} \rightarrow {\mathbf A}^r_{\mathbf Q} \subset \mathbf P^r_{\mathbf Q}\] be the corresponding morphism of schemes.
Tensoring $\epsilon_{\mathbf Q}$ by ${\mathbf R}$ we obtain a homomorphism \[\epsilon_{\mathbf R} : {\mathbf R}[x_1, \ldots, x_r] \rightarrow {\mathbf K}_{\mathbf R}\] sending $x_j$ to $((\rho(\kappa_j))_\rho, (\sigma (\kappa_j))_\sigma)$. We call \[e_{\mathbf R} : \mathop{\rm{Spec}}\nolimits {\mathbf K}_{\mathbf R} \rightarrow {\mathbf A}^r_{\mathbf R} \subset \mathbf P^r_{\mathbf R} \] the corresponding morphism of schemes. We define \[\epsilon_{\mathbf C} : {\mathbf C}[x_1, \ldots, x_r] \rightarrow {\mathbf K}_{\mathbf C}\] and \[e_{\mathbf C} : \mathop{\rm{Spec}}\nolimits {\mathbf K}_{\mathbf C} \rightarrow {\mathbf A}^r_{\mathbf C} \subset \mathbf P^r_{\mathbf C} \] similarly. In particular $\epsilon_{\mathbf C}$ maps $x_j$ onto $(\tau (\kappa_j))_\tau$. We now consider the points $(P_\tau)_\tau$ such that $x_0(P_\tau)=1$ and \[(x_j(P_\tau))_\tau= \left( \sum_{1\leqslant i\leqslant n}u_{i,j}\tau(\alpha_{i}) \right)_\tau ,\] for $1\leqslant j\leqslant r$ or equivalently \[P_\tau = ( \sum_{1\leqslant i\leqslant n}u_{i,j}\tau(\alpha_{i}) )_{1\leqslant j\leqslant r}\in {\mathbf C}^r = {\mathbf A}^r({\mathbf C})\subset \mathbf P^r({\mathbf C}).\] The maximal minors of the corresponding matrix ${\mathcal M}_P^1$ are polynomials of total degree $\leqslant dn(r+1)$ in the $u_{i,j}$ and one of them is not identically zero. The latter determinant cannot vanish at every integer point of the Cartesian product $[0,dn(r+1)]^{nr}$. Thus there exist $nr$ rational integers $u_{i,j}$ in the range \[[0,dn(r+1)]\] such that the corresponding scheme $2P$ is well poised. We assume that the $u_{i,j}$ meet these conditions. Since $2P$ is well poised, $P$ is well poised also. So $e_{\mathbf Q}$, $e_{\mathbf R}$ and $e_{\mathbf C}$ are closed embeddings. In order to describe them efficiently we look for polynomials with degree $\leqslant d$ and small integer coefficients vanishing at $P$.
We denote by $V_{\mathbf R} = {\mathbf R}[x_1, \ldots, x_r]_d$ the ${\mathbf R}$-vector space of polynomials in ${\mathbf R}[x_1, \ldots, x_r]$ of degree $\leqslant d$. There is a unique ${\mathbf R}$-bilinear form on $V_{\mathbf R}$ that turns the set $M$ of monomials into an orthonormal basis. The lattice of relations with integer coefficients and degree $\leqslant d$ is the intersection between $\mathop{\rm{Ker}}\nolimits \epsilon_{\mathbf R}$ and \[V_{\mathbf Z} = {\mathbf Z} [x_1, \ldots, x_r]_d.\] This is a free ${\mathbf Z}$-module ${\mathcal L} \subset V_{\mathbf R}$ of rank \[\ell = {d+r\choose d} -n.\] We set $L = {\mathcal L}\otimes_{\mathbf Q} {\mathbf R}$ the underlying ${\mathbf R}$-vector space and $L^\perp$ its orthogonal complement in $V_{\mathbf R}$. We denote by ${\mathcal L}^\perp$ the intersection ${\mathcal L}^\perp = L^\perp \cap V_{\mathbf Z}$. Since $V_{\mathbf Z}$ is unimodular, ${\mathcal L}$ and ${\mathcal L}^\perp$ have the same volume. See \cite[Corollary 1.3.5.]{Martinet}. We denote by $\hat {\mathbf {O}} = \mathop{\rm{Hom}}\nolimits ({\mathbf {O}},{\mathbf Z})$ the dual of ${\mathbf {O}}$, the ring of integers of ${\mathbf K}$, as a ${\mathbf Z}$-module. We call \[\epsilon_{{\mathbf Z}, d} : {\mathbf Z} [x_1, \ldots, x_r]_d\rightarrow {\mathbf {O}}\] the evaluation map in degree $\leqslant d$. We observe that ${\mathcal L}^\perp$ contains the image of $\hat {\mathbf {O}}$ by the transpose map \[\hat \epsilon_{{\mathbf Z}, d} : \hat {\mathbf {O}} \rightarrow {\mathbf Z} [x_1, \ldots, x_r]_d\] \noindent where we have identified ${\mathbf Z} [x_1, \ldots, x_r]_d$ with its dual thanks to the canonical bilinear form. So the volume of ${\mathcal L}$ is bounded from above by the volume of $\hat \epsilon_{{\mathbf Z}, d} (\hat {\mathbf {O}})$. We consider the matrix \[{\mathcal M}_P^0 = [(m(P_\tau))_{\tau , \, m\in M }]\] of the map $\epsilon_{{\mathbf C}, d} = \epsilon_{{\mathbf Z}, d}\otimes_{\mathbf Z}{\mathbf C}$ in the canonical bases. 
If we prefer to use an integral basis of ${\mathbf {O}}$ on the right we should multiply ${\mathcal M}_P^0$ on the left by the inverse $T$ of the matrix of a basis of ${\mathbf {O}}$ in the canonical basis. We deduce that the square of the volume of $\hat \epsilon_{{\mathbf Z}, d} (\hat {\mathbf {O}})$ is the determinant of $T{\mathcal M}_P^0({\mathcal M}_P^0)^tT^t$. Since $T{\mathcal M}_P^0$ has real coefficients we have \[\det (T{\mathcal M}_P^0({\mathcal M}_P^0)^tT^t) = \det \left(T{\mathcal M}_P^0\left( \overline{{\mathcal M}_P^0}\right)^t\bar T^t\right) =\det \left({\mathcal M}_P^0 \left( \overline{{\mathcal M}_P^0}\right)^t \right) / d_{\mathbf K}.\] So the square of the volume of the lattice of relations is bounded by the determinant of the Hermitian positive definite matrix ${\mathcal M}_P^0\left( \overline{{\mathcal M}_P^0}\right)^t$ divided by $d_{\mathbf K}$. Recall that the coefficients in ${\mathcal M}_P^0$ are degree $\leqslant d$ monomials in the $\kappa_j = \sum_{1\leqslant i\leqslant n}u_{i,j}\alpha_{i}$. The coefficients $u_{i,j}$ are bounded from above by $dn(r+1)$. All the absolute values of the $\alpha_i$ are bounded from above by $d_{\mathbf K}^{2/n}$. So the coefficients in ${\mathcal M}_P^0$ are bounded from above by \[(n^2d(r+1))^dd_{\mathbf K}^{2d/n}.\] The coefficients in ${\mathcal M}_P^0\left( \overline{{\mathcal M}_P^0}\right)^t$ are bounded from above by \[{\mathfrak D} = {d+r\choose d}(n^2d(r+1))^{2d}d_{\mathbf K}^{4d/n}.\] The matrix ${\mathcal M}_P^0\left( \overline{{\mathcal M}_P^0}\right)^t$ being Hermitian positive definite, its determinant is bounded from above by the product of the diagonal terms. We deduce that the volume of the lattice ${\mathcal L}$ of relations is bounded from above by ${\mathfrak D} ^{n/2}$. Recall that the dimension of ${\mathcal L}$ is \[\ell = {d+r\choose d}-n.\] For any $x$ in $V_{\mathbf R}$ we denote by $||x||$ the $\ell_2$-norm in the monomial basis.
The volume of the ball $\{x \in L, ||x||\leqslant 1\}$ is $\geqslant 2^{\ell}\ell^{-\ell/2}$. Applying Minkowski's second theorem \cite[Lecture III, \S 4, Theorem 16]{Siegel} to the gauge function $x\mapsto ||x||$ we find that ${\mathcal L}$ contains $\ell$ linearly independent elements $E_1$, $E_2$, \ldots, $E_\ell$ such that \[\prod_{1\leqslant i\leqslant \ell}||E_i||\leqslant \ell^{\ell/2}{\mathfrak D}^{n/2}.\] We assume that the sequence $i\mapsto ||E_i||$ is non-decreasing and deduce that the size of the $i$-th equation is bounded from above \[||E_i||\leqslant \ell^{\frac{\ell}{2(\ell+1-i)}} {\mathfrak D}^{\frac{n}{2(\ell+1-i)}}\]for every $1\leqslant i\leqslant \ell$. Again, this inequality is a bit unsatisfactory because it provides little information on the largest equations. This time we see no other way around than forgetting the last $n-1$ equations. On the one hand \[ ||E_i||\leqslant \ell^{\ell/(2n)} {\mathfrak D}^{1/2}\] for every $1\leqslant i\leqslant \ell +1 -n$. On the other hand the scheme $2P$ is well poised and the ${\mathbf C}$-vector space generated by the $E_i$ for $1\leqslant i\leqslant \ell+1-n$ has codimension $n-1 < n$ in $L\otimes_{\mathbf R}{\mathbf C}$. So there exists at least one embedding $\tau$ such that the $(\ell +1 -n)\times r$ matrix \[\left((\partial E_i / \partial x_j)(P_\tau)\right)_{1\leqslant i\leqslant \ell+1-n, \, 1\leqslant j\leqslant r}\] has maximal rank $r$. In more geometric terms the ${\mathbf C}$-vector space generated by the $\ell+1-n$ first equations $(E_i)_{1\leqslant i\leqslant \ell +1-n}$ surjects onto the cotangent space to $\mathbf P^r_{\mathbf C}$ at the geometric point $P_\tau$ for at least one $\tau$. This means that there exist $r$ integers $1\leqslant i_1 < i_2 < \dots < i_r \leqslant \ell +1-n$ such that the minor determinant \[\det \left( (\partial E_{i_k} / \partial x_j)(P_\tau)\right)_{1\leqslant k,\, j \leqslant r}\] is non-zero for some $\tau$ and thus for all $\tau$ by Galois action.
\begin{proposition}[Number fields have small models]\label{prop:sm} Let ${\mathbf K}$ be a number field of degree $n$ and discriminant $d_{\mathbf K}$ over ${\mathbf Q}$. Let $d\geqslant 5$ and $r\geqslant 1$ be rational integers such that \[n(r+1)\leqslant {d+r\choose d}.\] There exist $r$ polynomials $E_1$, $E_2$, \dots , $E_r$ of degree $\leqslant d$ in ${\mathbf Z}[x_1, \ldots, x_r]$ having coefficients bounded in absolute value by \[\ell^{\ell/(2n)} \times {d+r\choose d}^{1/2}(n^2d(r+1))^{d}d_{\mathbf K}^{2d/n}\] where \[\ell = {d+r\choose d}-n,\] and such that the (smooth and zero-dimensional affine) scheme with equations \[E_1 = E_2 = \dots = E_r=0 \text{ and } \det \left( \partial E_i/\partial x_j \right)_{1\leqslant i, \, j\leqslant r} \not = 0\] contains $\mathop{\rm{Spec}}\nolimits {\mathbf K}$ as one of its irreducible components. \end{proposition} \section{Proof of main results}\label{sec:proof} In this section, the notation ${\mathcal O}$ stands for a positive absolute constant. Any sentence containing this symbol becomes true if the symbol is replaced in every occurrence by some large enough real number. We specialize the values of the parameters $r$ and $d$ in Proposition \ref{prop:sm}. We will take $d=r$. It is evident that ${2r \choose r}\geqslant 2^r$ so \[\frac{1}{r+1}{2r \choose r}\geqslant 2^{\frac{r}{2}}\] for $r$ large enough. Further \[\frac{1}{r+2}{2r+2 \choose r+1}\leqslant \frac{1}{r+1}{2r \choose r}\times 4.\] We choose $r$ to be the smallest positive integer such that $n(r+1)\leqslant {2r\choose r}$. We have \begin{equation}\label{eq:choo} n(r+1)\leqslant {2r\choose r}\leqslant 4n(r+1) \text{ and } r\leqslant 3\log n \end{equation} for $n$ large enough. We deduce that $\ell = {2r\choose r}-n\leqslant 4n(r+1)\leqslant {\mathcal O} n\log n$. So \[\ell^{\ell/(2n)}\leqslant n^{{\mathcal O} \log n}.\] From Equation (\ref{eq:choo}) we deduce that ${2r\choose r}\leqslant {\mathcal O} n\log n$.
Also $n^2d(r+1)\leqslant {\mathcal O} n^2\log^2 n$ and \[\left(n^2d(r+1)\right)^{r}\leqslant n^{{\mathcal O} \log n}.\] So the coefficients of equations $E_i$ are bounded in absolute value by \[n^{{\mathcal O} \log n} d_{\mathbf K}^{\frac{{\mathcal O} \log n}{n}}.\] This proves Theorem \ref{th:sm}. Theorem \ref{th:nf} follows because there are $r{2r\choose r}$ coefficients to be fixed. We note also that there may appear several number fields in the smooth zero dimensional part of the complete intersection $E_1=E_2=\dots=E_r=0$. However the Chow class of this intersection is $r^r\leqslant (\log n)^{{\mathcal O}\log n}$ and the number of isolated points is bounded by this intersection number \cite[Chapter 13]{Fulton}. \end{document}
\begin{document} \noindent{Published in: \textit{Acta Applicandae Mathematicae.} \textbf{88} (2005), 2: 143-175} \noindent{\LARGE\bf Conditional Log-Laplace Functionals of} \noindent{\LARGE\bf Immigration Superprocesses with Dependent} \noindent{\LARGE\bf Spatial Motion} \noindent{Zenghu Li\,$^a$\footnote{ Supported by the NSFC (No.\,10121101 and No.\,10131040).}, Hao Wang\,$^b$\footnote{ Supported by the research grant of UO.} and Jie Xiong\,$^c$\footnote{ Research supported partially by NSA and by Alexander von Humboldt Foundation.}} \noindent{\small $^a$ School of Mathematical Sciences, Beijing Normal University, Beijing 100875, P.R. China} \noindent{\small E-mail: \tt [email protected]} \noindent{\small $^b$ Department of Mathematics, University of Oregon, Eugene OR 97403-1222, U.S.A.} \noindent{\small E-mail: \tt [email protected]} \noindent{\small $^c$ Department of Mathematics, University of Tennessee, Knoxville, TN 37996-1300, U.S.A. and} \noindent{\small Department of Mathematics, Hebei Normal University, Shijiazhuang 050016, P.R. China} \noindent{\small E-mail: \tt [email protected]} \noindent{(Received: 7 August 2003; in final form: 23 February 2005)} \noindent{\bf Abstract.} A non-critical branching immigration superprocess with dependent spatial motion is constructed and characterized as the solution of a stochastic equation driven by a time-space white noise and an orthogonal martingale measure. A representation of its conditional log-Laplace functionals is established, which gives the uniqueness of the solution and hence its Markov property. Some properties of the superprocess including an ergodic theorem are also obtained.
\noindent{\bf Mathematics Subject Classification (2000):} Primary 60J80, 60G57; Secondary 60J35 \noindent{\bf Key words and phrases:} branching particle system, superprocess, dependent spatial motion, immigration process, non-linear SPDE, conditional log-Laplace functional \section{Introduction} \setcounter{equation}{0} A class of superprocesses with dependent spatial motion (SDSM) over the real line $\mathbb}\def\mbf{\mathbf{R}$ were introduced and constructed in Wang \cite{W97, W98}. A generalization of the model was then given in Dawson \textit{et al} \cite{DLW01}. Let $c \in C^2_b(\mathbb}\def\mbf{\mathbf{R})$ and $h \in C^2_b(\mathbb}\def\mbf{\mathbf{R})$ and assume both $h$ and $h^\prime$ are square-integrable. Let \begin{eqnarray*} \rho(x) = \int_{\mathbb}\def\mbf{\mathbf{R}}h(y-x)h(y) dy, \quad x\in\mathbb}\def\mbf{\mathbf{R}, \end{eqnarray*} and $a(x) = c(x)^2 + \rho(0)$. Let $\sigma\in C^2_b(\mathbb}\def\mbf{\mathbf{R})^+$ be a strictly positive function. We denote by $M(\mathbb}\def\mbf{\mathbf{R})$ the space of finite Borel measures on $\mathbb}\def\mbf{\mathbf{R}$ endowed with a metric compatible with its topology of weak convergence. For $f\in C_b(\mathbb}\def\mbf{\mathbf{R})$ and $\mu\in M(\mathbb}\def\mbf{\mathbf{R})$ set $\langle}\def\>{\ranglef,\mu\> = \int fd\mu$. Then an SDSM $\{X_t: t\ge0\}$ is characterized by the following martingale problem: For each $\phi \in C^2_b(\mathbb}\def\mbf{\mathbf{R})$, \begin{eqnarray}\label{1.1} M_t(\phi) = \langle}\def\>{\rangle\phi,X_t\> - \langle}\def\>{\rangle\phi,X_0\> - \frac{1}{\,2\,} \int_0^t \langle}\def\>{\ranglea\phi^{\prime\prime},X_s\> ds, \quad t\ge0, \end{eqnarray} is a continuous martingale with quadratic variation process \begin{eqnarray}\label{1.2} \langle}\def\>{\rangleM(\phi)\>_t = \int_0^t\langle}\def\>{\rangle\sigma\phi^2,X_s\> ds + \int_0^t ds\int_{\mathbb}\def\mbf{\mathbf{R}} \langle}\def\>{\rangleh(z - \cdot) \phi^\prime, X_s\>^2 dz. 
\end{eqnarray} Clearly, the SDSM reduces to a usual critical branching Dawson-Watanabe superprocess if $h(\cdot) \equiv 0$; see e.g.\ Dawson \cite{D93}. A general SDSM arises as the weak limit of critical branching particle systems with dependent spatial motion. Consider a family of independent Brownian motions $\{B_i(t): t\ge0, i=1,2,\cdots\}$, the individual noises, and a time-space white noise $\{W_t(B): t\ge 0, B\in {\cal B}(\mathbb}\def\mbf{\mathbf{R})\}$, the common noise. The migration of a particle in the approximating system with label $i$ is defined by the stochastic equation \begin{eqnarray}\label{1.3} dx_i(t) = c(x_i(t)) dB_i(t) + \int_{\mathbb}\def\mbf{\mathbf{R}} h(y-x_i(t))W(dt,dy), \end{eqnarray} where $W(ds,dy)$ denotes the time-space stochastic integral relative to $\{W_t(B)\}$. The SDSM possesses properties very different from those of the usual Dawson-Watanabe superprocess. For example, a Dawson-Watanabe superprocess in $M(\mathbb}\def\mbf{\mathbf{R})$ is usually absolutely continuous whereas the SDSM with $c(\cdot) \equiv 0$ is purely atomic; see \cite{KS88} and \cite{W97, W02}, respectively. In this paper, we consider a further extension of the model of Wang \cite{W97, W98}. Let $b\in C^2_b(\mathbb}\def\mbf{\mathbf{R})$ and let $m\in M(\mathbb}\def\mbf{\mathbf{R})$. A modification of the above martingale problem is to replace (\ref{1.1}) by \begin{eqnarray}\label{1.4} M_t(\phi) = \langle}\def\>{\rangle\phi,X_t\> - \langle}\def\>{\rangle\phi,X_0\> - t\langle}\def\>{\rangle\phi,m\> - \frac{1}{2}\int_0^t\langle}\def\>{\ranglea\phi^{\prime\prime}, X_s\> ds + \int_0^t\langle}\def\>{\rangleb\phi,X_s\> ds. \end{eqnarray} We shall prove that there is indeed a solution $\{X_t: t\ge0\}$ to the martingale problem given by (\ref{1.2}) and (\ref{1.4}). The process $\{X_t: t\ge0\}$ may be regarded as a non-critical branching \textit{SDSM with immigration} (SDSMI), where $b(\cdot)$ is the linear growth rate and $m(dx)$ gives the immigration rate. 
This modification is related to the recent work of Dawson and Li \cite{DL03}, where an interactive immigration given by \begin{eqnarray}\label{1.5} \int_0^t\langle}\def\>{\rangleq(\cdot,X_s)\phi, m\>ds \end{eqnarray} was considered, where $q(\cdot,\cdot)$ is a function on $\mathbb}\def\mbf{\mathbf{R}\times M(\mathbb}\def\mbf{\mathbf{R})$ representing a state dependent immigration density. However, it was assumed in \cite{DL03} that $b(\cdot) \equiv c(\cdot) \equiv 0$ and the approach there relies essentially on the purely atomic property of the process, which is not available for the present model. The main purpose of the paper is to give a representation of the conditional log-Laplace functionals of solution of (\ref{1.2}) and (\ref{1.4}) and to illustrate some applications of the representation. This approach was stimulated by Xiong \cite{X04}, who established a similar characterization for the model of Skoulakis and Adler \cite{SA01}. The key idea of the representation is to decompose the martingale (\ref{1.4}) into two orthogonal components, which arise respectively from the migration and the branching. Since the decomposition uses additional information which is not provided by (\ref{1.2}) and (\ref{1.4}), we shall start with the corresponding particle system and consider the high density limit following \cite{DVW00}. In this way, we can easily separate the two kinds of noises. 
It turns out that the common migration noise $\{W(ds,dy)\}$ remains after the limit procedure and the limit process satisfies the following martingale problem: For each $\phi \in C^2_b(\mathbb}\def\mbf{\mathbf{R})$, \begin{eqnarray}\label{1.6} Z_t(\phi) &=& \langle}\def\>{\rangle\phi,X_t\> - \langle}\def\>{\rangle\phi,X_0\> - t\langle}\def\>{\rangle\phi,m\> - \frac{1}{2}\int_0^t\langle}\def\>{\ranglea\phi^{\prime\prime}, X_s\> ds \nonumber \\ & & + \int_0^t\langle}\def\>{\rangleb\phi,X_s\> ds - \int_0^t\int_{\mathbb}\def\mbf{\mathbf{R}}\langle}\def\>{\rangleh(y-\cdot) \phi^\prime,X_s\> W(ds,dy) \end{eqnarray} is a continuous martingale orthogonal to $\{W_t(\phi)\}$ with quadratic variation process \begin{eqnarray}\label{1.7} \langle}\def\>{\rangleZ(\phi)\>_t = \int_0^t\langle}\def\>{\rangle\sigma\phi^2, X_s\>ds. \end{eqnarray} This formulation suggests that we may regard $\{X_t: t\ge0\}$ as a generalized inhomogeneous Dawson-Watanabe superprocess with immigration, where \begin{eqnarray*} \int_{\mathbb}\def\mbf{\mathbf{R}}h(y-\cdot) W(dt,dy) \end{eqnarray*} gives a generalized drift in the underlying migration. Based on the techniques developed in Kurtz and Xiong \cite{KX99, X04}, we prove that for each $\phi \in H_1(\mathbb}\def\mbf{\mathbf{R})\cap C_b(\mathbb}\def\mbf{\mathbf{R})$ there is a pathwise unique solution of the non-linear SPDE \begin{eqnarray}\label{1.8} \psi_{r,t}(x) &=& \phi(x) + \int_r^t \bigg[\frac{1}{2} a(x)\psi_{s,t} ^{\prime\prime}(x) - \frac{1}{2}\sigma(x) \psi_{s,t}(x)^2\bigg] ds \nonumber \\ & & - \int_r^t b(x)\psi_{s,t}(x) ds + \int_r^t\int_{\mathbb}\def\mbf{\mathbf{R}} h(y-x)\psi_{s,t}^\prime(x) \cdot W(ds,dy), \end{eqnarray} where the last term on the right hand side denotes the backward stochastic integral with respect to the white noise. Then we show that the conditional log-Laplace functionals of $\{X_t: t\ge0\}$ given $\{W(ds,dy)\}$ can be represented by the solution of (\ref{1.8}). 
The representation of the conditional log-Laplace functionals is proved by direct analysis based on (\ref{1.6}), (\ref{1.7}) and (\ref{1.8}). This approach is different from that of Xiong \cite{X04}, where a Wong-Zakai type approximation was used. The idea of conditional log-Laplace approach has also been used by Crisan \cite{C04} for a different model. In fact, the approach in Section 5 is adapted from \cite{C04} which simplifies our original arguments. It is well-known that non-conditional log-Laplace functionals play very important roles in the study of classical Dawson-Watanabe superprocesses. We shall see that conditional Laplace functionals are almost as efficient as the non-conditional Laplace functionals in studying some properties of the SDSMI. In particular, the characterization of the conditional Laplace functionals gives immediately the uniqueness of solution of (\ref{1.6}) and (\ref{1.7}), which in turn implies the Markov property of $\{X_t: t\ge0\}$. It follows that $\{X_t: t\ge0\}$ is a diffusion process with generator ${\cal L}$ given by \begin{eqnarray}\label{1.9} {\cal L} F(\mu) &=& \frac{1}{2}\int_{\mathbb}\def\mbf{\mathbf{R}^2}\rho(x-y)\frac{d^2}{dxdy} \frac{\delta^2 F(\mu)}{\delta\mu(x)\delta\mu(y)} \mu(dx)\mu(dy) \nonumber \\ & & + \frac{1}{2}\int_{\mathbb}\def\mbf{\mathbf{R}} a(x)\frac{d^2}{dx^2} \frac{\delta F(\mu)}{\delta\mu(x)}\mu(dx) + \frac{1}{2} \int_{\mathbb}\def\mbf{\mathbf{R}} \sigma(x)\frac{\delta^2 F(\mu)} {\delta\mu(x)^2} \mu(dx) \nonumber \\ & & - \int_{\mathbb}\def\mbf{\mathbf{R}} b(x)\frac{\delta F(\mu)} {\delta\mu(x)} \mu(dx) + \int_{\mathbb}\def\mbf{\mathbf{R}} \frac{\delta F(\mu)} {\delta\mu(x)} m(dx), \end{eqnarray} where \begin{eqnarray}\label{1.10} \frac{\delta F(\mu)}{\delta\mu(x)} = \lim_{r\to 0^+}\frac{1}{\,r\,}[F(\mu + r\delta_x) - F(\mu)] \end{eqnarray} and $\delta^2F(\mu) / \delta\mu(x)\delta\mu(y)$ is defined in the same way with $F$ replaced by $(\delta F/ \delta\mu(y))$ on the right hand side; see Section 3. 
We also prove some properties of the SDSMI including an ergodic theorem. There are also some other applications of the conditional log-Laplace functional. For instance, based on this characterization the conditional excursion theory of the SDSM have been developed in \cite{LWX04b}. However, consideration of the interactive immigration (\ref{1.5}) for this present process seems sophisticated. The remainder of the paper is organized as follows. In Section 2 we give a formulation of the system of branching particles with dependent spatial motions and immigration. Some useful estimates of the moments of the system are also given. In Section 3 we obtain a solution of the martingale problem (\ref{1.6}) and (\ref{1.7}) as the high density limit of a sequence of particle systems. The existence and uniqueness of the solution of (\ref{1.8}) is established in Section 4. In Section 5 we give the representation of the conditional log-Laplace functionals of the solution of (\ref{1.6}) and (\ref{1.7}). Some properties of the SDSMI are discussed in Section 6. \section{Branching particle systems} \setcounter{equation}{0} The main purpose of this section is to give an explicit construction for the immigration branching particle system with dependent spatial motion by modifying the constructions of \cite{DVW00, W86}. This construction provides a useful set up of the process. We start with a simple interacting particle system. Let $\theta > 0$ be a constant and $(c,h)$ be given as in the introduction. Let $N(\mathbb}\def\mbf{\mathbf{R}) \subset M(\mathbb}\def\mbf{\mathbf{R})$ be the set of integer-valued measures on $\mathbb}\def\mbf{\mathbf{R}$ and let $M_\theta(\mathbb}\def\mbf{\mathbf{R}) := \{\theta^{-1} \sigma: \sigma \in N(\mathbb}\def\mbf{\mathbf{R})\}$. Given $\{a_i: i=1,\cdots,n\}$, let $\{x_i(t): t\ge0, i=1,\cdots,n\}$ be given by \begin{eqnarray}\label{2.1} x_i(t) = a_i + \int_0^tc(x_i(s)) dB_i(s) + \int_0^t\int_{\mathbb}\def\mbf{\mathbf{R}} h(y-x_i(s))W(dy, ds). 
\end{eqnarray} We may define a measure-valued process $\{X_t: t\ge0\}$ by \begin{eqnarray}\label{2.2} \langle}\def\>{\rangle\phi,X_t\> = \sum_{i=1}^n \theta^{-1}\phi(x_i(t)), \qquad t\ge0. \end{eqnarray} By the discussions in \cite{DLW01, W97, W98}, the process $\{X_t: t\ge0\}$ is a diffusion in $M_\theta (\mathbb}\def\mbf{\mathbf{R})$. Let ${\cal A}_\theta$ denote the generator of this diffusion process. If $F_{f,\{\phi_i\}} (\mu) := f(\langle}\def\>{\rangle\phi_1,\mu\>, \cdots, \langle}\def\>{\rangle\phi_n,\mu\>)$ for $f\in C^2_0(\mathbb}\def\mbf{\mathbf{R}^n)$ and $\{\phi_i\}\subset C^2_b(\mathbb}\def\mbf{\mathbf{R})$, by It\^o's formula it is easy to see that \begin{eqnarray}\label{2.3} {\cal A}_\theta F_{f,\{\phi_i\}}(\mu) &=& \frac{1}{2}\sum_{i,j=1}^n f_{ij}^{\prime\prime}(\langle}\def\>{\rangle\phi_1, \mu\>,\cdots,\langle}\def\>{\rangle\phi_n,\mu\>)\int_{\mathbb}\def\mbf{\mathbf{R}^2}\rho(x-y) \phi_i^\prime(x)\phi_j^\prime(y)\mu(dx)\mu(dy) \nonumber \\ & & +\,\frac{1}{2}\sum_{i=1}^n f_i^\prime(\langle}\def\>{\rangle\phi_1,\mu\>,\cdots, \langle}\def\>{\rangle\phi_n,\mu\>)\langle}\def\>{\ranglea\phi_i^{\prime\prime},\mu\> \nonumber \\ & & +\,\frac{1}{2\theta}\sum_{i,j=1}^n f_{ij}^{\prime\prime} (\langle}\def\>{\rangle\phi_1,\mu\>,\cdots,\langle}\def\>{\rangle\phi_n,\mu\>) \langle}\def\>{\ranglec^2\phi_i^\prime \phi_j^\prime,\mu\>. 
\end{eqnarray} More generally, if $F$ is a function on $M_\theta(\mathbb}\def\mbf{\mathbf{R})$ that can be extended to a sufficiently smooth function on $M(\mathbb}\def\mbf{\mathbf{R})$, then \begin{eqnarray}\label{2.4} {\cal A}_\theta F(\mu) &=& \frac{1}{2}\int_{\mathbb}\def\mbf{\mathbf{R}^2}\rho(x-y)\frac{d^2}{dxdy} \frac{\delta^2 F(\mu)}{\delta\mu(x)\delta\mu(y)} \mu(dx)\mu(dy) \nonumber \\ & & +\,\frac{1}{2}\int_{\mathbb}\def\mbf{\mathbf{R}} a(x)\frac{d^2}{dx^2}\frac{\delta F(\mu)}{\delta\mu(x)}\mu(dx) \nonumber \\ & & +\,\frac{1}{2\theta}\int_{\mathbb}\def\mbf{\mathbf{R}^2} c(x)c(y) \frac{d^2}{dxdy} \frac{\delta^2 F(\mu)}{\delta\mu(x)\delta\mu(y)} \delta_x(dy)\mu(dx), \end{eqnarray} where $\delta F(\mu) / \delta\mu(x)$ and $\delta^2F(\mu) / \delta\mu(x)\delta\mu(y)$ are defined as in the introduction. This can be seen by approximating the function $F$ by functions of the form $F_{f,\{\phi_i\}}$. A more interesting particle system involves branching and immigration. Let $\gamma>0$ be a constant and let $m\in M(\mathbb}\def\mbf{\mathbf{R})$. Let $p(x,\cdot) = \{p_0(x), p_1(x), p_2(x), \cdots\}$ be a family of discrete probability distributions which measurably depends on the index $x\in \mathbb}\def\mbf{\mathbf{R}$ and satisfies $p_1(\cdot) \equiv 0$. In addition, we assume that \begin{eqnarray}\label{2.5} q(x) := \sum_{i=1}^\infty ip_i(x), \quad x\in \mathbb}\def\mbf{\mathbf{R}, \end{eqnarray} is a bounded function. We shall construct an immigration branching particle system with parameters $(a,\rho,\gamma,p,\theta m,1/\theta)$. Let ${\cal A}$ be the set of all strings of the form $\alpha = n_0n_1 \cdots n_{l(\alpha)}$, where $l(\alpha)$ is the length of $\alpha$ and the $n_j$ are non-negative integers with $0\le n_0\le 1$ and $n_j\ge1$ for $j\ge1$. We shall label the particles by the strings in ${\cal A}$. We here use the first digit $n_0$ in the string to distinguish the aboriginal and the immigratory particles. 
More precisely, strings started with $0$ refer to descendants of aboriginal ancestors and strings started with $1$ refer to descendants of immigratory ancestors. (Note that the first digit is not counted in the length $l(\alpha)$.) We provide ${\cal A}$ with the arboreal ordering, that is, $m_0\cdots m_p \prec n_0\cdots n_q$ if and only if $p\le q$ and $m_0=n_0, \cdots, m_p=n_p$. Then $\alpha$ has exactly $l(\alpha)$ predecessors, which we denote respectively by $\alpha-1$, $\alpha-2$, $\cdots$, $\alpha - l(\alpha)$. For example, if $\alpha = 12431$, then $\alpha-2 = 124$ and $\alpha-4 = 1$. We need a collection of random variables to construct the immigration branching particle system. Let $\{a_{01}, \cdots, a_{0n}\}$ be a finite sequence of real-valued random variables. Let $\{W(ds,dx): s\ge0, x\in \mathbb}\def\mbf{\mathbf{R}\}$ be a time-space white noise and $\{N(ds,dx): s\ge0, x\in \mathbb}\def\mbf{\mathbf{R}\}$ a Poisson random measure with intensity $\theta dsm(dx)$. We shall assume $\langle}\def\>{\rangle1,m\> >0$, otherwise the construction of the immigration part is trivial. In this case, we can enumerate the atoms of $N(ds,dx)$ as \begin{eqnarray}\label{2.6} \{(s_i,a_{1i}): 0<s_1<s_2<\cdots, a_{1i}\in\mathbb}\def\mbf{\mathbf{R}\}. \end{eqnarray} We also define the families \begin{eqnarray}\label{2.7} \{B_\alpha(t): t\ge0, \alpha\in{\cal A}\},\quad \{S_\alpha: \alpha\in{\cal A}\},\quad \{\eta_{a,\alpha}: a\in\mathbb}\def\mbf{\mathbf{R},\alpha\in{\cal A}\}, \end{eqnarray} where $\{B_\alpha\}$ are independent standard Brownian motions, $\{S_\alpha\}$ are i.i.d.\ exponential random variables with parameter $\gamma$, and $\{\eta_{a,\alpha}\}$ are independent random variables with distribution $p(a,\cdot)$. We assume that the families $\{W(ds,dx)\}$, $\{N(ds,dx)\}$, $\{a_{0i}\}$, $\{B_\alpha\}$, $\{S_\alpha\}$ and $\{\eta_{a,\alpha}\}$ are independent. 
We define $\beta_{0n_1} = 0$ if $1\le n_1 \le n$ and $\beta_{0n_1} = \infty$ if $n_1 > n$, and define $\beta_{1n_1} = s_{n_1}$ for all $n_1\ge 1$. For $\alpha\in{\cal A}$ with $l(\alpha)= 1$ we let $\zeta_{\alpha} = \beta_{\alpha} + S_{\alpha}$. Heuristically, $S_\alpha$ is the life-span of the particle with label $\alpha$, $\beta_\alpha$ is its birth time and $\zeta_\alpha$ is its death time. The random variables $a_\alpha$ defined above can be interpreted as the birth place of the particle with label $\alpha$. The trajectory $\{x_\alpha(t): t\ge\beta_\alpha\}$ of the particle is the solution of the equation \begin{eqnarray}\label{2.8} x(\beta_\alpha+t) = a_\alpha + \int_{\beta_\alpha}^{\beta_\alpha+t}c(x(s))dB_\alpha(s) + \int_{\beta_\alpha}^{\beta_\alpha+t}\int_{\mathbb}\def\mbf{\mathbf{R}} h(y-x(s)) W(ds,dy). \end{eqnarray} For $\alpha\in {\cal A}$ with $l(\alpha)>1$ the trajectory $\{x_\alpha (t): t\ge\beta_\alpha\}$ is defined by the above equation with $a_\alpha = x_{\alpha-1} (\zeta_{\alpha-1}^-)$, $\zeta_{\alpha} = \beta_{\alpha} + S_{\alpha}$ and \begin{eqnarray}\label{2.9} \beta_\alpha =\left\{\begin{array}{ll} \zeta_{\alpha-1} &\mbox{ if $n_{l(\alpha)} \le \eta_{x_{\alpha-1}(\zeta_{\alpha-1}-),\alpha-1}$} \\ \infty &\mbox{ if $n_{l(\alpha)} > \eta_{x_{\alpha-1}(\zeta_{\alpha-1}-),\alpha-1}$,} \end{array}\right. \end{eqnarray} where $x_{\alpha-1}(\zeta_{\alpha-1}-)$ denotes the left limit of $x_{\alpha-1}(t)$ at $t=\zeta_{\alpha-1}$. Clearly, \begin{eqnarray}\label{2.10} \langle}\def\>{\rangle\phi,Y_t\> = \sum_{\alpha\in{\cal A}} \theta^{-1} \phi(x_\alpha(t)) 1_{[\beta_\alpha,\zeta_\alpha)}(t), \qquad t\ge0. \end{eqnarray} defines an $M_\theta(\mathbb}\def\mbf{\mathbf{R})$-valued process $\{Y_t: t\ge0\}$. It is easy to see that $\{Y_t: t\ge0\}$ has countably many jumps, and between those jumps it behaves just as the diffusion process $\{X_t: t\ge0\}$ constructed by (\ref{2.2}). 
We call $\{Y_t: t\ge0\}$ an \textit{immigration branching particle system} with parameters $(c,h,\gamma,p,\theta m,1/\theta)$. Intuitively, $p(x,\cdot)$ gives the location dependent offspring distribution and $\{N(ds,dx)\}$ gives the landing times and sites of the immigrants. Indeed, we may regard $\{Y_t: t\ge0\}$ as a concatenation of a sequence of independent copies of $\{X_t: t\ge0\}$. We refer the reader to \cite{S88} for discussions of concatenation of general Markov processes. As in \cite{LLW04} it can be seen that $\{Y_t: t\ge0\}$ is a Markov process with generator ${\cal L}_\theta := {\cal A}_\theta + {\cal B}_\theta$, where \begin{eqnarray}\label{2.11} {\cal B}_\theta F(\mu) &=& \sum_{j=0}^\infty \int_{\mathbb}\def\mbf{\mathbf{R}} \theta \gamma p_j(x) \big[F\big(\mu + (j-1)\theta^{-1} \delta_x\big) - F(\mu)\big] \mu(dx) \nonumber \\ & &\quad + \int_{\mathbb}\def\mbf{\mathbf{R}} \theta \big[F\big(\mu + \theta^{-1} \delta_x\big) - F(\mu)\big] m (dx). \end{eqnarray} The first term on the right hand side of (\ref{2.11}) represents the jumps given by the branching and the second terms represents the jumps given by the immigration. In particular, it is easy to show that \begin{eqnarray}\label{2.12} {\cal B}_\theta F_{f,\{\phi_i\}}(\mu) &=& \sum_{j=0}^\infty \int_{\mathbb}\def\mbf{\mathbf{R}} \theta \gamma p_j(x) \big[f(\langle}\def\>{\rangle\phi_1,\mu\>+\theta^{-1}\phi_1(x),\cdots, \langle}\def\>{\rangle\phi_n,\mu\>+\theta^{-1}\phi_n(x)) \nonumber \\ & &\hskip2cm - f(\langle}\def\>{\rangle\phi_1,\mu\>,\cdots, \langle}\def\>{\rangle\phi_n,\mu\>)\big] \mu(dx) \nonumber \\ & & + \int_{\mathbb}\def\mbf{\mathbf{R}} \theta \big[f(\langle}\def\>{\rangle\phi_1,\mu\>+\theta^{-1}\phi_1(x),\cdots, \langle}\def\>{\rangle\phi_n,\mu\>+\theta^{-1}\phi_n(x)) \nonumber \\ & &\hskip2cm - f(\langle}\def\>{\rangle\phi_1,\mu\>,\cdots, \langle}\def\>{\rangle\phi_n,\mu\>)\big] m (dx). 
\end{eqnarray} Let ${\cal D}_1 ({\cal L}_\theta)$ denote the collection of all functions $F_{f,\{\phi_i\}}$ with $f\in C^2_0(\mathbb}\def\mbf{\mathbf{R}^n)$ and $\{\phi_i\}\subset C^2_b(\mathbb}\def\mbf{\mathbf{R})$. By the general theory of Markov processes, we have the following \begin{theorem}}\def\etheorem{\end{theorem}\label{t2.1} The process $\{Y_t: t\ge0\}$ defined by (\ref{2.10}) solves the $({\cal L}_\theta,{\cal D}_1 ({\cal L}_\theta))$-martingale problem, that is, for each $F \in {\cal D}_1 ({\cal L}_\theta)$, \begin{eqnarray*} F(X_t) - F(X_0) - \int_0^t {\cal L}_\theta F(X_s)ds, \qquad t\ge0, \end{eqnarray*} is a martingale. \etheorem Let us give another useful formulation of the immigration particle system. {From} (\ref{2.8}), (\ref{2.10}) and It\^o's formula we get \begin{eqnarray*} \langle}\def\>{\rangle\phi,Y_t\> &=& \langle}\def\>{\rangle\phi,Y_0\> + \sum_{i=1}^\infty \theta^{-1}\phi(a_{1i})1_{(0,t]}(s_i) \\ & & + \sum_{\alpha\in{\cal A}} [\eta_{x_\alpha(\zeta_\alpha-), \alpha} - 1]\theta^{-1}\phi(x_\alpha(\zeta_\alpha-)) 1_{(0,t]}(\zeta_\alpha) \\ & & + \sum_{\alpha\in{\cal A}}\int_0^t\theta^{-1}\phi^\prime (x_\alpha(s))1_{[\beta_\alpha,\zeta_\alpha)}(s)c(x_\alpha(s)) dB_\alpha(s) \\ & & + \sum_{\alpha\in{\cal A}}\int_0^t\int_{\mathbb}\def\mbf{\mathbf{R}}\theta^{-1} \phi^\prime(x_\alpha(s))1_{[\beta_\alpha,\zeta_\alpha)} (s) h(y-x_\alpha(s)) W(ds,dy) \\ & & + \frac{1}{2}\sum_{\alpha\in{\cal A}}\int_0^t\theta^{-1} \phi^{\prime\prime}(x_\alpha(s))1_{[\beta_\alpha, \zeta_\alpha)}(s) a(x_\alpha(s))ds, \end{eqnarray*} which can be rewritten as \begin{eqnarray}\label{2.13} \langle}\def\>{\rangle\phi,Y_t\> &=& \langle}\def\>{\rangle\phi,Y_0\> + \int_{(0,t]}\int_{\mathbb}\def\mbf{\mathbf{R}} \theta^{-1}\phi(x)N(ds,dx) \nonumber \\ & & + \sum_{\alpha\in{\cal A}} [\eta_{x_\alpha(\zeta_\alpha-), \alpha} - 1]\theta^{-1}\phi(x_\alpha(\zeta_\alpha-)) 1_{(0,t]}(\zeta_\alpha) \nonumber \\ & & + \sum_{\alpha\in{\cal A}}\int_0^t\theta^{-1}\phi^\prime 
(x_\alpha(s))1_{[\beta_\alpha,\zeta_\alpha)}(s) c(x_\alpha(s)) dB_\alpha(s) \nonumber \\ & & + \int_0^t\int_{\mathbb}\def\mbf{\mathbf{R}} \langle}\def\>{\rangleh(y-\cdot)\phi^\prime,Y_s\> W(ds,dy) + \frac{1}{2}\int_0^t \langle}\def\>{\ranglea\phi^{\prime\prime}, Y_s\> ds. \end{eqnarray} On the right hand side, the second term comes from the immigration, the third term represents branching of the particles, and the last three terms are determined by the spatial motion. It is not hard to see that, for any $\psi\in C_b(\mathbb}\def\mbf{\mathbf{R})$, \begin{eqnarray}\label{2.14} U_t(\psi) := \sum_{\alpha\in{\cal A}}\int_0^t \theta^{-1} \psi(x_\alpha(s)) 1_{[\beta_\alpha,\zeta_\alpha)}(s) c(x_\alpha(s)) dB_\alpha(s) \end{eqnarray} is a continuous local martingale with quadratic variation process \begin{eqnarray}\label{2.15} \langle}\def\>{\rangleU(\psi)\>_t := \int_0^t\langle}\def\>{\rangle\theta^{-1} c^2\psi^2,Y_s\> ds. \end{eqnarray} In the sequel, we assume \begin{eqnarray}\label{2.16} \sigma(x) = \sum_{i=0}^\infty p_i(x)(i-1)^2, \qquad x\in \mathbb}\def\mbf{\mathbf{R}, \end{eqnarray} is a bounded function on $\mathbb}\def\mbf{\mathbf{R}$. \begin{proposition}}\def\eproposition{\end{proposition}\label{p2.1} For any $\phi\in C_b(\mathbb}\def\mbf{\mathbf{R})$, \begin{eqnarray}\label{2.17} Z_t(\phi) := \sum_{\alpha\in{\cal A}} [\eta_{x_\alpha(\zeta_\alpha-),\alpha} - 1] \theta^{-1} \phi(x_\alpha(\zeta_\alpha-))1_{(0,t]} (\zeta_\alpha) - \int_0^t\langle}\def\>{\rangle\gamma(q-1)\phi,Y_s\> ds \end{eqnarray} is a local martingale with predictable quadratic variation process \begin{eqnarray}\label{2.18} \langle}\def\>{\rangleZ(\phi)\>_t = \int_0^t\langle}\def\>{\rangle\theta^{-1}\gamma\sigma\phi^2,Y_s\> ds. \end{eqnarray} \eproposition \noindent{\it Proof.~~} Recall that $\{S_\alpha\}$ are i.i.d.\ exponential random variables with parameter $\gamma$. 
Let \begin{eqnarray}\label{2.19} J_t(\phi) = \sum_{\alpha\in{\cal A}} \theta^{-1}[\eta_{x_\alpha (\zeta_\alpha-),\alpha} - 1]\phi(x_\alpha(\zeta_\alpha-)) 1_{(0,t]}(\zeta_\alpha). \end{eqnarray} Observe that the process $\{J_t(\phi): t\ge0\}$ jumps only when a particle in the population splits. It is not hard to show that $\{(Y_t,J_t(\phi)): t\ge0\}$ is a Markov process with generator ${\cal J}_\theta$ such that \begin{eqnarray*} {\cal J}_\theta F(\mu,z) &=& {\cal A}_\theta F(\cdot,z)(\mu) + \int_{\mathbb}\def\mbf{\mathbf{R}} \theta [F(\mu + \theta^{-1}\delta_x,z) - F(\mu,z)] m(dx) \nonumber \\ & & + \sum_{j=0}^\infty \int_{\mathbb}\def\mbf{\mathbf{R}} \theta \gamma p_j(x) [F(\mu + (j-1)\theta^{-1} \delta_x,z + (j-1)\theta^{-1}\phi(x)) - F(\mu,z)] \mu(dx). \end{eqnarray*} In particular, if $F(\mu,z) = z$, then \begin{eqnarray*} {\cal J}_\theta F(\mu,z) = \sum_{j=0}^\infty \int_{\mathbb}\def\mbf{\mathbf{R}} \gamma p_j(x)(j-1)\phi(x) \mu(dx) = \langle}\def\>{\rangle\gamma(q-1)\phi,\mu\>. \end{eqnarray*} This shows that (\ref{2.17}) is a local martingale. Let $\Delta_n := \{0= t_{n,0} < t_{n,1} < \cdots < t_{n,n} =t\}$ be a sequence of partitions of $[0,t]$ such that $D_n := \max_{1\le i\le n} |t_{n,i} - t_{n,i-1}| \to 0$ as $n \to \infty$. Since the second term on the right hand side of (\ref{2.17}) is of locally finite variations, we have \begin{eqnarray*} [Z(\phi)]_{t\land\tau_l} &:=& \lim_{n\to \infty}\sum_{i=0}^n |Z_{t_{n,i}\land\tau_l}(\phi) - Z_{t_{i-1}\land\tau_l}(\phi)|^2 \nonumber \\ &=& \sum_{\alpha\in{\cal A}} \theta^{-2} [\eta_{x_\alpha (\zeta_\alpha-),\alpha} - 1]^2\phi(x_\alpha (\zeta_\alpha-))^2 1_{(0,t\land\tau_l]}(\zeta_\alpha). \end{eqnarray*} By martingale theory, $Z_{t\land\tau_l} (\phi)^2 - [Z(\phi)]_{t \land \tau_l}$ is a martingale. Note that $[Z(\phi)]_{t \land \tau_l}$ has same jump times as $J_{t\land\tau_l} (\phi)$ but with squared jump sizes. 
By an argument similar to the beginning of this proof, we conclude that $[Z(\phi)]_{t \land \tau_l} - \langle}\def\>{\rangleZ(\phi)\>_{t \land \tau_l}$ is a martingale. Then $\langle}\def\>{\rangleZ(\phi)\>_{t \land \tau_l}$ is a predictable process such that $Z_{t\land\tau_l}(\phi)^2 - \langle}\def\>{\rangleZ(\phi)\>_{t \land \tau_l}$ is a martingale, implying the desired result. \qed Let $\tilde N(ds,dx) = N(ds, dx) - \theta dsm(dx)$. Note that the assumptions on independence imply that the four martingale measures $\{W(ds,dx)\}$, $\{\tilde N(ds,dx)\}$, $\{Z(ds,dx)\}$ are $\{U(ds,dx)\}$ are orthogonal to each other. Now we may rewrite (\ref{2.13}) into \begin{eqnarray}\label{2.20} \langle}\def\>{\rangle\phi,Y_t\> &=& \langle}\def\>{\rangle\phi,Y_0\> + t\langle}\def\>{\rangle\phi,m\> + \int_{(0,t]}\int_{\mathbb}\def\mbf{\mathbf{R}} \theta^{-1}\phi(x) \tilde N(ds,dx) \nonumber \\ & & + \int_0^t\langle}\def\>{\rangle\gamma(q-1)\phi,Y_s\> ds + Z_t(\phi) + U_t(\phi^\prime) \nonumber \\ & & + \int_0^t\int_{\mathbb}\def\mbf{\mathbf{R}} \langle}\def\>{\rangleh(y-\cdot)\phi^\prime,Y_s\> W(ds,dy) + \frac{1}{2}\int_0^t \langle}\def\>{\ranglea\phi^{\prime\prime},Y_s\> ds. \end{eqnarray} Clearly, the third term on the right hand side of (\ref{2.20}) has a c\`adl\`ag modification. By \cite[p.69, Theorem VI.4]{DM82}, the martingale $\{Z_t(\phi): t\ge0\}$ has a c\`adl\`ag modification. All other terms on the right hand side have continuous modifications. Therefore, the measure-valued process $\{Y_t: t\ge0\}$ has a c\`adl\`ag modification and (\ref{2.20}) gives an SPDE formulation of this immigration branching particle system. The following result shows that (\ref{2.14}) and (\ref{2.17}) are in fact square-integrable martingales. \begin{proposition}}\def\eproposition{\end{proposition}\label{p2.2} Let $B_1 := \|\gamma(q-1)\|$ and $B_2 := \|\theta \gamma \sigma\|$, where $\|\cdot\|$ denotes the supremum norm. 
Then there is a locally bounded function $C_2$ on $\mathbb}\def\mbf{\mathbf{R}_+^3$ such that \begin{eqnarray}\label{2.21} \mbf{E}\{\mbox{$\sup_{0\le s\le t}$}\langle}\def\>{\rangle1,Y_s\>^2\} \le C_2(B_1,B_2,t) (1+\langle}\def\>{\rangle1,\mu\>^2 + \langle}\def\>{\rangle1,m\>^2), \quad t\ge 0. \end{eqnarray} \eproposition \noindent{\it Proof.~~} Applying (\ref{2.20}) to $\phi \equiv 1$ we get \begin{eqnarray}\label{2.22} \langle}\def\>{\rangle1,Y_t\> = \langle}\def\>{\rangle1,\mu\> + \theta^{-1} N((0,t] \times \mathbb}\def\mbf{\mathbf{R}) + \int_0^t\langle}\def\>{\rangle\gamma(q-1),Y_s\> ds + Z_t(1), \end{eqnarray} where $N((0,t] \times \mathbb}\def\mbf{\mathbf{R})$ is a Poisson random variable with parameter $\theta t\langle}\def\>{\rangle1,m\>$ and $\{Z_t(1): t\ge0\}$ is a local martingale with quadratic variation process \begin{eqnarray}\label{2.23} \langle}\def\>{\rangleZ(1)\>_t = \int_0^t\langle}\def\>{\rangle\theta^{-1}\gamma\sigma,Y_s\> ds. \end{eqnarray} Based on (\ref{2.22}) and (\ref{2.23}), the desired estimate follows by an application of Gronwall's inequality. \qed \section{Stochastic equation of the SDSMI} \setcounter{equation}{0} Let $(c, h, \sigma, b, m)$ be given as in the introduction. Suppose that $W(ds,dx)$ is a time-space white noise. For $\mu\in M(\mathbb}\def\mbf{\mathbf{R})$ we consider the stochastic equation: \begin{eqnarray}\label{3.1} \langle}\def\>{\rangle\phi,X_t\> &=& \langle}\def\>{\rangle\phi,\mu\> + t\langle}\def\>{\rangle\phi,m\> + \frac{1}{2}\int_0^t\langle}\def\>{\ranglea\phi^{\prime\prime}, X_s\> ds - \int_0^t\langle}\def\>{\rangleb\phi,X_s\> ds \nonumber \\ & & + \int_0^t\int_{\mathbb}\def\mbf{\mathbf{R}}\phi(y) Z(ds,dy) + \int_0^t\int_{\mathbb}\def\mbf{\mathbf{R}}\langle}\def\>{\rangleh(y-\cdot) \phi^\prime,X_s\> W(ds,dy), \end{eqnarray} where $Z(ds,dy)$ is an orthogonal martingale measure which is orthogonal to the white noise $W(ds,dy)$ and has covariation measure $\sigma(y)X_s(dy)ds$. 
Clearly, this is equivalent to the martingale problem given by (\ref{1.6}) and (\ref{1.7}). We shall prove that (\ref{3.1}) has a weak solution $\{X_t: t\ge0\}$, which will serve as a candidate of the SDSMI with parameters $(c, h, \sigma, b, m)$. For a function $F$ on $M(\mathbb}\def\mbf{\mathbf{R})$, let \begin{eqnarray}\label{3.2} {\cal A} F(\mu) &=& \frac{1}{2}\int_{\mathbb}\def\mbf{\mathbf{R}^2}\rho(x-y)\frac{d^2}{dxdy} \frac{\delta^2 F(\mu)}{\delta\mu(x)\delta\mu(y)} \mu(dx)\mu(dy) \nonumber \\ & &\quad +\,\frac{1}{2}\int_{\mathbb}\def\mbf{\mathbf{R}} a(x)\frac{d^2}{dx^2} \frac{\delta F(\mu)}{\delta\mu(x)}\mu(dx) \end{eqnarray} and \begin{eqnarray}\label{3.3} {\cal B} F(\mu) &=& \frac{1}{2} \int_{\mathbb}\def\mbf{\mathbf{R}} \sigma(x)\frac{\delta^2 F(\mu)} {\delta\mu(x)^2} \mu(dx) - \int_{\mathbb}\def\mbf{\mathbf{R}} b(x)\frac{\delta F(\mu)} {\delta\mu(x)} \mu(dx) \nonumber \\ & &\qquad + \int_{\mathbb}\def\mbf{\mathbf{R}} \frac{\delta F(\mu)} {\delta\mu(x)} m (dx) \end{eqnarray} if the right hand sides are meaningful. We shall also prove that $\{X_t: t\ge0\}$ solves a martingale problem associated with ${\cal L} := {\cal A} + {\cal B}$. It is easily seen that formally ${\cal A} = \lim_{\theta\to0} {\cal A}_\theta$ and ${\cal B} = \lim_{\theta\to0} {\cal B}_\theta$. Heuristically, $\{X_t: t\ge0\}$ arises as the high density limit of the immigration branching particle system discussed in the last section. 
In particular, if $F_{f,\{\phi_i\}} (\mu) = f(\langle}\def\>{\rangle\phi_1,\mu\>, \cdots, \langle}\def\>{\rangle\phi_n,\mu\>)$ for $f\in C^2_0(\mathbb}\def\mbf{\mathbf{R}^n)$ and $\{\phi_i\} \subset C^2_b(\mathbb}\def\mbf{\mathbf{R})$, then \begin{eqnarray}\label{3.4} {\cal A} F_{f,\{\phi_i\}}(\mu) &=& \frac{1}{2}\sum_{i,j=1}^n f_{ij}^{\prime\prime}(\langle}\def\>{\rangle\phi_1, \mu\>,\cdots,\langle}\def\>{\rangle\phi_n,\mu\>)\int_{\mathbb}\def\mbf{\mathbf{R}^2}\rho(x-y) \phi_i^\prime(x)\phi_j^\prime(y)\mu(dx)\mu(dy) \nonumber \\ & & +\,\frac{1}{2}\sum_{i=1}^n f_i^\prime(\langle}\def\>{\rangle\phi_1,\mu\>,\cdots, \langle}\def\>{\rangle\phi_n,\mu\>)\langle}\def\>{\ranglea\phi_i^{\prime\prime},\mu\> \end{eqnarray} and \begin{eqnarray}\label{3.5} {\cal B} F_{f,\{\phi_i\}}(\mu) &=& \frac{1}{2}\int_{\mathbb}\def\mbf{\mathbf{R}} \sigma(x)\bigg[\sum_{i,j=1}^n f^{\prime\prime}_{ij}(\langle}\def\>{\rangle\phi_1,\mu\>,\cdots, \langle}\def\>{\rangle\phi_n,\mu\>)\phi_i(x)\phi_j(x)\bigg] \mu(dx) \nonumber \\ & & - \int_{\mathbb}\def\mbf{\mathbf{R}} b(x)\bigg[\sum_{i=1}^n f^\prime_i (\langle}\def\>{\rangle\phi_1,\mu\>,\cdots,\langle}\def\>{\rangle\phi_n,\mu\>)\phi_i(x)\bigg] \mu(dx) \nonumber \\ & & + \int_{\mathbb}\def\mbf{\mathbf{R}} \bigg[\sum_{i=1}^n f^\prime_i (\langle}\def\>{\rangle\phi_1,\mu\>,\cdots,\langle}\def\>{\rangle\phi_n,\mu\>)\phi_i(x)\bigg] m(dx). \end{eqnarray} Let ${\cal D}_1({\cal L})$ denote the collection of all functions $F_{f,\{\phi_i\}}$ with $f\in C^2_0(\mathbb}\def\mbf{\mathbf{R}^n)$ and $\{\phi_i\} \subset C^2_b(\mathbb}\def\mbf{\mathbf{R})$. We shall obtain (\ref{3.1}) as the limit of a sequence of equations of immigration branching particle systems. Let $(c, h, \gamma_k, p^{(k)}, \theta_km, \theta_k^{-1})$ be a sequence of parameters such that $\theta_k \to \infty$ as $k\to \infty$. Let $q_k$ and $\sigma_k$ be defined by (\ref{2.5}) and (\ref{2.16}) in terms of $(\gamma_k, p^{(k)}, \theta_k)$. 
We assume that $\{X^{(k)}_t: t\ge0\}$ is an immigration particle system which satisfies \begin{eqnarray}\label{3.6} \langle}\def\>{\rangle\phi,X_t^{(k)}\> &=& \langle}\def\>{\rangle\phi,X_0^{(k)}\> + t\langle}\def\>{\rangle\phi,m\> + \int_{(0,t]}\int_{\mathbb}\def\mbf{\mathbf{R}}\theta_k^{-1}\phi(x)\tilde N^{(k)}(ds,dx) \nonumber \\ & & + \int_0^t\langle}\def\>{\rangle\gamma_k(q_k-1)\phi,X_s^{(k)}\> ds + Z_t^{(k)}(\phi) + U_t^{(k)}(\phi^\prime) \nonumber \\ & & + \int_0^t\int_{\mathbb}\def\mbf{\mathbf{R}}\langle}\def\>{\rangleh(y-\cdot)\phi^\prime,X_s^{(k)}\> W^{(k)}(ds,dy) + \frac{1}{2}\int_0^t \langle}\def\>{\ranglea\phi^{\prime\prime}, X_s^{(k)}\> ds, \end{eqnarray} where $(N^{(k)},Z^{(k)},M^{(k)},W^{(k)})$ are as in (\ref{2.20}) with parameters $(c, h, \gamma_k, p^{(k)}, \theta_km, \theta_k ^{-1})$. We assume that the $X^{(k)}_0$ are deterministic and $X^{(k)}_0 \to \mu$ as $k\to \infty$. \begin{lemma}}\def\elemma{\end{lemma}\label{l3.1} Suppose that $B_1 := \sup_{k\ge1} \|\gamma_k(q_k-1)\| < \infty$ and $B_2 :=\sup_{k\ge1} \|\theta_k^{-1} \gamma_k \sigma_k\| < \infty$. Then for any $\phi\in C^2_b(\mathbb}\def\mbf{\mathbf{R})$, the sequence $\{(\langle}\def\>{\rangle\phi,X_t^{(k)}\>) _{t\ge0}, k=1,2,\cdots\}$ is tight in the Skorokhod space $D([0,\infty), \mathbb}\def\mbf{\mathbf{R})$. \elemma \noindent{\it Proof.~~} Suppose that $\{\tau_k\}$ is a bounded sequence of stopping times. Let \begin{eqnarray*} V_t^{(k)}(\phi^\prime) = \int_0^t\int_{\mathbb}\def\mbf{\mathbf{R}}\langle}\def\>{\rangleh(y-\cdot) \phi^\prime,X_s^{(k)}\> W^{(k)}(ds,dy) \end{eqnarray*} and \begin{eqnarray*} Y_t^{(k)}(\phi) = \int_0^t\langle}\def\>{\rangle\gamma_k(q_k-1)\phi,X_s^{(k)}\> ds. 
\end{eqnarray*} It is easily seen that \begin{eqnarray*} \mbf{E}\{|V_{\tau_k+t}^{(k)}(\phi^\prime) - V_{\tau_k}^{(k)}(\phi^\prime)|^2\} &=& \mbf{E}\bigg\{\int_0^tds\int_{\mathbb}\def\mbf{\mathbf{R}}\langle}\def\>{\rangleh(y-\cdot) \phi^\prime,X_{\tau_k+s}^{(k)}\>^2 dy\bigg\} \\ &=& \mbf{E}\bigg\{\int_0^tds\int_{\mathbb}\def\mbf{\mathbf{R}^2}\rho(x-z)\phi^\prime(x) \phi^\prime(z) X_{\tau_k+s}^{(k)}(dx)X_{\tau_k+s}^{(k)}(dz)\bigg\} \\ &\le& \|\rho\|\int_0^t\mbf{E}\{\langle}\def\>{\rangle\phi^\prime,X_{\tau_k+s}^{(k)}\>^2\}ds \end{eqnarray*} and \begin{eqnarray*} \mbf{E}\{|Y_{\tau_k+t}^{(k)}(\phi) - Y_{\tau_k}^{(k)}(\phi)|^2\} \le B_1^2t\int_0^t\mbf{E}\{\langle}\def\>{\rangle\phi,X_{\tau_k+s}^{(k)}\>^2\} ds. \end{eqnarray*} The remaining terms on the right hand side of (\ref{3.6}) can be estimated by similar calculations. Combining those estimates and Proposition~\ref{p2.2} we get \begin{eqnarray*} \sup_{0\le t\le T}\sup_{k\ge1} \mbf{E}\{\langle}\def\>{\rangle\phi,X_{t}^{(k)}\>^2\} < \infty \end{eqnarray*} and \begin{eqnarray*} \sup_{k\ge1} \mbf{E}\{|\langle}\def\>{\rangle\phi,X_{\tau_k+t}^{(k)}\> - \langle}\def\>{\rangle\phi, X_{\tau_k}^{(k)}\>|^2\} \to 0 \end{eqnarray*} as $t\to 0$. Then the sequence $\{(\langle}\def\>{\rangle\phi,X_t^{(k)}\>)_{t\ge0}, k=1,2,\cdots\}$ is tight in $D([0,\infty),\mathbb}\def\mbf{\mathbf{R})$; see \cite{A78}. \qed \begin{lemma}}\def\elemma{\end{lemma}\label{l3.2} Suppose that $\gamma_k(1-q_k(\cdot)) \to b(\cdot)$ and $\theta_k^{-1}\gamma_k \sigma_k(\cdot) \to \sigma(\cdot)$ uniformly for $b\in C_b(\mathbb}\def\mbf{\mathbf{R})$ and $\sigma\in C_b(\mathbb}\def\mbf{\mathbf{R})^+$. Then the sequence $\{X_t^{(k)}: t\ge0, k=1,2,\cdots\}$ is tight in $D([0,\infty), M(\mathbb}\def\mbf{\mathbf{R}))$. 
Moreover, the limit process $\{X_t: t\ge0\}$ of any subsequence of $\{X_t^{(k)}: t\ge0, k = 1,2, \cdots\}$ is a.s.\ continuous and solves the $({\cal L},{\cal D}_1({\cal L}))$-martingale problem, that is, for each $F \in {\cal D}_1 ({\cal L})$, \begin{eqnarray}\label{3.7} F(X_t) - F(X_0) - \int_0^t {\cal L} F(X_s)ds, \qquad t\ge0, \end{eqnarray} is a martingale. \elemma \noindent{\it Proof.~~} By Lemma~\ref{l3.1} and a result of \cite{RC86}, the sequence of processes $\{X_t^{(k)}: t\ge0, k=1,2,\cdots\}$ is tight in $D([0,\infty), M(\bar\mathbb}\def\mbf{\mathbf{R}))$. We write $\phi \in C^2_b (\bar\mathbb}\def\mbf{\mathbf{R})$ if $\phi \in C^2_b(\mathbb}\def\mbf{\mathbf{R})$ and its derivatives up to the second order can be extended continuously to $\bar\mathbb}\def\mbf{\mathbf{R}$. If $\{\phi_i\} \subset C^2 (\bar \mathbb}\def\mbf{\mathbf{R})$, we can extend $F_{f,\{\phi_i\}}$, ${\cal A} F_{f,\{\phi_i\}}$ and ${\cal B} F_{f,\{\phi_i\}}$ continuously to $M(\bar \mathbb}\def\mbf{\mathbf{R})$. Let $\bar F_{f,\{\phi_i\}}$, $\bar{\cal A} \bar F_{f,\{\phi_i\}}$ and $\bar{\cal B} \bar F_{f,\{\phi_i\}}$ denote respectively those extensions. Let $({\cal A}_k,{\cal B}_k)$ and $(\bar{\cal A}_k, \bar{\cal B}_k)$ denote the corresponding operators associated with $\{X_t^{(k)}: t\ge0\}$. Clearly, if $\mu_k\in M_k(\bar\mathbb}\def\mbf{\mathbf{R})$ and $\mu_k\to\mu$, then $\bar{\cal A}_k \bar F_{f,\{\phi_i\}}(\mu_k) \to \bar{\cal A} \bar F_{f,\{\phi_i\}}(\mu)$. 
By Taylor's expansion, \begin{eqnarray*} & & \bar{\cal B}_k \bar F_{f,\{\phi_i\}}(\mu_k) \\ &=& \sum_{j=0}^\infty \int_{\mathbb}\def\mbf{\mathbf{R}} \theta_k \gamma_k p_j(x) \big[f(\langle}\def\>{\rangle\phi_1,\mu_k\>+(j-1)\theta_k^{-1}\phi_1(x),\cdots, \langle}\def\>{\rangle\phi_n,\mu_k\>+(j-1)\theta_k^{-1}\phi_n(x)) \nonumber \\ & &\hskip2cm - f(\langle}\def\>{\rangle\phi_1,\mu_k\>,\cdots, \langle}\def\>{\rangle\phi_n,\mu_k\>)\big] \mu_k(dx) \nonumber \\ & & + \int_{\mathbb}\def\mbf{\mathbf{R}} \theta_k \big[f(\langle}\def\>{\rangle\phi_1,\mu_k\>+\theta_k^{-1}\phi_1(x),\cdots, \langle}\def\>{\rangle\phi_n,\mu_k\>+\theta_k^{-1}\phi_n(x)) \nonumber \\ & &\hskip2cm - f(\langle}\def\>{\rangle\phi_1,\mu_k\>,\cdots, \langle}\def\>{\rangle\phi_n,\mu_k\>)\big] m (dx) \\ &=& \int_{\mathbb}\def\mbf{\mathbf{R}} \gamma_k(q_k(x)-1)\bigg[\sum_{i=1}^n f_i^\prime (\langle}\def\>{\rangle\phi_1,\mu_k\>,\cdots,\langle}\def\>{\rangle\phi_n,\mu_k\>)\phi_i(x)\bigg] \mu_k(dx) \nonumber \\ & & + \int_{\mathbb}\def\mbf{\mathbf{R}} \frac{\gamma_k\sigma_k(x)}{2\theta_k} \bigg[\sum_{i,j=1}^n f_{ij}^{\prime\prime}(\langle}\def\>{\rangle\phi_1,\mu_k\> +\eta_k\phi_1(x),\cdots,\langle}\def\>{\rangle\phi_n,\mu_k\>+\eta_k\phi_n(x)) \phi_i(x)\phi_j(x)\bigg]\mu_k(dx) \nonumber \\ & & + \int_{\mathbb}\def\mbf{\mathbf{R}} \sum_{i=1}^n \bigg[f^\prime_i(\langle}\def\>{\rangle\phi_1,\mu_k\>+\zeta_k\phi_1(x),\cdots, \langle}\def\>{\rangle\phi_n,\mu_k\>+\zeta_k\phi_n(x))\phi_i(x)\bigg] m (dx), \end{eqnarray*} where $0<\eta_k, \zeta_k<\theta_k^{-1}$. Then $\bar{\cal B}_k \bar F_{f,\{\phi_i\}}(\mu_k) \to \bar{\cal B} \bar F_{f,\{\phi_i\}}(\mu)$ under the assumption. Let $\{X_t: t\ge0\}$ be the limit of any subsequence of $\{X_t^{(k)}: t\ge0, k = 1,2, \cdots\}$. 
As in the proof of Lemma~4.2 of Dawson \textit{et al} \cite{DLW01} one can show that \begin{eqnarray*} \bar F_{f,\{\phi_i\}}(X_t) - \bar F_{f,\{\phi_i\}}(X_0) - \int_0^t \bar{\cal L} \bar F_{f,\{\phi_i\}}(X_s)ds \end{eqnarray*} is a martingale, where $\bar{\cal L} = \bar{\cal A} + \bar{\cal B}$. As in \cite{W98}, it is not hard to check that the ``gradient squared'' operator associated with $\bar{\cal L}$ satisfies the derivation property of \cite{BE85}. Then $\{X_t: t\ge0\}$ is actually almost surely continuous as an $M(\bar \mathbb}\def\mbf{\mathbf{R})$-valued process. By a modification of the proof of Theorem~4.1 of \cite{DLW01} one can show that $\{X_t: t\ge0\}$ is almost surely supported by $\mathbb}\def\mbf{\mathbf{R}$. Thus $\{X_t^{(k)}: t\ge0, k=1,2,\cdots\}$ is tight in $D([0,\infty), M(\mathbb}\def\mbf{\mathbf{R}))$ and $\{X_t: t\ge0\}$ is a.s.\ continuous as an $M(\mathbb}\def\mbf{\mathbf{R})$-valued process. \qed \begin{lemma}}\def\elemma{\end{lemma}\label{l3.3} If $\{X_t: t\ge0\}$ is the continuous solution of the $({\cal L},{\cal D}_1({\cal L}))$-martingale problem, then for each integer $n\ge 1$ there is a locally bounded function $C_n$ on $\mathbb}\def\mbf{\mathbf{R}_+^3$ such that \begin{eqnarray}\label{3.8} \mbf{E}\{\mbox{$\sup_{0\le s\le t}\langle}\def\>{\rangle1,X_s\>^n$}\} \le C_n(\|b\|,\|\sigma\|,t)(1+\langle}\def\>{\rangle1,\mu\>^n + \langle}\def\>{\rangle1,m\>^n), \quad t\ge 0. \end{eqnarray} \elemma \noindent{\it Proof.~~} If $\{X_t: t\ge0\}$ is the continuous solution of the $({\cal L},{\cal D}_1({\cal L}))$-martingale problem, then \begin{eqnarray}\label{3.9} Z_t(1) := \langle}\def\>{\rangle1,X_t\> - \langle}\def\>{\rangle1,\mu\> - t\langle}\def\>{\rangle1,m\> + \int_0^t\langle}\def\>{\rangleb,X_s\> ds \end{eqnarray} is a continuous local martingale with quadratic variation process \begin{eqnarray}\label{3.10} \langle}\def\>{\rangleZ(1)\>_t = \int_0^t \langle}\def\>{\rangle\sigma,X_s\> ds. 
\end{eqnarray} For $l >0$ let $\tau_l = \inf\{s\ge0: \langle}\def\>{\rangle1,X_s\> \ge l\}$. The inequalities for $n=1$ and $n=2$ can be proved as in the proof of Proposition~\ref{p2.2}. Now the Burkholder-Davis-Gundy inequality implies that \begin{eqnarray*} \mbf{E}\{\mbox{$\sup_{0\le s\le t}\langle}\def\>{\rangle1,X_{s\land\tau_l}\>^{2n}$}\} &\le& C_n\bigg[\langle}\def\>{\rangle1,\mu\>^{2n} + t^{2n}\langle}\def\>{\rangle1,m\>^{2n} + \mbf{E}\bigg\{\bigg(\int_0^{t\land\tau_l} \langle}\def\>{\rangle|b|,X_s\>ds\bigg)^{2n}\bigg\} \\ & & +\,\mbf{E}\bigg\{\bigg(\int_0^{t\land\tau_l} \langle}\def\>{\rangle\sigma,X_s\> ds\bigg)^n\bigg\}\bigg] \\ &\le& C_n\bigg[\langle}\def\>{\rangle1,\mu\>^{2n} + t^{2n}\langle}\def\>{\rangle1,m\>^{2n} + \theta^{-n} t^n\langle}\def\>{\rangle1,m\>^n \nonumber \\ & & +\,\|b\|^{2n}t^{2n-1}\int_0^t \mbf{E}\{\mbox{$\sup_{0\le r\le s}\langle}\def\>{\rangle1,X_{r\land\tau_l}\>^{2n}$}\} ds\bigg] \nonumber \\ & & +\,\|\sigma\|^n t^{n-1}\int_0^t\mbf{E}\{\langle}\def\>{\rangle1,X_s\>^n\} ds, \end{eqnarray*} where $C_n\ge0$ is a universal constant. By using the above estimate and Gronwall's inequality inductively, we get some estimates for $\mbf{E}\{\sup_{0\le s\le t} \langle}\def\>{\rangle1,$ $X_{t \land \tau_l}\>^n\}$. Then we obtain the inequalities for $\mbf{E}\{\sup_{0\le s\le t} \langle}\def\>{\rangle1,X_t\>^n\}$ by Fatou's lemma. \qed \begin{lemma}}\def\elemma{\end{lemma}\label{l3.4} Suppose there are constants $d_0>0$ and $\delta>1/2$ such that $h(x)\le d_0(1+|x|)^{-\delta}$ for all $x\in \mathbb}\def\mbf{\mathbf{R}$. If $\gamma_k (1-q_k(\cdot)) \to b(\cdot)$ and $\theta_k^{-1} \gamma_k \sigma_k (\cdot) \to \sigma(\cdot)$ uniformly for $b\in C_b(\mathbb}\def\mbf{\mathbf{R})$ and $\sigma \in C_b(\mathbb}\def\mbf{\mathbf{R})^+$, then the limit process $\{X_t: t\ge0\}$ of any subsequence of $\{X_t^{(k)}: t\ge0, k= 1,2, \cdots\}$ is a weak solution of (\ref{3.1}). 
\elemma \noindent{\it Proof.~~} By the proof of Lemma~\ref{l3.1} and the results of \cite{M83, RC86}, $\{(X_t^{(k)}, U_t^{(k)}, W_t^{(k)}, Z_t^{(k)}): t\ge0, k=1,2,\cdots\}$ is a tight sequence in $D([0,\infty)$, $M(\bar\mathbb}\def\mbf{\mathbf{R})\times {\cal S}^\prime (\mathbb}\def\mbf{\mathbf{R})^3)$. By passing to a subsequence, we simply assume that $\{(X_t^{(k)}, U_t^{(k)}, W_t^{(k)}, Z_t^{(k)}): t\ge0\}$ converges in distribution to some process $\{(X_t, U_t, W_t, Z_t): t\ge0\}$. By Lemma~\ref{l3.2}, $\{X_t: t\ge0\}$ is a.s.\ continuous and solves the $({\cal L},{\cal D}_1({\cal L}))$-martingale problem. Considering the Skorokhod representation, we assume $\{(X_t^{(k)}, U_t^{(k)}, W_t^{(k)}, Z_t^{(k)}): t\ge0\}$ converges almost surely to the process $\{(X_t, U_t, W_t, Z_t): t\ge0\}$ in the topology of $D([0,\infty), M(\bar\mathbb}\def\mbf{\mathbf{R})\times {\cal S}^\prime(\mathbb}\def\mbf{\mathbf{R})^3)$. Since each $\{W_t^{(k)}: t\ge0\}$ is a time-space white noise, so is $\{W_t: t\ge0\}$. In view of (\ref{2.15}), we have a.s.\ $U_t(\phi)=0$ for all $t\ge0$ and $\phi \in {\cal S}(\mathbb}\def\mbf{\mathbf{R})$. Then the theorem follows once it is proved that $\{(X_t, W_t, Z_t): t\ge0\}$ satisfies (\ref{3.1}). Clearly, it is sufficient to prove this for $\phi \in {\cal S}(\mathbb}\def\mbf{\mathbf{R})$ with compact support $\mbox{supp} (\phi)$. Let $Y_t(y) = \langle}\def\>{\rangleh(y-\cdot) \phi^\prime, X_t\>$ and $Y_t^{(k)}(y) = \langle}\def\>{\rangleh(y-\cdot) \phi^\prime, X_t^{(k)}\>$. For $l >0$ let $\tau_l = \inf\{s\ge0: \langle}\def\>{\rangle1,X_s^{(k)}\> \ge l$ for some $k\ge1\}$. Since the weak convergence of measures can be induced by the (Vasershtein) metric defined in \cite[p.150]{EK86}, it is easy to show that $\{Y_t^{(k)}1_{\{t<\tau_l\}}: t\ge0\}$ converges to $\{Y_t1_{\{t<\tau_l\}}: t\ge0\}$ in $D([0,\infty), C_0(\mathbb}\def\mbf{\mathbf{R}))$, where $C_0(\mathbb}\def\mbf{\mathbf{R})$ is furnished with the uniform norm. 
By \cite[Theorem~2.1]{C95}, for $\psi\in{\cal S}(\mathbb}\def\mbf{\mathbf{R})$ we have almost surely \begin{eqnarray}\label{3.11} \lim_{k\to\infty}\int_0^t\int_{\mathbb}\def\mbf{\mathbf{R}}\psi(y)Y_s^{(k)}(y) 1_{\{s<\tau_l\}} W^{(k)}(ds,dy) = \int_0^t\int_{\mathbb}\def\mbf{\mathbf{R}} \psi(y)Y_s(y)1_{\{s<\tau_l\}} W(ds,dy). \end{eqnarray} Let $\alpha = \sup\{|x|, x\in \mbox{supp} (\phi)\}$. We have \begin{eqnarray*} \sup_{|z|\le \alpha}|h(y-z)| \le d(y) := d_0[1_{\{|y|\le \alpha\}} + 1_{\{|y|>\alpha\}}(1+|y|-\alpha)^{-\delta}], \end{eqnarray*} and hence \begin{eqnarray*} |Y_t(y)| \le \langle}\def\>{\rangle|\phi^\prime|, X_t\>d(y) \quad\mbox{and}\quad |Y_t^{(k)}(y)| \le \langle}\def\>{\rangle|\phi^\prime|, X_t^{(k)}\> d(y). \end{eqnarray*} By the Burkholder-Davis-Gundy inequality, \begin{eqnarray}\label{3.12} &&\mbf{E}\bigg\{\bigg(\int_0^t\int_{\mathbb}\def\mbf{\mathbf{R}}\psi(y) Y_s^{(k)}(y) 1_{\{s<\tau_l\}}W^{(k)}(ds,dy)\bigg)^4\bigg\} \nonumber \\ &&\hskip2cm\le \mbox{const}\cdot\mbf{E}\bigg\{\bigg(\int_0^t\int_{\mathbb}\def\mbf{\mathbf{R}} \psi(y)^2Y_s^{(k)}(y)^2 1_{\{s<\tau_l\}}ds dy\bigg)^2\bigg\} \nonumber \\ &&\hskip2cm\le \mbox{const}\cdot l^4\|\phi^\prime\|^4\langle}\def\>{\rangle\psi^2d^2,\lambda\>^2t^2, \end{eqnarray} where $\lambda$ denotes the Lebesgue measure on $\mathbb}\def\mbf{\mathbf{R}$. Since the right hand side of (\ref{3.12}) is independent of $k\ge1$, the convergence of (\ref{3.11}) also holds in the $L^2$-sense. For each $\epsilon>0$, it is not hard to choose $\psi\in {\cal S}(\mathbb}\def\mbf{\mathbf{R})$ so that \begin{eqnarray}\label{3.13} &&\mbf{E}\bigg\{\bigg(\int_0^t\int_{\mathbb}\def\mbf{\mathbf{R}}(1-\psi(y)) Y_s^{(k)}(y) 1_{\{s<\tau_l\}}W^{(k)}(ds,dy)\bigg)^2\bigg\} \nonumber \\ &&\hskip2cm \le \mbox{const}\cdot l^2\|\phi^\prime\|^2\langle}\def\>{\rangle|1-\psi|^2d^2,\lambda\>t \le \epsilon. \end{eqnarray} The same estimate is available with $Y^{(k)}$ and $W^{(k)}$ replaced respectively by $Y$ and $W$. 
Clearly, (\ref{3.11}) and (\ref{3.13}) imply that \begin{eqnarray}\label{3.14} \lim_{k\to\infty}\int_0^t\int_{\mathbb}\def\mbf{\mathbf{R}}Y_s^{(k)}(y) 1_{\{s<\tau_l\}}W^{(k)}(ds,dy) = \int_0^t\int_{\mathbb}\def\mbf{\mathbf{R}}Y_s(y) 1_{\{s<\tau_l\}}W(ds,dy) \end{eqnarray} in the $L^2$-sense. Passing to a suitable subsequence we get the almost sure convergence for (\ref{3.14}). Now letting $k\to \infty$ in (\ref{3.6}) we get \begin{eqnarray*} \langle}\def\>{\rangle\phi,X_{t\land\tau_l}\> &=& \langle}\def\>{\rangle\phi,\mu\> + (t\land\tau_l) \langle}\def\>{\rangle\phi,m\> + \frac{1}{2}\int_0^{t\land\tau_l} \langle}\def\>{\ranglea\phi^{\prime\prime}, X_s\> ds - \int_0^{t\land\tau_l}\langle}\def\>{\rangleb\phi,X_s\> ds \nonumber \\ & & + \int_0^{t\land\tau_l}\int_{\mathbb}\def\mbf{\mathbf{R}}\phi(y) Z(ds,dy) + \int_0^{t\land\tau_l}\int_{\mathbb}\def\mbf{\mathbf{R}}\langle}\def\>{\rangleh(y-\cdot) \phi^\prime,X_s\> W(ds,dy), \end{eqnarray*} from which (\ref{3.1}) follows. The extension from $\phi\in {\cal S}(\mathbb}\def\mbf{\mathbf{R})$ to $\phi\in C^2_b(\mathbb}\def\mbf{\mathbf{R})$ is immediate. \qed \begin{theorem}}\def\etheorem{\end{theorem}\label{t3.1} Suppose there are constants $d_0>0$ and $\delta>1/2$ such that $h(x)\le d_0(1+|x|)^{-\delta}$ for all $x\in \mathbb}\def\mbf{\mathbf{R}$. Then the stochastic equation (\ref{3.1}) has a continuous weak solution $\{X_t: t\ge0\}$. Moreover, $\{X_t: t\ge0\}$ also solves the $({\cal L}, {\cal D}_1({\cal L}))$-martingale problem. \etheorem \noindent{\it Proof.~~} Given $b\in C_b(\mathbb}\def\mbf{\mathbf{R})$ and $\sigma\in C_b(\mathbb}\def\mbf{\mathbf{R})^+$, we set $\theta_k=k$, $\gamma_k = \sqrt{k}$ and \begin{eqnarray*} p_0^{(k)} = 1 - p_2^{(k)} - p_k^{(k)}, \quad p_2^{(k)} = \frac{(k-1)^2(1-b/\sqrt{k})-k\sigma_k}{2(k-1)^2-k}, \quad p_k^{(k)} = \frac{2\sigma_k-1+b/\sqrt{k}}{2(k-1)^2-k}, \end{eqnarray*} where $\sigma_k(\cdot) = \sqrt{k}\sigma(\cdot) + 1$. 
Then the sequence $(\gamma_k, p^{(k)}, \theta_k)$ satisfies the conditions of Lemma~\ref{l3.4}. By Lemmas~\ref{l3.2} and \ref{l3.4}, equation (\ref{3.1}) has a continuous weak solution $\{X_t: t\ge0\}$ which solves the $({\cal L},{\cal D}_1({\cal L}))$-martingale problem. \qed \section{Stochastic log-Laplace equations} \setcounter{equation}{0} In this section, we establish the existence and uniqueness of solution of the stochastic log-Laplace equation (\ref{1.8}). The techniques here are based on the results of Kurtz and Xiong \cite{KX99} and have been stimulated by \cite{C04, X04}. Let $(c, h, \sigma, b, m)$ be given as in the introduction. Suppose that $W(ds,dx)$ is a time-space white noise. The main objective is to discuss the non-linear SPDE: \begin{eqnarray}\label{4.1} \psi_t(x) &=& \phi(x) + \int_0^t \bigg[\frac{1}{2}a(x)\partial_x^2\psi_s(x) - b(x)\psi_s(x) - \frac{1}{2}\sigma(x)\psi_s(x)^2\bigg] ds \nonumber \\ & & + \int_0^t\int_{\mathbb}\def\mbf{\mathbf{R}} h(y-x) \partial_x\psi_s(x) W(ds,dy), \qquad t\ge 0. \end{eqnarray} Let $\{H_k(\mathbb}\def\mbf{\mathbf{R}): k=0,\pm 1, \pm 2, \cdots\}$ denote the Sobolev spaces on $\mathbb}\def\mbf{\mathbf{R}$. Let ``$\|\cdot\|_0$'' and ``$\langle}\def\>{\rangle\cdot, \cdot\>_0$'' denote respectively the norm and the inner product in $H_0(\mathbb}\def\mbf{\mathbf{R}) = L^2(\mathbb}\def\mbf{\mathbf{R})$. For $\phi\in H_k(\mathbb}\def\mbf{\mathbf{R})$ let \begin{eqnarray}\label{4.2} \|\phi\|_k^2 = \sum_{i=0}^k \|\partial_x^{i}\phi\|_0^2. \end{eqnarray} Following Xiong \cite{X04}, we first consider a smoothed version of equation (\ref{4.1}). Let $(T_t)_{t\ge0}$ denote the transition semigroup of a standard Brownian motion. Let $\{h_j: j=1,2,\cdots\}$ be a complete orthonormal system of $H_0(\mathbb}\def\mbf{\mathbf{R})$. 
Then \begin{eqnarray}\label{4.3} W_j(t) =\int_0^t\int_{\mathbb}\def\mbf{\mathbf{R}}h_j(y)W(ds,dy), \qquad t\ge 0 \end{eqnarray} defines a sequence of independent standard Brownian motions $\{W_j: j=1,2,\cdots\}$. For $\epsilon>0$ let \begin{eqnarray}\label{4.4} W^\epsilon(dt,dx) = \sum_{j=1}^{[1/\epsilon]} h_j(x)W_j(dt)dx, \qquad t\ge0, x\in\mathbb}\def\mbf{\mathbf{R}. \end{eqnarray} For $\phi\in H_0(\mathbb}\def\mbf{\mathbf{R})$ we set $d_\epsilon(\phi) = (\|T_\epsilon \phi\| \land \epsilon^{-1}) \|T_\epsilon \phi\|^{-1}$. By the general results of \cite[Theorem~3.5]{KX99} and \cite[p.133]{R90}, for any $\phi\in H_1(\mathbb}\def\mbf{\mathbf{R}) \cap C_b(\mathbb}\def\mbf{\mathbf{R})^+$ there is a pathwise unique $H_2(\mathbb}\def\mbf{\mathbf{R})$-valued solution $\{\psi_t^\epsilon: t\ge0\}$ of the equation \begin{eqnarray}\label{4.5} \psi_t^\epsilon(x) &=& T_{\epsilon}\phi(x) + \int_0^t \bigg[\frac{1}{2}a(x)\partial_x^2\psi_s^\epsilon(x) - b(x)\psi_s^\epsilon(x) - \frac{1}{2}\sigma(x) \psi_s^\epsilon(x) d_\epsilon(\psi^\epsilon_s) T_\epsilon \psi_s^\epsilon(x)\bigg] ds \nonumber \\ & & + \int_0^t\int_{\mathbb}\def\mbf{\mathbf{R}} h(y-x)\partial_x\psi_s^\epsilon(x) W^\epsilon(ds,dy), \qquad t\ge 0. \end{eqnarray} \begin{lemma}}\def\elemma{\end{lemma}\label{l4.1} The solution $\{\psi_t ^\epsilon: t\ge0\}$ of (\ref{4.5}) is non-negative and satisfies a.s.\ $\|\psi_t^\epsilon\|_{\mbox{\rm ess}} \le e^{-b_0t} \|\phi\|_{\mbox{\rm ess}}$ for all $t\ge0$, where $b_0 = \inf_x b(x)$ and $\|\cdot\|_{\mbox{\rm ess}}$ denotes the essential supremum norm. \elemma \noindent{\it Proof.~~} Indeed, for any non-negative and non-trivial function $\phi\in H_0(\mathbb}\def\mbf{\mathbf{R})$, the solution of (\ref{4.5}) can be obtained in the following way. Let $\{B_i(t)\}$ be a sequence of independent Brownian motions which are also independent of the white noise $\{W(ds,dy)\}$. 
As in \cite[Theorems~2.1 and 2.2]{KX99}, one can show that there is a pathwise unique solution $\psi_t^\epsilon(x)$ of the stochastic system \begin{eqnarray}\label{4.6} \xi_i(t) - \xi_i(0) &=& \int_0^t c(\xi_i(s))dB_i(s) + 2\int_0^t c(\xi_i(s))c^\prime(\xi_i(s))ds \hskip1cm \nonumber \\ & & -\int_0^t\int_{\mathbb}\def\mbf{\mathbf{R}} h(y-\xi_i(s)) W^\epsilon(ds,dy), \end{eqnarray} \begin{eqnarray}\label{4.7} m_i(t) - m_i(0) &=& \int_0^t \bigg[\frac{1}{2}a^{\prime\prime} (\xi_i(s)) - b(\xi_i(s))\bigg]m_i(s)ds \nonumber \\ & & - \frac{1}{2}\int_0^t\sigma(\xi_i(s))d_\epsilon(\psi_s^\epsilon) T_\epsilon\psi_s^\epsilon(\xi_i(s)) m_i(s)ds \nonumber \\ & & - \int_0^t\int_{\mathbb}\def\mbf{\mathbf{R}} h^\prime(y-\xi_i(s)) m_i(s) W^\epsilon(ds,dy), \end{eqnarray} and \begin{eqnarray}\label{4.8} \psi_t^\epsilon(x)dx = \lim_{n\to\infty}\frac{1}{n}\sum_{i=1}^n m_i(t) \delta_{\xi_i(t)}(dx), \qquad t\ge0, x\in \mathbb}\def\mbf{\mathbf{R}, \end{eqnarray} where $\{(m_i(0),\xi_i(0)): i=1,2,\cdots\}$ is a sequence of exchangeable random variables on $[0,\infty)\times \mathbb}\def\mbf{\mathbf{R}$ which are independent of $\{B_i(t)\}$ and $\{W(ds,dy)\}$ and satisfy $$ \lim_{n\to\infty} n^{-1}\sum_{i=1}^n m_i(0) \delta_{\xi_i(0)}(dx) = T_{\epsilon}\phi(x)dx. $$ By the arguments of \cite[Theorems~3.1-3.5]{KX99}, it can be proved that $\psi_t^\epsilon (x)$ is also the pathwise unique solution of (\ref{4.5}). By a duality argument similar to the proof of \cite[Lemma~2.2]{X04} we get $\|\psi_t^\epsilon\| _{\mbox{ess}} \le e^{-b_0t} \|\phi\|_{\mbox{ess}}$. \qed \begin{lemma}}\def\elemma{\end{lemma}\label{l4.2} There is a locally bounded function $K(\cdot)$ on $[0,\infty)$ such that \begin{eqnarray}\label{4.9} \mbf{E}\bigg\{\sup_{0\le r\le t} \|\psi_r^\epsilon\|_0^4\bigg\} \le K(t), \qquad t\ge0. \end{eqnarray} \elemma \noindent{\it Proof.~~} Although the arguments are similar to those of \cite{X04}, we shall give the detailed proof for the convenience of the reader. 
For any $f\in C^\infty(\mathbb}\def\mbf{\mathbf{R})$ with compact support, \begin{eqnarray*} \langle}\def\>{\rangle\psi_t^\epsilon,f\>_0 &=& \langle}\def\>{\rangleT_{\epsilon}\phi,f\>_0 + \int_0^t \bigg[\frac{1}{2}\langle}\def\>{\ranglea\partial_x^2 \psi_s^\epsilon,f\>_0 - \langle}\def\>{\rangleb\psi_s^\epsilon,f\>_0 - \frac{1}{2}\langle}\def\>{\rangle\sigma\psi_s^\epsilon d_\epsilon(\psi_s^\epsilon) T_\epsilon\psi_s^\epsilon,f\>_0\bigg]ds \nonumber \\ & & + \int_0^t\int_{\mathbb}\def\mbf{\mathbf{R}} \langle}\def\>{\rangleh(y-\cdot)\partial_x \psi_s^\epsilon, f\>_0 W^\epsilon (ds,dy). \end{eqnarray*} By It\^o's formula, \begin{eqnarray*} \langle}\def\>{\rangle\psi_t^\epsilon,f\>_0^2 &=& \langle}\def\>{\rangleT_{\epsilon}\phi,f\>_0^2 + \int_0^t \langle}\def\>{\rangle\psi_s^\epsilon,f\>_0 \langle}\def\>{\ranglea\partial_x^2 \psi_s^\epsilon - 2b\psi_s^\epsilon - \sigma\psi_s^\epsilon d_\epsilon(\psi_s^\epsilon)T_\epsilon\psi_s^\epsilon, f\>_0 ds \nonumber \\ & & +\, 2\int_0^t\int_{\mathbb}\def\mbf{\mathbf{R}}\langle}\def\>{\rangle\psi_s^\epsilon,f\>_0 \langle}\def\>{\rangleh(y-\cdot)\partial_x\psi_s^\epsilon,f\>_0 W^\epsilon(ds,dy) \nonumber \\ & & + \sum_{j=1}^{[1/\epsilon]}\int_0^t\bigg[\int_{\mathbb}\def\mbf{\mathbf{R}} h_j(y)\langle}\def\>{\rangleh(y-\cdot)\partial_x\psi_s^\epsilon,f\>_0 dy\bigg]^2 ds. 
\end{eqnarray*} Then we may sum over $f$ in a complete orthonormal system of $H_0(\mathbb}\def\mbf{\mathbf{R})$ to get \begin{eqnarray}\label{4.10} \|\psi_t^\epsilon\|_0^2 &=& \|T_{\epsilon}\phi\|_0^2 + \int_0^t \langle}\def\>{\ranglea\partial_x^2\psi_s^\epsilon - 2b\psi_s^\epsilon - \sigma\psi_s^\epsilon d_\epsilon(\psi_s^\epsilon) T_\epsilon \psi_s^\epsilon,\psi_s^\epsilon\>_0 ds \nonumber \\ & & +\, 2\int_0^t\int_{\mathbb}\def\mbf{\mathbf{R}}\langle}\def\>{\rangleh(y-\cdot)\partial_x \psi_s^\epsilon, \psi_s^\epsilon\>_0 W^\epsilon(ds,dy) \nonumber \\ & & + \sum_{j=1}^{[1/\epsilon]}\int_0^t ds \int_{\mathbb}\def\mbf{\mathbf{R}} \bigg[ \int_{\mathbb}\def\mbf{\mathbf{R}} h_j(y) h(y-z)\partial_x \psi_s^\epsilon(z) dy\bigg]^2dz \nonumber\\ &\le& \|T_{\epsilon}\phi\|_0^2 + \int_0^t\langle}\def\>{\ranglec^2\partial_x^2 \psi_s^\epsilon, \psi_s^\epsilon\>_0 ds + \int_0^t \langle}\def\>{\rangle\rho(0)\partial_x^2\psi_s^\epsilon, \psi_s^\epsilon\>_0 ds \nonumber \\ & & + \int_0^t \langle}\def\>{\rangle - 2b\psi_s^\epsilon - \sigma\psi_s^\epsilon d_\epsilon(\psi_s^\epsilon) T_\epsilon \psi_s^\epsilon,\psi_s^\epsilon\>_0 ds \nonumber \\ & & +\, 2\int_0^t\int_{\mathbb}\def\mbf{\mathbf{R}} \langle}\def\>{\rangleh(y-\cdot)\partial_x \psi_s^\epsilon,\psi_s^\epsilon\>_0 W^\epsilon(ds,dy) \nonumber \\ & & + \int_0^t ds \int_{\mathbb}\def\mbf{\mathbf{R}} \bigg[ \int_{\mathbb}\def\mbf{\mathbf{R}} h(y-x)^2(\partial_x \psi_s^\epsilon(x))^2 dy\bigg] dx. \end{eqnarray} Note that the third and the last terms on the right hand side cancel out. Since $\psi^{\epsilon}_s\in H_2(\mathbb}\def\mbf{\mathbf{R})$, there exists a sequence $f_n\in C^{\infty}_0(\mathbb}\def\mbf{\mathbf{R})$ such that $f_n\to\psi^{\epsilon}_s$ in $H_2(\mathbb}\def\mbf{\mathbf{R})$. By the assumption, both $c^2$ and $(c^2)''$ are bounded. 
Then there is a constant $K\ge0$ such that \begin{eqnarray*} \langle}\def\>{\ranglec^2f''_n,f_n\> = \langle}\def\>{\rangle(c^2)'',f_n^2\>/2 - \langle}\def\>{\ranglec^2,(f'_n)^2\> \le K\|f_n\|^2_0. \end{eqnarray*} Taking $n\to\infty$ we have \begin{eqnarray}\label{4.11} \langle}\def\>{\ranglec^2\partial_x^2\psi_s^\epsilon, \psi_s^\epsilon\>_0\le K\|\psi_s^\epsilon\|^2_0. \end{eqnarray} By Lemma~\ref{l4.1}, it is easy to find a locally bounded non-negative function $K(\cdot)$ such that \begin{eqnarray*} \langle}\def\>{\rangle - 2b\psi_s^\epsilon - \sigma\psi_s^\epsilon d_\epsilon(\psi_s^\epsilon) T_\epsilon \psi_s^\epsilon, \psi_s^\epsilon\>_0 \le K(s)\|\psi_s^\epsilon\|^2_0. \end{eqnarray*} Therefore, we can redesign $K(\cdot)$ suitably and get from (\ref{4.10}) that \begin{eqnarray*} \|\psi_t^\epsilon\|_0^2 &\le& \|\phi\|_0^2 + K(t)\int_0^t \|\psi_s^\epsilon\|^2_0ds + 2\int_0^t\int_{\mathbb}\def\mbf{\mathbf{R}} \langle}\def\>{\rangleh(y-\cdot)\partial_x\psi_s^\epsilon, \psi_s^\epsilon\>_0 W^\epsilon(ds,dy). \end{eqnarray*} By Schwarz' and Burkholder's inequalities we can redesign $K(\cdot)$ again to get \begin{eqnarray}\label{4.12} \mbf{E}\bigg\{\sup_{0\le r\le t}\|\psi_r^\epsilon\|_0^4\bigg\} &\le& 3\|\phi\|_0^4 + K(t)\mbf{E}\bigg\{\int^t_0 \|\psi_s^\epsilon\|^4_0 ds\bigg\} \nonumber\\ & & +\, 24\mbf{E}\bigg\{\int_0^t\int_{\mathbb}\def\mbf{\mathbf{R}} \langle}\def\>{\rangleh(y-\cdot) \partial_x\psi_s^\epsilon,\psi_s^\epsilon\>_0^2 dy ds\bigg\} \nonumber\\ &\le& 3\|\phi\|_0^4 + K(t)\mbf{E}\bigg\{\int^t_0 \|\psi_s^\epsilon\|^4_0ds\bigg\}, \end{eqnarray} where the last inequality follows from the same arguments as those leading to (\ref{4.11}). Using stopping times if necessary, we may assume that $\mbf{E}\{\|\psi_t^\epsilon\|_0^4\} < \infty$ for each $t\ge 0$. Then we obtain (\ref{4.9}) by Gronwall's inequality. 
\qed \begin{lemma}}\def\elemma{\end{lemma}\label{l4.3} There is a locally bounded function $K(\cdot)$ on $[0,\infty)$ such that \begin{eqnarray}\label{4.13} \mbf{E}\bigg\{\sup_{0\le r\le t} \|\psi_r^\epsilon\|_1^4\bigg\} \le K(t), \qquad t\ge0. \end{eqnarray} \elemma \noindent{\it Proof.~~} We shall omit some details since they are similar to those in the proof of Lemma~\ref{l4.2}. {From} (\ref{4.5}) it follows that \begin{eqnarray*} \partial_x\psi_t^\epsilon(x) &=& \partial_xT_{\epsilon}\phi(x) + \int_0^t\bigg[\frac{1}{2} a^\prime(x)\partial_x^2\psi_s^\epsilon(x) + \frac{1}{2}a(x) \partial_x^3\psi_s^\epsilon(x) - b^\prime(x)\psi_s^\epsilon(x) - b(x)\partial_x\psi_s^\epsilon(x) \\ & & - \frac{1}{2}\sigma^\prime(x)\psi_s^\epsilon(x)d_\epsilon (\psi^\epsilon_s) T_\epsilon \psi_s^\epsilon(x) - \frac{1}{2}\sigma(x)\partial_x \psi_s^\epsilon(x) d_\epsilon(\psi^\epsilon_s) T_\epsilon\psi_s^\epsilon(x) \\ & & - \frac{1}{2}\sigma(x) \psi_s^\epsilon(x)d_\epsilon (\psi^\epsilon_s)T_\epsilon\partial_\cdot \psi_s^\epsilon(x)\bigg] ds \\ & & + \int_0^t\int_{\mathbb}\def\mbf{\mathbf{R}} [h(y-x)\partial_x^2\psi_s^\epsilon(x) - h^\prime(y-x)\partial_x\psi_s^\epsilon(x)] W^\epsilon(ds,dy). 
\end{eqnarray*} Then we have \begin{eqnarray*} \|\partial_x\psi_t^\epsilon\|_0^2 &=& \|T_{\epsilon}\partial_x\phi\|_0^2 + \int_0^t \bigg[\langle}\def\>{\rangle\partial_x\psi_s^\epsilon,a^\prime\partial_x^2 \psi_s^\epsilon + a\partial_x^3\psi_s^\epsilon\>_0 - 2 \langle}\def\>{\rangle\partial_x\psi_s^\epsilon, b^\prime\psi_s^\epsilon + b \partial_x \psi_s^\epsilon\>_0 \\ & & - d_\epsilon(\psi^\epsilon_s) \langle}\def\>{\rangle\partial_x\psi_s^\epsilon, \sigma^\prime \psi_s T_\epsilon \psi_s^\epsilon + \sigma \partial_x \psi_s^\epsilon T_\epsilon \psi_s^\epsilon + \sigma \psi_s^\epsilon T_\epsilon\partial_x \psi_s^\epsilon\>_0\bigg] ds \nonumber \\ & & + 2\int_0^t\int_{\mathbb}\def\mbf{\mathbf{R}} \langle}\def\>{\rangle\partial_x\psi_s^\epsilon, h(y-\cdot)\partial_x^2\psi_s^\epsilon - h^\prime(y-\cdot)\partial_x \psi_s^\epsilon\>_0 W^{\epsilon}(ds,dy) \nonumber \\ & & + \int_0^tds\int_{\mathbb}\def\mbf{\mathbf{R}} \|h(y-\cdot)\partial_x^2\psi_s^\epsilon - h^\prime(y-\cdot)\partial_x\psi_s^\epsilon\|_0^2 dy. \end{eqnarray*} As in the proof of the previous lemma, we have that \begin{eqnarray}\label{4.14} \mbf{E}\bigg\{\sup_{0\le r\le t}\|\partial_x \psi_t^\epsilon \|_0^4\bigg\} \le4\|\partial_x\phi\|_0^4 + K(t) \mbf{E}\int^t_0\left(\|\psi_s^\epsilon\|^4_0 +\|\partial_x\psi_s^\epsilon\|^4_0\right)ds. \end{eqnarray} Again, we may assume $\mbf{E}\bigg\{\sup_{0\le r\le t} \|\partial_x \psi_r^\epsilon \|_0^4\bigg\}<\infty$ for all $t\ge 0$. Then we obtain (\ref{4.13}) by Gronwall's inequality. \qed \begin{theorem}}\def\etheorem{\end{theorem}\label{t4.1} For any $\phi \in H_1(\mathbb}\def\mbf{\mathbf{R}) \cap C_b(\mathbb}\def\mbf{\mathbf{R})^+$, equation (\ref{4.1}) has a pathwise unique $H_1(\mathbb}\def\mbf{\mathbf{R})^+$-valued solution $\{\psi_t: t\ge0\}$. We have a.s.\ $\|\psi_t\|_{\mbox{\rm ess}} \le e^{-b_0t} \|\phi\|_{\mbox {\rm ess}}$ for all $t\ge0$. 
Moreover, there is a locally bounded function $K(\cdot)$ on $[0,\infty)$ such that \begin{eqnarray}\label{4.15} \mbf{E}\Big\{\sup_{0\le r\le t} \|\psi_r\|_1^4\Big\} \le K(t), \end{eqnarray} and so $\{\psi_t(\cdot): t\ge0\}$ has an $H_1(\mathbb}\def\mbf{\mathbf{R}) \cap C_b(\mathbb}\def\mbf{\mathbf{R})^+$-valued version. \etheorem \noindent{\it Proof.~~} Let $z_t(x) = \psi^{\epsilon}_t(x) - \psi^{\eta}_t(x)$. For any $t\ge0$, by the same arguments leading to (2.12) of \cite{X04} we have \begin{eqnarray}\label{4.16} \mbf{E}\Big\{\sup_{0\le s\le t}\|z_s\|^4_0\Big\} &\le& K\int^t_0\mbf{E}\{\|z_{r}\|^4_0\}dr + K\mbf{E} \bigg\{\int^t_0|d_{\epsilon}(\psi^{\epsilon}_r) - d_{\eta}(\psi^{\eta}_r)|^4dr\bigg\} \nonumber \\ & & +\, 3\|\phi\|^4 \mbf{E} \bigg\{\int^t_0\left(\int|T_{\epsilon} \psi_r^{\epsilon}(x) - T_{\eta}\psi^{\eta}_{r}(x)|^2 dx\right)^2dr\bigg\} \nonumber \\ & & +\, K\mbf{E}\bigg\{\sum^{[1/\epsilon]}_{j=[1/\eta]+1}\int^t_0 \left(\int_{\mathbb}\def\mbf{\mathbf{R}} \left<h(y-\cdot)\partial_x\psi^{\eta}_s, z_s\right> h_j(y)dy\right)^2ds\bigg\}. \end{eqnarray} As in Section 2.4 of \cite{X04}, the second and third terms on the right hand side of (\ref{4.16}) converge to zero as $\epsilon$ and $\eta\to 0$. On the other hand, the last term is bounded by \begin{eqnarray*} \int^t_0\int_{\mathbb}\def\mbf{\mathbf{R}}\sum^{[1/\epsilon]}_{j=[1/\eta]+1} \left(\int_{\mathbb}\def\mbf{\mathbf{R}} h_j(y)h(y-x)dy\right)^2\mbf{E} \{z_s(x)^2\} dx \int_{\mathbb}\def\mbf{\mathbf{R}}\mbf{E}\{(\partial_x\psi^{\eta}_s(x))^2\}dx ds, \end{eqnarray*} which tends to zero as $\epsilon$ and $\eta\to 0$. As in Section 2.4 of \cite{X04} we can show that $\psi^{\epsilon}$ is a Cauchy sequence in $H_0(\mathbb}\def\mbf{\mathbf{R})$ and its limit $\psi$ is the pathwise unique solution of (\ref{4.1}). The second assertion follows from Lemma~\ref{l4.1} and Fatou's lemma. Finally, we obtain (\ref{4.15}) by Lemma~\ref{l4.3} and Sobolev's result. 
\qed Based on Theorem~\ref{t4.1}, let us consider the following more useful backward SPDE: \begin{eqnarray}\label{4.17} \psi_{r,t}(x) &=& \phi(x) + \int_r^t \bigg[\frac{1}{2}a(x)\partial_x^2 \psi_{s,t}(x) - b(x)\psi_{s,t}(x) - \frac{1}{2}\sigma(x) \psi_{s,t}(x)^2\bigg] ds \nonumber \\ & & + \int_r^t\int_{\mathbb}\def\mbf{\mathbf{R}} h(y-x)\partial_x\psi_{s,t}(x) \cdot W(ds,dy), \qquad t\ge r\ge 0, \end{eqnarray} where ``$\cdot$'' denotes the backward stochastic integral. \begin{theorem}}\def\etheorem{\end{theorem}\label{t4.2} For any $\phi \in H_1(\mathbb}\def\mbf{\mathbf{R}) \cap C_b(\mathbb}\def\mbf{\mathbf{R})^+$, the backward equation (\ref{4.17}) has a pathwise unique $H_1(\mathbb}\def\mbf{\mathbf{R}) \cap C_b(\mathbb}\def\mbf{\mathbf{R})^+$-valued solution $\{\psi_{r,t}: t\ge r\ge0\}$. Further, we have a.s.\ $\|\psi_{r,t} \| \le e^{-b_0(t-r)} \|\phi\|$ for all $t\ge r\ge0$. \etheorem \noindent{\it Proof.~~} For fixed $t>0$, define the white noise \begin{eqnarray}\label{4.18} W_t([0,s]\times B) = - W([t-s,t]\times B), \qquad 0\le s\le t, B\in {\cal B}(\mathbb}\def\mbf{\mathbf{R}). \end{eqnarray} By Theorem~\ref{t4.1}, there is a pathwise unique solution $\{\phi_{r,t}: 0\le r\le t\}$ of the equation \begin{eqnarray}\label{4.19} \phi_{r,t}(x) &=& \phi(x) + \int_0^r \bigg[\frac{1}{2}a(x)\partial_x^2\phi_{s,t}(x) - b(x)\phi_{s,t}(x) - \frac{1}{2}\sigma(x)\phi_{s,t}(x)^2\bigg] ds \nonumber \\ & & + \int_0^r\int_{\mathbb}\def\mbf{\mathbf{R}} h(y-x) \partial_x\phi_{s,t}(x) W_t(ds,dy). 
\end{eqnarray} Setting $\psi_{r,t}(x) := \phi_{t-r,t}(x)$, we have \begin{eqnarray*} \psi_{r,t}(x) &=& \phi(x) + \int_0^{t-r} \bigg[\frac{1}{2}a(x)\partial_x^2 \psi_{t-s,t}(x) - b(x)\psi_{t-s,t}(x) - \frac{1}{2} \sigma(x)\psi_{t-s,t}(x)^2\bigg] ds \nonumber \\ & & + \int_0^{t-r}\int_{\mathbb}\def\mbf{\mathbf{R}} h(y-x) \partial_x\psi_{t-s,t}(x) W_t(ds,dy) \nonumber \\ &=& \phi(x) + \int_r^t \bigg[\frac{1}{2}a(x)\partial_x^2 \psi_{s,t}(x) - b(x)\psi_{s,t}(x) - \frac{1}{2}\sigma(x) \psi_{s,t}(x)^2\bigg] ds \nonumber \\ & & + \int_r^t\int_{\mathbb}\def\mbf{\mathbf{R}} h(y-x)\partial_x\psi_{s,t}(x) \cdot W(ds,dy). \end{eqnarray*} That is, $\{\psi_{r,t}: t\ge r\ge0\}$ solves (\ref{4.17}). The remaining assertions are immediate by Theorem~\ref{t4.1}. \qed We may regard the white noise $\{W(ds,dy)\}$ as a random variable taking values in the Schwartz space ${\cal S}^\prime([0,\infty) \times \mathbb}\def\mbf{\mathbf{R})$. As in the classical situation of \cite[p.163]{IW89}, the result of Theorem~\ref{t4.2} implies the existence of a measurable mapping $F: (\phi,w) \mapsto \psi_{r,t}^w(\phi,\cdot)$ from $(H_1(\mathbb}\def\mbf{\mathbf{R}) \cap C_b(\mathbb}\def\mbf{\mathbf{R})^+) \times {\cal S}^\prime([0,\infty) \times \mathbb}\def\mbf{\mathbf{R})$ to $H_1(\mathbb}\def\mbf{\mathbf{R}) \cap C_b(\mathbb}\def\mbf{\mathbf{R})^+$ such that $\psi_{r,t}^W(\phi,\cdot)$ is the pathwise unique solution of (\ref{4.17}). \section{Conditional log-Laplace functionals} \setcounter{equation}{0} Let $(c, h, \sigma, b, m)$ be given as in the introduction. 
Let $\{X_t: t\ge0\}$ be a continuous solution of the SPDE: \begin{eqnarray}\label{5.1} \langle}\def\>{\rangle\phi,X_t\> &=& \langle}\def\>{\rangle\phi,\mu\> + t\langle}\def\>{\rangle\phi,m\> + \frac{1}{2}\int_0^t\langle}\def\>{\ranglea\phi^{\prime\prime}, X_s\> ds - \int_0^t\langle}\def\>{\rangleb\phi,X_s\> ds \nonumber \\ & & + \int_0^t\int_{\mathbb}\def\mbf{\mathbf{R}}\phi(y) Z(ds,dy) + \int_0^t\int_{\mathbb}\def\mbf{\mathbf{R}}\langle}\def\>{\rangleh(y-\cdot) \phi^\prime,X_s\> W(ds,dy), \end{eqnarray} where $W(ds,dx)$ is a time-space white noise and $Z(ds,dy)$ is an orthogonal martingale measure which is orthogonal to $W(ds,dy)$ and has covariation measure $\sigma(y)X_s(dy)ds$. Let $({\cal F}_t)_{t\ge0}$ denote the filtration generated by $\{W(ds,dy)\}$ and $\{Z(ds,dy)\}$. Since $\sigma$ is strictly positive, the process $\{X_t: t\ge0\}$ can be represented in terms of the covariation measure of $Z(ds,dy)$, so it is adapted to $({\cal F}_t)_{t\ge0}$. By Theorem~\ref{t4.2}, for $\phi\in H_1(\mathbb}\def\mbf{\mathbf{R}) \cap C_b(\mathbb}\def\mbf{\mathbf{R})^+$ the equation \begin{eqnarray}\label{5.2} \psi_{r,t}(x) &=& \phi(x) + \int_r^t \bigg[\frac{1}{2} a(x)\psi_{s,t} ^{\prime\prime}(x) - b(x)\psi_{s,t}(x) - \frac{1}{2}\sigma(x) \psi_{s,t}(x)^2\bigg] ds \nonumber \\ & & + \int_r^t\int_{\mathbb}\def\mbf{\mathbf{R}} h(y-x)\psi_{s,t}^\prime(x) \cdot W(ds,dy), \qquad t\ge r\ge 0, \end{eqnarray} has a pathwise unique solution $\psi_{r,t} = \psi^W_{r,t}$ in $H_1(\mathbb}\def\mbf{\mathbf{R}) \cap C_b(\mathbb}\def\mbf{\mathbf{R})^+$. Let $\mbf{P}^W$ and $\mbf{E}^W$ denote respectively the conditional probability and expectation given the white noise $\{W(ds,dy)\}$. 
The main result of this section is the following \begin{theorem}}\def\etheorem{\end{theorem}\label{t5.1} For $t\ge r\ge0$ and $\phi\in H_1(\mathbb}\def\mbf{\mathbf{R}) \cap C_b(\mathbb}\def\mbf{\mathbf{R})^+$ we have a.s.\ \begin{eqnarray}\label{5.3} \mbf{E}^W\{ e^{-\langle}\def\>{\rangle\phi,X_t\>}|{\cal F}_r\} = \exp\bigg\{-\langle}\def\>{\rangle\psi^W_{r,t},X_r\> - \int_r^t\langle}\def\>{\rangle\psi^W_{s,t},m\>ds\bigg\}, \end{eqnarray} where $\psi^W_{r,t}$ is defined by (\ref{5.2}). Consequently, $\{X_t: t\ge0\}$ is a diffusion process with Feller transition semigroup $(Q_t)_{t\ge 0}$ given by \begin{eqnarray}\label{5.4} \int_{M(\mathbb}\def\mbf{\mathbf{R})} e^{-\langle}\def\>{\rangle\phi,\nu\>} Q_t(\mu,d\nu) = \mbf{E}\exp\bigg\{-\langle}\def\>{\rangle\psi^W_{0,t},\mu\> - \int_0^t\langle}\def\>{\rangle\psi^W_{s,t},m\>ds\bigg\}. \end{eqnarray} \etheorem Our proof of the theorem are based on direct calculations derived from (\ref{5.1}) and (\ref{5.2}). The argument is different from that of \cite{X04}, where the Wong-Zakai approximation was used to get the result. We shall give four lemmas which together with the proof of the theorem show clearly the key steps of the calculations. Suppose that $\alpha$ and $\beta$ are bounded measurable functions on $[0,\infty) \times \mathbb}\def\mbf{\mathbf{R}$ and that \begin{eqnarray*} \int_0^t\int_{\mathbb}\def\mbf{\mathbf{R}} \alpha(s,y)^2dsdy < \infty. \end{eqnarray*} For $t\ge r\ge0$, define \begin{eqnarray}\label{5.5} \theta_\alpha(r,t) = \exp\bigg\{\int_r^t\int_{\mathbb}\def\mbf{\mathbf{R}} \alpha(s,y)W(ds,dy) - \frac{1}{2}\int_r^t\int_{\mathbb}\def\mbf{\mathbf{R}} \alpha(s,y)^2dsdy\bigg\}, \end{eqnarray} and \begin{eqnarray}\label{5.6} \zeta_\beta(r,t) = \exp\bigg\{\int_r^t\int_{\mathbb}\def\mbf{\mathbf{R}} \beta(s,y)Z(ds,dy) - \frac{1}{2}\int_r^t \langle}\def\>{\rangle\sigma \beta(s,\cdot)^2,X_s\> ds\bigg\}. 
\end{eqnarray} Then we have the following \begin{lemma}}\def\elemma{\end{lemma}\label{l5.1} Under the conditional probability measure $\mbf{P}^W$, the process $\{\zeta_\beta(0,t): t\ge 0\}$ is a martingale with respect to $({\cal F}_t)_{t\ge 0}$. \elemma \noindent{\it Proof.~~} Clearly, both $\{\theta_\alpha(0,t): t\ge 0\}$ and $\{\zeta_\beta(0,t): t\ge 0\}$ are martingales under the original probability measure $\mbf{P}$. Recall that the martingale measures $\{W(ds,dy)\}$ and $\{Z(ds,dy)\}$ are orthogonal. By integration by parts it is easy to see that $\{\theta_\alpha(0,t) \zeta_\beta(0,t): t\ge 0\}$ is a martingale. Since $\alpha$ is arbitrary, for any $u\ge t\ge r\ge0$ and any bounded ${\cal F}_r$-measurable random variable $Z$ we obtain \begin{eqnarray*} \mbf{E}\{\theta_\alpha(0,u)\zeta_\beta(0,t)Z\} = \mbf{E}\{\theta_\alpha(0,r)\zeta_\beta(0,r)Z\} = \mbf{E}\{\theta_\alpha(0,u)\zeta_\beta(0,r)Z\}. \end{eqnarray*} Note that the linear span of the functionals $\{\theta_\alpha (0,u)\}$ is dense in the space of squared-integrable and $\sigma(W)$-measurable random variables; see e.g.\ \cite[p.81]{B92} and \cite{C04}. Then we have the desired equality $\mbf{E}^W \{\zeta_\beta(0,t)| {\cal F}_r\} = \zeta_\beta(0,r)$. \qed By the property of independent increments of the white noise $\{W(ds,dy)\}$ we have \begin{eqnarray}\label{5.7} \xi_{r,t}(x) := \mbf{E}\{\psi_{r,t}(x)\theta_\alpha(r,t)\} = \mbf{E}\{\psi_{r,t}(x)\theta_\alpha(r,t)|{\cal F}_r\} \end{eqnarray} and \begin{eqnarray}\label{5.8} \eta_{r,t}(x) := \mbf{E}\{\psi_{r,t}(x)^2\theta_\alpha(r,t)\} = \mbf{E}\{\psi_{r,t}(x)^2\theta_\alpha(r,t)|{\cal F}_r\}. 
\end{eqnarray} \begin{lemma}}\def\elemma{\end{lemma}\label{l5.2} For $t\ge r\ge 0$, we have a.s.\ \begin{eqnarray}\label{5.9} \mbf{E}\{\langle}\def\>{\rangle\psi_{r,t},X_r\>\theta_\alpha(0,t)\zeta_\beta(0,t)|{\cal F}_r\} = \langle}\def\>{\rangle\xi_{r,t},X_r\>\theta_\alpha(0,r)\zeta_\beta(0,r) \end{eqnarray} and \begin{eqnarray}\label{5.10} \mbf{E}\{\langle}\def\>{\rangle\sigma\psi_{r,t}^2,X_r\>\theta_\alpha(0,t)\zeta_\beta(0,t) | {\cal F}_r\} = \langle}\def\>{\rangle\sigma\eta_{r,t},X_r\>\theta_\alpha(0,r)\zeta_\beta(0,r). \end{eqnarray} \elemma \noindent{\it Proof.~~} By Lemma~\ref{l5.1} it is easy to see that $\mbf{E}^W[\zeta_\beta(r,t) |{\cal F}_r] = 1$. Since $\theta_\alpha (0,r) \zeta_\beta(0,r)$ is ${\cal F}_r$-measurable and $\langle}\def\>{\rangle\psi_{r,t},X_r\> \theta_\alpha(r,t)$ is $\sigma(W, {\cal F}_r)$-measurable, we have \begin{eqnarray*} && \mbf{E}\{\langle}\def\>{\rangle\psi_{r,t},X_r\>\theta_\alpha(0,t) \zeta_\beta(0,t)|{\cal F}_r\} \nonumber \\ &&\qquad= \mbf{E}\{\langle}\def\>{\rangle\psi_{r,t},X_r\>\theta_\alpha(r,t)\zeta_\beta(r,t)|{\cal F}_r\} \theta_\alpha(0,r)\zeta_\beta(0,r) \nonumber \\ &&\qquad= \mbf{E}\{\langle}\def\>{\rangle\psi_{r,t},X_r\>\theta_\alpha(r,t)\mbf{E}^W[\zeta_\beta(r,t) |{\cal F}_r]|{\cal F}_r\}\theta_\alpha(0,r)\zeta_\beta(0,r) \nonumber \\ &&\qquad= \mbf{E}\{\langle}\def\>{\rangle\psi_{r,t},X_r\>\theta_\alpha(r,t) |{\cal F}_r\}\theta_\alpha(0,r)\zeta_\beta(0,r) \nonumber \\ &&\qquad= \langle}\def\>{\rangle\xi_{r,t},X_r\>\theta_\alpha(0,r)\zeta_\beta(0,r). \end{eqnarray*} A similar calculation gives (\ref{5.10}). 
\qed \begin{lemma}}\def\elemma{\end{lemma}\label{l5.3} For $t\ge r\ge 0$ and $x\in \mathbb}\def\mbf{\mathbf{R}$, we have \begin{eqnarray}\label{5.11} \xi_{r,t}(x) &=& \phi(x) + \int_r^t \bigg[\frac{1}{2}a(x)\xi_{s,t}^{\prime\prime}(x) - b(x) \xi_{s,t}(x) - \frac{1}{2}\sigma(x)\eta_{s,t}(x)\bigg] ds \nonumber \\ & & + \int_r^t\langle}\def\>{\rangleh(\cdot-x), \alpha(s,\cdot)\> \xi_{s,t}^\prime(x)ds, \end{eqnarray} where the derivatives are taken in the classical sense. \elemma \noindent{\it Proof.~~} Note that the backward and forward integrals coincide for deterministic integrands. Then we may fix $t>0$ and apply It\^o's formula to the process $\{\theta_\alpha(r,t): r\in [0,t]\}$ to get \begin{eqnarray}\label{5.12} \theta_\alpha(r,t) = 1 + \int_r^t\int_{\mathbb}\def\mbf{\mathbf{R}} \theta_\alpha(s,t) \alpha(s,y) \cdot W(ds,dy). \end{eqnarray} By (\ref{5.2}), (\ref{5.12}) and backward It\^o formula, for any $f\in C^\infty_b(\mathbb}\def\mbf{\mathbf{R})$ we have \begin{eqnarray}\label{5.13} \langle}\def\>{\rangle\psi_{r,t},f\>\theta_\alpha(r,t) &=& \langle}\def\>{\rangle\phi,f\> + \int_r^t\bigg[\frac{1}{2}\langle}\def\>{\ranglea\psi_{s,t} ^{\prime\prime}, f\> - \langle}\def\>{\rangleb\psi_{s,t},f\> - \frac{1}{2} \langle}\def\>{\rangle\sigma\psi_{s,t}^2,f\>\bigg]\theta_\alpha(s,t) ds \nonumber \\ & & + \int_r^t\int_{\mathbb}\def\mbf{\mathbf{R}} [\langle}\def\>{\rangleh(y-\cdot)\psi_{s,t}^\prime,f\> + \langle}\def\>{\rangle\psi_{s,t},f\>\alpha(s,y)]\theta_\alpha(s,t) \cdot W(ds,dy) \nonumber \\ & & + \int_r^t\int_{\mathbb}\def\mbf{\mathbf{R}} \langle}\def\>{\rangleh(y-\cdot)\psi_{s,t}^\prime,f\> \theta_\alpha(s,t)\alpha(s,y) ds dy. \end{eqnarray} (See e.g.\ \cite[p.124]{B92} for the backward It\^o formula.) 
Observe that for fixed $t>0$, the process \begin{eqnarray*} \int_r^t\int_{\mathbb}\def\mbf{\mathbf{R}} [\langle}\def\>{\rangleh(y-\cdot)\psi_{s,t}^\prime,f\> + \langle}\def\>{\rangle\psi_{s,t},f\>\alpha(s,y)]\theta_\alpha(s,t) \cdot W(ds,dy) \end{eqnarray*} is a backward martingale in $r\le t$. Taking the expectation in (\ref{5.13}) we obtain \begin{eqnarray*} \langle}\def\>{\rangle\xi_{r,t},f\> &=& \langle}\def\>{\rangle\phi,f\> + \int_r^t \bigg[\frac{1}{2} \langle}\def\>{\ranglea\xi_{s,t}^{\prime\prime}, f\> - \langle}\def\>{\rangleb\xi_{s,t},f\> - \frac{1}{2} \langle}\def\>{\rangle\sigma\eta_{s,t},f\>\bigg] ds \nonumber \\ & & + \int_r^t\int_{\mathbb}\def\mbf{\mathbf{R}} \langle}\def\>{\rangleh(y-\cdot)\xi_{s,t}^\prime,f\> \alpha(s,y) ds dy. \end{eqnarray*} Then $\{\xi_{r,t}\}$ must coincide with the classical solution of the parabolic equation (\ref{5.11}). \qed \begin{lemma}}\def\elemma{\end{lemma}\label{l5.4} For any $t\ge r\ge 0$, we have a.s.\ \begin{eqnarray}\label{5.14} \langle}\def\>{\rangle\phi,X_t\> = \langle}\def\>{\rangle\psi_{r,t},X_r\> + \int_r^t\int_{\mathbb}\def\mbf{\mathbf{R}}\psi_{s,t}(x)Z(ds,dx) + \frac{1}{2}\int_r^t \langle}\def\>{\rangle\sigma\psi_{s,t}^2,X_s\>ds + \int_r^t \langle}\def\>{\rangle\psi_{s,t},m\>ds. \end{eqnarray} \elemma \noindent{\it Proof.~~} In view of (\ref{5.1}) and (\ref{5.11}), we may integrate $\xi_{s,t}$ backward relative to $X_s$ to see that \begin{eqnarray*} d\langle}\def\>{\rangle\xi_{s,t},X_s\> &=& \frac{1}{2}\langle}\def\>{\rangle\sigma\eta_{s,t},X_s\> ds - \int_{\mathbb}\def\mbf{\mathbf{R}} \langle}\def\>{\rangleh(y-\cdot)\xi_{s,t}^\prime,X_s\> \alpha(s,y) ds dy + \langle}\def\>{\rangle\xi_{s,t},m\>ds \nonumber \\ & & + \int_{\mathbb}\def\mbf{\mathbf{R}} \xi_{s,t}(y) Z(ds,dy) + \int_{\mathbb}\def\mbf{\mathbf{R}} \langle}\def\>{\rangleh(y-\cdot) \xi_{s,t}^\prime,X_s\> W(ds,dy), \end{eqnarray*} where the first two terms from (\ref{5.11}) cancelled out with the second and third terms from (\ref{5.1}). 
Since the two martingale measures $\{W(ds,dy)\}$ and $\{Z(ds,dy)\}$ are orthogonal, by It\^o's formula we have \begin{eqnarray}\label{5.15} d\langle}\def\>{\rangle\xi_{s,t},X_s\> \theta_\alpha(0,s)\zeta_\beta(0,s) &=& \frac{1}{2}\langle}\def\>{\rangle\sigma\eta_{s,t},X_s\>\theta_\alpha(0,s) \zeta_\beta(0,s) ds + \langle}\def\>{\rangle\xi_{s,t},m\>\theta_\alpha(0,s) \zeta_\beta(0,s)ds \nonumber \\ & & + \int_{\mathbb}\def\mbf{\mathbf{R}} \langle}\def\>{\rangleh(y-\cdot)\xi_{s,t}^\prime,X_s\> \theta_\alpha(0,s)\zeta_\beta(0,s) W(ds,dy) \nonumber \\ & & + \int_{\mathbb}\def\mbf{\mathbf{R}} \xi_{s,t}(y)\theta_\alpha(0,s) \zeta_\beta(0,s)Z(ds,dy) \nonumber \\ & & + \int_{\mathbb}\def\mbf{\mathbf{R}} \langle}\def\>{\rangle\xi_{s,t},X_s\>\theta_\alpha(0,s) \zeta_\beta(0,s)\alpha(s,y) W(ds,dy) \nonumber \\ & & + \int_{\mathbb}\def\mbf{\mathbf{R}} \langle}\def\>{\rangle\xi_{s,t},X_s\>\theta_\alpha(0,s) \zeta_\beta(0,s)\beta(s,y) Z(ds,dy) \nonumber \\ & & + \,\langle}\def\>{\rangle\sigma\xi_{s,t} \beta(s,\cdot),X_s\> \theta_\alpha(0,s)\zeta_\beta(0,s)ds. \end{eqnarray} By a calculation similar to the proof of Lemma~\ref{l5.2} we get \begin{eqnarray}\label{5.16} \mbf{E}\{\langle}\def\>{\rangle\psi_{s,t},m\>\theta_\alpha(0,t)\zeta_\beta(0,t)|{\cal F}_s\} = \langle}\def\>{\rangle\xi_{s,t},m\>\theta_\alpha(0,s)\zeta_\beta(0,s). 
\end{eqnarray} {From} (\ref{5.10}), (\ref{5.15}) and (\ref{5.16}) it follows that \begin{eqnarray}\label{5.17} & &\mbf{E}\{\langle}\def\>{\rangle\phi,X_t\>\theta_\alpha(0,t)\zeta_\beta(0,t)\} - \mbf{E}\{\langle}\def\>{\rangle\xi_{r,t},X_r\>\theta_\alpha(0,r) \zeta_\beta(0,r)\} \nonumber \\ &=& \frac{1}{2}\mbf{E}\bigg\{\int_r^t\langle}\def\>{\rangle\sigma\eta_{s,t},X_s\> \theta_\alpha(0,s)\zeta_\beta(0,s) ds\bigg\} + \mbf{E}\bigg\{\int_r^t\langle}\def\>{\rangle\xi_{s,t},m\> \theta_\alpha(0,s)\zeta_\beta(0,s)ds\bigg\} \nonumber \\ & & +\, \mbf{E}\bigg\{\int_r^t\langle}\def\>{\rangle\sigma\xi_{s,t} \beta(s,\cdot),X_s\> \theta_\alpha(0,s)\zeta_\beta(0,s)ds\bigg\} \nonumber \\ &=& \frac{1}{2}\mbf{E}\bigg\{\int_r^t\langle}\def\>{\rangle\sigma\psi_{s,t}^2,X_s\> \theta_\alpha(0,t)\zeta_\beta(0,t) ds\bigg\} + \mbf{E}\bigg\{\int_r^t\langle}\def\>{\rangle\psi_{s,t},m\> \theta_\alpha(0,t) \zeta_\beta(0,t)ds\bigg\} \nonumber \\ & & +\, \mbf{E}\bigg\{\int_r^t\langle}\def\>{\rangle\sigma\xi_{s,t} \beta(s,\cdot),X_s\> \theta_\alpha(0,s)\zeta_\beta(0,s)ds\bigg\}. 
\end{eqnarray} By (\ref{5.6}) and It\^o's formula we have \begin{eqnarray*} \zeta_\beta(0,t) = 1 + \int_0^t\int_{\mathbb}\def\mbf{\mathbf{R}} \zeta_\beta(0,s)\beta(s,y) Z(ds,dy), \end{eqnarray*} and hence \begin{eqnarray*} &&\mbf{E}\bigg\{\int_r^t\int_{\mathbb}\def\mbf{\mathbf{R}}\psi_{s,t}(y) Z(ds,dy) \theta_\alpha(0,t)\zeta_\beta(0,t)\bigg\} \\ &&\qquad = \mbf{E}\bigg\{\mbf{E}^W\bigg[\int_r^t\int_{\mathbb}\def\mbf{\mathbf{R}} \psi_{s,t}(y) Z(ds,dy)\zeta_\beta(0,t)\bigg] \theta_\alpha(0,t)\bigg\} \\ &&\qquad = \mbf{E}\bigg\{\mbf{E}^W\bigg[\int_r^t\langle}\def\>{\rangle\sigma \psi_{s,t}\beta(s,\cdot),X_s\> \zeta_\beta(0,s)ds\bigg] \theta_\alpha(0,t)\bigg\} \\ &&\qquad = \int_r^t\mbf{E}\bigg[\langle}\def\>{\rangle\sigma \psi_{s,t}\beta(s,\cdot),X_s\>\theta_\alpha(0,t) \zeta_\beta(0,s)\bigg]ds \\ &&\qquad = \int_r^t\mbf{E}\bigg[\langle}\def\>{\rangle\sigma \xi_{s,t}\beta(s,\cdot),X_s\>\theta_\alpha(0,s) \zeta_\beta(0,s)\bigg]ds, \end{eqnarray*} where the last equality follows from (\ref{5.9}). Then we substitute the above into (\ref{5.17}) to get \begin{eqnarray*} && \mbf{E}\{\langle}\def\>{\rangle\phi,X_t\>\theta_\alpha(0,t)\zeta_\beta(0,t)\} - \mbf{E}\{\langle}\def\>{\rangle\xi_{r,t},X_r\>\theta_\alpha(0,r) \zeta_\beta(0,r)\} \nonumber \\ &&\qquad = \frac{1}{2}\mbf{E}\bigg\{\int_r^t\langle}\def\>{\rangle\sigma\psi_{s,t}^2,X_s\> \theta_\alpha(0,t)\zeta_\beta(0,t) ds\bigg\} + \mbf{E}\bigg\{\int_r^t\langle}\def\>{\rangle\psi_{s,t},m\> \theta_\alpha(0,t) \zeta_\beta(0,t)ds\bigg\} \nonumber \\ & &\qquad\qquad +\, \mbf{E}\bigg\{\int_r^t\int_{\mathbb}\def\mbf{\mathbf{R}}\psi_{s,t}(y) Z(ds,dy)\theta_\alpha(0,t)\zeta_\beta(0,t)\bigg\}. 
\end{eqnarray*} On the other hand, by (\ref{5.9}) we have \begin{eqnarray*} & &\mbf{E}\{\langle}\def\>{\rangle\phi,X_t\>\theta_\alpha(0,t)\zeta_\beta(0,t)\} - \mbf{E}\{\langle}\def\>{\rangle\xi_{r,t},X_r\>\theta_\alpha(0,r)\zeta_\beta(0,r)\} \\ &=& \mbf{E}\{[\langle}\def\>{\rangle\phi,X_t\> - \langle}\def\>{\rangle\psi_{r,t},X_r\>] \theta_\alpha(0,t)\zeta_\beta(0,t)\}. \end{eqnarray*} It follows that \begin{eqnarray*} \lefteqn{\mbf{E}\bigg\{\bigg[\langle}\def\>{\rangle\phi,X_t\> - \langle}\def\>{\rangle\psi_{r,t},X_r\> - \frac{1}{2}\int_r^t \langle}\def\>{\rangle\sigma\psi_{s,t}^2,X_s\> ds} \\ & & - \int_r^t \langle}\def\>{\rangle\psi_{s,t},m\>ds - \int_r^t\int_{\mathbb}\def\mbf{\mathbf{R}}\psi_{s,t}(x)Z(ds,dx)\bigg] \theta_\alpha(0,t)\zeta_\beta(0,t)\bigg\} = 0. \end{eqnarray*} Then we have the desired equation; see e.g.\ \cite[p.81]{B92} and \cite{C04}. \qed \noindent\textit{Proof of Theorem~\ref{t5.1}.~} Recall that $Z(ds,dy)$ is an orthogonal martingale measure with covariation measure $\sigma(y) X_s(dy)ds$. By Lemma~\ref{l5.1}, for any fixed $u\ge r$ the process \begin{eqnarray*} \exp\bigg\{- \int_r^t\int_{\mathbb}\def\mbf{\mathbf{R}}\psi_{s,u}(y)Z(ds,dy) - \frac{1}{2}\int_r^t \langle}\def\>{\rangle\sigma\psi_{s,u}^2,X_s\> ds\bigg\}, \qquad r\le t\le u, \end{eqnarray*} is a martingale under $\mbf{P}^W$. By Lemma~\ref{l5.4} we get a.s.\ \begin{eqnarray*} \mbf{E}^W\{ e^{-\langle}\def\>{\rangle\phi,X_t\>}|{\cal F}_r\} &=& \mbf{E}^W\bigg[ \exp\bigg\{-\langle}\def\>{\rangle\psi_{r,t},X_r\> - \int_r^t\int_{\mathbb}\def\mbf{\mathbf{R}}\psi_{s,t}(y)Z(ds,dy) \\ & & - \frac{1}{2}\int_r^t \langle}\def\>{\rangle\sigma\psi_{s,t}^2, X_s\> ds - \int_r^t\langle}\def\>{\rangle\psi_{s,t},m\>ds\bigg\} \bigg|{\cal F}_r\bigg] \nonumber \\ &=& \exp\bigg\{-\langle}\def\>{\rangle\psi_{r,t},X_r\> - \int_r^t\langle}\def\>{\rangle\psi_{s,t},m\>ds\bigg\}, \end{eqnarray*} giving (\ref{5.3}). 
In particular, we have \begin{eqnarray}\label{5.18} \mbf{E}\{ e^{-\langle}\def\>{\rangle\phi,X_t\>}\} = \mbf{E}\exp\bigg\{-\langle}\def\>{\rangle\psi_{0,t},\mu\> - \int_0^t\langle}\def\>{\rangle\psi_{s,t},m\>ds\bigg\}. \end{eqnarray} The distribution of $X_t$ is uniquely determined by (\ref{5.18}) and the uniqueness of solution of (\ref{5.1}) follows. This in turn implies the strong Markov property of $\{X_t: t\ge0\}$. Since $\psi_{r,t} (x)$ is continuous in $x\in \mathbb}\def\mbf{\mathbf{R}$, the transition semigroup $(Q_t)_{t\ge 0}$ defined by (\ref{5.4}) is Feller. \qed \section{Some properties of the SDSMI} \setcounter{equation}{0} We here investigate some properties of the SDSMI. Let $(c, h, \sigma, b, m)$ be given as in the introduction. As in the last section, let $\mbf{P}^W$ and $\mbf{E}^W$ denote respectively the conditional probability and expectation given the white noise $\{W(ds,dy)\}$. The equality (\ref{5.3}) suggests that $\{X_t: t\ge 0\}$ under $\mbf{P}^W$ is a Markov process with transition semigroup $(Q^W_{r,t})_{t\ge r}$ satisfying a.s.\ \begin{eqnarray}\label{6.1} \int_{M(\mathbb}\def\mbf{\mathbf{R})}e^{-\langle}\def\>{\rangle\phi,\nu\>} Q^W_{r,t}(\mu, d\nu) = \exp\bigg\{-\langle}\def\>{\rangle\psi^W_{r,t},\mu\> - \int_r^t\langle}\def\>{\rangle\psi^W_{s,t},m\>ds\bigg\}. \end{eqnarray} In other words, the SDSMI conditioned upon $\{W(ds,dy)\}$ should be an inhomogeneous immigration superprocess. This observation suggests a number of applications of the conditional log-Laplace functional. For instance, based on the results in the last section, the conditional excursion theory of the SDSM has been developed in \cite{LWX04b}. Moreover, some moment formulas can also be derived from (\ref{5.3}) in a similar way as \cite{X04}. As another application of the conditional Laplace functionals, we prove the following ergodicity property of the SDSMI. 
\begin{theorem}}\def\etheorem{\end{theorem}\label{t6.1} Suppose that there is a constant $\epsilon >0$ such that $b(x) \ge \epsilon$ for all $x\in \mathbb}\def\mbf{\mathbf{R}$. Then the SDSMI has a unique stationary distribution $Q_\infty$ given by \begin{eqnarray}\label{6.2} \int_{M(\mathbb}\def\mbf{\mathbf{R})} e^{-\langle}\def\>{\rangle\phi,\nu\>} Q_\infty(d\nu) = \mbf{E}\exp\bigg\{- \int_0^\infty\langle}\def\>{\rangle\psi^W_t,m\>dt\bigg\}, \end{eqnarray} where $\psi^W_t(x)$ is the solution of (\ref{4.1}). Moreover, we have $\lim_{t\to\infty} Q_t(\mu,\cdot) = Q_\infty(\cdot)$ in the topology of weak convergence for each $\mu \in M(\mathbb}\def\mbf{\mathbf{R})$. \etheorem \noindent{\it Proof.~~} Using the notation of the proof of Theorem~\ref{t4.2}, for any $t\ge r\ge 0$ we have \begin{eqnarray*} \mbf{E}\exp\bigg\{-\int_r^t\langle}\def\>{\rangle\psi^W_{s,t},m\>ds\bigg\} &=& \mbf{E}\exp\bigg\{-\int_r^t\langle}\def\>{\rangle\phi^W_{t-s,t},m\>ds\bigg\} \\ &=& \mbf{E}\exp\bigg\{-\int_0^{t-r}\langle}\def\>{\rangle\phi^W_{s,t},m\>ds\bigg\} \\ &=& \mbf{E}\exp\bigg\{-\int_0^{t-r}\langle}\def\>{\rangle\psi^W_s,m\>ds\bigg\}, \end{eqnarray*} where the last equality follows by the property of independent and stationary increments of the time-space white noise. By Theorem~\ref{t4.2} we have $\|\psi^W_{s,t}\| \le e^{-\epsilon (t-s)}\|\phi\|$ for $s\le t$. It follows that \begin{eqnarray*} \lim_{t\to\infty}\int_{M(\mathbb}\def\mbf{\mathbf{R})} e^{-\langle}\def\>{\rangle\phi,\nu\>} Q_t(\mu,d\nu) &=& \lim_{t\to\infty}\mbf{E}\exp\bigg\{-\langle}\def\>{\rangle\psi^W_{0,t},\mu\> - \int_0^t\langle}\def\>{\rangle\psi^W_{s,t},m\>ds\bigg\} \\ &=& \lim_{t\to\infty}\mbf{E}\exp\bigg\{ - \int_0^t\langle}\def\>{\rangle\psi^W_{s,t},m\>ds\bigg\} \\ &=& \mbf{E}\exp\bigg\{ - \int_0^{\infty}\langle}\def\>{\rangle\psi^W_s,m\>ds\bigg\}. 
\end{eqnarray*} On the other hand, by Theorem~\ref{t4.1} it is easy to get \begin{eqnarray*} \lim_{\|\phi\|\to 0}\mbf{E}\exp\bigg\{- \int_0^{\infty} \langle}\def\>{\rangle\psi^W_s,m\>ds\bigg\} = 1. \end{eqnarray*} Then (\ref{6.2}) defines a probability measure $Q_\infty$ on $M(\mathbb}\def\mbf{\mathbf{R})$ and $\lim_{t\to\infty} Q_t(\mu,\cdot) = Q_\infty(\cdot)$ in the topology of weak convergence; see e.g.\ \cite[Lemma~2.1]{L02}. \qed The properties of the SDSMI vary sharply for different choices of the parameters. The special case where $b(\cdot) \equiv 0$ and $\langle}\def\>{\rangle1, m\> = 0$ was discussed in \cite{DLW01, DVW00, W97, W02}. In this case, we have \begin{eqnarray}\label{6.3} \langle}\def\>{\rangle\phi,X_t\> &=& \langle}\def\>{\rangle\phi,\mu\> + \frac{1}{2}\int_0^t\langle}\def\>{\ranglea\phi^{\prime\prime}, X_s\> ds + \int_0^t\int_{\mathbb}\def\mbf{\mathbf{R}}\phi(y) Z(ds,dy) \nonumber \\ & & + \int_0^t\int_{\mathbb}\def\mbf{\mathbf{R}}\langle}\def\>{\rangleh(y-\cdot) \phi^\prime,X_s\> W(ds,dy). \end{eqnarray} The solution of (\ref{6.3}) is a critical branching SDSM without immigration. In particular, if $c(\cdot)$ is bounded away from zero, then $\{X_t: t>0\}$ is absolutely continuous for any initial state $X_0$; see \cite{DLW01, DVW00, W97}. On the other hand, if $c(\cdot) \equiv 0$, then $\{X_t: t>0\}$ is purely atomic for any initial state $X_0$; see \cite{DL03, W97, W02}. Another special case is where $\sigma(\cdot) \equiv 0$ and $\langle}\def\>{\rangle1,m\> = 0$. In this case, we get from (\ref{5.1}) the linear equation \begin{eqnarray}\label{6.4} \langle}\def\>{\rangle\phi,X_t\> = \langle}\def\>{\rangle\phi,\mu\> + \frac{1}{2}\int_0^t\langle}\def\>{\ranglea\phi^{\prime\prime}, X_s\> ds - \int_0^t\langle}\def\>{\rangleb\phi,X_s\> ds + \int_0^t\int_{\mathbb}\def\mbf{\mathbf{R}}\langle}\def\>{\rangleh(y-\cdot) \phi^\prime,X_s\> W(ds,dy). 
\end{eqnarray} The process defined in this way is closely related to the superprocesses arising from isotropic stochastic flows investigated by \cite{MX01}. The following theorem shows that $\{X_t: t\ge0\}$ is absolutely continuous for a large class of absolutely continuous initial states. \begin{theorem}}\def\etheorem{\end{theorem}\label{t6.2} If $\{X_t: t\ge0\}$ is a solution of (\ref{6.4}) with $X_0(dx) = v_0(x)dx$ for some $v_0\in H_0(\mathbb}\def\mbf{\mathbf{R})$, then there is an $H_0(\mathbb}\def\mbf{\mathbf{R})$-valued process $\{v_t: t\ge0\}$ such that $X_t(dx) = v_t(x)dx$ a.s.\ holds. \etheorem \noindent{\it Proof.~~} By \cite[Theorem~3.5]{KX99}, the equation \begin{eqnarray}\label{6.5} v_t(x) = v_0(x) + \int_0^t \bigg[\frac{1}{2}(av_s)^{\prime\prime}(x) - b(x)v_s(x)\bigg] ds - \int_0^t\int_{\mathbb}\def\mbf{\mathbf{R}} (h(y-\cdot)v_s)^\prime(x) W(ds,dy) \end{eqnarray} has a unique $H_0(\mathbb}\def\mbf{\mathbf{R})$-valued solution $\{v_t: t\ge0\}$. Let $X_t(dx) = v_t(x)dx$. Clearly, $\{X_t: t\ge0\}$ solves (\ref{6.4}). \qed \noindent \end{document}
\begin{document} \title{Co-Design quantum simulation of nanoscale NMR} \author{Manuel G. Algaba} \thanks{Both authors contributed equally to this work. \\ Corresponding author: [email protected] \\ Corresponding author: [email protected]} \affiliation{IQM Quantum Computers, Nymphenburgerstr. 86, 80636 Munich, Germany} \author{Mario Ponce-Martinez} \thanks{Both authors contributed equally to this work. \\ Corresponding author: [email protected] \\ Corresponding author: [email protected]} \affiliation{IQM Quantum Computers, Nymphenburgerstr. 86, 80636 Munich, Germany} \affiliation{Department of Physics and Arnold Sommerfeld Center for Theoretical Physics, Ludwig-Maximilians-Universit\" at M\" unchen, Theresienstrasse 37, 80333 Munich, Germany} \author{Carlos Munuera-Javaloy} \affiliation{Department of Physical Chemistry, University of the Basque Country UPV/EHU, Apartado 644, 48080 Bilbao, Spain} \author{Vicente Pina-Canelles} \affiliation{IQM Quantum Computers, Nymphenburgerstr. 86, 80636 Munich, Germany} \author{Manish J. Thapa} \affiliation{IQM Quantum Computers, Nymphenburgerstr. 86, 80636 Munich, Germany} \author{Bruno G. Taketani} \affiliation{IQM Quantum Computers, Nymphenburgerstr. 86, 80636 Munich, Germany} \author{Martin Leib} \affiliation{IQM Quantum Computers, Nymphenburgerstr. 86, 80636 Munich, Germany} \author{In\'es de Vega} \affiliation{IQM Quantum Computers, Nymphenburgerstr. 
86, 80636 Munich, Germany} \affiliation{Department of Physics and Arnold Sommerfeld Center for Theoretical Physics, Ludwig-Maximilians-Universit\" at M\" unchen, Theresienstrasse 37, 80333 Munich, Germany} \author{Jorge Casanova} \affiliation{Department of Physical Chemistry, University of the Basque Country UPV/EHU, Apartado 644, 48080 Bilbao, Spain} \affiliation{IKERBASQUE, Basque Foundation for Science, Plaza Euskadi 5, 48009 Bilbao, Spain} \author{Hermanni Heimonen} \affiliation{IQM Quantum Computers, Keilaranta 19, FI-02150 Espoo, Finland} \begin{abstract} Quantum computers have the potential to efficiently simulate the dynamics of nanoscale NMR systems. In this work we demonstrate that a noisy intermediate-scale quantum computer can be used to simulate and predict nanoscale NMR resonances. In order to minimize the required gate fidelities, we propose a superconducting application-specific Co-Design quantum processor that reduces the number of SWAP gates by over 90\% for chips with more than 20 qubits. The processor consists of transmon qubits capacitively coupled via tunable couplers to a central co-planar waveguide resonator with a quantum circuit refrigerator (QCR) for fast resonator reset. The QCR implements the non-unitary quantum operations required to simulate nuclear hyperpolarization scenarios. \end{abstract} \date{\today} \maketitle \section{Introduction} Computer simulations are the backbone of scientific research and technological development. Quantum computers promise in the long term to enable simulations of systems that are intractable to even the largest supercomputers~\cite{feynman2018simulating,lloyd1996universal}. Currently, scientists have access to so-called noisy intermediate-scale quantum (NISQ) computers~\cite{preskill2018quantum}, that present limited qubit counts without error correction. 
While applications of error-corrected quantum computers are well established, use cases where NISQ devices might achieve quantum advantage are still elusive~\cite{bharti2021noisy}. In the search for these early applications, the problem must fit the hardware, and the hardware must enable implementation with minimal overheads. Application-Specific Integrated Chips (ASICs) are highly specialized processors optimized for specific problems when execution speed, power efficiency, or miniaturization is of utmost importance~\cite{smith1997application}. A prominent example where computational speed and energy efficiency are optimized through the use of ASICs is training of artificial neural networks using tensor processing units~\cite{hsu2021gptpu,lu2020accelerating}. Building a general-purpose quantum computer capable of rivaling the most powerful classical computers has proven to be a difficult task, so it is likely that the first devices reaching useful quantum advantage will use quantum ASICs, also called Co-Design quantum computers. A good example of a problem with suitable structure for simulation by quantum computers is nanoscale nuclear magnetic resonance (NMR)~\cite{Staudacher2013}. The problem can be described by a number of mutually interacting spins, which natively map to the qubits of a quantum computer, thereby circumventing the overheads in mapping the problem to qubits, such as in the case of fermions~\cite{nielsen2002quantum}. In general, fast and reliable quantum simulations of interacting spin systems would improve the interpretability of solid-state NMR and electron spin resonance (ESR) spectra, where advanced numerical techniques present very limited performance~\cite{kuprovspinach}. This shows the potential of quantum computers with a moderate number of qubits to shed light on the dynamics of these important systems. A Co-Design quantum computer that minimizes algorithm implementation overheads could be the first method to access these simulations. 
Note that, other NMR problems, such as zero-field NMR~\cite{seetharam2021digital} and Hamiltonian learning~\cite{o2021quantum}, have already attracted research on how quantum computers can be used to tackle them and methods based on Bayesian computation~\cite{sels2020quantum} and generative models~\cite{sels2021quantum} have been developed for computing NMR spectra as well. NMR techniques have a profound impact in research areas such as material science, chemistry, biology, and medicine~\cite{levitt2013spin}. Recently they have approached the nanoscale through solid-state quantum sensors such as the nitrogen vacancy (NV) center in diamond~\cite{doherty2013nitrogen}. This is a particularly powerful quantum device, as it enables detection and control of nearby nuclear spins with nanoscale resolution~\cite{abobeih2019atomic}. Applications of the device are, e.g., the precise determination of the structure and dynamics of nuclear ensembles such as proteins~\cite{MunueraJavaloy2021}, finding inter-label distances (via, e.g., Bayesian analysis of the NV response) in electronically labelled biomolecules~\cite{munuerajavaloy2021detection}, and the exploration of bespoke microwave (MW) sequences that efficiently transfer NV center polarization to the nuclear environment. Hyperpolarization (i.e. polarization beyond that of a thermal state in a magnetic field) of nuclear spins in diamond presents the potential to develop new and safer contrast agents for magnetic resonance imaging. This problem, which we aim to address through simulation by a quantum computer, could lead to improved detection of different malformations in tissues --such as heart or brain-- without the need to deliver ionizing radiation, in contrast to other techniques~\cite{ajoy2018orientation}. This manuscript describes a Co-Design process for a quantum chip able to efficiently simulate nanoscale NMR scenarios. It is structured in three main parts, each of which is a crucial step in the Co-Design process: 1. 
Identifying the problem (Sec.~\ref{sec:hyperpolarization}), which here is simulating a nanoscale NMR system for hyperpolarizing nuclear spins. 2. Choosing an algorithm for the nanoscale NMR problem and showing that a star-topology chip implements it with minimal overhead (Sec.~\ref{sec:algorithm}), and 3. Co-Designing the corresponding quantum chip using a central resonator bus (Sec.~\ref{sec:co-design_HW}). The sections are followed by results and discussions (Sec.~\ref{section:results}) and an outlook (Sec.~\ref{sec:conclusions}). \section{Nanoscale NMR: Hyperpolarization } \label{sec:hyperpolarization} Let us consider a system consisting of $M$ nitrogen-vacancy (NV) centers and $N$ carbon-13 isotopes in the presence of a driving field and an external magnetic field $\vec{B}_Z$. NV centers and nuclei are all effectively described as spin-1/2 systems. The representation of such a system for $M=1$, $N=2$ is shown in Fig.~\ref{fig:interaction_scheme}. For simplicity, we consider the NV centers aligned with the external magnetic field, leading to the following Hamiltonian: \begin{equation}{\label{eq:hamiltonian1}} \begin{split} H = &\sum_{j = 1}^M \delta_j \sigma^z_j-\sum_{k = 1}^N\vec{\omega}^c_k\cdot\vec{I}_k+ \sum_{j = 1}^M\sum_{k = 1}^N\frac{\sigma^z_j}{2} \vec{A}_{jk}\cdot\vec{I}_k+\\&+\sum_{k>k'}^N g_{k'k}\left[I_{k'}^z I_k^z-\frac{1}{4}(I_{k'}^+I_k^-+I_{k'}^-I_k^+)\right]+\\&+\sum_{j>j'}^M h_{j'j}\left[\sigma_{j'}^z \sigma_j^z-2(\sigma_{j'}^+\sigma_j^-+\sigma_{j'}^-\sigma_j^+)\right] + H_{\textrm{dr}}. 
\end{split} \end{equation} In Eq.~(\ref{eq:hamiltonian1}) we find the spin operators in the joint Hilbert space $\mathbb{C}^{2^{(M+N)}}$ of NV centers and nuclei: $$\sigma_j^{\mu}=\underbrace{\mathbb{1} \otimes \cdots \otimes \mathbb{1} \otimes \overbrace{\sigma_{\mu}}^{j^{\textrm{th}} \textrm{pos.}} \otimes \, \mathbb{1} \otimes \cdots \otimes \mathbb{1}}_{M \text { factors }} \otimes \underbrace{\mathbb{1} \otimes \ldots \otimes \mathbb{1}}_{N \text { factors }},$$ $$I_k^\mu=\underbrace{\mathbb{1} \otimes \ldots \otimes \mathbb{1}}_{M \text { factors }} \otimes \underbrace{\mathbb{1} \otimes \cdots \otimes \mathbb{1} \otimes \overbrace{\tfrac{1}{2} \sigma_\mu}^{(M+k)^{\textrm{th}} \textrm{pos.}} \otimes \, \mathbb{1} \otimes \cdots \otimes \mathbb{1}}_{N \text { factors }},$$ where $(\sigma_{\mu})_{2\times 2}$, $\mu \in \{x,y,z \}$ is the corresponding $2\times 2$ Pauli matrix on the $j^{\textrm{th}}$ NV center and the $k^{\textrm{th}}$ nucleus respectively, and $\mathbb{1}$ is the $2\times 2$ identity matrix. Accordingly, $\sigma^\pm_j = \frac{\sigma^x_j\pm i\sigma^y_j}{2}\left(I^\pm_k = I^x_k\pm iI^y_k\right)$ are the $j^{\textrm{th}}$ NV center ($k^{\textrm{th}}$ nucleus) ladder operators. The term $\delta_j$ is the detuning of the $j^{_{\textrm{th}}}$ NV center with respect to the microwave drive $H_{\textrm{dr}}$. The hyperfine coupling vector $\vec{A}_{jk}$ represents the coupling between the $j^{_{\textrm{th}}}$ NV center and the $k^{_{\textrm{th}}}$ nucleus, while $\vec{\omega}_k^c = \gamma_c \vec{B}_Z-\frac{1}{2}\sum_{j = 1}^M \vec{A}_{jk}$ is the modified Larmor frequency of the $k^{_{\textrm{th}}}$ nucleus with the $^{13}C$ gyromagnetic ratio $\gamma_c\approx (2\pi) \times 10.7$ MHz/T, $g_{k'k}$ is the coupling between the $k^{_{\textrm{th}}}$ and $k'^{_{\textrm{th}}}$ nuclei, and $h_{j'j}$ is the coupling between the $j^{_{\textrm{th}}}$ and $j'^{_{\textrm{th}}}$ NV centers. 
Note that Eq.~(\ref{eq:hamiltonian1}) is expressed in a rotating frame with respect to the free NV Hamiltonian, while $H_{\rm dr}$ represents an external driving tuned near resonance with a certain NV energy transition. The derivation of Eq.~(\ref{eq:hamiltonian1}) can be found in Appendix~\ref{appendix:H_derivation}. \begin{figure} \caption{NV center with a microwave drive interacting with two mutually interacting $^{13}C$ nuclei.} \label{fig:interaction_scheme} \end{figure} In order to hyperpolarize a diamond sample at room temperature, the NV centers are first optically polarized employing laser light, and then their state is transferred to the surrounding nuclei with the aid of a tailored microwave radiation scheme. The initial state of the nuclei in a room-temperature sample is well described by a fully-mixed state due to the small energy splitting of the nuclear spins. By re-initializing the NV centers and repeating this procedure, the polarization transferred into the sample can be amplified. In this paper we will consider the quantum simulation of the polarization transfer mechanism and study two different driving schemes acting on the NV centers in a room-temperature diamond. The first driving scheme is a continuous driving whose Hamiltonian in the rotating frame mentioned earlier is $H_{\textrm{dr}} =\frac{\Omega}{2} \sigma^{\phi}$, where $\sigma^\phi=e^{-i\phi} |1\rangle\langle 0|+e^{i\phi} |0\rangle\langle 1| = e^{-i\phi}\sigma^{-}+e^{i\phi} \sigma^{+}$, $\phi$ a phase, $\Omega$ the Rabi frequency, and the kets $|1\rangle$ and $|0\rangle$ are the eigenvectors of the operator $\sigma_z$ with eigenvalues $\pm1$, respectively. The set $\{|0\rangle,|1\rangle\}$ is called the computational basis of the state space of a two-level system, and will be our standard choice for a basis, $|0\rangle \equiv (1,0)^t$ and $|1\rangle \equiv (0,1)^t$. NV-nucleus polarization transfer is achieved when the Rabi frequency matches the modified nuclear Larmor frequency (i.e.
when $\Omega = |\vec{\omega}^c|$), leading to the Hartmann-Hahn double-resonance condition~\cite{hartmann1962nuclear}. For a single NV center and nucleus, the Hamiltonian in Eq.~\eqref{eq:hamiltonian1} reduces, in an interaction picture, to $H_I= \frac{A^\perp}{4}\left(|+\rangle\langle-|I^++|-\rangle\langle+|I^-\right)$, where $\ket{\pm} = \frac{1}{\sqrt{2}}\left(\ket{0}\pm\ket{1}\right)$, which shows a polarization transfer mechanism with the effective transfer rate $\frac{A^\perp}{4}$ (a detailed derivation can be found in Appendix~\ref{HH_sequence}). The second type of driving we consider is a pulsed-driving scheme, $H_{\textrm{dr}}=\frac{\Omega(t)}{2}\sigma^\phi$, where $\Omega(t)$ is a train of $\pi$-pulses, such as the Carr-Purcell-Meiboom-Gill sequence~\cite{carr1954effects,meiboom1958modified} or the XY8 sequence~\cite{maudsley1986modified,gullion1990new}. We consider pulses with a negligible width compared to the time spacing $\tau$ between the $\pi$-pulses. If $\tau$ is selected such that $\tau = \frac{n\pi}{|\vec{\omega}^c|}$ ($n$ being an arbitrary integer) and the pulses are evenly spaced, one finds that, in an interaction picture, for a single nucleus and NV center, the Hamiltonian reduces to $H_I = \alpha A^\perp \sigma_z I_x$, where $\alpha$ is a factor that depends on the integer $n$ (see Appendix~\ref{HH_sequence}). A phase imprinted on the pulse sequence through a time delay turns the interaction into $H_I = \alpha A^\perp \sigma_z I_y$. By combining both sequences with the appropriate rotations over the NV center, the polarization transfer interaction $H_I = -\frac{\alpha A^\perp}{4}\left(\sigma^+I^-+\sigma^-I^+\right)$ is achieved (see Appendix~\ref{HH_sequence} and Ref.~\cite{casanova2016noise} for more details). Regarding common error sources, NV centers located at different positions in the diamond lattice experience stress conditions that lead to local energy deviations from the zero-field splitting.
The corresponding term in Eq.~\eqref{eq:hamiltonian1} is the detuning $\delta_{j}$. Another common type of imperfection appears due to unavoidable fluctuations of the Rabi frequency of the driving. This fluctuation can be modelled as an Ornstein-Uhlenbeck (OU) process~\cite{uhlenbeck1930theory}, which has been shown to be an accurate description for NV centers~\cite{cai2012robust}. It is a Gaussian process of the following form~\cite{OUformula}: \begin{equation} \label{eq:OU} X(t+\Delta t)=X(t) \, \mathrm{e}^{-\Delta t / \tau}+\left[\frac{c \tau}{2}\left(1-\mathrm{e}^{-2 \Delta t / \tau}\right)\right]^{1 / 2} N(t), \end{equation} where $\Delta t$ is the time step, $\tau$ the correlation time, $c$ the diffusion constant of the process, and $N(t)$ a temporally uncorrelated normally distributed random variable. The fluctuation $X(t)$ is dimensionless, and it yields an effective Rabi frequency of $\left(1+X \right)\Omega$. Neither of the system error types leads to considerable overheads in a simulation on a quantum computer. Finally, $^{13}C$ nuclear spin decay is not a relevant error source on the time scale of the protocol, since it is of the order of seconds~\cite{13clifetime}, while the hyperpolarization process operates on the order of microseconds. \section{Co-Design algorithm} \label{sec:algorithm} In this section we provide an in-depth description of our Co-Design algorithm, starting with the choice of a simulation technique, followed by a short listing of hardware assumptions related to the allowed qubit operations (gates and resets), as well as the noise and errors present in the physical NMR system and in the quantum computer. Subsequently, the algorithm components are introduced. We end the section with a discussion on layout and gate-level optimization. The high-level structure of the simulation protocol is shown in Fig.~\ref{subfig:algorithm_sketch}.
\subsection{Simulation technique} \label{subsec:simulation_technique} The best established digital quantum simulation technique is based on decomposing the time-evolution operator into single-qubit and two-qubit gates through the Lie-Trotter-Suzuki formula~\cite{Suzuki1976}, known as Trotterization. To simulate our problem on a quantum computer, we base our strategy on regular Trotterization~\cite{lloyd1996universal} but we also explore the randomized Trotterization method qDRIFT~\cite{Campbell2019} in Appendix~\ref{section:other_sim}. Other, more NISQ-specific, simulation techniques such as the variational quantum simulator~\cite{yuan2019theory}, the quantum assisted simulator~\cite{bharti2021quantum}, numerical quantum circuit synthesis~\cite{younis2021qfast}, and a plethora of other quantum algorithms~\cite{bharti2021noisy} can also be used as simulation methods. One advantage of Trotterization over some of these NISQ methods is that it closely follows the real time evolution for each time step. This is particularly important for pulsed-driving schemes, where the free evolution in between different pulses always starts with a different initial state. Variational and quantum assisted methods would then require that each interpulse evolution is solved independently, making them impractical for the problem. A second advantage of Trotterization is that its complexity and precision are straightforward to analyze. The Trotterization procedure can also be expanded to higher orders, and symmetrized expansions converge more rapidly and reduce the error with respect to the continuum time limit~\cite{Hatano2005}. \subsection{Hardware assumptions} \subsubsection{Native gates} \label{subsubsec:native_gates} The hardware for the quantum simulation plays a major role in choosing the optimal quantum algorithm and its specific implementation. 
In our case, we consider a quantum computer based on superconducting qubits with the following native single-qubit gate set: \begin{align} R_{xy}(\phi,\theta)&=e^{-i(\cos{\phi}\,X+\sin{\phi}\,Y)\frac{\theta}{2}}; \,\,\text{and}\\ R_{z}(\theta)&=e^{-iZ\frac{\theta}{2}}, \end{align} where $X$, $Y$, and $Z$ are Pauli operators on the superconducting transmon qubits. The $R_{xy}(\phi,\theta)$ can physically be implemented through a microwave drive~\cite{Krantz2019}. The gate $R_{z}(\theta)$ on the other hand does not need to be implemented directly, but can be performed virtually by tuning the phase of the subsequent gates applied on the qubit~\cite{McKay2017}. This reduces the number of single-qubit gates (SQGs) that need to be implemented. The native two-qubit gate (TQG) that arises from the superconducting system Hamiltonian shown in Sec.~\ref{sec:co-design_HW} and Appendix~\ref{gate_theory} is a continuously-parameterized controlled-$Z$ (CZ) interaction~\cite{yan2018tunable}, which can be transformed through local virtual $R_{z}$-rotations into the form of a $ZZ$-interaction: \begin{align} \label{ZZ_unitary} U_{ZZ}(\phi) = \left( \begin{array}{cccc} e^{-i\phi} & 0 & 0 & 0\\ 0 & e^{i\phi} & 0 & 0\\ 0 & 0 & e^{i\phi} & 0\\ 0 & 0 & 0 & e^{-i\phi} \end{array} \right). \end{align} Even though the $ZZ$-interaction and the controlled-$Z$ interactions appear different, their physical implementation is identical since they are related through virtual $R_{z}$-rotations, which come at no additional cost. Sec.~\ref{sec:co-design_HW} goes into more depth on the two-qubit-gate implementation on our Co-Design quantum chip. \subsubsection{Qubit reset} In the hyperpolarization process the state of the NV needs to be re-initialized after each cycle. It is therefore necessary to be able to reset the state of the qubit representing the NV center in the quantum computer.
A qubit reset operation can be defined by two Kraus operators: \begin{align} K^{\textrm{reset}}_1= \begin{pmatrix} 1 & 0 \\ 0 & 0 \end{pmatrix}, \, K^{\textrm{reset}}_2= \begin{pmatrix} 0 & 1 \\ 0 & 0 \end{pmatrix}. \end{align} On superconducting hardware this can be realized through connecting a quantum circuit refrigerator (QCR) to each circuit element that needs to be reset~\cite{tan2017quantum,silveri2017theory,hsu2020tunable,sevriuk2019fast}. Different reset schemes are discussed in Sec.~\ref{subsec:HW_reset}. \subsubsection{Noise and errors} \label{subsec:noise_and_errors} In this paper we show that the simulation can tolerate the noise of the quantum processing unit (QPU), and that the simulation does not require large overheads to implement imperfections present in the nanoscale-NMR system, as discussed in Sec.~\ref{sec:hyperpolarization}. We will use \textit{system imperfections} to refer to effects in the nanoscale NMR system only, while the QPU is affected by \textit{noise}, referring to the effect of the environment on the qubits, and \textit{errors}, referring to inaccuracies of gates.
In our simulation of the algorithm, we use the most common noise models for superconducting transmon qubits~\cite{Krantz2019}, namely an amplitude damping channel modelled by the Kraus operators: \begin{equation} \begin{aligned} &K^{\textrm{amp}}_{1}(t)=|0\rangle\langle 0|+\sqrt{1-p(t)}| 1\rangle\langle 1|=\left(\begin{array}{cc} 1 & 0 \\ 0 & \sqrt{1-p(t)} \end{array}\right), \\ &K^{\textrm{amp}}_{2}(t)=\sqrt{p(t)}|0\rangle\langle 1|=\left(\begin{array}{cc} 0 & \sqrt{p(t)} \\ 0 & 0 \end{array}\right), \end{aligned} \end{equation} with $p(t) = 1 - \exp\left(-t/T_1\right)$ and $T_1 = 60\,\mu s$, and a pure dephasing channel represented by the Kraus operators: \begin{equation} K^{\textrm{deph}}_{1}(t)=\left(\begin{array}{cc} 1 & 0 \\ 0 & \sqrt{1-p(t)} \end{array}\right), \, K^{\textrm{deph}}_{2}(t)=\left(\begin{array}{cc} 1 & 0 \\ 0 & \sqrt{p(t)} \end{array}\right), \end{equation} with $p(t)=1-\exp (-\Gamma(t))$ and $\Gamma(t)$ given by the expression $\Gamma(t)=\frac{t^{2}}{2} \int_{0}^{\infty} d \omega I(\omega) \operatorname{cotanh}\left(\frac{\beta \omega}{2}\right) \operatorname{sinc}^{2}\left(\frac{\omega t}{2}\right)$ where $\beta$ is the inverse temperature of the environment. We chose the spectral function $I(\omega)$ to be of the type $1/f$~\cite{Krantz2019}, and $T_2 = 60\,\mu s$. 
Additionally, each gate operation is assumed to be calibrated up to a two-qubit-gate (TQG) error $\varepsilon_{\textrm{TQG}} \in [10^{-4},10^{-2}]$, with the induced effective noise modelled by a depolarizing channel defined for single-qubit gates by the Kraus operators: \begin{equation} \begin{aligned} K^{\textrm{depol}}_{1} &=\sqrt{1-p} \ I, \\ K^{\textrm{depol}}_{2} &=\sqrt{p / 3} \ X, \\ K^{\textrm{depol}}_{3} &=\sqrt{p / 3} \ Y, \\ K^{\textrm{depol}}_{4} &=\sqrt{p / 3} \ Z, \end{aligned} \end{equation} and for two-qubit gates by an analogous expression with the tensor products of two Pauli matrices and the coefficients $\sqrt{1-p}$ for the identity and $\sqrt{p/15}$ for the other operators. Single-qubit-gate (SQG) errors $\varepsilon_{\textrm{SQG}}$ are assumed to be one order of magnitude lower than TQG errors. \subsection{Algorithm components} \begin{figure*} \caption{(a) Sketch of the overall operation of the simulation algorithm for one NV center and two nuclei, with continuous driving; (b) corresponding gate sequence of one Trotter step on a star-topology chip for non-interacting nuclei. $H_{\textrm{SQG}}$ denotes the initial single-qubit gates.} \label{subfig:algorithm_sketch} \label{subfig:one_trotter_step} \label{fig:circuit_comparison_graphics} \end{figure*} Our simulation of the nanoscale NMR problem follows the general structure shown in Fig.~\ref{subfig:algorithm_sketch}. It starts by initializing the states of all qubits, according to whether they represent a nucleus or an NV center, then evolving them using Trotter steps, followed by reset and re-initialization of the qubits representing NV centers. The cycle of time evolution and re-initialization is then repeated as many times as the protocol calls for. Finally, the qubits are measured, and the polarizations of the NV centers and nuclei are extracted as the expectation values of the qubit representing each element.
Fig.~\ref{subfig:algorithm_sketch} shows the circuits for the case of continuous driving, while the details of pulsed driving schemes are shown in Fig.~\ref{subfig:algorithm_sketch_pulsed} in Appendix~\ref{HH_sequence}. In the following, we go through these steps in more detail for the case of a single NV center. \subsubsection{Initial state preparation} To enable the polarization transfer, it is necessary to prepare the NV center in a specific initial state that depends on the driving scheme. For the continuous-driving scheme it is the $|+\rangle$ or $|-\rangle$ state, and for the pulsed-driving scheme it is one of the two computational basis states, $|0\rangle$ or $|1\rangle$. For a diamond at room temperature, the initial state of the nuclear spins is well described by a fully-mixed state $\rho_{\textrm{mixed}}=\frac{\mathbb{1}^{\otimes N}}{2^{N}}$, where $\mathbb{1}^{\otimes N}$ is the $2^N \times 2^N$ identity matrix. The state can be approximated by running the algorithm several times, each time with a different initial state obtained by applying $X$ gates randomly on the qubits representing nuclei. A faster alternative to this sampling is the random-phase-approximation-inspired method, described in~\cite{celio1986new}, and introduced into quantum computing in~\cite{McArdle2021}. In this method, the qubits are all prepared in an equal superposition by applying Hadamard gates, and then the phases are randomized through the application of random phase gates. The method effectively reduces the prefactor in the scaling of the sampling error~\cite{McArdle2021}. \subsubsection{Time evolution} \label{subsec:time_evol} We choose to implement the time evolution generated by the Hamiltonian in Eq.~\eqref{eq:hamiltonian1} through Trotterization. For that, the Hamiltonian is rewritten in terms of qubit Pauli operators and arranged into non-commuting terms for an optimal Trotter splitting. 
The resulting circuit, which performs one Trotter step of the evolution in the continuous driving case, is depicted in Fig.~\ref{subfig:one_trotter_step}. It consists of a set of initial single-qubit gates, including the ones corresponding to the driving and the detuning of the NV center, followed by three two-qubit gates per nucleus. There are three types of interaction terms, of the form $XZ$, $YZ$ and $ZZ$, when no internuclear interactions are considered. With interactions there are a total of five interaction terms. Our native gate set only includes one type of two-qubit interaction as explained in section~\ref{subsubsec:native_gates}. Therefore, some SQGs need to be applied in order to convert the interaction terms into the right form, as discussed in Appendix~\ref{section:appendix_time_evol}. Under specific circumstances, some TQGs can be removed by rotating the Hamiltonian into a more suitable basis as shown in Appendix~\ref{section:rotational_opt}. \subsubsection{Cycles and reset} The dynamics of the system is known to produce an exchange of polarization between the NV center and the nuclei. This exchange is oscillatory, and therefore choosing a proper stopping time is important in order to achieve an effective polarization transfer from the NV center to the nuclei. In practice, a sub-optimal transfer time can suffice, and the protocol is then repeated several times by resetting the NV center to its initial state and letting the system evolve under the drive again. Due to the re-initializations the full evolution of the system is non-unitary and a net gain of polarization of the system is enabled. This structure is represented in the quantum circuit in Fig.~\ref{subfig:algorithm_sketch} by the repeated Trotter evolution, followed by reset operations on the qubit representing the NV center, and a single-qubit gate to prepare the initial state of the driving protocol. 
\subsection{\label{subsection:layout}Layout optimization} When implementing a quantum algorithm on a superconducting QPU, the planar qubit connectivity forces us to solve the qubit-routing problem by introducing additional SWAP gates to connect distant qubits. In this subsection, we study the advantages of an optimized chip topology, a star topology, over a square-grid array of qubits in terms of reducing the number of SWAP gates that must be inserted to run the algorithm in Fig.~\ref{fig:circuit_comparison_graphics} on the device. \begin{figure} \caption{(a) Three steps of the SWAP patterns in a five-qubit linear chain displayed from top to bottom. Green (blue) arrows represent the SWAP pattern for the case with (without) internuclear interactions. The green pattern is known as the `odd-even' SWAP pattern. The numbers are expressed according to the blue pattern, where label 0 represents the position of the NV center. (b) Star chip topology with the SWAP pattern for the interaction with internuclear interactions. } \label{fig:swap_square} \label{fig:star_chip_topology} \label{fig:swap_patterns} \end{figure} Different topologies will imply different counts of SWAPs added on top of the gates arising from the algorithm itself, as shown in Fig.~\ref{fig:swap_patterns}. On a NISQ device, this implies different computational precision for the same gate error magnitudes. We choose the SWAP count as our metric to compare different topologies, as commonly gates have fidelities limited by calibration. The errors could be due to crosstalk, leakage, or filtering causing disturbances to the control signals. Under this scenario we want to minimize the gate count. On the other hand, for a highly tuned up device whose gates are limited by qubit coherence times, it would be optimal to minimize the circuit depth instead of the TQG count. 
Assuming the gate errors are independent, the total error will be bounded by: \begin{equation} \varepsilon_{\textrm{gates}}=1- {(1-\varepsilon_{\textrm{TQG}})} ^{N_{\textrm{TQG}}} (1-\varepsilon_{\textrm{SQG}})^{ N_{\textrm{SQG}}}, \end{equation} where $N_{\textrm{TQG}}$ is the number of two-qubit gates, $N_{\textrm{SQG}}$ the number of single-qubit gates, and $\varepsilon_{\textrm{SQG}}$ is the SQG error. Consequently, reducing the gate count, especially $N_{\textrm{TQG}}$, has an exponential effect on the precision of the computations, underlining the effect of minimizing the SWAP gate overhead. As SWAP gates are not native to the hardware, but must be compiled out of three CZ gates, their effective error rate is also much higher than those of native gates. \subsubsection{Square grid} A common choice in superconducting quantum chips is the square grid of qubits. It has high connectivity and is suitable for performing the surface code error correction when scaled to large enough qubit counts with fast measurement and feedback~\cite{Fowler2012}. The qubit routing problem on a square grid can be tackled using various numerical approaches~\cite{Hirata2009,Li2019,Saeedi2010,Zulehner2018}. However, these methods are inefficient. In our case, a tailored SWAP routing method, shown in Fig.~\ref{fig:swap_square}, has been chosen and developed in Appendix~\ref{sec:routing} that can be shown to be well suited from two perspectives. First, a comparison against the cited numerical approaches (shown in Appendix~\ref{sec:routing}) reveals that our routing method is better in terms of number of gates. Second, it is completely deterministic and does not rely on expensive numerical optimization methods. It can also be shown not to be far from optimal: on a square grid each qubit has at most 4 nearest neighbors, implying that any SWAP operation provides at most 3 new neighbors. 
For an all-to-all (ATA) interacting Hamiltonian there are $\frac{n^2}{2}$ interactions, to leading order, for a simulation performed on $n$ qubits (corresponding to $N$ nuclei and one NV center). This implies a lower bound of at least $\frac{n^2}{6}$ SWAPs for any SWAP pattern on the square grid topology. Our SWAP pattern with $\frac{n^2}{2}$ SWAPs, discussed in Appendix~\ref{sec:routing}, is thus not far from optimal. \subsubsection{Star architecture} A star topology allows us to implement the simulation of the simplified case without internuclear interactions directly, without any SWAP gates. With internuclear interactions considered, we still find a reduction in SWAP gates as compared to the square grid topology, as shown in Fig.~\ref{fig:star_chip_topology}. This reduction comes from the SWAP routing we implement, which consists of making the qubit $0$ in Fig.~\ref{fig:star_chip_topology} interact with all the external qubits and then swap its state with that of qubit $1$ and repeat this process until all interactions have been performed. This allows us to use only $n-2$ SWAP gates. The percentage of SWAP gates that can be saved can be observed in Fig.~\ref{fig:TQG_count_and_saved_swaps.pdf}. However, this improvement in the number of gates comes with a price to pay in the depth of the algorithm. We can only do one TQG at a time in the star chip and we have $\frac{3}{2}n(n-1)$ TQGs from simulating the physical interactions and $3(n-2)$ TQGs from the SWAPs. This yields a depth for the TQGs of $\frac{3}{2}n^{2}+\frac{3}{2}n-6$ in a star chip, while for a square grid it is $6n$. Such a depth increase comes from the reduction in parallelization, since all gates now act via the central qubit. On the other hand, less parallelization reduces the types of possible crosstalk errors.
Adding connections between external qubits reduces the depth of the circuit, since the main cause of circuit depth is the fact that the interaction of two external qubits needs to be done exclusively by the central qubit. Further studies are required to see if the addition of more external layers to this topology (such as in a spiderweb) can lead to better compromises between depth and gate count, especially for simulating systems with clusters of strongly interacting nuclei. \begin{figure} \caption{The (top) panel shows the percentage of SWAP gates saved by using a star topology instead of a square grid for $n$ qubits for the cases with and without internuclear interactions. The (bottom) panel shows the total TQG count against the qubit count in the interacting case for the square grid and the star architecture.} \label{fig:TQG_count_and_saved_swaps.pdf} \end{figure} \subsection{Gate-level optimization} The two-qubit interactions that appear in the algorithm are the $XZ$, $YZ$ and $ZZ$ interactions, as shown in section~\ref{subsec:time_evol} and Fig.~\ref{subfig:one_trotter_step}. When compiling the algorithm into the native gates of the device, all these interactions must be implemented in terms of some available gate set. We study in Table~\ref{tab:gates-overhead} the overhead introduced by decomposing these interactions into different examples of native TQGs of superconducting devices; namely, the parametrizable and fixed-phase $U_{ZZ}$ gate, the fixed-phase controlled-$Z$ gate $\mathrm{CZ}$, and the $\mathrm{CNOT}$ gate. The $\mathrm{CNOT}$ gate is usually performed by making use of the cross-resonance gate \cite{Krantz2019, magesan2020crossresonance}, which introduces an $U_{XZ}$ interaction, making it equivalent to the $U_{ZZ}$ for the purpose of this algorithm. We assume that the SQGs that can be implemented are the $R_{xy}$ and the $R_{z}$ gates. 
These numbers can be further reduced if the first and last SQGs introduced by this compilation are combined with the adjacent SQGs in the algorithm. \begin{table}[] \begin{tabular}{|l|c|c|c|c|} \hline & $U_{ZZ}(\phi)$ & $U_{ZZ}(-\pi/4)$ & $\mathrm{CZ}(\pi)$ & CNOT \\ \hline \textbf{TQGs} & $1$ & $2$ & $2$ & $2$ \\ \hline \textbf{SQGs} & $0$ & $5$ & $3$ & $1$ \\ \hline \end{tabular} \caption{Overheads introduced by the decomposition of $U_{ZZ}(\phi)$ gates into different examples of native TQGs in superconducting devices. The single-qubit-gate (SQG) count includes only $R_{xy}$ rotations, as the $R_{z}$ rotations can be implemented virtually.} \label{tab:gates-overhead} \end{table} The conclusion is that fixed-angle gates will double the number of TQGs that need to be physically performed. In Ref.~\cite{lacroix2020continuousgates}, the improvements coming from the reduction of the gate count are compared to the new errors introduced by the interpolation of the calibrated phases. For two instances of a Quantum Approximate Optimization Algorithm (QAOA)~\cite{farhi}, it is shown that the performance is better when using parametrized TQGs. The gate sequences for some of the gate decompositions are shown in Fig.~\ref{fig:gate_decomp}. \begin{figure} \caption{(a) Gate decomposition of $e^{-i\phi ZZ}$ into native gates.} \label{fig:gate_decomp1} \label{fig:gate_decomp3} \label{fig:gate_decomp} \end{figure} \section{Co-Design hardware} \label{sec:co-design_HW} \begin{figure} \caption{(a) Central $\lambda/4$ resonator with 6 qubits coupled via tunable couplers. The resonator is also coupled to a quantum circuit refrigerator enabling fast reset. The device acts effectively as a 6-qubit star-architecture chip.\\ (b) Electrical diagram of transmon qubit (left) coupled to a resonator mode (right) via a tunable coupler (center).
The qubit has frequency $\omega_\textrm{q}$.} \label{subfig:6donis} \label{subfig:QCR_circuit} \label{fig:ndonis} \end{figure} A star-architecture chip has fundamental scaling issues using a transmon as the central qubit as the number of neighbors grows. Every neighbor added to the center qubit would decrease its charging energy $E_{c}$. To keep the qubit frequency constant and anharmonicity in the transmon regime, the ratio of the qubit's Josephson energy to its charging energy, $E_j/E_c$, must remain unaffected. Therefore, we cannot afford to change its charging energy. This leads to a trade-off between the number of coupled qubits and their coupling strength to the central element. The spirit of Co-Design calls for replacing the central transmon with another object that enables this scaling in size. A resonator has no Josephson energy $E_{j}$, so the $E_j/E_c$ ratio is not altered by adding more capacitive couplings to the resonator. Only small corrections to its frequency are introduced by adding coupled qubits. As a distributed element, a co-planar waveguide resonator also has physically more space for couplings than a central transmon qubit. By elongating the resonator and choosing the mode with the target frequency, the number of qubits coupled to it can further be increased. These properties make a resonator a favourable component in the center of the chip. In the device in Fig.~\ref{subfig:6donis} the qubits are capacitively coupled to the resonator via tunable couplers~\cite{mariantoni2008two,yan2018tunable,foxen2020demonstrating} in the proximity of a voltage maximum of a standing wave in the resonator. As the resonator is elongated, we must use higher harmonic excitations of the resonator to keep the frequency around the operational frequency of the qubits.
Tunable couplers avoid the frequency crowding issues related to direct coupling~\cite{song201710,song2019generation}, and the linear resonator has higher connectivity in the center than ring resonator structures with quasi-all-to-all connectivities~\cite{hazra2021ring}. A linear resonator cannot in general be used as a qubit, since a microwave drive on it will not only populate the $\{\ket{0},\ket{1} \}$ subspace, but also higher excited states. However, the effective interactions mediated via the tunable coupler in Fig.~\ref{subfig:6donis} are of the type $a^{\dagger}a Z$ and $(a+a^{\dagger})X+(a-a^{\dagger})Y$ where $a$ and $a^{\dagger}$~\cite{Krantz2019} are the resonator annihilation and creation operators. These types of interactions conserve excitation number, so when at most one excitation is in the qubit-resonator system, the resonator cannot be populated beyond its first excited state through interaction with a qubit mediated by a tunable coupler. CZ and iSWAP gates between the resonator and a qubit can be performed using the two interactions, and the theory is developed more fully in Sec.~\ref{subsec:gate_theory}. Then, a resonator together with an external qubit can be used as an effective central qubit in the following way: \begin{enumerate} \item Prepare all qubits and the resonator in their ground states \item Select one qubit to form the effective central qubit together with the resonator \item Prepare an arbitrary state in the selected qubit \item Perform an iSWAP operation from the selected qubit to the resonator initially in the ground state \item Perform CZ gates between the resonator and any other qubits \item Perform an iSWAP operation back from the resonator to the selected qubit for measurement \end{enumerate} The theoretically most straightforward protocol would be to perform a SWAP gate from the qubit to the resonator.
The iSWAP, on the other hand, is a native gate that can directly be implemented on the hardware in Fig.~\ref{subfig:QCR_circuit}. The iSWAP gate between the resonator and the qubit is represented by the unitary operator: \begin{align} U_{\mathrm{iSWAP}} = \left( \begin{array}{cccc} 1 & 0 & 0 & 0\\ 0 & 0 & -i & 0\\ 0 & -i & 0 & 0\\ 0 & 0 & 0 & 1 \end{array} \right). \label{iSWAP_unitary} \end{align} Since the CZ gates performing the computation following the iSWAP are diagonal in the computational basis, the phase introduced by the iSWAP is uninvolved in the gate. This enables substituting the SWAP gate by an iSWAP gate in the protocol to further minimize the gate count. \subsection{Gate theory and simulations} \label{subsec:gate_theory} Here we demonstrate that in our star architecture CZ and iSWAP-type gates between any of the qubits and the $\{\ket{0},\ket{1} \}$ subspace of a chosen resonator mode can be implemented. The operational principles of these gates are very similar to those between two qubits coupled with a tunable coupler~\cite{mariantoni2008two,yan2018tunable,foxen2020demonstrating, chu2021coupler}. The main limitation of our architecture (where one transmon is replaced by a resonator) is that iSWAP operations can only be performed in the zero- and single-excitation subspace of the two-qubit computational basis. 
\begin{table}[H] \centering \begin{tabular}{ |c|c|c| } \hline Parameter & Symbol & Value \\ \hline Resonator frequency & $\omega_r$ & 2$\pi \times$4.3 GHz \\ Qubit anharmonicity & $\alpha_q$ & - 2$\pi \times$0.187 GHz \\ Coupler anharmonicity & $\alpha_c$ & - 2$\pi \times$0.110 GHz \\ Resonator-coupler coupling & $g_{rc}$ & 2$\pi \times$98.5 MHz \\ Qubit-coupler coupling & $g_{qc}$ & 2$\pi \times$101.8 MHz \\ Resonator-qubit coupling & $g_{rq}$ & 2$\pi \times$8.9 MHz \\ Resonator relaxation & $T_1^r$ & 60 $\mu s$ \\ Qubit relaxation & $T_1^q$ & 60 $\mu s$ \\ Coupler relaxation & $T_1^c$ & 30 $\mu s$ \\ Resonator dephasing & $T_2^r$ & 60 $\mu s$ \\ Qubit dephasing & $T_2^q$ & 60 $\mu s$ \\ Coupler dephasing & $T_2^c$ & 30 $\mu s$ \\ \hline \end{tabular} \caption{Parameters of star-architecture chip.} \label{tab:ndonis_parameters} \end{table} \subsubsection{Conditional-Z gate} The CZ operation between the resonator and the qubit is described by the unitary operator: \begin{align} \label{CPHASE_unitary} \textrm{CZ}(\phi) = \left( \begin{array}{cccc} 1 & 0 & 0 & 0\\ 0 & 1 & 0 & 0\\ 0 & 0 & 1 & 0\\ 0 & 0 & 0 & e^{-i\phi} \end{array} \right). \end{align} This gate is equivalent to the $U_{ZZ}(\phi)$ gate in Eq.~\ref{ZZ_unitary} up to two $R_z$-rotations. To operate a CZ gate, we initialize the resonator-coupler-qubit setup shown in Fig.~\ref{subfig:QCR_circuit} at the idling configuration with zero effective coupling between the qubit and resonator. Note that the coupler is also a transmon that shows a higher sensitivity to the magnetic flux than regular qubits. We next apply a flux pulse that lowers the coupler frequency, turning on the effective coupling between the resonator and the qubit. Depending on the flux pulse shape, the state collects a conditional phase $\phi$ and possibly experiences population oscillations between computational and non-computational states, as a function of the time spent at the gate-operation frequency.
We optimize the pulse amplitude and duration such that after the flux pulse the CZ gate fidelity is maximized. Details of the gate theory can be found in Appendix~\ref{gate_theory} and the considered device parameters in Table~\ref{tab:ndonis_parameters}. In Fig.~\ref{fig:CPHASE}, we operate our CZ gate by tuning the coupler frequency using a flattop Gaussian shaped flux pulse. The width of our Gaussian filter was fixed at 3 ns. Applying such a flux pulse to the coupler results in a coupler frequency shift by $\omega_c^{\mathrm{shift}}$ from the idling configuration. Then by appropriately tuning $\omega_c^{\mathrm{shift}}$ and the gate time $\tau$, one locates the optimal pulse configuration that minimizes the CZ($\pi$) gate error $\varepsilon_{\textrm{CZ}} =1 - \big( \mathrm{tr}\sqrt{\sqrt{\rho}\sigma \sqrt{\rho}}\big)^2$, where $\sigma$ is the target density matrix obtained after propagating some initial state $|\Psi \rangle \langle\Psi|$ with the ideal unitary of Eq.~\ref{CPHASE_unitary} and $\rho$ the final density matrix obtained after propagating $|\Psi \rangle \langle\Psi|$ with the Lindbladian corresponding to our system defined in Eq.~\eqref{H2QG}. For our device parameters, the maximal decoherence limited CZ gate error averaged over a number of random initial states is $1.6 \times 10^{-3}$. Note that the system parameters in Table~\ref{tab:ndonis_parameters} were chosen such that they allow for the possibility to find a good idling configuration, where the residual CZ interaction vanishes before the gate operation. In our simulations, we have included environmental noise, such as amplitude damping and pure dephasing, and treated them using a Lindblad master equation solver in QuTiP \cite{qutip, qutip2}. \begin{figure} \caption{(a) CZ gate error landscape averaged over random initial states. Contours with a low error are highlighted with a dashed line.
(b) iSWAP gate error landscape obtained by averaging over a number of random initial states in the zero- and one-excitation manifolds. Both plots are produced using system parameters shown in Table~\ref{tab:ndonis_parameters} \label{fig:CPHASE} \label{fig:iswap} \label{fig:CPHASE_iSWAP} \end{figure} \subsubsection{iSWAP gate} Just as the CZ gate, the iSWAP gate can be natively realized in superconducting quantum computing architecture~\cite{Krantz2019}. With our device, we can perform high-fidelity iSWAP gates between zero- and single-excitation computational states. The two-photon state $|\rm 1\rangle_r \otimes |\rm 1\rangle$, where $|\rm 1\rangle_r$ denotes the first excited state of the resonator, must be excluded because it resonantly interacts with the state $|\rm 2\rangle_r \otimes |\rm 0\rangle$ inducing a population exchange between the states. Hence the resulting operation in this subspace does not match the action of the targeted iSWAP operation. The capacitive coupling between the elements of the electrical circuit shown in Fig.~\ref{subfig:QCR_circuit} gives rise to an effective $XY$-interaction between the qubit and resonator under the rotating wave approximation. Such an interaction conserves excitation number. With only the qubit or resonator (or neither) initially populated, we stay within the single excitation subspace of the joint system, thereby minimizing leakage of quantum population into the higher excited states of the resonator. The $XY$-interaction can be turned on by first tuning the qubit in resonance with the resonator, and then applying a flux-pulse to the coupler to turn on the coupling, similar to the CZ gate operation. Fig.~\ref{fig:iswap} shows iSWAP gate error landscape for the same device parameters (given in Table~\ref{tab:ndonis_parameters}). The optimal average iSWAP gate error $\varepsilon_{\mathrm{iSWAP}}$ obtained for our device is $1.7 \times 10^{-3}$. 
This result is obtained by averaging over a number of random initial states within the zero- and one-excitation manifolds. The results of our two-qubit-gate simulations demonstrate that our star architecture supports operating gates with fidelities similar to those of regular transmon qubits coupled together. The increased local connectivity of the device reduces the need for SWAP gates to simulate the nanoscale NMR problem (and others with a similar structure) and consequently in the end improves simulation fidelities. \subsection{Reset} \label{subsec:HW_reset} The hyperpolarization protocol described in Sec.~\ref{sec:hyperpolarization} needs regular re-initializations of the state of the NV center. The Co-Design hardware for simulating the protocol must therefore support this operation within qubit lifetimes. This is a hardware challenge, but one with solutions in sight. In particular, the quantum circuit refrigerator (QCR) has been used to perform the reset in tens of nanoseconds~\cite{tan2017quantum,silveri2017theory,hsu2020tunable,sevriuk2019fast}, which is a similar timescale to gate operations. The advantage of using a QCR for the reset is the possibility to reset the central resonator directly, without the need to transfer the resonator population back to the central qubit using an iSWAP gate. Alternatively, a fast reset is possible through applying a flux drive to a qubit to SWAP its state with its measurement line~\cite{zhou2021rapid}. This scheme has the advantage of not requiring any additional hardware not already present on the chip, but comes with a small cost in the circuit depth, as the state of the resonator must be transported using an iSWAP gate into the designated central qubit and be re-initialized there. The reset timescale is also somewhat longer than when using a QCR.
\section{Results and discussion} \label{section:results} \begin{figure*} \caption{Polarization transfer from one NV center to two interacting nuclei for a simulation time $t_f = 30 \, \mu $s and a single cycle with $s=32$ Trotter steps. The relevant observables to represent polarization are those included in the legends. (Left) with continuous driving and (right) with pulsed driving. Both plots depict with solid lines an exact simulation of the nanoscale NMR system including its usual imperfections, namely a detuning of $\delta_1 = 120$ kHz and a fluctuating microwave drive amplitude which follows an OU process with correlation time $\tau = 500 \, \mu$s and diffusion constant $c = 4\cdot 10^{-7} \label{fig:sim} \label{fig:simulation_results} \end{figure*} In this section we discuss the two main results of the paper: namely the predicted performance of our proposed quantum algorithm on a regular noisy QPU, as well as the performance increase obtained with our proposed Co-Design QPU. To this aim, we will focus on the polarizations of the NV center and nuclear spins, that are relevant quantities of the problem and straightforward to measure in a quantum computer. In Fig.~\ref{fig:simulation_results} we compare the frequency response of the polarization transfer process on two different simulated devices: a QPU with realistic noise parameters, and an ideal noiseless QPU. We consider one NV center, two interacting nuclei and different driving frequencies for both continuous and pulsed driving schemes. In the simulation we ignore errors in the preparation of the fully-mixed state of the qubits representing the nuclei. The blue curves show the remaining polarization in the NV center after one cycle of initialization and time evolution, while the red and the green curves correspond to the nuclear polarizations at the end of the cycle. 
For each nucleus there appears a resonance frequency in the system, for which the polarization transfer is optimal for said nucleus, depicted in the figure by the peaks of the curves. Both simulations include the effects of the most common imperfections in nanoscale NMR systems, i.e. energy detunings and Rabi frequency fluctuations discussed in Sec.~\ref{sec:hyperpolarization}. The simulation of the quantum algorithm additionally includes noise and gate errors present in the QPU. It is notable that the noise affects the height and shape of the peaks more than their location. The system imperfections include a detuning of $120$ kHz of the NV center from the zero-field splitting that shifts the peaks in Fig.~\ref{fig:simulation_results} (left) to frequencies lower than their predicted Larmor frequencies (dotted vertical black lines). Fig.~\ref{fig:simulation_results} (right) shows how the pulsed-driving scheme XY8~\cite{maudsley1986modified,gullion1990new} acts as a robust dynamical decoupling sequence, eliminating such frequency shifts both in the ideal and noisy simulations. Regarding the QPU noise and errors, the amplitude damping channel causes an overall shift down of all polarizations at all driving frequencies. Dephasing noise and gate errors (as modelled by depolarizing noise) cause the curves in Fig.~\ref{fig:simulation_results} to flatten and lose contrast. While we have discussed how the product of gate errors is minimized by reducing the SWAP overhead through Co-Design hardware, the loss of contrast can also be addressed through error mitigation techniques such as zero-noise extrapolation~\cite{endo2018practical,cai2021multi,krebsbach2022}. Dephasing can also be reduced through dynamical decoupling techniques~\cite{Krantz2019}, thus extending the system coherence and increasing the effective $T_2$ time. 
The simulations presented in Fig.~\ref{fig:simulation_results} include the decoherence times and gate fidelities that can be achieved with the hardware in Sec.~\ref{sec:co-design_HW}. This implies an overestimation of the actual errors in the simulation, since the gate fidelities already include some decoherence. \begin{figure} \caption{Performance gain from Co-Design: a comparison of a Co-Design star-architecture against a square grid, taking as reference an ideal simulation without QPU noise. The comparison highlights the negative effect that the SWAP gates on the square grid have on extracting relevant information from the simulation. We consider two quantities: in subplot (a) the ratio $\bar{\xi} \label{subfig:comparison_codesign_signal_to_noise_error} \label{subfig:comparison_codesign_peak} \label{fig:comparison_codesign_} \end{figure} To quantify the advantage of our Co-Design processor, Fig.~\ref{fig:comparison_codesign_} shows how the reduction in TQGs improves our ability to extract relevant information from the simulation. The figure compares the star-architecture chip to qubits connected on a square grid simulating a six-qubit system with one NV center and five non-interacting nuclei. On the two chips we use SWAP patterns according to the schemes discussed in Sec.~\ref{subsection:layout}. First, Fig.~\ref{subfig:comparison_codesign_signal_to_noise_error} shows the average height-to-width ratio $\bar{\xi}$ of the nuclear polarization peaks obtained with star and square grid topologies with respect to an ideal error-free simulation. It serves as an indicator of how much the QPU noise degrades the simulation for each case. The ratio $\bar{\xi}$ is computed by fitting a Gaussian function on each peak, and computing: \begin{equation} \bar{\xi} = \Big{\langle}\frac{h}{\sigma}\Big{\rangle}, \end{equation} where $h$ is the height and $\sigma$ the standard deviation (width) of the fitted Gaussian function, averaged over the five nuclei.
The curves for both topologies must coincide at $\bar{\xi} = 0$ for a maximal-error device, and at $\bar{\xi} = \bar{\xi}_{\textrm{ideal}}$ for an error-free quantum computer, since for a maximal-error device the output is pure noise and for an error-free quantum computer the number of SWAPs is irrelevant to the precision. For NISQ devices in between these limits, a performance difference between the architectures is observed. For systems with more nuclei and NV centers, the differences between topologies start to appear at lower errors, since the number of total operations grows. This shows how the QPU topology is of great importance for the computational precision of NISQ devices, while for fault-tolerant quantum computers the precision is unaffected by the topology. Second, Fig.~\ref{subfig:comparison_codesign_peak} shows the average relative error in the central frequency of the NMR peaks: \begin{equation} \bar{\Delta}_{\textrm{peak}} = \Big{\langle}\Big|\frac{\omega_{\textrm{noisy}}-\omega_{\textrm{ideal}}}{\omega_{\textrm{ideal}}}\Big|\Big{\rangle}, \end{equation} where $\omega_{\textrm{noisy}}$ and $\omega_{\textrm{ideal}}$ are the peak-center frequencies extracted from the Gaussian fittings for the noisy and ideal cases, respectively. The peak centers correspond to driving frequencies that efficiently transfer polarization to different parts of the diamond lattice. With the quantum simulation we can individually identify the nuclear resonance peaks by directly measuring the polarization of each qubit. This could enable exploration of how the polarization diffuses in the lattice with single-nucleus precision. In contrast, in a standard nanoscale NMR experiment, one typically only has access to the excitation loss of the NV (and thus only to the average transmitted polarization).
This demonstrates the advantage of simulating the system on a quantum computer, as it provides access to the relevant microscopic details of the dynamics that are otherwise inaccessible. The figures demonstrate that the Co-Design chip is able to detect the resonance frequencies and predict the peak heights better at all considered noise levels. The power of Co-Design is particularly evident in Fig.~\ref{subfig:comparison_codesign_peak}, where the square grid is shown to require two orders of magnitude lower noise levels to reach the same accuracy as the Co-Design chip. \section{Conclusions and outlook} \label{sec:conclusions} We have presented a quantum algorithm to simulate a nanoscale NMR problem, namely a hyperpolarization protocol. We have simulated the proposed quantum algorithm with typical noise processes of a NISQ superconducting quantum computer with state-of-the-art parameters. We find that, despite considering a noisy QPU, our protocol still allows us to identify the positions of the nuclear resonances (corresponding to the maximal polarizations) in the frequency domain, as well as the behavior in the vicinity of such resonant frequencies, thus enabling the exploration of optimized protocols and driving parameters to hyperpolarize the nuclear ensemble. Moreover, we have shown that a specific Co-Design architecture adapted to the problem provides an advantage over general-purpose designs in the NISQ era, thanks to the reduction in two-qubit-gate count. Consequently, the adapted design reduces the necessary gate fidelities to solve practical problems in nanoscale NMR. This application-specific QPU consists of a central resonator, representing an NV center, coupled to a number of qubits representing the nuclei. The design can be scaled to more NV centers and a potentially large number of qubits around them. This is an example of a shortcut to quantum advantage.
Adopting more NISQ-friendly algorithm alternatives, such as those listed in~\cite{bharti2021noisy}, adapted to the problem and to the Co-Design hardware, can provide further shortcuts. Our work opens interesting directions for further investigation, since a quantum processor able to efficiently simulate nanoscale-NMR scenarios with a large number of nuclear spins would have a great impact on NMR-based applications. Fast and reliable quantum simulations of interacting spin systems would improve the interpretability of zero- and low-field NMR where spin-spin interactions become dominant~\cite{seetharam2021digital}, and nanoscale-NMR systems where a quantum sensor is strongly coupled via dipole-dipole interactions to nuclear or electron spin clusters. A possible application of the latter is the estimation of inter-label distances (via, e.g., Bayesian analysis of the NV center response) in electronically labelled biomolecules~\cite{munuerajavaloy2021detection}. In this case, the numerical analysis of systems beyond two-electron spin labels in realistic conditions, including protein motion and decoherence channels, is already numerically challenging. \section*{Acknowledgments} The authors would like to thank Caspar Ockeloen-Korppi, Alessandro Landra and Johannes Heinsoo for their help in developing the idea of the star-architecture chip, Jani Tuorila for his support in developing the gate theory, Amin Hosseinkhani and Tianhan Liu for reviewing the manuscript, and Henrikki M\"akynen and Hoang-Mai Nguyen for graphic design. J.C. additionally acknowledges the Ram\'on y Cajal program (RYC2018-025197-I). We further acknowledge support from Atos with the Quantum Learning Machine (QLM). Finally, the authors acknowledge financial support from BMBF through the Q-Exa project No. FZK: 13N16062. \appendix \onecolumngrid \section{Derivation of the system Hamiltonian} \label{appendix:H_derivation} The Hamiltonian in Eq.~\eqref{eq:hamiltonian1} can be derived from first principles.
Let us first assume a model including only two $^{13}C$ nuclei and one NV center (Fig.~\ref{fig:interaction_scheme}) with dipole-dipole interactions. For simplicity we also consider the NVs to be aligned with the external magnetic field. In that case, the Hamiltonian of the system reads: \begin{equation} H=D S_{z}^{2}-\gamma_{e} B_{z} S_{z}-\gamma_{c} B_{z}\left(I_{1}^{z}+I_{2}^{z}\right)+\sum_{k=1}^{2} \frac{\hbar \mu_{0} \gamma_{e} \gamma_{c}}{2\left|\vec{r}_{k}\right|^{3}}\left[\vec{S} \cdot \vec{I}_{k}-\frac{3\left(\vec{S} \cdot \vec{r}_{k}\right)\left(\vec{I}_{k} \cdot \vec{r}_{k}\right)}{\left|\vec{r}_{k}\right|^{2}}\right]+\frac{\hbar \mu_{0} \gamma_{c}^{2}}{2\left|\vec{r}_{1,2}\right|^{3}}\left[\vec{I}_{1} \cdot \vec{I}_{2}-\frac{3\left(\vec{I}_{1} \cdot \vec{r}_{1,2}\right)\left(\vec{I}_{2} \cdot \vec{r}_{1,2}\right)}{\left|\vec{r}_{1,2}\right|^{2}}\right], \end{equation} where $S_j$ is the $j$-th spin component of the NV center, $I^j_k$ the $j$-th spin component of nucleus $k$, $D$ is the zero-field splitting of the NV center, $\gamma_e$ and $\gamma_c$ are the gyromagnetic factors of the NV center and the nuclei respectively, $B_z$ is the external magnetic field, which is aligned with the symmetry axis of the NV center $\vec{r}_k$ is the relative position vector between the NV center and nucleus $k$ and $\vec{r}_{1,2}$ is the relative position vector between both nuclei. We go into an interaction picture with respect to $H_{0}=D S_{z}^{2}-\gamma_{e} B_{z} S_{z}$. 
The $\mathrm{NV}$-nuclei interaction term reads: \begin{equation} H_{\mathrm{NV}-\mathrm{N}}^{I}=\sum_{k=1}^{2} \frac{\hbar \mu_{0} \gamma_{e} \gamma_{c}}{2\left|\vec{r}_{k}\right|^{3}}\left\{\left[S_{z} I_{k}^{z}-\frac{3\left(S_{z} r_{k}^{z}\right)\left(\vec{I}_{k} \cdot \vec{r}_{k}\right)}{\left|\vec{r}_{k}\right|^{2}}\right]+U_{0}^{\dagger}\left[S_{x} I_{k}^{x}+S_{y} I_{k}^{y}-\frac{3\left(S_{x} r_{k}^{x}+S_{y} r_{k}^{y}\right)\left(\vec{I}_{k} \cdot \vec{r}_{k}\right)}{\left|\vec{r}_{k}\right|^{2}}\right] U_{0}\right\}, \end{equation} where we split the expression in commuting and non-commuting operators. The non-commuting operators pick a fast-rotating phase and can be neglected through the rotating-wave approximation. By performing an interaction-picture transformation with respect to $H_{0}=-\gamma_{c} B_{z}\left(I_{1}^{z}+I_{2}^{z}\right)=\omega\left(I_{1}^{z}+I_{2}^{z}\right)$, the nucleus-nucleus interaction term reads: \begin{equation} \begin{aligned} H_{\mathrm{N}-\mathrm{N}}^{I}=&\frac{\hbar \mu_{0} \gamma_{c}^{2}}{2\left|\vec{r}_{1,2}\right|^{3}} U_{0}^{\dagger}\left[\vec{I}_{1} \cdot \vec{I}_{2}-\frac{3\left(\vec{I}_{1} \cdot \vec{r}_{1,2}\right)\left(\vec{I}_{2} \cdot \vec{r}_{1,2}\right)}{\left|\vec{r}_{1,2}\right|^{2}}\right] U_{0}= \\ &=\frac{\hbar \mu_{0} \gamma_{c}^{2}}{2\left|\vec{r}_{1,2}\right|^{3}}\left\{I_{1}^{z} I_{2}^{z}+\frac{1}{2}\left(I_{1}^{+} I_{2}^{-}+I_{1}^{-} I_{2}^{+}\right) - \right. \\ & \left. -\frac{3\left[I_{1}^{+} e^{i \omega t}\left(r_{1,2}^{x}-i r_{1,2}^{y}\right)+I_{1}^{-} e^{-i \omega t}\left(r_{1,2}^{x}+i r_{1,2}^{y}\right)\right]\left[I_{2}^{+} e^{i \omega t}\left(r_{1,2}^{x}-i r_{1,2}^{y}\right)+I_{2}^{-} e^{-i \omega t}\left(r_{1,2}^{x}+i r_{1,2}^{y}\right)\right]}{4\left|\vec{r}_{1,2}\right|^{2}}\right\}, \end{aligned} \end{equation} with $I_{k}^{\pm}=I_{k}^{x} \pm i I_{k}^{y}$. 
Applying again the rotating-wave approximation and undoing the interaction picture we finally arrive at: \begin{equation} H_{I}=-\gamma_{c} B_{z}\left(I_{1}^{z}+I_{2}^{z}\right)+S_{z}\left(\vec{A}_{1} \cdot \vec{I}_{1}+\vec{A}_{2} \cdot \vec{I}_{2}\right)+g_{1,2}\left[I_{1}^{z} I_{2}^{z}-\frac{1}{4}\left(I_{1}^{+} I_{2}^{-}+I_{1}^{-} I_{2}^{+}\right)\right], \label{eq::hamilt_inter_appendix} \end{equation} with $\vec{A}_{k}=\frac{\hbar \mu_{0} \gamma_{e} \gamma_{c}}{2\left|\overrightarrow{r_{k}}\right|^{3}}\left[\hat{z}-\frac{3\left(\hat{z} \cdot \vec{r}_{k}\right) \vec{r}_{k}}{\left|\vec{r}_{k}\right|^{2}}\right]$, and $g_{1,2}=\frac{\hbar \mu_{0} \gamma_{c}^{2}}{2\left|\vec{r}_{1,2}\right|^{3}}\left[1-3\left(\frac{r_{1,2}^{z}}{\left|\vec{r}_{1,2}\right|}\right)^{2}\right]$. We rewrite $S_{z}$ in the $\{|0\rangle,|1\rangle\}$ subspace by dropping out the $|-1\rangle$ energy state as it will not participate in the dynamics. Leakage to that state would not be a problem because of the energy difference between states $|0\rangle,|1\rangle$ and $|0\rangle,|-1\rangle$. Then by using that $|1\rangle\langle 1|=\frac{\mathbb{1}-\sigma^{z}}{2}$ we get: \begin{equation} H_{I}=-\vec{\omega}^{c}_{1} \cdot \vec{I}_{1}-\vec{\omega}^{c}_{2} \cdot \vec{I}_{2}+\frac{\sigma_{z}}{2}\left(\vec{A}_{1} \cdot \vec{I}_{1}+\vec{A}_{2} \cdot \vec{I}_{2}\right)+g_{1,2}\left[I_{1}^{z} I_{2}^{z}-\frac{1}{4}\left(I_{1}^{+} I_{2}^{-}+I_{1}^{-} I_{2}^{+}\right)\right], \label{eq:interaction_hamiltonian} \end{equation} where $\vec{\omega}^{c}_{k}=-\left(\frac{A_{k}^{x}}{2}, \frac{A_{k}^{y}}{2}, \frac{A_{k}^{z}}{2}-\gamma_{c} B_{z}\right)$ is the modified nuclear Larmor term due to the presence of the NV center. Generalizing equation~\eqref{eq:interaction_hamiltonian} to $M$ NV centers and $N$ nuclei, including the detuning of the NV centers and adding the microwave driving term we obtain precisely the Hamiltonian in Eq.~\eqref{eq:hamiltonian1}.
\section{\label{HH_sequence} Hyperpolarization sequences} \subsection{Hartmann-Hahn sequence} Here we explain the dynamics induced by the continuous driving on the hyperpolarization protocol. To illustrate the mechanism, we consider a system including a single NV center and a single nucleus. The corresponding Hamiltonian, now including the driving term, reads: \begin{equation} H=D S_{z}^{2}-\gamma_{e} B_{z} S_{z}-\gamma_{c} B_{z}I_{z}+S_{z}\vec{A}\cdot \vec{I} +S_{x} \sqrt{2} \, \Omega \cos (\omega t-\phi). \end{equation} In the interaction picture with respect to $D S_{z}^{2}-\gamma_{e} B_{z} S_{z}$ we obtain: \begin{equation} H_{I}=-\gamma_{c} B_{z} I_{z}+S_{z} \vec{A} \cdot \vec{I}+\frac{\Omega}{2} \left( e^{i p_{+} t}|1\rangle\langle 0|+e^{i p_{-} t}|-1\rangle\langle 0|+\textrm{H.c.} \right) \left[e^{i(\omega t-\phi)}+e^{-i(\omega t-\phi)}\right], \end{equation} where $p_{+/-}=D \pm\left|\gamma_{e}\right| B_{z}$. Choosing the resonance condition $\omega=p_{+}$ and applying the rotating-wave approximation we get: \begin{equation} H_{I}=-\gamma_{c} B_{z} I_{z}+S_{z} \vec{A} \cdot \vec{I}+\frac{\Omega}{2}\left(e^{i \phi}|1\rangle\langle 0|+e^{-i \phi}|0\rangle\langle 1|\right). \end{equation} Finally we can use the identity $|1\rangle\langle 1|=\frac{\mathbb{1}-\sigma^{z}}{2}$ and the fact that there will be no transitions to the $|-1\rangle$ state because of energy differences: \begin{equation}\label{seq:simphamiltonian} H_I = -\vec{\omega}^c\cdot\vec{I} - \frac{\sigma_z}{2}\vec{A}\cdot\vec{I} + \frac{\Omega}{2}\sigma^\phi, \end{equation} where $\sigma^\phi=e^{-i\phi} |1\rangle\langle 0|+e^{i\phi} |0\rangle\langle 1| = e^{-i\phi}\sigma^{-}+e^{i\phi} \sigma^{+}$ and $\vec{\omega}^{c}=-\left(\frac{A_{x}}{2}, \frac{A_{y}}{2}, \frac{A_{z}}{2}-\gamma_{c} B_{z}\right)$. More details about the different terms were discussed in the main text, in section~\ref{sec:hyperpolarization}.
Choosing $\phi = 0$ and further moving to an interaction picture with respect to the terms $-\vec{\omega}^c\cdot\vec{I} + \frac{\Omega}{2}\sigma_x$ we obtain: \begin{equation} H_I =\frac{e^{i\frac{\Omega}{2}\sigma_x t}\sigma_{z}e^{-i\frac{\Omega}{2}\sigma_x t}}{2}e^{-i\vec{\omega}^c\cdot \vec{I}t}\vec{A}\cdot\vec{I}e^{i\vec{\omega}^c\cdot \vec{I}t}. \end{equation} We choose now $\Omega = |\vec{\omega}^c|$, leading to the so-called Hartmann-Hahn double-resonance condition. Applying the identity $e^{i \vec{I}\cdot \hat{l}\phi} \vec{I}\cdot\vec{b} e^{-i \vec{I}\cdot \hat{l}\phi} = \vec{I}\left[(\vec{b}-(\vec{b}\cdot\hat{l})\hat{l})\cos{\phi}-\hat{l}\times\vec{b}\sin{\phi}+(\vec{b}\cdot\hat{l})\hat{l}\right]$ and the rotating-wave approximation to remove time-dependent terms, we get the flip-flop Hamiltonian: \begin{equation} H_I= \frac{A^\perp}{4}\left(|+\rangle\langle-|I^++|-\rangle\langle+|I^-\right), \label{eq:flipflopcont} \end{equation} with $A^\perp = \left|\vec{A}_x^\perp\right| = \left|\vec{A}-\left(\vec{A}\cdot\hat{\omega}^c\right)\hat{\omega}^c\right|$ and the nuclear coordinates changed so that $\hat{x} = \hat{A}_x^\perp$ and $\hat{z} = \hat{A}_z^\parallel$ with $\vec{A}_z^\parallel = (\vec{A}\cdot\hat{\omega}^c)\hat{\omega}^c$. \subsection{Pulsed sequence} \label{subsec:pulsed} Now we consider the pulsed case, represented by the driving term $H_{\textrm{dr}} = \frac{\Omega(t)}{2}\sigma^\phi$ where $\Omega(t)$ is a train of $\pi$-pulses. The Hamiltonian is already expressed in the interaction picture from Eq.~\eqref{seq:simphamiltonian}. From there, we further move into a rotating frame with respect to the driving term. The corresponding unitary transformation is $U_0 = (-i \sigma^\phi)^k$ for the time interval between pulses $k$ and $k+1$. 
This leads to: \begin{equation} H_I = -\vec{\omega}^c\cdot\vec{I} + F(t) \frac{\sigma_z}{2}\vec{A}\cdot\vec{I}, \label{eq:filter} \end{equation} where $F(t)$ is the so-called filter function, with value $+1$ when $k$ is even, and $-1$ when $k$ is odd, representing the sign of the operator $\sigma_z$, flipped by the action of each pulse. It is necessary to apply two different patterns of pulses. The ``symmetric case'', meaning an evenly-distributed sequence of pulses for which the filter function is even and can be expanded in Fourier series of cosines as: \begin{equation} F(t) = \sum_{n = 1}^\infty f_n \cos\left(\frac{2\pi n}{T} t\right), \end{equation} with $f_n = 0$ when $n$ is even and $f_n = -\frac{4}{\pi n}$ when $n$ is odd, if the pulses are distributed such that the interpulse spacing is constant. We choose the resonance condition $T = \frac{2\pi n}{|\vec{\omega}^c|}$, where $n$ is the harmonic number. This is the same resonance condition that we introduced in section~\ref{sec:hyperpolarization}, but here it is formulated with the period $T$ that appears in the Fourier expansion, instead of with the interpulse spacing $\tau = \frac{T}{2}$ from before. Going to an interaction picture with respect to $ -\vec{\omega}^c\cdot\vec{I}$ and repeating the procedure we used above in the Hartmann-Hahn case, we get: \begin{equation} H_I = \alpha A^\perp\sigma_zI_x, \end{equation} where $\alpha = \frac{f_n}{4}$. With the second pattern of pulses, called the ``asymmetric case'', we apply an oddly-distributed sequence of pulses for which the filter function is odd and can be expanded in a Fourier series of sines. Note that this sequence of pulses is identical to the even sequence but shifted by a $\pi/2$ phase. An analogous derivation gives: \begin{equation} H_I = \beta A^\perp\sigma_zI_y, \end{equation} with $\beta = \frac{g_m}{4}$ and $g_m$ coming from the Fourier expansion of sines, analogously to $f_n$.
Combining these two patterns one can generate an effective Hamiltonian of the form: \begin{equation} H_I = \alpha A^\perp\sigma_zI_x + \beta A^\perp\sigma_zI_y, \end{equation} which can be transformed with simple rotations on the qubit representing the NV into: \begin{equation} H_I = \alpha A^\perp\sigma_xI_x + \beta A^\perp\sigma_yI_y, \label{hamilt_pulsed} \end{equation} and this is equivalent to an interaction-exchange flip-flop Hamiltonian, similar to the one for the continuous-driving case, Eq.~\eqref{eq:flipflopcont}. A more detailed description of this whole process, including the expressions of the Fourier coefficients $f_n$ and $g_m$, can be found in reference~\cite{MunueraJavaloy2021}. In order to visualize the structure of the pulsed-driving case, we have included in Fig.~\ref{fig:circuit_comparison_graphics_pulsed} the circuit implementing all these terms on a quantum chip for the case of one NV center and two nuclei. \begin{figure*} \caption{(a) Sketch of one cycle of the simulation algorithm for one NV center and two nuclei, with pulsed driving. Compare with Fig.~\ref{fig:circuit_comparison_graphics}.} \label{subfig:algorithm_sketch_pulsed} \label{subfig:one_trotter_step_pulsed} \label{fig:circuit_comparison_graphics_pulsed} \end{figure*} \section{\label{section:other_sim}Randomized Trotter techniques} As explained in Section~\ref{sec:algorithm}, we chose Trotter expansion. Besides this, we can consider other simulation approaches such as the variational quantum simulator~\cite{yuan2019theory}, the quantum assisted simulator~\cite{bharti2021quantum}, numerical quantum circuit synthesis~\cite{younis2021qfast}, or a plethora of other quantum simulation algorithms aimed at NISQ devices~\cite{bharti2021noisy}. In addition, other approaches like randomized Trotter have been recently shown to provide some advantage compared to standard Trotter expansions~\cite{Childs2019}.
We also propose to use one randomized approach, qDRIFT~\cite{Campbell2019}, that consists of the following: instead of splitting the whole evolution operator $e^{-it_f\sum_j h_j H_j}$ into simpler terms as done in full Trotterization, the method applies a random selection of such terms to the quantum circuit. This random selection is based on the probability distribution given by the weight of each term $h_j H_j$. For a certain evolution time, this set of gates can approximate the whole evolution operator by statistically drifting the state of the circuit towards the deterministic final state. The error bound for this method is given as~\cite{Campbell2019}: \begin{equation} \varepsilon^{\textrm{qDRIFT}}_{\textrm{sim}} \leq \frac{2\lambda^2 t_f^2}{N_{\textrm{terms}}}, \end{equation}where $\lambda = \sum_j h_j$ and $N_{\textrm{terms}}$ is the number of individual two-qubit evolution operators that are implemented. These evolution operators have the form $e^{-i\tau H_j}$, being $\tau$ a constant related to the relative weight $\frac{h_j}{\lambda}$ that the term $H_j$ has in the Hamiltonian. The advantage of qDRIFT compared to Trotterization is particularly apparent when dealing with Hamiltonians with a large number of terms with small coefficients, simulated for short times. While in the standard Trotter case, every term has to be simulated for each step no matter how small its effect is, in qDRIFT this is not required. A more thorough analysis of errors in qDRIFT and gate counts can be found in~\cite{Chen2021}. This method is particularly suitable to our problem, since the range of coefficients in the Hamiltonian of a real diamond is large due to the length scales involved. In this case, with qDRIFT the terms with smaller coefficients do not add a significant amount of gates as they would in conventional Trotterization approaches. We note that other adapted protocols such as SparSto~\cite{Ouyang2020} can further enhance the simulation of this type of systems. 
SparSto represents a compromise between Trotterization and qDRIFT, generally guaranteeing an equal or better performance than both of them. We will not go into detail on this method since Trotterization and qDRIFT are enough to illustrate the main ideas behind this work. \section{\label{section:appendix_time_evol}Hamiltonian decomposition for Trotterized time evolution} In order to simulate the dynamics generated by the Hamiltonian in Eq.~\eqref{eq:hamiltonian1} on a quantum computer using Trotterization, we first need to express it in a suitable way. To begin with, we split the Hamiltonian into two parts: \begin{equation} H=H_{\textrm{SQG}}+H_{\textrm{TQG}}, \label{Hamilt_Paulis} \end{equation} which can be expressed in terms of qubit Pauli operators: \begin{equation} H_{\textrm{SQG}} = \sum_{k=1}^{N} \Big [ \frac{A^x_k}{2}\frac{X_k}{2}+\frac{A^y_k}{2}\frac{Y_k}{2} + \Big ( \frac{A^z_k}{2}-\gamma_c B_z \Big )\frac{Z_k}{2} \Big ] +\sum_{j=1}^{M} \delta_j Z_j, \end{equation} \begin{align} \begin{split} H_{\textrm{TQG}} &= \sum_{j=1}^{M} \sum_{k=1}^{N} \Big [ \frac{A^x_k}{2}\frac{X_k}{2}Z_j+\frac{A^y_k}{2}\frac{Y_k}{2}Z_j+\frac{A^z_k}{2}\frac{Z_k}{2}Z_j \Big ] +\\ &+ \sum_{k'>k=1}^{N} \frac{g_{k'k}}{4} \Big [ Z_{k'} Z_k -\frac{1}{2} X_{k'} X_k - \frac{1}{2} Y_{k'} Y_k \Big ] +\\ &+\sum_{j>j'}^M h_{j'j} \Big [Z_{j'} Z_j-X_{j'}X_j-Y_{j'}Y_j \Big ]. \end{split} \end{align} Since in the rotating frame with the drive the Hamiltonian is time independent, the time-evolution operator is simply given by: \begin{equation} U=e^{-i t_f H}, \end{equation} where $t_f$ is the time for which the simulation runs. The time-evolution operator is split into $s$ discrete steps through Trotter decomposition: \begin{equation} U =e^{-it_f H} =e^{-it_f(H_{\textrm{SQG}}+H_{\textrm{TQG}})} \approx \left[ e^{-i\frac{t_f}{s} H_{\textrm{SQG}}}e^{-i\frac{t_f}{s}H_{\textrm{TQG}}}\right]^{s}+ \mathcal{O}\left( \left(\frac{t_f}{s} \right)^{2}\right). 
\label{eq:first_splitting} \end{equation} The evolution operator associated with single-qubit gates in each Trotter step of equation~\eqref{eq:first_splitting} needs to be rewritten in terms of our native gate set. It is always possible to decompose any single-qubit unitary exactly, up to a global phase, into a sequence of three single-qubit rotations such as, for example, a rotation about the $y$-axis in between two rotations about the $z$-axis: \begin{equation} U_1 = R_{z}(\beta)R_{xy}(\pi/2, \gamma)R_{z}(\delta), \label{eq:unitary} \end{equation} where the angles $\beta, \gamma,$ and $\delta$ need to be determined from the specific entries of the unitary in question to simulate the evolution of the $p^{_{\textrm{th}}}$ qubit: \begin{equation} U_1^p = e^{-i\frac{t_f}{s}\left(\frac{A^x_p}{2}\frac{X_p}{2}+\frac{A^y_p}{2}\frac{Y_p}{2}+ \left( \frac{A^z_p}{2}-\gamma_c B_z \right)\frac{Z_p}{2}\right)}. \label{eq:uone} \end{equation} From now on, we will concentrate on the case of a single NV center, which will be encoded in qubit $0$. Then, the evolution operator associated to single-qubit gates for the NV center will be: \begin{equation} U^0_1 = e^{-i\frac{t_f}{s}\delta_0 Z_0}. \label{eq:uonep} \end{equation} Matching the entries of the matrices corresponding to the unitaries on equations~\eqref{eq:uone} and~\eqref{eq:uonep} we get a system of equations for the angles $\beta, \gamma,$ and $\delta$ for each Trotter step $s$. There are 3 (5) types of interaction terms of the form ${XZ,YZ,ZZ,\cdots}$ in $H_{\textrm{TQG}}$ without (with) internuclear interactions. Due to the native TQG being of only $ZZ$ interaction type (see Eq.~\eqref{ZZ_unitary}), local rotations need to be introduced for simulating the rest of the TQG terms. These are $R^{\sigma_i \rightarrow \sigma_j}_k$, which have the effect of converting the Pauli operator $\sigma_i$ into the Pauli operator $\sigma_j$ for qubit $k$. 
After the Trotterization introduced in equation (\ref{eq:first_splitting}), the term $H_{\textrm{TQG}}$ corresponding to TQG contains some elements which do not commute with each other, and some of them which do commute with each other. We choose to split all terms in order to express the time-evolution operator in terms of the native gates that we assumed in section \ref{subsubsec:native_gates}. Only the elements that do not commute with each other contribute to the total Trotter error, which remains of the same order: \begin{align} \label{eq:TQG_evolution} \begin{split} e^{-i\frac{t_f}{s}H_{\textrm{TQG}}} \approx \, &e^{-i\frac{t_f}{s}\left(\sum_k \frac{A^x_k}{2}\frac{X_k}{2}Z_0\right)} e^{-i\frac{t_f}{s}\left(\sum_k \frac{A^y_k}{2}\frac{Y_k}{2}Z_0\right)} \\ &e^{-i\frac{t_f}{s}\left(\sum_k \frac{A^z_k}{2}\frac{Z_k}{2}Z_0\right)} e^{-i\frac{t_f}{s}\left(\sum_{k'>k} \frac{g_{k'k}}{4}Z_{k'}Z_k\right)}\\ &e^{i\frac{t_f}{s}\left(\sum_{k'>k} \frac{g_{k'k}}{8}X_{k'}X_k\right)}e^{i\frac{t_f}{s}\left(\sum_{k'>k} \frac{g_{k'k}}{8}Y_{k'}Y_k\right)}\\ &+ \mathcal{O}\left( \left(\frac{t_f}{s} \right)^{2}\right). \end{split} \end{align} Finally, we observe that the operators $Z_kZ_0$ (and the rest of the TQG terms) commute with each other, so the exponentials can be further split without Trotterizing: \begin{equation} e^{-i\frac{t_f}{s}(\sum_k \frac{A^z_k}{2}\frac{Z_k}{2}Z_0)} = \Pi_k e^{-i\frac{t_f}{s}(\frac{A^z_k}{2}\frac{Z_k}{2}Z_0)}. \end{equation} The time-evolution operator implementing the continuous sinusoidal driving $\sigma^{\phi}$ is: \begin{equation} e^{-i\frac{t_f}{s}\frac{\Omega}{2}\sigma^\phi}=R_{xy}(-\phi,\theta = \Omega \frac{t_f}{s} ). \end{equation} The quantum algorithm for simulating the system under a pulsed-driving scheme is somewhat more involved than the continuous-driving case, due to the two different time-dependent processes involved in the Trotter decomposition: the free dynamics of the spins and the sequence of pulses. 
The most crucial point to be aware of is the interplay between Trotter steps and interpulse spacing. The number of interpulse evolutions, i.e. number of pulses minus one, bounds from below the minimum number of Trotter steps for the simulation. Clearly, at least one Trotter step is needed for each interpulse evolution. Taking this interplay into account, the most straightforward setup is to choose a frequency which will determine the spacing of the pulse sequence, and to identify each interpulse evolution with a single Trotter step. If the achieved precision is not high enough, more Trotter steps can be added for each interpulse evolution. Each $\pi$-pulse itself is simply implemented as an $X$- or $Y$-gate on the qubit representing the NV center. The OU-distributed Rabi frequency fluctuations present in nanoscale NMR systems are then simulated by over- and under-rotations of the $X$- and $Y$-gates. \section{\label{section:rotational_opt}Rotational optimization} In principle, we had a Hamiltonian with terms of the type $ZX$, $ZY$ and $ZZ$ for the case of no internuclear interactions. However, we can rotate the basis so the Hamiltonian loses the $ZX$ and $ZY$ terms, allowing to reduce the number of TQGs. To make up for this rotation, we need to introduce different constants \(\vec{A}^{\textrm{rot}}_i\) for the problem and rotate the vector state we obtain at the end before measuring it. The rotations that we will consider are only one-qubit rotations on nuclei qubits and we are applying this just to the case with no internuclear interactions. Therefore, we can consider the effect of this rotation on only one qubit representing an arbitrary nucleus. We will exemplify this procedure using nucleus 1. 
If we want to obtain the mean value of \(\sigma_z\) acting on the nucleus: \begin{equation}\label{eq:sigmaz1} \begin{split} \langle \sigma_z\rangle = \Tr \left(\rho(t_f) \sigma_z\right) = \Tr \left(U(0,t_f)\rho(0)U^\dagger(0,t_f)\sigma_z\right), \end{split} \end{equation} where \(U(0,t_f)\) represents the evolution operator from \(t=0\) to \(t=t_f\). The density matrix $\rho(0)$ contains the state of the NV center (which is in the $|+\rangle$ state at $t=0$) and nucleus 1, i.e. $\rho(0)=|+\rangle \langle +|_{\textrm{NV}} \otimes \frac{\mathbb{1}_{1}}{2}$. Our intention is to obtain an expression of this mean value in terms of the rotated evolution operators; later, we will find the appropriate rotation to be performed. Then, taking into account that the trace is invariant under a rotation $R=\mathbb{1}_{\textrm{NV}} \otimes R_1$ we get: \begin{equation}\label{eq:sigmaz2} \langle \sigma_z\rangle = \Tr \left(RU(0,t_f)\rho(0)U^\dagger(0,t_f)\sigma_z R^\dagger \right) = \Tr \left(RU(0,t_f)R^\dagger R\rho(0)R^\dagger R U^\dagger(0,t_f)R^\dagger R\sigma_z R^\dagger \right). \end{equation} This can be expressed as: \begin{equation}\label{eq:sigmaz3} \begin{split} \langle \sigma_z\rangle = \Tr \left(U_{\textrm{rot}}(0,t_f)\rho_{\textrm{rot}}(0)U_{\textrm{rot}}^\dagger(0,t_f) R\sigma_z R^\dagger \right). \end{split} \end{equation} The density matrix of the nucleus is the identity. Thus, any rotation on nuclei qubits leaves the density matrix unaffected, leading to: \begin{equation}\label{eq:sigmaz4} \begin{split} \langle \sigma_z\rangle = \Tr \left(U_{\textrm{rot}}(0,t_f)\rho(0)U_{\textrm{rot}}^\dagger(0,t_f) R\sigma_z R^\dagger \right). \end{split} \end{equation} Then we need to rotate the system prior to the measurement.
By using the invariance of the trace under cyclic permutations we get: \begin{equation}\label{eq:sigmaz5} \begin{split} \langle \sigma_z\rangle = \Tr \left(R^\dagger U_{\textrm{rot}}(0,t_f)\rho(0)U_{\textrm{rot}}^\dagger(0,t_f) R\sigma_z \right), \end{split} \end{equation} which is equivalent to introducing a counter-rotation in the circuit before measurement. Now let us focus on the specific rotation we have to implement. Since the constants multiplying the Pauli matrices in the Hamiltonian are \(\frac{\vec{A_1}}{2}\) and \(\vec{\omega^c_1} = \frac{\vec{A_1}}{2}-\gamma_c B_z \vec{e_z}\) (for nucleus 1), we can rotate the basis to obtain a representation in which the vectors have only $z$-component for \(\vec{A_1}\) and thus, $XZ$ and $YZ$ terms are removed. The vectors before and after the needed rotation can be seen in Fig.~\ref{fig:basis_rotation}. \begin{figure} \caption{(a) Coefficient vectors of the first qubit $\vec{A}$ before and after the rotation described in the text.} \label{subfig:basis_rotation1} \label{subfig:basis_rotation2} \label{fig:basis_rotation} \end{figure} To compute the new vectors (and thus the new coefficients for the gates of our algorithm), we can use Rodrigues' rotation formula to rotate a vector $\vec{v}$ an angle $\theta$ around a unitary axis $\hat{k}$: \begin{equation} \vec{v}_{\textrm{rot}} = \vec{v} \cos\theta + (\hat{k} \times \vec{v}) \sin \theta + \hat{k}(\hat{k}\cdot \vec{v})(1- \cos\theta), \end{equation} being in our case, $\theta = \arccos{(A^z_1/|\vec{A}_1|)}$ and $\hat{k} = (\cos(\phi),\sin(\phi),0)$, with $\phi = -\frac{\pi}{2}+\phi_{xy}= -\frac{\pi}{2}+\arctan{(A^y_1/A^x_1)}$. For implementing the counter-rotation of this in the quantum circuit, we use: \begin{equation} R_1^{\dagger} = e^{i\frac{\theta}{2}(\cos(\phi)X-\sin(\phi)Y)}.
\end{equation} \section{\label{sec:routing}SWAP routing} Our qubit routing method consists of mapping the square grid to a linear chain with qubits labeled from 0 to $n$. Then, in the simplified case of no internuclear interactions, the optimal SWAP method for the one-to-all interaction case on a linear chain can be used. For a single NV center the protocol goes as follows: \begin{enumerate} \item Initialize the state of the NV center in the second qubit; \item Perform interactions with the first and third qubits; \item SWAP the NV center qubit to the right; \item Perform interaction with the right qubit; \item Repeat steps 3--4 until all interactions have been achieved. \end{enumerate} The pattern is seen in Fig.~\ref{fig:swap_patterns}a denoted by the intense blue arrows. With internuclear interactions we need to perform a swap pattern that enables all-to-all interactions. The so-called odd-even mapping in Fig.~\ref{fig:swap_square} is an efficient one~\cite{Cowtan2019} represented by green arrows in Fig.~\ref{fig:swap_patterns}a. This consists of swapping first all the even qubits with their right neighbors and then swapping all the odd qubits with their right neighbors. This way, we will obtain all-to-all interactions with $\frac{1}{2}(n-1)(n-2)$ SWAP gates and a total TQG depth of $6n$. A summary of the TQG counts is shown in Table~\ref{tab:gate_count}. To motivate the creation of a chip with a star topology and the use of an alternative linearized SWAP routing for a square grid instead of standard numerical approaches, a comparison between all the cases is provided in Fig.~\ref{fig:swap_comp_with_numerical}. A reduction in the number of SWAPs can be noticed for both the linear chain approach and the star-topology chip against standard numerical approaches for a square grid. \begin{figure} \caption{a) Comparison of the required number of SWAPs for simulating the proposed system with no internuclear interactions for each Trotter step.
Numerical approaches from references are applied to a square grid. b) Equivalent comparison with internuclear interactions. Zulehner et al. and Saeedi et al. do not improve the linear chain approach for few qubits and are intractable for larger numbers of qubits and thus are not displayed.} \label{subfig:swap_comp_with_numerical1} \label{subfig:swap_comp_with_numerical2} \label{fig:swap_comp_with_numerical} \end{figure} \begin{center} \begin{table}[htbp] \renewcommand{\arraystretch}{1.2} \begin{tabular}{|l|c|c|c|} \hline & All-To-All & Star topology & Square grid \\ \hline \textbf{$N^{\textrm{nonint}}_{\textrm{TQG}}$} & $n-1$ & $n-1$ & $4n-4$ \\ \hline \textbf{$N^{\textrm{nonint}}_{\textrm{SQG}}$} & $\frac{5}{2}n+2$ & $\frac{5}{2}n+2$ & $\frac{21}{2}n-\frac{47}{2}$ \\ \hline \textbf{$N^{\textrm{int}}_{\textrm{TQG}}$} & $\frac{3}{2}n^2-\frac{3}{2}n$ & $\frac{3}{2}n^2+\frac{3}{2}n-6$ & $3n^2-6n+3$ \\ \hline \textbf{$N^{\textrm{int}}_{\textrm{SQG}}$} & $4n^2-\frac{9}{2}n+\frac{7}{2}$ & $4n^2+\frac{7}{2}n-\frac{25}{2}$ & $8n^2-\frac{33}{2}n+\frac{11}{2}$ \\ \hline \end{tabular} \caption{Gate count for one Trotter step and for one cycle for different topologies with and without internuclear interactions.} \label{tab:gate_count} \end{table} \end{center} \section{Qubit-resonator gate theory} \label{gate_theory} In the following discussion, we consider gate operation between the resonator and one of the qubits, and neglect any effects that arise from the interactions with spectator qubits and other resonator modes.
The time dynamics in such a system are determined by the Hamiltonian: \begin{align} \label{H2QG} \begin{split} H= H_0 + H_{rc} + H_{qc} + H_{rq}, \end{split} \end{align} where the uncoupled part of the total Hamiltonian $H_0 = H_r + H_c + H_q $ is: \begin{align} \begin{split} H_r&=\hbar \omega_r b_r^\dagger b_r,\\ H_c&=\hbar \omega_c b_c^\dagger b_c + \frac{\hbar}{2} \alpha_c b_c^\dagger b_c^\dagger b_c b_c, \\ H_q&=\hbar \omega_q b_q^\dagger b_q + \frac{\hbar}{2} \alpha_q b_q^\dagger b_q^\dagger b_q b_q, \end{split} \end{align} where $b_{\lambda}$ and $\omega_{\lambda}$ are the annihilation operator and fundamental frequency for the mode $\lambda=\{r,c,q\}$, respectively, and $\alpha_{\gamma}$ is the anharmonicity of the mode $\gamma=\{q,c\}$. The interaction component of the Hamiltonian is: \begin{align} \begin{split} H_{\lambda \mu} = -\hbar g_{\lambda \mu}(b^{\dag}_{\lambda}- b_{\lambda})( b^{\dag}_{\mu}- b_{\mu}), \end{split} \end{align} where $\lambda \mu=\{rc,qc,rq\}$, and $g_{\lambda \mu}$ denote resonator-coupler, qubit-coupler and resonator-qubit coupling frequencies. With the Hamiltonian of Eq.~\eqref{H2QG}, we are now in a position to perform simulations of two-qubit gates by propagating a suitably chosen initial state. Before the gate operation, we choose the idling frequencies for the qubit, resonator, and the coupler such that the CZ coupling rate $\zeta$ is minimized. This CZ coupling rate is defined as: \begin{align} \label{ZZdef} \begin{split} \zeta = \omega_{\textrm{101}} - \omega_{\textrm{100}} - \omega_{\textrm{001}} + \omega_{\textrm{000}}, \end{split} \end{align} where $\omega_{n_r0n_q}$ corresponds to the eigenenergy of Hamiltonian in Eq.~\eqref{H2QG} with $n_r$ excitations in resonator and $n_q$ excitations in qubit with coupler being in the ground state. 
The point of minimal $|\zeta|$ is also known as the idling configuration, which we found to be at $[\omega_{\rm r},\omega_{\rm c},\omega_{\rm q}]/(2\pi) = [4.30, 6.14, 4.47]$ GHz for the parameters given in Table~\ref{tab:ndonis_parameters}. The CZ gate is operated by sending a flux pulse that modifies the coupler frequency $\omega_c$, which then in the coupled basis modifies the frequencies $\omega_{\textrm{101}},\omega_{\textrm{100}},\omega_{\textrm{001}}$ and $\omega_{\textrm{000}}$. This makes $\zeta$ non-zero, so the system collects a CZ phase. \twocolumngrid \end{document}
\begin{document} \begin{frontmatter} \title{Continuous-stage Runge-Kutta-Nystr\"{o}m methods} \author[a,b]{Wensheng Tang\corref{cor1}} \ead{[email protected]}\cortext[cor1]{Corresponding author.} \address[a]{College of Mathematics and Statistics,\\ Changsha University of Science and Technology, Changsha 410114, China} \address[b]{Hunan Provincial Key Laboratory of \\ Mathematical Modeling and Analysis in Engineering, Changsha 410114, China} \begin{abstract} We develop continuous-stage Runge-Kutta-Nystr\"{o}m (csRKN) methods in this paper. By introducing a weight function into the formalism of csRKN methods and modifying the original pattern of continuous-stage methods, we establish a new and larger framework for csRKN methods which enables us to derive more effective RKN-type methods. Particularly, a variety of classical weighted orthogonal polynomials can be used in the construction of RKN-type methods. As an important application, new families of symmetric and symplectic integrators can be easily acquired in such a framework. Numerical experiments have verified the effectiveness of the new integrators presented in this paper. \end{abstract} \begin{keyword} Continuous-stage Runge-Kutta-Nystr\"{o}m methods; Hamiltonian systems; Symplectic methods; Symmetric methods; Orthogonal polynomial expansion; Simplifying assumptions. \end{keyword} \end{frontmatter} \section{Introduction} \label{} The seminal idea of continuous-stage methods was introduced by Butcher (1972) in \cite{butcher72ato} (see also \cite{butcher87tna,butcherw96rkm} for a more detailed description), which suggests a ``continuous'' extension of Runge-Kutta (RK) methods by allowing the number of stages to be infinite so that the discrete index set $\{1,2,\ldots,s\}$ becomes the interval $[0,1]$. Unfortunately, this creative idea was completely ignored for a very long period of time.
This situation persisted until 2010, when Hairer revived the idea by using it to interpret his energy-preserving collocation methods \cite{hairer10epv}, thereby creating an elegant mathematical formalism for continuous-stage Runge-Kutta methods. Since then, there has been a revival of interest in the study of continuous-stage methods, and some researchers consciously or unconsciously conduct their studies closely related to this subject. The first related work after Hairer's was given by Tang \& Sun \cite{Tangs12tfe}, stating that there is an interesting connection between Galerkin variational methods and continuous-stage methods, and it was shown in \cite{Tangs12tfe} that energy-preserving methods such as $s$-stage trapezoidal methods \cite{Iavernarop07sst}, average vector field methods \cite{quispelm08anc}, and infinite Hamiltonian boundary value methods \cite{brugnanoit10hbv} (as well as Hairer's energy-preserving collocation methods \cite{hairer10epv}) can be unified in the framework of continuous-stage methods. In recent years, a series of papers has intensively studied this subject \cite{Tangs12ana,Tangs14cor,Tanglx16cos,Tangz18spc,Tang18ano, Tang18csr,Tang18csm,Tang18siw,Tang18aef,Tangz18sib}. So far, the available methods with continuous stage can be grouped into the following three classes: continuous-stage Runge-Kutta (csRK) methods \cite{Tangs12ana,Tangs14cor,Tang18ano,Tang18csr}, continuous-stage partitioned Runge-Kutta (csPRK) methods \cite{Tanglx16cos}, and continuous-stage Runge-Kutta-Nystr\"{o}m (csRKN) methods \cite{Tangsz18hos,Tangz18spc}. It turns out that with the idea of continuous-stage methods we can easily construct many effective integrators of arbitrarily-high order, without needing to solve the tedious nonlinear algebraic equations (usually associated with the order conditions) in terms of many unknown coefficients.
Particularly, a crucial technique for constructing continuous-stage methods with arbitrary order is developed in \cite{Tangs14cor,Tangsz18hos,Tanglx16cos,Tangz18spc,Tang18csr}, which is mainly based on the orthogonal polynomial expansion. Compared with standard RK \& RK-like discretizations, the continuous-stage approaches may provide us a new insight in many aspects of numerical solution of differential equations, seeing that the Butcher coefficients (as functions) are assumed to be ``continuous" or ``smooth" which potentially allows us to use some analytical tools such as Taylor expansion, inner product, limit operation, orthogonal expansion, differentiation, integration, etc \cite{liw16ffe,Tangs14cor,Tangsz18hos,Tanglx16cos,Tangz18spc,Tang18csr}. Owing to this point, sometimes it may lead to surprising applications. A good case in point is that no RK methods are energy-preserving for general non-polynomial Hamiltonian systems \cite{Celledoni09mmoqw}, whereas energy-preserving csRK methods can be easily constructed \cite{brugnanoit10hbv,hairer10epv,miyatake14aee, miyatakeb16aco,quispelm08anc,Tangs12ana,Tangs12tfe,Tangs14cor}. Another example is given by Tang \& Sun \cite{Tangs12tfe}, which states that some Galerkin variational methods can be interpreted as continuous-stage (P)RK methods, but they can not be completely understood in the classical (P)RK framework. Over the last few decades, geometric integration for the numerical solution of differential equations has attracted much attention (see, for example, \cite{Benetting94oth,Channels90sio,Feng84ods,Feng95kfc,Fengqq10sga, hairerlw06gni,lasagni88crk,Leimkuhlerr04shd,ruth83aci,sanz88rkm, sanzc94nhp,suris88otc,suris89ctg,Vogelaere56moi}), for the reason that numerical discretization respecting the geometric properties of the exact flow are very important for long-time integration \cite{Benetting94oth,hairerlw06gni,Shang99kam,Tang94feo}. 
In recent years, continuous-stage methods have found their interesting applications in geometric integration. For example, symplectic and multi-symplectic integrators can be derived by using Galerkin variational approaches, and these integrators can be interpreted and analyzed in the framework of continuous-stage methods \cite{Tangs12tfe,Tangsc17dgm,Tang18sio}; some newly-developed energy-preserving methods can be closely related to continuous-stage methods \cite{brugnanoit10hbv,Celledoni09mmoqw,cohenh11lei,hairer10epv,liw16ffe, miyatake14aee,miyatakeb16aco,quispelm08anc,Tangs12tfe,Tang18epi}; new families of symplectic and symmetric methods can be constructed by using the idea of continuous-stage methods \cite{Tangs12ana,Tangs14cor,Tangz18spc,Tang18ano,Tang18csr,Tang18csm, Tang18siw,Tang18aef,Tangz18sib}; the study of conjugate symplecticity of energy-preserving methods may be promoted in the context of continuous-stage methods \cite{hairer10epv,hairerz13oco}, etc. Undoubtedly, other new applications of continuous-stage methods in geometric integration are actively under development. More recently, the present author et al. \cite{Tangz18spc,Tangsz18hos} have developed symplectic RKN-type integrators by virtue of continuous-stage methods. In this paper, we are going to enlarge the primitive framework of csRKN methods to a new one which enables us to treat more complicated cases. For this sake, by using the similar idea presented in \cite{Tang18aef}, we will lead weight function into the formalism of csRKN methods and define the continuous-stage methods in a general interval $I$ (finite or infinite). By doing this, a variety of classical weighted orthogonal polynomials can be used in the construction of RKN-type methods. As an important application, new symmetric and symplectic integrators can be easily derived in this new framework. This paper will be organized as follows. 
In Section 2, we introduce the new definition of csRKN methods for solving second-order differential equations. This is followed by Section 3, where the order theory by using simplifying assumptions will be given. Section 4 is devoted to presenting our approach for deriving symmetric and symplectic integrators accompanied by some examples. We exhibit our numerical results in Section 5. At last, we end our paper in Section 6 with some concluding remarks. \section{Continuous-stage Runge-Kutta-Nystr\"{o}m methods} We are concerned with the initial value problem governed by a second-order system \begin{align}\label{eq:second} q''=f(t, q),\;\;q(t_0)=q_0,\;\;q'(t_0)=q'_0, \end{align} where $f:\mathbb{R}\times\mathbb{R}^{d}\rightarrow\mathbb{R}^{d}$ is assumed to be a smooth vector-valued function. \begin{defn}\label{weight_func} A non-negative function $w(x)$ is called a \emph{weight function} on the interval $I$, if it satisfies the following two conditions: \begin{itemize} \item[(a)] The $k$-th moment $\int_I x^k w(x)\,\mathrm{d} x, \;k\in\mathbb{N}$ exists; \item[(b)] For any non-negative function $u(x)$, $\int_Iu(x)w(x)\,\mathrm{d} x=0$ implies $u(x)\equiv0$. \end{itemize} \end{defn} Based on the notion of weight function, we introduce the following definition of continuous-stage Runge-Kutta-Nystr\"{o}m methods which is an extended version of that given in \cite{Tangsz18hos,Tangz18spc}. \begin{defn}\label{csRKN:def} Let $w(x)$ be a weight function defined on $I$ (finite or infinite), $\bar{A}_{\tau,\sigma}$ be a function of variables $\tau, \sigma\in I$ and $\bar{B}_\tau,\;B_\tau,\;C_\tau$ be functions of $\tau\in I$.
The continuous-stage Runge-Kutta-Nystr\"{o}m (csRKN) method for solving \eqref{eq:second} is given by \begin{subequations} \begin{alignat}{2} \label{eq:csrkn1} &Q_\tau=q_0 +hC_\tau q'_0 +h^2\int_I \bar{A}_{\tau,\sigma} w(\sigma) f(t_0+C_\sigma h, Q_\sigma) \mathrm{d} \sigma, \;\;\tau \in I,\\ \label{eq:csrkn2} &q_{1}=q_0+ h q'_0+h^2 \int_I \bar{B}_\tau w(\tau)f(t_0+C_\tau h, Q_\tau) \mathrm{d}\tau, \\ \label{eq:csrkn3} &q'_1 = q'_0 +h\int_I B_\tau w(\tau) f(t_0+C_\tau h, Q_\tau) \mathrm{d} \tau, \end{alignat} \end{subequations} which can be characterized by the following Butcher tableau \[\begin{array}{c|c} C_\tau & \bar{A}_{\tau,\sigma}w(\sigma)\\[4pt] \hline & \bar{B}_\tau w(\tau)\\ \hline\\[-15pt] & B_\tau w(\tau)\end{array}\] \end{defn} \begin{rem} For the case when $I$ is an infinite interval, we assume that the improper integrals of (\ref{eq:csrkn1}-\ref{eq:csrkn3}) satisfy some conditions (in terms of uniform convergence) such that differentiation under the integral sign with respect to the parameter $h$ (step size) is legal. \end{rem} \begin{rem} If we let $I=[0,1]$ and $w(x)=1$, then it results in the methods developed in \cite{Tangsz18hos,Tangz18spc}. However, remark that the primitive framework of csRKN methods given in \cite{Tangsz18hos,Tangz18spc} cannot be applied to more complicated cases, e.g., the case for weighting on an infinite interval $(-\infty,+\infty)$ or any other general interval $I$. \end{rem} \section{Discussions on the order theory} \begin{defn}\cite{hairernw93sod} A csRKN method is called of order $p$, if for all sufficiently regular problems \eqref{eq:second}, as $h\rightarrow0$, its \emph{local error} satisfies \begin{equation*} q(t_0+h)-q_1=\mathcal{O}(h^{p+1}),\quad q'(t_0+h)-q'_1=\mathcal{O}(h^{p+1}).
\end{equation*} \end{defn} \subsection{The order of csRKN methods} Following the idea of the classical cases \cite{hairernw93sod,hairerlw06gni}, we propose the following simplifying assumptions\footnote{It should be noticed that in $\mathcal{DN}(\zeta)$ we have removed ``$w(\sigma)$" from both sides of the formula.} \begin{equation*}\label{csRKN-simpl-assump} \begin{split} &\mathcal{B}(\xi):\quad \int_IB_\tau w(\tau)C_\tau^{\kappa-1}\,\mathrm{d} \tau=\frac{1}{\kappa},\;\; 1\leq\kappa\leq\xi,\\ &\mathcal{CN}(\eta):\quad \int_I\bar{A}_{\tau,\,\sigma}w(\sigma)C_\sigma^{\kappa-1}\,\mathrm{d} \sigma=\frac{C_\tau^{\kappa+1}}{\kappa(\kappa+1)},\;\;1\leq\kappa\leq\eta-1,\\ &\mathcal{DN}(\zeta):\quad \int_IB_\tau w(\tau) C_\tau^{\kappa-1} \bar{A}_{\tau,\,\sigma}\,\mathrm{d} \tau=\frac{B_\sigma C_\sigma^{\kappa+1}}{\kappa(\kappa+1)}-\frac{B_\sigma C_\sigma}{\kappa} +\frac{B_\sigma}{\kappa+1},\;\; 1\leq\kappa\leq\zeta-1, \end{split} \end{equation*} where $ \tau,\,\sigma\in I$. \begin{thm}\label{ord_csRKN} If the coefficients of the csRKN method (\ref{eq:csrkn1}-\ref{eq:csrkn3}) satisfy the simplifying assumptions $\mathcal{B}(p),\,\mathcal{CN}(\eta),\,\mathcal{DN}(\zeta)$, and if $\bar{B}_\tau=B_\tau(1-C_\tau)$ is always fulfilled, then the method is of order at least $\min\{p,\,2\eta+2,\eta+\zeta\}$. \end{thm} \begin{proof} This is a straightforward result of Theorem 3.3 in \cite{Tangsz18hos}. \end{proof} In what follows, we will use the hypothesis $C_\tau=\tau$ (and thus $\bar{B}_\tau=B_\tau(1-\tau)$) throughout this paper. Let us establish a lemma in the first place.
\begin{lem}\label{lem_assum} With the hypothesis $C_\tau=\tau$, the simplifying assumptions $\mathcal{B}(\xi), \mathcal{CN}(\eta)$ and $\mathcal{DN}(\zeta)$ are equivalent to, respectively, {\small\begin{align}\label{eq:cd1} &\mathcal{B}(\xi):\; \int_IB_\tau w(\tau) \phi(\tau)\,\mathrm{d} \tau=\int_0^1\phi(x)\,\mathrm{d} x,\;\; \forall\, \phi\; \text{with}\;\emph{deg}(\phi)\leq\xi-1,\\\label{eq:cd2} &\mathcal{CN}(\eta):\; \int_I\bar{A}_{\tau,\,\sigma} w(\sigma) \phi(\sigma)\,\mathrm{d} \sigma=\int_0^\tau \int_0^\alpha\phi(x)\,\mathrm{d} x\,\mathrm{d}\alpha,\;\; \forall\, \phi\; \text{with}\; \emph{deg}(\phi)\leq\eta-2,\\\label{eq:cd3} &\mathcal{DN}(\zeta):\; \int_IB_\tau \bar{A}_{\tau,\,\sigma}w(\tau)\phi(\tau)\,\mathrm{d} \tau=B_\sigma\Big(\int_0^\sigma\int_1^\alpha\phi(x)\,\mathrm{d} x\,\mathrm{d}\alpha+\int_0^1x\phi(x)\,\mathrm{d} x\Big),\;\;\forall\, \phi\; \text{with}\; \emph{deg}(\phi)\leq\zeta-2, \end{align}} where $\emph{deg}(\phi)$ stands for the degree of the polynomial function $\phi$.
\end{lem} \begin{proof} With the hypothesis $C_\tau=\tau$, we can rewrite $\mathcal{B}(\xi), \mathcal{CN}(\eta)$ and $\mathcal{DN}(\zeta)$ as {\small\begin{equation*} \begin{split} &\mathcal{B}(\xi):\;\; \int_IB_\tau w(\tau)\tau^{\kappa-1}\,\mathrm{d} \tau=\int_0^1x^{\kappa-1}\,\mathrm{d} x,\;\; 1\leq\kappa\leq\xi,\\ &\mathcal{CN}(\eta):\;\; \int_I\bar{A}_{\tau,\,\sigma}w(\sigma)\sigma^{\kappa-1}\,\mathrm{d} \sigma=\int_0^\tau \int_0^\alpha x^{\kappa-1}\,\mathrm{d} x\,\mathrm{d}\alpha,\;\;1\leq\kappa\leq\eta-1,\\ &\mathcal{DN}(\zeta):\;\; \int_IB_\tau w(\tau) \tau^{\kappa-1} \bar{A}_{\tau,\,\sigma}\,\mathrm{d} \tau=B_\sigma\Big(\int_0^\sigma\int_1^\alpha x^{\kappa-1}\,\mathrm{d} x\,\mathrm{d}\alpha+\int_0^1x\cdot x^{\kappa-1}\,\mathrm{d} x\Big),\;\; 1\leq\kappa\leq\zeta-1. \end{split} \end{equation*}} Therefore, these formulae are satisfied for all monomials like $x^\iota$ with degree $\iota$ no larger than $\xi-1, \eta-2$ and $\zeta-2$ respectively. Consequently, the final result follows from the fact that any polynomial function $\phi$ can be expressed as a linear combination of monomials. \end{proof} It is known that for a given weight function $w(x)$, there exists a sequence of orthogonal polynomials in the \emph{weighted function space} (Hilbert space) \cite{Szeg85op} \begin{equation*} L^2_w(I)=\{u \text{ is measurable on}\, I:\;\int_I|u(x)|^2w(x)\,\mathrm{d} x<+\infty\} \end{equation*} which is linked with the inner product \begin{equation}\label{w_ip} \big\langle u,v\big\rangle_w=\int_Iu(x)v(x)w(x)\,\mathrm{d} x. \end{equation} To proceed with our discussions, we denote a sequence of weighted orthogonal polynomials by $\{P_n(x)\}_{n=0}^\infty$, which constitutes a complete set in the Hilbert space $L^2_w(I)$. It is known that $P_n(x)$ has exactly $n$ real simple zeros in the interval $I$.
For convenience, in what follows we always assume the orthogonal polynomials are normalized, i.e., satisfying \begin{equation*} \big\langle P_i,P_j\big\rangle_w=\delta_{ij},\;\;i, j=0,1,2,\cdots. \end{equation*} \begin{thm}\label{ordcon_var} Let $C_\tau=\tau$ and suppose\footnote{The notation $\bar{A}_{\ast,\,\sigma}$ stands for the one-variable function in terms of $\sigma$, and $\bar{A}_{\tau,\,\ast}$ can be understood likewise.} $B_{\tau},\,\,\bar{A}_{\ast,\,\sigma},\,\,(B_\tau\,\bar{A}_{\tau,\,\ast})\in L^2_w(I)$, then we have \begin{itemize} \item[\emph{(a)}] $\mathcal{B}(\xi)$ holds $\Longleftrightarrow$ $B_\tau$ has the following form in terms of the normalized orthogonal polynomials in $L^2_w(I)$: \begin{equation}\label{Bt} B_\tau=\sum\limits_{j=0}^{\xi-1}\int_0^1P_j(x)\,\mathrm{d} x P_j(\tau)+\sum\limits_{j\geq\xi}\lambda_j P_j(\tau), \end{equation} where $\lambda_j$ are any real parameters; \item[\emph{(b)}] $\mathcal{CN}(\eta)$ holds $\Longleftrightarrow$ $\bar{A}_{\tau,\,\sigma}$ has the following form in terms of the normalized orthogonal polynomials in $L^2_w(I)$: \begin{equation}\label{Ats} \bar{A}_{\tau,\,\sigma}=\sum\limits_{j=0}^{\eta-2}\int_0^\tau \int_0^\alpha P_j(x)\,\mathrm{d} x\,\mathrm{d}\alpha P_j(\sigma)+\sum\limits_{j\geq\eta-1}\phi_j(\tau) P_j(\sigma), \end{equation} where $\phi_j(\tau)$ are any $L^2_w$-integrable real functions; \item[\emph{(c)}] $\mathcal{DN}(\zeta)$ holds $\Longleftrightarrow$ $B_\tau \bar{A}_{\tau,\,\sigma}$ has the following form in terms of the normalized orthogonal polynomials in $L^2_w(I)$: \begin{equation}\label{BtAts} B_\tau\,\bar{A}_{\tau,\,\sigma}=\sum\limits_{j=0}^{\zeta-2}B_\sigma\Big(\int_0^\sigma\int_1^\alpha P_j(x)\,\mathrm{d} x\,\mathrm{d}\alpha+\int_0^1xP_j(x)\,\mathrm{d} x\Big) P_j(\tau)+\sum\limits_{j\geq\zeta-1}\psi_j(\sigma) P_j(\tau), \end{equation} where $\psi_j(\sigma)$
are any $L^2_w$-integrable real functions. \end{itemize} \end{thm} \begin{proof} This theorem can be proved in the same manner as Theorem 2.3 of \cite{Tang18csr}. For part $(a)$, consider the following orthogonal polynomial expansion in $L^2_w(I)$ \begin{equation*} B_\tau=\sum\limits_{j\geq0}\lambda_j P_j(\tau),\;\;\lambda_j\in \mathbb{R}, \end{equation*} and substitute the formula above into \eqref{eq:cd1} (with $\phi$ replaced by $P_j$) in Lemma \ref{lem_assum}, then it follows \begin{equation*} \lambda_j=\int_0^1P_j(x)\,\mathrm{d} x,\;\;j=0,\cdots,\xi-1, \end{equation*} which gives \eqref{Bt}. For parts $(b)$ and $(c)$, consider the following orthogonal expansions of $\bar{A}_{\tau,\,\sigma}$ with respect to $\sigma$ and $B_\tau\,\bar{A}_{\tau,\,\sigma}$ with respect to $\tau$ in $L^2_w(I)$, respectively, \begin{equation*} \bar{A}_{\tau,\,\sigma}=\sum\limits_{j\geq0}\phi_j(\tau) P_j(\sigma),\;\;\phi_j(\tau)\in L^2_w(I), \end{equation*} \begin{equation*} B_\tau\,\bar{A}_{\tau,\,\sigma}=\sum\limits_{j\geq0}\psi_j(\sigma) P_j(\tau),\;\;\psi_j(\sigma)\in L^2_w(I), \end{equation*} and then substitute them into \eqref{eq:cd2} and \eqref{eq:cd3}, which then leads to the final results. \end{proof} \begin{rem}\label{rem:csRKN_trunc} For the sake of obtaining a practical csRKN method, we have to define a finite form for $B_\tau$ and $\bar{A}_{\tau,\,\sigma}$. A natural and simple way is to truncate the series \eqref{Bt} and \eqref{Ats}. As a consequence, $B_\tau$ and $\bar{A}_{\tau,\,\sigma}$ become polynomial functions. \end{rem} \subsection{The order of RKN methods by using quadrature formulas} In the practical implementation, generally we have to approximate the integrals of the csRKN method by numerical quadrature formulas.
For this sake, we introduce the following $s$-point \emph{weighted interpolatory quadrature formula} \begin{equation}\label{wquad} \int_I\Phi(\tau)w(\tau)\,\mathrm{d} \tau\approx\sum\limits_{i=1}^sb_i \Phi(c_i),\;\;c_i\in I, \end{equation} where \begin{equation*} b_i=\int_I\ell_i(\tau)w(\tau)\,\mathrm{d} \tau,\;\;\ell_i(\tau)=\prod\limits_{j=1,j\neq i}^s\frac{\tau-c_j}{c_i-c_j},\;\;i=1,\cdots,s. \end{equation*} After applying the quadrature formula to \eqref{eq:csrkn1}-\eqref{eq:csrkn3}, it gives rise to an $s$-stage RKN method \begin{subequations} \begin{alignat}{2} \label{eq:rkn1} &Q_i=q_0 +hC_i q'_0+h^2\sum\limits_{j=1}^{s}b_j\bar{A}_{ij}f(t_0+C_jh,\,Q_j), \quad i=1,\cdots, s, \\ \label{eq:rkn2} &q_{1}=q_0+ h q'_0+h^2\sum\limits_{i=1}^{s}b_i\bar{B}_if(t_0+C_ih,\,Q_i), \\ \label{eq:rkn3} &q'_{1}= q'_0+h\sum\limits_{i=1}^{s}b_iB_if(t_0+C_ih,\,Q_i), \end{alignat} \end{subequations} where $Q_i:=Q_{c_i}, \bar{A}_{ij}:=\bar{A}_{c_i, c_j}, \bar{B}_i:=\bar{B}_{c_i}, B_i:=B_{c_i}, C_i:=C_{c_i}=c_i$ (recall that $C_\tau=\tau$), which can be characterized by \begin{equation}\label{RKN:qua} \begin{array}{c|ccc} c_1 & b_1\bar{A}_{11} & \cdots & b_s\bar{A}_{1s}\\[2pt] \vdots &\vdots & &\vdots\\[2pt] c_s & b_1\bar{A}_{s1} & \cdots & b_s\bar{A}_{ss}\\[2pt] \hline & b_1\bar{B}_{1} & \cdots & b_s\bar{B}_{s}\\[2pt] \hline & b_1B_{1} & \cdots & b_sB_{s}\end{array} \end{equation} In order to analyze the order of the RKN method \eqref{RKN:qua}, we propose the following result, which is closely related with Remark \ref{rem:csRKN_trunc}. \begin{thm}\label{qua:csRKN} Assume the underlying quadrature formula \eqref{wquad} is of order $p$, and $\bar{A}_{\tau,\,\sigma}$ is of degree $\pi_A^\tau$ with respect to $\tau$ and of degree $\pi_A^{\sigma}$ with respect to $\sigma$, and $B_{\tau}$ is of degree $\pi_B^\tau$.
If we assume $C_\tau=\tau,\,\bar{B}_\tau=B_\tau(1-\tau)$, and all the simplifying assumptions $\mathcal{B}(\xi)$, $\mathcal{CN}(\eta)$, $\mathcal{DN}(\zeta)$ are fulfilled, then the RKN method \eqref{RKN:qua} is at least of order $$\min\{\rho, \,2\alpha+2, \,\alpha+\beta\},$$ where $\rho=\min\{\xi,\,p-\pi_B^\tau\}$, $\alpha=\min\{\eta,\,p-\pi_A^{\sigma}+1\}$ and $\beta=\min\{\zeta,\, p-\pi_A^\tau-\pi_B^\tau+1\}$. \end{thm} \begin{proof} Since the quadrature formula \eqref{wquad} holds for any polynomial $\Phi(x)$ of degree up to $p-1$, by using it to compute the integrals of $\mathcal{B}(\xi)$, $\mathcal{CN}(\eta)$, $\mathcal{DN}(\zeta)$ it gives \begin{equation*} \begin{split} &\sum_{i=1}^s(b_iB_i)c_i^{\kappa-1}=\frac{1}{\kappa},\;\kappa=1,\cdots,\rho,\\ &\sum_{j=1}^s(b_{j}\bar{A}_{ij})c_j^{\kappa-1}=\frac{c_i^{\kappa+1}}{\kappa(\kappa+1)}, \;i=1,\cdots,s,\;\kappa=1,\cdots,\alpha-1,\\ &\sum_{i=1}^s(b_iB_i)c_i^{\kappa-1}(b_{j}\bar{A}_{ij})= \frac{(b_jB_j)c_j^{\kappa+1}}{\kappa(\kappa+1)}-\frac{(b_jB_j)c_j}{\kappa}+\frac{b_jB_j}{\kappa+1}, \;j=1,\cdots,s,\;\kappa=1,\cdots,\beta-1, \end{split} \end{equation*} where $\rho=\min\{\xi,\,p-\pi_B^\tau\}$, $\alpha=\min\{\eta,\,p-\pi_A^{\sigma}+1\}$ and $\beta=\min\{\zeta,\, p-\pi_A^\tau-\pi_B^\tau+1\}$. These formulas imply that the RKN method with coefficients given by \eqref{RKN:qua} satisfies the classical simplifying assumptions $B(\rho)$, ${CN}(\alpha)$ and ${DN}(\beta)$ (see \cite{hairerlw06gni}), and it is observed that we also have $b_i\bar{B}_i=b_iB_i(1-c_i)$ for each $i=1,\ldots, s$. Consequently, it gives rise to the order of the method by the classical result \cite{hairerlw06gni,hairernw93sod}. \end{proof} \section{Geometric integration by csRKN methods} In this section, we discuss the geometric integration by csRKN methods.
As pointed out in \cite{hairerlw06gni}, symplectic integrators for Hamiltonian systems and symmetric integrators for reversible systems play a central role in the geometric integration of differential equations, for the reason that they possess excellent numerical behaviors in long-time integration. So far, there is a large body of literature concentrating on the theoretical analysis and empirical study of these integrators, see \cite{Benetting94oth,Feng84ods,Feng95kfc,Fengqq10sga,hairerlw06gni, Leimkuhlerr04shd,sanzc94nhp} and references therein. \subsection{Symplectic integrators} A very important subclass of dynamical systems in classical and non-classical mechanics is the class of so-called Hamiltonian systems \cite{Arnold89mmo}, which read \begin{equation}\label{Hs} z'=J^{-1}\nabla_{z}H(z),\;\;z(t_0)=z_0\in\mathbb{R}^{2d},\;\; z=\begin{pmatrix} p \\ q \\ \end{pmatrix},\;\; J=\begin{pmatrix} 0 & I_{d\times d} \\ -I_{d\times d} & 0 \\ \end{pmatrix}, \end{equation} where $q\in\mathbb{R}^{d}$ represents the position coordinates, $p\in\mathbb{R}^{d}$ the momentum coordinates, and $H$ the Hamiltonian function (generally representing the total energy). Such a system possesses a symplectic structure (a characteristic property of the system \cite{hairerlw06gni}), which means the phase flow $\varphi_t$ satisfies \cite{Arnold89mmo} \begin{equation*} \mathrm{d}\varphi_t(z_0)\wedge J\mathrm{d}\varphi_t(z_0)=\mathrm{d} z_0 \wedge J\mathrm{d} z_0,\quad \forall\,z_0\in D, \end{equation*} where $\wedge$ represents the wedge product, and $D$ is an open subset in the phase space. For the sake of respecting such geometric structure in numerical discretization, symplectic integrators were suggested by some earlier scientists (see \cite{Feng84ods,ruth83aci,Vogelaere56moi} and references therein), the definition of which can be stated as follows.
\begin{defn} A one-step method $\phi_h: z_0=(p_0,\,q_0)\mapsto(p_{1},\,q_{1})=z_1$ is called symplectic if and only if $$\mathrm{d}\phi_h(z_0)\wedge J\mathrm{d}\phi_h(z_0)=\mathrm{d} z_0 \wedge J\mathrm{d} z_0,\quad \forall\,z_0\in D,$$ whenever the method is applied to a smooth Hamiltonian system. \end{defn} A class of Hamiltonian systems frequently encountered in practice is the following \begin{equation}\label{eq:first} p'=-\nabla_q V(q),\;\; q'=Mp, \end{equation} with the Hamiltonian $$H(z)=\frac{1}{2}p^TMp+V(q),$$ where $M$ is a constant symmetric matrix, and $V(q)$ is a scalar function. These equations can also be rewritten as a second-order system \begin{equation}\label{eq:Hs} q''=-M\nabla_q V(q). \end{equation} By using the notations $f(q)=-M\nabla_q V(q)$ and $g(q)=-\nabla_q V(q)$, we propose the following csRKN method for solving \eqref{eq:Hs} \begin{subequations}\label{csRKN:Hs} \begin{alignat}{2} \label{Heq:csrkn1} &Q_\tau=q_0 +hC_\tau Mp_0 +h^2\int_I \bar{A}_{\tau, \sigma}w(\sigma) f(Q_\sigma) \mathrm{d} \sigma, \;\;\tau \in I, \\ \label{Heq:csrkn2} &q_{1}=q_0+ h Mp_0+h^2 \int_I \bar{B}_\tau w(\tau) f(Q_\tau) \mathrm{d}\tau, \\ \label{Heq:csrkn3} &p_1 = p_0 +h\int_I B_\tau w(\tau) g(Q_\tau) \mathrm{d}\tau. \end{alignat} \end{subequations} Remark that here we have removed the constant matrix $M$ from both sides of \eqref{Heq:csrkn3}, which does not affect the order of the method. The following theorems extend the corresponding results previously presented in \cite{Tangsz18hos}.
\begin{thm}\label{symp_cond_ori} If the coefficients of a csRKN method (\ref{Heq:csrkn1}-\ref{Heq:csrkn3}) satisfy \begin{subequations} \begin{alignat}{2} \label{sym_cond_orig01} \bar{B}_\tau&=B_\tau(1-C_\tau),\quad\tau\in I,\\ \label{sym_cond_orig02} B_\tau(\bar{B}_\sigma-\bar{A}_{\tau,\sigma})&=B_\sigma(\bar{B}_\tau -\bar{A}_{\sigma,\tau}),\quad\tau,\sigma\in I, \end{alignat} \end{subequations} then the method is symplectic for solving the system \eqref{eq:Hs}. \end{thm} \begin{proof} The proof is essentially the same as that of Theorem 4.2 in \cite{Tangsz18hos}, with the range of integration replaced by a general interval $I$. \end{proof} \begin{rem} Theorem \ref{symp_cond_ori} implies that the symplecticity of the csRKN methods is independent of the weight function. \end{rem} \begin{thm}\label{constr_symcsRKN} Suppose that $C_\tau=\tau$ and $\bar{A}_{\tau,\sigma}/B_\sigma\in L_w^2(I\times I)$, then the symplectic condition given in Theorem \ref{symp_cond_ori} is equivalent to the fact that $\bar{B}_\tau$ and $\bar{A}_{\tau,\sigma}$ have the following form in terms of the normalized orthogonal polynomials $P_n(x)$ in $L_w^2(I)$ \begin{equation}\label{sym_cond} \begin{split} \bar{B}_\tau&=B_\tau(1-\tau),\quad\tau\in I,\\ \bar{A}_{\tau,\sigma}&=B_\sigma\Big(\alpha_{(0,0)}+\alpha_{(0,1)}P_1(\sigma) +\alpha_{(1,0)}P_1(\tau)+\sum\limits_{i+j>1}\alpha_{(i,j)} P_i(\tau)P_j(\sigma)\Big),\quad\tau,\sigma\in I, \end{split} \end{equation} where $\alpha_{(0,0)}$ is an arbitrary real number, $\alpha_{(0,1)}-\alpha_{(1,0)}=-\big\langle x,\,P_1(x)\big\rangle_w$ (see \eqref{w_ip}), and the parameters $\alpha_{(i,j)}$ are symmetric, i.e., $\alpha_{(i,j)}=\alpha_{(j,i)}$ for $\forall\,i+j>1$.
\end{thm} \begin{proof} On account of $C_\tau=\tau$, we have $$\bar{B}_\tau=B_\tau(1-\tau),$$ inserting it into \eqref{sym_cond_orig02}, then it yields \begin{equation}\label{reduced_symp} B_\tau\bar{A}_{\tau,\,\sigma}-B_\sigma\bar{A}_{\sigma,\,\tau}=B_\tau B_\sigma(\tau-\sigma), \end{equation} which leads to \begin{equation}\label{eq:AB0} \frac{\bar{A}_{\tau,\,\sigma}}{B_\sigma}-\frac{\bar{A}_{\sigma,\,\tau}}{B_\tau}= \tau-\sigma. \end{equation} Here we assume $B_\tau\neq0$; otherwise the csRKN method would not be practical, as it would possess no order of accuracy. With the help of $\tau=\sum^1_{i=0}\big\langle x,\,P_i(x)\big\rangle_w P_i(\tau)$ and noticing that $P_0(\tau)=P_0(\sigma)=\text{constant}$, \eqref{eq:AB0} becomes \begin{equation}\label{eq:AB} \begin{split} \frac{\bar{A}_{\tau,\,\sigma}}{B_\sigma}-\frac{\bar{A}_{\sigma,\,\tau}}{B_\tau}&= \sum^1_{i=0}\big\langle x,\,P_i(x)\big\rangle_w P_i(\tau)-\sum^1_{i=0}\big\langle x,\,P_i(x)\big\rangle_w P_i(\sigma),\\ &=\big\langle x,\,P_1(x)\big\rangle_w \big(P_1(\tau)-P_1(\sigma)\big). \end{split} \end{equation} Next, consider the expansion of $\bar{A}_{\tau,\,\sigma}/B_\sigma$ along the normalized orthogonal basis $\left\{P_i(\tau)P_j(\sigma)\right\}_{i,j=0}^\infty$ of $L^2_w(I\times I)$ \begin{equation}\label{AdivB} \bar{A}_{\tau,\,\sigma}/B_\sigma=\alpha_{(0,0)}+\alpha_{(0,1)}P_1(\sigma) +\alpha_{(1,0)}P_1(\tau)+\sum\limits_{i+j>1}\alpha_{(i,j)} P_i(\tau)P_j(\sigma),\quad\alpha_{(i,j)}\in\mathbb{R}. \end{equation} By exchanging $\tau$ and $\sigma$ it gives \begin{equation*} \bar{A}_{\sigma,\,\tau}/B_\tau=\alpha_{(0,0)}+\alpha_{(0,1)}P_1(\tau) +\alpha_{(1,0)}P_1(\sigma)+\sum\limits_{i+j>1}\alpha_{(j,i)} P_j(\sigma)P_i(\tau), \end{equation*} where we have interchanged the indexes $i$ and $j$.
By substituting the above two expressions into \eqref{eq:AB}, it yields \begin{equation}\label{coef_Ats} \alpha_{(0,0)}\in\mathbb{R},\;\,\alpha_{(0,1)}-\alpha_{(1,0)}=-\big\langle x,\,P_1(x)\big\rangle_w, \;\,\alpha_{(i,j)}=\alpha_{(j,i)},\;\forall\,i+j>1, \end{equation} which completes the proof by using \eqref{AdivB}. \end{proof} \begin{thm}\label{sym_quad} If the coefficients of a csRKN method (\ref{Heq:csrkn1}-\ref{Heq:csrkn3}) satisfy the symplectic conditions (\ref{sym_cond_orig01}-\ref{sym_cond_orig02}), then the RKN method \eqref{RKN:qua} derived by using the quadrature formula \eqref{wquad} is always symplectic. \end{thm} \begin{proof} Please refer to Theorem 4.1 of \cite{Tangz18spc} for a similar proof. \end{proof} \begin{thm}\label{sym_design} With the hypothesis $C_\tau=\tau$, for a symplectic csRKN method with coefficients satisfying (\ref{sym_cond_orig01}-\ref{sym_cond_orig02}), we have the following statements: \begin{itemize} \item[\emph{(a)}] $\mathcal{B}(\xi)$ and $\mathcal{CN}(\eta)$ $\Longrightarrow$ $\mathcal{DN}(\zeta)$, where $\zeta=\min\{\xi,\,\eta\}$; \item[\emph{(b)}] $\mathcal{B}(\xi)$ and $\mathcal{DN}(\zeta)$ $\Longrightarrow$ $\mathcal{CN}(\eta)$, where $\eta=\min\{\xi,\,\zeta\}$. \end{itemize} \end{thm} \begin{proof} Here we only provide the proof of (a), as (b) can be proved in a similar manner. In the proof of Theorem \ref{constr_symcsRKN}, we have obtained the formula \eqref{reduced_symp} by using the hypothesis $C_\tau=\tau$. Based on this, by multiplying both sides of \eqref{reduced_symp} by $\sigma^{\kappa-1}$ and integrating, it gives \begin{equation}\label{symp_assum} B_\tau\int_I\bar{A}_{\tau,\sigma}\sigma^{\kappa-1}\,\mathrm{d} \sigma-\int_IB_\sigma\sigma^{\kappa-1} \bar{A}_{\sigma,\tau}\,\mathrm{d} \sigma= B_\tau \int_IB_\sigma\sigma^{\kappa-1}(\tau-\sigma)\,\mathrm{d} \sigma,\;\;\; \kappa=1,2,\cdots,\zeta-1.
\end{equation} Now let $\zeta=\min\{\xi,\,\eta\}$, and then $\mathcal{B}(\zeta)$ and $\mathcal{CN}(\zeta)$ can be used for calculating the integrals of \eqref{symp_assum}. As a result, we have \begin{equation*} B_\tau\frac{\tau^{\kappa+1}}{\kappa(\kappa+1)}-\int_IB_\sigma\sigma^{\kappa-1} \bar{A}_{\sigma,\tau}\,\mathrm{d} \sigma= \frac{B_\tau \tau}{\kappa}-\frac{B_\tau}{\kappa+1},\;\;\; \kappa=1,2,\cdots,\zeta-1. \end{equation*} Recalling that $C_\tau=\tau$, it gives rise to \begin{equation*} \int_IB_\sigma\sigma^{\kappa-1} \bar{A}_{\sigma,\tau}\,\mathrm{d} \sigma= \frac{B_\tau C_\tau^{\kappa+1}}{\kappa(\kappa+1)}-\frac{B_\tau C_\tau}{\kappa} +\frac{B_\tau}{\kappa+1},\;\;\; \kappa=1,2,\cdots,\zeta-1. \end{equation*} Finally, by exchanging $\tau\leftrightarrow\sigma$ in the formula above, it gives $\mathcal{DN}(\zeta)$ with $\zeta=\min\{\xi,\,\eta\}$. \end{proof} \begin{rem} A counterpart result for classical symplectic RKN methods can be similarly obtained. \end{rem} On the basis of these preliminaries, following the same idea of \cite{Tang18siw}, we introduce an operational \emph{procedure} for deriving symplectic RKN-type integrators:\\ \textbf{Step 1.} Let $C_\tau=\tau,\,\bar{B}_\tau=B_\tau(1-C_\tau)$ and make an ansatz for $B_\tau$ by using \eqref{Bt} so as to satisfy $\mathcal{B}(\xi)$.
Note that a finite number of parameters, say $\lambda_\iota$, could be kept as free parameters;\\ \textbf{Step 2.} Suppose $\bar{A}_{\tau,\,\sigma}$ is in the form (by Theorem \ref{constr_symcsRKN}, a truncation is needed) \begin{equation}\label{Ats_proc} \bar{A}_{\tau,\sigma}=B_\sigma\Big(\alpha_{(0,0)}+\alpha_{(0,1)}P_1(\sigma) +\alpha_{(1,0)}P_1(\tau)+\sum\limits_{i+j>1}\alpha_{(i,j)} P_i(\tau)P_j(\sigma)\Big), \end{equation} where the parameters $\alpha_{(i,j)}$ satisfy \eqref{coef_Ats}, and then substitute $\bar{A}_{\tau,\,\sigma}$ into\footnote{An alternative technique is to consider using $\mathcal{DN}(\zeta)$.} $\mathcal{CN}(\eta)$ (usually let $\eta<\xi$) for determining $\alpha_{(i,j)}$: \begin{equation*} \int_I\bar{A}_{\tau,\,\sigma}w(\sigma)\phi_k(\sigma)\,\mathrm{d} \sigma=\int_0^\tau \int_0^\alpha\phi_k(x)\,\mathrm{d} x\,\mathrm{d}\alpha,\;\;k=0,1,\cdots,\eta-2. \end{equation*} Here, $\phi_k(x)$ stands for any polynomial of degree $k$, which plays a role very similar to that of the ``test function" used in general finite element analysis; \\ \textbf{Step 3.} Write down $B_\tau,\,\bar{B}_\tau$ and $\bar{A}_{\tau,\,\sigma}$ (satisfying $\mathcal{B}(\xi)$ and $\mathcal{CN}(\eta)$ automatically), which results in a symplectic csRKN method of order at least $\min\{\xi,\,2\eta+2,\,\eta+\zeta\}=\min\{\xi,\,\eta+\zeta\}$ with $\zeta=\min\{\xi,\,\eta\}$ by Theorems \ref{ord_csRKN} and \ref{sym_design}. If needed, we can then acquire symplectic RKN methods by using quadrature rules (see Theorem \ref{sym_quad}). The procedure above gives a general framework for deriving symplectic integrators. In view of Theorems \ref{qua:csRKN} and \ref{sym_design}, it is suggested to design Butcher coefficients with low-degree $\bar{A}_{\tau,\,\sigma}$ and $B_\tau$, and it is better to take $\eta\approx\frac{1}{2}\xi$.
Besides, for the sake of conveniently computing those integrals of $\mathcal{CN}(\eta)$ in the second step, the following ansatz may be advisable (let $\rho\geq\eta$ and $\xi\geq2\eta-1$) \begin{equation}\label{symBA} \begin{split} C_\tau&=\tau,\quad B_\tau=\sum\limits_{j=0}^{\xi-1}\int_0^1P_j(x)\,\mathrm{d} x P_j(\tau), \quad\bar{B}_\tau=B_\tau(1-\tau),\\ \bar{A}_{\tau,\sigma}&=B_\sigma\Big(\alpha_{(0,0)}+\alpha_{(0,1)}P_1(\sigma) +\alpha_{(1,0)}P_1(\tau)+\sum_{1<i+j\in \mathbb{Z}\atop i\leq\rho,\,j\leq \xi-\eta+1}\alpha_{(i,j)} P_i(\tau)P_j(\sigma)\Big), \end{split} \end{equation} where $\alpha_{(0,1)}-\alpha_{(1,0)}=-\big\langle x,\,P_1(x)\big\rangle_w, \;\alpha_{(i,j)}=\alpha_{(j,i)},\;i+j>1$. Because the index $j$ is restricted by $j\leq \xi-\eta+1$ in \eqref{symBA}, we can use $\mathcal{B}(\xi)$ to arrive at (cf. \eqref{eq:cd1}) \begin{equation*} \begin{split} &\int_I\bar{A}_{\tau,\,\sigma}w(\sigma)\phi_k(\sigma)\,\mathrm{d} \sigma\\ &=\int_IB_\sigma\Big(\alpha_{(0,0)}+\alpha_{(0,1)}P_1(\sigma) +\alpha_{(1,0)}P_1(\tau)+\sum_{1<i+j\in \mathbb{Z}\atop i\leq\rho,\,j\leq \xi-\eta+1}\alpha_{(i,j)} P_i(\tau)P_j(\sigma)\Big)w(\sigma)\phi_k(\sigma)\,\mathrm{d} \sigma\\ &=\big(\alpha_{(0,0)}+\alpha_{(1,0)}P_1(\tau)\big)\int_0^1 \phi_k(x)\,\mathrm{d} x+\alpha_{(0,1)}\int_0^1 P_1(x)\phi_k(x)\,\mathrm{d} x\\ &\;\;\;+\sum_{1<i+j\in \mathbb{Z}\atop i\leq\rho,\,j\leq \xi-\eta+1}\alpha_{(i,j)} P_i(\tau)\int_0^1P_j(x)\phi_k(x)\,\mathrm{d} x,\quad0\leq k\leq\eta-2.
\end{split} \end{equation*} Therefore, $\mathcal{CN}(\eta)$ implies that \begin{equation}\label{symp_eqs} \begin{split} &\big(\alpha_{(0,0)}+\alpha_{(1,0)}P_1(\tau)\big)\int_0^1 \phi_k(x)\,\mathrm{d} x+\alpha_{(0,1)}\int_0^1 P_1(x)\phi_k(x)\,\mathrm{d} x\\ &+\sum_{1<i+j\in \mathbb{Z}\atop i\leq\rho,\,j\leq \xi-\eta+1}\alpha_{(i,j)}P_i(\tau)\int_0^1P_j(x)\phi_k(x)\,\mathrm{d} x=\int_0^\tau \int_0^\alpha\phi_k(x)\,\mathrm{d} x\,\mathrm{d}\alpha,\quad0\leq k\leq\eta-2, \end{split} \end{equation} where $\alpha_{(0,1)}-\alpha_{(1,0)}=-\big\langle x,\,P_1(x)\big\rangle_w, \;\alpha_{(i,j)}=\alpha_{(j,i)},\;i+j>1$. Finally, one needs to determine $\alpha_{(i,j)}$ by transposing, comparing, and merging similar terms of \eqref{symp_eqs}, after the polynomial on the right-hand side of \eqref{symp_eqs} has been represented in the basis $\{P_j(\tau)\}_{j=0}^\infty$. In view of the symmetry of $\alpha_{(i,j)}$, if we let $r=\min\{\rho,\xi-\eta+1\}$, then actually the number of degrees of freedom of these parameters is $(r+1)(r+2)/2$, by noticing that \begin{equation*} \alpha_{(i,j)}=0, \;\;\text{for}\;i>r\;\text{or}\;j>r. \end{equation*} \subsection{Symmetric integrators} Theoretical analyses and a large number of numerical tests indicate that symmetric integrators applied to (near-)integrable reversible systems share similar properties to symplectic integrators applied to (near-)integrable Hamiltonian systems: linear error growth, near-conservation of first integrals, and existence of invariant tori \cite{hairerlw06gni}. The good long-time behavior of symmetric integrators motivates us to construct new integrators of this type. \begin{defn}\cite{hairerlw06gni} A numerical one-step method $\phi_h$ is called symmetric (or time-reversible) if it satisfies $$\phi^*_h=\phi_h,$$ where $\phi^*_h=\phi^{-1}_{-h}$ is referred to as the adjoint method of $\phi_h$. \end{defn} \begin{rem} Symmetry implies that the original method and the adjoint method give identical numerical results.
A well-known property of symmetric integrators is that they possess an \emph{even order} \cite{hairerlw06gni}. By the definition, a one-step method $z_1=\phi_h(z_0; t_0,t_1)$ is symmetric if exchanging $h\leftrightarrow -h$, $z_0\leftrightarrow z_1$ and $t_0\leftrightarrow t_1$ leaves the original method unaltered. \end{rem} In order to derive symmetric integrators, we assume the interval $I$ to be one of the following two cases: \begin{itemize} \item[(i)] $I=[a,b]$ (finite interval) with $a+b=1$; \item[(ii)] $I=(-\infty,+\infty)$ (infinite interval). \end{itemize} In what follows, we first establish the adjoint method of a given csRKN method. From (\ref{eq:csrkn1}-\ref{eq:csrkn3}), by interchanging $t_0, q_0, q'_0, h$ with $t_1, q_1, q'_1, -h$, respectively, we have \begin{subequations} \begin{alignat}{3} \label{eq:rever1} &Q_\tau=q_1 -hC_\tau q'_1 +h^2\int_I \bar{A}_{\tau, \sigma}w(\sigma) f(t_1-C_\sigma h, Q_\sigma) \mathrm{d} \sigma, \;\;\tau \in I, \\ \label{eq:rever2} &q_{0}=q_1- h q'_1+h^2 \int_I \bar{B}_\tau w(\tau) f(t_1-C_\tau h, Q_\tau) \mathrm{d}\tau, \\ \label{eq:rever3} &q'_0 = q'_1-h\int_I B_\tau w(\tau) f(t_1-C_\tau h, Q_\tau) \mathrm{d}\tau. \end{alignat} \end{subequations} Noting that $t_1-C_\tau h=t_0+(1-C_\tau)h$, \eqref{eq:rever3} becomes \begin{equation}\label{recast01} q'_1=q'_0+h\int_I B_\tau w(\tau)f(t_0+(1-C_\tau)h, Q_\tau) \mathrm{d}\tau, \end{equation} substituting it into \eqref{eq:rever2} then we get \begin{equation}\label{recast02} q_1=q_{0}+hq'_0+h^2 \int_I (B_\tau-\bar{B}_\tau) w(\tau) f(t_0+(1-C_\tau)h, Q_\tau)\mathrm{d}\tau. \end{equation} Next, inserting \eqref{recast01} and \eqref{recast02} into \eqref{eq:rever1}, it follows that \begin{equation}\label{recast03} Q_\tau=q_0 +h(1-C_\tau)q'_0+h^2\int_I\Big(B_\sigma(1-C_\tau)-\bar{B}_\sigma+\bar{A}_{\tau, \sigma}\Big) w(\sigma) f(t_0+(1-C_\sigma) h, Q_\sigma) \mathrm{d} \sigma.
\end{equation} By a change of variables (replacing $\tau$ and $\sigma$ with $1-\tau$ and $1-\sigma$ respectively), \eqref{recast03}, \eqref{recast02} and \eqref{recast01} can be recast as \begin{equation}\label{adjo} \begin{split} &Q^*_\tau=q_0+hC^*_\tau q'_0 +h^2\int_I\bar{A}^*_{\tau, \sigma}w(1-\sigma) f(t_0+C^*_\sigma h, Q^*_\sigma) \mathrm{d} \sigma, \;\;\tau \in I, \\ &q_{1}=q_0+ h q'_0+h^2 \int_I \bar{B}^*_\tau w(1-\tau) f(t_0+C^*_\tau h, Q^*_\tau) \mathrm{d}\tau, \\ &q'_1 = q'_0 +h\int_I B^*_\tau w(1-\tau) f(t_0+C^*_\tau h, Q^*_\tau) \mathrm{d}\tau, \end{split} \end{equation} where $Q^*_\tau=Q_{1-\tau},\,\tau\in I$ and \begin{equation}\label{adjo:coe} \begin{split} C^*_\tau&=1-C_{1-\tau},\\ \bar{A}^*_{\tau,\sigma}&=B_{1-\sigma}(1-C_{1-\tau})-\bar{B}_{1-\sigma}+\bar{A}_{1-\tau, 1-\sigma},\\ \bar{B}^*_\tau&=B_{1-\tau}-\bar{B}_{1-\tau},\\ B^*_\tau&=B_{1-\tau}, \end{split} \end{equation} for $\tau,\,\sigma\in I$. Consequently, we get the adjoint method given by (\ref{adjo}-\ref{adjo:coe}). Hence if we require \begin{equation*} C_\tau=C^*_\tau,\,\bar{A}_{\tau,\sigma}w(\sigma)=\bar{A}^*_{\tau,\sigma}w(1-\sigma), \,\bar{B}_\tau w(\tau)=\bar{B}^*_\tau w(1-\tau),\,B_\tau w(\tau)=B^*_\tau w(1-\tau), \end{equation*} then the original csRKN method is symmetric. We summarize the results above in the following theorem.
\begin{thm}\label{symm_cond_ori} If a csRKN method (\ref{eq:csrkn1}-\ref{eq:csrkn3}) satisfies \begin{equation}\label{sym_conds01} \begin{split} C_\tau&=1-C_{1-\tau},\\ \bar{A}_{\tau,\sigma}w(\sigma)&=\Big(B_{1-\sigma}(1-C_{1-\tau})- \bar{B}_{1-\sigma}+\bar{A}_{1-\tau,1-\sigma}\Big)w(1-\sigma),\\ \bar{B}_\tau w(\tau)&=\big(B_{1-\tau}-\bar{B}_{1-\tau}\big) w(1-\tau),\\ B_\tau w(\tau)&=B_{1-\tau} w(1-\tau), \end{split} \end{equation} for $\forall\,\tau,\,\sigma\in I$, then the method is symmetric. Particularly, if the weight function $w(x)$ satisfies $w(x)\equiv w(1-x)$, then the symmetric condition \eqref{sym_conds01} becomes \begin{equation}\label{sym_conds02} \begin{split} C_\tau&=1-C_{1-\tau},\\ \bar{A}_{\tau,\sigma}&=B_{1-\sigma}(1-C_{1-\tau})-\bar{B}_{1-\sigma}+\bar{A}_{1-\tau,1-\sigma},\\ \bar{B}_\tau &=B_{1-\tau}-\bar{B}_{1-\tau},\\ B_\tau&=B_{1-\tau}, \end{split} \end{equation} for $\forall\,\tau,\,\sigma\in I$. \end{thm} \begin{thm}\label{symm_quad} If $w(x)\equiv w(1-x)$ and the coefficients of the underlying csRKN method (\ref{eq:csrkn1}-\ref{eq:csrkn3}) satisfy \eqref{sym_conds02}, then the RKN method with tableau \eqref{RKN:qua} is symmetric, provided that the weights and nodes of the quadrature formula satisfy $b_{s+1-i}=b_i$ and $c_{s+1-i}=1-c_i$ for all $i$. \end{thm} \begin{proof} Please refer to Theorem 4.1 of \cite{Tangz18sib} for a similar proof. \end{proof} \begin{rem} Theorems \ref{symm_cond_ori} and \ref{symm_quad} are also applicable to the case of the csRKN method (\ref{Heq:csrkn1}-\ref{Heq:csrkn3}). \end{rem} By using the orthogonal polynomial expansion technique, we obtain a useful result for designing symmetric integrators.
\begin{thm}\label{symmcon3} Suppose that $w(x)\equiv w(1-x),\,C_\tau=\tau,\,\bar{B}_\tau=B_\tau(1-C_\tau)$ and $\bar{A}_{\tau,\sigma}/B_\sigma\in L_w^2(I\times I)$, then the symmetric condition \eqref{sym_conds01} is equivalent to the fact that $\bar{A}_{\tau,\sigma}$ has the following form in terms of the orthogonal polynomials $P_n(x)$ in $L_w^2(I)$ \begin{equation}\label{symmconvari} \bar{A}_{\tau,\sigma}=B_\sigma\Big(\alpha_{(0,0)}+\alpha_{(0,1)}P_1(\sigma) +\alpha_{(1,0)}P_1(\tau)+\sum\limits_{i+j\,\text{is}\,\text{even} \atop 1<i+j\in \mathbb{Z}}\alpha_{(i,j)} P_i(\tau)P_j(\sigma)\Big),\quad\alpha_{(i,j)}\in \mathbb{R}, \end{equation} with $B_{\sigma}\equiv B_{1-\sigma}$, where $\alpha_{(0,1)}=-\alpha_{(1,0)}=-\frac{1}{2}\big<x,\,P_1(x)\big>_w$, provided that the orthogonal polynomials $P_n(x)$ satisfy \begin{equation}\label{symm_relation} P_n(1-x)=(-1)^nP_n(x),\;n\in \mathbb{Z}. \end{equation} \end{thm} \begin{proof} We only give the proof for the necessity, seeing that the sufficiency part is rather trivial. Under the assumption $w(x)\equiv w(1-x)$, we get \eqref{sym_conds02}. Hence, by using $C_\tau=\tau,\,\bar{B}_\tau=B_\tau(1-C_\tau)$ and $B_{\sigma}\equiv B_{1-\sigma}$, the second formula of \eqref{sym_conds02} becomes \begin{equation*} \bar{A}_{\tau,\sigma}-\bar{A}_{1-\tau,1-\sigma}=B_{\sigma}(\tau-\sigma), \end{equation*} which leads to \begin{equation}\label{ABsymm} \frac{\bar{A}_{\tau,\sigma}}{B_{\sigma}}-\frac{\bar{A}_{1-\tau,1-\sigma}}{B_{1-\sigma}}=\tau-\sigma. \end{equation} Analogously to the proof of Theorem \ref{constr_symcsRKN}, let us consider the expansion of $\bar{A}_{\tau,\,\sigma}/B_\sigma$ given by \eqref{AdivB}. 
By using \eqref{AdivB} and \eqref{symm_relation}, it yields \begin{equation*} \bar{A}_{1-\tau,\,1-\sigma}/B_{1-\sigma}=\alpha_{(0,0)}-\alpha_{(0,1)}P_1(\sigma) -\alpha_{(1,0)}P_1(\tau)+\sum\limits_{i+j>1}(-1)^{i+j}\alpha_{(i,j)} P_i(\tau)P_j(\sigma). \end{equation*} By substituting \eqref{AdivB} and the above formula into \eqref{ABsymm} and comparing the like basis, it gives \eqref{symmconvari}. \end{proof} For the sake of employing Theorem \ref{symmcon3}, we also need some useful results which are quoted from \cite{Tang18aef}. \begin{thm}\cite{Tang18aef} If $w(x)$ is an even function, i.e., satisfying $w(-x)\equiv w(x)$, then the shifted function defined by $\widehat{w}(x)=w(2\theta x-\theta)$ satisfies the symmetry relation: $\widehat{w}(x)\equiv \widehat{w}(1-x)$. Here $\theta$ is a non-zero constant. \end{thm} \begin{thm}\cite{Tang18aef}\label{shiftpol} If a sequence of polynomials $\{P_n(x)\}_{n=0}^\infty$ satisfy the symmetry relation \begin{equation}\label{symm_relat} P_n(-x)=(-1)^nP_n(x),\;n\in \mathbb{Z}, \end{equation} then the shifted polynomials defined by $\widehat{P}_n(x)=P_n(2\theta x-\theta)$ are bound to satisfy the property \eqref{symm_relation}. Here $\theta$ is a non-zero constant. \end{thm} \begin{thm}\cite{Tang18aef} If a sequence of polynomials $\{P_n(x)\}_{n=0}^\infty$ satisfy \eqref{symm_relation}, then we have \begin{equation*} \int_0^1P_j(x)\,\mathrm{d} x=0,\;\;\text{for}\;\;j\;\;\text{is}\;\;\text{odd}, \end{equation*} and the following function \begin{equation} B_\sigma=\sum\limits_{j=0}^{\xi-1}\int_0^1P_j(x)\,\mathrm{d} x P_j(\sigma)+\sum\limits_{\text{even}\;j\geq \xi}\lambda_j P_j(\sigma),\;\; \xi\geq1, \end{equation} always satisfies $B_{\sigma}\equiv B_{1-\sigma}$. 
\end{thm} As pointed out in \cite{Tang18aef}, many classical (standard) orthogonal polynomials including Hermite polynomials, Legendre polynomials, Chebyshev polynomials of the first and second kind, and any other general Gegenbauer polynomials etc., do not satisfy \eqref{symm_relation}, but they possess the symmetry property \eqref{symm_relat}. Nevertheless, by using Theorem \ref{shiftpol} we can always shift them to a suitable interval such that the condition \eqref{symm_relation} is fulfilled. With these discussions, we can also propose an operational \emph{procedure} for constructing symmetric integrators in a similar way as that given in the preceding subsection. \subsection{Some examples} In the following, we provide some examples for illustrating the application of our theoretical results. On account of \eqref{coef_Ats}, we only present the values of $\alpha_{(i,j)}$ with $i\leq j$ in our examples. Besides, the Gaussian-Christoffel's quadrature rules (please see \eqref{wquad}) will be used, which means the quadrature nodes $c_1, c_2,\cdots, c_s$ are exactly the zeros of the normalized orthogonal polynomial $P_s(x)$ in $L^2_w(I)$. For the sake of deriving symmetric methods we mainly consider the weighted orthogonal polynomials shifted into a suitable interval. \begin{exa}\label{Legend} Consider using the shifted normalized Legendre polynomials which are orthogonal with respect to the weight function $w(x)=1$ on $[0,1]$. These Legendre polynomials $L_n(x)$ can be defined by Rodrigues' formula \cite{hairerw96sod} \begin{equation*} L_0(x)=1,\;L_n(x)=\frac{\sqrt{2n+1}}{n!}\frac{\mathrm{d}^n}{\mathrm{d} x^n} \Big(x^n(x-1)^n\Big),\;x\in[0,1],\;\;n=1,2,\cdots. \end{equation*} \end{exa} Let $\xi=3,\,\eta=2,\,\rho=2$ in \eqref{symBA}, $r=2$ and thus the number of degrees of freedom is $(r+1)(r+2)/2=6$. For simplicity, we set $\alpha_{(i,j)}=0$ for $0\leq i, j\leq2,i+j>2$. 
After some elementary calculations, it gives \begin{equation*} \alpha_{(0,0)}=\frac{1}{6},\;\;\alpha_{(0,1)}=-\frac{\sqrt{3}}{12},\; \;\alpha_{(1,1)}=\mu,\;\;\alpha_{(0,2)}=\frac{\sqrt{5}}{60}, \end{equation*} where $\alpha_{(1,1)}=\mu$ is a free parameter, then we get a $\mu$-parameter family of symmetric (by Theorem \ref{symmcon3}) and symplectic csRKN methods of order $4$. By using Gauss-Christoffel's quadrature rules with $2$ nodes, we get a family of symmetric and symplectic RKN methods of order $4$ which are shown in Tab. \ref{exa1:symp01} with $\gamma=\frac{1}{2}\mu$. It is found that this family of methods coincides with the methods presented in \cite{Tangsz18hos}. \begin{table} \[\begin{array}{c|cc} \frac{3-\sqrt{3}}{6}& \frac{1}{12}+\gamma& \frac{1-\sqrt{3}}{12}-\gamma\\[2pt] \frac{3+\sqrt{3}}{6} & \frac{1+\sqrt{3}}{12}-\gamma& \frac{1}{12}+\gamma\\[2pt] \hline & \frac{3+\sqrt{3}}{12}& \frac{3-\sqrt{3}}{12}\\[2pt] \hline & \frac{1}{2}& \frac{1}{2} \end{array} \] \caption{A family of $2$-stage $4$-order symmetric and symplectic RKN methods, based on the shifted Legendre polynomials $L_n(x)$.}\label{exa1:symp01} \end{table} \begin{exa}\label{Cheby} Consider using the shifted normalized Chebyshev polynomials of the first kind which are orthogonal with respect to the weight function $w(x)=\frac{1}{2\sqrt{x(1-x)}}$ on $[0,1]$. These Chebyshev polynomials $T_n(x)$ can be defined by \cite{Tang18csm} \begin{equation*} T_0(x)=\frac{\sqrt{2}}{\sqrt{\pi}},\;\;T_n(x)=\frac{2\cos \big(n\arccos(2x-1)\big)}{\sqrt{\pi}},\;x\in[0,1],\;\;n=1,2,\cdots. \end{equation*} \end{exa} Let $\xi=3,\,\eta=2,\,\rho=2$ in \eqref{symBA} and set $\alpha_{(i,j)}=0$ for $0\leq i, j\leq2,i+j>2$. 
After some elementary calculations, it gives \begin{equation*} \alpha_{(0,0)}=\frac{5}{24},\;\;\alpha_{(0,1)}=-\frac{\sqrt{\pi}}{8},\; \;\alpha_{(1,1)}=\mu,\;\;\alpha_{(0,2)}=\frac{\sqrt{2}}{64}\pi, \end{equation*} where $\alpha_{(1,1)}=\mu$ is a free parameter, then we get a $\mu$-parameter family of symplectic csRKN methods of order at least $3$. However, since we have $\alpha_{(0,1)}=-\frac{1}{2}\big<x,\,T_1(x)\big>_w=-\frac{\sqrt{\pi}}{8}$ and other conditions of Theorem \ref{symmcon3} are fulfilled, the newly-derived methods are symmetric and of an even order $4$. By using Gauss-Christoffel's quadrature rules with $3$ nodes, we get a family of symmetric and symplectic RKN methods of order $4$ which are shown in Tab. \ref{exa2:symp01} with $\gamma=\frac{2\mu}{3\pi}$. \begin{table} \[\begin{array}{c|ccc} \frac{2-\sqrt{3}}{4} & \frac{13}{216}+\gamma & \frac{85-60\sqrt{3}}{864} & \frac{13-12\sqrt{3}}{216}-\gamma\\[2pt] \frac{1}{2} &\frac{17+12\sqrt{3}}{432} & \frac{5}{108} & \frac{17-12\sqrt{3}}{432}\\[2pt] \frac{2+\sqrt{3}}{4} & \frac{13+12\sqrt{3}}{216}-\gamma & \frac{85-60\sqrt{3}}{864} & \frac{13}{216}+\gamma\\[2pt] \hline & \frac{2+\sqrt{3}}{18} & \frac{5}{18} & \frac{2-\sqrt{3}}{18} \\[2pt] \hline & \frac{2}{9} & \frac{5}{9} & \frac{2}{9} \end{array} \] \caption{A family of $3$-stage $4$-order symmetric and symplectic RKN methods, based on the shifted Chebyshev polynomials of the first kind $T_n(x)$.}\label{exa2:symp01} \end{table} \begin{table} \[\begin{array}{c|ccc} \frac{2-\sqrt{6}}{4} & \frac{11}{216}+\gamma & \frac{91-42\sqrt{6}}{432} & \frac{11-6\sqrt{6}}{216}-\gamma\\[2pt] \frac{1}{2} & \frac{13+6\sqrt{6}}{432} & \frac{7}{108} & \frac{13-6\sqrt{6}}{432}\\[2pt] \frac{2+\sqrt{6}}{4} & \frac{11+6\sqrt{6}}{216}-\gamma & \frac{91+42\sqrt{6}}{432} & \frac{11}{216}+\gamma\\[2pt] \hline & \frac{2+\sqrt{6}}{36} & \frac{7}{18} & \frac{2-\sqrt{6}}{36}\\[2pt] \hline & \frac{1}{9} & \frac{7}{9} & \frac{1}{9}\end{array}\] \caption{A family of 
$3$-stage $4$-order symmetric and symplectic RKN methods, based on the shifted Hermite polynomials $\widehat{H}_n(x)$.}\label{exa3:symp01} \end{table} \begin{table} \[\begin{array}{c|ccc} -\frac{\sqrt{6}}{2} & \frac{4-3\sqrt{6}}{432} & \frac{70-21\sqrt{6}}{108} & \frac{40+87\sqrt{6}}{432}\\[2pt] 0 & \frac{-7-9\sqrt{6}}{216} & \frac{7}{108} & \frac{-7+9\sqrt{6}}{216}\\[2pt] \frac{\sqrt{6}}{2} & \frac{40-87\sqrt{6}}{432} & \frac{70+21\sqrt{6}}{108} & \frac{4+3\sqrt{6}}{432} \\[2pt] \hline & \frac{-5-\sqrt{6}}{36} & \frac{7}{9} & \frac{-5+\sqrt{6}}{36}\\[2pt] \hline & \frac{4-3\sqrt{6}}{36} & \frac{7}{9} & \frac{4+3\sqrt{6}}{36} \end{array}\] \caption{A $3$-stage $3$-order symplectic RKN method (non-symmetric), based on the Hermite polynomials $H_n(x)$.}\label{exa3:symp02} \end{table} \begin{exa}\label{Hermite} Consider using the shifted normalized Hermite polynomials which are orthogonal with respect to the weight function $\widehat{w}(x)=e^{-(2x-1)^2}$ on $(-\infty,+\infty)$. These Hermite polynomials $\widehat{H}_n(x)$ can be defined by \cite{Tang18aef} \begin{equation*} \widehat{H}_n(x)=\sqrt{2}H_n(2x-1),\;\;x\in(-\infty,+\infty),\;\;n=0,1,\cdots, \end{equation*} where $H_n(x)$ is the standard normalized $n$-degree Hermite polynomial \begin{equation}\label{standardHerm} H_0(x)=\frac{1}{\pi^{\frac{1}{4}}},\;H_n(x)=\frac{(-1)^ne^{x^2}}{\sqrt{2^nn!}\pi^{\frac{1}{4}}} \frac{\mathrm{d}^n}{\mathrm{d} x^n}\big(e^{-x^2}\big),\;\;x\in(-\infty,+\infty),\;\;n=1,2,\cdots, \end{equation} with the weight function given by $w(x)=e^{-x^2}$. \end{exa} Let $\xi=3,\,\eta=2,\,\rho=2$ in \eqref{symBA} and set $\alpha_{(i,j)}=0$ for $0\leq i, j\leq2,i+j>2$. 
After some elementary calculations, it gives \begin{equation*} \alpha_{(0,0)}=\frac{5}{24},\;\;\alpha_{(0,1)}=-\frac{\pi^{\frac{1}{4}}}{8},\; \;\alpha_{(1,1)}=\mu,\;\;\alpha_{(0,2)}=\frac{\sqrt{2\pi}}{32}, \end{equation*} where $\alpha_{(1,1)}=\mu$ is a free parameter, then we get a $\mu$-parameter family of symplectic csRKN methods of order at least $3$. However, since we have $\alpha_{(0,1)}=-\frac{1}{2}\big<x,\,\widehat{H}_1(x)\big>_{\widehat{w}} =-\frac{\pi^{\frac{1}{4}}}{8}$ and other conditions of Theorem \ref{symmcon3} are fulfilled, the newly-derived methods are symmetric and of an even order $4$. By using Gauss-Christoffel's quadrature rules with $3$ nodes, we get a family of symmetric and symplectic RKN methods of order $4$ which are shown in Tab. \ref{exa3:symp01} with $\gamma=\frac{2\mu}{3\sqrt{\pi}}$. We claim that if we do not use the shifted Hermite polynomials $\widehat{H}_n(x)$, then it may result in symplectic methods without the symmetric property. Let us consider using $H_n(x)$ to construct symplectic methods, take the same $\xi=3,\,\eta=2,\,\rho=2$, and also set $\alpha_{(i,j)}=0$ for $0\leq i, j\leq2,i+j>2$. Additionally, we impose $\alpha_{(0,1)}=-\frac{1}{2}\big<x,\,H_1(x)\big>_w =-\frac{\sqrt{2}\pi^{\frac{1}{4}}}{4}$, then we get \begin{equation*} \alpha_{(0,0)}=\frac{7}{12},\;\;\alpha_{(0,1)}=-\frac{\sqrt{2}\pi^{\frac{1}{4}}}{4},\; \;\alpha_{(1,1)}=-\frac{\sqrt{\pi}}{2},\;\;\alpha_{(0,2)}=\frac{\sqrt{2\pi}}{4}. \end{equation*} In such a case, we get a symplectic csRKN method with order $3$. One can verify that such a method does not satisfy all the bushy tree order conditions for order $4$ (see $\mathcal{B}(\xi)$), e.g., \begin{equation*} \int_{-\infty}^{+\infty}B_\tau w(\tau) C_\tau^3\,\mathrm{d} \tau=\int_{-\infty}^{+\infty}\frac{7+6\tau-2\tau^2}{6\sqrt{\pi}}e^{-\tau^2}\tau^3\,\mathrm{d} \tau=\frac{3}{4}\neq\frac{1}{4}, \end{equation*} hence it is of an odd order and can not be a symmetric method. 
Besides, by using corresponding Gauss-Christoffel's quadrature rules with $3$ nodes, we get a $3$-stage $3$-order symplectic RKN method which is shown in Tab. \ref{exa3:symp02}. Although it looks as if the quadrature weights and nodes possess a kind of ``symmetry'', the method is essentially not symmetric according to the classical symmetric conditions for RKN methods \cite{okunbors92ecm}. \section{Numerical tests}\label{sec:numerical_examples} In this section, we perform some numerical tests to verify our theoretical results. For ease of description and comparison studies, we denote our methods shown in Tab.~\ref{exa1:symp01}-\ref{exa3:symp02} (with $\gamma=0$) by Legendre-4, Chebyshev-4, Hermite-4 and Hermite-3 in turn and all of them will be applied to two classical mechanical problems. \begin{figure} \caption{Energy (Hamiltonian) errors by four new symplectic RKN methods for Kepler's problem, with step size $h=0.1$.} \caption{Angular momentum errors by four new symplectic RKN methods for Kepler's problem, with step size $h=0.1$.} \label{ex1_f1} \label{ex1_f2} \end{figure} \begin{figure} \caption{RLP invariant errors by four new symplectic RKN methods for Kepler's problem, with step size $h=0.1$.} \caption{Solution errors by four new symplectic RKN methods for Kepler's problem, with step size $h=0.1$.} \label{ex1_f3} \label{ex1_f4} \end{figure} \begin{figure} \caption{Numerical orbits by four new symplectic RKN methods for Kepler's problem, with step size $h=0.1$.} \label{ex1_f5} \end{figure} \begin{exa} Consider the numerical integration of the well-known Kepler's problem \cite{hairerlw06gni}. The Kepler's problem describes the motion of two bodies which attract each other under the universal gravity. The motion of two-bodies can be described by \begin{equation}\label{Kepler} q''_1=-\frac{q_1}{(q_1^2+q_2^2)^{\frac{3}{2}}}, \quad q''_2=-\frac{q_2}{(q_1^2+q_2^2)^{\frac{3}{2}}}. 
\end{equation} \end{exa} By introducing the momenta $p_1=q'_1, p_2=q'_2$, we can transform \eqref{Kepler} into a nonlinear Hamiltonian system with Hamiltonian \begin{equation*} H=\frac{1}{2}(p_1^2+p_2^2)-\frac{1}{\sqrt{q_1^2+q_2^2}}. \end{equation*} Besides the Hamiltonian, the system possesses two other invariants: the quadratic angular momentum \begin{equation*} I=q_1p_2-q_2p_1=q^T\left( \begin{array}{cc} 0 & 1 \\ -1 & 0 \\ \end{array} \right)q',\;\;q=\left( \begin{array}{c} q_1 \\ q_2 \\ \end{array} \right), \end{equation*} and the Runge-Lenz-Pauli-vector (RLP) invariant \begin{equation*} L=\left( \begin{array}{c} p_1 \\ p_2 \\ 0 \\ \end{array} \right)\times \left( \begin{array}{c} 0 \\ 0 \\ q_1p_2-q_2p_1 \\ \end{array} \right)-\frac{1}{\sqrt{q_1^2+q_2^2}}\left( \begin{array}{c} q_1 \\ q_2 \\ 0 \\ \end{array} \right). \end{equation*} In our numerical tests, we take the initial values as \begin{equation*} q_1(0)=1, \;q_2(0)=0,\;p_1(0)=0, \;p_2(0)=1, \end{equation*} and the corresponding exact solution is \begin{equation*} q_1(t)=\cos(t),\;\;q_2(t)=\sin(t),\;\;p_1(t)=-\sin(t),\;\;p_2(t)=\cos(t). \end{equation*} Applying our symplectic integrators to \eqref{Kepler}, we compute the approximation errors of the numerical solution to the exact solution, as well as the errors in terms of the above three invariants. These errors are shown in Fig. \ref{ex1_f1}-\ref{ex1_f4}, where the errors at each time step are carried out in the maximum norm $||x||_{\infty}=\max(|x_1|,\cdots,|x_n|)$ for $x=(x_1,\cdots,x_n)\in \mathbb{R}^n$. It indicates that all the symplectic integrators show a near-preservation of the Hamiltonian and RLP invariant, and a practical preservation (up to the machine precision) of the quadratic angular momentum --- symplectic RKN methods can preserve all quadratic invariants of the form $q^TDq'$ with $D$ a skew-symmetric matrix (see \cite{hairerlw06gni}, page 104). 
The solution errors of $p$-variable and $q$-variable measured in Euclidean norm are shown in Fig. \ref{ex1_f4} which implies a linear error growth. It is observed that amongst the four methods the Hermite-4 method gives the best result, while the Hermite-3 method is inferior to the other three methods due to its lower accuracy. Moreover, all the numerical orbits by four methods (see Fig. \ref{ex1_f5}) are in the shape of an ellipse, closely approximating to the exact one (we do not show it here). These numerical observations have well conformed with the common features of symplectic integration. \begin{figure} \caption{Energy (Hamiltonian) errors by four new symplectic RKN methods for the H\'{e}non-Heiles problem, with step size $h=0.1$.} \caption{Chaotic orbits by four new symplectic RKN methods for the H\'{e}non-Heiles problem, with step size $h=0.1$.} \label{ex2_f1} \label{ex2_f2} \end{figure} \begin{exa} Consider the numerical integration of the well-known H\'{e}non-Heiles model problem \cite{hairerlw06gni}, which was created for describing stellar motion. The problem can be described by \begin{equation}\label{HH} q''_1=-q_1-2q_1q_2, \quad q''_2=-q_2-q_1^2+q_2^2. \end{equation} \end{exa} It is clear to see that \eqref{HH} can be reduced to a first-order Hamiltonian system determined by the Hamiltonian \begin{equation*} H=\frac{1}{2}(p_1^2+p_2^2)+\frac{1}{2}(q_1^2+q_2^2)+q_1^2q_2-\frac{1}{3}q_2^3. \end{equation*} In our experiment, the initial values are taken as \begin{equation*} q_1(0)=0.1, \;q_2(0)=-0.5,\;p_1(0)=0, \;p_2(0)=0, \end{equation*} which will result in a chaotic behavior and the chaotic orbits should stay in the interior zone of an equilateral triangle \cite{hairerlw06gni,quispelm08anc}. We present our numerical results in Fig. \ref{ex2_f1} and \ref{ex2_f2}. It is observed that all the symplectic methods have a good near-preservation of the energy (see Fig. \ref{ex2_f1}) and they numerically reproduce the correct behavior of the original system without points escaping from the equilateral triangle (see Fig. \ref{ex2_f2}). 
\section{Concluding remarks} The constructive theory of continuous-stage Runge-Kutta-Nystr\"{o}m methods is examined in this paper. We establish a new framework for such methods by introducing a weight function into the formalism and allowing the range of integration to be a general interval $I$ (finite or infinite). Particularly, we intensively discuss its applications in the geometric integration of second-order differential equations. A systematic way for deriving symplectic and symmetric integrators is presented. We stress that our crucial technique for deriving these geometric integrators is the orthogonal polynomial expansion and the simplifying assumptions for order conditions. It is hoped that in the near future other new applications of the presented theoretical results will be discovered. \end{document}
\begin{document} \title{Finitely presented simple groups with at least exponential Dehn function} \date{\today} \subjclass[2020]{Primary 20F65; Secondary 20E08} \keywords{Simple group, Dehn function, self-similar group, R\"over--Nekrashevych group, Thompson group, Baumslag--Solitar group} \author[M.~C.~B.~Zaremsky]{Matthew C.~B.~Zaremsky} \address{Department of Mathematics and Statistics, University at Albany (SUNY), Albany, NY} \email{[email protected]} \begin{abstract} We construct examples of finitely presented simple groups whose Dehn functions are at least exponential. To the best of our knowledge, these are the first such examples known. Our examples arise from R\"over--Nekrashevych groups, using carefully calibrated self-similar representations of Baumslag--Solitar groups. \end{abstract} \maketitle \thispagestyle{empty} \section*{Introduction} Finitely presented simple groups have enjoyed a recent surge of interest from a geometric and topological standpoint. Caprace and R\'emy proved in \cite{caprace09,caprace10} that there exist infinitely many quasi-isometry classes of finitely presented simple groups. In \cite{skipper19}, Skipper, Witzel, and the author found examples of finitely presented simple groups with arbitrary finiteness length. Hyde and Lodha recently found examples of finitely presented simple groups that are left-orderable \cite{hyde23}. This followed a great deal of work constructing and analyzing finitely generated left-orderable simple groups \cite{hyde19,mattebon20,hyde21,fournierfacio21}. Another recent geometric result about finitely generated simple groups, proved by Belk and the author \cite{belk22}, and independently by Darbinyan and Steenbock \cite{darbinyan22}, is that every finitely generated group isometrically embeds as a subgroup of a finitely generated simple group. In this paper we investigate another topic of interest in geometric group theory, namely Dehn functions, applied to finitely presented simple groups. 
The Dehn function of a finitely presented group can be viewed as a measurement of how difficult it is to finitely present the group. The larger the Dehn function, the harder it is to realize every relation as a consequence of the finitely many defining relations. It is well known that a finitely presented group has solvable word problem if and only if its Dehn function is recursively defined, and in general the Dehn function can also be viewed as a geometric measurement of how difficult it is to solve the word problem (or at least as a sort of upper bound). Finitely presented simple groups have solvable word problem \cite{kuznetsov58}, hence recursively defined Dehn function. However, among the existing examples of finitely presented simple groups where something is known about their Dehn function, the function is not only recursive but polynomial, i.e., very small. More precisely, to the best of the author's knowledge, the only finitely presented simple groups where something is known about their Dehn functions are the Burger--Mozes groups \cite{burger00}, which have quadratic Dehn function thanks to acting geometrically on a product of trees, and Thompson's groups $T$ and $V$, which are known to have polynomial Dehn function (the best bounds we are aware of are $\delta_T\preceq n^5$ \cite{wang15} and $\delta_V\preceq n^{11}$ \cite{guba00}, and one would conjecture that they are quadratic, like for Thompson's group $F$ \cite{guba06}). Presumably, close relatives like the Higman--Thompson groups $T_n$ and $V_n$, which are virtually simple, also have polynomial Dehn functions, using similar arguments. Thus, all known examples have ``very small'' Dehn function. 
It is worth mentioning the Brin--Thompson groups $nV$ \cite{brin04}, which are finitely presented and simple, and whose Dehn functions are unknown; it turns out that if $nV$ ($n\ge 2$) has polynomial Dehn function (or even embeds in a finitely presented group with polynomial Dehn function), then $\sf{NP}=\sf{coNP}$ \cite{birget20}. In this paper, we construct examples of finitely presented simple groups whose Dehn functions are strictly larger than polynomial, namely they are at least exponential. The main tool is the family of R\"over--Nekrashevych groups $V_d(G)$ of self-similar groups $G$, introduced in \cite{roever99,nekrashevych04}. Our specific examples are denoted \[ [V_{n+2}(BS(1,n)),V_{n+2}(BS(1,n))] \text{,} \] the notation of which we now unpack. The group $BS(1,n)$ is the usual Baumslag--Solitar group $BS(1,n)=\langle a,b\mid aba^{-1}=b^n\rangle$ for $n\ge 2$, which has exponential Dehn function \cite{gersten92}. We find a certain self-similar representation of $BS(1,n)$ acting on the infinite rooted $(n+2)$-ary tree $\mathcal{T}_{n+2}$, inspired by Bartholdi and Sunic's self-similar representation of $BS(1,n)$ acting on $\mathcal{T}_{n+1}$ from \cite{bartholdi06}. This representation is calibrated to have a variety of properties, inspired by \cite{skipper19}, which ensure that, among other things, the R\"over--Nekrashevych group $V_{n+2}(BS(1,n))$ has $BS(1,n)$ as a quasi-retract, and its commutator subgroup has finite index and is simple. Putting all of this together yields our main result: \begin{main:exist_simple} There exist finitely presented simple groups with at least exponential Dehn function. \end{main:exist_simple} One might also like to find an upper bound, and presumably say that these examples have precisely exponential Dehn function, but for a couple reasons we do not approach this here. 
On the one hand, this would involve completely different techniques than those used here, and on the other hand, this deserves to be part of a broader program to find upper bounds on Dehn functions of arbitrary R\"over--Nekrashevych groups. In particular, we suspect that the Dehn function of any $V_d(G)$ should be bounded above by some combination of the Dehn functions of $V_d$ and $G$, and this could perhaps be approached by looking at the action of $V_d(G)$ on a simply connected, cocompact truncation of the Stein--Farley complex of $V_d(G)$ (see, e.g., \cite{skipper21}). In any case, this is all beyond the scope of the present paper. It would be very interesting to find examples of finitely presented self-similar groups $G$ with even larger Dehn functions, which could then perhaps lead to finitely presented simple groups $[V_d(G),V_d(G)]$ with even larger Dehn functions, using the techniques here. There is a restriction though, that self-similar implies residually finite, and examples of residually finite groups with large Dehn function were only recently found by Kharlampovich, Myasnikov, and Sapir in \cite{kharlampovich17} using some very complicated constructions. We do not know whether their groups admit faithful self-similar representations. Another, easier, example of groups with very large Dehn function comes from the ``hydra groups'' of Dison and Riley \cite{dison13}, but Pueschel proved that these are not residually finite \cite{pueschel16}. One could also try to bypass self-similarity by looking for examples among twisted Brin--Thompson groups \cite{belk22}, which have similar properties to R\"over--Nekrashevych groups but have the advantage that the input group $G$ can be any group, not necessarily self-similar. The downside is that, unlike for R\"over--Nekrashevych groups, finite presentability of the twisted Brin--Thompson group does not follow for free from finite presentability of $G$, and in fact is rather difficult to achieve. 
As a remark, one reason to expect that there exist finitely presented simple groups with large (perhaps even arbitrarily large recursive) Dehn function is the Boone--Higman Conjecture, which predicts that every finitely generated group with solvable word problem embeds in a finitely presented simple group \cite{boone74}. If this holds, then embedding a group with arbitrarily difficult, solvable word problem into a finitely presented simple group would provide an arbitrarily large, recursive lower bound on the Dehn function of the simple group. The solution to the word problem in \cite{kuznetsov58} for finitely presented simple groups does not give any particular uniform upper complexity bound, so a priori there is not any reason to doubt that arbitrarily large, recursive Dehn functions are possible. This paper is organized as follows. In Section~\ref{sec:dehn} we recall some background on Dehn functions and quasi-retracts. In Section~\ref{sec:nekr} we discuss self-similar groups and R\"over--Nekrashevych groups, along with the various properties of self-similar actions that will lead to our results. Finally, in Section~\ref{sec:examples} we construct our examples. \subsection*{Acknowledgments} Thanks are due to Emmanuel Rauzy and Giles Gardam for helpful comments and pointing out references. The author is supported by grant \#635763 from the Simons Foundation. \section{Dehn functions and quasi-retracts}\label{sec:dehn} In this section we recall some background on Dehn functions and quasi-retracts. \subsection{Dehn functions}\label{ssec:dehn} We will not need to use too many details about Dehn functions, so we just give a quick definition and overview following \cite[Section~I.8A.4]{bridson99}. Let $G=\langle S\mid R\rangle$ be a finite presentation, so $S$ is a finite set, $R$ is a finite subset of the free group $F(S)$, and $G$ is the quotient of $F(S)$ by the normal closure of $R$. Write $\pi\colon F(S)\to G$ for this quotient map. 
An element of $\ker(\pi)$ can be written as a product of elements of $S^\pm$, or as a product of elements of the set $(R^\pm)^{F(S)}$ of conjugates of $R^\pm$ (here we write $X^\pm$ to mean the union of $X$ with the set of inverses of elements of $X$). Roughly speaking, the Dehn function of $G$ measures how different the lengths of these expressions can be. To be more precise, for $w\in\ker(\pi)$ define the \emph{area} $\Area(w)$ of $w$ to be the word length of $w$ in the (likely infinite) generating set $(R^\pm)^{F(S)}$ of $\ker(\pi)$. Also define the \emph{length} $\ell(w)$ of $w$ to be its usual word length in the generating set $S$ of $F(S)$. Now the \emph{Dehn function} of $G$ is the function $\delta_G\colon \mathbb{N}\to\mathbb{N}$ defined via \[ \delta_G(n) := \max\{\Area(w)\mid w\in\ker(\pi)\text{, }\ell(w)\le n\}\text{.} \] Note that for any $n$, only finitely many $w$ have length at most $n$, so $\delta_G$ is well defined. (As a remark, up until now we have not actually needed $R$ to be finite, but this is a necessary assumption for various upcoming results to be true.) The function $\delta_G$ as defined depends on the choice of finite presentation for $G$, so we tend to consider Dehn functions up to a certain equivalence relation. Given two functions $f,g\colon \mathbb{N}\to\mathbb{N}$, write $f\preceq g$ if there is a constant $K>0$ such that for all $n\in\mathbb{N}$ we have $f(n)\le Kg(Kn)+Kn$. If $f\preceq g$ and $g\preceq f$, write $f\simeq g$. This is an equivalence relation, and it turns out that the Dehn functions arising from two finite presentations of the same group are equivalent. More generally, the Dehn functions of any two quasi-isometric finitely presented groups are equivalent. Note that if $f\simeq g$ then certain features are common to both $f$ and $g$. For instance, if one is linear then so is the other, and more generally if one is a polynomial of degree $m\ge 1$ then so is the other. 
If one of $f$ or $g$ is an exponential function, then so is the other, perhaps with a different base (for example $f(n)=2^n$ and $g(n)=3^n$ are equivalent since $3^n=2^{\log_2(3)n}$). Thus it makes sense to say that a finitely presented group, ``has a polynomial Dehn function,'' or, ``has an exponential Dehn function.'' \subsection{Quasi-retracts}\label{ssec:qr} The proof that Dehn functions of quasi-isometric finitely presented groups are equivalent, which for example is in \cite{alonso90}, actually shows that if $H$ is a so called quasi-retract of $G$, then $\delta_H \preceq \delta_G$. This is implicit in \cite{alonso90}, and is stated explicitly for example in \cite[Theorem~3]{alonso96}. Let us recall the details of quasi-retracts now. A function $f\colon X\to Y$ between metric spaces, with metrics $d_X$ and $d_Y$ respectively, is called \emph{$(C,D)$-Lipschitz} for $C\ge 1$ and $D\ge 0$ if for all $x,x'\in X$ we have \[ d_Y(f(x),f(x'))\le C d_X(x,x') + D \text{.} \] \begin{definition}[Quasi-retract(ion)] Let $X$ and $Y$ be metric spaces, with metrics $d_X$ and $d_Y$ respectively. If there exist $(C,D)$-Lipschitz functions $r\colon X\to Y$ and $\iota\colon Y\to X$ such that $d_Y(r\circ\iota(y),y)\le D$ for all $y\in Y$, then we call $r$ a \emph{quasi-retraction}, and call $Y$ a \emph{quasi-retract} of $X$. \end{definition} \begin{cit}\cite{alonso90,alonso96}\label{cit:qr_dehn} Let $G$ and $H$ be finitely presented groups, viewed as metric spaces via word metrics coming from finite generating sets. Suppose $H$ is a quasi-retract of $G$. Then $\delta_H \preceq \delta_G$. \end{cit} As a remark, if a pair of functions satisfy the quasi-retraction condition when composed in either order, then they are quasi-isometries. Thus, we get that Dehn functions are a quasi-isometry invariant of finitely presented groups. 
\section{Self-similar groups and R\"over--Nekrashevych groups}\label{sec:nekr} In this section we discuss the source of our examples of finitely presented simple groups. Let $\mathcal{T}_d$ be the infinite rooted $d$-ary tree. We identify the vertex set of $\mathcal{T}_d$ with the set $\{1,\dots,d\}^*$ of finite words in the alphabet $\{1,\dots,d\}$; the root is the empty word $\varnothing$. Two vertices are adjacent if they are of the form $w$ and $wi$ for some $i\in\{1,\dots,d\}$. An \emph{automorphism} of $\mathcal{T}_d$ is a bijection from $\{1,\dots,d\}^*$ to itself that preserves adjacency. Now consider the group $\Aut(\mathcal{T}_d)$ of automorphisms of $\mathcal{T}_d$. Since the root is the only vertex of degree $d$, every automorphism preserves the measurement ``distance to root''. In particular the set $\{1,\dots,d\}$ of the children of the root is stabilized by every automorphism. This gives us a surjective homomorphism $\rho_d\colon \Aut(\mathcal{T}_d)\to S_d$, which clearly splits. The kernel of $\rho_d$ is isomorphic to $\Aut(\mathcal{T}_d)^d$, and so we get a wreath product decomposition \[ \Aut(\mathcal{T}_d) \cong S_d \wr \Aut(\mathcal{T}_d) \text{.} \] For $g\in \Aut(\mathcal{T}_d)$, the \emph{wreath recursion} of $g$ is the identification $g\leftrightarrow\rho_d(g)(g_1,\dots,g_d)$ induced by this isomorphism. The automorphisms $g_i$ are called the \emph{level-1 states} of $g$. The \emph{states} of $g$ are the elements of the smallest set containing $g$ that is closed under taking level-1 states. \begin{definition}[Self-similar] Call a subgroup $G\le\Aut(\mathcal{T}_d)$ \emph{self-similar} if for all $g\in G$, every state of $g$ is in $G$. (Equivalently, for all $g\in G$, every level-1 state of $g$ is in $G$.) \end{definition} For a wealth of background on self-similar groups, see \cite{nekrashevych05}. Note that sometimes in the literature ``self-similar'' requires $\rho_d(G)$ to act transitively on $\{1,\dots,d\}$, but we do not require this. 
We will also refer to a \emph{self-similar action} of a group, which is a homomorphism from the group to $\Aut(\mathcal{T}_d)$ whose image is self-similar. When we call a group self-similar, we are really implicitly referring to a fixed faithful self-similar action of the group. Given a group $G$ together with a declared wreath recursion for each element of some generating set, we get a well defined self-similar action of $G$ assuming that the defining relations of $G$ are satisfied by the wreath recursions. \begin{definition}[Rational] An element of a self-similar group is \emph{rational} (or \emph{finite-state}) if it has finitely many states. Call the group itself \emph{rational} if every element is rational. \end{definition} If every generator of the group is rational, then the same is true of every element, so it suffices to check rationality on the elements of some choice of generating set. Now we shift focus to the boundary of $\mathcal{T}_d$, which is the $d$-ary Cantor set $C_d=\{1,\dots,d\}^\mathbb{N}$. For each $w\in\{1,\dots,d\}^*$, the \emph{cone} on $w$ is the basic open set $C_d(w):=\{w\kappa\mid \kappa\in C_d\}$ in $C_d$. Any cone is canonically homeomorphic to $C_d$, via the \emph{canonical homeomorphism} \[ h_w \colon C_d \to C_d(w) \] sending $\kappa$ to $w\kappa$. \begin{definition}[R\"over--Nekrashevych group] Let $G\le \Aut(\mathcal{T}_d)$ be self-similar. The \emph{R\"over--Nekrashevych group} $V_d(G)$ is the subgroup of $\Homeo(C_d)$ consisting of all homeomorphisms constructed as follows: \begin{enumerate} \item Partition $C_d$ into finitely many cones $C_d(w_1^+),\dots,C_d(w_n^+)$. \item Partition $C_d$ into the same number of cones in some possibly different way $C_d(w_1^-),\dots,C_d(w_n^-)$. \item Map $C_d$ to itself by sending each $C_d(w_i^+)$ to some $C_d(w_j^-)$ via the map $h_{w_j^-}\circ g_i\circ h_{w_i^+}^{-1}$ for some $g_i\in G$. 
\end{enumerate} \end{definition} In words, an element of $V_d(G)$ acts on a cone $C_d(w_i^+)$ in the domain partition by removing the old prefix $w_i^+$, then acting via some element of $G$, and then adding a new prefix $w_j^-$. The self-similarity condition ensures that $V_d(G)$ is closed under compositions, and so really is a group. R\"over--Nekrashevych groups were introduced first by R\"over in \cite{roever99} for the special case when $G$ is the Grigorchuk group from \cite{grigorchuk80,grigorchuk84}, and in generality by Nekrashevych in \cite{nekrashevych04}. If $G$ is finitely generated, then so is $V_d(G)$, and if $G$ is finitely presented, then so is $V_d(G)$ (more generally if $G$ is of type $\F_n$ then so is $V_d(G)$ \cite[Theorem~4.15]{skipper19}). \begin{definition}[Weakly diagonal] Call a self-similar group $G\le\Aut(\mathcal{T}_d)$ \emph{weakly diagonal} if there exists a generating set $S$ for $G$ such that for all $s\in S$ with wreath recursion $s\leftrightarrow\rho_d(s)(s_1,\dots,s_d)$, each $s_i$ satisfies that $s_is^{-1}[G,G]$ has finite order in the abelianization $G/[G,G]$. (And call a self-similar action weakly diagonal if its image is.) \end{definition} In \cite{skipper19}, the key property of a self-similar group $G$ that ensured virtual simplicity of $V_d(G)$ was being ``coarsely diagonal''---this means that for all $g\in G$, for any level-1 state $g_i$, the element $g^{-1}g_i$ has finite order (which is equivalent to $g_i g^{-1}$ having finite order). Our notion of weakly diagonal here is a much weaker condition, since it only requires a condition on generators, and only requires finite order in the abelianization. The examples we will construct later will not be coarsely diagonal, so we really need this new notion of weakly diagonal. As we now see, it is still sufficient to ensure virtual simplicity. \begin{lemma}\label{lem:v_simple} If $G\le\Aut(\mathcal{T}_d)$ is self-similar and weakly diagonal, then $V_d(G)$ is virtually simple. 
More precisely, the commutator subgroup $[V_d(G),V_d(G)]$ is simple and has finite index in $V_d(G)$. \end{lemma} \begin{proof} Nekrashevych proved that the commutator subgroup $[V_d(G),V_d(G)]$ is always simple \cite[Theorem~4.7]{nekrashevych04}, so it suffices to prove that $V_d(G)$ has finite abelianization. We will roughly follow the strategy from the proof of \cite[Theorem~3.3]{skipper19}, where it was assumed that $G$ is coarsely diagonal (here we only assume it is weakly diagonal). The group $V_d(G)$ is generated by the Higman--Thompson group $V_d=V_d(\{1\})$ together with a certain copy of $G$, which we now explain. Let $\iota_1\colon V_d(G)\to V_d(G)$ send $h$ to the homeomorphism defined by $\iota_1(h)(1\kappa):=1h(\kappa)$ and $\iota_1(h)(i\kappa)=i\kappa$ for all $2\le i\le d$ and all $\kappa\in C_d$. In words, $\iota_1(h)$ acts like $h$ on the cone $C_d(1)$ and acts trivially everywhere else. Now $V_d(G)$ is generated by $V_d$ and $\iota_1(G)$. More generally, for any $w\in \{1,\dots,d\}^*$, let $\iota_w\colon V_d(G)\to V_d(G)$ send $h$ to the homeomorphism that acts like $h$ on $C_d(w)$ and trivially everywhere else. It is easy to see that $\iota_w(h)$ is conjugate in $V_d(G)$ to $\iota_{w'}(h)$ for any $h\in V_d(G)$ and any non-empty $w$ and $w'$. Now it suffices to prove that every generator of $V_d(G)$ from the generating set $V_d\cup\iota_1(G)$ has finite order in the abelianization. The group $V_d$ is virtually simple, so elements of $V_d$ have finite order in the abelianization. Since $G$ is weakly diagonal, we can choose a generating set $S$ for $G$ satisfying the property from the definition of weakly diagonal. Now $\iota_1(G)$ is generated by $\iota_1(S)$, so we want to show that every element of $\iota_1(S)$ has finite order in the abelianization of $V_d(G)$. For $s\in S$, let $s\leftrightarrow \rho_d(s)(s_1,\dots,s_d)$ be the wreath recursion of $s$. Using the $\iota_w$ maps, this is the same as $s=\rho_d(s) \iota_1(s_1)\cdots\iota_d(s_d)$.
Since $\iota_1$ is a homomorphism, and clearly $\iota_w\circ \iota_{w'}=\iota_{ww'}$ for any $w$ and $w'$, we get $\iota_1(s)=\sigma \iota_{11}(s_1)\cdots\iota_{1d}(s_d)$ for some permutation $\sigma$. Since each $\iota_{1i}(s_i)$ is conjugate to $\iota_1(s_i)$, we conclude that $\iota_1(s_1)\cdots\iota_1(s_d)\iota_1(s)^{-1}$ has finite order in the abelianization. Next note that by weak diagonality each $s_i s^{-1}$ has finite order in the abelianization, and since $\iota_1$ is a homomorphism the same is true of each $\iota_1(s_i)\iota_1(s)^{-1}$. Now multiplying $\iota_1(s_1)\cdots\iota_1(s_d)\iota_1(s)^{-1}$ by the inverse of each $\iota_1(s_i)\iota_1(s)^{-1}$, we conclude that $\iota_1(s)^{d-1}$ has finite order in the abelianization. Since $d\ge 2$, this implies that $\iota_1(s)$ has finite order in the abelianization, as desired. \end{proof} \begin{definition}[Persistent] Call a self-similar group $G\le \Aut(\mathcal{T}_d)$ \emph{persistent} if for all $g\in G$, in the wreath recursion $g\leftrightarrow\rho_d(g)(g_1,\dots,g_d)$ we have $g_d=g$. \end{definition} \begin{lemma}\label{lem:persistent} For any self-similar group $G\le \Aut(\mathcal{T}_{d-1})$, there is a faithful, persistent, self-similar action of $G$ on $\mathcal{T}_d$. If the action on $\mathcal{T}_{d-1}$ is rational then so is the action on $\mathcal{T}_d$. If the action on $\mathcal{T}_{d-1}$ is weakly diagonal then so is the action on $\mathcal{T}_d$. \end{lemma} \begin{proof} For $g\in G$ write the wreath recursion of $g$ as $g\leftrightarrow\rho_{d-1}(g)(g_1,\dots,g_{d-1})$. Now define an action of $G$ on $\mathcal{T}_d$ by first extending the action $\rho_{d-1}$ of $G$ on $\{1,\dots,d-1\}$ to an action $\rho_d$ of $G$ on $\{1,\dots,d\}$ by fixing $d$, and then recursively defining an action on all of $\mathcal{T}_d$ via the wreath recursion $g\leftrightarrow\rho_d(g)(g_1,\dots,g_{d-1},g)$. This new action is persistent by construction, so we just need to prove that it is faithful. 
Note that there is a natural copy of $\mathcal{T}_{d-1}$ inside $\mathcal{T}_d$, coming from the natural inclusion of $\{1,\dots,d-1\}$ into $\{1,\dots,d\}$, and the action of $G$ on $\mathcal{T}_d$ stabilizes $\mathcal{T}_{d-1}$. The restriction of the new action of $G$ on this copy of $\mathcal{T}_{d-1}$ is equal to the original action, and so we conclude that since the action of $G$ on $\mathcal{T}_{d-1}$ is faithful, so is the action of $G$ on $\mathcal{T}_d$. The set of states of a given element under the action on $\mathcal{T}_{d-1}$ is equal to the set of states of that element under the action on $\mathcal{T}_d$, so if the original action is rational, so is the new action. If the original action is weakly diagonal, then it is trivial to see that the new action is too. \end{proof} \begin{corollary}\label{cor:simple_and_qr} Let $G\le\Aut(\mathcal{T}_d)$ be a finitely generated, persistent, weakly diagonal, rational, self-similar group. Then $V_d(G)$ is virtually simple by virtue of $[V_d(G),V_d(G)]$ being simple and finite index, and there exists a quasi-retraction $V_d(G)\to G$. If $G$ is finitely presented, then so is $V_d(G)$ and we have $\delta_G\preceq \delta_{V_d(G)}$. \end{corollary} \begin{proof} Lemma~\ref{lem:v_simple} says that $[V_d(G),V_d(G)]$ is simple and finite index. The existence of a quasi-retraction follows from \cite[Proposition~5.5]{skipper19}. If $G$ is finitely presented, then so is $V_d(G)$, for example by \cite[Theorem~4.15]{skipper19}. That $\delta_G \preceq \delta_{V_d(G)}$ now follows from Citation~\ref{cit:qr_dehn}. \end{proof} \section{Our examples}\label{sec:examples} Now we can build our examples of finitely presented simple groups with at least exponential Dehn function. They will arise as commutator subgroups of R\"over--Nekrashevych groups where the self-similar input group is a Baumslag--Solitar group. 
Recall that the \emph{Baumslag--Solitar group} $BS(m,n)$ is the group \[ BS(m,n):=\langle a,b\mid ab^m a^{-1} = b^n\rangle \text{.} \] For most values of $(m,n)$, the group $BS(m,n)$ does not stand a chance of being self-similar, since it is not even residually finite. (It is easy to see that $\Aut(\mathcal{T}_d)$ is residually finite, and hence so are all self-similar groups.) Thus, we focus on the residually finite case of $m=1$, where moreover $BS(1,n)$ is known to be self-similar \cite{bartholdi06}. It is well known that $BS(1,n)$ has exponential Dehn function for all $n\ge 2$ \cite{gersten92}. Let us explicitly realize $BS(1,n)$ ($n\ge 2$) as a self-similar group, using wreath recursions inspired by the automata in \cite{bartholdi06}. We want to define an action of $BS(1,n)$ on $\mathcal{T}_{n+1}$. First let $\rho_{n+1}\colon BS(1,n)\to S_{n+1}$ send $a$ to $\alpha:=(2~n+1)(3~n)(4~n-1)\cdots$ and $b$ to $\beta:=(1~2~\cdots~n+1)$. To be more precise, if $n$ is even then $\alpha$ ends with $(\frac{n}{2}+1 ~ \frac{n}{2}+2)$ and if $n$ is odd then it ends with $(\frac{n-1}{2}+1 ~ \frac{n-1}{2}+3)$. It is easy to check that $\alpha\beta\alpha^{-1}=\beta^n$, so $\rho_{n+1}$ is well defined. Now we extend this to an action on all of $\mathcal{T}_{n+1}$ via the wreath recursions \[ a\leftrightarrow \alpha(a,a,ba,b^2a,\dots,b^{n-1}a)\text{ and } b\leftrightarrow \beta(1,\dots,1,b) \text{.} \] \begin{proposition}\label{prop:check_everything} The self-similar action of $BS(1,n)$ on $\mathcal{T}_{n+1}$ defined by the above wreath recursions is well defined, faithful, rational, and weakly diagonal. \end{proposition} \begin{proof} To check that the action is well defined, we need to confirm that the words $ab$ and $b^n a$ have the same action on $\mathcal{T}_{n+1}$.
Concatenating the wreath recursions, we compute that $ab$ corresponds to \begin{align*} \alpha(a,a,ba,b^2a,\dots,b^{n-1}a)\beta(1,\dots,1,b) &= \alpha\beta(a,ba,b^2a,\dots,b^{n-1}a,a)(1,\dots,1,b) \\ &= \alpha\beta(a,ba,b^2a,\dots,b^{n-1}a,ab) \end{align*} and $b^n a$ corresponds to \begin{align*} (\beta(1,\dots,1,b))^n \alpha(a,a,ba,b^2a,\dots,b^{n-1}a) &= \beta^{-1}(1,b,\dots,b) \alpha(a,a,ba,b^2a,\dots,b^{n-1}a) \\ &= \beta^{-1}\alpha(1,b,\dots,b)(a,a,ba,b^2a,\dots,b^{n-1}a) \\ &= \alpha\beta(a,ba,b^2a,\dots,b^{n-1}a,b^na)\text{.} \end{align*} Thus we see that the wreath recursion of $b^{-1}a^{-1}b^n a$ is $b^{-1}a^{-1}b^n a \leftrightarrow (1,\dots,1,b^{-1}a^{-1}b^n a)$, which means $b^{-1}a^{-1}b^n a$ acts trivially on $\mathcal{T}_{n+1}$ as desired. To check that the action is faithful, we will use the fact that every non-trivial normal subgroup of $BS(1,n)$ contains a non-trivial power of $b$. Indeed, any element can be written in the form $a^{-k}b^q a^\ell$ for $k,\ell\ge 0$ and $q\in\mathbb{Z}$, so if a normal subgroup contains this element, then conjugating by $a$ it must also contain $a^{-k}b^{nq} a^\ell$, hence $a^{-k}b^{(n-1)q} a^k$, hence $b^{(n-1)q}$. Now if our original element is non-trivial, then either $q\ne 0$, so $b^{(n-1)q}$ is non-trivial and we are done, or else $q=0$ and our non-trivial element was $a^{\ell-k}$. Thus our normal subgroup contains $a^{\ell-k} b a^{-(\ell-k)} b^{-1} = b^{n^{\ell-k} - 1}$, which is a non-trivial power of $b$. Now to see that the action is faithful, we just need to show that no non-trivial power of $b$ acts trivially on $\mathcal{T}_{n+1}$, but this is immediate by construction. Now we prove that the action is rational. We just need to check that the generators $a$ and $b$ have finitely many states. Clearly $1$ and $b$ are the only states of $b$. We claim that the states of $a$ comprise the finite set $\{a,ba,b^2 a,\dots,b^n a\}$. 
The level-1 states of $a$ are precisely these, except $b^n a$, which is a level-1 state of $ba$, hence a level-2 state of $a$. To see that this set is state-closed, note that for any of $1,b,b^2,\dots,b^n$, the level-1 states are all $1$ or $b$. Thus for any elements of our set, the level-1 states are all of the form $1$ or $b$ times a level-1 state of $a$, which is again in our set. Finally, we need to show that this action is weakly diagonal. Note that $b^{n-1}=aba^{-1}b^{-1}$ is trivial in the abelianization of $BS(1,n)$. Thus, every power of $b$ has finite order in the abelianization. By looking at the wreath recursions of $a$ and $b$, it is clear that for any level-1 state $a_i$ of $a$ the element $a_ia^{-1}$ is a power of $b$, and for any level-1 state $b_i$ of $b$ the element $b_ib^{-1}$ is a power of $b$ (namely $1$ or $b^{-1}$). Thus, indeed the action is weakly diagonal. \end{proof} Note that the action is not coarsely diagonal, e.g., $a_3=ba$, so $a_3 a^{-1}=b$ has infinite order. Thus, we could not directly use the results of \cite{skipper19}, and it was necessary to introduce the new notion of weakly diagonal. Now all the pieces are in place to prove our main result. \begin{theorem}\label{thrm:exist_simple} There exist finitely presented simple groups with at least exponential Dehn function. \end{theorem} \begin{proof} View $BS(1,n)$ as a self-similar group in $\Aut(\mathcal{T}_{n+1})$ as above. By Proposition~\ref{prop:check_everything}, $BS(1,n)$ is rational and weakly diagonal. Now use Lemma~\ref{lem:persistent} to view $BS(1,n)$ as a persistent self-similar group in $\Aut(\mathcal{T}_{n+2})$, which is still rational and weakly diagonal. By Corollary~\ref{cor:simple_and_qr}, $V_{n+2}(BS(1,n))$ is finitely presented with $\delta_{BS(1,n)}\preceq \delta_{V_{n+2}(BS(1,n))}$, so the Dehn function of $V_{n+2}(BS(1,n))$ is at least exponential. 
Corollary~\ref{cor:simple_and_qr} also says that the commutator subgroup $[V_{n+2}(BS(1,n)),V_{n+2}(BS(1,n))]$ has finite index in $V_{n+2}(BS(1,n))$, and is simple. Thus, we conclude that $[V_{n+2}(BS(1,n)),V_{n+2}(BS(1,n))]$ is a finitely presented simple group whose Dehn function is at least exponential. \end{proof} \end{document}
\begin{document} \begin{abstract} Motivated by two open questions about two-cardinal tree properties, we introduce and study generalized narrow system properties. The first of these questions asks whether the strong tree property at a regular cardinal $\kappa \geq \omega_2$ implies the Singular Cardinals Hypothesis (${\sf SCH}$) above $\kappa$. We show here that a certain narrow system property at $\kappa$ that is closely related to the strong tree property, and holds in all known models thereof, suffices to imply ${\sf SCH}$ above $\kappa$. The second of these questions asks whether the strong tree property can consistently hold simultaneously at all regular cardinals $\kappa \geq \omega_2$. We show here that the analogous question about the generalized narrow system property has a positive answer. We also highlight some connections between generalized narrow system properties and the existence of certain strongly unbounded subadditive colorings. \end{abstract} \keywords{narrow systems, Singular Cardinals Hypothesis, Shelah's Strong Hypothesis, tree property, large cardinals, subadditive colorings} \subjclass[2020]{03E05, 03E35, 03E55, 03E04} \title{Narrow systems revisited} \section{Introduction} A significant line of research in modern combinatorial set theory concerns the study of compactness principles that hold at (and sometimes characterize) large cardinals, the extent to which these compactness principles can hold at smaller cardinals, and the extent to which these principles can be said to capture the ``essence'' of the respective large cardinal. To take a classical example, the tree property characterizes weakly compact cardinals among strongly inaccessible cardinals, while Mitchell \cite{mitchell} showed that the tree property at $\aleph_2$ is equiconsistent with the existence of a weakly compact cardinal. A number of questions remain open, though, about the extent to which the tree property can hold at smaller cardinals.
The most prominent, due to Magidor, asks whether it is consistent that the tree property holds simultaneously at all regular cardinals greater than or equal to $\aleph_2$. Generalizations of the tree property, known collectively as \emph{two-cardinal tree properties}, were introduced in the 1970s by Jech \cite{jech_combinatorial_problems} and Magidor \cite{magidor_combinatorial_characterization} to provide combinatorial characterizations of strongly compact and supercompact cardinals. Let us now recall some of the important definitions, in their modern formulation. \begin{definition} \label{tp_def} Suppose that $\kappa \leq \lambda$ are uncountable cardinals, with $\kappa$ regular. A $(\kappa, \lambda)$-tree is a structure $\mathcal T = \langle T_x \mid x \in \ensuremath{\mathscr{P}}_\kappa \lambda \rangle$ such that \begin{itemize} \item for all $x \in \ensuremath{\mathscr{P}}_\kappa \lambda$, $T_x$ is a nonempty collection of subsets of $x$; \item for all $x \subseteq y \in \ensuremath{\mathscr{P}}_\kappa \lambda$ and all $t \in T_y$, we have $t \cap x \in T_x$. \end{itemize} A $(\kappa, \lambda)$-tree $\mathcal T$ is \emph{thin} if $|T_x| < \kappa$ for all $x \in \ensuremath{\mathscr{P}}_\kappa \lambda$. A \emph{cofinal branch} through $\mathcal T$ is a set $b \subseteq \lambda$ such that $b \cap x \in T_x$ for all $x \in \ensuremath{\mathscr{P}}_\kappa \lambda$. The \emph{$(\kappa, \lambda)$-tree property}, denoted $\mathsf{TP}(\kappa, \lambda)$, is the assertion that every thin $(\kappa, \lambda)$-tree has a cofinal branch.
The \emph{ineffable $(\kappa, \lambda)$-tree property}, denoted ${\sf ITP}(\kappa, \lambda)$, is the assertion that, for every thin $(\kappa, \lambda)$-tree $\mathcal T = \langle T_x \mid x \in \ensuremath{\mathscr{P}}_\kappa \lambda \rangle$ and every choice function $d \in \prod_{x \in \ensuremath{\mathscr{P}}_\kappa \lambda} T_x$, there is a set $b \subseteq \lambda$ such that the set \[ \{x \in \ensuremath{\mathscr{P}}_\kappa \lambda \mid b \cap x = d(x)\} \] is stationary in $\ensuremath{\mathscr{P}}_\kappa \lambda$. The \emph{strong tree property} at $\kappa$, denoted $\mathsf{TP}_\kappa$, is the assertion that $\mathsf{TP}(\kappa, \lambda)$ holds for all $\lambda \geq \kappa$.\footnote{To head off potential confusion, we note that $\mathsf{TP}_\kappa$ is stronger than the classical tree property at $\kappa$, which is typically denoted $\mathsf{TP}(\kappa)$ and is equivalent to $\mathsf{TP}(\kappa, \kappa)$ in our notation.} The \emph{super tree property} at $\kappa$, denoted ${\sf ITP}_\kappa$, is the assertion that ${\sf ITP}(\kappa, \lambda)$ holds for all $\lambda \geq \kappa$. \end{definition} \begin{fact} Suppose that $\kappa$ is an inaccessible cardinal. \begin{itemize} \item (Jech \cite{jech_combinatorial_problems}) $\kappa$ is strongly compact if and only if $\mathsf{TP}_\kappa$ holds. \item (Magidor \cite{magidor_combinatorial_characterization}) $\kappa$ is supercompact if and only if ${\sf ITP}_\kappa$ holds. \end{itemize} \end{fact} The modern study of two-cardinal tree properties at accessible cardinals did not begin until the 2000s, when the relevant definitions (including, e.g., the notion of a \emph{thin} $(\kappa, \lambda)$-tree introduced above) were isolated by Wei\ss\ \cite{weiss}. Since then, they have been the focus of a large amount of research, a considerable amount of which has been directed toward the study of their influence on cardinal arithmetic.
Most notably, results of Viale \cite{viale_guessing_models} and Krueger \cite{krueger_sch} together show that, for a regular cardinal $\kappa \geq \omega_2$, ${\sf ISP}_\kappa$, which is a strengthening of ${\sf ITP}_\kappa$ also introduced by Wei\ss\ in \cite{weiss}, implies the Singular Cardinals Hypothesis (${\sf SCH}$) above $\kappa$. In \cite[Theorem A]{arithmetic_paper}, the author and Stejskalov\'{a} show that ${\sf SCH}$ above $\kappa$ (and in fact Shelah's Strong Hypothesis ($\sf SSH$), a strengthening of ${\sf SCH}$, above $\kappa$) already follows from a significant weakening of ${\sf ISP}_\kappa$ that holds if, e.g., $\kappa$ is strongly compact or if $\kappa = \omega_2$ and we are in an extension by Mitchell forcing starting with a strongly compact cardinal. We note also the seminal result of Solovay \cite{solovay_sch} stating that if $\kappa$ is a strongly compact cardinal, then ${\sf SCH}$ holds above $\kappa$. Recalling that, among inaccessible cardinals, $\mathsf{TP}_\kappa$ characterizes strongly compact cardinals whereas ${\sf ITP}_\kappa$ characterizes supercompact cardinals, this, together with the results mentioned in the previous paragraph, naturally leads to the following question, already asked in, e.g., \cite{hachtman_sinapova_itp} and \cite{ineffable_tree_property}: \begin{question} \label{sch_q} Suppose that $\kappa \geq \omega_2$ is a regular cardinal. Does ${\sf ITP}_\kappa$ (or $\mathsf{TP}_\kappa$) imply ${\sf SCH}$ above $\kappa$? \end{question} The analogue of Magidor's question is also of interest for these two-cardinal tree properties (see \cite{ineffable_tree_property} for more discussion of this question): \begin{question} \label{global_q} Is it consistent that ${\sf ITP}_\kappa$ (or $\mathsf{TP}_\kappa$) holds for all regular cardinals $\kappa \geq \omega_2$?
\end{question} Questions \ref{sch_q} and \ref{global_q} have a tight connection: by a theorem of Specker \cite{specker}, if $\mu$ is a cardinal and $2^\mu = \mu^+$, then the tree property fails at $\mu^{++}$ and therefore, \emph{a fortiori}, $\mathsf{TP}_{\mu^{++}}$ fails. Thus, if $\mu$ is a singular strong limit cardinal and ${\sf SCH}$ holds at $\mu$, then $\mathsf{TP}_{\mu^{++}}$ fails, so a positive answer to Question \ref{sch_q} would entail a negative answer to Question \ref{global_q}. Motivated by these questions, we prove here some results that we feel hint at a positive answer to Question \ref{sch_q} or at least indicate that genuinely new ideas would be needed to establish a negative answer. This work will involve introducing and analyzing generalizations of the classical notion of a \emph{narrow $\kappa$-system}, introduced by Magidor and Shelah \cite{magidor_shelah} to facilitate study of the tree property, particularly at successors of singular cardinals. Narrow systems have continued to play a central role in the study of the tree property, with verifications of the tree property at a cardinal $\kappa$ (especially when $\kappa$ is the successor of a singular cardinal) typically at least implicitly consisting of the following two-step process: \begin{enumerate} \item Prove that every $\kappa$-tree has a narrow $\kappa$-subsystem. \item Prove that every narrow $\kappa$-system has a cofinal branch. The cofinal branch through the subsystem identified in step (1) then generates a branch through the given $\kappa$-tree. \end{enumerate} In this paper, we generalize the notion of narrow system from the setting of cardinals $\kappa$ to arbitrary directed partial orders $\Lambda$ and show that these generalized systems can play the same role in the study of generalized tree properties that narrow $\kappa$-systems play in the study of the classical tree property at $\kappa$.
We introduce the generalized narrow system properties $\mathsf{NSP}(\Lambda)$, asserting that every narrow $\Lambda$-system has a cofinal branch, and study these properties, particularly in relation to their connections to Questions \ref{sch_q} and \ref{global_q}. In Section \ref{concrete_sec}, before introducing narrow systems in their full generality, we define a specific type of system, which we call a \emph{concrete $\ensuremath{\mathscr{P}}_\kappa \lambda$-system}, that is particularly relevant to the study of $\mathsf{TP}(\kappa, \lambda)$. The narrow system property ${\sf cNSP}(\ensuremath{\mathscr{P}}_\kappa \lambda)$ then asserts that every narrow concrete $\ensuremath{\mathscr{P}}_\kappa \lambda$-system has a cofinal branch, and ${\sf cNSP}_\kappa$ denotes the assertion that ${\sf cNSP}(\ensuremath{\mathscr{P}}_\kappa \lambda)$ holds for all $\lambda \geq \kappa$. ${\sf cNSP}_\kappa$ holds in all known models of $\mathsf{TP}_\kappa$, and is often used, at least implicitly, in verifications that $\mathsf{TP}_\kappa$ holds in a given model, especially if $\kappa$ is the successor of a singular cardinal. At the same time, we show that this narrow system property is strong enough to imply instances of $\sf SSH$: \begin{thma} Suppose that $\kappa \geq \omega_2$ is a regular cardinal and ${\sf cNSP}_\kappa$ holds. Then $\sf SSH$ holds above $\kappa$. \end{thma} Then, in Section \ref{system_sec}, we introduce the notion of narrow $\Lambda$-system and the narrow $\Lambda$-system property ($\mathsf{NSP}(\Lambda)$) for an arbitrary directed partial order $\Lambda$ and, as an illustration of their utility, use them to prove that generalized tree properties hold at successors of singular limits of strongly compact cardinals (Theorem \ref{strongly_compact_tp_thm}).
In Section \ref{subbadditive_sec}, we connect narrow $\Lambda$-systems with strongly unbounded subadditive colorings, proving both that instances of $\mathsf{NSP}(\Lambda)$ entail the nonexistence of such functions on $\Lambda^{[2]}$ and, in turn, that the nonexistence of such functions on $(\ensuremath{\mathscr{P}}_\kappa \lambda)^{[2]}$ can be used in place of ${\sf cNSP}_\kappa$ in the hypothesis of Theorem A (cf.\ Corollaries \ref{nsp_sub_cor} and \ref{sub_ssh_cor}, respectively). The remainder of the paper is devoted to a global consistency result showing that Question \ref{global_q} has a positive answer if the two-cardinal tree properties are replaced by generalized narrow system properties: \begin{thmb} Suppose that there is a proper class of supercompact cardinals. Then there is a (class) forcing extension in which $\mathsf{NSP}(\Lambda)$ holds for every directed partial order $\Lambda$. \end{thmb} Section \ref{preservation_sec} contains the proof of a technical branch preservation lemma for generalized narrow systems, and then Section \ref{consistency_sec} applies this lemma to prove Theorem B. \subsection{Notational conventions} Unless otherwise noted, we follow standard set theoretic notational conventions. $\mathrm{On}$ denotes the class of all ordinals. Given an infinite cardinal $\kappa$ and a set $X$, $\ensuremath{\mathscr{P}}(X)$ denotes the power set of $X$, and $\ensuremath{\mathscr{P}}_\kappa X$ denotes $\{x \subseteq X \mid |x| < \kappa\}$. If $x$ is a set of ordinals, then the \emph{strong supremum} of $x$ is the ordinal $\mathrm{ssup}(x) := \sup\{\alpha + 1 \mid \alpha \in x\}$, i.e., $\mathrm{ssup}(x)$ is the least ordinal $\beta$ such that $\alpha < \beta$ for all $\alpha \in x$. Given a partial order $\Lambda$, we let $\Lambda^{[2]}$ denote the set of ordered pairs $(u,v)$ from $\Lambda$ such that $u <_\Lambda v$.
Sets of the form $\ensuremath{\mathscr{P}}_\kappa X$ will be interpreted as partial orders with the order relation given by $\subsetneq$. In particular, $(\ensuremath{\mathscr{P}}_\kappa X)^{[2]}$ denotes the set of pairs $(x,y)$ of elements of $\ensuremath{\mathscr{P}}_\kappa X$ with $x \subsetneq y$. \section{Concrete systems} \label{concrete_sec} Before we introduce the general notion of \emph{(narrow) $\Lambda$-system} for an arbitrary directed order $\Lambda$, and in order to help motivate the more abstract general definition, we first consider an important special case. \begin{definition} \label{concrete_system_def} Suppose that $\kappa \leq \lambda$ are uncountable cardinals, with $\kappa$ regular. A \emph{concrete $\ensuremath{\mathscr{P}}_\kappa \lambda$-system} is a structure $\mathcal S = \langle S_x \mid x \in A \rangle$ such that \begin{enumerate} \item $A$ is a $\subseteq$-cofinal subset of $\ensuremath{\mathscr{P}}_\kappa \lambda$; \item for all $x \in A$, $\emptyset \neq S_x \subseteq \ensuremath{\mathscr{P}}(x)$; \item for all $x \subseteq y$, both in $A$, there is $t \in S_y$ such that $t \cap x \in S_x$. \end{enumerate} The \emph{width} of $\mathcal S$ is defined to be $\mathrm{width}(\mathcal S) := \sup\{|S_x| \mid x \in A\}$. We say that $\mathcal S$ is a \emph{narrow} concrete $\ensuremath{\mathscr{P}}_\kappa \lambda$-system if $\mathrm{width}(\mathcal S)^+ < \kappa$. A \emph{cofinal branch} through $\mathcal S$ is a set $b \subseteq \lambda$ such that the set $\{x \in A \mid b \cap x \in S_x\}$ is $\subseteq$-cofinal in $\ensuremath{\mathscr{P}}_\kappa \lambda$. \end{definition} Classical narrow systems, with levels indexed by ordinals, were introduced by Magidor and Shelah in \cite{magidor_shelah} as a central tool in the study of the tree property, particularly at successors of singular cardinals.
Indeed, all known verifications of the tree property at the successor of a singular cardinal $\mu$ at least implicitly go through the following two steps: \begin{enumerate} \item Show that every $\mu^+$-tree $\mathcal T$ has a narrow subsystem $\mathcal S$ of height $\mu^+$. \item Show that every narrow system of height $\mu^+$ has a cofinal branch; in particular, $\mathcal S$ has a cofinal branch, which gives rise to a cofinal branch through $\mathcal T$. \end{enumerate} One of the motivating observations for this paper is that narrow concrete $\ensuremath{\mathscr{P}}_\kappa \lambda$-systems play an analogous role for $(\kappa, \lambda)$-trees. For example, by an analogue of the two-step argument outlined above, we can show that the two-cardinal tree property $\bb{T}P_\kappa$ holds if $\kappa$ is the successor of a singular limit of strongly compact cardinals. Since a more general version of this statement is true, we postpone its proof until after we introduce the more general definition of ``narrow system''; it follows as a special case of Theorem \ref{strongly_compact_tp_thm} below. We now turn to showing that the existence of cofinal branches through certain narrow concrete systems implies instances of ${\sf SCH}$ (and $\sf SSH$). To state the results concisely, we introduce the following terminology. \begin{definition} \label{concrete_prop_def} Let $\kappa$ be a regular uncountable cardinal. For a cardinal $\lambda \geq \kappa$, we say that the \emph{concrete narrow $\ensuremath{\mathscr{P}}_\kappa \lambda$-system property} holds (denoted ${\sf cNSP}(\ensuremath{\mathscr{P}}_\kappa \lambda)$) if every narrow concrete $\ensuremath{\mathscr{P}}_\kappa \lambda$-system has a cofinal branch. We say that ${\sf cNSP}_\kappa$ holds if ${\sf cNSP}(\ensuremath{\mathscr{P}}_\kappa \lambda)$ holds for all $\lambda \geq \kappa$.
\end{definition} \begin{remark} \label{remark_23} It is worth taking the time to compare Definitions \ref{concrete_system_def} and \ref{concrete_prop_def} with Definition \ref{tp_def}, as the definitions of narrow concrete $\ensuremath{\mathscr{P}}_\kappa \lambda$-systems and thin $(\kappa, \lambda)$-trees are quite similar. The two salient differences are: \begin{itemize} \item The definition of narrow concrete $\ensuremath{\mathscr{P}}_\kappa \lambda$-system is more restrictive with regards to the size of each level, requiring $\mathrm{width}(\mathcal S)^+ < \kappa$, whereas a thin $(\kappa, \lambda)$-tree $\mathcal T$ is only required to satisfy $|T_x| < \kappa$ for all $x \in \ensuremath{\mathscr{P}}_\kappa \lambda$. \item On the other hand, the definition of thin $(\kappa, \lambda)$-tree is more restrictive with regards to the \emph{coherence properties} of the structure, requiring that, for all $x \subseteq y$ and \emph{all} $t \in T_y$, we have $t \cap x \in T_x$, whereas the analogous requirement in the definition of narrow concrete $\ensuremath{\mathscr{P}}_\kappa \lambda$-system only requires the existence of one such $t$. \end{itemize} Therefore, it is not immediately evident whether either $\bb{T}P_\kappa$ or ${\sf cNSP}_\kappa$ implies the other, though we shall see that, in practice, ${\sf cNSP}_\kappa$ is easier to arrange than, and does not imply, $\bb{T}P_\kappa$ (cf.\ Remark \ref{cnsp_tp_remark} below). The question of whether $\bb{T}P_\kappa$ implies ${\sf cNSP}_\kappa$ remains open and very much of interest. \end{remark} Our verifications of $\sf SSH$ will go through the machinery of \emph{covering matrices} introduced by Viale in his proof that ${\sf SCH}$ follows from the Proper Forcing Axiom \cite{viale_pfa_sch}. \begin{definition} Let $\theta < \lambda$ be regular cardinals.
A \emph{$\theta$-covering matrix for $\langlembda$} is a matrix $\mathcal{D} = \langlengle D(i, \beta) \mid i < \theta, ~ \beta < \langlembda \ranglengle$ such that: \begin{enumerate} \item for all $\beta < \langlembda$, $\langlengle D(i, \beta) \mid i < \theta \ranglengle$ is a $\subseteqseteq$-increasing sequence and $\bigcup_{i < \theta} D(i, \beta) = \beta$; \item for all $\beta < \gamma < \langlembda$ and $i < \theta$, there is $j < \theta$ such that $D(i, \beta) \subseteqseteq D(j, \gamma)$. \end{enumerate} \end{definition} We will be especially interested in covering matrices satisfying certain additional properties. \begin{definition} Suppose that $\theta < \langlembda$ are regular cardinals and $\mathcal D$ is a $\theta$-covering matrix for $\langlembda$. \begin{enumerate} \item $\mathcal D$ is \emph{transitive} if, for all $\beta < \gamma < \langlembda$ and all $i < \theta$, if $\beta \in D(i, \gamma)$, then $D(i, \beta) \subseteqseteq D(i, \gamma)$. \item $\mathcal D$ is \emph{uniform} if, for every limit ordinal $\beta < \langlembda$, there is $i < \theta$ such that $D(i, \beta)$ contains a club in $\beta$. \item $\bb{C}P(\mathcal D)$ holds if there is an unbounded $A \subseteqseteq \langlembda$ such that $[A]^\theta$ is covered by $\mathcal D$, i.e., for all $X \in [A]^\theta$, there are $\beta < \langlembda$ and $i < \theta$ for which $X \subseteqseteq D(i, \beta)$. \end{enumerate} \end{definition} The following theorem is proven in \cite{arithmetic_paper} (it was previously known in the case in which $\mu$ is strong limit (cf.\ \cite[Lemma 6]{viale_covering})). \begin{theorem}[\cite{arithmetic_paper}] \langlebel{downward_coherence_thm} Suppose that $\mu$ is a singular cardinal, $\theta = \mathrm{cf}(\mu)$, and $\mathcal D$ is a uniform, transitive $\theta$-covering matrix for $\mu^+$. 
Then, for every $x \in \ensuremath{\mathscr{P}}_\mu \mu^+$, there is $\gamma_x < \mu^+$ such that, for all $\beta \in [\gamma_x, \mu^+)$, there is $i < \theta$ such that, for all $j \in [i, \theta)$, we have $x \cap D(j,\beta) = x \cap D(j, \gamma_x)$. \end{theorem} We will also need to recall some basic background about Shelah's Strong Hypothesis. $\sf SSH$ is the assertion that $\mathrm{pp}(\mu) = \mu^+$ for every singular cardinal $\mu$, where $\mathrm{pp}(\mu)$ denotes the \emph{pseudopower} of $\mu$. For a cardinal $\kappa$, we say that $\sf SSH$ holds \emph{above $\kappa$} if $\mathrm{pp}(\mu) = \mu^+$ for every singular cardinal $\mu > \kappa$. For our purposes, we will not need to recall the definition of $\mathrm{pp}(\mu)$; the following facts will suffice: \begin{fact} \langlebel{pp_fact} In what follows, if $\vec{\mu} = \langlengle \mu_i \mid i < \theta \ranglengle$ is a sequence of regular cardinals, then $\prod \vec{\mu}$ denotes the set of functions $f$ such that $\dom{f} = \theta$ and $f(i) < \mu_i$ for all $i < \theta$. Given $f,g \in \prod{\vec{\mu}}$, we say that $f <^* g$ if there is $i < \theta$ such that $f(j) < g(j)$ for all $j \in [i, \theta)$. The second and third facts below are both implicit in \cite{cardinal_arithmetic}; the cited references provide more explicit explanations. \begin{enumerate} \item \cite[\S 2, Claim 2.4]{cardinal_arithmetic} If $\mu$ is a singular cardinal of uncountable cofinality and $\{\nu < \mu \mid \mathrm{pp}(\nu) = \nu^+\}$ is stationary in $\mu$, then $\mathrm{pp}(\mu) = \mu^+$. \item \cite[Observation 4.4]{matet_meeting_numbers} Suppose that $\mu$ is a singular cardinal and $\mathrm{pp}(\mu) > \mu^+$. Then there is an increasing sequence of regular cardinals $\vec{\mu} = \langlengle \mu_i \mid i < \mathrm{cf}(\mu) \ranglengle$ converging to $\mu$ such that $\mathrm{cf}(\prod \vec{\mu}, <^*) > \mu^+$. 
\item \cite[Proposition 4.18]{arithmetic_paper} Let $\kappa$ be an infinite cardinal such that $\sf SSH$ holds above $\kappa$. Then ${\sf SCH}$ holds above $\kappa$. \end{enumerate} \end{fact} The connection between covering matrices and $\sf SSH$ comes via the following result. \begin{theorem} \langlebel{cp_ssh_thm} \cite[Theorem 4.19]{arithmetic_paper} Suppose that $\mu$ is a singular cardinal, $\theta = \mathrm{cf}(\mu)$, and $\vec{\mu} = \langlengle \mu_i \mid i < \theta \ranglengle$ is an increasing sequence of regular cardinals converging to $\mu$. Suppose moreover that $\mathcal D = \langlengle D(i,\beta) \mid i < \theta, ~ \beta < \mu^+ \ranglengle$ is a $\theta$-covering matrix for $\mu^+$ such that \begin{enumerate} \item for all $i < \theta$ and $\beta < \mu^+$, we have $|D(i,\beta)| < \mu_i$; \item $\bb{C}P(\mathcal D)$ holds. \end{enumerate} Then $\mathrm{cf}(\prod \vec{\mu}, <^*) = \mu^+$. \end{theorem} We are now ready for the main result of this section, which will then yield Theorem A. \begin{theorem} \langlebel{nsp_cp_thm} Suppose that $\mu$ is a singular cardinal, $\theta = \mathrm{cf}(\mu)$, and there is a regular cardinal $\kappa \in [\theta^{++}, \mu)$ such that ${\sf cNSP}(\ensuremath{\mathscr{P}}_\kappa \mu^+)$ holds. Then $\bb{C}P(\mathcal D)$ holds for every uniform, transitive $\theta$-covering matrix $\mathcal D$ for $\mu^+$. \end{theorem} \begin{proof} Let $\langlembda := \mu^+$. Fix a uniform, transitive $\theta$-covering matrix $\mathcal D = \langlengle D(i,\beta) \mid i < \theta, ~ \beta < \langlembda \ranglengle$ for $\langlembda$, and let $A := \{x \in \ensuremath{\mathscr{P}}_\kappa \langlembda \mid \mathrm{cf}(\mathrm{ssup}(x)) > \theta \}$. Since $\kappa > \theta^+$ is a regular cardinal, $A$ is cofinal in $\ensuremath{\mathscr{P}}_\kappa \langlembda$; note that, for $x \in A$, we have $\sup(x) = \mathrm{ssup}(x)$. 
For each $x \in A$, let $\gamma_x < \mu^+$ be the least ordinal satisfying the conclusion of Theorem~\ref{downward_coherence_thm}; namely, for all $\beta \in [\gamma_x, \langlembda)$ and all sufficiently large $j < \theta$, we have $x \cap D(j, \beta) = x \cap D(j, \gamma_x)$. Note that we must have $\gamma_x \geq \sup(x)$ and, if $x \subseteqseteq y$ are both in $A$, then $\gamma_x \leq \gamma_y$. For each $x \in A$, let \[ S_x := \{x \cap D(i, \gamma_x) \mid i < \theta \text{ and } \sup(x \cap D(i, \gamma_x)) = \sup(x)\}. \] Since $\mathrm{cf}(\sup(x)) > \theta$ and $x = \bigcup_{i < \theta} (x \cap D(i, \gamma_x))$, it must be the case that $x \cap D(i, \gamma_x) \in S_x$ for all sufficiently large $i < \theta$. We claim that $\mathcal S = \langlengle S_x \mid x \in A \ranglengle$ is a concrete $\ensuremath{\mathscr{P}}_\kappa \langlembda$-system. The only nontrivial condition to check is clause (3) of Definition~\ref{concrete_system_def}. To this end, fix $x \subseteqseteq y$, both in $A$. We know that, for all sufficiently large $i < \theta$, we have \begin{itemize} \item $\sup(x \cap D(i, \gamma_x)) = \sup(x)$; \item $\sup(y \cap D(i, \gamma_y)) = \sup(y)$; \item either $\gamma_x = \gamma_y$ or $\gamma_x \in D(i, \gamma_y)$; in either case, since $\mathcal D$ is transitive, we have $D(i, \gamma_x) \subseteqseteq D(i, \gamma_y)$. \end{itemize} Therefore, choosing $i < \theta$ sufficiently large, we have $y \cap D(i, \gamma_y) \in S_y$ and \[ (y \cap D(i, \gamma_y)) \cap x = D(i, \gamma_y) \cap x = D(i, \gamma_x) \cap x \in S_x, \] where the second equality holds by the choice of $\gamma_x$. Therefore, we have found $t \in S_y$ for which $t \cap x \in S_x$, as desired. Moreover, we have $|S_x| \leq \theta$ for all $x \in A$, so $\mathcal S$ is a \emph{narrow} concrete $\ensuremath{\mathscr{P}}_\kappa \langlembda$-system. We can therefore apply ${\sf cNSP}(\ensuremath{\mathscr{P}}_\kappa \langlembda)$ to find a cofinal branch $b$ through $\mathcal S$. 
\begin{claim} $b$ is unbounded in $\langlembda$. \end{claim} \begin{proof} Fix $\alpha < \langlembda$; we will show that $b \setminus \alpha$ is nonempty. Find $x \in A$ such that $\alpha \in x$ and $b \cap x \in S_x$. By the definition of $S_x$, it follows that $\sup(b \cap x) = \sup(x) > \alpha$. \end{proof} We will therefore be done if we show that $[b]^\theta$ is covered by $\mathcal D$, as then $b$ will witness $\bb{C}P(\mathcal D)$. To this end, fix $z \in [b]^\theta$. Since $b$ is a cofinal branch through $\mathcal S$, we can find $x \in A$ such that $z \subseteqseteq x$ and $b \cap x \in S_x$. Then $z \subseteqseteq b \cap x$, and there is $i < \theta$ such that $b \cap x = x \cap D(i, \gamma_x)$; therefore, $z \subseteqseteq D(i, \gamma_x)$, as desired. \end{proof} We are now ready to prove Theorem A, asserting that, for a regular cardinal $\kappa \geq \omega_2$, ${\sf cNSP}_\kappa$ implies $\sf SSH$ above $\kappa$. \begin{proof}[Proof of Theorem A] By Fact \ref{pp_fact}(1), to establish $\sf SSH$ above $\kappa$, it suffices to show that $\mathrm{pp}(\mu) = \mu^+$ for every singular cardinal $\mu > \kappa$ of countable cofinality. Fix such a $\mu$. Next, by Fact \ref{pp_fact}(2), to establish $\mathrm{pp}(\mu) = \mu^+$, it suffices to prove that $\mathrm{cf}(\prod \vec{\mu}, <^*) = \mu^+$ for every increasing sequence of regular cardinals $\vec{\mu} = \langlengle \mu_i \mid i < \omega \ranglengle$ converging to $\mu$. Fix such a sequence $\vec{\mu}$. By the proof of \cite[Lemma 2.4]{sharon_viale} (cf.\ also \cite[Lemma 4.4]{arithmetic_paper}), there is a uniform, transitive $\omega$-covering matrix $\mathcal D = \langlengle D(i,\beta) \mid i < \omega, ~ \beta < \mu^+ \ranglengle$ for $\mu^+$ such that $|D(i,\beta)| < \mu_i$ for all $i < \omega$ and $\beta < \mu^+$. 
By Theorem \ref{nsp_cp_thm} and the assumption that ${\sf cNSP}_\kappa$ holds, we know that $\bb{C}P(\mathcal D)$ holds, and then, by Theorem \ref{cp_ssh_thm}, we have $\mathrm{cf}(\prod \vec{\mu}, <^*) = \mu^+$, as desired. \end{proof} \section{General systems} \langlebel{system_sec} We now move to the more general setting of systems indexed by arbitrary directed partial orders. Given a partial order $(\Lambda, \leq_\Lambda)$, we will sometimes abuse notation and use the symbol $\Lambda$ to denote the partial order. If a partial order is denoted by $\Lambda$, it should be understood that its order relation is denoted by $\leq_\Lambda$. The strict portion of $\leq_\Lambda$ will be denoted by $<_\Lambda$. Given $u \in \Lambda$, let $u^\uparrow$ denote $\{v \in \Lambda \mid u <_\Lambda v\}$. \begin{definition} Suppose that $\Lambda$ is a partial order and $\kappa$ is an infinite cardinal. We say that $\Lambda$ is \emph{$\kappa$-directed} if every element of $\ensuremath{\mathscr{P}}_\kappa \Lambda$ has an upper bound, i.e., for every $x \in \ensuremath{\mathscr{P}}_\kappa \Lambda$, there is $v \in \Lambda$ such that $u \leq_{\Lambda} v$ for all $u \in x$. We say that $\Lambda$ is \emph{directed} if it is $\aleph_0$-directed; equivalently, for all $u,v \in \Lambda$, there is $w \in \Lambda$ such that $u,v \leq_\Lambda w$. \end{definition} \begin{definition} Suppose that $\Lambda$ is a directed partial order. The \emph{directedness} of $\Lambda$, denoted $d_\Lambda$, is the largest cardinal $\kappa$ such that $\Lambda$ is $\kappa$-directed. It is readily verified that this is well-defined and that $d_\Lambda$ is a regular cardinal for every directed partial order $\Lambda$. \end{definition} \begin{definition} Let $R$ be a binary relation on a set $X$. For $x,y \in X$, we will typically write $x <_R y$ to denote $(x,y) \in R$ and $x \leq_R y$ to denote the statement \[ (x,y) \in R \text{ or } x = y. 
\] Two elements $x$ and $y$ of $X$ are said to be \emph{$R$-comparable} if either $x \leq_R y$ or $y \leq_R x$. Otherwise, $x$ and $y$ are \emph{$R$-incomparable}. \end{definition} \begin{definition} \langlebel{system_def} Let $\Lambda$ be a directed partial order. A \emph{$\Lambda$-system} is a structure \[ \mathcal S = \left \langlengle \langlengle S_u \mid u \in \Lambda \ranglengle, \mathcal R \right \ranglengle \] satisfying the following conditions. \begin{enumerate} \item $\langlengle S_u \mid u \in \Lambda \ranglengle$ is a sequence of pairwise disjoint nonempty sets. We will sometimes refer to $\bigcup_{u \in \Lambda} S_u$ as the \emph{underlying set} of $\mathcal S$, and we will sometimes simply denote it by $S$. For each $x \in S$, let $\precl(x)$ denote the unique $u \in \Lambda$ such that $x \in S_u$. \item $\mathcal R$ is a nonempty set of binary, transitive relations on $S$. \item For all $x,y \in S$ and $R \in \mathcal R$, if $x <_R y$, then $\precl(x) <_\Lambda \precl(y)$. \item \langlebel{treelike_clause} For all $x,y,z \in S$ and $R \in \mathcal R$, if $x,y <_R z$ and $\precl(x) \leq_{\Lambda} \precl(y)$, then $x \leq_R y$. \item \langlebel{completeness_clause} For all $(u,v) \in \Lambda^{[2]}$, there are $x \in S_u$, $y \in S_v$, and $R \in \mathcal R$ such that $x <_R y$. \end{enumerate} If $\mathcal S$ is a $\Lambda$-system, then we define $\mathrm{width}(\mathcal S)$ to be $\max\{\sup\{|S_u| \mid u \in \Lambda\}, |\mathcal R|\}$. We say that $\mathcal S$ is a \emph{narrow $\Lambda$-system} if $\mathrm{width}(\mathcal S)^+ < d_\Lambda$. If $\mathcal S = \left \langlengle \langlengle S_u \mid u \in \Lambda \ranglengle, \mathcal R \right \ranglengle$ is a $\Lambda$-system, $x,y \in S$, and $R \in \mathcal R$, then we say that $x$ and $y$ are \emph{$R$-compatible}, denoted $x \parallel_R y$, if there is $z \in S$ such that $x,y \leq_R z$. We say that $x$ and $y$ are \emph{$R$-incompatible}, denoted $x \perp_R y$, if there is no such $z$. 
Note that, if $x \parallel_R y$ and $\precl(x) \leq_{\Lambda} \precl(y)$, then Clause \ref{treelike_clause} above implies that $x \leq_R y$. Given $R \in \mathcal R$, a \emph{branch through $R$ in $\mathcal S$} is a set $b \subseteqseteq S$ such that, for all $x,y \in b$, we have $x \parallel_R y$ (note that this implies that $|b \cap S_u| \leq 1$ for all $u \in \Lambda$). We will sometimes say that $b$ is a \emph{branch in $\mathcal S$} to mean that there is $R \in \mathcal R$ such that $b$ is a branch through $R$ in $\mathcal S$. A branch $b$ is said to be \emph{cofinal} if $\{u \in \Lambda \mid b \cap S_u \neq \emptyset\}$ is cofinal in $\Lambda$. \end{definition} \begin{remark} The concrete $\ensuremath{\mathscr{P}}_\kappa \langlembda$-systems of Section~\ref{concrete_sec} are indeed special cases of Definition~\ref{system_def}: suppose that $\mathcal S = \langlengle S_x \mid x \in A \ranglengle$ is a concrete $\ensuremath{\mathscr{P}}_\kappa \langlembda$-system. Then there is a natural way to view $\mathcal S$ as an $(A, \subseteqsetneq)$-system in the sense of Definition~\ref{system_def}. Namely, for each $x \in A$, let $S'_x := \{x\} \times S_x$, and define a binary relation $R$ on $\bigcup_{x \in A} S'_x$ by letting $(x,t) <_R (y,s)$ iff $x \subseteqsetneq y$ and $y \cap t = x$. Then $\mathcal S' := \langlengle \langlengle S'_x \mid x \in A \ranglengle, \{R\} \ranglengle$ is readily verified to be an $(A, \subseteqsetneq)$-system in the sense of Definition~\ref{system_def}, and cofinal branches through $\mathcal S$ in the sense of Definition~\ref{concrete_system_def} naturally correspond to cofinal branches through $\mathcal S'$ in the sense of Definition~\ref{system_def}. \end{remark} \begin{definition} Let $\Lambda$ be a directed partial order. We say that the \emph{$\Lambda$-narrow system property} (denoted $\bb{N}SP(\Lambda)$) holds if every narrow $\Lambda$-system has a cofinal branch. 
\end{definition} Since all of the questions considered in this paper become trivial when addressing systems indexed by partial orders with maximal elements, we will always assume when working with arbitrary $\Lambda$-systems that $\Lambda$ has no maximal element, even when this assumption is not explicitly stated. For notational simplicity, we often prefer to work with systems having only one relation. The following proposition shows that, in the context of questions about the existence of narrow $\Lambda$-systems without cofinal branches, this involves no loss of generality. \begin{proposition} \langlebel{single_relation_prop} Suppose that $\Lambda$ is a directed partial order and \[ \mathcal S = \left \langlengle \langlengle S_u \mid u \in \Lambda \ranglengle, \mathcal R \right \ranglengle \] is a $\Lambda$-system. Then there is a $\Lambda$-system $\mathcal S' = \left \langlengle \langlengle S'_u \mid u \in \Lambda \ranglengle, \mathcal R' \right \ranglengle$ such that \begin{itemize} \item $|\mathcal R'| = 1$; \item $\mathrm{width}(\mathcal S') = \mathrm{width}(\mathcal S)$; \item $\mathcal S'$ has a cofinal branch if and only if $\mathcal S$ has a cofinal branch. \end{itemize} \end{proposition} \begin{proof} For each $u \in \Lambda$, let $S'_u := S_u \times \mathcal R$, and let $\mathcal R'$ consist of a single binary relation, $<'$. For all $x_0,x_1 \in S$ and $R_0, R_1 \in \mathcal R$, let $(x_0,R_0) <' (x_1,R_1)$ if and only if $R_0=R_1$ and $x_0 <_{R_0} x_1$ (in $\mathcal S$). It is readily verified that $\mathcal S'$ thus defined is a $\Lambda$-system and $\mathrm{width}(\mathcal S') = \mathrm{width}(\mathcal S)$. If $R \in \mathcal R$ and $b \subseteqseteq S$ is a cofinal branch through $R$ in $\mathcal S$, then $b' := \{(x,R) \mid x \in b\}$ is a cofinal branch in $\mathcal S'$. 
Conversely, if $d'$ is a cofinal branch in $\mathcal S'$, then there must be a single $R \in \mathcal R$ such that every element of $d'$ is of the form $(x,R)$ for some $x \in S$. Then $d := \{x \in S \mid (x,R) \in d'\}$ is a cofinal branch through $R$ in $\mathcal S$. \end{proof} The following basic proposition is reminiscent of K\"{o}nig's Infinity Lemma, asserting that every infinite finitely-branching tree has an infinite branch. \begin{proposition} \langlebel{finite_width_prop} Suppose that $\Lambda$ is a directed partial order and $\mathcal S$ is a $\Lambda$-system with finite width. Then $\mathcal S$ has a cofinal branch. \end{proposition} \begin{proof} By Proposition \ref{single_relation_prop}, we can assume that $\mathcal S$ has a single relation, which we will denote by $R$. Since $\mathrm{width}(\mathcal S)$ is finite, we can fix an $n < \omega$ such that $|S_u| \leq n$ for all $u \in \Lambda$. Enumerate each $S_u$ as $\langlengle x_{u, k} \mid k < n \ranglengle$, with repetitions if necessary (i.e., if $|S_u| < n$). Since $\Lambda$ is directed, $\mathcal F := \{u^\uparrow \mid u \in \Lambda\}$ is a filter over $\Lambda$. Let $\mathcal U$ be an ultrafilter over $\Lambda$ extending $\mathcal F$. Temporarily fix $u \in \Lambda$. Since $\mathcal S$ is a $\Lambda$-system, it follows that, for every $v \in u^\uparrow$, we can find (not necessarily unique) $j(u,v), k(u,v) < n$ such that $x_{u,j(u,v)} <_R x_{v,k(u,v)}$. Since $\mathcal U$ is an ultrafilter extending $\mathcal F$, we can then find fixed numbers $j(u), k(u) < n$ such that the set \[ X_u := \{v \in u^\uparrow \mid (j(u,v), k(u,v)) = (j(u),k(u))\} \] is in $\mathcal U$.\footnote{We are using the implicit assumption that $\Lambda$ has no maximal element to ensure that we can find such $j(u)$ and $k(u)$.} We can then find fixed numbers $j^*,k^* < n$ such that the set \[ Y := \{u \in \Lambda \mid (j(u),k(u))=(j^*,k^*)\} \] is in $\mathcal U$. In particular, $Y$ is cofinal in $\Lambda$. 
Let $b := \{x_{u,j^*} \mid u \in Y\}$. Since $Y$ is cofinal in $\Lambda$, in order to show that $b$ is a cofinal branch in $\mathcal S$ it suffices to show that, for all $u_0, u_1 \in Y$, we have $x_{u_0,j^*} \parallel_R x_{u_1, j^*}$. To this end, fix such $u_0, u_1$. Since $X_{u_0}, X_{u_1} \in \mathcal U$, we can fix $v \in X_{u_0} \cap X_{u_1}$. Then $x_{u_0, j^*}, x_{u_1, j^*} <_{R} x_{v,k^*}$, so $x_{u_0,j^*} \parallel_R x_{u_1, j^*}$, as desired. \end{proof} An analogous result holds at strongly compact cardinals: \begin{proposition} \label{strongly_compact_prop} Suppose that $\kappa$ is a strongly compact cardinal, $\Lambda$ is a directed partial order with $d_\Lambda \geq \kappa$, and $\mathcal S$ is a $\Lambda$-system such that $\mathrm{width}(\mathcal S) < \kappa$. Then $\mathcal S$ has a cofinal branch. \end{proposition} \begin{proof} The proof is essentially the same as that of Proposition \ref{finite_width_prop} and is thus mostly left to the reader. We remark only that, due to the fact that $d_\Lambda \geq \kappa$, the filter $\mathcal F := \{u^\uparrow \mid u \in \Lambda\}$ is $\kappa$-complete and, since $\kappa$ is strongly compact, it can be extended to a $\kappa$-complete ultrafilter $\mathcal U$ over $\Lambda$. The rest of the proof is precisely as in Proposition \ref{finite_width_prop}. \end{proof} As mentioned already, classical narrow systems were introduced by Magidor and Shelah in the context of the study of the tree property at successors of singular cardinals; their first application came in the proof that, if $\mu$ is a singular limit of strongly compact cardinals, then the tree property holds at $\mu^+$ \cite[Theorem 3.1]{magidor_shelah}. To help get a feel for the utility of narrow $\Lambda$-systems, we present here the analogous result in the more general setting. We first need to recall the notion of a $\kappa$-$\Lambda$-tree for an arbitrary directed partial order $\Lambda$.
\begin{definition}[\cite{kurepa_paper}] Let $\Lambda$ be a directed partial order. A \emph{$\Lambda$-tree} is a structure $\mathcal T = (\langlengle T_u \mid u \in \Lambda \ranglengle, <_{\mathcal T})$ such that the following conditions all hold. \begin{compactenum}[(i)] \item $\langlengle T_u \mid u \in \Lambda \ranglengle$ is a sequence of nonempty, pairwise disjoint sets. \item $<_{\mathcal T}$ is a transitive partial ordering on $\bigcup_{u \in \Lambda} T_u$. \item For all $u,v \in \Lambda$, all $s \in T_u$, and all $t \in T_v$, if $s <_{\mathcal T} t$, then $u <_\Lambda v$. \item $<_{\mathcal T}$ is \emph{tree-like}, i.e., for all $u <_\Lambda v <_\Lambda w$, all $r \in T_u$, all $s \in T_v$ and all $t \in T_w$, if $r, s <_{\mathcal T} t$, then $r <_{\mathcal T} s$. \item For all $u \leq_\Lambda v$ in $\Lambda$ and all $t \in T_v$, there is a unique $s \in T_u$, denoted $t \restriction u$, such that $s \leq_{\mathcal T} t$. \end{compactenum} For a cardinal $\kappa$, we say that $\mathcal T$ is a $\kappa$-$\Lambda$-tree if, in addition to the above requirements, we have $|T_u| < \kappa$ for all $u \in \Lambda$. Suppose that $\mathcal T$ is a $\Lambda$-tree. A \emph{cofinal branch} through $\mathcal T$ is a function $b \in \prod_{u \in \Lambda} T_u$ such that, for all $u <_\Lambda v$ in $\Lambda$, we have $b(u) <_{\mathcal T} b(v)$. The \emph{$(\kappa, \Lambda)$-tree property}, denoted $\bb{T}P_\kappa(\Lambda)$, is the assertion that every $\kappa$-$\Lambda$-tree has a cofinal branch. We let $\bb{T}P(\Lambda)$ denote $\bb{T}P_{d_\Lambda}(\Lambda)$. \end{definition} \begin{theorem} \langlebel{strongly_compact_tp_thm} Suppose that $\mu$ is a singular limit of strongly compact cardinals and $\Lambda$ is a $\mu^+$-directed partial order. Then $\bb{T}P_{\mu^+}(\Lambda)$ holds. 
\end{theorem} \begin{proof} Let $\theta := \mathrm{cf}(\mu)$, and let $\langle \mu_i \mid i < \theta \rangle$ be an increasing sequence of strongly compact cardinals, converging to $\mu$, with $\mu_0 > \theta$. Let $\mathcal T = \langle \langle T_u \mid u \in \Lambda \rangle, <_{\mathcal T} \rangle$ be a $\Lambda$-tree with $|T_u| \leq \mu$ for all $u \in \Lambda$. We first show that $\mathcal T$ has a narrow subsystem indexed by a cofinal subset of $\Lambda$. \begin{claim} There is a cofinal $\Gamma \subseteq \Lambda$ and, for each $u \in \Gamma$, a nonempty $S_u \subseteq T_u$ such that $\mathcal S := \langle \langle S_u \mid u \in \Gamma \rangle, \{<_{\mathcal S}\} \rangle$ is a narrow $\Gamma$-system, where $<_{\mathcal S}$ is the restriction of $<_{\mathcal T}$ to $\bigcup_{u \in \Gamma} S_u$. \end{claim} \begin{proof} For all $u \in \Lambda$, enumerate $T_u$ as $\langle t^u_\eta \mid \eta < \mu \rangle$ (with repetitions, if necessary). Fix an elementary embedding $j:V \rightarrow M$ witnessing that $\mu_0$ is $|\Lambda|$-strongly compact. In particular, we have \begin{itemize} \item $\mathrm{crit}(j) = \mu_0$; \item $j(\mu_0) > |\Lambda|$; \item there is $W \in M$ such that $W \subseteq j(\Lambda)$, $|W|^M < j(\mu_0)$, and $j``\Lambda \subseteq W$. \end{itemize} Let $j(\mathcal T) = \mathcal T' = \langle T'_v \mid v \in j(\Lambda) \rangle$. Since $|W|^M < j(\mu_0) < j(\mu^+)$ and $j(\Lambda)$ is $j(\mu^+)$-directed in $M$, we can find $z \in j(\Lambda)$ such that $w <_{j(\Lambda)} z$ for all $w \in W$; in particular, $j(u) <_{j(\Lambda)} z$ for all $u \in \Lambda$. Choose an arbitrary $t \in T'_z$. For each $u \in \Lambda$, enumerate $T'_{j(u)}$ as $\langle (t')^{j(u)}_\eta \mid \eta < j(\mu) \rangle$.
For each $u \in \Lambda$, there is $i_u < \theta$ and $\eta_u < j(\mu_{i_u})$ such that $(t')^{j(u)}_{\eta_u} <_{j(\mathcal T)} t$. Since $\Lambda$ is $\mu^+$-directed, we can find a fixed $i < \theta$ and a cofinal $\Gamma \subseteq \Lambda$ such that $i_u = i$ for all $u \in \Gamma$. Then, for all $u <_\Lambda v$, both in $\Gamma$, we have $(t')^{j(u)}_{\eta_u}, (t')^{j(v)}_{\eta_v} <_{j(\mathcal T)} t$, and hence $ (t')^{j(u)}_{\eta_u} <_{j(\mathcal T)} (t')^{j(v)}_{\eta_v}$. In particular, \[ M \models \exists \eta, \xi < j(\mu_i) \left[ (t')^{j(u)}_\eta <_{j(\mathcal T)} (t')^{j(v)}_\xi \right], \] as witnessed by $\eta = \eta_u$ and $\xi = \eta_v$. By elementarity, we have \[ V \models \exists \eta, \xi < \mu_i \left[ t^u_\eta <_{\mathcal T} t^v_\xi \right]. \] It is now readily verified that, if we let $S_u := \{t^u_\eta \mid \eta < \mu_i\}$ for all $u \in \Gamma$, then $\mathcal S$ as in the statement of the claim is indeed a narrow $\Gamma$-system: clauses (1)--(4) of Definition~\ref{system_def} are immediate, and clause (5) follows from the elementarity argument in the previous paragraph. \end{proof} Let $\mathcal S$ be as given by the claim, and let $i < \theta$ be such that $\mathrm{width}(\mathcal S) < \mu_i$. Then we can apply Proposition~\ref{strongly_compact_prop} with $\mu_i$ and $\Gamma$ in place of $\kappa$ and $\Lambda$, respectively, to conclude that $\mathcal S$ has a cofinal branch, $b \subseteq S$. This readily gives rise to a cofinal branch $b' \in \prod_{u \in \Lambda} T_u$ through $\mathcal T$: for each $u \in \Lambda$, find $v \in \Gamma$ such that $u \leq_\Lambda v$ and $b \cap S_v \neq \emptyset$. Let $s$ be the unique element of $b \cap S_v$, and then let $b'(u) := s \restriction u$.
\end{proof} \section{Subadditive colorings} \langlebel{subbadditive_sec} In this brief section, we highlight a connection between the narrow system properties introduced in the previous section and the existence of certain strongly unbounded subadditive colorings on arbitrary directed partial orders. Such colorings on \emph{ordinals} have been extensively studied and have proven to be useful in a variety of contexts (cf.\ \cite{knaster_iii}). Here we generalize the notion to arbitrary directed orders, show that instances of the narrow system property imply the nonexistence of certain strongly unbounded subadditive colorings, and then show that the nonexistence of certain strongly unbounded subadditive colorings can replace the narrow system property hypothesis in the statement of Theorem A. \begin{definition} Suppose that $\Lambda$ is a directed partial order and $\theta$ is an infinite regular cardinal. Let $c:\Lambda^{[2]} \rightarrow \theta$ be a function. \begin{enumerate} \item We say that $c$ is \emph{subadditive} if, for all triples $u <_\Lambda v <_\Lambda w$ from $\Lambda$, we have \begin{enumerate} \item $c(u,w) \leq \max\{c(u,v), c(v,w)\}$; and \item $c(u,v) \leq \max\{c(u,w), c(v,w)\}$. \end{enumerate} \item We say that $c$ is \emph{strongly unbounded} if, for every cofinal subset $G^{\mathrm{ps}}amma \subseteqseteq \Lambda$, $c``G^{\mathrm{ps}}amma^{[2]}$ is unbounded in $\theta$. \end{enumerate} \end{definition} \begin{proposition} Suppose that $\Lambda$ is a directed partial order, $\theta$ is an infinite regular cardinal, and $c:\Lambda^{[2]} \rightarrow \theta$ is a strongly unbounded subadditive function. Then there is a $\Lambda$-system with width $\theta$ and no cofinal branch. \end{proposition} \begin{proof} We will define a $\Lambda$-system $\mathcal S = \left \langlengle \langlengle S_u \mid u \in \Lambda \ranglengle, \mathcal R \right \ranglengle$. 
First, for each $u \in \Lambda$, let $S_u := \{u\} \times \theta$, and let $\mathcal R = \{R\}$ consist of a single relation. Now, for $(u,v) \in \Lambda^{[2]}$ and $i,j < \theta$, set $(u,i) <_R (v,j)$ if and only if $i = j$ and $c(u,v) \leq i$. The fact that $\mathcal S$ is a $\Lambda$-system follows from the subadditivity of $c$, and it is evident that $\mathrm{width}(\mathcal S) = \theta$. Now suppose for sake of contradiction that $\mathcal S$ has a cofinal branch, $b$. Then $b$ is necessarily of the form $\{(u,i) \mid u \in \Gamma\}$ for some fixed $i < \theta$ and some cofinal $\Gamma \subseteq \Lambda$. But then $c``\Gamma^{[2]} \subseteq i+1$, contradicting the fact that $c$ is strongly unbounded. \end{proof} \begin{corollary} \label{nsp_sub_cor} Suppose that $\Lambda$ is a directed partial order and $\mathsf{NSP}(\Lambda)$ holds. Then, for every infinite regular cardinal $\theta$ with $\theta^+ < d_\Lambda$, there does not exist a strongly unbounded subadditive coloring $c:\Lambda^{[2]} \rightarrow \theta$. \qed \end{corollary} We now show that the nonexistence of strongly unbounded subadditive colorings from $(\ensuremath{\mathscr{P}}_\kappa \mu^+)^{[2]}$ to $\mathrm{cf}(\mu)$ can be used in place of ${\sf cNSP}(\ensuremath{\mathscr{P}}_\kappa \mu^+)$ to yield the conclusion of Theorem~\ref{nsp_cp_thm}. \begin{theorem} \label{sub_cp_thm} Suppose that $\kappa < \mu$ are infinite cardinals such that \begin{itemize} \item $\mathrm{cf}(\mu) < \kappa$; and \item there does not exist a strongly unbounded subadditive coloring \[ c:(\ensuremath{\mathscr{P}}_\kappa \mu^+)^{[2]} \rightarrow \mathrm{cf}(\mu). \] \end{itemize} Then $\mathsf{CP}(\mathcal D)$ holds for every uniform, transitive $\mathrm{cf}(\mu)$-covering matrix for $\mu^+$. 
\end{theorem} \begin{proof} Let $\theta := \mathrm{cf}(\mu)$ and $\lambda := \mu^+$, and let $\mathcal D = \langle D(i, \beta) \mid i < \theta, ~ \beta < \lambda \rangle$ be a uniform, transitive $\theta$-covering matrix for $\lambda$. By Theorem~\ref{downward_coherence_thm}, $\mathcal D$ has the property that, for every $x \in \mathscr P_\kappa \lambda$, there is $\gamma_x < \lambda$ such that, for all $\beta \in [\gamma_x, \lambda)$, there is $i < \theta$ such that, for all $j \in [i, \theta)$, we have $x \cap D(j, \beta) = x \cap D(j, \gamma_x)$. For each $x \in \mathscr P_\kappa \lambda$ and each $j < \theta$, let $x_j := x \cap D(j, \gamma_x)$. Note that $x = \bigcup_{j < \theta} x_j$. \begin{claim} \label{agreement_claim} For all $(x,y) \in (\mathscr P_\kappa \lambda)^{[2]}$, there is $i < \theta$ such that, for all $j \in [i, \theta)$, we have $x_j = y_j \cap x$. \end{claim} \begin{proof} Fix $(x,y) \in (\mathscr P_\kappa \lambda)^{[2]}$, and let $\gamma := \max\{\gamma_x, \gamma_y\}$. Then, by definition of $x_j$ and $y_j$ and the choice of $\gamma_x$ and $\gamma_y$, there is $i < \theta$ such that, for all $j \in [i, \theta)$, we have $x_j = x \cap D(j, \gamma)$ and $y_j = y \cap D(j, \gamma)$. But then, for all $j \in [i, \theta)$, we have $y_j \cap x = D(j, \gamma) \cap x = x_j$, as desired. \end{proof} Now define a function $c:(\ensuremath{\mathscr{P}}_\kappa \lambda)^{[2]} \rightarrow \theta$ by letting $c(x,y)$ be the least $i < \theta$ as in Claim \ref{agreement_claim} for all $(x,y) \in (\mathscr P_\kappa \lambda)^{[2]}$. \begin{claim} $c$ is subadditive. \end{claim} \begin{proof} Fix $x \subsetneq y \subsetneq z$ in $\mathscr P_\kappa\lambda$, and fix $j < \theta$. First, if $j \geq \max\{c(x,y), c(y,z)\}$, then $z_j \cap y = y_j$ and $y_j \cap x = x_j$. It follows that $z_j \cap x = x_j$, and from this we can conclude that $c(x,z) \leq \max\{c(x,y), c(y,z)\}$. 
Second, if $j \geq \max\{c(x,z), c(y,z)\}$, then $z_j \cap y = y_j$ and $z_j \cap x = x_j$. It then follows that $y_j \cap x = (z_j \cap y) \cap x = z_j \cap x = x_j$, and again we can conclude that $c(x,y) \leq \max\{c(x,z), c(y,z)\}$. Therefore, $c$ is subadditive. \end{proof} By assumption, $c$ cannot be strongly unbounded. Therefore, there is a $\subseteq$-cofinal $X \subseteq \mathscr P_\kappa \lambda$ and an $i < \theta$ such that $c(x,y) \leq i$ for all $x \subsetneq y$ in $X$. \begin{claim} For all $j \in [i, \theta)$ and all $x, y \in X$, we have $x_j \cap y = y_j \cap x$. \end{claim} \begin{proof} Fix such $j$, $x$, and $y$, and find $z \in X$ such that $x \cup y \subseteq z$. Then $x_j = z_j \cap x$ and $y_j = z_j \cap y$, so $x_j \cap y = (z_j \cap x) \cap y = (z_j \cap y) \cap x = y_j \cap x$. \end{proof} For all $j \in [i, \theta)$, let $A_j = \bigcup_{x \in X} x_j$. It follows immediately from the previous claim that, for all $x \in X$, we have $A_j \cap x = x_j$. \begin{claim} There is $j \in [i, \theta)$ such that $A_j$ is unbounded in $\lambda$. \end{claim} \begin{proof} If not, then, for every $j \in [i, \theta)$, there would be $\beta_j < \lambda$ such that $A_j \subseteq \beta_j$. Let $\beta := \sup\{\beta_j \mid j \in [i, \theta)\} < \lambda$, and find $x \in X$ such that $\beta \in x$. Then, for all large enough $j < \theta$, we must have $\beta \in x_j$ and hence $\beta \in A_j$, contradicting the fact that $A_j \subseteq \beta_j \subseteq \beta$. \end{proof} Fix $j \in [i, \theta)$ such that $A_j$ is unbounded in $\lambda$. We claim that $A_j$ witnesses $\mathsf{CP}(\mathcal D)$. To this end, fix $w \in [A_j]^\theta$. Let $x \in X$ be such that $w \subseteq x$. Then $w \subseteq A_j \cap x = x_j \subseteq D(j, \gamma_x)$, so $[A_j]^\theta$ is indeed covered by $\mathcal D$, as desired. 
\end{proof} \begin{corollary} \label{sub_ssh_cor} Suppose that $\kappa \geq \omega_2$ is a regular cardinal and, for every singular cardinal $\mu > \kappa$ of countable cofinality, there does not exist a strongly unbounded subadditive coloring $c:(\ensuremath{\mathscr{P}}_\kappa \mu^+)^{[2]} \rightarrow \omega$. Then $\sf SSH$ holds above $\kappa$. \end{corollary} \begin{proof} By Theorem \ref{sub_cp_thm}, the hypothesis implies that, for every singular cardinal $\mu > \kappa$ of countable cofinality and every uniform, transitive $\omega$-covering matrix $\mathcal D$ for $\mu^+$, we have $\mathsf{CP}(\mathcal D)$. Then $\sf SSH$ above $\kappa$ follows exactly as in the proof of Theorem A at the end of Section \ref{concrete_sec}. \end{proof} \section{A preservation lemma} \label{preservation_sec} The remainder of the paper is dedicated to the proof of Theorem B, our global consistency result. In this section, we prove a technical preservation lemma indicating that if a sufficiently closed forcing adds a rich set of branches to a narrow $\Lambda$-system, then that system necessarily has a cofinal branch in the ground model. The lemma is a generalization of \cite[Lemma 4.3]{narrow_systems} (which itself is a slight improvement on a previous result of Sinapova \cite[Theorem 14]{sinapova_tp}) from the context of classical (ordinal-indexed) narrow systems to the context of narrow $\Lambda$-systems for arbitrary directed orders $\Lambda$. We first need a preliminary definition. \begin{definition} Suppose that $\Lambda$ is a directed partial order and \[ \mathcal S = \left \langle \langle S_u \mid u \in \Lambda \rangle, \mathcal R \right \rangle \] is a $\Lambda$-system with $\mathrm{width}(\mathcal S) = \theta$. 
Then a \emph{full set of branches in $\mathcal S$} is a set $\{b_i \mid i < \theta\}$ such that \begin{itemize} \item for all $i < \theta$, $b_i$ is a branch in $\mathcal S$; \item for all $u \in \Lambda$, there is $i < \theta$ such that $b_i \cap S_u \neq \emptyset$. \end{itemize} \end{definition} \begin{proposition} \label{full_cofinal_prop} Suppose that $\Lambda$ is a directed partial order, \[ \mathcal S = \left \langle \langle S_u \mid u \in \Lambda \rangle, \mathcal R \right \rangle \] is a $\Lambda$-system with $\mathrm{width}(\mathcal S) = \theta < d_\Lambda$, and $\{b_i \mid i < \theta\}$ is a full set of branches in $\mathcal S$. Then there is $i < \theta$ such that $b_i$ is a cofinal branch in $\mathcal S$. \end{proposition} \begin{proof} Suppose not. Then, for every $i < \theta$, there is $u_i \in \Lambda$ such that $b_i \cap S_v = \emptyset$ for all $v \in u_i^\uparrow$. Since $d_\Lambda > \theta$, we can find $u^* \in \Lambda$ such that $u_i \leq_{\Lambda} u^*$ for all $i < \theta$. Since $\Lambda$ has no maximal element, $(u^*)^\uparrow \neq \emptyset$. However, for all $v \in (u^*)^\uparrow$ and all $i < \theta$, we have $b_i \cap S_v = \emptyset$, contradicting the fact that $\{b_i \mid i < \theta\}$ is a full set of branches. \end{proof} We are now ready for the main preservation lemma. \begin{lemma} \label{preservation_lemma} Suppose that $\Lambda$ is a directed partial order, $\mathcal S$ is a narrow $\Lambda$-system, $\theta = \mathrm{width}(\mathcal S)$, $\mathbb{P}$ is a $\theta^+$-closed forcing poset, and \[ \Vdash_{\mathbb{P}}``\text{there is a full set of branches in } \mathcal S". \] Then, in $V$, there is a cofinal branch in $\mathcal S$. \end{lemma} \begin{proof} Suppose for sake of contradiction that there is no cofinal branch in $\mathcal S$. By assumption, we can fix $\mathbb{P}$-names $\{\dot{b}_i \mid i < \theta\}$ such that \[ \Vdash_{\mathbb{P}}``\{\dot{b}_i \mid i < \theta\} \text{ is a full set of branches in } \mathcal S". 
\] Using the $\theta^+$-closure of $\mathbb{P}$, construct a decreasing sequence $\langle p_i \mid i < \theta \rangle$ of conditions in $\mathbb{P}$ such that, for each $i < \theta$: \begin{itemize} \item there is $R_i \in \mathcal R$ such that $p_i \Vdash_{\mathbb{P}} ``\dot{b}_i \text{ is a branch through } R_i"$; \item $p_i$ decides the truth value of the statement $``\dot{b}_i \text{ is a cofinal branch in } \mathcal S"$; \item if $p_i \Vdash_{\mathbb{P}} ``\dot{b}_i \text{ is not a cofinal branch}"$, then there is $u_i \in \Lambda$ such that $p_i \Vdash_{\mathbb{P}} ``\forall v \in u_i^\uparrow ~ (\dot{b}_i \cap S_v = \emptyset)"$. \end{itemize} Again using the $\theta^+$-closure of $\mathbb{P}$, let $p^*$ be a lower bound for $\langle p_i \mid i < \theta \rangle$. Let \[ A := \{i < \theta \mid p_i \Vdash_{\mathbb{P}}``\dot{b}_i \text{ is a cofinal branch}"\} \] and, using the fact that $d_\Lambda > \theta$, find a $u^* \in \Lambda$ such that $u_i \leq_\Lambda u^*$ for all $i \in \theta \setminus A$. Note that, by Proposition \ref{full_cofinal_prop}, it must be the case that $A \neq \emptyset$. \begin{claim} \label{splitting_claim_1} Suppose that $p \leq_{\mathbb{P}} p^*$ and $i \in A$. Then there are $q_0, q_1 \leq_{\mathbb{P}} p$ and $x_0, x_1 \in S$ such that \begin{enumerate} \item for $\varepsilon < 2$, $q_\varepsilon \Vdash_{\mathbb{P}} ``x_\varepsilon \in \dot{b}_i"$; \item $x_0 \perp_{R_i} x_1$. \end{enumerate} \end{claim} \begin{proof} Suppose not, and let $p$ and $i$ form a counterexample. Let \[ b := \{x \in S \mid \exists q \leq_{\mathbb{P}} p ~ (q \Vdash_{\mathbb{P}}``x \in \dot{b}_i")\}. \] We claim that $b$ is a cofinal branch through $R_i$ in $\mathcal S$. Since $i \in A$ and $p \leq_{\mathbb{P}} p_i$, it is immediate that $\{u \in \Lambda \mid b \cap S_u \neq \emptyset\}$ is cofinal in $\Lambda$. Also, for all $x,y \in b$, our assumption that $p$ and $i$ form a counterexample to the claim implies that $x \parallel_{R_i} y$. 
Thus, $b$ is a cofinal branch in $\mathcal S$, contradicting our assumption that no such branch exists. \end{proof} \begin{claim} \label{splitting_claim_2} Suppose that $p_0, p_1 \leq_{\mathbb{P}} p^*$ and $i \in A$. Then there are $q_0 \leq_{\mathbb{P}} p_0$, $q_1 \leq_{\mathbb{P}} p_1$, and $x_0, x_1 \in S$ such that \begin{enumerate} \item for $\varepsilon < 2$, $q_\varepsilon \Vdash_{\mathbb{P}} ``x_\varepsilon \in \dot{b}_i"$; \item $x_0 \perp_{R_i} x_1$. \end{enumerate} \end{claim} \begin{proof} First apply Claim \ref{splitting_claim_1} to obtain $q_{0,0},q_{0,1} \leq_{\mathbb{P}} p_0$ and $x_{0,0}, x_{0,1} \in S$ such that $q_{0, \varepsilon} \Vdash_{\mathbb{P}} ``x_{0, \varepsilon} \in \dot{b}_i"$ for $\varepsilon < 2$ and $x_{0,0} \perp_{R_i} x_{0,1}$. Then find $q_1 \leq_{\mathbb{P}} p_1$ and $x_1 \in S$ such that $\precl(x_{0,0}), \precl(x_{0,1}) \leq_{\Lambda} \precl(x_1)$ and $q_1 \Vdash_{\mathbb{P}}``x_1 \in \dot{b}_i"$. It cannot be the case that $x_1$ is $R_i$-compatible with both $x_{0,0}$ and $x_{0,1}$, as otherwise $x_1$ would witness that $x_{0,0}$ and $x_{0,1}$ are $R_i$-compatible. Therefore, we can fix $\varepsilon < 2$ such that $x_{0,\varepsilon} \perp_{R_i} x_1$. Let $q_0 := q_{0,\varepsilon}$ and $x_0 := x_{0, \varepsilon}$. Then $q_0$, $q_1$, $x_0$, and $x_1$ are as desired. \end{proof} \begin{claim} \label{splitting_claim_3} Suppose that $p \leq p^*$. Then there are $q_0, q_1 \leq_{\mathbb{P}} p$ and $\{x^i_\varepsilon \mid i \in A, ~ \varepsilon < 2\} \subseteq S$ such that \begin{enumerate} \item for every $i \in A$ and $\varepsilon < 2$, we have $q_\varepsilon \Vdash_{\mathbb{P}}``x^i_\varepsilon \in \dot{b}_i"$; \item for every $i \in A$, we have $x^i_0 \perp_{R_i} x^i_1$. 
\end{enumerate} \end{claim} \begin{proof} We recursively build two decreasing sequences $\langle q_{0, i} \mid i < \theta \rangle$ and $\langle q_{1, i} \mid i < \theta \rangle$ from $\mathbb{P}$, together with elements $\{x^i_\varepsilon \mid i \in A, ~ \varepsilon < 2\}$ as follows. First, let $q_{0,0} = q_{1,0} = p$. If $j < \theta$ is a limit ordinal, $\varepsilon < 2$, and we have defined $\langle q_{\varepsilon, i} \mid i < j \rangle$, then let $q_{\varepsilon, j}$ be any lower bound for $\langle q_{\varepsilon, i} \mid i < j \rangle$. If $i \in \theta \setminus A$, $\varepsilon < 2$, and $q_{\varepsilon, i}$ has been defined, then simply let $q_{\varepsilon, i+1} = q_{\varepsilon, i}$. Finally, suppose that $i \in A$ and we have defined $\langle q_{0, j} \mid j \leq i \rangle$ and $\langle q_{1,j} \mid j \leq i \rangle$. Then apply Claim \ref{splitting_claim_2} to $q_{0,i}$, $q_{1,i}$, and $i$ to obtain $q_{0, i+1} \leq_{\mathbb{P}} q_{0,i}$, $q_{1, i+1} \leq_{\mathbb{P}} q_{1,i}$, and $x^i_0, x^i_1 \in S$ such that \begin{itemize} \item for $\varepsilon < 2$, $q_{\varepsilon, i+1} \Vdash_{\mathbb{P}}``x^i_\varepsilon \in \dot{b}_i"$; \item $x^i_0 \perp_{R_i} x^i_1$. \end{itemize} At the end of the construction, for each $\varepsilon < 2$, let $q_\varepsilon$ be a lower bound for $\langle q_{\varepsilon, i} \mid i < \theta \rangle$. Then $q_0$, $q_1$, and $\{x^i_\varepsilon \mid i \in A, ~ \varepsilon < 2\}$ are as desired. \end{proof} Now use Claim \ref{splitting_claim_3} and the closure of $\mathbb{P}$ to recursively build a tree of conditions $\{ p_\sigma \mid \sigma \in {^{<\theta}}2\}$ and elements $\{x^{\sigma, i}_\varepsilon \mid \sigma \in {^{<\theta}}2, ~ i \in A, ~ \varepsilon < 2\}$ of $S$ as follows. We will maintain the hypothesis that, for all $\tau, \sigma \in {^{<\theta}}2$, if $\tau$ is an initial segment of $\sigma$, then $p_\sigma \leq_{\mathbb{P}} p_\tau$. Let $p_\emptyset := p^*$. 
If $\alpha < \theta$ is a limit ordinal, $\sigma \in {^{\alpha}}2$, and $p_{\sigma \restriction \xi}$ has been defined for every $\xi < \alpha$, then let $p_\sigma$ be any lower bound for $\langle p_{\sigma \restriction \xi} \mid \xi < \alpha \rangle$. If $\sigma \in {^{<\theta}}2$ and $p_\sigma$ has been defined, then apply Claim \ref{splitting_claim_3} to find $p_{\sigma ^\frown \langle 0 \rangle}, p_{\sigma ^\frown \langle 1 \rangle} \leq p_\sigma$, and $\{x^{\sigma, i}_\varepsilon \mid i \in A, ~ \varepsilon < 2\} \subseteq S$ such that \begin{enumerate} \item for every $i \in A$ and $\varepsilon < 2$, we have $p_{\sigma ^\frown \langle \varepsilon \rangle} \Vdash_{\mathbb{P}}``x^{\sigma, i}_\varepsilon \in \dot{b}_i"$; \item for every $i \in A$, we have $x^{\sigma, i}_0 \perp_{R_i} x^{\sigma, i}_1$. \end{enumerate} For each $f \in {^{\theta}}2$, let $p_f$ be a lower bound for $\langle p_{f \restriction \alpha} \mid \alpha < \theta \rangle$. Choose $B \subseteq {^{\theta}}2$ with $|B| = \theta^+$, and use the fact that $d_{\Lambda} > \theta^+$ to find $v \in \Lambda$ such that $\precl(x^{f \restriction \alpha, i}_\varepsilon) <_{\Lambda} v$ for all $f \in B$, $\alpha < \theta$, $i \in A$, and $\varepsilon < 2$. We can also assume that $u^* <_\Lambda v$. For each $f \in B$, use the fact that $\{\dot{b}_i \mid i < \theta\}$ is forced to be a full set of branches in $\mathcal S$ to find a $q_f \leq_{\mathbb{P}} p_f$, an $i_f < \theta$, and an $x_f \in S_v$ such that $q_f \Vdash_{\mathbb{P}}``x_f \in \dot{b}_{i_f}"$. Since $u^* <_\Lambda v$ and each $q_f$ extends $p^*$, it must be the case that $i_f \in A$ for all $f \in B$. Since $|B| = \theta^+ > \mathrm{width}(\mathcal S)$, we can find distinct $f,g \in B$, $i \in A$, and $x \in S$ such that $i_f = i_g = i$ and $x_f = x_g = x$. 
Let $\alpha^* < \theta$ be the least $\alpha$ such that $f(\alpha) \neq g(\alpha)$, and let $\sigma := f \restriction \alpha^* = g \restriction \alpha^*$. Without loss of generality, assume that $f(\alpha^*) = 0$ and $g(\alpha^*) = 1$. Then $q_f \leq_{\mathbb{P}} p_{\sigma ^\frown \langle 0 \rangle}$, and therefore $q_f \Vdash_{\mathbb{P}}``x^{\sigma, i}_0 \in \dot{b}_i"$. Similarly, $q_g \Vdash_{\mathbb{P}}``x^{\sigma, i}_1 \in \dot{b}_i"$. Since both $q_f$ and $q_g$ extend $p_i$ and force $x$ to be in $\dot{b}_i$, and since $\precl(x^{\sigma, i}_0), \precl(x^{\sigma, i}_1) <_\Lambda v = \precl(x)$, it must be the case that $x^{\sigma, i}_0 <_{R_i} x$ and $x^{\sigma, i}_1 <_{R_i} x$, contradicting the fact that $x^{\sigma, i}_0 \perp_{R_i} x^{\sigma, i}_1$. \end{proof} \section{A global consistency result} \label{consistency_sec} We are finally ready to prove our consistency result. For organizational reasons, it will be helpful to have the following definition. \begin{definition} For every infinite regular cardinal $\kappa$, we say that $\kappa$ has the \emph{strong narrow system property}, denoted $\mathsf{SNS}P_\kappa$, if, for every directed partial order $\Lambda$ with $d_\Lambda \geq \kappa$, every $\Lambda$-system $\mathcal S$ with $\mathrm{width}(\mathcal{S})^+ < \kappa$ has a cofinal branch. \end{definition} \begin{theorem} \label{single_collapse_theorem} Let $\mu < \kappa$ be regular uncountable cardinals, with $\kappa$ supercompact, and let $\mathbb{P} := \mathrm{Coll}(\mu, {<}\kappa)$. Then, in $V^{\mathbb{P}}$, $\mathsf{SNS}P_\kappa$ holds and moreover is indestructible under $\kappa$-directed closed set forcing. \end{theorem} \begin{proof} Let $G$ be $\mathbb{P}$-generic over $V$. 
Since trivial forcing is $\kappa$-directed closed, it suffices to prove that, if $\mathbb{Q}$ is a $\kappa$-directed closed set forcing in $V[G]$ and $H$ is $\mathbb{Q}$-generic over $V[G]$, then $\mathsf{SNS}P_\kappa$ holds in $V[G][H]$. To this end, fix a $\kappa$-directed closed $\mathbb{Q} \in V[G]$ and a $\mathbb{Q}$-generic filter $H$ over $V[G]$. In $V[G][H]$, let $\Lambda$ be a $\kappa$-directed partial order, and let $\mathcal S = \left \langle \langle S_u \mid u \in \Lambda \rangle, \mathcal R \right \rangle$ be a $\Lambda$-system with $\mathrm{width}(\mathcal S) < \mu$. For concreteness, assume that the underlying sets of both $\mathbb{Q}$ and $\Lambda$ are ordinals. We will show that, in $V[G][H]$, there is a cofinal branch in $\mathcal S$. By Proposition \ref{single_relation_prop}, we can assume that $\mathcal S$ has a single relation, which we will denote by $R$. In $V$, let $\dot{\mathbb{Q}}$ be a $\mathbb{P}$-name for $\mathbb{Q}$, and let $\dot{\Lambda}$ be a $\mathbb{P} \ast \dot{\mathbb{Q}}$-name for $\Lambda$. Fix a cardinal $\delta > \kappa$ such that $|\ensuremath{\mathscr{P}}(\mathbb{P} \ast \dot{\mathbb{Q}})| < \delta$ and $\Vdash_{\mathbb{P} \ast \dot{\mathbb{Q}}}``|\dot{\Lambda}| < \delta"$, and let $j:V \rightarrow M$ be an elementary embedding witnessing that $\kappa$ is $\delta$-supercompact, i.e., $\mathrm{crit}(j) = \kappa$, $j(\kappa) > \delta$, and ${^{\delta}}M \subseteq M$. We have $j(\mathbb{P}) = \mathrm{Coll}(\mu, {<}j(\kappa))$ so, by \cite[Lemma 3]{magidor_reflecting} (cf.\ also \cite[Fact 6.11]{cfm}), the natural complete embedding $\iota$ of $\mathbb{P}$ into $j(\mathbb{P})$ can be extended to a complete embedding $\iota'$ of $\mathbb{P} \ast \dot{\mathbb{Q}}$ into $j(\mathbb{P})$ in such a way that the quotient forcing $j(\mathbb{P})/\iota'[\mathbb{P} \ast \dot{\mathbb{Q}}]$ is $\mu$-closed. Let $\dot{\mathbb{R}}$ be a $\mathbb{P} \ast \dot{\mathbb{Q}}$-name for this quotient forcing. 
We then have $j(\mathbb{P}) \cong \mathbb{P} \ast \dot{\mathbb{Q}} \ast \dot{\mathbb{R}}$, and $\dot{\mathbb{R}}$ is forced by $\mathbb{P} \ast \dot{\mathbb{Q}}$ to be $\mu$-closed. Let $\mathbb{R}$ be the realization of $\dot{\mathbb{R}}$ in $V[G][H]$, and let $K$ be an $\mathbb{R}$-generic filter over $V[G][H]$. Since, for all $p \in \mathbb{P}$, $j(p) = p$, which is naturally identified with $(p, 1_{\dot{\mathbb{Q}}}, 1_{\dot{\mathbb{R}}})$ in $\mathbb{P} \ast \dot{\mathbb{Q}} \ast \dot{\mathbb{R}}$, we have $j``G \subseteq G \ast H \ast K$, so, in $V[G][H][K]$, we can extend $j$ to $j:V[G] \rightarrow M[G][H][K]$. By the closure of $M$, we know that $j``H \in M[G][H][K]$. Moreover, in that model, $j``H$ is a directed subset of $j(\mathbb{Q})$ with $|j``H| < \delta < j(\kappa)$, and $j(\mathbb{Q})$ is $j(\kappa)$-directed closed. We can therefore find $q^* \in j(\mathbb{Q})$ such that $q^* \leq_{j(\mathbb{Q})} j(q)$ for all $q \in H$. Let $H^+$ be $j(\mathbb{Q})$-generic over $V[G][H][K]$ with $q^* \in H^+$. Then $j``H \subseteq H^+$, so, in $V[G][H][K][H^+]$, we can extend $j$ one last time to $j:V[G][H] \rightarrow M[G][H][K][H^+]$. Again by the closure of $M$, we have $j``\Lambda \in M[G][H][K][H^+]$. Moreover, in that model, $j``\Lambda$ is a subset of $j(\Lambda)$ with $|j``\Lambda| < \delta < j(\kappa)$, and $j(\Lambda)$ is $j(\kappa)$-directed. We can therefore find $v^* \in j(\Lambda)$ such that $j(u) <_{j(\Lambda)} v^*$ for all $u \in \Lambda$. Let $\theta := \mathrm{width}(\mathcal S)$. Since $\theta < \mu$, we have $\theta = j(\theta) = \mathrm{width}(j(\mathcal S))$. Write $j(\mathcal S)$ as $\left \langle \langle S'_v \mid v \in j(\Lambda) \rangle, \{j(R)\} \right \rangle$. Enumerate $S'_{v^*}$ as $\langle y_i \mid i < \theta \rangle$, with repetitions if $|S'_{v^*}| < \theta$. For each $i < \theta$, let $b_i := \{x \in S \mid j(x) <_{j(R)} y_i\}$. We claim that $\{b_i \mid i < \theta\}$ is a full set of branches in $\mathcal S$. Let us first verify that each $b_i$ is a branch in $\mathcal S$. 
To this end, fix $i < \theta$ and $x,y \in b_i$. Then, in $j(\mathcal S)$, we have $j(x), j(y) <_{j(R)} y_i$, and hence $j(x) \parallel_{j(R)} j(y)$. By elementarity, we have $x \parallel_{R} y$, as desired. We next verify that, for all $u \in \Lambda$, there is $i < \theta$ such that $b_i \cap S_u \neq \emptyset$. To this end, fix $u \in \Lambda$. Since $j(u) <_{j(\Lambda)} v^*$, clause \ref{completeness_clause} of Definition \ref{system_def} implies that there are $i < \theta$ and $w \in S'_{j(u)}$ such that $w <_{j(R)} y_i$. Since $|S_u| \leq \theta < \kappa$, we have $S'_{j(u)} = j``S_u$, so we can find $x \in S_u$ such that $j(x) = w$. Then $x \in b_i \cap S_u$. We have thus shown that $\{b_i \mid i < \theta\} \in V[G][H][K][H^+]$ is a full set of branches in $\mathcal S$. Therefore, since $\theta^+ \leq \mu$, we can apply Lemma \ref{preservation_lemma} in $V[G][H]$ to $\mathcal S$ and the $\mu$-closed poset $\mathbb{R} \ast j(\mathbb{Q})$ to conclude that, in $V[G][H]$, there is a cofinal branch in $\mathcal S$, thus completing the proof of the theorem. \end{proof} \begin{remark} \label{cnsp_tp_remark} Theorem \ref{single_collapse_theorem} provides a way of verifying our earlier claim from Remark \ref{remark_23} that, in general ${\sf cNSP}_\kappa$ does not imply $\mathsf{TP}_\kappa$. For example, if $\kappa$ is supercompact and $\mathbb{P} = \mathrm{Coll}(\omega_1, {<}\kappa)$, then, in $V^{\mathbb{P}}$, we have $\kappa = \aleph_2$, and Theorem \ref{single_collapse_theorem} implies that ${\sf cNSP}_\kappa$ holds. On the other hand, $\mathsf{CH}$ holds in $V^{\mathbb{P}}$, so there exists an $\aleph_2$-Aronszajn tree, and hence even $\mathsf{TP}(\kappa, \kappa)$ fails. \end{remark} Note that the assertion ``$\mathsf{SNS}P_\kappa$ holds for every infinite regular cardinal $\kappa$" is equivalent to the assertion ``$\mathsf{NSP}(\Lambda)$ holds for every directed partial order $\Lambda$". The following therefore yields Theorem B. 
\begin{theorem} \label{global_theorem} Suppose that there is a proper class of supercompact cardinals. Then there is a class forcing extension in which $\mathsf{SNS}P_\kappa$ holds for every infinite regular cardinal $\kappa$. \end{theorem} \begin{proof} Let $\langle \kappa_\alpha \mid \alpha \in \mathrm{On} \rangle$ be an increasing, continuous sequence of cardinals such that \begin{itemize} \item $\kappa_0 = \aleph_0$; \item if $\alpha$ is a limit ordinal (including $0$), then $\kappa_{\alpha+1} = \kappa_\alpha^+$; \item if $\alpha$ is a successor ordinal, then $\kappa_{\alpha + 1}$ is supercompact. \end{itemize} We may assume that $\kappa_\alpha$ is singular for every nonzero limit ordinal $\alpha$; if not, then simply truncate the universe below $\kappa_\alpha$ for the least nonzero limit ordinal $\alpha$ such that $\kappa_\alpha$ is regular (and hence strongly inaccessible). We now force with a class-length iteration of L\'{e}vy collapses to turn each $\kappa_\alpha$ into $\aleph_\alpha$. More formally, recursively define posets $\langle \mathbb{P}_\alpha \mid \alpha \in \mathrm{On} \rangle$ as follows: \begin{itemize} \item $\mathbb{P}_0$ and $\mathbb{P}_1$ are trivial forcing; \item if $\alpha$ is a successor ordinal, then $\mathbb{P}_{\alpha+1} = \mathbb{P}_\alpha \ast \dot{\mathrm{Coll}} (\kappa_\alpha, {<}\kappa_{\alpha+1})$; \item if $\alpha$ is a nonzero limit ordinal, then $\mathbb{P}_\alpha$ is the inverse (i.e., full-support) limit of $\langle \mathbb{P}_\xi \mid \xi < \alpha \rangle$. \end{itemize} For ordinals $\xi < \alpha$, let $\dot{\mathbb{P}}_{\xi\alpha}$ be a $\mathbb{P}_\xi$-name for the quotient $\mathbb{P}_\alpha/\mathbb{P}_\xi$. 
Then $\dot{\mathbb{P}}_{\xi\alpha}$ is a name for a full-support iteration of L\'{e}vy collapses, each of which is forced to be $\kappa_\xi$-directed closed. It follows that $\dot{\mathbb{P}}_{\xi\alpha}$ is forced to be $\kappa_\xi$-closed. In particular, $(H(\kappa_\xi))^{V^{\mathbb{P}_\xi}} = (H(\kappa_\xi))^{V^{\mathbb{P}_\alpha}}$, so $V^{\mathbb{P}} := \bigcup_{\alpha \in \mathrm{On}} V^{\mathbb{P}_\alpha}$ is a model of $\sf ZFC$. Also, standard arguments show that, in $V^{\mathbb{P}}$, we have $\kappa_\alpha = \aleph_\alpha$ for all $\alpha \in \mathrm{On}$. We claim that $\mathsf{SNS}P_\kappa$ holds in $V^{\mathbb{P}}$ for every infinite regular cardinal $\kappa$. Note that the infinite regular cardinals in $V^{\mathbb{P}}$ are precisely the cardinals $\kappa_\alpha$ for which $\alpha$ is either $0$ or a successor ordinal. If $\alpha \leq 1$, then every system $\mathcal S$ such that $(\mathrm{width}(\mathcal{S}))^+ < \kappa_\alpha$ has finite width. It therefore follows from Proposition \ref{finite_width_prop} that $\mathsf{SNS}P_{\aleph_0}$ and $\mathsf{SNS}P_{\aleph_1}$ are true in $\sf ZFC$. We next note that, if $\alpha$ is a nonzero limit ordinal and $\mathcal S$ is a system such that $(\mathrm{width}(\mathcal S))^+ < \kappa_{\alpha + 1}$, then there must be a $\xi < \alpha$ such that $(\mathrm{width}(\mathcal S))^+ < \kappa_\xi$. Therefore, $\mathsf{SNS}P_{\kappa_{\alpha+1}}$ will follow from the conjunction of $\mathsf{SNS}P_{\kappa_{\xi}}$ for all $\xi < \alpha$. We are therefore left with the task of verifying $\mathsf{SNS}P_{\kappa_{\alpha+1}}$ for all successor ordinals $\alpha$. 
To this end, fix a successor ordinal $\alpha$ and fix in $V^{\mathbb{P}}$ a directed partial order $\Lambda$ with $d_{\Lambda} \geq \kappa_{\alpha+1}$ and a $\Lambda$-system $\mathcal S$ with $\mathrm{width}(\mathcal S)^+ < \kappa_{\alpha+1}$. In $V^{\mathbb{P}_\alpha}$, we have $\kappa_\alpha = \aleph_\alpha$ and, since $|\mathbb{P}_\alpha| < \kappa_{\alpha+1}$, we know that $\kappa_{\alpha+1}$ is still supercompact. Moreover, $\mathbb{P}_{\alpha,\alpha+1} = \mathrm{Coll}(\kappa_\alpha, {<}\kappa_{\alpha+1})$ so, by Theorem \ref{single_collapse_theorem}, $\mathsf{SNS}P_{\kappa_{\alpha+1}}$ holds in $V^{\mathbb{P}_{\alpha+1}}$ and every $\kappa_{\alpha+1}$-directed closed set forcing extension thereof. Let $\zeta > \alpha + 1$ be large enough so that $\Lambda$ and $\mathcal S$ are in $V^{\mathbb{P}_\zeta}$. In $V^{\mathbb{P}_{\alpha+1}}$, $\mathbb{P}_{\alpha+1, \zeta}$ is $\kappa_{\alpha+1}$-directed closed, so $\mathsf{SNS}P_{\kappa_{\alpha+1}}$ holds in $V^{\mathbb{P}_\zeta}$. In particular, $\mathcal S$ has a cofinal branch in $V^{\mathbb{P}_\zeta}$ and hence also in $V^{\mathbb{P}}$. \end{proof} We close the paper with what we feel are the most prominent remaining open questions. \begin{question} \label{open_q} Suppose that $\kappa \geq \omega_2$ is a regular cardinal. Does $\mathsf{TP}_\kappa$ imply ${\sf cNSP}_\kappa$? More specifically, if $\lambda \geq \kappa$ is a cardinal, does $\mathsf{TP}(\kappa, \lambda)$ imply ${\sf cNSP}(\ensuremath{\mathscr{P}}_\kappa \lambda)$? More generally, suppose that $\Lambda$ is a directed partial order with $d_\Lambda \geq \aleph_2$. Does $\mathsf{TP}(\Lambda)$ imply $\mathsf{NSP}(\Lambda)$? \end{question} By Theorem A, a positive answer to the first part of Question \ref{open_q} would entail a positive answer to Question \ref{sch_q} and therefore a negative answer to Question \ref{global_q}. 
On the other hand, a negative answer to Question \ref{open_q} would seem to require genuinely new ideas, since the known methods to verify $\mathsf{TP}(\kappa, \lambda)$ in practice inevitably yield ${\sf cNSP}(\ensuremath{\mathscr{P}}_\kappa \lambda)$, as well. \end{document}
\begin{document} \date{\today} \maketitle \begin{abstract} $2$-nondegenerate real hypersurfaces in complex manifolds play an important role in CR-geometry and the theory of Hermitian Symmetric Domains. In this paper, we construct a complete convergent normal form for everywhere $2$-nondegenerate real-analytic hypersurfaces in complex $3$-space. We do so by developing the homological approach of Chern-Moser in the $2$-nondegenerate setting. This seems to be the first such construction for hypersurfaces of {\em infinite Catlin multitype}. Our approach is based on using a {\em rational (nonpolynomial) model} for everywhere $2$-nondegenerate hypersurfaces, which is the local realization due to Fels-Kaup of the well known {\em tube over the light cone}. As an application, we obtain, in the spirit of Chern-Moser theory, a criterion for the {\em local sphericity} (i.e. local equivalence to the model) for a $2$-nondegenerate hypersurface in terms of its normal form. As another application, we obtain an explicit description of the moduli space of everywhere $2$-nondegenerate hypersurfaces. \end{abstract} \tableofcontents \section{Introduction} \subsection{Historic outline} Understanding invariants and symmetries of real hypersurfaces in complex space is one of the central goals in Several Complex Variables. The study of them was initiated in the seminal 1907 work of Poincar\'e \cite{poincare} who discovered the finite-dimensionality of the local automorphism group for nondegenerate real hypersurfaces, and further, the non-triviality of the holomorphic mapping problem (which is due to existence of {\em local holomorphic invariants} of hypersurfaces). 
Under the assumption of the {\varepsilonm Levi-nondegeneracy} of a real hypersurface, a complete picture of invariants and symmetries has been provided in the well known works of Cartan \cite{cartan}, Tanaka \cite{tanaka}, Chern and Moser \cite{chern}. The differential-geometric constructions of Cartan, Tanaka, and Chern are certain developments of Cartan's moving frame method. They provide a solution to the problem via presenting a canonical frame on a certain bundle associated with a hypersurface. Unlike that, Moser uses an approach inspired by Dynamical Systems and provides a {\varepsilonm convergent normal form} for a hypersurface. Such a normal form provides a distinguished choice of local holomorphic coordinates for a hypersurface, in which its defining equation is approximated "as close as possible"\, by that for the {\varepsilonm local quadratic model}: a real hyperquadric \begin{equation}\Label{quadric} \ensuremath{\mbox{\rm Im}\,} w=Q(z,\bar z),\quad (z,w)\in\CC{n}\times\CC{} \varepsilonnd{equation} (where $Q(z,\bar z)$ is a nondegenerate Hermitian form on $\CC{n}$). A biholomorphic transformation bringing a hypersurface to a normal form at a point is defined uniquely, up to the automorphism group of the model \varepsilonqref{quadric}. That is, Moser's normal form is {\varepsilonm complete}. Symmetries of a hypersurface can be read subsequently from the constructed normal form (see e.g. Beloshapka \cite{belold} and Kruzhilin-Loboda \cite{krlo}). The problem of describing invariants and symmetries of a real hypersurface appears to be much more difficult when a hypersurface is {\varepsilonm Levi-degenerate} at the reference point $p\in M$. A powerful approach here is coming from the notion of {\varepsilonm Catlin multitype} of a real hypersurface \cite{catlin} at a point, which is often used in the theory of subelliptic estimates. 
For hypersurfaces of finite Catlin multitype at a point, an approach of studying symmetries and normal forms (based on using higher degree polynomial models) was developed by Kolar-Meylan-Zaitsev \cite{kmz} (see also the subsequent work of Kolar-Meylan \cite{km}). It is also notable that a hypersurface of finite Catlin multitype is ``good'' in that it still contains Levi-nondegenerate points. More precisely, a generic point in a hypersurface is Levi-nondegenerate, and this allows to read a lot of information on the CR-structure in a neighborhood of the degeneracy point from the nearby nondegenerate points. Probably one of the most intriguing phenomena in CR-geometry is the existence of {\em everywhere Levi-degenerate} real hypersurfaces in complex space of dimension $3$ and higher, which still possess a {\em finite-dimensional} local automorphism group. The relevant nondegeneracy concepts for such hypersurfaces are due to Freeman \cite{freeman} ({\em Freeman sequences}), Stanton and Baouendi-Ebenfelt-Rothschild \cite{stanton2,beralg} ({\em holomorphic nondegeneracy}), and Baouendi-Huang-Rothschild \cite{bhr} ({\em finite nondegeneracy}). We refer to the book of Baouendi-Ebenfelt-Rothschild for precise definitions and further details. For the hypersurfaces under discussion, the situation changes dramatically compared to the finite multitype case: since there are no Levi-nondegenerate points in $M$ {\em at all}, Cartan-Tanaka-Chern-Moser invariants at nearby points {\em can not} be used anymore for describing invariants at a given point $p\in M$ of Levi-degeneracy. Besides that, everywhere $k$-nondegenerate hypersurfaces with $k\geq 2$ have {\em infinite} Catlin multitype. In this paper, we address an important class of infinite Catlin multitype hypersurfaces which is the class of {\em everywhere $2$-nondegenerate hypersurfaces}. 
The latter can be considered, in a certain sense, as the first possible nondegeneracy class within the category of infinite multitype hypersurfaces. For a hypersurface $M$ sitting in $\CC{N},\,N\geq 3$, the everywhere $2$-nondegeneracy means the validity of the following two conditions. First of all, the Levi form of $M$ has everywhere rank $N-2$ (and hence the distribution $K$ of pointwise Levi kernels $K_p\subset T^{\CC{}}_pM$ is well defined). Once the first condition is satisfied, the second one can be formulated in the language of local sections of the complex tangent bundle, namely, it requires: \begin{equation}\Label{2nondeg} [K,T^{\CC{}}M]\not\subset K. \end{equation} For an alternative definition of $2$-nondegeneracy we refer to \cite{bhr} and \cite{ber}. We note that hypersurfaces satisfying (on an open subset) the first condition but not the second are {\em holomorphically degenerate} and hence their automorphism groups are infinite-dimensional (see \cite{ber}). The simplest example of a $2$-nondegenerate hypersurface is the well known {\em tube over the light cone} \begin{equation}\Label{tube} C=\left\{(z_1,z_2,z_3)\in\CC{3}:\,\,y_1^2+y_2^2=y_3^2,\quad y_j=\ensuremath{\mbox{\rm Im}\,} z_j, \,\, y_3 \neq 0\right\}\subset\CC{3}. \end{equation} This hypersurface has everywhere Levi rank 1 (and hence a $1$-dimensional Levi kernel) and is everywhere $2$-nondegenerate. This hypersurface has the ``large'' but still finite-dimensional automorphism group $SO(3,2)$. Notably, it is {\em holomorphically homogeneous} (that is, its automorphism group acts transitively on it). 
The example of the hypersurface $C$ in \eqref{tube} motivated the long-lasting and somewhat intriguing study of the class $\mathcal C_{2,1}$ of real hypersurfaces $M\subset\CC{3}$ which are, in the same manner as $C$, {\em everywhere $2$-nondegenerate} (and hence have everywhere Levi rank one and a finite-dimensional local automorphism group, see e.g. \cite{ber}). Earlier work here includes the work of Ebenfelt \cite{ebenfeltC3}, where a partial (formal) normal form for real-analytic hypersurfaces of class $\mathcal C_{2,1}$ was obtained (notably, this normal form already provides a finite-dimensional reduction of the equivalence problem); the work of Ebenfelt \cite{ebenfeltduke}, where a canonical frame for a smooth $\mathcal C_{2,1}$ hypersurface $M$ was provided under certain additional assumptions; the work of Beloshapka \cite{belC3}, where the optimal dimension bound for the (full) automorphism group of a real-analytic $\mathcal C_{2,1}$ hypersurface was obtained; the work of Kaup-Zaitsev \cite{kaupzaitsev}, where the full automorphism group of the tube \eqref{tube} was finally established correctly (as a Lie group); the work of Fels-Kaup \cite{kaup2}, where the useful local rational model \begin{equation}\Label{cone} v=P(z,\zeta,\bar z,\bar\zeta),\quad \,\,\mbox{where}\,\,\,\,P(z,\zeta,\bar z,\bar\zeta):=\frac{|z|^2+\ensuremath{\mbox{\rm Re}\,}(z^2\bar\zeta)}{1-|\zeta|^2}, \quad (z,\zeta,w=u+iv)\in\CC{3}, \end{equation} for the tube \eqref{tube} was discovered and the automorphism group of it was subsequently computed explicitly; finally, the work of Fels-Kaup \cite{kaup} ({\em Acta Math., 2008}), where locally homogeneous hypersurfaces of class $\mathcal C_{2,1}$ in $\CC{3}$ were classified. 
More recent work here includes the work of Isaev-Zaitsev \cite{iz} where the equivalence problem for smooth ${\mathcal{M}}athcal C_{2,1}$ hypersurfaces was solved; an independent solution of the equivalence problem due to Medori-Spiro \cite{ms}; explicit CR-curvature computation for ${\mathcal{M}}athcal C_{2,1}$ hypersurfaces due to Pocchiola \cite{pocchiola}; the work of Beloshapka-Kossovskiy \cite{bk} on the parameterization of the stability group of a ${\mathcal{M}}athcal C_{2,1}$ hypersurface by that of the sphere $S^3\subset\CC{2}$; a development of the work \cite{iz} and \cite{ms} in complex dimension $4$ due to Porter-Zelenko \cite{zelenko}. Notably, in the cited work, the role of the tube over the light cone \varepsilonqref{tube} as the "model"\, hypersurface for the class ${\mathcal{M}}athcal C_{2,1}$ was clearly demonstrated. In particular, it follows that maps between (germs of) two hypersurfaces of class ${\mathcal{M}}athcal C_{2,1}$ are parameterized by the stability group of the model \varepsilonqref{tube}. We note that the tube over the light cone admits higher dimensional generalizations $$C_{k,l}=\left\{(z_1,...,z_{k+l}):\,\,y_1^2+\cdots y_k^2=y_{k+1}^2+\cdots+y_{k+l}^2,\,\,\, y_j=\ensuremath{\mbox{\rm Im}\,} z_j, \,\, y_1^2+\cdots y_k^2 \neq 0\right\}\subset\CC{k+l}$$ for $k,l\mathfrak{g}eq 1,\,\,k+l\mathfrak{g}eq 3.$ All the tubes $C_{k,l}$ have a Levi kernel of rank one everywhere, are everywhere $2$-nondegenerate and their full automorphism group is $SO(k+1,l+1)$ (see Fels and Kaup \cite{kaup}). The tubes $C_{k,l}$ are important in that they are boundaries of {\varepsilonm Hermitian Symmetric Domains}. There exist further classes of $2$-nondegenerate hypersurfaces related to Hermitian Symmetric Domains, for example, {\varepsilonm Lie spheres}. The latter fact further motivates the study of everywhere $2$-nondegenerate hypersurfaces and their mappings. 
We shall mention here recent results of Mir \cite{mirdef} and Kossovskiy-Lamel-Xiao \cite{klx} on the regularity of mappings into $2$-nondegenerate hypersurfaces, and their applications in studying proper holomorphic maps of Hermitian Symmetric Domains due to Xiao-Yuan \cite{xiao}. For more on the geometry of holomorphic maps between Hermitian Symmetric Domains, we refer to the Introduction in \cite{xiao} and references therein. \subsection{Overview of the results} The above mentioned substantial study of everywhere $2$-nondegenerate hypersurfaces still leaves completely open the following aspect of the problem: {\em extend Moser's normal form theory to the class of (real-analytic) everywhere $2$-nondegenerate hypersurfaces}. That is, we aim for {\em a complete convergent normal form} for real-analytic everywhere $2$-nondegenerate hypersurfaces. We shall explain here briefly the main difficulty in extending Moser's homological approach and obtaining a complete normal form. Namely, as follows from the infinite Catlin multitype property, there is {\em no} choice of positive weights for the coordinates in $\CC{3}$ relevant to everywhere $2$-nondegenerate hypersurfaces such that each hypersurface becomes a perturbation of a holomorphically nondegenerate polynomial model. In this way, $2$-nondegenerate hypersurfaces can {\em not} be treated in a manner of Chern-Moser (and subsequent work) for obtaining their complete normal form. The main goal of this paper is to provide a new technique for overcoming difficulties as above and, as an outcome, obtain the desired complete normal form for everywhere $2$-nondegenerate hypersurfaces in $\CC{3}$. The normal form, accordingly, is defined uniquely up to the stability group of the model: the tube over the light cone \eqref{tube} (or, equivalently, its local rational realization \eqref{cone}). 
Notably, we are able to present any hypersurface of class ${\mathcal{M}}athcal C_{2,1}$ as a perturbation of the rational model, and then set up, in the spirit of Moser, a {\varepsilonm homological procedure} for investigating maps between two hypersurfaces. The latter is accomplished by a choice of weights for the coordinates in $\CC{3}$ where the coordinate corresponding to the Levi kernel gets weight $0$. Somewhat surprisingly, even though this approach leads to natural difficulties (for example, "weighted polynomials"\, are not polynomials anymore but power series), we are still able to reduce the space of mappings between hypersurfaces to studying the kernel of an appropriate {\varepsilonm homological operator}. The kernel of the latter operator is precisely the automorphism algebra of the model (given below). We shall note that such approach shares certain distinctive traits with that due to Huang-Yin in \cite{hy} (where a certain non-standard choice of weights served as a key to deal with the infinite-dimensionality of the automorphism group of the weighted homogeneous model over there). Similarly to the situation of Chern-Moser, the convergence is achieved by employing {\varepsilonm chains}: distinguished curves in $M$, pointwise transverse to the complex tangent and being mapped by normalizing transformations into the standard "vertical"\, curve (see \varepsilonqref{Gamma} below). Since orbits of the linear part of the stabilizer do {\varepsilonm not} act transitively on transverse directions anymore (unlike the Levi-nondegenerate situation), possible directions of chains form in the tangent space a certain {\varepsilonm canonical cone} (given in appropriate coordinates by \varepsilonqref{cancone}). The construction of chains and their properties are discussed in detail in Section 3.2. 
We shall also mention that certain (degenerate) chains were previously used by Kossovskiy-Zaitsev in \cite{generic},\cite{cmhyper} for proving convergence of normal forms of finite type hypersurfaces (obtained by Kolar in \cite{kol05}). Chains for submanifolds of high codimension were also used in the work \cite{es} of Ezhov-Schmalz. We shall emphasize that, to the best of our knowledge, the present paper gives the first development of Moser's homological method to the class of infinite Catlin multitype hypersurfaces. As an application of our theory, we obtain, similarly to the situation in Chern-Moser's theory, a characterization of {\em sphericity} for an everywhere $2$-nondegenerate hypersurface (i.e., the property of being equivalent to the model). The sphericity then amounts to the pointwise vanishing of two specific coefficients in a normal form of the hypersurface at a point (see \autoref{main2} below). Finally, we are able to apply the normal form for describing explicitly the {\em moduli space} of (real-analytic) everywhere $2$-nondegenerate hypersurfaces, considered up to a local biholomorphic equivalence (\autoref{main3} below). A certain difficulty in applying the normal form for describing the moduli space is that our normal form space describes in fact a larger class of hypersurfaces than that of {everywhere} $2$-nondegenerate ones. We overcome this difficulty by selecting a certain {\em distinguished part} of the normal form, which uniquely determines the rest of the normal form under the assumption of everywhere $2$-nondegeneracy. \subsection{The normal form} Let $M\subset\CC{3}$ be an everywhere $2$-nondegenerate hypersurface. The tangent bundle $TM$ is endowed then with the canonical subbundles $T^{\CC{}}M$ and $K$, where $K_p$ is the Levi kernel at $p$. Accordingly, the quotient bundles $TM/T^{\CC{}}M$ and $TM/K$ are well defined. 
Further, let us introduce the space $\mathcal N$ of real-valued formal power series \[\Phi(z,\zeta,\bar z,\bar \zeta, u)=\sum_{k+l+\alpha+\beta\geq 5}\Phi_{kl\alpha\beta}(u)z^k\zeta^l\bar z^\alpha\bar\zeta^\beta\] satisfying, in addition, \begin{equation}\Label{nspace} \begin{aligned} &\Phi_{kl00}=\Phi_{kl10}=\Phi_{kl20}=0,\,\,k,l\geq 0;\\ &\Phi_{3001}=\Phi_{4001}=\Phi_{3011}=\Phi_{4011}=\Phi_{3030}=0. \end{aligned} \end{equation} Here $(z,\zeta,w=u+iv)$ are the coordinates in $\CC{3}$. \begin{definition}\Label{innf} We say that a (formal or analytic) hypersurface $M\subset\CC{3}$ passing through $0$ {\em is in normal form}, if it is given by a defining equation \begin{equation}\Label{model+} v=P(z,\zeta,\bar z,\bar\zeta)+\Phi(z,\zeta,\bar z,\bar \zeta, u), \end{equation} where $P$ is as in \eqref{cone} and $\Phi\in\mathcal N$. \end{definition} We now formulate our main result. We make use of the local representation \eqref{germ} below for an everywhere $2$-nondegenerate hypersurface. \begin{theorem}\label{main} Let $M$ be a real-analytic everywhere $2$-nondegenerate hypersurface in $\CC{3}$, and $p\in M$. Then, there exists a biholomorphic transformation $H:\,(\CC{3},p)\lr(\CC{3},0)$ mapping $M$ into a hypersurface in normal form. A normalizing transformation $H$ is determined uniquely by the action of its differential $dH_p$ on the quotient space $T_pM/K_p$ and the transverse second order derivative at $p$ of the transverse component of $H$. In turn, in any coordinates \eqref{germ}, a normalizing transformation is unique up to the right action of the $5$-dimensional stability group of the model \eqref{cone}. \end{theorem} The stability group of the model \eqref{cone} is described in detail in Section 2.1 below. 
\begin{remark}\label{2nondegnf} We shall emphasize once more that the class of hypersurfaces in normal form \eqref{nspace} is larger than that of {everywhere} $2$-nondegenerate hypersurfaces in normal form \eqref{nspace}. However, as shown in \autoref{main3} below, it is possible to select a distinguished part of the normal form which, first of all, can be chosen arbitrarily, and second, determines one and only one everywhere $2$-nondegenerate hypersurface with the given distinguished part of the normal form. \end{remark} \subsection{Applications of the normal form} In this section, we provide two applications of the complete convergent normal form provided in \autoref{main}. First of all, by combining \autoref{main} with the result of Pocchiola \cite{pocchiola}, we obtain a criterion for the {\em sphericity} of a general hypersurface, i.e. its local equivalence to the model: the tube over the light cone. \begin{theorem}\Label{main2} Let $M\subset\CC{3}$ be a real-analytic everywhere $2$-nondegenerate hypersurface, and $p\in M$. Then $M$ is locally biholomorphic near $p$ to the tube over the light cone \eqref{tube} if and only if for every point $q\in M$ nearby $p$ we have: \begin{equation}\Label{sphericity} \Phi_{3002}(0)=\Phi_{5001}(0)=0 \end{equation} for the coefficients of some (and hence any) normal form at $q$. \end{theorem} \autoref{main2} is a direct analogue in the $2$-nondegenerate setting of Chern-Moser's theorem in \cite{chern} on the characterization of sphericity of a Levi-nondegenerate hypersurface via coefficients of its normal form. \begin{remark}\Label{smooth} The assertion of \autoref{main2} can be extended to the class of merely {\em smooth} everywhere $2$-nondegenerate submanifolds $M$. In this case, one has to employ the {\em formal} normal form for $M$, which is still available in the smooth case. 
\end{remark} We now describe our second application of the normal form. Since the inception of the notion of finite nondegeneracy (originally introduced, as mentioned above, by Baouendi-Huang-Rothschild in \cite{bhr}), it has been an open problem to describe the moduli space of (real-analytic) everywhere $k$-nondegenerate hypersurfaces in $\CC{N}$ for various $k,N$. To the best of our knowledge, no such description has been available till present in any dimension. Our normal form allows for solving the latter problem for $2$-nondegenerate hypersurfaces in $\CC{3}$. Namely, fix a real-analytic everywhere $2$-nondegenerate hypersurface $M\subset\CC{3}$ in normal form \eqref{model+} at the origin. Consider then, for the normal form, the sum of all terms in $\Phi$ which are not divisible by $\zeta\bar \zeta$. The latter has the form \begin{equation}\label{disting} \Phi(z,\zeta,\bar z,0,u)+\Phi(z,0,\bar z,\bar\zeta,u)-\Phi(z,0,\bar z,0,u). \end{equation} Expressions \eqref{disting} analytic at the origin are in one-to-one correspondence with functions \[\chi(z,\zeta,\bar z,u)=\sum_{k,l,\alpha\geq 0}\chi_{kl\alpha }(u)z^k\zeta^l\bar z^\alpha\] analytic near the origin, appearing as \[\chi(z,\zeta,\bar z,u):=\Phi(z,\zeta,\bar z,0,u)=\sum_{k,l,\alpha\geq 0}\Phi_{kl\alpha 0}(u)z^k\zeta^l\bar z^\alpha.\] \noindent The latter functions $\chi$ satisfy, in case $\Phi$ is in normal form, the conditions: \begin{equation}\label{Dspace} \begin{aligned} &\chi(z,0,\bar z,u)\in\RR{},\\ &\chi_{kl0}=\chi_{kl1}=\chi_{kl2}=0,\,\,k,l\geq 0,\\ &\chi_{013}=\chi_{014}=\chi_{113}=\chi_{114}=\chi_{303}=0 \end{aligned} \end{equation} (coming respectively from the reality requirement $\Phi(z,\zeta,\bar z,\bar\zeta,u)\in\RR{}$ and the normal form conditions \eqref{nspace}). 
\begin{definition} We call the function \varepsilonqref{disting} {\varepsilonm the distinguished part of the normal form of $M$ at $p$}. We also denote by ${\mathcal{M}}athcal D$ the respective linear functional space \varepsilonqref{Dspace}. \varepsilonnd{definition} We are now in the position to describe the moduli space under discussion. \begin{theorem}\lambdabel{main3} The natural map $\pi$ assigning to a real-analytic everywhere $2$-nondegenerate hypersurface $M\subset\CC{3}$ in normal form \varepsilonqref{nspace} the distinguished part of the normal form is a bijection onto the space ${\mathcal{M}}athcal D$. In this way, the moduli space of real-analytic everywhere $2$-nondegenerate hypersurfaces considered up to a local biholomorphic equivalence carries the structure $${\mathcal{M}}athcal D/G,$$ where $G$ is the ($5$-dimensional) stability subgroup in the automorphism group of the rational model \varepsilonqref{cone}. \varepsilonnd{theorem} The action of the group $G$ on the space ${\mathcal{M}}athcal D$ can be viewed from the normalization procedure in Section 2.3 (while the group $G$ is described, once again, in Section 2.1). \section{A formal normal form} \subsection{The light cone and its automorphisms} We shall now describe in detail the automorphisms of the local rational model \varepsilonqref{cone} for the tube over the light cone \varepsilonqref{tube} (obtained by Fels-Kaup in \cite{kaup2}). According to \cite{kaup2}, the cone \varepsilonqref{tube} is locally bi-rationally equivalent to the real-algebraic (rational) hypersurface \varepsilonqref{cone}. In \cite{kaup2}, the infinitesimal automorphism algebra of \varepsilonqref{cone} is described using a grading by complex integers. Our approach is different though. 
We introduce the following choice of weights playing a crucial role in our construction: \begin{equation}\Label{weights} [z]=[\bar z]=1,\,\,[\zetaeta]=[\bar \zetaeta]=0,\,\,[w]=[\bar w]=2 \varepsilonnd{equation} The rational model \varepsilonqref{cone} is then a {\varepsilonm homogeneous hypersurface} with respect to the choice of weights \varepsilonqref{weights}. Let us now employ the weight system \varepsilonqref{weights} to present the infinitesimal automorphism algebra $\mathfrak{g}$ of \varepsilonqref{cone} as a graded Lie algebra. Let us set: \begin{equation}\Label{grading} [z]=1,\,\,[\zetaeta]=0,\,\,[w]=2,\,\,\left[\frac{\partial}{\partial z}\right]=-1,\,\,\left[\frac{\partial}{\partial \ensuremath{\zetaeta}}\right]=0,\,\,\left[\frac{\partial}{\partial w}\right]=-2. \varepsilonnd{equation} Now the results in \cite{kaup2} can be interpreted as presenting $\mathfrak{g}$ in the form: $$\mathfrak{g}=\mathfrak{g}_{-2}\oplus\mathfrak{g}_{-1}\oplus\mathfrak{g}_{0}\oplus\mathfrak{g}_{1}\oplus\mathfrak{g}_{2},$$ where the negatively graded components are \begin{equation}\Label{g-} \begin{aligned} \mathfrak{g}_{-2}=&{\mathcal{M}}box{span}\,\left\{\frac{\partial}{\partial w}\right\},\\ \mathfrak{g}_{-1}=&{\mathcal{M}}box{span}\,\left\{(1-\ensuremath{\zetaeta})\frac{\partial}{\partial z}+2iz\frac{\partial}{\partial w},\,\,i(1+\ensuremath{\zetaeta})\frac{\partial}{\partial z}+2z\frac{\partial}{\partial w}\right\},\qquad\qquad\qquad\qquad\qquad \varepsilonnd{aligned} \varepsilonnd{equation} the zero component is split as $\mathfrak{g}_{0}=\mathfrak{g}_0^c\oplus\mathfrak{g}_0^s$ with \begin{equation}\Label{g0} \begin{aligned} \mathfrak{g}_0^c=&{\mathcal{M}}box{span}\,\left\{z\frac{\partial}{\partial z}+2w\frac{\partial}{\partial w},\,\,iz\frac{\partial}{\partial z}+2i\ensuremath{\zetaeta}\frac{\partial}{\partial \ensuremath{\zetaeta}}\right\},\\ \mathfrak{g}_0^s=&{\mathcal{M}}box{span}\,\left\{ -z\ensuremath{\zetaeta}\frac{\partial}{\partial z}+ 
(1-\ensuremath{\zetaeta}^2)\frac{\partial}{\partial \ensuremath{\zetaeta}}+iz^2\frac{\partial}{\partial w},\,\,iz\ensuremath{\zetaeta}\frac{\partial}{\partial z}+i(1+\ensuremath{\zetaeta}^2)\frac{\partial}{\partial \ensuremath{\zetaeta}}+z^2\frac{\partial}{\partial w}\right\}, \varepsilonnd{aligned} \varepsilonnd{equation} and the positively graded components are \begin{equation}\Label{g+} \begin{aligned} \quad\mathfrak{g}_{1}=&{\mathcal{M}}box{span}\,\left\{(z^2+iw+i\ensuremath{\zetaeta} w)\frac{\partial}{\partial z}+2(z+z\ensuremath{\zetaeta})\frac{\partial}{\partial \ensuremath{\zetaeta}}+2zw\frac{\partial}{\partial w},\right.\\ \qquad{\mathcal{M}}box{}&\left.\qquad\qquad\qquad \qquad\qquad\quad(iz^2+w+\ensuremath{\zetaeta} w)\frac{\partial}{\partial z}+2i(z\ensuremath{\zetaeta}-z)\frac{\partial}{\partial \ensuremath{\zetaeta}}+2izw\frac{\partial}{\partial w}\right\},\\ \mathfrak{g}_{2}=&{\mathcal{M}}box{span}\,\left\{zw\frac{\partial}{\partial z}-iz^2\frac{\partial}{\partial \ensuremath{\zetaeta}}+w^2\frac{\partial}{\partial w}\right\}. \varepsilonnd{aligned} \varepsilonnd{equation} We conclude that the stability subalgebra ${\mathcal{M}}athfrak h\subset\mathfrak{g}$ is decomposed as \begin{equation}\Label{isotropy} {\mathcal{M}}athfrak h=\mathfrak{g}_0^c\oplus\mathfrak{g}_1\oplus\mathfrak{g}_2. \varepsilonnd{equation} The stability group $G$ of the rational model \varepsilonqref{cone} is generated by the algebra \varepsilonqref{isotropy}. Its subgroup $G_0^c$ generated by the component $\mathfrak{g}_0^c$ consists of the scalings \begin{equation}\Label{scalings} z{\mathcal{M}}apsto re^{i\varphi}z,\quad \ensuremath{\zetaeta}{\mathcal{M}}apsto e^{2i\varphi}\ensuremath{\zetaeta}, \quad w{\mathcal{M}}apsto r^2w, \quad r>0,\,\,\varphi\in\RR{}. \varepsilonnd{equation} The subgroup $G_+$ generated by the subalgebra $\mathfrak{g}_+:=\mathfrak{g}_1\oplus\mathfrak{g}_2$ consists of certain rational transformations. 
As the respective rational expressions are cumbersome and are not used in the paper, we do not provide them here. \subsection{The light cone as a model} Recall that, according to Ebenfelt \cite{ebenfeltC3}, in appropriate local holomorphic coordinates a germ of an everywhere Levi degenerate but holomorphically nondegenerate hypersurface $M\subset\CC{3}$ can be represented as: \begin{equation}\Label{germ} v=|z|^2+\sum_{k\geq 3} Q^k(z,\zeta,\bar z,\bar \zeta,u), \end{equation} where all $Q^k$ are homogeneous polynomials of weight $k$ with respect to the grading system \begin{equation}\Label{initweights} [z]=[\zeta]=[\bar z]=[\bar \zeta]=1,\,\,[w]=[\bar w]=2, \end{equation} and, in addition, \begin{equation}\Label{Q3} Q^3=\frac{1}{2}(z^2\bar\zeta+\bar z^2\zeta). \end{equation} Furthermore, as shown in \cite{bk}, we may assume (after a formal coordinate change) that all terms of the kind \begin{equation}\Label{killed} z^k\zeta^l\bar z^0\bar\zeta^0u^m, \quad z^k\zeta^l\bar z^1\bar\zeta^0u^m,\,(k,l)\neq (1,0), \quad z^k\zeta^l\bar z^2\bar\zeta^0u^m,\,(k,l)\neq (0,1) \end{equation} and their conjugates are not present in \eqref{germ}. We will furthermore show in Section 3 that the coordinate change removing the terms \eqref{killed} can in turn be chosen holomorphic. \begin{definition} In what follows, a (formal) hypersurface \eqref{germ}, satisfying \eqref{Q3} and the condition of absence of terms \eqref{killed} is called {\em prenormalized}. \end{definition} As discussed in the Introduction, the key point of this paper is using, in contrast to \eqref{initweights}, the weight system \eqref{weights}, which reflects the (infinite) Catlin multitype of the manifold. 
We make at this point the following \begin{convention} We say that a formal series in $z,\ensuremath{\zetaeta},\bar z,\bar\ensuremath{\zetaeta},u$ {\varepsilonm has weight $k\mathfrak{g}eq 0$} in the grading \varepsilonqref{weights}, if each Taylor polynomial of it has the weight $k$ in the grading \varepsilonqref{weights}. \varepsilonnd{convention} Our immediate goal is to show that, with respect to the weights \varepsilonqref{weights}, any everywhere Levi degenerate hypersurface $M\subset\CC{3}$ is a perturbation of (the rigid model) the light cone \varepsilonqref{cone}. Recall that $P(z,\ensuremath{\zetaeta},\bar z,\bar\ensuremath{\zetaeta})$ denotes the right hand side of \varepsilonqref{cone}. \begin{proposition}\Label{goodperturb} Let $M$ be a (formal or analytic) prenormalized hypersurface \varepsilonqref{germ}. Then the defining equation of $M$ can be written as \begin{equation}\Label{perturb} v=P(z,\ensuremath{\zetaeta},\bar z,\bar\ensuremath{\zetaeta})+\sum_{k\mathfrak{g}eq 3}\Phi_k(z,\zetaeta,\bar z,\bar \zetaeta,u), \varepsilonnd{equation} where each $\Phi_k$ has weight $k$ with respect to the grading \varepsilonqref{weights}. \varepsilonnd{proposition} \begin{proof} It is convenient to switch to the complex defining equation $$w=\theta(z,\bar z,\bar w)$$ in \varepsilonqref{germ} (see \cite{ber}), and write the result as \begin{equation}\Label{germc} w=\bar w+2i\Bigl(|z|^2+\sum_{k\mathfrak{g}eq 3} Q^k(z,\zetaeta,\bar z,\bar \zetaeta,\bar w)\Bigr) \varepsilonnd{equation} (the polynomials $Q^k$ in \varepsilonqref{germ} and \varepsilonqref{germc} are in principle different, but we keep the same notations for simplicity). Let us denote by $Q^k_0,\,k\mathfrak{g}eq 3,$ the polynomials $Q^k$, as in \varepsilonqref{germc}, corresponding to the model hypersurface \varepsilonqref{cone}. 
That is, $$Q^{2j}_0=|z|^2|\ensuremath{\zetaeta}|^{2j-2},\,j\mathfrak{g}eq 2,$$ and $$Q^{2j+1}_0=\frac{1}{2}(z^2\bar\zetaeta+\bar z^2\zetaeta)|\ensuremath{\zetaeta}|^{2j-2},\,j\mathfrak{g}eq 1.$$ The assertion of the proposition reads then as \begin{equation}\Label{Qk} Q^k=Q^k_0+O(|z|^3)+O(|z||\bar w|)+O(|\bar w|^2). \varepsilonnd{equation} Recall that the Levi determinant in terms of the complex defining equation equals to $$\begin{vmatrix} \theta_{\bar z} & \theta_{\bar \ensuremath{\zetaeta}} & \theta_{\bar w} \\ \theta_{z\bar z} & \theta_{z\bar \ensuremath{\zetaeta}} & \theta_{z\bar w} \\ \theta_{\ensuremath{\zetaeta}\bar z} & \theta_{\ensuremath{\zetaeta}\bar \ensuremath{\zetaeta}} & \theta_{\ensuremath{\zetaeta}\bar w} \varepsilonnd{vmatrix}$$ (see \cite{ber}). Accordingly, the uniform Levi degeneracy condition for $M$ reads as \begin{equation}\Label{det} \begin{vmatrix} 2iz+2i\sum Q^k_{\bar z} & 2i\sum Q^k_{\bar \ensuremath{\zetaeta}} & 1+2i\sum Q^k_{\bar w} \\ 2i+2i\sum Q^k_{z\bar z} & 2i\sum Q^k_{z\bar \ensuremath{\zetaeta}} & 2i\sum Q^k_{z\bar w} \\ 2i\sum Q^k_{\ensuremath{\zetaeta}\bar z} & 2i\sum Q^k_{\ensuremath{\zetaeta}\bar \ensuremath{\zetaeta}} & 2i\sum Q^k_{\ensuremath{\zetaeta}\bar w} \varepsilonnd{vmatrix}\varepsilonquiv 0. \varepsilonnd{equation} We then inspect a series of identities obtained by collecting in \varepsilonqref{det} terms of a fixed weight $k-2,\,k\mathfrak{g}eq 3$ {\varepsilonm with respect to the standard grading \varepsilonqref{initweights}}. For $k=3$ such an identity holds in view of \varepsilonqref{Q3}. However, for $k\mathfrak{g}eq 4$ we obtain non-trivial conditions involving the polynomials $Q^3,...,Q^k$. It is not difficult to see that these conditions have the form: \begin{equation}\Label{iterative} -4Q^k_{\ensuremath{\zetaeta}\bar\ensuremath{\zetaeta}}=\cdots, \varepsilonnd{equation} where dots stand for a polynomial expression in $Q^j,\,j<k$ and their derivatives (and also in $z$). 
We will prove \eqref{Qk} by induction in $k\geq 3$. For $k=3$ it holds in view of the above discussed formula for $Q^3$. For the induction step, we assume that \eqref{Qk} holds for all $j\leq k$, and consider the identity obtained by collecting in \eqref{det} terms with weight $k-1$. In view of \eqref{iterative}, this identity has the form \begin{equation}\Label{iterative1} -4Q^{k+1}_{\zeta\bar\zeta}=\cdots \end{equation} We claim that, in fact, \eqref{iterative1} has the more specific form \begin{equation}\Label{iterative2} -4Q^{k+1}_{\zeta\bar\zeta}=-4\left(Q^{k+1}_0\right)_{\zeta\bar\zeta}+\cdots, \end{equation} where dots stand for terms of the form \begin{equation}\Label{remainder} O(|z|^3)+O(|z|^2|\bar w|)+O(|\bar w|^2). \end{equation} To prove the claim, we apply \eqref{Qk} in the right hand side of \eqref{iterative1} and conclude that we have there two kinds of terms: those arising from $Q^j_0$ only (with $j\leq k$) and all the others. For the first group of terms, we note that they must have simply the form $\left(Q^{k+1}_0\right)_{\zeta\bar\zeta}$. This is concluded from considering the equation \eqref{det} for the model hypersurface \eqref{cone} itself. To analyze the second group of terms, we first observe that the $(1,2)$ and the $(3,2)$ entries of the determinant \eqref{det} already have the form \eqref{remainder}. Hence it is enough to consider only the two ``diagonal'' products within the expansion of the determinant \eqref{det}, assuming that at least one $Q^j$ within these products is substituted by an expression of the form \eqref{remainder}. 
Note that, within the two diagonal products, the $Q^j$'s are either differentiated once in $z$, or once in $\bar z$, or once in $\bar w$ (and further possibly differentiated in $\ensuremath{\zetaeta},\bar\ensuremath{\zetaeta}$ which is of no interest to us). Putting all this information together, it is not difficult to conclude now that the second group under discussion has the form \varepsilonqref{remainder}, and this proves the claim. It remains to deal with the identity \varepsilonqref{iterative2} that holds true according to the claim. Integrating this identity in $\ensuremath{\zetaeta},\bar\ensuremath{\zetaeta}$, we get: \begin{equation}\Label{almost} Q^{k+1}=Q^{k+1}_0+R_1+R_2, \varepsilonnd{equation} where a remainder $R_1$ has the form \varepsilonqref{remainder}, while $R_2$ satisfies $\bigl(R_2\bigr)_{\ensuremath{\zetaeta}\bar\ensuremath{\zetaeta}}=0$. In view of the absence of the terms \varepsilonqref{killed}, this gives $R_2\varepsilonquiv 0$, and implies \varepsilonqref{Qk}. Proposition is proved now. \varepsilonnd{proof} As the final outcome of this subsection, we may restrict our considerations to (formal or analytic) hypersurfaces of the kind \varepsilonqref{perturb} (that is, to "good" perturbations of the light cone \varepsilonqref{cone} in the weight system \varepsilonqref{weights}). That is why in what follows we use the weight system \varepsilonqref{weights} only. \subsection{Normalization of initial terms of a CR-map} Let $M,M^*$ be two (formal or analytic) prenormalized hypersurfaces of the kind \varepsilonqref{perturb}, and $$H=(f,g,h):\,\,(M,0)\lr(M^*,0)$$ be a (formal) invertible holomorphic map between them. Let us first note that the representation \varepsilonqref{perturb} is invariant under the group of scalings \varepsilonqref{scalings}. 
The main goal of this subsection is to prove the following \begin{proposition}\Label{initterms} There exists a scaling $\Lambda$, as in \varepsilonqref{scalings}, such that $H$ can be decomposed as $H=\tilde H\circ\Lambda$, where the (formal) map $\tilde H$ has the form: \begin{equation}\Label{normalmap} z{\mathcal{M}}apsto z+f_2+f_3+\cdots, \quad \ensuremath{\zetaeta}{\mathcal{M}}apsto \ensuremath{\zetaeta}+g_1+g_2+\cdots, \quad h=w+h_3+h_4+\cdots, \varepsilonnd{equation} where $f_j,g_j,h_j$ are formal power series of weight $j$ with respect to the grading \varepsilonqref{weights}. \varepsilonnd{proposition} \begin{proof} The basic identity for the map $H$ gives: \begin{equation}\Label{basic} \begin{aligned} \ensuremath{\mbox{\rm Im}\,} h=P(f,g,\bar f,\bar g)+&\sum_{j\mathfrak{g}eq 3}\Phi^*_j(f,g,\bar f,\bar g,\ensuremath{\mbox{\rm Re}\,} h), \\ f=f(z,\ensuremath{\zetaeta},w),\,g=g(z,\ensuremath{\zetaeta},w),\,h=h(z,\ensuremath{\zetaeta},w),\, & w=u+iP(z,\ensuremath{\zetaeta},\bar z,\bar\ensuremath{\zetaeta})+i\sum_{j\mathfrak{g}eq 3}\Phi_j(z,\zetaeta,\bar z,\bar\ensuremath{\zetaeta},u). \varepsilonnd{aligned} \varepsilonnd{equation} Comparing in \varepsilonqref{basic} terms of weight $0$, we get \begin{equation}\Label{weight0} \ensuremath{\mbox{\rm Im}\,} h_0(\ensuremath{\zetaeta})=P(f_0(\ensuremath{\zetaeta}),g_0(\ensuremath{\zetaeta}),\bar f_0(\bar \ensuremath{\zetaeta}),\bar g_0(\bar\ensuremath{\zetaeta}))+\sum_{j\mathfrak{g}eq 3}\Phi^*_j(f_0(\ensuremath{\zetaeta}),g_0(\ensuremath{\zetaeta}),\bar f_0(\bar\ensuremath{\zetaeta}),\bar g_0(\bar\ensuremath{\zetaeta}),\ensuremath{\mbox{\rm Re}\,} h_0(\ensuremath{\zetaeta})) \varepsilonnd{equation} (note that weight $0$ components can depend on $\ensuremath{\zetaeta}$ only). Recall that both hypersurfaces are prenormalized, hence the right hand side in \varepsilonqref{basic} does not contain harmonic terms and we conclude that $h_0=0$. We next claim that $f_0=0$. 
Indeed, assume that $m:=ord_0\,f_0(\zeta)<\infty$. Then the hermitian term in $P$ gives in the right hand side of \eqref{weight0} a non-zero term with $\zeta^m\bar\zeta^m$. On the other hand, no other terms in the right hand side of \eqref{weight0} contribute to $\zeta^m\bar\zeta^m$ (this follows from $h_0=0$ and from the fact that all $\Phi^*_j$ have weight at least $3$). Thus we conclude that $f_0=0$. Finally, since the map $H$ is invertible, we can claim that $g_0(\zeta)$ has a nonzero linear part. We next collect terms of weight $1$ in \eqref{basic}. Since we have $f_0=h_0=0$, this simply gives: $\ensuremath{\mbox{\rm Im}\,} h_1(z,\zeta)=0$, so that $h_1=0$. For the components $f_1,g_1$ we observe that they have respectively the form $zF_1(\zeta),zG_1(\zeta)$. The invertibility of the map also gives $F_1(0)\neq 0$ and $h_w(0,0,0)\neq 0$. As the last step, we collect in \eqref{basic} terms of weight $2$. This gives: \begin{equation}\Label{weight2} \begin{aligned} \ensuremath{\mbox{\rm Im}\,} h_2=P(f_1,g_0,\bar f_1,\bar g_0) ,\,\,\, w=u+iP(z,\zeta,\bar z,\bar\zeta). \end{aligned} \end{equation} We conclude that the invertible map $$(z,\zeta,w)\lr \bigl(f_1(z,\zeta),g_0(\zeta),h_2(z,\zeta,w)\bigr)$$ is an automorphism of the light cone \eqref{cone}. 
In view of the explicit description given by \eqref{g0},\eqref{g+},\eqref{isotropy}, this implies $$f_1=\lambda z,\,\,g_0=\mu\zeta,\,\,h_2=\nu w, \quad \lambda,\mu,\nu\neq 0.$$ Substituting the latter into \eqref{weight2} and comparing terms with $z^2\bar\zeta$, we obtain: \begin{equation}\Label{munu} \mu=e^{2i\varphi},\quad \nu=r^2,\,\,\mbox{where}\,\,\lambda=re^{i\varphi},\,\,r>0,\varphi\in\RR{}. \end{equation} The identity \eqref{munu} (together with the earlier obtained description of the weighted components of the map $H$) implies the assertion of the proposition. \end{proof} We end this subsection by noting that any map $H$, as in \eqref{normalmap}, can be further decomposed in a unique way as \begin{equation}\Label{factord} H=\tilde H\circ\psi, \end{equation} where $\psi$ is an automorphism of the light cone \eqref{cone} belonging to the subgroup \eqref{g+}, and the map $\tilde H=(z+f,\zeta+g,w+h)$ has the form \eqref{normalmap} and satisfies, in addition, \begin{equation}\Label{specialmap} f_{zz}=0,\quad \ensuremath{\mbox{\rm Re}\,} h_{ww}=0. \end{equation} That is why, in what follows, \emph{for the normalization procedure we may consider only maps of the special form \eqref{specialmap}}. \subsection{Homological operator and formal normal form} Let us consider a map $H:\,\,(M,0)\lr(M^*,0)$ between two prenormalized (formal) hypersurfaces $M,M^*$, which is decomposed as in \eqref{normalmap} and satisfies, in addition, \eqref{specialmap}. Let us consider, for each fixed $m\geq 3$, the equation obtained by collecting in the basic identity \eqref{basic} all terms of weight $m$. 
In view of \eqref{perturb},\eqref{normalmap}, it is not difficult to see that such an identity must have the following form: \begin{equation}\Label{homolog} \ensuremath{\mbox{\rm Re}\,}\Bigl(ih_m(z,\zeta,w)+2f_{m-1}(z,\zeta,w)P_z+2g_{m-2}(z,\zeta,w)P_{\zeta}\Bigr)|_{w=u+iP}=\Phi^*_m-\Phi_m+\cdots, \end{equation} where $\Phi^*_j,\Phi_j$ are as in \eqref{perturb} and dots stand for a polynomial expression in: (i) $\Phi^*_j,\Phi_j$ with $j<m$ and their derivatives of order $\leq m-1$; (ii) the collections $(f_{j-1},g_{j-2},h_j)$ with $j<m$ and their derivatives of order $\leq m-1$; (iii) the local coordinates $z,\zeta,\bar z,\bar\zeta,u$. This leads to the consideration of the \emph{homological operator} \begin{equation} \mathcal L(f,g,h):=\ensuremath{\mbox{\rm Re}\,}\Bigl(ih(z,\zeta,w)+2f(z,\zeta,w)P_z+2g(z,\zeta,w)P_{\zeta}\Bigr)|_{w=u+iP}, \end{equation} defined on the linear space $\mathcal V$ of tuples $\bigl(f(z,\zeta,w),g(z,\zeta,w),h(z,\zeta,w)\bigr)$ of the kind \begin{equation}\Label{source} f=f_2+f_3+\cdots,\,\,g=g_1+g_2+\cdots,\,\,h=h_3+h_4+\cdots \end{equation} satisfying, in addition, \eqref{specialmap}, and valued in the space $\mathcal W$ of series $\Phi(z,\zeta,\bar z,\bar\zeta,u)$ of the kind $$\Phi=\Phi_3+\Phi_4+\cdots.$$ We shall recall now that the normal form space $\mathcal N$, as in \eqref{nspace} is given in more detail by the conditions: \begin{equation}\Label{Nspace} \Psi_{\a \b 00} = \Psi_{\a \b 10} = \Psi_{\a \b 2 0} = 0 \end{equation} for all $\a, \b$, then \begin{equation}\Label{Nspace2} \Psi_{4011} = \Psi_{3001} = \Psi_{4001} = 0 \end{equation} and finally 
\begin{equation}\Label{Nspace3} \Psi_{3030} = \Psi_{3011} = 0. \varepsilonnd{equation} Let us then take into consideration the range ${\mathcal{M}}athcal R$ of the operator ${\mathcal{M}}athcal L$. We shall prove the following \begin{proposition}\Label{directsum} The operator ${\mathcal{M}}athcal L$ is injective on ${\mathcal{M}}athcal V$. Moreover, the target space ${\mathcal{M}}athcal W$ described above can be decomposed as the direct sum $${\mathcal{M}}athcal W={\mathcal{M}}athcal R\oplus{\mathcal{M}}athcal N,$$ where ${\mathcal{M}}athcal N\subset{\mathcal{M}}athcal W$ is the above normal form space. \varepsilonnd{proposition} \begin{proof} The assertion of the proposition can be reformulated like that: an equation \begin{equation}\Label{homeq} 2{\mathcal{M}}athcal L(f,g,h)=\Psi,\,\,\,\Psi\in{\mathcal{M}}athcal W \varepsilonnd{equation} has a unique solution in ${\mathcal{M}}athcal V$, modulo an element of the space ${\mathcal{M}}athcal N$ standing in the right hand side of \varepsilonqref{homeq}. To solve an equation \varepsilonqref{homeq}, let us expand \begin{equation}\Label{expandf} f(z,\ensuremath{\zetaeta},u+iP)=f(z,\ensuremath{\zetaeta},u)+f_w(z,\ensuremath{\zetaeta},u)iP-\frac{1}{2}f_{ww}(z,\ensuremath{\zetaeta},u)P^2-\frac{i}{6}f_{www}(z,\ensuremath{\zetaeta},u)P^3+\cdots, \varepsilonnd{equation} and similarly for $g,h$. Further, we use the expansion \begin{equation}\Label{fk} f(z,\ensuremath{\zetaeta},u)=\sum_{k,l\mathfrak{g}eq 0}f_{kl}(u)z^k\ensuremath{\zetaeta}^l, \varepsilonnd{equation} and similarly for $g,h$. We now substitute \varepsilonqref{expandf},\varepsilonqref{fk} into \varepsilonqref{homeq}, and apply {\varepsilonm all} the linear functionals, annihilating the above subspace ${\mathcal{M}}athcal N$, to the resulting identity. This amounts to collecting certain terms in \varepsilonqref{homeq}, which we do step-by-step. 
Collecting all terms of the kind $z^k\ensuremath{\zetaeta}^l\bar z^0\bar\ensuremath{\zetaeta}^0u^j$ gives: \begin{equation}ih_{kl}(u)=0,\,\,k,l\mathfrak{g}eq 0, \lambdabel{e1} \varepsilonnd{equation} except for $(k,l) = (0,0), (1,0), (2,0)$. For these values we obtain, respectively, \begin{equation} i h_{00 } - i \bar h_{00 } = 0,\lambdabel{e2} \varepsilonnd{equation} \begin{equation} 2 \bar f_{00 } + i h_{10 } = 0,\lambdabel{e3} \varepsilonnd{equation} \begin{equation} \bar g_{00 } + i h_{20 } = 0. \lambdabel{e4} \varepsilonnd{equation} For terms of the form $z^k\ensuremath{\zetaeta}^l\bar z^1\bar\ensuremath{\zetaeta}^0u^j,\,$ we get for $k > 0$ and $ l\mathfrak{g}eq 0$: \begin{equation}-h'_{k-1,l}+2f_{kl}=0,\,\,k,l\mathfrak{g}eq 0,\lambdabel{e5} \varepsilonnd{equation} except when $(k,l) = (1,0), (2,0), (3,0), (1,1).$ If $k = 0$ and $l >1$ we obtain \begin{equation} 2 f_{0l} = 0.\lambdabel{e6} \varepsilonnd{equation} For $z \bar z \zeta $ terms, i.e. $(k,l) = (0,1)$ we obtain \begin{equation} 2 \bar g_{00} + 2 f_{11} - h'_{01} = 0.\lambdabel{e7} \varepsilonnd{equation} Further, for $z \bar z $ terms, i.e. $(k,l) = (1,0)$, we have \begin{equation} - h'_{00} - \bar h'_{00} + 2f_{10} + 2\bar f_{10} = 0.\lambdabel{e8} \varepsilonnd{equation} For $\zeta \bar z$ terms , i.e. $(k,l) = (0,1)$, we obtain \begin{equation} 2 f_{01} + 2 \bar f_{00}= 0,\lambdabel{e9} \varepsilonnd{equation} and for $z^2 \bar z $ terms , i.e. $(k,l) = (2,0)$, \begin{equation} - h'_{10} + 2 f_{20} - 2i \bar f'_{00} + \bar g_{10}= 0.\lambdabel{e10} \varepsilonnd{equation} Finally for $z^3 \bar z$ terms, i.e. $(k,l) = (3,0)$, we obtain \begin{equation} -i \bar g'_{00} + 2 f_{30} - h'_{20} = 0.\lambdabel{e11} \varepsilonnd{equation} Next, consider terms of the form $z^k\ensuremath{\zetaeta}^l\bar z^2\bar\ensuremath{\zetaeta}^0u^j$. For $ k > 1$ and $l>0$, except for $(k,l) = (2,1)$, we obtain \begin{equation}- \frac12 h'_{k, l-1} - \frac{i}{2} h''_{k-2, l} + 2i f'_{k-1, l} + g_{kl} = 0. 
\lambdabel{e12} \varepsilonnd{equation} When $k > 3$ and $l=0$, i.e. for the coefficients of $z^k \bar z^2$ we have \begin{equation} - \frac{i}{2} h''_{k-2, 0} + 2i f'_{k-1, 0} + g_{k0} = 0. \lambdabel{e13} \varepsilonnd{equation} If $k = 1$ and $l>0$ i.e. for the coefficients of $z \zeta^l \bar z^2$, except for $(k,l) = (1,1)$, we have \begin{equation} - \frac{1}{2} h'_{1, l-1} + 2i f'_{0, l} + g_{1l} = 0. \lambdabel{e14} \varepsilonnd{equation} Finally, if $k = 0$ and $l>2$, i.e. for $\zeta^l \bar z^2 $ we obtain \begin{equation} - \frac{1}{2} h'_{0, l-1} + g_{0l} = 0. \lambdabel{e15} \varepsilonnd{equation} Further, for $(k,l) = (0,2)$ we obtain \begin{equation}\bar g_{00}- \frac12 h'_{0, 1} + g_{02} = 0. \lambdabel{e16} \varepsilonnd{equation} For $(k,l) = (1,1)$ we obtain \begin{equation} -3i \bar f'_{00} + 2\bar g_{10} - \frac{1}{2} h'_{10} + 2i f'_{01} + g_{11} = 0. \lambdabel{e17} \varepsilonnd{equation} For $(k,l) = (3,0)$ we obtain \begin{equation}-i \bar g'_{10} - \frac{i}{2} h''_{10} + 2i f'_{20} - \bar f_{00}'+ g_{30} = 0. \lambdabel{e18} \varepsilonnd{equation} For $(k,l) = (0,1)$ we obtain \begin{equation}- \frac{1}{2} h'_{00} - \frac{1}{2} \bar h'_{00} + 2 \bar f_{10} + g_{01} = 0. \lambdabel{e19} \varepsilonnd{equation} For $(k,l) = (2,0)$ we obtain \begin{equation} \frac{i}2 \bar h''_{00} - \frac{i}{2} h''_{00} + 2i f'_{10} + g_{20} -2i \bar f'_{10} + \bar g_{20} = 0.\lambdabel{e20} \varepsilonnd{equation} For $(k,l) = (2,1)$ we obtain \begin{equation} -\frac52 i \bar g'_{00} - \frac{1}{2} h'_{20} - \frac{i}2 h'_{00} + 2i f'_{11} + g_{21} = 0. \lambdabel{e21} \varepsilonnd{equation} Collecting all terms of the kind $z^3 \bar\zeta u^j$ gives: \begin{equation}- \frac12 h'_{10 }+2f_{20} - i \bar f'_{00} =0. \lambdabel{e22} \varepsilonnd{equation} Collecting all terms of the kind $z^3 \bar z^3 u^j$ gives: \begin{equation}\mathop{\rm Re}\nolimits (\frac16 h'''_{00 } - f''_{10} + i g'_{20}) =0. 
\label{e23} \end{equation} Further, collecting terms with $z^4\bar\zeta u^j$ we get: \begin{equation}-\frac{1}{2}h'_{20}+2f_{30}-\frac{i}{2}\bar g_{00}'=0.\label{e24} \end{equation} Collecting all terms of the kind $z^3 \bar z \bar \zeta u^j$ gives: \begin{equation}- \frac{i}{2} h''_{00 }+3i f'_{10} + 2 g_{20} + \frac{i}{2} \bar h''_{00} - i \bar f'_{10} -i \bar g'_{01} =0. \label{e25} \end{equation} Finally, collecting all terms of the kind $z^4 \bar z \bar \zeta u^j$ gives: \begin{equation}-\frac{i}{2}h_{10}''+3if_{20}'-\bar f_{00}''+2g_{30}-\frac{i}{2}\bar g_{10}'=0.\label{e26} \end{equation} Now all the terms appearing in the normal form space conditions \eqref{Nspace}, \eqref{Nspace2}, \eqref{Nspace3} are considered, and we have to show that the resulting system of equations for $f_{kl},g_{kl},h_{kl},\,k,l\geq 0$ determines the latter ones uniquely. We do it step-by-step. We also make use of the following \noindent{\bf Convention.} In what follows, dots stand for linear expressions in the previously determined coefficient functions $f_{kl},g_{kl},h_{kl}$. \medskip Equation \eqref{e1} determines $h_{kl}$ for all $(k,l)$, except for $h_{00}$, $h_{10}$ and $h_{20}$. Equation \eqref{e2} determines $\mathop{\rm Im}\nolimits h_{00}$. Equation \eqref{e3} expresses $h_{10}$ by $f_{00}$, namely \begin{equation} h_{10} = 2i \bar f_{00}. \end{equation} Equation \eqref{e4} expresses $h_{20}$ by $g_{00}$, namely \begin{equation} h_{20} = i \bar g_{00}. \end{equation} Equation \eqref{e5} determines $f_{kl}$ for $ k > 0$, except for $f_{10}$, $f_{20}$, $f_{30}$ and $f_{11}$. Equation \eqref{e6} determines $f_{0l}$, except for $f_{00}$, $f_{01}$. Equation \eqref{e7} expresses $f_{11}$ by $g_{00}$, namely \begin{equation} f_{11} = - \bar g_{00} + \dots. 
\varepsilonnd{equation} (see the Convention above). Equation \varepsilonqref{e8} expresses $\mathop{\rm Re}\nolimits f_{10}$ by $\mathop{\rm Re}\nolimits h'_{00}$, namely \begin{equation} \mathop{\rm Re}\nolimits f_{10} = \frac12 \mathop{\rm Re}\nolimits h'_{00}. \varepsilonnd{equation} Equation \varepsilonqref{e9} expresses $f_{01}$ by $f_{00}$, namely \begin{equation} f_{01} = - \bar f_{00}. \varepsilonnd{equation} Equation \varepsilonqref{e10} expresses $f_{20}$ by $f'_{00}$ and $g_{10}$, namely \begin{equation} f_{20} = 2i \bar f'_{00} - \frac12 \bar g_{10}. \varepsilonnd{equation} Equation \varepsilonqref{e11} expresses $f_{30}$ by $g'_{00}$, using \varepsilonqref{e4}, namely \begin{equation} f_{30} = \frac12 h'_{20} + \frac{i}2 \bar g'_{00} = i \bar g'_{00}. \varepsilonnd{equation} Equation \varepsilonqref{e12} determines $g_{kl}$ for $k>1$ and $l>0$, except for $g_{21}$. Equation \varepsilonqref{e13} determines $g_{k0}$ for $k>3$. In particular, $g_{40}$ is expressed by $g'_{00}$, as \begin{equation} g_{40} = \frac{i}2 h''_{20} - 2i f'_{30} = \frac32 \bar g''_{00}. \varepsilonnd{equation} Equation \varepsilonqref{e14} determines $g_{1l}$ for $l>0$, except for $g_{11}$. Equation \varepsilonqref{e15} determines $g_{0l}$, except for $g_{00}$, $g_{01}$ and $g_{02}$. Equation \varepsilonqref{e16} expresses $g_{02}$ by $g_{00}$, namely \begin{equation} g_{02} = - \bar g_{00}+\cdots. \varepsilonnd{equation} Equation \varepsilonqref{e17} expresses $g_{11}$ by $g_{10}$ and $f_{00}$, namely \begin{equation} g_{11} = 6i \bar f'_{00} - 2\bar g_{10}+\cdots. \varepsilonnd{equation} Equation \varepsilonqref{e18} expresses $g_{30}$ by $g'_{10}$ and $f'_{00}$, namely \begin{equation} g_{30} = 2i \bar g'_{10} +4 \bar f''_{00}+\cdots. 
\varepsilonnd{equation} Equation \varepsilonqref{e19} gives $g_{01}$ by $\mathop{\rm Im}\nolimits f_{10}$ and $\mathop{\rm Re}\nolimits h_{00}$, namely \begin{equation} \mathop{\rm Im}\nolimits g_{01} = 2 \mathop{\rm Im}\nolimits f_{10} \varepsilonnd{equation} and \begin{equation} \mathop{\rm Re}\nolimits g_{01} = \mathop{\rm Re}\nolimits h'_{00} - 2 \mathop{\rm Re}\nolimits f_{10}. \varepsilonnd{equation} Equation \varepsilonqref{e20} gives $\mathop{\rm Re}\nolimits g_{20}$ by $\mathop{\rm Im}\nolimits f_{10}$, namely \begin{equation} \mathop{\rm Re}\nolimits g_{20} = 2 \mathop{\rm Im}\nolimits f'_{10}+\cdots \varepsilonnd{equation} Equation \varepsilonqref{e21} gives $g_{21}$ by $g'_{00}$ and $\mathop{\rm Re}\nolimits h_{00}$, namely \begin{equation} g_{21} = \frac52 i \bar g'_{00} + \frac{i}2 \mathop{\rm Re}\nolimits h'_{00}+\cdots \varepsilonnd{equation} Next, equations \varepsilonqref{e22} and \varepsilonqref{e26} give a system for $f_{00}$ and $g_{10}$, namely \begin{equation} 2i \bar f'_{00} - \bar g_{10} +\cdots= 0 \varepsilonnd{equation} and \begin{equation} 6 \bar f''_{00} - \frac{i}{2} \bar g'_{10} +\cdots= 0. \varepsilonnd{equation} Equation \varepsilonqref{e24} determines $\bar g'_{00}$, by \begin{equation} i \bar g'_{00} + \dots = 0. \varepsilonnd{equation} The real part of equation \varepsilonqref{e25} determines $\mathop{\rm Im}\nolimits f'_{10}$, \begin{equation} -2 \mathop{\rm Im}\nolimits f'_{10} + \dots = 0. \varepsilonnd{equation} The imaginary part of \varepsilonqref{e25} together with \varepsilonqref{e23} give a system of two real equations for $Re h_{00}$ and $\mathop{\rm Im}\nolimits g_{20}$, namely \begin{equation} \mathop{\rm Re}\nolimits h''_{00} +2 \mathop{\rm Im}\nolimits g_{20}+\cdots= 0 \varepsilonnd{equation} and \begin{equation} \frac16 \mathop{\rm Re}\nolimits h'''_{00} - \mathop{\rm Im}\nolimits g'_{20}+\cdots = 0, \varepsilonnd{equation} which determines $\mathop{\rm Re}\nolimits h_{00}$ and $\mathop{\rm Im}\nolimits g_{20}$. 
\end{proof} \autoref{directsum} and the relations \eqref{homolog} imply, in the standard manner, the following proposition, which is the first part of \autoref{main}. \begin{proposition}\Label{formalnf} For the germ at a point $p$ of any (formal or real-analytic) everywhere $2$-nondegenerate hypersurface in $\CC{3}$, there exists a formal transformation $H:\,(\CC{3},p)\lr(\CC{3},0)$ mapping $M$ into a hypersurface in normal form \eqref{nspace}. In any (formal or holomorphic) local coordinates \eqref{germ}, a normalizing transformation is unique up to the right action of the $5$-dimensional stability group $G$ of the model \eqref{cone}. \end{proposition} \section{Convergence of the formal normal form and applications} The proof of \autoref{main} is accomplished by \autoref{formalnf} and the following convergence theorem. \begin{theorem}\Label{converge} Any formal transformation bringing a uniformly $2$-nondegenerate real-analytic hypersurface \eqref{germ} to the normal form \eqref{Nspace} -- \eqref{Nspace3} is convergent. \end{theorem} \begin{proof} We will show that the transformation \eqref{normalmap} bringing a hypersurface $M$, as in \eqref{germ}, to a normal form is convergent. Recall that the factorization \eqref{factord} allows one to deal with all the other normalizing transformations. The proof is split into several steps. We keep the notation \begin{equation}\Label{def} v=P(z,\zeta,\bar z,\bar\zeta)+\sum_{k,l,\alpha,\beta\geq 0}\Psi_{kl\alpha\beta}(u)z^k\zeta^l\bar z^\alpha\bar\zeta^\beta \end{equation} for the defining equation of $M$. \subsection{Proof of Theorem 3} \mbox{} \medskip \noindent{\bf Step I: choice of a transverse curve.} We make a special choice of a smooth real-analytic curve $\gamma$ transverse at $0$ to the complex tangent. 
The choice is specified later in Step VIII. \noindent{\bf Step II: removing harmonic terms.} Next, we do a local biholomorphism at $0$ which eliminates the harmonic terms in \varepsilonqref{def} (that is, we get $\Psi_{kl00}=0$) and, at the same time, straightens the curve $\mathfrak{g}amma$, that is, $\mathfrak{g}amma$ becomes \begin{equation}\Label{Gamma} \Gamma=\{z=\zetaeta=0,\,\,\ensuremath{\mbox{\rm Im}\,} w=0\}. \varepsilonnd{equation} The latter is possible due to e.g. \cite{ber},\cite{lmblowups}. In what follows, we consider only transformation preserving \varepsilonqref{Gamma}. \noindent{\bf Step III: cleaning the $2$-jet.} We perform a gauge transformation $$z{\mathcal{M}}apsto f(w)z,\quad \zetaeta{\mathcal{M}}apsto\zetaeta,\quad w{\mathcal{M}}apsto w$$ with an appropriate $f(w),\,f(0)=0,\,f'(0)=1$ in order to achieve $\Psi_{1010}(u)=0$ (we use $\Psi_{1010}(0)=0$). Further, we use the nondegeneracy of the term $z\bar z$ in \varepsilonqref{def} to eliminate terms $z\bar\zetaeta u^m$ in \varepsilonqref{def} by means of a transformation $$z{\mathcal{M}}apsto z+f(w)\zetaeta,\quad\zetaeta{\mathcal{M}}apsto\zetaeta,\quad w{\mathcal{M}}apsto w$$ with an appropriate $f(w)$. Geometrically, this means straightening the Levi kernels along $\Gamma$ (alternatively, one can consider the $3$-dimensional variety constructed as the union of Levi curves through points of $\Gamma$ and then a holomorphic transformation, straightening the surface and the curves and preserving $\Gamma$). We end up with the additional normalization condition $\Psi_{1001}=0$ achieved. Furthermore, since the Levi rank along $\Gamma$ is constantly $1$, this also implies $\Psi_{0101}=0$. Note that now the {\varepsilonm only} term of degree $\leq 2$ in $z,\bar z,\zetaeta,\bar \zetaeta$ in $\Psi$ is $z\bar z$. 
\noindent{\bf Step IV: cleaning the $3$-jet.} We first perform a gauge transformation $$z{\mathcal{M}}apsto z,\quad \zetaeta{\mathcal{M}}apsto g(w)\zetaeta,\quad w{\mathcal{M}}apsto w$$ with an appropriate $g(w),\,g(0)=0,\,g'(0)=1$ in order to achieve $\Psi_{2001}(u)=0$ (we use $\Psi_{2001}(0)=0$). Next, we perform a transformation $$z{\mathcal{M}}apsto z+z^2f(w),\quad \zetaeta{\mathcal{M}}apsto \zetaeta+g(w)z, \quad w{\mathcal{M}}apsto w$$ with appropriate $f(w),g(w)$ to eliminate in \varepsilonqref{def} the term $\Psi_{2010}$ and at the same time $\Psi_{3001}$. To accomplish this, we note that the latter two terms transform as follows: $$ \Psi_{2010} {\mathcal{M}}apsto \Psi_{2010}+f(u)+\frac{1}{2}\bar g(u)+\cdots, \quad \Psi_{3001}{\mathcal{M}}apsto \Psi_{3001}+f(u)+\cdots,$$ where dots stand for expressions analytic in $u$ and polynomial in $f,\bar f,g,\bar g$, which either have degree $\mathfrak{g}eq 2$ in $f,\bar f,g,\bar g$, or have degree $1$ in the above but then have a factor vanishing at $u=0$. The latter follows from the previously achieved normalization conditions (compare also with \varepsilonqref{e10},\varepsilonqref{e22}). Now the desired choice of $f,g$ is accomplished by applying the implicit function theorem. Finally, we remove in this step the $z\zetaeta\bar z$ term by a transformation $$z{\mathcal{M}}apsto z+z\zetaeta f(w),\quad \zetaeta{\mathcal{M}}apsto \zetaeta, \quad w{\mathcal{M}}apsto w$$ with an appropriate $f(w)$. Now $\Psi_{1110}=0$. Note that, arguing identically to the proof of \autoref{goodperturb}, it is easy to conclude that the terms $\zetaeta^2\bar\zetaeta$, $z\zetaeta\bar\zetaeta$, and $\zetaeta^2\bar z$ (with $u^0$) are not present now in \varepsilonqref{def}. At the same time, we observe that one can shift the basic point $(0,0,0)$ to $(0,0,u_0),\,u_0\in\RR{}$ (the latter is still within the curve $\Gamma$, as in \varepsilonqref{Gamma}). 
Then the shifted function $\Psi$ must satisfy the same property, so that we get $\Psi_{0201}=\Psi_{1011}=\Psi_{0210}=0$. We end up with a hypersurface \eqref{def} for which all terms of degree $\leq 3$ in $z,\bar z,\zeta,\bar \zeta$ in $\Psi$ vanish. Further, repeating the above argument, we get for the fourth order terms $\Psi_{\bold k}$: \begin{equation}\Label{4ord} (\Psi_{\bold k})_{\zeta\bar\zeta}=0, \quad |k|=4. \end{equation} \noindent{\bf Step V: choosing an orthonormal basis within the Levi kernel.} We now perform a transformation $$z\mapsto ze^{i\varphi(w)},\quad \zeta\mapsto \zeta e^{2i\varphi(w)}+g(w)z^2,\quad w\mapsto w,$$ where $\varphi(w),g(w)$ satisfy $\varphi(0)=0,\,\varphi(\RR{})\subset\RR{},\,g(\RR{})\subset\RR{},$ in order to achieve simultaneously $\ensuremath{\mbox{\rm Re}\,}\Psi_{3011}=0$ and $\Psi_{2020}=0$. Geometrically, the latter transformation fixes an orthonormal basis within the Levi kernel $\{\ensuremath{\mbox{\rm Im}\,} w=0,\,z=0\}$ along the transverse curve $\Gamma$. Indeed, for a hypersurface satisfying the previous normalization conditions, it is not hard to compute (using, in particular, \eqref{4ord}) that the desired terms change as $$\ensuremath{\mbox{\rm Re}\,}\Psi_{3011}(u)\mapsto \ensuremath{\mbox{\rm Re}\,}\Psi_{3011}(u)-\frac{1}{2}\varphi'(u), \quad \Psi_{2020}(u)\mapsto \Psi_{2020}(u)-2\varphi'(u)+g(u).$$ This shows the desired choice of $\varphi,g$. \noindent{\bf Step VI: removing terms\, $hol\cdot\bar z$\, and\, $hol\cdot\bar z^2$.} For a hypersurface \eqref{def} satisfying the normalization conditions achieved in Steps I--IV, we perform a Chern-Moser type transformation of the kind \begin{equation}\Label{killk1} z\mapsto z+f(z,\zeta,w),\,\,\zeta\mapsto\zeta,\,\,w\mapsto w, \end{equation} where $f$ preserves the origin and has degree at least $3$ in $z,\zeta$. 
If one denotes now the sum of all nonzero terms in $\Psi$ of the kind $z^k\zeta^l\bar z^1\bar\zeta^0 u^m$ with $k+l\geq 3$ by $\chi(z,\zeta,u)\bar z$, then, using the previous normalization conditions, we see that one can simply take $$f(z,\zeta,w):=\chi(z,\zeta,w)$$ and obtain a hypersurface satisfying, in addition, $\Psi_{kl10}=0$. Further, we perform a Chern-Moser type transformation of the kind \begin{equation}\Label{killk2} z\mapsto z,\,\,\zeta\mapsto\zeta+g(z,\zeta,w),\,\,w\mapsto w \end{equation} where $g$ preserves the origin and has degree at least $2$ in $z,\zeta$. If one denotes then the sum of all nonzero terms in $\Psi$ of the kind $z^k\zeta^l\bar z^2\bar\zeta^0 u^m$ with $k+l\geq 2$ by $\rho(z,\zeta,u)\bar z^2$, then a direct calculation shows that one can simply take $$g(z,\zeta,w):=\rho(z,\zeta,w)$$ and obtain a hypersurface satisfying also $\Psi_{kl20}=0$. This implies, in particular, \begin{proposition}\Label{killed+} For an analytic everywhere $2$-nondegenerate hypersurface, a transformation eliminating the terms \eqref{killed}, as suggested by \cite{bk}, can be chosen to be holomorphic. \end{proposition} We now apply \autoref{goodperturb} and conclude that the hypersurface $M$, in particular, satisfies the condition \eqref{perturb}. Furthermore, collecting all the above information, we conclude that all terms of degree $\leq 4$ in $z,\bar z,\zeta,\bar \zeta$ in $\Psi$ vanish. We shall now discuss in a separate section the distinguished choice of a transverse curve mentioned in Step I. \subsection{Chains in an everywhere $2$-nondegenerate hypersurface} We are now aiming to specify a curve chosen in Step I. An appropriate choice of this curve leads to the additional normalization conditions $\Psi_{4001}=\Psi_{4011}=0$. A curve with this property is called \emph{a chain}. 
Recall that the complex tangent bundle $T^{\CC{}}M$ is endowed with the canonical (holomorphically invariant) subbundle $K$, where $K_p$ is the Levi Kernel for $M$ at $p$. Hence, we may consider the canonical (holomorphically invariant) bundle $TM / K$ over $M$ as well as its projectivization \begin{equation}\Label{pbundle} X:={\mathcal{M}}athbb P (TM/K). \varepsilonnd{equation} Every biholomorphism $H:\,M{\mathcal{M}}apsto M^*$ naturally extends to one between $TM / K$ and $TM^* / K^*$ and hence to one between the projectivizations \varepsilonqref{pbundle}. Further, any smooth curve $\mathfrak{g}amma\subset M$ naturally lifts to the projective bundle $X$. In turn, if $M$ is in the form \varepsilonqref{germ}, the fiber of $TM/K$ at $0\in M$ can be identified with the $(z,u)$-subspace in $T_0M$. The bundles $TM / K$ and $X$ are very convenient for describing (formal) transformations into a normal form $M*$ geometrically. Namely, for $p\in M$ and for each direction $l_p\subset T_pM/K_p$ transverse to $T^{\CC{}}_pM/K_p$ (which can be, in fact, seen as an element of the fiber of $X$ at $p$), there is a normalizing transformation $H:\,(M,p){\mathcal{M}}apsto (M*,0)$ such that its extension to $X$ maps $(p,l_p)$ into $(0,L_0)$, where $L_0$ is the point in $X*$ corresponding to the "vertical direction" \varepsilonqref{Gamma}. In turn, for $M$ given by coordinates \varepsilonqref{germ}, choice of $l_0$ corresponds to the action of the algebra ${\mathcal{M}}athfrak g_1$, as in \varepsilonqref{g+}, on the normal forms by the formula \varepsilonqref{factord}. Note also that the subalgebras ${\mathcal{M}}athfrak g_0^c$ and ${\mathcal{M}}athfrak g_2$ from the isotropy algebra \varepsilonqref{isotropy} preserve the condition of being in normal form (this can be seen by a straightforward calculation). Besides, they preserve the curve $\Gamma$. 
That is why any two transformations $H_1,H_2$ bringing $M$ to a normal form and mapping the same direction $l_p$ into $L_0$ are related as \begin{equation}\Label{reltd} H_2=\psi\circ H_1. \end{equation} Let us now fix $p\in M$ and a pair $(p,\bold v)\in X$, and consider a (formal) transformation $H:\,(M,p)\mapsto (M^*,0)$ bringing $M$ into a (formal) normal form $M^*$ at $p$, and such that the induced map $\tilde H:\,X\mapsto X^*$ maps $(p,\bold v)$ into $(0,L_0)$. Let $\tilde\Gamma$ be the lifting of the curve $\Gamma$, as in \eqref{Gamma}, to the bundle $X$. Consider finally \begin{equation}\Label{field} d\tilde H^{-1}|_{(0,L_0)}(T_0\tilde\Gamma)\subset T_{(p,\bold v)}X. \end{equation} For a fixed $\tilde H$, \eqref{field} defines a direction in the tangent space $T_{(p,\bold v)}X$. We claim that the latter direction does {\em not} depend on the choice of $H$. Indeed, this follows from \eqref{reltd} and the fact that $\mathfrak g_0^c$ and $\mathfrak g_2$ preserve $\Gamma$, while their extensions to $X$ preserve $T_0\tilde\Gamma$. The latter means that \eqref{field} defines a direction field in $X$. In turn, in any coordinates \eqref{germ}, the value of this direction field at $(0,\bold v)\in X$ amounts to the complex numbers $f_{ww}(0)$ and $g_w(0)$, where $H=(f,g,h)$ is the (formal) normalizing transformation. (And, as discussed above, a choice of a direction $l_0$ amounts to the complex number $f_w(0)$). We next prove \begin{proposition}\Label{analfield} The direction field in $TM/K$ defined by \eqref{field} is in fact analytic. \end{proposition} \begin{proof} First, we note that the coordinates \eqref{germ} (actually achieved by a bi-cubic change of variables in $\CC{3}$) depend on the point $p$ analytically. This can be easily seen from the procedure in \cite{ebenfeltC3} and the implicit function theorem. 
Second, as follows from \autoref{killed+}, terms \varepsilonqref{killed} can be removed by a holomorphic transformation (and hence the representation \varepsilonqref{perturb} can be achieved by the same holomorphic transformation). In turn, we may choose such transformation as a composition of Steps I to VIII above (with an arbitrary choice of a transverse curve in Step I). The explicit procedure in Steps I to VIII combined with the implicit function theorem imply then that coordinates \varepsilonqref{perturb} depend on the point $p$ analytically as well. The latter means that, for proving the proposition, we have to prove the following claim: {\varepsilonm for a hypersurface \varepsilonqref{perturb} and a (formal) normalizing transformation $H=(f,g,h)$ with $f_w(0)=a$, the functions $\chi(a):=f_{ww}(0),\,\tau(a):=g_w(0)$ are real-analytic in $a$}. The proof of the claim is a slight modification of the proof of a similar claim in e.g. \cite{generic}. We note that, for a normalizing transformation $H=(f,g,h)$, the parameter $a$ is a part of the collection $H_j=(f_{j-1},g_{j-2},h_j)$ with $j=3$, while $\chi(a)$ is a part of the one with $j=5$ and $\tau(a)$ is a part of the one with $j=4$. The collections $H_4,H_5$ are obtained by solving the equations \varepsilonqref{homolog}, with the initial data corresponding to the element of the flow of ${\mathcal{M}}athfrak{g}_1$ in \varepsilonqref{isotropy} with $f_w(0)=a$. In view of \varepsilonqref{homolog} and \autoref{directsum} (arguing by induction), all the collections $H_j$ with $j\mathfrak{g}eq 3$ are obtained by solving a system of linear equations with nondegenerate (and fixed) matrix and the right hand side depending polynomially on $a,\bar a$. That is why, in particular, $H_4,H_5$ are polynomial in $a,\bar a$ and so are $\chi(a),\tau(a)$, as required. This proves the claim and the proposition. 
\end{proof} We now integrate the direction field \eqref{field} and obtain a foliation of $X$ by smooth real-analytic curves $\tilde\gamma$. This leads to the following \begin{definition}\Label{degchain} Canonical projections of the curves $\tilde\gamma$ on $M$ are called {\em chains}. \end{definition} As follows from the above procedure, through each point $p$ there is a unique chain in a fixed direction $l_p\subset T_pM/K_p$ transverse to $T^{\CC{}}_pM/K_p$. Furthermore, importantly, {\em the family of chains is biholomorphically invariant}, as follows from its definition. As follows, again, from the definition, {\em chains are mapped by normalizing transformations into the standard ``vertical''\, curve~\eqref{Gamma}}. We shall remark that, as discussed in the Introduction, orbits of the linear part of the stabilizer \eqref{isotropy} of the model do {\em not} act transitively on transverse directions anymore (unlike the Levi-nondegenerate situation). It is not difficult to compute that, for a hypersurface \eqref{germ}, the orbit of the ``vertical''\, direction \eqref{Gamma} in $T_0M$ is the cone \begin{equation}\Label{cancone} \left\{(z,\zeta,u):\,\,\zeta u+iz^2=0,\,\,u\neq 0\right\}\subset T_0M. \end{equation} \begin{definition} The cone \eqref{cancone} is called the {\em canonical cone} for $M$ at $0$. \end{definition} Clearly, the canonical cone at $p$ does not depend on the choice of coordinates \eqref{germ} and is furthermore biholomorphically invariant. That is, each everywhere $2$-nondegenerate hypersurface $M$ is equipped with a field of canonical cones in tangent spaces. Possible directions of chains in the tangent space are {\em precisely} the directions from the canonical cone. 
\subsection{End of proof of Theorem 3} \mbox{} \medskip \noindent{\bf Step VII: applying the chain property.} We are now able to specify Step I below. Namely, for a hypersurface \eqref{germ}, we choose $\gamma$ to be the unique chain in the ``vertical'' direction (i.e. the direction corresponding to the line \eqref{Gamma}). We finally have to prove that, with the above choice of $\gamma$, we have $\Psi_{4001}=\Psi_{4011}=0$ upon completion of Steps I--VI. In view of the invariance in $u$ of the pre-normal form achieved in Steps I to VI, it is enough to prove $\Psi_{4001}(0)=\Psi_{4011}(0)=0$. Consider a (formal) transformation $H=(f,g,h)$, as in \eqref{normalmap}, bringing $(M,0)$ into a normal form. By the definition of the chain, it satisfies: \begin{equation}\Label{ura} f_{ww}(0)=0,\quad g_w(0)=0. \end{equation} In view of that and the outcome of Step VI, when solving the equations \eqref{homolog} for the map $H$, for $j=3$ we get the same result as for the identity map. For $j=4$, it is straightforward to see from the basic identity \eqref{basic}, the outcome of Step VIII and the fact that $H$ coincides with the identity map to weight $3$ that the weight $4$ identity gives {\em precisely} the same equations for the collection $H_4=(f_3,g_2,h_4)$ as in the formal procedure in \autoref{directsum}, besides the only equation for the $z^3\bar\zeta$ terms (analogous to \eqref{e24}) which gives $\Psi_{4001}(0)$ on the right-hand side instead of $0$. Then, by using the second condition in \eqref{ura}, we conclude that $\Psi_{4001}(0)=0$. Very similarly, when considering $j=5$ and the $z^4\bar z\bar\zeta$ terms, we use the first condition in \eqref{ura} and obtain $\Psi_{4011}(0)=0$. As a result, we end up with a hypersurface satisfying, in addition, $\Psi_{4001}=\Psi_{4011}=0$. 
\noindent{\bf Step VIII: choice of a parameterization along the chain.} The last remaining conditions for $\Psi$ to belong to the normal form space are $\ensuremath{\mbox{\rm Im}\,}\Psi_{3011}=0$ and $\Psi_{3030}=0$. We achieve the latter ones by means of a transformation $$z{\mathcal{M}}apsto f(w)z,\,\,\zetaeta{\mathcal{M}}apsto\zetaeta+ig(w)z^2,\,\,w{\mathcal{M}}apsto h(w),$$ where $$f(0)=1,\,\,h(0)=h''(0)=0,\,\,h'(w)=f^2(w),\,\,f(\RR{})\subset\RR{},\,\,g(\RR{})\subset\RR{}.$$ Similarly to the Levi-nondegenerate case \cite{chern}, such a transformation corresponds to a choice of parameterization along the chain. It is then possible to compute (employing the previously achieved normalization conditions) that \begin{equation}\Label{comptd} \Psi_{3011}{\mathcal{M}}apsto \ensuremath{\mbox{\rm Im}\,}\Psi_{3011}h'-\frac{1}{2}h''+\frac{1}{2}gh',\quad \Psi_{3030}{\mathcal{M}}apsto h'\Psi_{3030}+\left(\frac{1}{6}h'''-\frac{1}{2}\frac{h''^2}{h'}\right)-\frac{1}{2}g'(u). \varepsilonnd{equation} Now the conditions $\ensuremath{\mbox{\rm Im}\,}\Psi_{3011}=\Psi_{3030}=0$ turn \varepsilonqref{comptd} into a system of analytic nonsingular ODEs, which we solve uniquely with the Cauchy data $h(0)=0,\,h'(0)=1,\,h''(0)=0$. (To see this, one has to solve the first equation for $g$ and substitute the result into the second, which makes the second equation a nonsingular third order ODE in $h$; the latter is solved uniquely with the above initial data, and then $g$ is found from the substitution). This completely proves the theorem. \varepsilonnd{proof} \autoref{converge} immediately implies \autoref{main}. \subsection{Proofs of further results.} As mentioned above, \autoref{main} allows for important applications stated in \autoref{main2} and \autoref{main3}. We give the proofs for these theorems below. 
\begin{proof}[Proof of \autoref{main2}] According to \cite{pocchiola}, the local equivalence of an everywhere $2$-nondegenerate hypersurface to the model \eqref{cone} amounts to the vanishing of the two basic invariants $W$ and $J$ (given by lengthy expressions in terms of the defining function and the CR-vector fields, which we do not provide here). It is straightforward to check then that, for a hypersurface in normal form \eqref{nspace}, the invariant $W|_0$ is proportional to the normal form coefficient $\Psi_{3002}(0)$, and, as long as $\Psi_{3002}(0)$ vanishes, the invariant $J|_0$ is proportional to the normal form coefficient $\Psi_{5001}(0)$. This immediately implies the assertion of the theorem. \end{proof} \begin{proof}[Proof of \autoref{main3}] For a hypersurface \eqref{model+} in normal form \eqref{nspace}, we argue as in the proof of \autoref{goodperturb} and consider its complex defining equation $w=\theta(z,\zeta,\bar z,\bar\zeta,\bar w)$. The uniform $2$-nondegeneracy of $M$ then gives the determinant equation \begin{equation}\label{deteqn} \begin{vmatrix} \theta_{\bar z} & \theta_{\bar \zeta} & \theta_{\bar w} \\ \theta_{z\bar z} & \theta_{z\bar \zeta} & \theta_{z\bar w} \\ \theta_{\zeta\bar z} & \theta_{\zeta\bar \zeta} & \theta_{\zeta\bar w} \end{vmatrix}=0. \end{equation} We now need to express the latter equation as a PDE for the function $\Phi$, as in \eqref{model+}. For doing so, we have to establish relations between the $2$-jets of the real defining function $\varphi(z,\zeta,\bar z,\bar\zeta,u):=P(z,\zeta,\bar z,\bar\zeta)+\Phi(z,\zeta,\bar z,\bar\zeta,u)$ and the complex defining function $\theta$. 
Considering the identity $$\frac{1}{2i}(\theta-\bar w)=\varphi\left(z,\zetaeta,\bar z,\bar\zeta,\frac{1}{2}(\theta+\bar w)\right)$$ as an identity in $(z,\zetaeta,\bar z,\bar\zeta,\bar w)$, we differentiate the latter once in each of the variables and easily conclude: \begin{equation}\lambdabel{1jet} \theta_{\bar z}=\frac{2i\varphi_{\bar z}}{1-i\varphi_u},\,\,\theta_{\bar \zeta}=\frac{2i\varphi_{\bar \zeta}}{1-i\varphi_u},\,\,\theta_{\bar w}=\frac{1}{1-i\varphi_u},\,\, \varepsilonnd{equation} Differentiating then once more, we get: \begin{equation}\lambdabel{2jet} \begin{aligned} &\theta_{z\bar z}=2i\varphi_{z\bar z}+\cdots,\,\,\theta_{z\bar \zeta}=2i\varphi_{z\bar \zeta}+\cdots,\,\,\theta_{z\bar w}=i\varphi_{zu}+\cdots,\\ &\theta_{\zeta\bar z}=2i\varphi_{\zeta\bar z}+\cdots,\,\,\theta_{\zeta\bar \zeta}=2i\varphi_{\zeta\bar \zeta}+\cdots,\,\,\theta_{\zeta\bar w}=i\varphi_{\zeta u}+\cdots, \varepsilonnd{aligned} \varepsilonnd{equation} where dots stand for terms of degree $\mathfrak{g}eq 2$ in the first and second order derivatives of $\varphi$. It is not difficult to see then, by using \varepsilonqref{cone}, that the relations \varepsilonqref{1jet},\varepsilonqref{2jet} turn \varepsilonqref{deteqn} into a PDE of the kind: \begin{equation}\lambdabel{thePDE} \Phi_{\zeta\bar\zeta}=T\bigl(z,\bar z,\zeta,\bar\zeta,\Phi_{\bar z},\Phi_{\bar\zeta},\Phi_u,\Phi_{z\bar z},\Phi_{z\bar \zeta},\Phi_{zu},\Phi_{\zeta\bar z},\Phi_{\zeta u}\bigr), \varepsilonnd{equation} where $T$ is a {\varepsilonm universal} (i.e. independent on $M$) rational function in all its variables with $T(0)=0,\,dT(0)=0$ (the function $T$ can be deduced explicitly, even though the resulting expression is cumbersome and we do not provide it here). The PDE \varepsilonqref{thePDE} with the initial data on the ``cross'' $\zeta\bar\zeta=0$ is a particular case of the classical {\varepsilonm Goursat problem}. 
We now make use of the result of Lednev (see \cite{lednev},\cite{wagschal}) suggesting, in particular, that a PDE of the kind \varepsilonqref{thePDE} with analytic at the origin right hand side satisfying $T(0)=0,\,dT(0)=0$ has a unique analytic at the origin solution with $\Phi(z,\zetaeta,\bar z,0,u)=\Phi(z,0,\bar z,\bar\zeta,u)=0$. (Note that, as long as $\Phi$ satisfies the reality condition, the second condition automatically follows from the first one). To apply the result of Lednev for \varepsilonqref{thePDE} with any analytic at the origin initial data \begin{equation}\lambdabel{data} \Phi(z,\zetaeta,\bar z,0,u)=:\chi(z,\zeta,\bar z,u)\in{\mathcal{M}}athcal D, \varepsilonnd{equation} we shift $\Phi$ by the associated real-analytic expression \varepsilonqref{disting} and note that, since functions in ${\mathcal{M}}athcal D$ all have vanishing $4$-jet at the origin, the modified PDE (which shall be already considered as one with the zero initial data, as required in Lednev's theorem) also has vanishing linear part at the origin. Now Lednev's theorem implies that the PDE \varepsilonqref{thePDE} has a unique analytic near the origin solution for any analytic at the origin initial data \varepsilonqref{data}. To see that the solution $\Phi$ belongs to the normal space \varepsilonqref{nspace}, we note that all possible monomials in \varepsilonqref{nspace} vanishing of which determines the normal form space all have either $\zeta$ or $\bar\zeta$ absent. But monomials of the latter kind which are present in $\Phi$ (or their conjugated) must belong to ${\mathcal{M}}athcal D$ due to the initial data \varepsilonqref{data}, while monomials from ${\mathcal{M}}athcal D$ satisfy the normal form conditions by definition of ${\mathcal{M}}athcal D$. Thus $\Phi$ belongs to the normal form space. 
Finally, it is not difficult to see that the PDE \eqref{thePDE} is real, that is, it is a real-analytic (rational) PDE for a scalar real function $\Phi$ in the variables $(x,y,s,t,u),\,z=x+iy,\,\zeta=s+it$ (it can be seen from its invariance under the involution $z\leftrightarrow\bar z,\,\zeta\leftrightarrow\bar \zeta$). The reality condition in the space $\mathcal D$ means that the initial data under consideration amounts to real-analytic initial data on the ``cross''\, $st=0$ which is compatible on the intersection $s=t=0$. The latter means that the solution satisfies the reality condition $\Phi(z,\zeta,\bar z,\bar\zeta,u)\in\RR{}$. This immediately implies the assertion of the theorem. \end{proof} \begin{thebibliography}{20000000} \bibitem[BER99]{ber} M. S. Baouendi, P. Ebenfelt, L. P. Rothschild, ``Real Submanifolds in Complex Space and Their Mappings''. Princeton University Press, Princeton Math. Ser. {\bf 47}, Princeton, NJ, 1999. \bibitem[BER96]{beralg} M. S. Baouendi, P. Ebenfelt, L. P. Rothschild, {\it Algebraicity of holomorphic mappings between real algebraic sets in $C^n$}. Acta Math. 177 (1996), no. 2, 225--273. \bibitem[BHR96]{bhr} S. Baouendi, X. Huang and L.P. Rothschild, {\it Regularity of CR mappings between algebraic hypersurfaces.} Invent. Math. 125 (1996), 13--36. \bibitem[Bel05]{belC3} V. K. Beloshapka, {\it Symmetries of real hypersurfaces of a three-dimensional complex space.} (Russian) Mat. Zametki 78 (2005), no. 2, 171--179. \bibitem[Bel79]{belold} V. K. Beloshapka, {\it The dimension of the group of automorphisms of an analytic hypersurface} (Russian) Izv. Akad. Nauk SSSR Ser. Mat. 43 (1979), no. 2, 243--266. \bibitem[BK15]{bk} V. K. Beloshapka, I. Kossovskiy, {\it The sphere in $\CC{2}$ as a model surface for degenerate hypersurfaces in $\CC{3}$}. Russ. J. Math. Phys. 22 (2015), no. 4, 437--443. \bibitem[Ca32]{cartan} \'E. 
Cartan, {\it Sur la g\'eom\'etrie pseudo-conforme des hypersurfaces de l'espace de deux variables complexes II}. Ann. Scuola Norm. Sup. Pisa Cl. Sci. (2) 1 (1932), no. 4, 333--354. \bibitem[Ca84]{catlin} D. Catlin, {\it Boundary invariants of pseudoconvex domains.} Ann. of Math. (2) 120 (1984), no. 3, 529–-586. \bibitem[CM74]{chern} S. S. Chern and J. K. Moser, {\it Real hypersurfaces in complex manifolds}, Acta Math. {133} (1974), 219--271. \bibitem[Eb98b]{ebenfeltC3} P.~Ebenfelt, {\it Normal forms and biholomorphic equivalence of real hypersurfaces in ${\mathcal{M}}athbb C^3$}. Indiana Univ. Math. J. 47 (1998), no. 2, 311-366. \bibitem[Eb06]{ebenfeltduke} P. Ebenfelt, {\it Uniformly Levi degenerate CR manifolds: the 5-dimensional case}. Duke Math. J. 110 (2001), no. 1, 37-80. Correction in Duke Math. J. 131 (2006), no. 3, 589--591. \bibitem[ES96]{es} V.~Ezhov and G.~Schmalz, {\it Normal form and two-dimensional chains of an elliptic CR-manifold in ${\mathcal{M}}athbb C^4$}. J. Geom. Anal. 6 (1996), no. 4, 495--529. \bibitem[FK08]{kaup} G.~Fels, W.~Kaup, {\it Classification of Levi degenerate homogeneous CR-manifolds in dimension 5.} Acta Math. 201 (2008), no. 1, 1-–82. \bibitem[FK07]{kaup2} G.~Fels, W.~Kaup, {\it CR-manifolds of dimension 5: a Lie algebra approach.} J. Reine Angew. Math. 604 (2007), 47–-71. \bibitem[Fr77]{freeman} M. Freeman, {\it Local biholomorphic straightening of real submanifolds}, Ann. of Math. (2) 106 (1977), no. 2, 319--352 \bibitem[HY09a]{hy} X.~Huang and W.~Yin, {\it A Bishop surface with a vanishing Bishop invariant.} Invent. Math. 176 (2009), no. 3, 461--520. \bibitem[IZ13]{iz} A. Isaev and D. Zaitsev, {\it Reduction of five-dimensional uniformly Levi degenerate CR structures to absolute parallelisms}. J. Geom. Anal. 23 (2013), no. 3, 1571--1605. \bibitem[KZ06]{kaupzaitsev} W. Kaup, D. Zaitsev, {\it On local CR-transformation of Levi-degenerate group orbits in compact Hermitian symmetric spaces.} J. Eur. Math. Soc. 
(JEMS) 8 (2006), no. 3, 465–-490. \bibitem[K05]{kol05} M. Kol\'a\v r, {\it Normal forms for hypersurfaces of finite type in $ {\mathcal{M}}athbb C^2$}, Math. Res. Lett., {12} (2005), 897--910. \bibitem[KMZ14]{kmz} M.~Kol\'a\v r, F. Meylan, D. Zaitsev, {\it Chern-Moser operators and polynomial models in CR geometry}, Advances in Mathematics 263 (2014), 321-–356. \bibitem[KM18]{km} M. Kol\'a\v r, F. Meylan, \textit{Nonlinear CR automorphisms of Levi degenerate hypersurfaces and a new gap phenomenon}, to appear in Annali della Scuola Normale Superiore di Pisa, 2019. \bibitem[KKZ16]{kkz} M. Kol\'a\v r, I. Kossovskiy, D. Zaitsev, {\it Normal forms in Cauchy-Riemann Geometry: a survey}. Analysis and geometry in several complex variables, 153-–177, Contemp. Math., 681, Amer. Math. Soc., Providence, RI, 2017. \bibitem[KLX16]{klx} I.\,Kossovskiy. B.\,Lamel and M.\,Xiao, {\it Regularity of CR-mappings into Levi-degenerate hypersurfaces}. To appear in Comm. in Analysis and Geometry. Available at https://arxiv.org/abs/1609.00652. \bibitem[KZ19]{generic} I.~Kossovskiy and D.~Zaitsev, {\it Convergent normal form for real hypersurfaces at generic Levi degeneracy.} J. Reine Angew. Math. (Crelle's Journal) 749 (2019), 201-–225. \bibitem[KZ15]{cmhyper} I.~Kossovskiy and D.~Zaitsev, {\it Convergent normal form and canonical connection for hypersurfaces of finite type in ${\mathcal{M}}athbb C^2$}. Advances in Mathematics, 2015, pp. 670--705. \bibitem[KrLo83]{krlo} N. Kruzhilin, A. Loboda, {\it Linearization of local automorphisms of pseudoconvex surfaces.} (Russian) Dokl. Akad. Nauk SSSR 271 (1983), no. 2, 280–-282. \bibitem[LM07]{lmblowups} B.~Lamel and N.~Mir, {\varepsilonm Finite jet determination of local CR automorphisms through resolution of degeneracies.} Asian J. Math. 11 (2007), no. 2, 201–-216. \bibitem[Le48]{lednev} N. A. Lednev, {\it A new method for the solution of partial differential equations.} (Russian) Mat. Sbornik N. S. 22(64), (1948). 205-–266. 
\bibitem[MS14]{ms} C. Medori , A. Spiro, {\it The equivalence problem for 5-dimensional Levi degenerate CR manifolds,} Int. Math. Res. Not. IMRN 2014, no. 20, 5602–-5647. \bibitem[Mir17]{mirdef} N. Mir, {\it Holomorphic deformations of real-analytic CR maps and analytic regularity of CR mappings.} J. Geom. Anal. 27 (2017), no. 3, 1920–-1939. \bibitem[Poc13]{pocchiola} S. Pocchiola, {\it Explicit absolute parallelism for 2-nondegenerate real hypersurfaces in $\CC{3}$ of constant Levi rank 1.} Preprint, arXiv:1312.6400 (2013). \bibitem[Po07]{poincare} H.~Poincar\'e, {\it Les fonctions analytiques de deux variables et la representation conforme}. Rend. Circ. Mat. Palermo. (1907) { 23}, 185--220. \bibitem[PZ17]{zelenko} C. Porter, I. Zelenko, {\it Absolute parallelism for 2-nondegenerate CR structures via bigraded Tanaka prolongation,} preprint, arXiv:1704.03999v3 \bibitem[Sta96]{stanton2} N.~Stanton, {\it Infinitesimal CR automorphisms of real hypersurfaces.} Amer. J. Math. 118 (1996), no. 1, 209-–233. \bibitem[Ta62]{tanaka} N. Tanaka, {\it On the pseudo-conformal geometry of hypersurfaces of the space of n complex variables.} J. Math. Soc. Japan {14} 1962 397--429. \bibitem[Wa79]{wagschal} C. Wagschal, {\it Le probl`eme de Goursat non lin´eaire.} (French) J. Math. Pures Appl. (9) 58 (1979), no. 3, 309-–337. \bibitem[XY17]{xiao} M.\,Xiao, Y.\,Yuan, {\it Complexity of holomorphic maps from the complex unit ball to classical domains}. To appear in J. Math. Pures Appl., available at https://arxiv.org/abs/1609.07523. \varepsilonnd{thebibliography} \varepsilonnd{document}
\begin{document} \title{On the critical value function in the divide and color model} \author{Andr{\'a}s B{\'a}lint\thanks{Chalmers University of Technology, e-mail: \url{[email protected]}} \and Vincent Beffara\thanks{UMPA-ENS Lyon, e-mail: \url{[email protected]}} \and Vincent Tassion\thanks{ENS Lyon, e-mail: \url{[email protected]}}} \maketitle \begin{abstract} The divide and color model on a graph $G$ arises by first deleting each edge of $G$ with probability $1-p$ independently of each other, then coloring the resulting connected components (\emph{i.e.}, every vertex in the component) black or white with respective probabilities $r$ and $1-r$, independently for different components. Viewing it as a (dependent) site percolation model, one can define the critical point $r_c^G(p)$. In this paper, we mainly study the continuity properties of the function $r_c^G$, which is an instance of the question of locality for percolation. Our main result is the fact that in the case $G=\mathbb Z^2$, $r_c^G$ is continuous on the interval $[0,1/2)$; we also prove continuity at $p=0$ for the more general class of graphs with bounded degree. We then investigate the sharpness of the bounded degree condition and the monotonicity of $r_c^G(p)$ as a function of $p$. \end{abstract} \paragraph{Keywords:} Percolation, divide and color model, critical value, locality, stochastic domination. \paragraph{AMS 2000 Subject Classification:} 60K35, 82B43, 82B20 \section*{Introduction} The divide and color (DaC) model is a natural dependent site percolation model introduced by H{\"a}ggstr{\"o}m in \cite{HaggstromDaC}. It has been studied directly in \cite{HaggstromDaC,Garet,BCM,BBT}, and as a member of a more general family of models in \cite{KW,BCM,BGibbs,GrGr}. This model is defined on a multigraph $G=(\mathcal{V},\mathcal{E})$, where $\mathcal{E}$ is a multiset (\emph{i.e.}, it may contain an element more than once), thus allowing parallel edges between pairs of vertices. 
For simplicity, we will imprecisely call $G$ a \emph{graph} and $\mathcal{E}$ the \emph{edge set}, even if $G$ contains self-loops or multiple edges. The DaC model with parameters $p,r \in [0,1]$, on a general (finite or infinite) graph $G$ with vertex set $\mathcal{V}$ and edge set $\mathcal{E}$, is defined by the following two-step procedure: \begin{itemize} \item First step: Bernoulli bond percolation. We independently declare each edge in $\mathcal{E}$ to be open with probability $p$, and closed with probability $1-p$. We can identify a bond percolation configuration with an element $\eta \in \{0,1\}^{\mathcal{E}}$: for each $e\in \mathcal{E}$, we define $\eta (e)=1$ if $e$ is open, and $\eta (e)=0$ if $e$ is closed. \item Second step: Bernoulli site percolation on the resulting cluster set. Given $\eta \in \{0,1\}^{\mathcal{E}}$, we call \emph{$p$-clusters} or \emph{bond clusters} the connected components in the graph with vertex set $\mathcal{V}$ and edge set $\{e\in \mathcal{E}:\eta (e)=1\}$. The set of $p$-clusters of $\eta$ gives a partition of $\mathcal{V}$. For each $p$-cluster $\mathcal{C}$, we assign the same color to all the vertices in $\mathcal{C}$. The chosen color is black with probability $r$ and white with probability $1-r$, and this choice is independent for different $p$-clusters. \end{itemize} These two steps yield a site percolation configuration $\xi \in \{0,1\}^{\mathcal{V}}$ by defining, for each $v\in \mathcal{V}$, $\xi (v)=1$ if $v$ is black, and $\xi (v)=0$ if $v$ is white. The connected components (via the edge set $\mathcal{E}$) in $\xi$ of the same color are called (black or white) \emph{$r$-clusters}. The resulting measure on $\{0,1\}^{\mathcal{V}}$ is denoted by $\mu_{p,r}^{G}$. Let $E_{\infty }^b\subset \{0,1\}^{\mathcal{V}}$ denote the event that there exists an infinite black $r$-cluster. 
By standard arguments (see Proposition 2.5 in \cite{HaggstromDaC}), for each $p\in [0,1]$, there exists a \emph{critical coloring value} $r_c^G(p)\in [0,1]$ such that \[\mu _{p,r}^G(E_{\infty }^b) \begin{cases} =0 &\textrm{if } r<r_c^G(p),\\ >0 &\textrm{if } r>r_c^G(p).\\ \end{cases}\] The \emph{critical edge parameter} $p_c^G\in [0,1]$ is defined as follows: the probability that there exists an infinite bond cluster is $0$ for all $p<p_c^G$, and positive for all $p>p_c^G$. The latter probability is in fact $1$ for all $p>p_c^G$, whence $r_c^G(p)=0$ for all such $p$. Kolmogorov's $0-1$ law shows that in the case when all the bond clusters are finite, $\mu _{p,r}^G(E_{\infty }^b)\in \{0,1\}$; nevertheless it is possible that $\mu_{p,r}^G(E_{\infty }^b)\in (0,1)$ for some $r>r_c^G(p)$ (\emph{e.g.} on the square lattice, as soon as $p>p_c=1/2$, one has $\mu_{p,r}^G(E_{\infty }^b) = r$). \subsection*{Statement of the results} Our main goal in this paper is to understand how the critical coloring parameter $r_c^G$ depends on the edge parameter $p$. Since the addition or removal of self-loops obviously does not affect the value of $r_c^G(p)$, we will assume that all the graphs $G$ that we consider are without self-loops. On the other hand, $G$ is allowed to contain multiple edges. Our first result, based on a stochastic domination argument, gives bounds on $r_c^G(p)$ in terms of $r_c^G(0)$, which is simply the critical value for Bernoulli site percolation on $G$. By the \emph{degree} of a vertex $v$, we mean the number of edges incident on $v$ (counted with multiplicity). \begin{Proposition}\label{rcbounds} For any graph $G$ with maximal degree $\Delta $, for all $p\in [0,1)$, \[ 1 - \frac{1-r_c^G(0)}{(1-p)^{\Delta}} \leq r_c^G(p) \leq \frac{r_c^G(0)}{(1-p)^{\Delta}}. 
\] \end{Proposition} As a direct consequence, we get continuity at $p=0$ of the critical value function: \begin{Proposition}\label{contin0} For any graph $G$ with bounded degree, $r_c^G(p)$ is continuous in $p$ at $0$. \end{Proposition} One could think of an alternative approach to the question, as follows: the DaC model can be seen as Bernoulli site percolation of the random graph $G_p=(V_p,E_p)$ where $V_p$ is the set of bond clusters and two bond clusters are connected by a bond of $E_p$ if and only if they are adjacent in the original graph. The study of how $r_c^G(p)$ depends on $p$ is then a particular case of a more general question known as the \emph{locality problem}: is it true in general that the critical points of site percolation on a graph and a small perturbation of it are always close? Here, for small $p$, the graphs $G$ and $G_p$ are somehow very similar, and their critical points are indeed close. Dropping the bounded-degree assumption allows for the easy construction of graphs for which continuity does not hold at $p=0$: \begin{Proposition}\label{nonbounded} There exists a graph $G$ with $p_c^G>0$ such that $r_c^G$ is discontinuous at $0$. \end{Proposition} In general, when $p>0$, the graph $G_p$ does not have bounded degree, even if $G$ does; this simple remark can be exploited to construct bounded degree graphs for which $r_c^G$ has discontinuities below the critical point of bond percolation (though of course not at $0$): \begin{Theorem}\label{boundeddegreeconstruction} There exists a graph $G$ of bounded degree satisfying $p_c^G > 1/2$ and such that $r_c^G(p)$ is discontinuous at $1/2$. \end{Theorem} \begin{Remark} The value $1/2$ in the statement above is not special: in fact, for every $p_0 \in (0,1)$, it is possible to generalize our argument to construct a graph with a critical bond parameter above $p_0$ and for which the discontinuity of $r_c$ occurs at $p_0$. 
\end{Remark} Our main result concerns the case $G=\mathbb Z^2$, for which the above does not occur: \begin{Theorem}\label{continp} The critical coloring value $r_c^{\mathbb{Z}^2}(p)$ is a continuous function of $p$ on the whole interval $[0,1/2)$. \end{Theorem} The other, perhaps more anecdotal question we investigate here is whether $r_c^G$ is monotonic below $p_c$. This is the case on the triangular lattice (because it is constant and equal to $1/2$), and appears to hold on $\mathbb Z^2$ in simulations (see the companion paper \cite{BBT}). In the general case, the question seems to be rather delicate. Intuitively, the presence of open edges would seem to make percolation easier, suggesting that the function $p \mapsto r_c(p)$ should be nonincreasing. Theorem 2.9 in \cite{HaggstromDaC} gives a counterexample to this intuition. It is even possible to construct quasi-transitive graphs on which any monotonicity fails: \begin{Proposition}\label{PropNonMononicity} There exists a quasi-transitive graph $G$ such that $r_c^G$ is not monotone on the interval $[0,p_c^{G})$. \end{Proposition} A brief outline of the paper is as follows. We set the notation and collect a few results from the literature in Section~\ref{definitions-section}. In Section~\ref{section-stochastic-domination}, we stochastically compare $\mu _{p,r}^{G}$ with Bernoulli site percolation (Theorem~\ref{stdom}), and show how this result implies Proposition~\ref{rcbounds}. We then turn to the proof of Theorem~\ref{continp} in Section~\ref{contz2-section}, based on a finite-size argument and the continuity of the probability of cylindrical events. In Section~\ref{tree-like-section}, we determine the critical value function for a class of tree-like graphs, and in the following section we apply this to construct most of the examples of graphs we mentioned above. 
\section{Definitions and notation}\label{definitions-section} We start by explicitly constructing the model, in a way which will be more technically convenient than the intuitive one given in the introduction. Let $G$ be a connected graph $( \mathcal{V},\mathcal{E} )$ where the set of vertices $\mathcal{V}=\{v_0,v_1,v_2,\ldots\}$ is countable. We define a total order ``$<$'' on $\mathcal{V}$ by saying that $v_i<v_j$ if and only if $i<j$. In this way, for any subset $V \subset \mathcal{V}$, we can uniquely define $\min (V)\in V$ as the minimal vertex in $V$ with respect to the relation ``$<$''. For a set $S$, we denote $\{0,1\}^S$ by $\Omega _S$. We call the elements of $\Omega _{\mathcal {E}}$ {\em bond configurations}, and the elements of $\Omega _{\mathcal {V}}$ \emph{site configurations}. As defined in the Introduction, in a bond configuration $\eta $, an edge $e\in \mathcal{E}$ is called \emph{open} if $\eta (e)=1$, and \emph{closed} otherwise; in a site configuration $\xi $, a vertex $v\in \mathcal{V}$ is called \emph{black} if $\xi (e)=1$, and \emph{white} otherwise. Finally, for $\eta \in \Omega _{\mathcal {E}}$ and $v\in \mathcal{V}$, we define the \emph{bond cluster} $\mathcal{C}_v(\eta )$ of $v$ as the maximal connected induced subgraph containing $v$ of the graph with vertex set $\mathcal{V}$ and edge set $\{e\in \mathcal{E}:\eta (e)=1\}$, and denote the vertex set of $\mathcal{C}_v(\eta )$ by $C_v(\eta )$. For $a\in [0,1]$ and a set $S$, we define $\nu _a ^S$ as the probability measure on $\Omega _S$ that assigns to each $s\in S$ value $1$ with probability $a$ and $0$ with probability $1-a$, independently for different elements of $S$. We define a function \begin{equation*} \begin{array}{lccc} \Phi \: : \: &\Omega_\mathcal{E} \times \Omega_\mathcal{V} &\rightarrow& \Omega_\mathcal{E} \times \Omega_\mathcal{V},\\ &(\eta,\kappa)&\mapsto&(\eta,\xi),\\ \end{array} \end{equation*} where $\xi(v)=\kappa(\min(C_v(\eta )))$. 
For $p,r\in [0,1]$, we define $\mathbb{P}^G_{p,r}$ to be the image measure of $\nu_p^\mathcal{E} \otimes \nu_r^\mathcal{V}$ by the function $\Phi $, and denote by $\mu ^G_{p,r}$ the marginal of $\mathbb{P}^G_{p,r}$ on $\Omega _{\mathcal{V}}$. Note that this definition of $\mu ^G_{p,r}$ is consistent with the one in the Introduction. Finally, we give a few definitions and results that are necessary for the analysis of the DaC model on the square lattice, that is the graph with vertex set $\mathbb{Z}^2$ and edge set $\mathcal{E}^2=\{\left< v,w\right> :v=(v_1,v_2),w=(w_1,w_2)\in \mathbb{Z}^2,\ |v_1-w_1|+|v_2-w_2|=1\}$. The \emph{matching graph} $\mathbb{Z}^2_*$ {of the square lattice} is the graph with vertex set $\mathbb{Z}^2$ and edge set $\mathcal{E}^2_*=\{\left< v,w\right> :v=(v_1,v_2),w=(w_1,w_2)\in \mathbb{Z}^2,\ \max (|v_1-w_1|,|v_2-w_2|)=1\}$. In the same manner as in the Introduction, we define, for a color configuration $\xi \in \{0,1\}^{\mathbb{Z}^2}$, (black or white) \emph{$*$-clusters} as connected components (via the edge set $\mathcal{E}^2_*$) in $\xi $ of the same color. We denote by $\Theta ^*(p,r)$ the $\mathbb{P}_{p,r}^{\mathbb{Z}^2}$-probability that the origin is contained in an infinite black $*$-cluster, and define \[r_c^*(p) = \sup \{r:\Theta ^*(p,r)=0\}\] for all $p \in [0,1]$ --- note that this value may differ from $r_c^{\mathbb{Z}^2_*}(p)$. The main result in \cite{BCM} is that for all $p\in [0,1/2)$, the critical values $r_c^{\mathbb{Z}^2}(p)$ and $r_c^*(p)$ satisfy the duality relation \begin{equation}\label{rcplusrcstaris1} r_c^{\mathbb{Z}^2}(p) + r_c^*(p)= 1. \end{equation} We will also use exponential decay result for subcritical Bernoulli bond percolation on $\mathbb{Z}^2$. 
Let ${\bf 0}$ denote the origin in $\mathbb{Z}^2$, and for each $n\in \mathbb{N}=\{1,2,\ldots \}$, let us define $S_n=\{v\in \mathbb{Z}^2:dist(v,{\bf 0})=n\}$ (where $dist$ denotes graph distance), and the event $M_n=\{\eta \in \Omega _{\mathcal{E}^2}:$ there is a path of open edges in $\eta $ from ${\bf 0}$ to $S_n\}$. Then we have the following result: \begin{Theorem}[\cite{kesten}]\label{lexpdecay} For $p<1/2$, there exists $\psi (p)>0$ such that for all $n\in \mathbb{N}$, we have that \begin{displaymath} \nu _{p}^{\mathcal{E}^2}(M_n)< e^{-n\psi (p)}. \end{displaymath} \end{Theorem} \section{Stochastic domination and continuity at \texorpdfstring{$p=0$}{p=0}} \label{section-stochastic-domination} In this section, we prove Proposition~\ref{rcbounds} via a stochastic comparison between the DaC measure and Bernoulli site percolation. Before stating the corresponding result, however, let us recall the concept of stochastic domination. We define a natural partial order on $\Omega _{\mathcal{V}}$ by saying that $\xi ^{\prime }\geq \xi $ for $\xi ,\xi ^{\prime }\in \Omega _{\mathcal{V}}$ if, for all $v\in \mathcal{V}$, $\xi ^{\prime }(v)\geq \xi (v)$. A random variable $f:\Omega _{\mathcal{V}}\to \mathbb{R}$ is called \emph{increasing} if $\xi ^{\prime }\geq \xi $ implies that $f(\xi ^{\prime })\geq f(\xi )$, and an event $E\subset \Omega _{\mathcal{V}}$ is increasing if its indicator random variable is increasing. For probability measures $\mu ,\mu ^{\prime }$ on $\Omega _{\mathcal{V}}$, we say that $\mu ^{\prime }$ is \emph{stochastically larger} than $\mu $ (or, equivalently, that $\mu $ is \emph{stochastically smaller} than $\mu ^{\prime }$, denoted by $\mu \leq _{\textrm{st}}\mu ^{\prime }$) if, for all bounded increasing random variables $f:\Omega _{\mathcal{V}}\to \mathbb{R}$, we have that $$ \int _{\Omega _{\mathcal{V}}}f(\xi )\ d\mu ^{\prime }(\xi )\geq \int _{\Omega _{\mathcal{V}}}f(\xi )\ d\mu (\xi ). 
$$ By Strassen's theorem \cite{Strassen}, this is equivalent to the existence of an appropriate coupling of the measures $\mu ^{\prime }$ and $\mu $; that is, the existence of a probability measure $\mathbb{Q}$ on $\Omega _{\mathcal{V}}\times \Omega _{\mathcal{V}}$ such that the marginals of $\mathbb{Q}$ on the first and second coordinates are $\mu ^{\prime }$ and $\mu $ respectively, and $\mathbb{Q}(\{(\xi ^{\prime },\xi )\in \Omega _{\mathcal{V}}\times \Omega _{\mathcal{V}}: \xi ^{\prime }\geq \xi \})=1$. \begin{Theorem}\label{stdom} For any graph $G=( \mathcal{V},\mathcal{E} )$ whose maximal degree is $\Delta $, at arbitrary values of the parameters $p,r\in [0,1]$, \begin{displaymath} \nu _{r(1-p)^{\Delta }}^{\mathcal{V}} \leq _{\textrm{st}}\mu _{p,r}^G \leq _{\textrm{st}}\nu _{1-(1-r)(1-p)^{\Delta }}^{\mathcal{V}}. \end{displaymath} \end{Theorem} Before turning to the proof, we show how Theorem~\ref{stdom} implies Proposition~\ref{rcbounds}. \paragraph{Proof of Proposition~\ref{rcbounds}.} It follows from Theorem~\ref{stdom} and the definition of stochastic domination that for the increasing event $E_{\infty }^b$ (which was defined in the Introduction), we have $\mu ^{G} _{p,r}(E_{\infty }^b)>0$ whenever $r(1-p)^{\Delta }>r_c^{G}(0)$, which implies that $r_c^G(p) \leq r_c^G(0)/(1-p)^{\Delta }$. The derivation of the lower bound for $r_c^{G}(p)$ is analogous. \qed Now we give the proof of Theorem~\ref{stdom}, which bears some resemblance with the proof of Theorem 2.3 in \cite{HaggstromDaC}. \paragraph{Proof of Theorem~\ref{stdom}.} Fix $G=(\mathcal{V},\mathcal{E})$ with maximal degree $\Delta $, and parameter values $p,r\in [0,1]$. We will use the relation ``$<$'' and the minimum of a vertex set with respect to this relation as defined in Section~\ref{definitions-section}. In what follows, we will define several random variables; we will denote the joint distribution of all these variables by $\mathbb{P}$. 
First, we define a collection $(\eta _{x,y}^e:x,y\in \mathcal{V}, e=\left< x,y\right> \in \mathcal{E})$ of i.i.d.\ Bernoulli($p$) random variables (\emph{i.e.}, they take value $1$ with probability $p$, and $0$ otherwise); one may imagine having each edge $e\in \mathcal{E}$ replaced by two directed edges, and the random variables represent which of these edges are open. We define also a set $(\kappa _{x}:x\in \mathcal{V})$ of Bernoulli($r$) random variables. Given a realization of $(\eta _{x,y}^e:x,y\in \mathcal{V},e=\left< x,y\right> \in \mathcal{E})$ and $(\kappa _{x}:x\in \mathcal{V})$, we will define an $\Omega _{\mathcal{V}}\times \Omega _{\mathcal{E}}$-valued random configuration $(\eta ,\xi )$ with distribution $\mathbb{P} _{p,r}^{G}$, by the following algorithm. \begin{enumerate} \item Let $v=\min \{x\in \mathcal{V}:$ no $\xi $-value has been assigned yet to $x$ by this algorithm$\}$. (Note that $v$ and $V, v_i, H_i $ $(i\in \mathbb{N})$, defined below, are running variables, \emph{i.e.}, their values will be redefined in the course of the algorithm.) \item We explore the ``directed open cluster'' $V$ of $v$ iteratively, as follows. Define $v_0=v$. Given $v_0,v_1, \ldots ,v_i$ for some integer $i\geq 0$, set $\eta (e)=\eta ^e _{v_i,w}$ for every edge $e=\left< v_i,w\right> \in \mathcal{E}$ incident to $v_i$ such that no $\eta $-value has been assigned yet to $e$ by the algorithm, and write $H_{i+1}=\{w\in \mathcal{V}\setminus \{v_0,v_1,\ldots ,v_i\}:w$ can be reached from any of $v_0,v_1,\ldots ,v_i$ by using only those edges $e\in \mathcal{E}$ such that $\eta (e)=1$ has been assigned to $e$ by this algorithm$\}$. If $H_{i+1}\neq \emptyset $, then we define $v_{i+1}=\min (H_{i+1})$, and continue exploring the directed open cluster of $v$; otherwise, we define $V=\{v_0,v_1,\ldots ,v_i\}$, and move to step 3. \item Define $\xi (w)=\kappa _v$ for all $w\in V$, and return to step 1. 
\end{enumerate} It is immediately clear that the above algorithm eventually assigns a $\xi$-value to each vertex. Note also that a vertex $v$ can receive a $\xi$-value only after all edges incident to $v$ have already been assigned an $\eta $-value, which shows that the algorithm eventually determines the full edge configuration as well. It is easy to convince oneself that $(\eta ,\xi )$ obtained this way indeed has the desired distribution. Now, for each $v\in \mathcal{V}$, we define $Z(v)=1$ if $\kappa _v=1$ and $\eta ^e _{w,v}=0$ for all edges $e=\left< v,w\right> \in \mathcal{E}$ incident on $v$ (\emph{i.e.}, all directed edges towards $v$ are closed), and $Z(v)=0$ otherwise. Note that every vertex with $Z(v)=1$ has $\xi (v)=1$ as well, whence the distribution of $\xi $ (\emph{i.e.}, $\mu _{p,r}^{G}$) stochastically dominates the distribution of $Z$ (as witnessed by the coupling $\mathbb{P}$). Notice that $Z(v)$ depends only on the states of the edges pointing to $v$ and on the value of $\kappa_v$; in particular the distribution of $Z$ is a product measure on $\Omega _{\mathcal{V}}$ with parameter $r(1-p)^{d(v)}$ at $v$, where $d(v)\leq \Delta $ is the degree of $v$, whence $\mu _{p,r}^{G}$ stochastically dominates the product measure on $\Omega _{\mathcal{V}}$ with parameter $r(1-p)^{\Delta }$, which gives the desired stochastic lower bound. The upper bound can be proved analogously; alternatively, it follows from the lower bound by exchanging the roles of black and white. \qed \section{Continuity of \texorpdfstring{$r_c^{\mathbb{Z}^2}(p)$}{rcp} on the interval \texorpdfstring{$[0,1/2)$}{[0,1/2)}}\label{contz2-section} In this section, we will prove Theorem~\ref{continp}. Our first task is to prove a technical result valid on more general graphs stating that the probability of any event $A$ whose occurrence depends on a finite set of $\xi$-variables is a continuous function of $p$ for $p<p_c^G$. 
The proof relies on the fact that although the color of a vertex $v$ may be influenced by edges arbitrarily far away, if $p<p_c^G$, the corresponding influence decreases to $0$ in the limit as we move away from $v$. Therefore, the occurrence of the event $A$ depends essentially on a finite number of $\eta $- and $\kappa $-variables, whence its probability can be approximated up to an arbitrarily small error by a polynomial in $p$ and $r$. Once we have proved Proposition~\ref{cylcont} below, which is valid on general graphs, we will apply it on $\mathbb{Z}^2$ to certain ``box-crossing events,'' and appeal to results in \cite{BCM} to deduce the continuity of $r_c^{\mathbb{Z}^2}(p)$. \begin{Proposition}\label{cylcont} For every site percolation event $A \subset \{0,1\}^{\mathcal{V}}$ depending on the color of finitely many vertices, $\mu_{p,r}^G(A)$ is a continuous function of $(p,r)$ on the set $[0,p_c^G) \times [0,1]$. \end{Proposition} \paragraph{Proof.} In this proof, when $\mu$ is a measure on a set $S$, $X$ is a random variable with law $\mu$ and $F:\: S\longrightarrow \mathbb{R}$ is a bounded measurable function, we write abusively $\mu[F(X)]$ for the expectation of $F(X)$. We show a slightly more general result: for any $k \geq 1$, $\boldsymbol{x}=(x_1,\ldots,x_k)\in \mathcal{V}^k$ and $f: \{0,1\}^k \to \mathbb{R}$ bounded and measurable, $\mu_{p,r} ^G\left [ f(\xi(x_1),\ldots,\xi(x_k))\right ]$ is continuous in $(p,r)$ on the product $[0,p_c^G) \times [0,1]$. Proposition~\ref{cylcont} will follow by choosing an appropriate family $\{x_1,\ldots,x_k\}$ such that the states of the $x_i$ suffices to determine whether $A$ occurs, and take $f$ to be the indicator function of $A$. 
To show the previous affirmation, we condition on the vector \[ \boldsymbol{m}_{\boldsymbol{x}}(\eta)=(\min C_{x_1}(\eta),\ldots, \min C_{x_k}(\eta)) \] which takes values in the finite set $\boldsymbol{V}=\left \{(v_1,\ldots,v_k) \in \mathcal{V}^k \: : \: \forall i \: v_i \leq \max \{x_1,\ldots,x_k\} \right \}$, and we use the definition of $\mathbb{P}_{p,r}^G$ as an image measure. By definition, \begin{align*} \mu_{p,r} ^G & \left [ f(\xi(x_1),\ldots,\xi(x_k))\right ] \\ &= \sum_{\boldsymbol{v} \in \boldsymbol{V}} \mathbb{P}_{p,r} ^G\left [ f(\xi(x_1),\ldots,\xi(x_k)) | \{ \boldsymbol{m}_{\boldsymbol{x}}=\boldsymbol{v} \} \right ]\mathbb{P}_{p,r} ^G \left [ \{\boldsymbol{m}_{\boldsymbol{x}}=\boldsymbol{v} \} \right ]\\ &= \sum_{\boldsymbol{v} \in \boldsymbol{V}} \nu_p^{\mathcal{E}} \otimes \nu_r^{\mathcal{V}} \left [ f(\kappa(v_1),\ldots,\kappa(v_k)) | \{ \boldsymbol{m}_{\boldsymbol{x}}=\boldsymbol{v} \} \right ]\nu_p^{\mathcal{E}} \left [ \{\boldsymbol{m}_{\boldsymbol{x}}=\boldsymbol{v} \} \right ]\\ &= \sum_{\boldsymbol{v} \in \boldsymbol{V}} \nu_r^{\mathcal{V}} \left [ f(\kappa(v_1),\ldots,\kappa(v_k))\right ] \nu_p^{\mathcal{E}} \left [ \{\boldsymbol{m}_{\boldsymbol{x}}=\boldsymbol{v} \} \right]. \end{align*} Note that $\nu_r^{\mathcal{V}} \left [ f(\kappa(v_1),\ldots,\kappa(v_k))\right ]$ is a polynomial in $r$, so to conclude the proof we only need to prove that for any fixed $\boldsymbol{x}$ and $\boldsymbol{v}$, $\nu_p ^{\mathcal{E}}\left ( \{ \boldsymbol{m}(\boldsymbol{x})=\boldsymbol{v} \} \right )$ depends continuously on $p$ on the interval $[0,p_c^G)$. For $n \geq 1$, write $F_n = \left \{ |C_{x_1}| \leq n, \ldots, |C_{x_k}| \leq n \right \}$. It is easy to verify that the event $\left \{ \boldsymbol{m}_{\boldsymbol{x}}=\boldsymbol{v} \right \} \cap F_n$ depends on the state of finitely many edges. Hence, $\nu_p ^{\mathcal{E}}\left [ \left \{ \boldsymbol{m}_{\boldsymbol{x}}=\boldsymbol{v} \right \} \cap F_n \right ]$ is a polynomial function of $p$. 
Fix $p_0 < p_c^G$. For all $p \leq p_0$, \begin{eqnarray*} 0 \leq \nu_p^{\mathcal{E}} \left [ \left \{ \boldsymbol{m}(\boldsymbol{x})=\boldsymbol{v} \right \} \right ] - \nu_p ^{\mathcal{E}}\left [ \left \{ \boldsymbol{m}_{\boldsymbol{x}}=\boldsymbol{v} \right \} \cap F_n \right ] &\leq& \nu_p^{\mathcal{E}} \left [ F_n^c \right ]\\ &\leq& \nu_{p_0}^{\mathcal{E}} \left [ F_n^c \right ] \end{eqnarray*} where $\lim\limits_{n \to \infty}\nu_{p_0}^{\mathcal{E}} \left [ F_n^c \right ]= 0$, since $p_0 < p_c^G$. So, $\nu_p^{\mathcal{E}} \left [ \boldsymbol{m}(\boldsymbol{x})=\boldsymbol{v} \right ]$ is a uniform limit of polynomials on any interval $[0,p_0], \: p_0<p_c^G$, which implies the desired continuity. \qed \begin{Remark} In the proof we can see that, for fixed $p<p_c^G$, $\mu_{p,r}^G(A)$ is a polynomial in $r$. \end{Remark} \begin{Remark} If $G$ is a graph with uniqueness of the infinite bond cluster in the supercritical regime, then it is possible to verify that $\nu_p^{\mathcal{E}} \left [ \left \{ \boldsymbol{m}(\boldsymbol{x})=\boldsymbol{v} \right \} \right ] $ is continuous in $p$ on the whole interval $[0,1]$. In this case, the continuity given by the Proposition~\ref{cylcont} can be extended to the whole square $[0,1]^2$. \end{Remark} \paragraph{Proof of Theorem~\ref{continp}.} In order to simplify our notations, we write $\mathbb{P}_{p,r}, \nu _{p}$, $r_c(p)$, for $\mathbb{P}_{p,r}^{\mathbb{Z}^2}, \nu _{p}^{\mathcal{E}^2}$ and \smash{$r_c^{\mathbb{Z}^2}(p)$} respectively. Fix $p_0\in (0,1/2)$ and $\varepsilon >0$ arbitrarily. We will show that there exists $\delta =\delta (p_0,\varepsilon )>0$ such that for all $p\in (p_0-\delta ,p_0+\delta )$, \begin{equation}\label{minus} r_c(p) \geq r_c(p_0)-\varepsilon, \end{equation} and \begin{equation}\label{plus} r_c(p) \leq r_c(p_0)+\varepsilon. 
\end{equation} Note that by equation (\ref{rcplusrcstaris1}), for all small enough choices of $\delta >0$ (such that $0\leq p_0\pm \delta <1/2$), (\ref{minus}) is equivalent to \begin{equation}\label{plusstar} r_c^*(p) \leq r_c^*(p_0)+\varepsilon. \end{equation} Below we will show how to find $\delta _1>0$ such that we have (\ref{plus}) for all $p\in (p_0-\delta _1,p_0+\delta _1)$. One may then completely analogously find $\delta _2>0$ such that (\ref{plusstar}) holds for all $p\in (p_0-\delta _2,p_0+\delta _2)$, and take $\delta =\min (\delta _1,\delta _2)$. Fix $r=r_c(p_0)+\varepsilon $, and define the event $V_n=\{(\xi ,\eta )\in \Omega _{\mathbb{Z}^2}\times \Omega _{\mathcal{E}_2}:$ there exists a vertical crossing of $[0,n]\times [0,3n]$ that is black in $\xi \}$. By ``vertical crossing,'' we mean a self-avoiding path of vertices in $[0,n]\times [0,3n]$ with one endpoint in $[0,n]\times \{0\}$, and one in $[0,n]\times \{3n\}$. Recall also the definition of $M_n$ in Theorem~\ref{lexpdecay}. By Lemma 2.10 in \cite{BCM}, there exists a constant $\gamma >0$ such that the following implication holds for any $p,a\in [0,1]$ and $L\in \mathbb{N}$: \begin{equation*}\label{eq:finitecriterion} \left . \begin{array}[l]{rcl} (3L+1)(L+1)\nu _{a}(M_{\lfloor L/3\rfloor }) & \leq & \gamma , \\ \textrm{and } \mathbb{P}_{p,a}(V_L) & \geq & 1-\gamma \end{array} \right \} \Rightarrow a\geq r_c(p). \end{equation*} As usual, $\lfloor x \rfloor $ for $x>0$ denotes the largest integer $m$ such that $m\leq x$. Fix such a $\gamma $. By Theorem~\ref{lexpdecay}, there exists $N\in \mathbb{N}$ such that \begin{equation*}\label{p0smallbc} (3n+1)(n+1)\nu _{p_0}(M_{\lfloor n/3\rfloor })<\gamma \end{equation*} for all $n\geq N$. On the other hand, since $r>r_c(p_0)$, it follows from Lemma 2.11 in \cite{BCM} that there exists $L\geq N$ such that \begin{equation*}\label{p0bigcr} \mathbb{P}_{p_0,r}(V_L) > 1-\gamma. 
\end{equation*} Note that both $(3L+1)(L+1)\nu _{p}(M_{\lfloor L/3\rfloor })$ and $\mathbb{P}_{p,r}(V_L)$ are continuous in $p$ at $p_0$. Indeed, the former is simply a polynomial in $p$, while the continuity of the latter follows from Proposition~\ref{cylcont}. Therefore, there exists $\delta _1>0$ such that for all $p\in (p_0-\delta _1,p_0+\delta _1)$, \begin{eqnarray*} (3L+1)(L+1)\nu _{p}(M_{\lfloor L/3\rfloor }) & \leq & \gamma , \\ \textrm{and } \mathbb{P}_{p,r}(V_L) & \geq & 1-\gamma. \end{eqnarray*} By the choice of $\gamma $, this implies that $r\geq r_c(p)$ for all such $p$, which is precisely what we wanted to prove. Finding $\delta _2>0$ such that (\ref{plusstar}) holds for all $p\in (p_0-\delta _2,p_0+\delta _2)$ is analogous: one only needs to substitute $r_c(p_0)$ by $r_c^*(p_0)$ and ``crossing'' by ``$*$-crossing,'' and the exact same argument as above works. It follows that $\delta =\min (\delta _1,\delta _2)>0$ is a constant such that both (\ref{plus}) and (\ref{plusstar}) hold for all $p\in (p_0-\delta ,p_0+\delta )$, completing the proof of continuity on $(0,1/2)$. Right-continuity at $0$ may be proved analogously; alternatively, it follows from Proposition~\ref{contin0}. \qed \begin{Remark} It follows from Theorem~\ref{continp} and equation (\ref{rcplusrcstaris1}) that $r_c^*(p)$ is also continuous in $p$ on $[0,1/2)$. \end{Remark} \section{The critical value functions of tree-like graphs}\label{tree-like-section} In this section, we will study the critical value functions of graphs that are constructed by replacing edges of an infinite tree by a sequence of finite graphs. We will then use several such constructions in the proofs of our main results in Section~\ref{proofs-section}. Let us fix an arbitrary sequence $D_n=(\mathcal{V}_n,\mathcal{E}_n)$ of finite connected graphs and, for every $n\in \mathbb{N}$, two distinct vertices $a_n,b_n \in \mathcal{V}_n$. 
Let $\mathbb{T}_3=(V_3,E_3)$ denote the (infinite) regular tree of degree $3$, and fix an arbitrary vertex $\rho \in V_3$. Then, for each edge $e\in E_3$, we denote the end-vertex of $e$ which is closer to $\rho $ by $f(e)$, and the other end-vertex by $s(e)$. Let $\Gamma_D=(\tilde{V},\tilde{E})$ be the graph obtained by replacing every edge $e$ of $\Gamma _3$ between levels $n-1$ and $n$ (\emph{i.e.}, such that $dist(s(e),\rho )=n$) by a copy $D_e$ of $D_n$, with $a_n$ and $b_n$ replacing respectively $f(e)$ and $s(e)$. Each vertex $v \in V_3$ is replaced by a new vertex in $\tilde{V}$, which we denote by $\tilde{v}$. It is well known that $p_c^{\Gamma _3}=r_c^{\Gamma _3}(0)=1/2$. Using this fact and the tree-like structure of $\Gamma _D$, we will be able to determine bounds for $p_c^{\Gamma _D}$ and $r_c^{\Gamma _D}(p)$. First, we define $h^{D_n}(p)=\nu_{p}^{\mathcal{E}_n}(a_n \text{ and } b_n \text{ are in the same bond cluster})$, and prove the following, intuitively clear, lemma. \begin{Lemma} \label{LemmaBond} For any $p\in [0,1]$, the following implications hold: \begin{itemize} \item[a)] if $\limsup _{n\to \infty }h^{D_n}(p)<1/2$, then $p\leq p_c^{\Gamma_D}$; \item[b)] if $\liminf _{n\to \infty }h^{D_n}(p)>1/2$, then $p\geq p_c^{\Gamma_D}$. \end{itemize} \end{Lemma} \paragraph{Proof.} We couple Bernoulli bond percolation with parameter $p$ on $\Gamma_D$ with inhomogeneous Bernoulli bond percolation with parameters $h^{D_n}(p)$ on $\mathbb{T}_3$, as follows. Let $\eta$ be a random variable with law $\nu_p^{\tilde{E}}$, and define, for each edge $e\in E_3$, $W(e)=1$ if $\tilde{f(e)}$ and $\tilde{s(e)}$ are connected by a path consisting of edges that are open in $\eta $, and $W(e)=0$ otherwise. The tree-like structure of $\Gamma _D$ implies that $W(e)$ depends only on the state of the edges in $D_e$, and it is clear that if $dist(s(e),\rho )= n$, then $W(e)=1$ with probability $h^{D_n}(p)$. 
It is easy to verify that there exists an infinite open self-avoiding path on $\Gamma_D$ from $\tilde{\rho }$ in the configuration $\eta$ if and only if there exists an infinite open self-avoiding path on $\mathbb{T}_3$ from $\rho $ in the configuration $W$. Now, if we assume $\limsup _{n\to \infty }h^{D_n}(p)<1/2$, then there exists $t<1/2$ and $N\in \mathbb{N}$ such that for all $n\geq N$, $h^{D_n}(p)\leq t$. Therefore, the distribution of the restriction of $W$ on $L= \{e\in E_3: dist(s(e),\rho )\geq N\}$ is stochastically dominated by the projection of $\nu _t ^{E_3}$ on $L$. This implies that, a.s., there exists no infinite self-avoiding path in $W$, whence $p\leq p_c^{\Gamma_D}$ by the observation at the beginning of this paragraph. The proof of b) is analogous. \qed We now turn to the DaC model on $\Gamma_D$. Recall that for a vertex $v$, $C_v$ denotes the vertex set of the bond cluster of $v$. Let $E_{a_n,b_n}\subset \Omega_{\mathcal{E}_n} \times \Omega_{\mathcal{V}_n}$ denote the event that $a_n$ and $b_n$ are in the same bond cluster, or $a_n$ and $b_n$ lie in two different bond clusters, but there exists a vertex $v$ at distance $1$ from $C_{a_n}$ which is connected to $b_n$ by a black path (which also includes that $\xi (v)=\xi (b_n)=1$). This is the same as saying that $C_{a_n}$ is \emph{pivotal} for the event that there is a black path between $a_n$ and $b_n$, \emph{i.e.}, that such a path exists if and only if $C_{a_n}$ is black. It is important to note that $E_{a_n,b_n}$ is independent of the color of $a_n$. Define $f^{D_n}(p,r)=\mathbb{P}_{p,r}^{D_n}(E_{a_n,b_n})$, and note also that, for $r>0$, $f^{D_n}(p,r)=\mathbb{P}_{p,r}^{D_n}($there is a black path from $a_n$ to $b_n\mid \xi (a_n)=1)$. 
\begin{Lemma}\label{LemmaDaC} For any $p,r\in [0,1]$, we have the following: \begin{itemize} \item[a)] if $\limsup _{n\to \infty }f^{D_n}(p,r)<1/2$, then $r\leq r_c^{\Gamma_D}(p)$; \item[b)] if $\liminf _{n\to \infty }f^{D_n}(p,r)>1/2$, then $r\geq r_c^{\Gamma_D}(p)$. \end{itemize} \end{Lemma} \paragraph{Proof.}We couple here the DaC model on $\Gamma_D$ with inhomogeneous Bernoulli \emph{site} percolation on $\mathbb{T}_3$. For each $v \in V_3\setminus \{ \rho \}$, there is a unique edge $e \in E_3$ such that $v=s(e)$. Here we denote $D_e$ (\emph{i.e.}, the subgraph of $\Gamma_D$ replacing the edge $e$) by $D_{\tilde{v}}$, and the analogous event of $E_{a_n,b_n}$ for the graph $D_{\tilde{v}}$ by $E_{\tilde{v}}$. Let $(\eta,\xi)$ with values in $\Omega_{\tilde{E}} \times \Omega_{\tilde{V}}$ be a random variable with law $\mathbb{P} _{p,r}^{\Gamma _D}$. We define a random variable $X$ with values in $\Omega_{V_3}$, as follows: \begin{equation*} X(v)=\begin{cases} \xi(\tilde{\rho })\!\! & \text{if } v= \rho ,\\ 1 & \text{if the event $E_{\tilde{v}}$ is realized by the restriction of $(\eta,\xi)$ to $D_{\tilde{v}}$,} \\ 0 & \text{otherwise.} \end{cases} \end{equation*} As noted after the proof of Lemma~\ref{LemmaBond}, if $u=f(\left< u,v\right>)$, the event $E_{\tilde{v}}$ is independent of the color of $\tilde{u}$, whence $( E_{\tilde{v}} )_{v \in V_3\setminus \{ \rho \} }$ are independent. Therefore, as $X(\rho )=1$ with probability $r$, and $X(v)=1$ is realized with probability $f^{D_n}(p,r)$ for $v\in V_3$ with $dist(v,\rho )=n$ for some $n\in \mathbb{N}$, $X$ is inhomogeneous Bernoulli site percolation on $\mathbb{T}_3$. 
Our reason for defining $X$ is the following property: it holds for all $v \in V_3\setminus \{ \rho \}$ that \begin{equation}\label{eq:Connection0} \tilde{\rho } \overset{\xi}{\leftrightarrow} \tilde{v} \quad \text{if and only if} \quad \rho \overset{X}{\leftrightarrow} v, \end{equation} where $x \overset{Z}{\leftrightarrow} y$ denotes that $x$ and $y$ are in the same \emph{black} cluster in the configuration $Z$. Indeed, assuming $\tilde{\rho } \overset{\xi}{\leftrightarrow} \tilde{v}$, there exists a path $\rho =x_0,x_1,\cdots,x_k=v$ in $\Gamma _3$ such that, for all $0\leq i<k$, $\tilde{x_i} \overset{\xi}{\leftrightarrow} \tilde{x_{i+1}}$ holds. This implies that $\xi(\tilde{\rho })=1$ and that all the events $( E_{\tilde{x_i}} )_{0<i\leq k}$ occur, whence $X(x_i)=1$ for $i=0, \ldots ,k$, so $\rho \overset{X}{\leftrightarrow} v$ is realized. The proof of the other implication is similar. It follows in particular from (\ref{eq:Connection0}) that $\tilde{\rho }$ lies in an infinite black cluster in the configuration $\xi$ if and only if $\rho $ lies in an infinite black cluster in the configuration $X$. Lemma~\ref{LemmaDaC} presents two scenarios when it is easy to determine (via a stochastic comparison) whether the latter event has positive probability. For example, if we assume that \mbox{$\liminf_{n\to \infty }f^{D_n}(p,r)>1/2$}, then there exists $t>1/2$ and $N\in \mathbb{N}$ such that for all $n\geq N$, $f^{D_n}(p,r)\geq t$. In this case, the distribution of the restriction of $X$ on $K= \{v\in V_3: dist(v,\rho )\geq N\}$ is stochastically larger than the projection of $\nu _t ^{E_3}$ on $K$. Let us further assume that $r>0$. In that case, $X(\rho )=1$ with positive probability, and $f^{D_n}(p,r)>0$ for every $n\in \mathbb{N}$. 
Therefore, under the assumptions $\liminf _{n\to \infty }f^{D_n}(p,r)>1/2$ and $r>0$, $\rho $ is in an infinite black cluster in $X$ (and, hence, $\tilde{\rho }$ is in an infinite black cluster in $\xi$) with positive probability, which can only happen if $r\geq r_c^{\Gamma_D}(p)$. On the other hand, if $\liminf _{n\to \infty }f^{D_n}(p,0)>1/2$, then it is clear that $\liminf _{n\to \infty }f^{D_n}(p,r)>1/2$ (whence $r\geq r_c^{\Gamma_D}(p)$) for all $r>0$, which implies that $r_c^{\Gamma_D}(p)=0$. The proof of part a) is similar. \qed \section{Counterexamples}\label{proofs-section} In this section, we study two particular graph families and obtain examples of non-monotonicity and non-continuity of the critical value function. \subsection{Non-monotonicity}\label{non-monotonicity-section} The results in Section~\ref{tree-like-section} enable us to prove that (a small modification of) the construction considered by H\"aggstr\"om in the proof of Theorem 2.9 in \cite{HaggstromDaC} is a graph whose critical coloring value is non-monotone in the subcritical phase. \paragraph{Proof of Proposition~\ref{PropNonMononicity}.} Define for $k\in \mathbb{N}$, $D^k$ to be the complete bipartite graph with the vertex set partitioned into $\{z_1,z_2\}$ and $\{a,b,v_1,v_2,\ldots ,v_k\}$ (see Figure~\ref{graphDk}). We call $e_1, e_1'$ and $e_2, e_2'$ the edges incident to $a$ and $b$ respectively, and for $i=1,\ldots,k$, $f_i,f_i'$ the edges incident to $v_i$. Consider $\Gamma_k$ the quasi-transitive graph obtained by replacing each edge of the tree $\mathbb{T}_3$ by a copy of $D_k$. $\Gamma_k$ can be seen as the tree-like graph resulting from the construction described at beginning of the section, when we start with the constant sequence $(D_n,a_n,b_n)=(D^k,a,b)$. 
\begin{figure} \caption{The graph $D^k$.} \label{graphDk} \end{figure} We will show below that it holds for all $k\in \mathbb{N}$ that \begin{align} &p_c^{\Gamma_k}>1/3, \label{fact1}\\ &r_c^{\Gamma_k}(0)<2/3, \quad \text{and} \label{fact2}\\ &r_c^{\Gamma_k}(1/3)<2/3. \label{fact3} \end{align} Furthermore, there exists $k\in \mathbb{N}$ and $p_0 \in (0,1/3)$ such that \begin{equation}\label{fact4} r_c^{\Gamma_k}(p_0)>2/3. \end{equation} Proving (\ref{fact1})--(\ref{fact4}) will finish the proof of Proposition~\ref{PropNonMononicity} since these inequalities imply that the quasi-transitive graph $\Gamma_k$ has a non-monotone critical value function in the subcritical regime. Throughout this proof, we will omit superscripts in the notation when no confusion is possible. For the proof of (\ref{fact1}), recall that $h^{D^k}$ is strictly increasing in $p$, and $h^{D^k}(p_{D^k})=1/2$. Since $1-h^{D^k}(p)$ is the $\nu_{p}$-probability of $a$ and $b$ being in two different bond clusters, we have that \begin{equation*} 1-h^{D^k}(1/3) \geq \nu_{1/3}(\{\text{$e_1$ and $e_1'$ are closed} \} \cup \{ \text{$e_2$ and $e_2'$ are closed} \} ). \end{equation*} From this, we get that $h^{D^k}(1/3) \leq 25/81$, which proves (\ref{fact1}). To get (\ref{fact2}), we need to remember that for fixed $p<p_{D^k}$, $f^{D^k}(p,r)$ is strictly increasing in $r$, and $f^{D^k}(p,r_{D^k}(p))=1/2$. One then easily computes that $f(0,2/3)=16/27>1/2$, whence (\ref{fact2}) follows from Lemma~\ref{LemmaDaC}. Now, define $A$ to be the event that at least one edge out of $e_1$, $e_1'$, $e_2$ and $e_2'$ is open. Then \begin{eqnarray*} f^{D^k}(1/3,2/3) & \geq & \mathbb{P}_{1/3,2/3}(E_{a,b} \mid A) \mathbb{P}_{1/3,2/3}(A)\\ & \geq & \mathbb{P}_{1/3,2/3}(C_b \textrm{ black} \mid A)\cdot 65/81, \end{eqnarray*} which gives that $f^{D^k}(1/3,2/3) \geq 130/243 >1/2$, and implies (\ref{fact3}) by~\ref{LemmaDaC}. 
To prove (\ref{fact4}), we consider $B_k$ to be the event that $e_1$, $e_1'$, $e_2$ and $e_2'$ are all closed and that there exists $i$ such that $f_i$ and $f_i'$ are both open. One can easily compute that \[\mathbb{P}_{p,r}(B_k)={(1-p)}^4 \left(1-{(1-p^2)}^k\right),\] which implies that we can choose $p_0 \in (0,1/3)$ (small) and $k\in \mathbb{N}$ (large) such that $\mathbb{P}_{p_0,r}(B_{k})>17/18$. Then, \begin{eqnarray*} f^{D^k}(p_0,2/3) & = & \mathbb{P}_{p_0,r}(E_{a,b}\mid B_k)\mathbb{P}_{p_0,r}(B_k)+\mathbb{P}_{p_0,r}(E_{a,b}\mid B_k^c)(1\!-\!\mathbb{P}_{p_0,r}(B_k))\\ & < & (2/3)^2\cdot 1 + 1\cdot 1/18 ( = 1/2), \end{eqnarray*} whence inequality (\ref{fact4}) follows with these choices from Lemma~\ref{LemmaDaC}, completing the proof. \qed \subsection{Graphs with discontinuous critical value functions}\label{discontgraphs-section} \paragraph{Proof of Proposition~\ref{nonbounded}.} For $n\in \mathbb{N}$, let $D_n$ be the graph depicted in Figure~\ref{Gn}, and let $G$ be $\Gamma _D$ constructed with this sequence of graphs as described at the beginning of Section~\ref{tree-like-section}. \begin{figure} \caption{The graph $D_n$.} \label{Gn} \end{figure} It is elementary that $\lim _{n\to \infty }h^{D_n}(p)=p$, whence $p_c^G=1/2$ follows from Lemma~\ref{LemmaBond}, thus $p=0$ is subcritical. Since $\lim _{n\to \infty }f^{D_n}(0,r)=r^2$, Lemma~\ref{LemmaDaC} gives that $r_c^G(0)=1/\sqrt{2}$. On the other hand, $\lim _{n\to \infty }f^{D_n}(p,r)=p+(1-p)r$ for all $p>0$, which implies by Lemma~\ref{LemmaDaC} that for $p\leq 1/2$, \[ r_c^G(p)=\frac{1/2-p}{1-p}\to 1/2 \] as $p\to 0$, so $r_c^G$ is indeed discontinuous at $0<p_c^G$. \qed In the rest of this section, for vertices $v$ and $w$, we will write $v\leftrightarrow w$ to denote that there exists a path of open edges between $v$ and $w$. 
Our proof of Theorem~\ref{boundeddegreeconstruction} will be based on Lemma 2.1 in \cite{PSS}, which we restate here: \begin{Lemma}\label{sharpthlemma} There exists a sequence $G_n=(V^n,E^n)$ of graphs and $x_n,y_n\in V^n$ of vertices ($n\in \mathbb{N}$) such that \begin{enumerate} \item $\nu _{1/2}^{E^n}(x_n\leftrightarrow y_n)>\frac{2}{3}$ for all $n$; \item $\lim _{n\to \infty }\nu _{p}^{E^n}(x_n\leftrightarrow y_n)=0$ for all $p<1/2$, and \item there exists $\Delta<\infty $ such that, for all $n$, $G_n$ has degree at most $\Delta $. \end{enumerate} \end{Lemma} Lemma~\ref{sharpthlemma} provides a sequence of bounded degree graphs that exhibit sharp threshold-type behavior at $1/2$. We will use such a sequence as a building block to obtain discontinuity at $1/2$ in the critical value function in the DaC model. \paragraph{Proof of Theorem~\ref{boundeddegreeconstruction}.} We first prove the theorem in the case $p_0=1/2$. Consider the graph $G_n=(V^n,E^n), x_n,y_n$ $(n\in \mathbb{N})$ as in Lemma~\ref{sharpthlemma}. We construct $D_n$ from $G_n$ by adding to it one extra vertex $a_n$ and one edge $\{a_n,x_n\}$. More precisely, $D_n$ has vertex set $V^n\cup\{a_n\}$ and edge set $E^n\cup\{\{a_n,x_n\}\}$. Set $b_n=y_n$ and let $G$ be the graph $\Gamma _D$ defined with the sequence $(D_n,a_n,b_n)$ as in Section~\ref{tree-like-section}. We will show below that there exist $r_0>r_1$ such that the graph $G$ satisfies the following three properties: \begin{enumerate} \item[(i)] $1/2<p_c^G$; \item[(ii)] $r_c^G(p)\geq r_0$ for all $p<1/2$; \item[(iii)] $r_c^G(1/2)\leq r_1$. \end{enumerate} This implies a discontinuity of $r_c^G$ at $1/2<p_c^G$, finishing the proof. One can easily compute $h^{D_n}(p)=p\nu_{p}^{E^n}(x_n\leftrightarrow y_n)$. Since the graph $G_n$ has degree at most $\Delta$ and the two vertices $x_n, y_n$ are distinct, the probability $\nu_{p}^{E^n}(x_n\leftrightarrow y_n)$ cannot exceed $1-(1-p)^\Delta$. 
This bound guarantees the existence of $p_0>1/2$ independent of $n$ such that $h^{D_n}(p_0)<1/2$ for all $n$, whence Lemma~\ref{LemmaBond} implies that $1/2<p_0\leq p_c^G$. For all $p\in [0,1]$, we have \begin{equation*}\label{fforG} f^{D_n}(p,r)\leq \left(p+r(1-p)\right)\left(\nu _{p}^{E^n}(x_n\leftrightarrow y_n)+r (1-\nu _{p}^{E^n}(x_n\leftrightarrow y_n))\right), \end{equation*} which gives that $\underset{n\to\infty}{\lim}f^{D_n}(p,r)<\left(\frac{r+1}{2}\right)\cdot r$. Writing $r_0$ for the positive solution of $r(1+r)=1$, we get that $\underset{n\to\infty}{\lim}f^{D_n}(p,r_0)<1/2$ for all $p<1/2$, which implies by Lemma~\ref{LemmaDaC} that $r_c^G(p)\geq r_0$. On the other hand, $f^{D_n}(1/2,r)\geq \nu _{1/2}^{E^n}(x_n\leftrightarrow y_n)\left(\frac{1+r}{2}\right)$, which gives by Lemma~\ref{sharpthlemma} that $\underset{n\to\infty}{\lim} f^{D_n}(1/2,r)>\frac{2}{3}\cdot\frac{1+r}{2}$. Writing $r_1$ for the solution of $\frac{2}{3}(1+r_1)=1$, it is elementary to check that $r_1<r_0$ and that $\underset{n\to\infty}{\lim} f^{D_n}(1/2,r_1)>1/2$. Then, using Lemma~\ref{LemmaDaC}, we conclude that $r_c^G(1/2)\leq r_1$. \qed \paragraph{Acknowledgments.} We thank Jeff Steif for suggesting (a variant of) the graph that appears in the proof of Theorem~\ref{boundeddegreeconstruction}. V.B. and V.T. were supported by ANR grant 2010-BLAN-0123-01. \end{document}
\begin{document} \title{Supplementary materials for ``Maximum weighted likelihood estimator for robust heavy-tail modelling of finite mixture models"} \author{Tsz Chai Fung} \maketitle \section{Regularity conditions for asymptotic theory} \label{apx:asym_reg} Let $h(y;\bm{\Phi})$ be the density function of $Y$ with parameter space of $\bm{\Phi}\in\bm{\Omega}$. For a more concise presentation on the regularity conditions, we here write $\bm{\Phi}=(\psi_1,\ldots,\psi_P)$ where $P$ is the total number of parameters in the model. The regularity conditions are: \begin{enumerate}[font={\bfseries},label={R\arabic*.}] \item $h(y;\bm{\Phi})$ has common support in $y$ for all $\bm{\Phi}\in\bm{\Omega}$, $h(y;\bm{\Phi})$ is identifiable in $\bm{\Phi}$ up to a permutation of mixture components. \item $h(y;\bm{\Phi})$ admits third partial derivatives with respect to $\bm{\Phi}$ for each $\bm{\Phi}\in\bm{\Omega}$ and for almost all $y$. \item For all $j_1,j_2=1,\ldots,P$, the first two derivatives of $h(y;\bm{\Phi})$ satisfy \begin{equation} E\left[\frac{\partial}{\partial\psi_{j_1}}\log h(y;\bm{\Phi})\right]=0; \end{equation} \begin{equation} E\left[\frac{\partial}{\partial\psi_{j_1}}\log h(y;\bm{\Phi})\frac{\partial}{\partial\psi_{j_2}}\log h(y;\bm{\Phi})\right]=E\left[-\frac{\partial^2}{\partial\psi_{j_1}\partial\psi_{j_2}}\log h(y;\bm{\Phi})\right]. \end{equation} \item The Fisher information matrix is finite and positive definite at $\bm{\Phi}=\bm{\Phi}_0$: \begin{equation} \mathcal{I}(\bm{\Phi})=E\left[\left(\frac{\partial}{\partial\bm{\Phi}}\log h(y;\bm{\Phi})\right)\left(\frac{\partial}{\partial\bm{\Phi}}\log h(y;\bm{\Phi})\right)^T\right]. 
\end{equation} \item There exists an integrable function $\mathcal{M}(y)$ such that \begin{equation} \hspace{-1cm} \left|\frac{\partial}{\partial\psi_{j_1}}\log h(y;\bm{\Phi})\right|\leq \mathcal{M}(y),\quad \left|\frac{\partial^2}{\partial\psi_{j_1}\partial\psi_{j_2}}\log h(y;\bm{\Phi})\right|\leq \mathcal{M}(y),\quad \left|\frac{\partial^3}{\partial\psi_{j_1}\partial\psi_{j_2}\partial\psi_{j_3}}\log h(y;\bm{\Phi})\right|\leq \mathcal{M}(y), \end{equation} \begin{equation} \hspace{-1cm} \left|\frac{\partial}{\partial\psi_{j_1}}\log h(y;\bm{\Phi})\frac{\partial}{\partial\psi_{j_2}}\log h(y;\bm{\Phi})\right|\leq \mathcal{M}(y),\quad \left|\frac{\partial^2}{\partial\psi_{j_1}\partial\psi_{j_2}}\log h(y;\bm{\Phi})\frac{\partial}{\partial\psi_{j_3}}\log h(y;\bm{\Phi})\right|\leq \mathcal{M}(y), \end{equation} \begin{equation} \hspace{-1cm} \left|\frac{\partial}{\partial\psi_{j_1}}\log h(y;\bm{\Phi})\frac{\partial}{\partial\psi_{j_2}}\log h(y;\bm{\Phi})\frac{\partial}{\partial\psi_{j_3}}\log h(y;\bm{\Phi})\right|\leq \mathcal{M}(y). \end{equation} \end{enumerate} \section{Proof of Theorems 1 and 2} \label{apx:asym_proof1} We first focus on Theorem 1. Denote the weighted log-likelihood of a single observation \begin{equation} \mathcal{L}^{*}(\bm{\Phi};y)=W(y)\log \frac{h(y;\bm{\Phi})W(y)}{\int_{0}^{\infty}h(u;\bm{\Phi})W(u)du}. \end{equation} The consistency and asymptotic normality can be proved by applying Theorems 5.41 and 5.42 of \cite{van2000asymptotic}. The theorems require the regularity conditions that $E\left[\|\partial/\partial\bm{\Phi}\mathcal{L}^{*}(\bm{\Phi};Y)\|^2\right]<\infty$, the matrix $E\left[\partial^2/\partial\bm{\Phi}\partial\bm{\Phi}^T\mathcal{L}^{*}(\bm{\Phi};Y)\right]$ exists and that $|\partial^3/\partial\psi_{j_1}\partial\psi_{j_2}\partial\psi_{j_3}\mathcal{L}^{*}(\bm{\Phi};y)|$ is dominated by a fixed integrable function of $y$, $j_1,j_2,j_3=1,\ldots,P$ and $\psi_j$ is the $j^{\text{th}}$ element of $\bm{\Phi}$. 
Through a direct computation of differentiations, the aforementioned equations can all be expressed as functions of $\kappa(u;\bm{\Phi})$ and $\int_{0}^{\infty}\kappa(u;\bm{\Phi})h(u;\bm{\Phi})W(u)du$ only, where $\kappa(\bm{\Phi})$ can be the six terms presented in regularity condition \textbf{R5} (the left hand side of the six equations underneath \textbf{R5} without the absolute sign). Given \textbf{R5} that $\kappa(u;\bm{\Phi})$ is bounded by an integrable function and since $|\int_{0}^{\infty}\kappa(u;\bm{\Phi})h(u;\bm{\Phi})W(u)du|\leq \int_{0}^{\infty}|\kappa(u;\bm{\Phi})|h(u;\bm{\Phi})du$, the aforementioned regularity conditions required by \cite{van2000asymptotic} hold. For consistency, it suffices from Theorem 5.42 of \cite{van2000asymptotic} to show that $\bm{\Phi}_0$ is the maximizer of \begin{align} E_{\bm{\Phi}_0}\left[\mathcal{L}^{*}(\bm{\Phi};Y)\right] &=\int_{0}^{\infty}W(y)\log \frac{h(y;\bm{\Phi})W(y)}{\int_{0}^{\infty}h(u;\bm{\Phi})W(u)du}h(y;\bm{\Phi}_0)dy\nonumber\\ &=c_1\int_{0}^{\infty}\tilde{h}(y;\bm{\Phi}_0)\log\frac{\tilde{h}(y;\bm{\Phi})}{\tilde{h}(y;\bm{\Phi}_0)}dy+c_2\nonumber\\ &=-c_1D_{\text{KL}}\left(\tilde{h}(y;\bm{\Phi})\|\tilde{h}(y;\bm{\Phi}_0)\right)+c_2, \end{align} where $c_1= \int_0^\infty h(y; \bm{\Phi}_0)W(y) dy >0$ and $c_2=c_1 \int_0^\infty \tilde{h}(y;\bm{\Phi}_0) \log \tilde{h}(y;\bm{\Phi}_0) dy$ are constants and $D_{\text{KL}}(Q_1\|Q_2)\geq 0$ is the KL divergence between $Q_1$ and $Q_2$. Since $D_{\text{KL}}\left(\tilde{h}(y;\bm{\Phi})\|\tilde{h}(y;\bm{\Phi}_0)\right)=0$ as $\bm{\Phi}=\bm{\Phi}_0$, the result follows. 
For asymptotic normality, from Theorem 5.41 of \cite{van2000asymptotic}, we have $\sqrt{n}(\hat{\bm{\Phi}}_n-\bm{\Phi}_0)\overset{d}{\rightarrow}\mathcal{N}(\bm{0},\bm{\Sigma})$ with $\bm{\Sigma}=\bm{\Gamma}^{-1}\bm{\Lambda}\bm{\Gamma}^{-1}$, where \begin{equation} \label{eq:asym:lambda_proof} \bm{\Lambda}=E_{\bm{\Phi}_0}\left[\left[\frac{\partial}{\partial\bm{\Phi}}\mathcal{L}^{*}(\bm{\Phi};Y)\right]\left[\frac{\partial}{\partial\bm{\Phi}}\mathcal{L}^{*}(\bm{\Phi};Y)\right]^T\Bigg|_{\bm{\Phi}=\bm{\Phi}_0}\right] \end{equation} and \begin{equation} \label{eq:asym:gamma_proof} \bm{\Gamma}=-E_{\bm{\Phi}_0}\left[\frac{\partial^2}{\partial\bm{\Phi}\partial\bm{\Phi}^T}\mathcal{L}^{*}(\bm{\Phi};Y)\Bigg|_{\bm{\Phi}=\bm{\Phi}_0}\right]. \end{equation} Performing the derivatives and algebraic manipulations from Equations (2.3) and (2.4) results in Equations (4.2) and (4.3) respectively, which proves the asymptotic normality result. The proof idea of Theorem 2 is completely identical to the above, except that the expectations in Equations (2.3) and (2.4) are taken as $\tilde{E}[\cdot]$ instead of $E_{\bm{\Phi}_0}[\cdot]$. \section{Proof of Theorem 3} \label{apx:asym_proof2} We begin with the following lemmas: \begin{lemma} \label{apx:lem:asym1} To prove Theorem 3, it suffices to show that \begin{equation} T_n(\bm{\Phi}):=\frac{\partial}{\partial\gamma}\tilde{E}_n[\log \tilde{h}_{n}(Y;\bm{\Phi})] \end{equation} is asymptotically a strictly decreasing function of $\gamma$ as $n\rightarrow\infty$, with $T_n(\bm{\Phi})|_{\gamma=\gamma_0}\rightarrow 0$ as $n\rightarrow\infty$. \end{lemma} \begin{proof} If we keep the weight function $W(\cdot)$ fixed (independent of $n$), applying Theorem 5.7 of \cite{van2000asymptotic} we have that maximizing the weighted log-likelihood function $\mathcal{L}_n^{*}(\bm{\Phi};\bm{y})$ is asymptotically equivalent to maximizing $\tilde{E}_n[\log \tilde{h}_{n}(Y;\bm{\Phi})]$ (which is indeed independent of $n$). 
Now that the weight function $W_n(\cdot)$ depends on $n$, and as $n$ increases, the increasing distortion (more down-weighting) of the relative importance of observations would cause reduction of the effective number of observations. Heuristically, we need the number of observations $n$ to increase faster than the distortion impacts of $W_n(\cdot)$, so that the effective number of observations grows to infinity and large sample theory still applies. Quantitatively, we require that the variance of the (scaled) empirical weighted log-likelihood \begin{equation} V_n(\bm{\Phi}):=\text{Var}\left(\frac{1}{n\int_{0}^{\infty}W_n(u)g(u)du}\sum_{i=1}^{n}W_n(Y_i)\log\tilde{h}_{n}(Y_i;\bm{\Phi})\right)\rightarrow 0 \end{equation} as $n\rightarrow\infty$, such that the (scaled) empirical weighted log-likelihood function converges to its expectation which is $\tilde{E}_n[\log \tilde{h}_{n}(Y;\bm{\Phi})]$. Now, $V_n(\bm{\Phi})$ is evaluated as follows: \begin{align} V_n(\bm{\Phi}) &=\frac{1}{n(\int_{0}^{\infty}W_n(u)g(u)du)^2}\text{Var}\left(W_n(Y)\log\tilde{h}_{n}(Y;\bm{\Phi})\right)\nonumber\\ &\leq\frac{1}{n(\int_{0}^{\infty}W_n(u)g(u)du)^2}\tilde{E}\left[W_n(Y)(\log\tilde{h}_{n}(Y;\bm{\Phi}))^2\right]\nonumber\\ &=\frac{1}{n\int_{0}^{\infty}W_n(u)g(u)du}\int_{0}^{\infty}\frac{W_n(y)g(y)}{\int_{0}^{\infty}W_n(u)g(u)du}(\log\tilde{h}_{n}(y;\bm{\Phi}))^2dy\nonumber\\ &=\frac{\tilde{E}_n[(\log \tilde{h}_{n}(Y;\bm{\Phi}))^2]}{n\tilde{E}[W_n(Y)]}\rightarrow 0, \end{align} where the convergence is based on Assumption \textbf{A4}. \end{proof} \begin{lemma} \label{apx:lem:asym2} (Monotone density theorem -- Theorem 1.7.2 of \cite{bingham1989regular}) Denote $H$ as a probability distribution function with $h$ being the corresponding probability density function. Assume $h$ is ultimately monotone (i.e. $h$ is monotone on $(z,\infty)$ for some $z>0$). 
If \begin{equation} \bar{H}(y)\sim y^{-\gamma}L(y) \end{equation} as $y\rightarrow\infty$ for some $\gamma>0$ and some slowly varying function $L$, then \begin{equation} h(y)\sim \gamma y^{-\gamma-1}L(y) \end{equation} as $y\rightarrow\infty$. \end{lemma} We proceed to the proof of Theorem 3 as follows. Using the result from Lemma \ref{apx:lem:asym1}, it suffices to evaluate \begin{align} \label{apx:eq:proof2:Tn} T_n(\bm{\Phi}) &=\frac{\partial}{\partial\gamma}\int_{0}^{\infty}\tilde{g}_n(y)\log \tilde{h}_{n}(y;\bm{\Phi})dy\nonumber\\ &=\frac{\partial}{\partial\gamma}\int_{\tau_n}^{\infty}\tilde{g}^{*}_n(y)\log\tilde{h}^{*}_{n}(y;\bm{\Phi})dy + o(1)\nonumber\\ &=\frac{\partial}{\partial\gamma}\log\tilde{h}^{*}_{n}(\tau_n;\bm{\Phi}) +\frac{\partial}{\partial\gamma}\int_{\tau_n}^{\infty}\left[\frac{\partial}{\partial y}\log\tilde{h}^{*}_{n}(y;\bm{\Phi})\right]\times\bar{\tilde{G}}^{*}_{n}(y)dy + o(1)\nonumber\\ &:=M_1(\tau_n;\bm{\Phi})+M_2(\tau_n;\bm{\Phi}) + o(1), \end{align} where \begin{equation} \tilde{g}^{*}_{n}(y)=\frac{g(y)W_n(y)}{\int_{\tau_n}^{\infty}g(u)W_n(u)du}1\{y\geq\tau_n\},\qquad \tilde{h}^{*}_{n}(y;\bm{\Phi})=\frac{h(y;\bm{\Phi})W_n(y)}{\int_{\tau_n}^{\infty}h(u;\bm{\Phi})W_n(u)du}1\{y\geq\tau_n\}, \end{equation} are the proper transformed density functions, and $\tilde{G}^{*}_{n}$ and $\tilde{H}_{n}^{*}$ are the corresponding distribution functions. The second equality of Equation (\ref{apx:eq:proof2:Tn}) follows from Assumption \textbf{A3}, while the third equality follows from integration by parts. 
Now, we evaluate $M_1(\tau_n;\bm{\Phi})$ and $M_2(\tau_n;\bm{\Phi})$ as follows: \begin{align} M_1(\tau_n;\bm{\Phi}) &=\frac{\partial}{\partial\gamma}\log\tilde{h}^{*}_{n}(\tau_n;\bm{\Phi})\nonumber\\ &=\frac{\partial}{\partial\gamma}\left[\log\gamma-(\gamma+1)\log\tau_n+\log L(\tau_n;\bm{\Phi})\right]\nonumber\\ &\hspace{3em}-\int_{\tau_n}^{\infty}\frac{\partial}{\partial\gamma}\left[\log\gamma-(\gamma+1)\log y+\log L(y;\bm{\Phi})\right]\tilde{h}^{*}_{n}(y;\bm{\Phi})dy+o(1)\nonumber\\ &=\frac{1}{\gamma}-\log\tau_n-\frac{1}{\gamma}+\int_{\tau_n}^{\infty}(\log y)\times\tilde{h}^{*}_{n}(y;\bm{\Phi})dy -\frac{\partial}{\partial\gamma}\int_{\tau_n}^{\infty}\log\frac{L(y;\bm{\Phi})}{L(\tau_n;\bm{\Phi})}\times\tilde{h}^{*}_{n}(y;\bm{\Phi})dy+o(1)\nonumber\\ &=-\log\tau_n+\log\tau_n +\int_{\tau_n}^{\infty}\frac{1}{y}\bar{\tilde{H}}^{*}_{n}(y;\bm{\Phi})dy -\frac{\partial}{\partial\gamma}\int_{1}^{\infty}\log\frac{L(\tau_nt;\bm{\Phi})}{L(\tau_n;\bm{\Phi})}\times\tau_n\tilde{h}^{*}_{n}(\tau_nt;\bm{\Phi})dt+o(1)\nonumber\\ &=\int_{\tau_n}^{\infty}\frac{1}{y}\bar{\tilde{H}}^{*}_{n}(y;\bm{\Phi})dy + o(1), \end{align} where dominated convergence theorem and integration by parts are repeatedly used. The second equality involves monotone density theorem (Lemma \ref{apx:lem:asym2}) with Assumption \textbf{A5} being satisfied. The last term of the second last equality converges to zero uniformly on $\bm{\Phi}$ due to dominated convergence theorem and the uniform convergence conditions in Assumption \textbf{A2}. 
Using similar techniques as the above, $M_2(\tau_n;\bm{\Phi})$ can be evaluated as \begin{align} M_2(\tau_n;\bm{\Phi}) &=-\int_{\tau_n}^{\infty}\frac{1}{y}\bar{\tilde{G}}^{*}_{n}(y)dy +\frac{\partial}{\partial\gamma}\int_{\tau_n}^{\infty}\frac{\partial}{\partial y}(\log L(y;\bm{\Phi}))\times\bar{\tilde{G}}^{*}_{n}(y)dy\nonumber\\ &=-\int_{\tau_n}^{\infty}\frac{1}{y}\bar{\tilde{G}}^{*}_{n}(y)dy-\frac{\partial}{\partial\gamma}\int_{\tau_n}^{\infty}\log\frac{L(y;\bm{\Phi})}{L(\tau_n;\bm{\Phi})}\times\tilde{g}^{*}_{n}(y)dy\nonumber\\ &=-\int_{\tau_n}^{\infty}\frac{1}{y}\bar{\tilde{G}}^{*}_{n}(y)dy + o(1). \end{align} To sum up, we have \begin{equation} T_n(\bm{\Phi}) =\int_{\tau_n}^{\infty}\frac{1}{y}\left[\bar{\tilde{H}}^{*}_{n}(y;\bm{\Phi})-\bar{\tilde{G}}^{*}_{n}(y)\right]dy =\int_{1}^{\infty}\frac{1}{t}\left[\bar{\tilde{H}}^{*}_{n}(\tau_nt;\bm{\Phi})-\bar{\tilde{G}}^{*}_{n}(\tau_nt)\right]dt. \end{equation} Investigating each term inside the integrand, we have \begin{align} \bar{\tilde{H}}^{*}_{n}(\tau_nt;\bm{\Phi}) &=\frac{\int_t^{\infty}h(\tau_nv;\bm{\Phi})W_n(\tau_nv)dv}{\int_1^{\infty}h(\tau_nv;\bm{\Phi})W_n(\tau_nv)dv}\nonumber\\ &=\frac{\int_t^{\infty}v^{-\gamma-1}\tilde{W}_n(v)[L(\tau_nv;\bm{\Phi})/L(\tau_n;\bm{\Phi})]dv}{\int_1^{\infty}v^{-\gamma-1}\tilde{W}_n(v)[L(\tau_nv;\bm{\Phi})/L(\tau_n;\bm{\Phi})]dv} + o(1)\nonumber\\ &=\frac{\int_t^{\infty}v^{-\gamma-1}\tilde{W}_n(v)dv}{\int_1^{\infty}v^{-\gamma-1}\tilde{W}_n(v)dv} + o(1), \end{align} and \begin{align} \bar{\tilde{G}}^{*}_{n}(\tau_nt) &=\frac{\int_t^{\infty}g(\tau_nv)W_n(\tau_nv)dv}{\int_1^{\infty}g(\tau_nv)W_n(\tau_nv)dv}\nonumber\\ &=\frac{\int_t^{\infty}v^{-\gamma_0-1}\tilde{W}_n(v)[L_0(\tau_nv)/L_0(\tau_n)]dv}{\int_1^{\infty}v^{-\gamma_0-1}\tilde{W}_n(v)[L_0(\tau_nv)/L_0(\tau_n)]dv} + o(1)\nonumber\\ &=\frac{\int_t^{\infty}v^{-\gamma_0-1}\tilde{W}_n(v)dv}{\int_1^{\infty}v^{-\gamma_0-1}\tilde{W}_n(v)dv} + o(1), \end{align} where $\tilde{W}_n(v)=W_n(\tau_nv)$. 
Therefore, it is clear that \begin{equation} T_n(\bm{\Phi})=\int_{1}^{\infty}\frac{1}{t}\left[\frac{\int_t^{\infty}v^{-\gamma-1}\tilde{W}_n(v)dv}{\int_1^{\infty}v^{-\gamma-1}\tilde{W}_n(v)dv}-\frac{\int_t^{\infty}v^{-\gamma_0-1}\tilde{W}_n(v)dv}{\int_1^{\infty}v^{-\gamma_0-1}\tilde{W}_n(v)dv}\right]dt+o(1) \end{equation} converges to zero for $\gamma=\gamma_0$ as $n\rightarrow\infty$. To show that $T_n(\bm{\Phi})$ is a strictly decreasing function of $\gamma$ as $n\rightarrow\infty$, it suffices to evaluate \begin{align} \frac{\partial}{\partial\gamma}\log\frac{\int_t^{\infty}v^{-\gamma-1}\tilde{W}_n(v)dv}{\int_1^{\infty}v^{-\gamma-1}\tilde{W}_n(v)dv} &=-\frac{\int_t^{\infty}(\log v)v^{-\gamma-1}\tilde{W}_n(v)dv}{\int_t^{\infty}v^{-\gamma-1}\tilde{W}_n(v)dv}+\frac{\int_1^{\infty}(\log v)v^{-\gamma-1}\tilde{W}_n(v)dv}{\int_1^{\infty}v^{-\gamma-1}\tilde{W}_n(v)dv}, \end{align} which is negative if and only if \begin{equation} \int_{1}^{t}(\log v)k_{n,1,t}(v;\gamma)dv<\int_{t}^{\infty}(\log v)k_{n,t,\infty}(v;\gamma)dv, \end{equation} where \begin{equation} k_{n,t_1,t_2}(v;\gamma)=\frac{v^{-\gamma-1}\tilde{W}_n(v)}{\int_{t_1}^{t_2}v^{-\gamma-1}\tilde{W}_n(v)dv}1\{t_1<v\leq t_2\} \end{equation} is a proper probability density function with $1\leq t_1<t_2\leq\infty$. Since $k_{n,1,t}(v;\gamma)$ and $k_{n,t,\infty}(v;\gamma)$ are both proper densities, it is clear that $\int_{1}^{t}(\log v)k_{n,1,t}(v;\gamma)dv=\log v_1$ for some $v_1\in (1,t)$ and $\int_{t}^{\infty}(\log v)k_{n,t,\infty}(v;\gamma)dv=\log v_2$ for some $v_2\in (t,\infty]$. The result then follows. 
\section{GEM algorithm for MWLE under J-Gamma Lomax mixture model: Hypothetical data approach} \subsection{Construction of complete data} The complete data is given by \begin{equation} \mathcal{D}^{\text{com}}=\{(y_i,\bm{z}_i,k_i,\{\bm{z}'_{is},y'_{is}\}_{s=1,\ldots,k_i})\}_{i=1,\ldots,n}, \end{equation} where $\bm{z}_i=(z_{i1},\ldots,z_{i(J+1)})$ with $z_{ij}=1$ if observation $i$ belongs to the $j^{\text{th}}$ latent class and $z_{ij}=0$ otherwise. Similarly, $\bm{z}'_i=(z'_{is1},\ldots,z'_{is(J+1)})$ and $z'_{isj}=1$ if the $s^{\text{th}}$ missing sample generated by observation $i$ belongs to the $j^{\text{th}}$ latent class, and $z'_{isj}=0$ otherwise. The complete data weighted log-likelihood function is given by \begin{align} \tilde{\mathcal{L}}^{*}(\bm{\Phi};\mathcal{D}^{\text{com}}) &=\sum_{i=1}^nW(y_i)\left\{\left[\sum_{j=1}^{J}z_{ij}\log \pi_jf_b(y_i;\mu_j,\phi_j)\right]+z_{i(J+1)}\log\pi_{J+1} f_t(y_i;\theta,\gamma)\right\}\nonumber\\ &\quad +\sum_{i=1}^{n}\sum_{s=1}^{k_i}W(y_i)\left\{\left[\sum_{j=1}^J z'_{ijs}\log\pi_j f_b(y'_{is};\mu_j,\phi_j)\right]+z'_{i(J+1)s}\log\pi_{J+1} f_t(y'_{is};\theta,\gamma)\right\}. 
\end{align} \subsection{E-step} \label{supp:sec:em_e} The expectation of the complete data weighted log-likelihood is given by the following for the $l^{\text{th}}$ iteration: \begin{align} &Q^{*}(\bm{\Phi}|\bm{\Phi}^{(l-1)})\nonumber\\ &=\sum_{i=1}^nW(y_i)\Bigg\{\sum_{j=1}^{J}z^{(l)}_{ij}\left\{\log\pi_j-\frac{1}{\phi_j}\log\phi_j-\frac{1}{\phi_j}\log\mu_j-\log\Gamma(\frac{1}{\phi_j})+(\frac{1}{\phi_j}-1)\log y_i-\frac{y_i}{\phi_j\mu_j}\right\}\nonumber\\ &\hspace{8em}+ z^{(l)}_{i(J+1)}\left\{\log\pi_{J+1}+\log\gamma+\gamma\log\theta-(\gamma+1)\log(y_i+\theta)\right\}\Bigg\}\nonumber\\ &\quad +k^{(l)}\left(\sum_{i=1}^{n}W(y_i)\right)\Bigg\{\sum_{j=1}^J z^{'(l)}_{j}\left\{\log\pi_j-\frac{1}{\phi_j}\log\phi_j-\frac{1}{\phi_j}\log\mu_j-\log\Gamma(\frac{1}{\phi_j})+(\frac{1}{\phi_j}-1)\widehat{\log y'}^{(l)}_j-\frac{\widehat{y'}^{(l)}_j}{\phi_j\mu_j}\right\}\nonumber\\ &\hspace{12em}+z^{'(l)}_{(J+1)}\left\{\log\pi_{J+1}+\log\gamma+\gamma\log\theta-(\gamma+1)\widehat{\log(y'+\theta)}^{(l)}_{J+1}\right\}\Bigg\}, \end{align} where \begin{equation} z^{(l)}_{ij}=P(z_{ij}=1|\bm{y},\bm{\Phi}^{(l-1)})= \begin{cases} \dfrac{\pi_j^{(l-1)}f_b(y_i;\mu_j^{(l-1)},\phi_j^{(l-1)})}{h(y_i;\bm{\Phi}^{(l-1)})},\quad j=1,\ldots,J\\ \dfrac{\pi^{(l-1)}_{J+1}f_t(y_i;\theta,\gamma^{(l-1)})}{h(y_i;\bm{\Phi}^{(l-1)})},\quad j=J+1, \end{cases} \end{equation} \begin{equation} k^{(l)}=E(k_i|\bm{y},\bm{\Phi}^{(l-1)})=\frac{\int_0^{\infty}f_t(u;\bm{\Phi}^{(l-1)})(1-W(u))du}{\int_0^{\infty}h(u;\bm{\Phi}^{(l-1)})W(u)du}, \end{equation} \begin{equation} z_{j}^{'(l)}=P(z'_{ijs}=1|\bm{y},\bm{\Phi}^{(l-1)})= \begin{cases} \dfrac{\pi_j^{(l-1)}\int_0^{\infty}f_b(u;\mu_j^{(l-1)},\phi_j^{(l-1)})(1-W(u))du}{\int_0^{\infty}h(u;\bm{\Phi}^{(l-1)})(1-W(u))du},\quad j=1,\ldots,J\\ \dfrac{\pi_{J+1}^{(l-1)}\int_0^{\infty}f_t(u;\theta,\gamma^{(l-1)})(1-W(u))du}{\int_0^{\infty}h(u;\bm{\Phi}^{(l-1)})(1-W(u))du}, \quad j=J+1, \end{cases} \end{equation} \begin{equation} 
\widehat{y'}^{(l)}_j=E(y_{is}'|\bm{y},\bm{\Phi}^{(l-1)},z'_{ijs}=1) =\frac{\int_0^{\infty}uf_b(u;\mu_j^{(l-1)},\phi_j^{(l-1)})(1-W(u))du}{\int_0^{\infty}f_b(u;\mu_j^{(l-1)},\phi_j^{(l-1)})(1-W(u))du}, \quad j=1,\ldots,J, \end{equation} \begin{equation} \widehat{\log y'}^{(l)}_j=E(\log y_{is}'|\bm{y},\bm{\Phi}^{(l-1)},z'_{ijs}=1) =\frac{\int_0^{\infty}\log uf_b(u;\mu_j^{(l-1)},\phi_j^{(l-1)})(1-W(u))du}{\int_0^{\infty}f_b(u;\mu_j^{(l-1)},\phi_j^{(l-1)})(1-W(u))du}, \quad j=1,\ldots,J, \end{equation} \begin{equation} \widehat{\log(y'+\theta)}^{(l)}_{J+1}=E(\log (y_{is}'+\theta)|\bm{y},\bm{\Phi}^{(l-1)},z'_{i(J+1)s}=1) =\frac{\int_0^{\infty}\log (u+\theta) f_t(u;\theta,\gamma^{(l-1)})(1-W(u))du}{\int_0^{\infty}f_t(u;\theta,\gamma^{(l-1)})(1-W(u))du}. \end{equation} Note that the above integrals all have analytical form of solutions under the example choice of the following weight functions (for the generalized weight function form as presented in the main paper, we do a numerical integration instead): \begin{itemize} \item Case 1: Exponential distribution with $W(y;\tilde{\mu})=1-\exp\{-y/\tilde{\mu}\}$; \item Case 2: Two-point discrete distribution with $W(y;\tilde{\mu},\tilde{\phi})=(1-\tilde{\phi})1\{y>\tilde{\mu}\}+\tilde{\phi}$. \end{itemize} Re-parameterize the gamma distribution with $\alpha=1/\phi_j$ and $\beta=1/\phi_j\mu_j$, we are to compute \begin{equation} \int_{0}^{\infty}q(u)\frac{\beta^{\alpha}}{\Gamma(\alpha)}u^{\alpha-1}\exp\{-\beta u\}(1-W(u))du \end{equation} for $q(u)=1$, $q(u)=u$ and $q(u)=\log u$; and \begin{equation} \int_{0}^{\infty}r(u)\frac{\gamma\theta^{\gamma}}{(u+\theta)^{\gamma+1}}(1-W(u))du \end{equation} for $r(u)=1$ and $r(u)=\log(u+\theta)$. \textbf{Case 1}. 
We have the following analytical results: \begin{equation} \int_{0}^{\infty}\frac{\beta^{\alpha}}{\Gamma(\alpha)}u^{\alpha-1}\exp\{-\beta u\}(1-W(u))du=\left(\frac{\beta}{\beta+1/\tilde{\mu}}\right)^{\alpha}, \end{equation} \begin{equation} \int_{0}^{\infty}u\frac{\beta^{\alpha}}{\Gamma(\alpha)}u^{\alpha-1}\exp\{-\beta u\}(1-W(u))du=\frac{\alpha\beta^{\alpha}}{(\beta+1/\tilde{\mu})^{\alpha+1}}, \end{equation} \begin{equation} \int_{0}^{\infty}\log u\frac{\beta^{\alpha}}{\Gamma(\alpha)}u^{\alpha-1}\exp\{-\beta u\}(1-W(u))du=\frac{\beta^{\alpha}}{\Gamma(\alpha)}\frac{\partial}{\partial\alpha}\frac{\Gamma(\alpha)}{(\beta+1/\tilde{\mu})^{\alpha}}, \end{equation} \begin{equation} \int_{0}^{\infty}\frac{\gamma\theta^{\gamma}}{(u+\theta)^{\gamma+1}}(1-W(u))du=\gamma\left(\frac{\theta}{\tilde{\mu}}\right)^{\gamma}\exp\left\{\frac{\theta}{\tilde{\mu}}\right\}\Gamma(-\gamma;\frac{\theta}{\tilde{\mu}},\infty), \end{equation} \begin{equation} \int_{0}^{\infty}\log(u+\theta)\frac{\gamma\theta^{\gamma}}{(u+\theta)^{\gamma+1}}(1-W(u))du=-\gamma\theta^{\gamma}\exp\left\{\frac{\theta}{\tilde{\mu}}\right\}\frac{\partial}{\partial\gamma}\Gamma(-\gamma;\frac{\theta}{\tilde{\mu}},\infty), \end{equation} where $\Gamma(m;c_1,c_2)=\int_{c_1}^{c_2}u^{m-1}\exp\{-u\}du$ is an incomplete gamma function. \textbf{Case 2}. 
We have the following analytical results: \begin{equation} \int_{0}^{\infty}\frac{\beta^{\alpha}}{\Gamma(\alpha)}u^{\alpha-1}\exp\{-\beta u\}(1-W(u))du=\tilde{\phi}\frac{\Gamma(\alpha;\beta\tilde{\mu},\infty)}{\Gamma(\alpha)}+(1-\tilde{\phi}), \end{equation} \begin{equation} \int_{0}^{\infty}u\frac{\beta^{\alpha}}{\Gamma(\alpha)}u^{\alpha-1}\exp\{-\beta u\}(1-W(u))du=\frac{\alpha}{\beta}\left[\tilde{\phi}\Gamma(\alpha+1;\beta\tilde{\mu},\infty)+(1-\tilde{\phi})\right], \end{equation} \begin{equation} \int_{0}^{\infty}\log u\frac{\beta^{\alpha}}{\Gamma(\alpha)}u^{\alpha-1}\exp\{-\beta u\}(1-W(u))du=\frac{\beta^{\alpha}}{\Gamma(\alpha)}\left[\tilde{\phi}\frac{\partial}{\partial\alpha}\frac{\Gamma(\alpha;\beta\tilde{\mu},\infty)}{\beta^{\alpha}}+(1-\tilde{\phi})\frac{\partial}{\partial\alpha}\frac{\Gamma(\alpha)}{\beta^{\alpha}}\right], \end{equation} \begin{equation} \int_{0}^{\infty}\frac{\gamma\theta^{\gamma}}{(u+\theta)^{\gamma+1}}(1-W(u))du=\tilde{\phi}\left(\frac{\theta}{\tilde{\mu}+\theta}\right)^{\gamma}+(1-\tilde{\phi}), \end{equation} \begin{equation} \int_{0}^{\infty}\log(u+\theta)\frac{\gamma\theta^{\gamma}}{(u+\theta)^{\gamma+1}}(1-W(u))du=-\gamma\theta^{\gamma}\frac{\partial}{\partial\gamma}\left[\tilde{\phi}\frac{1}{\gamma(\tilde{\mu}+\theta)^{\gamma}}+(1-\tilde{\phi})\frac{1}{\gamma\theta^{\gamma}}\right]. 
\end{equation} \subsection{M-step} \label{supp:sec:em_m} Maximizing $Q^{*}(\bm{\Phi}|\bm{\Phi}^{(l-1)})$ with respect to $\bm{\Phi}$ yields the following parameter updates: \begin{equation} \pi_j^{(l)}=\frac{\sum_{i=1}^{n}W(y_i)z_{ij}^{(l)}+\left(\sum_{i=1}^{n}W(y_i)\right)k^{(l)}z_j^{'(l)}}{\sum_{j'=1}^{J+1}\left\{\sum_{i=1}^{n}W(y_i)z_{ij'}^{(l)}+\left(\sum_{i=1}^{n}W(y_i)\right)k^{(l)}z_{j'}^{'(l)}\right\}},\quad j=1,\ldots,J+1, \end{equation} \begin{equation} \mu_j^{(l)}=\frac{\sum_{i=1}^{n}W(y_i)z_{ij}^{(l)}y_i+\left(\sum_{i=1}^{n}W(y_i)\right)k^{(l)}z_j^{'(l)}\widehat{y'}^{(l)}_j}{\sum_{i=1}^{n}W(y_i)z_{ij}^{(l)}+\left(\sum_{i=1}^{n}W(y_i)\right)k^{(l)}z_j^{'(l)}},\quad j=1,\ldots,J, \end{equation} \begin{align} \phi_j^{(l)} &=\underset{\phi_j>0}{\text{argmax}}\Bigg\{\sum_{i=1}^nW(y_i)z^{(l)}_{ij}\left\{-\frac{1}{\phi_j}\log\phi_j-\frac{1}{\phi_j}\log\mu_j^{(l)}-\log\Gamma(\frac{1}{\phi_j})+(\frac{1}{\phi_j}-1)\log y_i-\frac{y_i}{\phi_j\mu_j}\right\}\nonumber\\ &\hspace{5em} +k^{(l)}\left(\sum_{i=1}^{n}W(y_i)\right)z^{'(l)}_{j}\left\{-\frac{1}{\phi_j}\log\phi_j-\frac{1}{\phi_j}\log\mu_j^{(l)}-\log\Gamma(\frac{1}{\phi_j})+(\frac{1}{\phi_j}-1)\widehat{\log y'}^{(l)}_j-\frac{\widehat{y'}^{(l)}_j}{\phi_j\mu_j^{(l)}}\right\}\Bigg\},\nonumber\\ \end{align} \begin{equation} \gamma^{(l)}=\frac{\sum_{i=1}^{n}W(y_i)z_{i(J+1)}^{(l)}+\left(\sum_{i=1}^{n}W(y_i)\right)k^{(l)}z_{J+1}^{'(l)}}{\sum_{i=1}^{n}W(y_i)z_{i(J+1)}^{(l)}\left[\log(y_i+\theta)-\log\theta\right]+\left(\sum_{i=1}^{n}W(y_i)\right)k^{(l)}z_{J+1}^{'(l)}\left[\widehat{\log(y'+\theta)}^{(l)}_{J+1}-\log\theta\right]}. \end{equation} Note here that $\theta$ is treated as a fixed hyperparameter not involved in estimation procedure. To estimate $\theta$ as a parameter, we may need to take a further step to numerically maximize the observed data weighted log-likelihood $\mathcal{L}^{*}_n(\bm{\Phi};\bm{y})$ w.r.t. $\theta$. 
\section{GEM algorithm for MWLE under J-Gamma Lomax mixture model: Parameter transformation approach} \subsection{Construction of complete data} The complete data is given by \begin{equation} \mathcal{D}^{\text{com}}=\{(y_i,\bm{z}_i^{*})\}_{i=1,\ldots,n}, \end{equation} where $\bm{z}_i^{*}=(z_{i1}^{*},\ldots,z_{i(J+1)}^{*})$ are the labels where $z_{ij}^{*}=1$ if observation $i$ belongs to the $j^{\text{th}}$ (transformed) latent mixture component and $z_{ij}^{*}=0$ otherwise. The complete data weighted log-likelihood function is given by \begin{align} \tilde{\mathcal{L}}^{*}_n(\bm{\Phi};\mathcal{D}^{\text{com}}) &=\sum_{i=1}^{n}W(y_i)\Bigg\{\left[\sum_{j=1}^{J}z_{ij}^{*}\left(\log\pi_j^{*}+\log f_b(y_i;\mu_j,\phi_j) -\log\int_0^{\infty}f_b(u;\mu_j,\phi_j)W(u)du\right)\right]\nonumber\\ &\hspace{8em}+z_{i(J+1)}^{*}\left(\log\pi_{J+1}^{*}+\log f_t(y_i;\theta,\gamma)W(y_i)-\log\int_0^{\infty}f_t(u;\theta,\gamma)W(u)du\right)\Bigg\}. \end{align} \subsection{E-step} \label{supp:sec:em_e2} The expectation of the complete data weighted log-likelihood is given by the following for the $l^{\text{th}}$ iteration: \begin{align} Q^{*}(\bm{\Phi}|\bm{\Phi}^{(l-1)}) &=\sum_{i=1}^{n}W(y_i)\Bigg\{\Bigg[\sum_{j=1}^{J}z_{ij}^{*(l)}\Bigg(\log\pi_j^{*}-\frac{1}{\phi_j}\log\phi_j-\frac{1}{\phi_j}\log\mu_j-\log\Gamma(\frac{1}{\phi_j})+(\frac{1}{\phi_j}-1)\log y_i-\frac{y_i}{\phi_j\mu_j}\nonumber\\ &\hspace{12em}-\log\int_0^{\infty}f_b(u;\mu_j,\phi_j)W(u)du\Bigg)\Bigg]\nonumber\\ &\hspace{8em}+z_{i(J+1)}^{*(l)}\Bigg(\log\pi_{J+1}^{*}+\log\gamma+\gamma\log\theta-(\gamma+1)\log(y_i+\theta)\nonumber\\ &\hspace{13em}-\log\int_0^{\infty}f_t(u;\theta,\gamma)W(u)du\Bigg)\Bigg\}, \end{align} where \begin{equation} z^{*(l)}_{ij}=P(z^{*}_{ij}=1|\bm{y},\bm{\Phi}^{(l-1)})= \begin{cases} \dfrac{\pi_j^{*(l-1)}f_b(y_i;\mu_j^{(l-1)},\phi_j^{(l-1)})W(y_i)}{\int_0^{\infty}f_b(u;\mu_j^{(l-1)},\phi_j^{(l-1)})W(u)du\times h(y_i;\bm{\Phi}^{(l-1)})},\quad j=1,\ldots,J\\ 
\dfrac{\pi^{*(l-1)}_{J+1}f_t(y_i;\theta,\gamma^{(l-1)})W(y_i)}{\int_0^{\infty}f_t(u;\theta,\gamma^{(l-1)})W(u)du\times h(y_i;\bm{\Phi}^{(l-1)})},\quad j=J+1. \end{cases} \end{equation} \subsection{M-step} \label{supp:sec:em_m2} Maximizing $Q^{*}(\bm{\Phi}|\bm{\Phi}^{(l-1)})$ with respect to $\bm{\Phi}$ yields the following parameter updates: \begin{equation} \pi_j^{*(l)}=\frac{\sum_{i=1}^{n}W(y_i)z_{ij}^{*(l)}}{\sum_{j'=1}^{J+1}\sum_{i=1}^{n}W(y_i)z_{ij'}^{*(l)}},\quad j=1,\ldots,J+1, \end{equation} and the other parameters $(\bm{\mu},\bm{\phi},\theta,\gamma)$ are sequentially updated by numerically maximizing $Q^{*}(\bm{\Phi}|\bm{\Phi}^{(l-1)})$ w.r.t. each of the parameters. \section{Proof of Proposition 3} \label{supp:sec:ascend} Write $\mathcal{L}^{*}_n(\bm{\Phi};\bm{y})=\sum_{i=1}^{n}W(y_i)\log p(y_i|\bm{\Phi})$ and $\tilde{\mathcal{L}}^{*}_n(\bm{\Phi}^{(l)};\mathcal{D}^{\text{com}})=\sum_{i=1}^{n}W(y_i)\left[\log p(y_i|\bm{\Phi}) +\log p(\mathcal{D}^{\text{mis}}_i|\bm{\Phi},y_i)\right]$ for some probability density $p$ and missing data from sample $i$ given by $\mathcal{D}^{\text{mis}}_i$. Then, we have \begin{align} \mathcal{L}^{*}_n(\bm{\Phi};\bm{y}) &=\tilde{\mathcal{L}}^{*}_n(\bm{\Phi}^{(l)};\mathcal{D}^{\text{com}})-\sum_{i=1}^{n}W(y_i)\log p(\mathcal{D}^{\text{mis}}_i|\bm{\Phi},y_i)\nonumber\\ &=Q^{*}(\bm{\Phi}|\bm{\Phi}^{(l-1)})-\sum_{i=1}^{n}W(y_i)\int p(\bm{v}_i|\bm{\Phi}^{(l-1)},y_i)\log p(\bm{v}_i|\bm{\Phi},y_i)d\bm{v}_i, \end{align} where the second equality results from expectation of both sides on the missing data under parameters $\bm{\Phi}^{(l-1)}$. Then, we have \begin{align} \mathcal{L}^{*}_n(\bm{\Phi}^{(l)};\bm{y})-\mathcal{L}^{*}_n(\bm{\Phi}^{(l-1)};\bm{y}) &=Q^{*}(\bm{\Phi}^{(l)}|\bm{\Phi}^{(l-1)})-Q^{*}(\bm{\Phi}^{(l-1)}|\bm{\Phi}^{(l-1)})\nonumber\\ &\quad+\sum_{i=1}^{n}W(y_i)\int p(\bm{v}_i|\bm{\Phi}^{(l-1)},y_i)\log\frac{p(\bm{v}_i|\bm{\Phi}^{(l-1)},y_i)}{p(\bm{v}_i|\bm{\Phi}^{(l)},y_i)}d\bm{v}_i\geq 0. 
\end{align} \section{Initialization of parameters} \label{apx:em:init} As briefly described in Section 5.3 of the paper, parameter initialization $\bm{\Phi}^{(0)}$ is done using the CMM approach by \cite{gui2018fit}. This comes with the following steps: \begin{enumerate} \item Determine a threshold $\tau$ which classifies observations $y_i$ into either body (when $y_i\leq\tau$) or tail (when $y_i>\tau$) part of distribution. This can be done by plotting the log of empirical data survival function against $\log y_i$, which is called the log-log plot. For regular varying distributions, the log-log plot is asymptotically linear. $\tau$ is approximated by the point where the curve turns linear onwards. \item Perform K-means clustering on $\{y_i\}_{i:y_i\leq\tau}$ with $J$ clusters, and obtain the clustering mean $\{\mu^{\text{cluster}}_j\}_{j=1,\ldots,J}$, variance $\{(\sigma^{\text{cluster}}_j)^2\}_{j=1,\ldots,J}$ and weights $\{\tilde{\pi}_j^{\text{cluster}}\}_{j=1,\ldots,J}$. \item Set $\mu_j^{(0)}=\mu^{\text{cluster}}_j$, $\phi_j^{(0)}=({\sigma^{\text{cluster}}_j})^2/{\mu^{\text{cluster}}_j}^2$. \item Obtain $\theta^{(0)}$ and $\gamma^{(0)}$ by matching the first two moments of observations belonging to the tail component (i.e. $\{y_i\}_{i:y_i>\tau}$). \item Set $\pi_{J+1}^{(0)}$ as the proportion of observations satisfying $y_i>\tau$. \item Set the remaining weight parameters as $\pi_{j}^{(0)}=\tilde{\pi}_j^{\text{cluster}}(1-\pi_{J+1}^{(0)})$. \end{enumerate} \section{Truncated log-likelihood function} \label{sec:supp:tll} This section includes more details for Remark 6 in the paper. Denote $g(y)$ as the true distribution generating the observations and $\tilde{h}(y;\bm{\Phi})=\frac{h(y;\bm{\Phi})W(y)}{\int_0^{\infty}h(u;\bm{\Phi})W(u)du}$ as the truncated distribution. 
The expected weighted log-likelihood can be alternatively written as \begin{align} n\times\tilde{E}[\mathcal{L}^{*}(\bm{\Phi};\bm{Y})] &=n\int_{0}^{\infty}W(u)\log \tilde{h}(u;\bm{\Phi})\times g(u)du\nonumber\\ &=n\int_{0}^{\infty}g(u)W(u)du\times\int_{0}^{\infty}\log \tilde{h}(u;\bm{\Phi})\times\frac{g(u)W(u)}{\int_{0}^{\infty}g(t)W(t)dt}du\nonumber\\ &=n\int_{0}^{\infty}g(u)W(u)du\times\tilde{E}^*[\log \tilde{h}(u;\bm{\Phi})], \end{align} where the expectation $\tilde{E}^*$ is taken on $Y$ under the random truncated distribution $\frac{g(u)W(u)}{\int_{0}^{\infty}g(t)W(t)dt}$. Next, denote a random set $S_n=\{i:V_i(y_i)=1\}$, such that $\mathcal{L}^{**}_n(\bm{\Phi};\bm{y})$ can be written as \begin{equation} \mathcal{L}^{**}_n(\bm{\Phi};\bm{y})=\sum_{i\in S_n}\log \tilde{h}(y_i;\bm{\Phi}), \end{equation} with effective number of terms $|S_n|\approx n\int_{0}^{\infty}g(u)W(u)du\approx \sum_{i=1}^{n}W(y_i)$ in probability as $n\rightarrow\infty$. Comparing the above two equations, we see that they simply correspond to standard MLE with a bias term of $P$. \section{Preliminary analysis of the motivating Greek dataset} \label{apx:prelim_data} Modelling the property damage claim size distribution is very challenging. Observing from Figures \ref{fig:density} and \ref{fig:loglogplot}, which are also presented by \cite{fung2021mixture}, the claim size distribution is not only heavy-tailed but also multi-modal. The key complexity of the empirical distribution is that there are many small distributional nodes for smaller claims, as evidenced by the right panel of Figure \ref{fig:density}. On the other hand, it is undesirable to model all these nodes using an excessive number of mixture components as (i) precise predictions of small claims are of less relevance to insurance pricing and risk management; (ii) this impedes the model interpretability.
Further, the heavy-tailedness of the empirical distribution is evidenced by the asymptotic linearity of both the log-log plot and the mean excess plot in Figure \ref{fig:loglogplot}. The asymptotic slope of the log-log plot suggests that the estimated tail index is $\gamma\approx 1.3$ while the Lomax tail index obtained by \cite{fung2021mixture} is about $\gamma=1.38$, under a subjective choice of splicing threshold. Note however that these only provide very rough guidance on the true tail index. Note that distributional multimodality and contamination are indeed prevalent not only in the aforementioned Greek dataset, but also in many publicly available insurance data sets. Notable examples include the French business interruption losses (\textbf{frebiloss}), French motor third party liability claims (\textbf{fremotor2sev9907} and \textbf{freMPL8}) and Norwegian car claims (\textbf{norauto}), which can all be retrieved from the \textbf{R} package \textbf{CASdatasets}. This suggests that the modelling challenges emphasized in this paper are not only valid for the Greek data set we are analyzing, but are also applicable to many insurance claim severity data sets. \begin{figure} \caption{Empirical density of claim amounts (left panel) and log claim amounts (right panel); the orange vertical lines represent amounts of 10,000, 20,000, 50,000 and 100,000 respectively.} \label{fig:density} \end{figure} \begin{figure} \caption{Left panel: log-log plot of the claim amounts; right panel: mean excess plot.} \label{fig:loglogplot} \end{figure} \end{document}
\begin{document} \title{The spanning method and the Lehmer totient problem} \author{Theophilus Agama} \address{Department of Mathematics, African Institute for Mathematical Sciences, Ghana, Cape-coast} \email{[email protected]/[email protected]} \date{\today} \keywords{fractional totient invariant function; span; measure; variation} \begin{abstract} In this paper we introduce and develop the notion of spanning of integers along functions $f:\mathbb{N}\longrightarrow \mathbb{R}$. We apply this method to a class of problems requiring one to determine whether equations of the form $tf(n)=n-k$ have a solution $n\in \mathbb{N}$ for a fixed $k\in \mathbb{N}$ and some $t\in \mathbb{N}$. In particular, we show that \begin{align} \# \{n\leq s~|~t\varphi(n)+1=n,~t,n \in \mathbb{N}\}\geq \frac{s}{2\log s}\prod \limits_{p | s}(1-\frac{1}{p})^{-1}+O(1)\nonumber \end{align}where $\varphi$ is the Euler totient function. \end{abstract} \maketitle \section{\textbf{Introduction and problem statement}} Let $\varphi:\mathbb{N}\longrightarrow \mathbb{N}$ be the Euler totient function, so that $\varphi(s)$ is the number of integers $n\leq s$ that are co-prime with $s$. The image of the Euler totient function is defined on prime number arguments as the unit left translate of the primes; in particular, we have $\varphi(p)=p-1$ and one can clearly see that $\varphi(p)|p-1$. The Euler totient function, among other things, is a \textbf{multiplicative} function on the set of all positive integers. That is to say, if $n=u\cdot v$ for $u,v\in \mathbb{N}$ and $\gcd(u,v)=1$, then we have the decomposition \begin{align} \varphi(n)=\varphi(u\cdot v)=\varphi(u)\varphi(v).\nonumber \end{align} It is natural to speculate whether composites also satisfy the divisibility relation $\varphi(n)|n-1$. To this end, the mathematician D.H.~Lehmer posed the question which is now known as the \textbf{Lehmer totient problem}. \begin{question} Can the totient function of a composite number $n$ divide $n-1$?
\end{question} The Lehmer totient problem is considered to be of the same class, and possibly of the same difficulty, as the odd perfect number problem. The problem has caught the attention of, and has been investigated by, quite a number of authors. Indeed, D.H.~Lehmer is known to have pioneered research in this area, where he made substantial progress by showing that if there exists such a composite number $n$, then it must be odd, square-free and have at least \textbf{seven} distinct prime factors \cite{lehmer1932euler}. Further improvements were made by Hagis and Cohen in 1980, who showed that if such a composite number $n$ exists then it must satisfy $n\geq 10^{20}$ and have at least \textbf{fourteen} distinct prime factors \cite{cohen1980number}. This was further improved by Hagis, who proved that if $3$ divides $n$, then $n\geq 10^{1937042}$ and $n$ has at least $298848$ distinct prime factors \cite{hagis1988equation}. It is also known (see \cite{luca2011composite}) that the number of solutions $\leq x$ to the Lehmer totient problem satisfies the upper bound \begin{align} \leq \frac{\sqrt{x}}{(\log x)^{\frac{1}{2}+o(1)}}\nonumber \end{align}where $o(1)$ is defined as a function that tends to zero as $x$ tends to infinity. In this paper we study the Lehmer totient problem using the lower bound \begin{lemma} The lower bound holds \begin{align} \# \{n\leq s~|~t\varphi(n)+1=n,~t,n \in \mathbb{N}\}\geq \frac{s}{2\log s}\prod \limits_{p| s}(1-\frac{1}{p})^{-1}+O(1)\nonumber \end{align}where $\varphi$ is the Euler totient function. \end{lemma} In this paper, we denote by $a|b$ the statement that $a$ divides $b$. Also, when we write $f(n)=o(1)$ for an arithmetic function $f:\mathbb{N}\longrightarrow \mathbb{N}$, we mean $\lim \limits_{n\longrightarrow \infty}f(n)=0$. Similarly, when we write $f(n)=O(g(n))$, we mean there exists some fixed constant $c>0$ such that $f(n)\leq c|g(n)|$ for all sufficiently large values of $n$.
The notation $f(n)\ll g(n)$ is also alternatively used to convey the same meaning, where there is the flexibility to write the converse of the inequality as $f(n)\geq c|g(n)|$ for some fixed constant $c>0$ and all sufficiently large values of $n$. In this case, we will simply write $f(n)\gg g(n)$. We also write $f(n)\sim g(n)$ if and only if \begin{align} \lim \limits_{n\longrightarrow \infty}\frac{f(n)}{g(n)}=1.\nonumber \end{align} \section{\textbf{Preliminary results}} In this paper, we find the following elementary inequalities useful. We will employ them in the course of establishing the main result of this paper. \begin{lemma}\label{axler} Let $S(x)$ denote the sum of all prime numbers $\leq x$. Then the inequality \begin{align} S(x)>\frac{x^2}{2\log x}+\frac{x^2}{4\log^2 x}+\frac{x^2}{4\log^3 x}+\frac{1.2x^2}{8\log^4 x}\nonumber \end{align}holds for all $x\geq 905238547$. \end{lemma} \begin{proof} For a proof see for instance \cite{axler2019sum}. \end{proof} \begin{lemma}[The prime number theorem] Let $\pi(x)$ denote the number of primes $\leq x$. Then \begin{align} \pi(x) \sim \frac{x}{\log x}.\nonumber \end{align} \end{lemma} \begin{lemma}[Mertens' formula]\label{Merten} The asymptotic holds \begin{align} \prod \limits_{p\leq s}(1-\frac{1}{p}) \sim \frac{e^{-\gamma}}{\log s}\nonumber \end{align}where $\gamma=0.5772\cdots$ is the Euler--Mascheroni constant. \end{lemma} \begin{lemma}[Stieltjes--Lebesgue integration by parts]\label{Lebesgue} Let $f:[a,b]\longrightarrow \mathbb{R}$ and $g:[a,b]\longrightarrow \mathbb{R}$ be right continuous and of bounded variation on $[a,b]$ and both having left limits. Then we have \begin{align} f(b)g(b)-f(a)g(a)=\int \limits_{(a,b]}f(t^-)dg(t)+\int \limits_{(a,b]}g(t^-)df(t)+\sum \limits_{t\in (a,b]}\Delta f_t \Delta g_t \nonumber \end{align}where $\Delta f_t=f(t)-f(t^-)$.
\end{lemma} \section{\textbf{The method of spanning along a function}} In this section we introduce and study the notion of \textbf{spanning} of integers along a function. We study this notion together with associated statistics and explore some applications. \begin{definition} Let $f:\mathbb{N}\longrightarrow \mathbb{R}$. Then we say $n$ is $k$-step \textbf{spanned} along the function with multiplicity $t$ if \begin{align} tf(n)+k=n.\nonumber \end{align}We call the set of all $n\in \mathbb{N}$ such that $n$ is $k$-step spanned the $k^{th}$-step spanning set along $f$ and denote it by $\mathbb{S}_k(f)$. We call the truncated $k$-step spanning set $\mathbb{S}_k(f)\cap \mathbb{N}_s:=\mathbb{S}_k(f,s)$ the $s^{th}$ scale spanned along $f$. We write the length of this spanned set as \begin{align} |\mathbb{S}_k(f,s)|:=\# \{n\leq s~|~tf(n)+k=n,~t\in \mathbb{N}\}.\nonumber \end{align}It is easy to see that $|\mathbb{S}_k(f,s)|<s$. \end{definition} \subsection{\textbf{The $s$-level measure of the spanned set}} In this section we introduce the notion of the measure of the span set. We begin with the following definition.
\begin{definition} By the $s^{th}$ level measure of the span set $\mathbb{S}_k(f)$, denoted $\mathbb{M}_{f}(s,k)$, we mean the partial sum \begin{align} \mathbb{M}_{f}(s,k):=\sum \limits_{\substack{2\leq n\leq s\\n \in \mathbb{S}_k(f)}}f(n).\nonumber \end{align} \end{definition} Let us suppose that $f$ is a right-continuous function of bounded variation on $[j-1,j)$ for all $j\geq 3$ with $j\in \mathbb{N}$, having a left limit. Then, by applying Stieltjes--Lebesgue integration by parts, we can write the $s^{th}$ level measure of the span set in the form \begin{align} \mathbb{M}_{f}(s,k):&=\sum \limits_{2\leq j \leq s}\sum \limits_{\substack{j-1<n\leq j\\n \in \mathbb{S}_k(f)}}f(n)\nonumber \\&=\sum \limits_{2\leq j\leq s}\int \limits_{(j-1)}^{j}f(t)d|\mathbb{S}_k(f,t)|\nonumber \\&<\sum \limits_{2\leq j\leq s}\bigg(f(j)|\mathbb{S}_k(f,j)|-f(j-1)|\mathbb{S}_k(f,j-1)|\bigg)\nonumber \\&=f(s)|\mathbb{S}_k(f,s)|-f(1)|\mathbb{S}_k(f,1)|.\nonumber \end{align}The following inequality is a simple consequence of the above analysis. \begin{proposition}[Spanning inequality]\label{inequality 1} Let $f$ be a right-continuous function and of bounded variation on $[x,x+1)$ for $x\geq 1$ with $x\in \mathbb{N}$ and have left limits. Then the inequality holds \begin{align} |\mathbb{S}_k(f,s)|\geq \frac{1}{f(s)}\sum \limits_{\substack{2\leq n\leq s\\n \in \mathbb{S}_k(f)}}f(n)+\frac{f(1)|\mathbb{S}_k(f,1)|}{f(s)}.\nonumber \end{align} \end{proposition} \section{\textbf{The fractional totient invariant function}} In this section we introduce and study a new function defined on the real line. We begin with the following definition.
\begin{definition}\label{euler totient function extension} By the fractional totient invariant function, we mean the function $\tilde{\varphi}:[1,\infty) \longrightarrow \mathbb{R}$ such that \begin{align} \tilde{\varphi}(a)=\varphi(\lfloor a\rfloor)+\{a\}\nonumber \end{align}where $\varphi$ is the Euler totient function and $\lfloor \cdot \rfloor$ and $\{\cdot \}$ are the floor and the fractional part of a real number, respectively. \end{definition} The fractional totient invariant function turns out to be an interesting function that in some way extends the Euler totient function to the reals. Even though the notion of co-primality is not well-defined on the entire real line, it captures the intrinsic property of the Euler totient function defined on the positive integers. In essence, the Euler totient function and the fractional totient invariant function coincide on the set of positive integers. Next, we examine some elementary properties of the fractional totient invariant function in the sequel. \begin{proposition}\label{properties} The following properties of the fractional totient invariant function hold: \begin{enumerate} \item [(i)] If $a$ is a positive integer, then $\tilde{\varphi}(a)=\varphi(a)$. \item [(ii)] $\tilde{\varphi}(a)<a$ for all $a>1$. \end{enumerate} \end{proposition} \begin{remark} We now state an analytic property of the fractional totient invariant function. In fact, the fractional totient invariant function can be seen as a somewhat continuous analogue of the Euler totient function on subsets of the reals. \end{remark} \begin{proposition}\label{analytic property} The function $\tilde{\varphi}:[1,\infty) \longrightarrow \mathbb{R}$ with \begin{align} \tilde{\varphi}(a)=\varphi(\lfloor a\rfloor)+\{a\}\nonumber \end{align}is right-continuous and of bounded variation on $[x,x+1)$ for $x\geq 1$ with $x\in \mathbb{N}$ and has left limits.
\end{proposition} \section{\textbf{Main result}} \begin{lemma}\label{main lemma} The lower bound holds \begin{align} \# \{n\leq s~|~t\varphi(n)+1=n,~t,n \in \mathbb{N}\}\geq \frac{s}{2\log s}\prod \limits_{p| s}(1-\frac{1}{p})^{-1}+O(1)\nonumber \end{align}where $\varphi$ is the euler totient function. \end{lemma} \begin{proof} By appealing to Proposition \ref{inequality 1}, we obtain the lower bound \begin{align} \# \{2\leq n\leq s~|~t\tilde{\varphi}(n)+1=n,~t,n \in \mathbb{N}\}&\geq \frac{1}{\tilde{\varphi}(s)}\sum \limits_{\substack{2\leq n\leq s\\n \in \mathbb{S}_1(\tilde{\varphi})}}\tilde{\varphi}(n)+O(1).\label{2} \end{align}Next we estimate each term on the right-hand side of the inequality. Since $\varphi(p)=p-1$ for any prime number $p\in \mathbb{P}$, we obtain the lower bound \begin{align} \sum \limits_{\substack{2\leq n\leq s\\n \in \mathbb{S}_1(\tilde{\varphi})}}\tilde{\varphi}(n)&\geq \sum \limits_{p\leq s}\varphi(p)\nonumber \\&=\sum \limits_{p\leq s}p-\pi(s).\nonumber \end{align}By applying Lemma \ref{axler}, we obtain the lower bound for sufficiently large values of $s$ \begin{align} \sum \limits_{p\leq s}p-\pi(s)&\geq \frac{s^2}{2\log s}-\pi(s)\nonumber \end{align}so that by appealing to the decomposition \begin{align} \varphi(s)=s\prod \limits_{p|s}(1-\frac{1}{p})\nonumber \end{align}with $\tilde{\varphi}(s)\sim \varphi(s)$ and Lemma \ref{Merten}, we obtain the lower bound \begin{align} \frac{1}{\tilde{\varphi}(s)}\sum \limits_{\substack{2\leq n\leq s\\n \in \mathbb{S}_1(\tilde{\varphi})}}\tilde{\varphi}(n)&\geq \frac{s}{2\log s}\prod \limits_{p | s}(1-\frac{1}{p})^{-1}-\frac{1}{\tilde{\varphi}(s)}\pi(s).\label{1} \end{align}By plugging \eqref{1} into \eqref{2} and applying the prime number theorem, we obtain the lower bound \begin{align} \# \{2\leq n\leq s~|~t\tilde{\varphi}(n)+1=n,~t,n \in \mathbb{N}\}\geq \frac{s}{2\log s}\prod \limits_{p | s}(1-\frac{1}{p})^{-1}-\frac{\pi(s)}{\varphi(s)}+O(1)\nonumber \end{align}and the claim inequality holds 
for $s\geq s_o$ since $\tilde{\varphi}(n)=\varphi(n)$ for each $n\in \mathbb{N}$. \end{proof} \begin{theorem}\label{Lehmer problem} There exists a composite $n\in \mathbb{N}$ such that $\varphi(n)|n-1$. \end{theorem} \begin{proof} Suppose on the contrary that there exists no composite $n\in \mathbb{N}$ such that $\varphi(n)|n-1$. Then for all $s\geq s_o$, we obtain the lower bound by appealing to Lemma \ref{main lemma} \begin{align} \pi(s)\geq \frac{s}{2\log s}\prod \limits_{p | s}(1-\frac{1}{p})^{-1}+O(1)\nonumber \end{align}where $\pi(s)$ is the prime counting function. Now let the prime $p_o\in \mathbb{P}$ be sufficiently large and choose \begin{align} s:=\prod \limits_{p\leq p_o}p\nonumber \end{align}then it can be checked that \begin{align} \prod \limits_{p | s}(1-\frac{1}{p})^{-1}\geq 3\nonumber \end{align}so that \begin{align} \pi(s)\geq \frac{3}{2}\frac{s}{\log s}+O(1)\nonumber \end{align}which contradicts the prime number theorem. \end{proof} \section{\textbf{Conclusion and further remarks}} The current study uses two major ingredients to overcome a major obstacle that could conceivably have been a major setback for any prior investigations along these lines. The very fact that the Euler totient function is only defined on the set of positive integers and its failure to be at least one-sided continuous on the reals is a major roadblock that has been overcome in this paper. The introduction of a variant of the Euler totient function defined on certain subsets of real numbers and the extra feature that it is right continuous, yet preserving the intrinsic property of the Euler totient function, allows a smooth passage over the preconceived roadblock.

This proof also uses two landmark results of the twentieth century---with some dating back to the eighteenth and nineteenth centuries---the prime number theorem and Mertens' formula.
The original idea of the spanning method has been used to consolidate these fundamental results to prove the lower bound \begin{lemma} The lower bound holds \begin{align} \# \{n\leq s~|~t\varphi(n)+1=n,~t,n \in \mathbb{N}\}\geq \frac{s}{2\log s}\prod \limits_{p| s}(1-\frac{1}{p})^{-1}+O(1)\nonumber \end{align}where $\varphi$ is the Euler totient function. \end{lemma} This lower bound is used as the main tool to show the existence of a certain (large) composite number that satisfies the divisibility relation $\varphi(n)|n-1$. The spanning method and its variants could in principle be used in a careful manner to study related problems, which is not the main goal of this paper. \end{document}
\begin{document} \numberwithin{equation}{section} \renewcommand{030}{030} \FirstPageHeading \renewcommand{$\star$}{$\star$} \ShortArticleName{Geodesic Equations on Dif\/feomorphism Groups} \ArticleName{Geodesic Equations on Dif\/feomorphism Groups\footnote{This paper is a contribution to the Proceedings of the Seventh International Conference ``Symmetry in Nonlinear Mathematical Physics'' (June 24--30, 2007, Kyiv, Ukraine). The full collection is available at \href{http://www.emis.de/journals/SIGMA/symmetry2007.html}{http://www.emis.de/journals/SIGMA/symmetry2007.html}}} \Author{Cornelia VIZMAN} \AuthorNameForHeading{C. Vizman} \Address{Department of Mathematics, West University of Timi\c soara, Romania} \Email{\href{mailto:[email protected]}{[email protected]}} \ArticleDates{Received November 13, 2007, in f\/inal form March 01, 2008; Published online March 11, 2008} \Abstract{We bring together those systems of hydrodynamical type that can be written as geodesic equations on dif\/feomorphism groups or on extensions of dif\/feomorphism groups with right invariant $L^2$ or $H^1$ metrics. We present their formal derivation starting from Euler's equation, the f\/irst order equation satisf\/ied by the right logarithmic derivative of a~geodesic in Lie groups with right invariant metrics.} \Keywords{Euler's equation; dif\/feomorphism group; group extension; geodesic equation} \Classification{58D05; 35Q35} \begin{flushright}\it A fluid moves to get out of its own way as efficiently as possible.\\[1mm] Joe Monaghan \end{flushright} \section{Introduction} Some conservative systems of hydrodynamical type can be written as geodesic equations on the group of dif\/feomorphisms or the group of volume preserving dif\/feomorphisms of a Riemannian manifold, as well as on extensions of these groups. 
Considering right invariant $L^2$ or $H^1$ metrics on these inf\/inite dimensional Lie groups, the following geodesic equations can be obtained: the Euler equation of motion of a perfect f\/luid \cite{Arnold, EM}, the averaged Euler equation \cite{MRS,Shkoller}, the equations of ideal magneto-hydrodynamics \cite{VD,MRW}, the Burgers inviscid equation \cite{Burgers}, the template matching equation \cite{HMA,V3}, the Korteweg--de Vries equation \cite{OK}, the Camassa--Holm shallow water equation \cite{CH,Misiolek1,Kouranbaeva}, the higher dimensional Camassa--Holm equation (also called EPDif\/f or averaged template matching equation) \cite{HM}, the superconductivity equation \cite{Roger}, the equations of motion of a charged ideal f\/luid \cite{V1}, of an ideal f\/luid in Yang--Mills f\/ield \cite{GR3} and of a stratif\/ied f\/luid in Boussinesq approximation \cite{Zeitlin2,V2}. For a Lie group $G$ with right invariant metric, the geodesic equation written for the right logarithmic derivative $u$ of the geodesic is a f\/irst order equation on the Lie algebra $\g$, called the {\it Euler equation}. Denoting by $\ad(u)^\top$ the adjoint of $\ad(u)$ with respect to the scalar product on $\g$ given by the metric, Euler's equation can be written as $\frac{d}{dt}u=-\ad(u)^\top u$. In this survey type article we do the formal derivation of all the equations of hydrodynamical type mentioned above, starting from this equation. By writing such partial dif\/ferential equations as geodesic equations on dif\/feomorphism groups, there are various properties one can obtain using the Riemannian geometry of right invariant metrics on these dif\/feomorphism groups. We will not focus on them in this paper, but we list some of them below, with some of the references. For some of these equations smoothness of the geodesic spray on the group implies local well-posedness of the Cauchy problem as well as smooth dependence on the initial data. 
This applies for the following right invariant Riemannian metrics: $L^2$ metric on the group of volume preserving dif\/feomorphisms \cite{EM}, $H^1$ metric on the group of volume preserving dif\/feomorphisms on a boundary free manifold \cite{Shkoller}, on a manifold with Dirichlet boundary conditions \cite{MRS,Shkoller2} and with Neumann or mixt boundary conditions \cite{Shkoller2,GR}, $H^1$ metric on the group of dif\/feomorphisms of the circle \cite{Shkoller,Kouranbaeva} and on the Bott--Virasoro group \cite{CKKT}, and $H^1$ metric on the group of dif\/feomorphisms on a higher dimensional manifold \cite{GR2}. There are also results on the sectional curvature (with information on the Lagrangian stability) \cite{Arnold,NHK,Preston,Misiolek2,MR,PS,V6,Hattori,ZK,ZP,V1}, on the existence of conjugate points \cite{Misiolek,Misiolek3} and minimal geodesics \cite{Brenier}, on the f\/initeness of the diameter \cite{Shnirelman,Sh,ER}, on the vanishing of geodesic distance \cite{MM}, as well as on the Riemannian geometry of subgroups of dif\/feomorphisms as a submanifold of the full dif\/feomorphism group \cite{Misiolek0,BR,KM3,V3}. \section{Euler's equation} Given a regular Fr\'echet--Lie group in the sense of Kriegl--Michor \cite{KM}, and a (positive def\/inite) scalar product $\langle \ , \ \rangle :\g\times\g\to\mathbb R$ on the Lie algebra $\g$, we can def\/ine a right invariant metric on $G$ by $g_x(\xi,\eta)=\langle\xi x^{-1},\eta x^{-1}\rangle$ for $\xi,\eta\in T_xG$. The energy functional of a smooth curve $c:I=[a,b]\to G$ is def\/ined by \begin{gather*} E(c) =\frac12\int_a^b g_{c(t)}(c'(t),c'(t))dt =\frac12\int_a^b\langle \de^rc(t),\de^rc(t)\rangle dt, \end{gather*} where $\delta^r$ denotes the right logarithmic derivative (angular velocity) on the Lie group $G$, \ie $\de^rc(t)=c'(t)c(t)^{-1}\in\g$. 
We assume the adjoint of $\ad(X)$ with respect to $\langle \ , \ \rangle $ exists for all $X\in\g$ and we denote it by $\ad(X)^\top$, \ie \begin{gather*} \langle \ad(X)^\top Y,Z\rangle =\langle Y,[X,Z]\rangle, \qquad\forall \, X,Y,Z\in\g. \end{gather*} The corresponding notation in \cite{AK} is $B(X,Y)=\ad(Y)^\top X$ for the bilinear map $B:\g\x\g\to\g$. \begin{theorem}\label{theo} The curve $c:[a,b]\to G$ is a geodesic for the right invariant metric $g$ on $G$ if and only if its right logarithmic derivative $u=\de^rc:[a,b]\to\g$ satisfies the Euler equation: \begin{gather}\label{euler} \frac{d}{dt}u=-\ad(u)^\top u. \end{gather} \end{theorem} \begin{proof} We denote the given curve by $c_0$ and its logarithmic derivative by $u_0$. For any variation with f\/ixed endpoints $c(t,s)\in G$, $t\in [a,b]$, $s\in(-\ep,\ep)$ of the given curve $c_0$, we def\/ine $u=(\partial_tc)c^{-1}$ and $v=(\partial_sc)c^{-1}$. In particular $u(\cdot,0)=u_0$, and we denote $v(\cdot,0)$ by $v_0$. Following \cite{Milnor} we show f\/irst that \begin{gather}\label{mc} \partial_t v-\partial_s u=[u,v]. \end{gather} For each $h\in G$ we consider the map $F_h(t,s)=(t,s,c(t,s)h)$ for $t\in[a,b]$ and $s\in (-\ep,\ep)$. The bracket of the following two vector f\/ields on $[a,b]\times (-\ep,\ep)\x G$ vanishes: \begin{gather*} (t,s,g)\mapsto\partial_t+u(t,s)g,\qquad (t,s,g)\mapsto \partial_s+v(t,s)g. \end{gather*} The reason is they correspond under the mappings $F_h$, $h\in G$, to the vector f\/ields $\partial_t$ and $\partial_s$ on $ [a,b]\times (-\ep,\ep)$ (with vanishing bracket). Hence $0=[\partial_t+ug,\partial_s+vg]=(\partial_t v)g-(\partial_s u)g-[u,v]g$, because the bracket of right invariant vector f\/ields corresponds to the opposite bracket on the Lie algebra $\g$, so the claim (\ref{mc}) follows. As in \cite{MR} we compute the derivative of $E(c)=\frac12\int_a^b\langle u,u\rangle dt$ with respect to $s$, using the fact that $v(a,s)=v(b,s)=0$. 
\begin{gather*} \partial_s E(c) =\int_a^b\langle\partial_su,u\rangle dt\stackrel{(\ref{mc})}{=} \int_a^b\langle\partial_tv-[u,v],u\rangle dt =-\int_a^b\langle v,\partial_t u+\ad(u)^\top u\rangle dt. \end{gather*} The curve $c_0$ in $G$ is a geodesic if and only if this derivative vanishes at $s=0$ for all variations~$c$ of~$c_0$, hence for all $v_0:[a,b]\to\g$. This is equivalent to $\frac{d}{dt} u_0=-\ad(u_0)^\top u_0$. \end{proof} The Euler equation for a left invariant metric on a Lie group is $\frac{d}{dt} u=\ad(u)^\top u$. In the case $G=SO(3)$ one obtains the equations of the rigid body. Denoting by $(\ , \ )$ the pairing between $\g^*$ and $\g$, the {\it inertia operator} \cite{AK} is def\/ined by \begin{gather*} A:\g\to\g^*,\qquad A(X)=\langle X,\cdot\rangle,\qquad \text{\ie} \qquad (A(X),Y)=\langle X,Y\rangle, \qquad\forall\, X,Y\in\g. \end{gather*} It is injective, but not necessarily surjective for inf\/inite dimensional $\g$. The image of $A$ is called {\it the regular part of the dual} and is denoted by $\g^*_{\rm reg}$. Let $\ad^*$ be the coadjoint action of $\g$ on $\g^*$ given by $(\ad^*(X)m,Y)=(m,-\ad(X)Y)$, for $m\in\g^*$. The inertia operator relates $\ad(X)^\top$ to the opposite of the coadjoint action of $X$, \ie \begin{gather}\label{iden} \ad^*(X)A(Y)=-A(\ad(X)^\top Y). \end{gather} Hence the inertia operator transforms the Euler equation (\ref{euler}) into an equation for $m=A(u)$: \begin{gather}\label{hami} \frac{d}{dt}m=\ad^*(u)m, \end{gather} result known also as the second Euler theorem. First Euler theorem states that the solution of (\ref{hami}) with $m(a)=m_0$ is \[ m(t)=\Ad^*(c(t))m_0, \] where $u=\delta^r c$ and $c(a)=e$. Indeed, $\frac{d}{dt}m=\ad^*(\delta^r c)\Ad^*(c) m_0 =\ad^*(u)m$. 
\begin{remark} Equation (\ref{hami}) is a Hamiltonian equation on $\g^*$ with the canonical Poisson bracket \begin{gather*} \{f,g\}(m)=\Big(m,\Big[\frac{\de f}{\de m},\frac{\de g}{\de m}\Big]\Big),\qquad f,g\in C^\oo(\g^*) \end{gather*} and the Hamiltonian function $h\in C^\oo(\g^*)$, $h(m) =\frac12(m,A^{-1}m)=\frac12(m,u)$. \end{remark} \begin{remark} The Euler--Lagrange equation for a right invariant Lagrangian $L:TG\to\mathbb R$ with value $l:\g\to\mathbb R$ at the identity is: \begin{gather*} \frac{d}{dt}\frac{\de l}{\de u}=\ad^*(u)\frac{\de l}{\de u}, \end{gather*} also called the right Euler--Poincar\'e equation \cite{Poincare,MR0}. The Hamiltonian form (\ref{hami}) of Euler's equation is obtained for $l(u)=\frac12\langle u,u\rangle$ since the functional derivative $\frac{\de l}{\de u}$ is $A(u)$ in this case. \end{remark} \section {Ideal hydrodynamics}\label{3} Let $G=\Diff_{\mu}(M)$ be the regular Fr\' echet Lie group of volume preserving dif\/feomorphisms of a~compact Riemannian manifold $(M,g)$ with induced volume form $\mu$. Its Lie algebra is $\g=\mathfrak{X}_{\mu}(M)$, the Lie algebra of divergence free vector f\/ields, with Lie bracket the opposite of the usual bracket of vector f\/ields $\ad(X)Y=-[X,Y]$. We consider the right invariant metric on~$G$ given by the~$L^2$ scalar product on vector f\/ields \begin{gather}\label{el2} \langle X,Y\rangle =\int_M g(X,Y)\mu. \end{gather} In the $L^2$ orthogonal decomposition $\X(M)=\X_\mu(M)\oplus\grad(C^\oo(M))$, we denote by $P$ the projection on $\X_\mu(M)$. The adjoint of $\ad(X)$ is $\ad(X)^\top Y=P(\nabla_X Y+(\nabla X)^\top Y)$ where $\nabla$ denotes the Levi-Civita covariant derivative. 
Indeed, \begin{gather*} \langle \ad(X)^\top Y,Z\rangle =\int_Mg(Y,[Z,X])\mu =\int_Mg(Y,\nabla_ZX-\nabla_XZ)\mu\\ \phantom{\langle \ad(X)^\top Y,Z\rangle}{}=\int_Mg((\nabla X)^\top Y,Z)\mu+\int_Mg(\nabla_X Y,Z)\mu =\langle P(\nabla_X Y+(\nabla X)^\top Y),Z\rangle, \end{gather*} with $(\nabla X)^\top$ denoting the adjoint of the (1,1)-tensor $\nabla X$ relative to the metric: $g(\nabla_ZX,Y)=g(Z,(\nabla X)^\top Y)$. In particular $\ad(X)^\top X=P(\nabla_XX)=\nabla_XX+\grad p$, with $p$ the smooth function uniquely def\/ined up to a constant by $\De p=\div(\nabla_XX)$. Now Theorem \ref{theo} assures that the geodesic equation in $\Diff_\mu(M)$, in terms of the right logarithmic derivative $u$ of the geodesic, is {\bf Euler's equation for ideal f\/low} with velocity $u$ and pressure $p$ \cite{Moreau,Arnold,EM}: \begin{gather}\label{ihd} \partial_tu=-\nabla_uu-\grad p,\qquad\div u=0. \end{gather} The geodesic equation (\ref{ihd}) written for the vorticity 2-form $\om=du^\flat$, $\flat$ denoting the inverse of the Riemannian lift $\sharp$ and $L$ the Lie derivative, is \begin{gather}\label{vort} \partial_t\om=-L_u\om, \end{gather} because $(\nabla_uu)^\flat=L_uu^\flat-\frac12d(g(u,u))$ and $(\grad p)^\flat=dp$. \section{Burgers equation} Let $G=\Diff(S^1)$ be the group of orientation preserving dif\/feomorphisms of the circle and $\g=\mathfrak{X}(S^1)$ the Lie algebra of vector f\/ields. The Lie bracket is $[X,Y]=X'Y-XY'$, the negative of the usual bracket on vector f\/ields (vector f\/ields on the circle are identif\/ied here with their coef\/f\/icient functions in $C^\oo(S^1)$). We consider the right invariant metric on $G$ given by the $L^2$ scalar product $\langle X,Y\rangle =\int_{S^1}XYdx$ on $\g$. The adjoint of $\ad(X)$ is $\ad(X)^\top Y=2X'Y+XY'$, because: \begin{gather*} \langle \ad(X)^\top Y,Z\rangle =\int_{S^1}Y(X'Z-XZ')dx =\int_{S^1}(X'Y+(XY)')Zdx=\langle 2X'Y+XY',Z\rangle . 
\end{gather*} It follows from Theorem \ref{theo} that the geodesic equation on $\Diff(S^1)$ in terms of the right logarithmic derivative $u:I\to C^\oo(S^1)$ is {\bf Burgers inviscid equation} \cite{Burgers}: \begin{gather}\label{burgers} \partial_tu=-3uu'. \end{gather} The higher dimensional Burgers equation is the {\bf template matching equation}, used for comparing images via a deformation induced distance. It is the geodesic equation on $\Diff(M)$, the dif\/feomorphism group of a compact Riemannian manifold $(M,g)$, for the right invariant $L^2$ metric \cite{HMA,V3}: \begin{gather}\label{matching} \partial_tu=-\nabla_uu-(\div u)u-\tfrac12\grad g(u,u). \end{gather} Indeed, \begin{gather}\label{brasov} \ad(X)^\top=(\div X)1+\nabla_X+(\nabla X)^\top,\qquad\forall \, X\in\X(M), \end{gather} because as in Section \ref{3} we compute $\langle \ad(X)^\top Y,Z\rangle =\int_Mg((\nabla X)^\top Y,Z)\mu+\int_Mg(\nabla_X Y,Z)\mu -\int_ML_Xg(Y,Z)\mu =\langle (\nabla X)^\top Y+\nabla_XY+(\div X)Y,Z\rangle$ for all vector f\/ields $X$, $Y$, $Z$ on $M$. In particular for $M=S^1$ and $u$ a curve in $\X(S^1)$, identif\/ied with $C^\oo(S^1)$, $\div u=u'$ and $g(u,u)=u^2$, so each of the three terms in the right hand side of (\ref{matching}) is $-uu'$ and we recover Burgers equation (\ref{burgers}). \section{Abelian extensions} A bilinear skew-symmetric map $\om:\g\x\g\to V$ is a 2-cocycle on the Lie algebra $\g$ with values in the $\g$-module $V$ if it satisf\/ies the condition \begin{gather*} \sum_{\rm cycl}\om([X_1,X_2],X_3)=\sum_{\rm cycl}b(X_1)\om(X_2,X_3), \qquad X_1,X_2,X_3\in\g, \end{gather*} where $b:\g\to L(V)$ denotes the Lie algebra action on $V$. It determines an Abelian Lie algebra extension $\hat\g:=V\rtimes_\om\g$ of $\g$ by the $\g$-module $V$ with Lie bracket \begin{gather}\label{bra} [(v_1,X_1),(v_2,X_2)]=(b(X_1)v_2-b(X_2)v_1+\om(X_1,X_2),[X_1,X_2]). 
\end{gather} There is a 1-1 correspondence between the second Lie algebra cohomology group $H^2(\g,V)$ and equivalence classes of Abelian Lie algebra extensions $0\to V\to\hat\g\to\g\to 0$. When $G$ is inf\/inite dimensional, the two obstructions for the integrability of such an Abelian Lie algebra extension to a Lie group extension of the connected Lie group $G$ involve $\pi_1(G)$ and~$\pi_2(G)$~\cite{Neeb}. The Lie algebra 2-cocycle $\om$ is integrable if \begin{itemize}\itemsep=0pt \item the period group $\Pi_\om\subset V$ (the group of spherical periods of the equivariant $V$-valued 2-form on $G$ def\/ined by $\om$) is discrete and \item the f\/lux homomorphism $F_\om:\pi_1(G)\to H^1(\g,V)$ vanishes. \end{itemize} Then for any discrete subgroup $\Ga$ of the subspace of $\g$-invariant elements of $V$ with $\Ga\supseteq\Pi_\om$, there is an Abelian Lie group extension $1\to T\to\hat G\to G\to 1$ of $G$ by $T=V/\Ga$. There are two special cases: \begin{enumerate}\itemsep=0pt \item {\bf Semidirect product}: $\hat\g=V\rtimes\g$, obtained when $\om=0$.\\ An example is the semidirect product $\g^*\rtimes G$ for the coadjoint $G$-action on $\g^*$, called the magnetic extension in \cite{AK}. It has the Lie algebra $\g^*\rtimes\g$, a semidirect product for the coadjoint $\g$-action $b=\ad^*$ on $\g^*$. \item {\bf Central extension}: $\hat\g=V\x_\om\g$, obtained when $b=0$.\\ An example is the Virasoro algebra $\mathbb R\x_\om\X(S^1)$, a central extension of the Lie algebra of vector f\/ields on the circle given by the Virasoro cocycle $\om(X,Y)=\int_{S^1}(X'Y''-X''Y')dx$. It has a corresponding Lie group extension of the group $\Diff(S^1)$ of orientation preserving dif\/feomorphisms of the circle, def\/ined by the Bott group cocycle: \begin{gather}\label{bott} c(\ph,\ps)=\int_{S^1}\log(\ph'\circ\ps)d\log\ps',\qquad \ph,\ps\in\Diff(S^1). 
\end{gather} \end{enumerate} An example of a general Abelian Lie algebra extension is $C^\oo(M)\rtimes_\om\X(M)$, the Abelian extension of the Lie algebra of vector f\/ields on the manifold $M$ with the opposite bracket by the natural module of smooth functions on $M$, the Lie algebra action being $b(X)f=-L_Xf$. The cocycle $\om:\X(M)\x\X(M)\to C^\oo(M)$ is given by a closed dif\/ferential 2-form $\et$ on $M$. If $\et$ is an~integral form, then there is a principal circle bundle $P$ over $M$ with curvature $\et$. In this case the group of equivariant automorphisms of $P$ is a Lie group extension integrating the Lie algebra cocycle $\om$: \begin{gather}\label{gauge} 1\to C^\oo(M,\TT)\to\Diff(P)^\TT\to\Diff(M)_{[P]}\to 1. \end{gather} Here $C^\oo(M,\TT)$ is the gauge group of $P$ and $\Diff(M)_{[P]}$ is the group of dif\/feomorphisms of $M$ preserving the bundle class $[P]$ under pullbacks (group having the same identity component as $\Diff(M)$). \section{Geodesic equations on Abelian extensions}\label{6} Following \cite{V1} we write down the geodesic equations on an Abelian Lie group extension $\hat G$ of $G$ with respect to the right invariant metric def\/ined with the scalar product \begin{gather}\label{sca} \langle (v_1,X_1),(v_2,X_2)\rangle _{\hat\g}= \langle v_1,v_2\rangle _V+\langle X_1,X_2\rangle _\g \end{gather} on its Lie algebra $\hat\g=V\rtimes_\om\g$. Here $\langle \ , \ \rangle _\g$ and $\langle \ , \ \rangle _V$ are scalar products on $\g$ and $V$. We have to assume the existence of the following maps: the adjoint $\ad(X)^\top:\g\to\g$ and the adjoint $b(X)^\top:V\to V$ for any $X\in\g$, the linear map $h:V\to L_{\rm skew}(\g)$ taking values in the space of skew-adjoint operators on $\g$, def\/ined by \begin{gather*} \langle h(v)X_1,X_2\rangle _\g=\langle \om(X_1,X_2),v\rangle _V, \end{gather*} and the bilinear map $l:V\x V\to\g$, def\/ined by \begin{gather*} \langle l(v_1,v_2),X\rangle _\g=\langle b(X)v_1,v_2\rangle _V. 
\end{gather*} The diamond operation $\diamond:V\x V^*\to\g$ in \cite{HMR} corresponds to our map $l$ via $\langle \ , \ \rangle_V$. \begin{proposition}\label{Abelian} The geodesic equation on the Abelian extension $\hat G$ for the right invariant metric defined by the scalar product \eqref{sca} on $\hat\g$, written for the right logarithmic derivative $(f,u)$, \ie for curves $u$ in $\g$ and $f$ in $V$, is \begin{gather*} \frac{d}{dt}u =-\ad(u)^\top u-h(f)u+l(f,f),\\ \frac{d}{dt}f =-b(u)^\top f. \end{gather*} \end{proposition} \begin{proof} We compute the adjoint of $\ad(v,X)$ in $V\rtimes_\om\g$ \wrt the scalar product (\ref{sca}) \begin{gather*} \langle\ad(v_1,X_1)^\top (v_2,X_2),(v_3,X_3)\rangle_{\hat\g} =\langle(v_2,X_2),(b(X_1)v_3-b(X_3)v_1+\om(X_1,X_3),[X_1,X_3])\rangle_{\hat\g}\\ \qquad{} =\langle v_2,b(X_1)v_3\rangle_V+\langle X_2,[X_1,X_3]\rangle_\g +\langle v_2,\om(X_1,X_3)\rangle_V-\langle v_2,b(X_3)v_1\rangle_V\\ \qquad{} =\langle(b(X_1)^\top v_2,\ad(X_1)^\top X_2+h(v_2)X_1-l(v_1,v_2)),(v_3,X_3)\rangle_{\hat\g}. \end{gather*} The result follows now from Euler's equation (\ref{euler}). \end{proof} \begin{remark}\label{rema1} When the scalar product on $V$ is $\g$-invariant, \ie $\langle b(X)v_1,v_2\rangle_V+\langle v_1,b(X)v_2\rangle_V=0$, then $l$ is skew-symmetric and the geodesic equation becomes \begin{gather*} \frac{d}{dt}u =-\ad(u)^\top u-h(f)u,\\ \frac{d}{dt}f =b(u) f. \end{gather*} \end{remark} \section{Geodesic equations on semidirect products}\label{7} A special case of Proposition \ref{Abelian}, obtained for $\om=0$, is: \begin{corollary}\label{semidirect} The geodesic equation on the semidirect product Lie group $V\rtimes G$ for the right invariant metric defined by the scalar product \eqref{sca}, written for the curve $(f,u)$ in $V\rtimes\g$, is \begin{gather*} \frac{d}{dt}u =-\ad(u)^\top u+l(f,f),\\ \frac{d}{dt}f =-b(u)^\top f. 
\end{gather*} It reduces to \begin{gather*} \frac{d}{dt}u =-\ad(u)^\top u,\\ \frac{d}{dt}f =b(u) f \end{gather*} when the scalar product on $V$ is $\g$-invariant. \end{corollary} \subsection*{Passive scalar motion} The geodesic equation on the semidirect product $C^\oo(M)\rtimes\Diff_\mu(M)$ with $L^2$ right invariant metric, written for the right logarithmic derivative $(f,u):I\to C^\oo(M)\rtimes\X_\mu(M)$ models {\bf passive scalar motion} \cite{Hattori}: \begin{gather} \partial_tu =-\nabla_uu-\grad p,\nonumber\\ \partial_tf =-df(u).\label{star} \end{gather} In this case the $L^2$ scalar product on $C^\oo(M)$ is $\X_\mu(M)$-invariant and we apply Corollary~\ref{semidirect} to get this geodesic equation. \section{Magnetohydrodynamics} Let $A:\g\to\g^*$ be the inertia operator def\/ined by a f\/ixed scalar product $\langle \ ,\ \rangle $ on $\g$. The scalar product on the regular dual $\g^*_{\rm reg}=A(\g)$ induced via $A$ by this scalar product in $\g$ is again denoted by $\langle \ , \ \rangle $. Next we consider the subgroup $\g^*_{\rm reg}\rtimes G$ of the magnetic extension $\g^*\rtimes G$, with right invariant metric of type (\ref{sca}) \cite{V6}. \begin{proposition}\label{corollary} If the adjoint of $\ad(X)$ exists for any $X\in\g$, then the geodesic equation on the magnetic extension $\g^*_{\rm reg}\rtimes G$ with right invariant metric, written for the curve $(A(v),u)$ in $\g^*_{\rm reg}\rtimes\g$ is \begin{gather*} \frac{d}{dt}u =-\ad(u)^\top u+\ad(v)^\top v,\\ \frac{d}{dt}v =\ad(u)v. \end{gather*} \end{proposition} \begin{proof} We have to compute the map $l:\g^*_{\rm reg}\x\g^*_{\rm reg}\to\g$ and the adjoint $b(X)^\top:\g^*_{\rm reg}\to\g^*_{\rm reg}$ for $b=\ad^*$. We use the fact (\ref{iden}) that the coadjoint action on the image of $A$ comes from the opposite of $\ad(\cdot)^\top$. 
Then $l(A(Y_1),A(Y_2))=\ad(Y_2)^\top Y_1$ because \begin{gather*} \langle l(A(Y_1),A(Y_2)),X\rangle=\langle\ad^*(X)A(Y_1),A(Y_2)\rangle =-\langle \ad(X)^\top Y_1,Y_2\rangle=\langle\ad(Y_2)^\top Y_1,X\rangle. \end{gather*} Also the adjoint of $b(X)=\ad^*(X)$ exists and $b(X)^\top A(Y)=-A(\ad(X)Y)$. The result follows now from Corollary \ref{semidirect}. \end{proof} For $G=SO(3)$ and left invariant metric on its magnetic extension $\g^*\rtimes G$ one obtains Kirchhof\/f equations for a rigid body moving in a f\/luid. Let $G=\Diff_\mu(M)$ be the group of volume preserving dif\/feomorphisms on a compact mani\-fold $M$ and $\g=\X_\mu(M)$. The regular part $\g^*_{\rm reg}$ of $\g^*$ is naturally isomorphic to the quotient space $\Om^1(M)/d\Om^0(M)$ of dif\/ferential 1-forms modulo exact 1-forms, the pairing being $([\al],X)=\int_M\al(X)\mu$, for $\al\in\Om^1(M)$. More precisely $A(X)$ is the coset $[X^\flat]$ obtained via the Riemannian metric. Considering the right invariant $L^2$ metric on the magnetic extension $\g^*_{\rm reg}\rtimes G$ determined by the $L^2$ scalar product (\ref{el2}) on vector f\/ields, the geodesic equations for the time dependent divergence free vector f\/ields $u$ and $B$ are (by Proposition \ref{corollary}) \begin{gather*} \partial_tu =-\nabla_u u+\nabla_B B-\grad p,\\ \partial_tB =-L_uB. \end{gather*} We specialize to a three dimensional manifold $M$. The curl of a vector f\/ield $X$ is the vector f\/ield def\/ined by the relation $i_{\curl X}\mu=dX^\flat$ and the cross product of two vector f\/ields $X$ and~$Y$ is the vector f\/ield def\/ined by the relation $(X\x Y)^\flat=i_Yi_X\mu$. A short computation gives $(\curl X\x X)^\flat=i_XdX^\flat=L_XX^\flat-dg(X,X)=(\nabla_XX)^\flat-\frac12dg(X,X)$, hence $\nabla_XX=\curl X\x X+\frac12\grad g(X,X)$. 
The geodesic equations above are in this case the equations of {\bf ideal magnetohydrodynamics} with velocity $u$, magnetic f\/ield $B$ and pressure $p$ \cite{VD,MRW}: \begin{gather*} \partial_tu =-\nabla_u u+\curl B\x B-\grad p,\\ \partial_tB =-L_uB. \end{gather*} \subsection*{Magnetic hydrodynamics with asymmetric stress tensor} Let $M$ be a 3-dimensional compact parallelizable Riemannian manifold with induced volume form $\mu$ and let $G=\Diff_\mu(M)$ with $\g=\X_\mu(M)$. Each vector f\/ield $X$ on $M$ can be identif\/ied with a smooth function in $C^\oo(M,\mathbb R^3)$, and $j(X)\in C^\oo(M,\gl(3,\mathbb R))$ denotes its Jacobian. Then $\omega(X,Y)=[\tr(j(X)dj(Y))]\in\Omega^1(M)/d\Omega^0(M)$ is a Lie algebra 2-cocycle on $\g$ with values in the regular dual $\g^*_{\rm reg}$. Considering the $L^2$ scalar product on the Abelian extension $\g_{\rm reg}^*\rtimes_\om\g$, we get the following Euler equation \cite{Billig} for time dependent divergence free vector fields $u$ and $B$: \begin{gather*} \partial_tu =-\nabla_u u+\curl B\x B+\tr (j(B)\grad j(u))-\grad p,\\ \partial_tB =-L_uB, \end{gather*} modeling {\bf magnetic hydrodynamics with asymmetric stress tensor} $T=j(B)\circ j(u)$. \section{Geodesic equations on central extensions} When $V=\mathbb R$ is the trivial $\g$-module, then the Lie algebra action $b$ vanishes and we get a central extension $\mathbb R\times_\om\g$ def\/ined by the cocycle $\om:\g\times\g\to\mathbb R$. 
A consequence of Proposition \ref{Abelian} is: \begin{corollary}\label{central} The geodesic equation on a $1$-dimensional central Lie group extension $\hat G$ of $G$ with right invariant metric determined by the scalar product $\langle (a,X),(b,Y)\rangle_{\hat\g}=\langle X,Y\rangle_\g+ab$ on its Lie algebra $\hat\g=\mathbb R\times_\om\g$ is \begin{gather*} \frac{d}{dt}u=-\ad(u)^\top u-ak(u),\qquad a\in\mathbb R, \end{gather*} where $u$ is a curve in $\g$ and $k\in L_{\rm skew}(\g)$ is defined by the Lie algebra cocycle $\om$ via \begin{gather*} \langle k(X),Y\rangle =\om(X,Y),\qquad\forall \, X,Y\in\g. \end{gather*} \end{corollary} \begin{proof} The central extension is a particular case of an Abelian extension, so Proposition \ref{Abelian} can be applied. The linear map $h:\mathbb R\to L_{\rm skew}(\g)$ has the form $h(a)X=ak(X)$, because $\langle h(a)X_1,X_2\rangle_\g=a\om(X_1,X_2)=\langle ak(X_1),X_2\rangle_\g$. The $\g$-module $\mathbb R$ being trivial, $\frac{d}{dt}a=0$, so $a\in\mathbb R$ is constant. \end{proof} \subsection*{KdV equation} The geodesic equation on the Bott--Virasoro group (\ref{bott}) for the right invariant $L^2$ metric is the {\bf Korteweg--de Vries} equation \cite{OK}. In this case the Lie algebra is the central extension of $\g=\X(S^1)$ (identif\/ied with $C^\oo(S^1)$) given by the Virasoro cocycle $\om(X,Y)=\int_{S^1}(X'Y''-X''Y')dx$. The computation $\om(X,Y)=-2\int_{S^1}X''Y'dx=2\int_{S^1}X'''Ydx=\langle X''',Y\rangle$ implies $k(X)=2X'''$ and by Corollary \ref{central} the geodesic equation for $u:I\to C^\oo(S^1)$ is the KdV equation: \begin{gather*} \partial_tu=-3uu'-2au''',\qquad a\in\mathbb R. \end{gather*} \section{Superconductivity equation}\label{sec10} Given a compact manifold $M$ with volume form $\mu$, each closed 2-form $\et$ on $M$ def\/ines a Lichnerowicz 2-cocycle $\om_\et$ on the Lie algebra of divergence free vector f\/ields, \begin{gather*} \om_\et(X,Y)=\int_M\et(X,Y)\mu. 
\end{gather*} The kernel of the f\/lux homomorphism \begin{gather*} \flux_\mu:X\in\X_\mu(M)\mapsto [i_X\mu]\in H^{n-1}(M,\mathbb R) \end{gather*} is the Lie algebra $\X_\mu^{\rm ex}(M)$ of exact divergence free vector f\/ields. On a 2-dimensional manifold it consists of vector f\/ields $X$ possessing stream functions $f\in C^\oo(M)$, \ie $i_X\mu=df$ ($X$ is the Hamiltonian vector f\/ield with Hamiltonian function $f$). On a 3-dimensional manifold it consists of vector f\/ields $X$ possessing vector potentials $A\in\X(M)$, \ie $i_X\mu=dA^\flat$ ($X$ is the curl of $A$). The Lie algebra homomorphism $\flux_\mu$ integrates to the f\/lux homomorphism (due to Thurston) $\Flux_\mu$ on the identity component of the group of volume preserving dif\/feomorphisms: \begin{gather*} \Flux_\mu:\Diff_\mu(M)_0\to H^{n-1}(M,\mathbb R)/\Ga,\qquad \Flux_\mu(\ph)=\int_0^1[i_{\de^r\ph(t)}\mu]dt\mod\Ga, \end{gather*} where $\ph(t)$ is any volume preserving dif\/feotopy from the identity on $M$ to $\ph$ and $\Ga$ a discrete subgroup of $H^{n-1}(M,\mathbb R)$. The kernel of $\Flux_\mu$ is, by def\/inition, the Lie group $\Diff_\mu^{\rm ex}(M)$ of {\it exact volume preserving diffeomorphisms}. It coincides with $\Diff_\mu(M)_0$ if and only if $H^{n-1}(M,\mathbb R)=0$. For $\et$ integral, the Lichnerowicz cocycle is integrable to $\Diff_\mu^{\rm ex}(M)$ \cite{Ismagilov}. When $M$ is 3-dimensional, there exists a vector f\/ield $B$ on $M$ def\/ined with $\et=-i_B\mu$. The 2-form $\et$ is closed if and only if $B$ is divergence free. The integrality condition of $\et$ expresses as $\int_S(B\cdot n)d\si\in\ZZ$ on every closed surface $S\subset M$. The {\bf superconductivity equation} models the motion of a high density electronic gas in a~magnetic f\/ield $B$ with velocity $u$: \begin{gather}\label{superconductivity} \partial_tu=-\nabla_uu-au\x B-\grad p,\qquad a\in\mathbb R. 
\end{gather} It is the geodesic equation on a central extension of the group of volume preserving dif\/feomorphisms for the right invariant $L^2$ metric \cite{Zeitlin2,V1}, when $M$ is simply connected. Indeed, \begin{gather*} \om_\et(X,Y)=\int_M\et(X,Y)\mu=-\int_M\mu(B,X,Y)\mu=\int_M g(X\x B,Y)\mu =\langle P(X\x B),Y\rangle \end{gather*} hence the map $k\in L_{\rm skew}(\g)$ determined by the Lichnerowicz cocycle $\om_\et$ is $k(X)=P(X\x B)$, with $P$ denoting the orthogonal projection on the space of divergence free vector f\/ields. Now we apply Corollary \ref{central}. \section[Charged ideal fluid]{Charged ideal f\/luid}\label{11} Let $M$ be an $n$-dimensional Riemannian manifold with Levi-Civita connection $\nabla$ and volume form $\mu$, and $\et$ a closed integral dif\/ferential two-form. Let $B$ be an $(n-2)$ vector f\/ield on $M$ (i.e. $B\in C^\oo(\wedge^{n-2}TM)$) such that $\et=(-1)^{n-2}i_B\mu$ is a closed two-form. The cross product of a~vector f\/ield $X$ with $B$ is the vector f\/ield $X\x B=(i_{X\wedge B}\mu)^\sharp=(i_X\et)^\sharp$, $\sharp$ denoting the Riemannian lift. When $M$ is 3-dimensional, then $B$ is a divergence free vector f\/ield with $\et=-i_B\mu$ and $\times$ is the cross product of vector f\/ields. From the integrality of $\et$ follows the existence of a~principal $\TT$-bundle $\pi:P\to M$ with a~principal connection 1-form $\al$ on $P$ having curvature $\et$. The associated Kaluza--Klein metric~$\kappa$ on~$P$, def\/ined at a point~$x\in P$ by \begin{gather*} \kappa_x(\tilde X,\tilde Y)=g_{\pi(x)} (T_x\pi.\tilde X,T_x\pi.\tilde Y)+\al_x(\tilde X)\al_x(\tilde Y),\qquad \tilde X,\tilde Y\in T_xP \end{gather*} determines the volume form $\tilde\mu=\pi^*\mu\wedge\al$ on $P$. 
The group $\Diff_{\tilde\mu}(P)^\TT$ of volume preserving automorphisms of the principal bundle $P$ is an Abelian Lie group extension of $\Diff_\mu(M)_{[P]}$, the group of volume preserving dif\/feomorphisms preserving the bundle class $[P]$, by the gauge group $C^\oo(M,\TT)$ (an extension contained in (\ref{gauge})). The corresponding Abelian Lie algebra extension \begin{gather*} 0\to C^\oo(M)\to\X_{\tilde\mu}(P)^\TT\to\X_\mu(M)\to 0 \end{gather*} is described again by the Lie algebra cocycle $\om:\X_\mu(M)\x\X_\mu(M)\to C^\oo(M)$ given by $\et$. The Kaluza--Klein metric on $P$ determines a right invariant $L^2$ metric on the group of volu\-me preserving automorphisms of the principal $\TT$-bundle $P$. The geodesic equation written in terms of the right logarithmic derivative $(\rho,u)$, with $\rh$ a time dependent function and $u$ a time dependent divergence free vector f\/ield on $M$, is: \begin{gather*} \partial_tu =-\nabla_uu-\rh u\x B-\grad p,\nonumber\\ \partial_t\rh =-d\rh(u). \end{gather*} It models the motion of a {\bf charged ideal f\/luid} with velocity $u$, pressure $p$ and charge density~$\rh$ in a f\/ixed magnetic f\/ield $B$ \cite{V1}. Indeed, the connection $\al$ def\/ines a horizontal lift and identifying the pair $(f,X)$, $f\in C^\oo(M)$, $X\in\X_\mu(M)$ with the sum of the horizontal lift of $X$ and the vertical vector f\/ield given by $f$, we get an isomorphism between the Abelian Lie algebra extension $C^\oo(M)\rtimes_\om\X_\mu(M)$ and the Lie algebra $\X_{\tilde\mu}(P)^\TT$ of invariant divergence free vector f\/ields on $P$. Under this isomorphism the $L^2$ metric def\/ined by the Kaluza--Klein metric $\kappa$ is $\langle(f_1,X_1),(f_2,X_2)\rangle=\int_M(g(X_1,X_2)+f_1f_2)\mu$. The $L^2$ scalar product on functions is $\X_\mu(M)$ invariant, \ie $b(X)$ is skew-adjoint. 
The mapping $h:C^\oo(M)\to L_{\rm skew}(\X_\mu(M))$ is $h(f)X=P(fX\x B)$ because: \begin{gather*} \langle h(f)X,Y\rangle=\langle\et(X,Y),f\rangle=\int_Mf(i_{X}\et)(Y)\mu =\int_Mfg(X\times B,Y)\mu=\langle P(fX\times B),Y\rangle, \end{gather*} where $P$ denotes the orthogonal projection on the space of divergence free vector f\/ields on $M$. The result follows from Remark \ref{rema1}, knowing that $\ad(X)^\top X=P(\nabla_XX)$. \section{Geodesics on general extensions} A general extension of Lie algebras is an exact sequence of Lie algebras \begin{gather}\label{general} 0\to\h\to\hat\g\to\g\to 0. \end{gather} A section $s:\g\to\hat\g$ (\ie a right inverse to the projection $\hat\g\to\g$) induces the following mappings~\cite{AMR}: \begin{gather*} b: \ \ \g\to\Der(\h),\qquad b(X)f=[s(X),f],\\ \om:\ \ \g\x\g\to\h,\qquad \om(X_1,X_2)=[s(X_1),s(X_2)]-s([X_1,X_2]) \end{gather*} with properties: \begin{gather*} [b(X_1),b(X_2)]-b([X_1,X_2])=\ad(\om(X_1,X_2)),\\ \sum_{\rm cycl}\om([X_1,X_2],X_3)=\sum_{\rm cycl}b(X_1)\om(X_2,X_3). \end{gather*} The Lie algebra structure on the extension $\hat\g$, identif\/ied as a vector space with $\h\oplus\g$ via the section $s$, can be expressed in terms of $b$ and $\om$: \begin{gather*} [(f_1,X_1),(f_2,X_2)]=([f_1,f_2]+b(X_1)f_2-b(X_2)f_1+\om(X_1,X_2),[X_1,X_2]). \end{gather*} In particular for $\h$ an Abelian Lie algebra this is the Lie bracket (\ref{bra}) on an Abelian Lie algebra extension. 
We consider scalar products $\langle\ ,\ \rangle_\g$ on $\g$ and $\langle \ , \ \rangle_\h$ on $\h$ and, as in Section~\ref{6}, we impose the existence of several maps: $\ad(X)^\top:\g\to\g$ for any $X\in\g$, $\ad(f)^\top:\h\to\h$ for any $f\in\h$, $b(X)^\top:\h\to\h$ for any $X\in\g$, as well as the linear map $h:\h\to L_{\rm skew}(\g)$ def\/ined by \begin{gather*} \langle h(f)X_1,X_2\rangle _\g=\langle \om(X_1,X_2),f\rangle _\h, \end{gather*} and the bilinear map $l:\h\x\h\to\g$, def\/ined by \begin{gather*} \langle l(f_1,f_2),X\rangle _\g=\langle b(X)f_1,f_2\rangle _\h. \end{gather*} A result similar to Proposition \ref{Abelian} is: \begin{proposition}\label{fgb} The geodesic equation on the Lie group extension $\hat G$ of $G$ by $H$ integra\-ting~\eqref{general}, with right invariant metric determined by the scalar product \begin{gather*} \langle (f_1,X_1),(f_2,X_2)\rangle _{\hat\g}= \langle f_1,f_2\rangle _\h+\langle X_1,X_2\rangle _\g, \end{gather*} written in terms of the right logarithmic derivative $(\rh,u)$ is: \begin{gather*} \frac{d}{dt}u =-\ad(u)^\top u-h(\rho)u+l(\rh,\rh),\\ \frac{d}{dt}\rho =-\ad(\rh)^\top\rho-b(u)^\top \rho. \end{gather*} \end{proposition} \section[Ideal fluid in a fixed Yang-Mills field]{Ideal f\/luid in a f\/ixed Yang--Mills f\/ield} Let $\pi:P\to M$ be a principal $G$-bundle with principal action $\si:P\x G\to P$ and let $\Ad P=P\x_G\g$ be its adjoint bundle. The space $\Om^k(M,\Ad P)$ of dif\/ferential forms with values in $\Ad P$ is identif\/ied with the space $\Om^k_{\rm hor}(P,\g)^G$ of $G$-equivariant horizontal forms on $P$. In particular $C^\oo(M,\Ad P)=C^\oo(P,\g)^G$. We consider a principal connection 1-form $\al\in\Om^1(P,\g)^G$ on $P$. Its curvature $\et=d\al+\frac12[\al,\al]$ is an equivariant horizontal 2-form $\et\in\Om^2_{\rm hor}(P,\g)^G$, hence it can be viewed as a 2-form on $M$ with values in $\Ad P$. 
The covariant exterior derivative on $\g$-valued dif\/ferential forms on $P$ is $d^\al=\chi^*\circ d$, with $\chi:\X(P)\to\X(P)$ denoting the horizontal projection, and it induces a map $d^\al:\Om^k(M,\Ad P)\to\Om^{k+1}(M,\Ad P)$. Let $g$ be a Riemannian metric on $M$ and $\ga$ a $G$-invariant scalar product on $\g$. These data, together with the connection $\al$, def\/ine a Kaluza--Klein metric on $P$: \begin{gather*} \kappa_x(\tilde X,\tilde Y)=g_{\pi(x)} (T_x\pi.\tilde X,T_x\pi.\tilde Y)+\ga(\al_x(\tilde X),\al_x(\tilde Y)),\qquad \tilde X,\tilde Y\in T_xP. \end{gather*} The canonically induced volume form on $P$ is $\tilde\mu=\pi^*\mu\wedge\al^*\det_\ga$, where $\mu$ is the canonical volume form on $M$ induced by the Riemannian metric $g$ and $\al^*\det_\ga$ is the pullback by $\al:TP\to\g$ of the determinant $\det_\ga\in\wedge^{\dim\g}\g^*$ induced by the scalar product $\ga$ on $\g$. The gauge group of the principal bundle is identif\/ied with $C^\oo(P,G)^G$, the group of $G$-equivariant functions from $P$ to $G$, with $G$ acting on itself by conjugation. The group of automorphisms of $P$, \ie the group of $G$-equivariant dif\/feomorphisms of $P$, is an extension of $\Diff(M)_{[P]}$, the group of dif\/feomorphisms of $M$ preserving the bundle class $[P]$, by the gauge group. This is the analogue of (\ref{gauge}) for non-commutative structure group. Restricting to volume preserving dif\/feomorphisms, we get the exact sequence: \begin{gather*} 1\to C^\oo(P,G)^G\stackrel{\si}{\to}\Diff_{\tilde\mu}(P)^G\to\Diff_\mu(M)_{[P]}\to 1. \end{gather*} On the Lie algebra level the exact sequence is \begin{gather*} 0\to C^\oo(P,\g)^G\stackrel{\dot\si}{\to}\X_{\tilde\mu}(P)^G\to\X_\mu(M)\to 0. \end{gather*} The horizontal lift provides a linear section $X\mapsto X^{\rm hor}$, $\X_\mu(M)\to\X_{\tilde\mu}(P)^G$, thus identifying the pair $(f,X)\in C^\oo(P,\g)^G\oplus\X_\mu(M)$ with $\tilde X=\dot\si(f)+X^{\rm hor}\in\X_{\tilde\mu}(P)^G$. 
With this identif\/ication, the~$L^2$ metric on $\X_{\tilde\mu}(P)^G$ given by the Kaluza--Klein metric can be written as \begin{gather*} \int_P\ka((f_1,X_1),(f_2,X_2))\tilde\mu=\int_Mg(X_1,X_2)\mu+\int_P\ga(f_1,f_2)\tilde\mu. \end{gather*} A particular case of a result in \cite{GR3} is the fact that the geodesic equation on the group $\Diff_{\tilde\mu}(P)^G$ of volume preserving automorphisms of $P$ with right invariant $L^2$ metric gives the equations of motion of an {\bf ideal f\/luid moving in a f\/ixed Yang--Mills f\/ield}. Written for the right logarithmic derivative $(\rh,u):I\to C^\oo(P,\g)^G\oplus\X_\mu(M)$, these are: \begin{gather} \partial_tu =-\nabla_uu-\ga(\rh,i_u\et)^\sharp-\grad p,\nonumber\\ \partial_t\rh =-d^\al\rh(u).\label{alfa} \end{gather} Here $u$ denotes the Eulerian velocity; $\rh$, viewed as a time dependent section of $\Ad P$, denotes the magnetic charge; $\et$, viewed as a 2-form on $M$ with values in $\Ad P$, denotes the f\/ixed Yang--Mills f\/ield. The scalar product $\ga$, being $G$-invariant, can be viewed as a bundle metric on $\Ad P$. This result follows from Proposition \ref{fgb}. Indeed, in this particular case the cocycle is $\om=\et$ and the Lie algebra action is $b(X)f=-df.X^{\rm hor}=-d^\al f.X$, hence $b(X)^\top f=-b(X)f$, $l$ is skew-symmetric and $h(f)X=P(\ga(f,i_X\et)^\sharp)$. Moreover $\ad(X)^\top X=P\nabla_XX$ with $P$ the projection on divergence free vector f\/ields and $\ad(f)^\top f=[f,f]=0$, so~(\ref{alfa}) follows. The equations of a charged ideal f\/luid from Section \ref{11} are obtained for the structure group~$G$ equal to the torus $\TT$. \section{Totally geodesic subgroups}\label{14} Let $G$ be a Lie group with right invariant Riemannian metric. A Lie subgroup $H\subseteq G$ is totally geodesic if any geodesic $c:[a,b]\to G$ with $c(a)=e$ and $c'(a)\in\h$, the Lie algebra of $H$, stays in~$H$. From the Euler equation (\ref{euler}) we see that this is the case if $\ad(X)^\top X\in\h$ for all $X\in\h$. 
If there is a geodesic in $G$ in any direction of $\h$, then this condition is necessary and suf\/f\/icient, so we give the following def\/inition: the Lie subalgebra $\h$ is called {\it totally geodesic} in $\g$ if $\ad(X)^\top X\in\h$ for all $X\in\h$. \begin{remark}\label{cap} Given two totally geodesic Lie subalgebras $\h$ and $\kkk$ of the Lie algebra $\g$, the intersection $\h\cap\kkk$ is totally geodesic in $\g$, but also in $\h$ and in $\kkk$. \end{remark} \subsection*{Ideal f\/luid} The ideal f\/luid f\/low (\ref{ihd}) on $M$ preserves the property of having a stream function (if $M$ two dimensional), \resp a vector potential (if $M$ three dimensional) if and only if $\Diff_\mu^{\rm ex}(M)$ is a totally geodesic subgroup of $\Diff_\mu(M)$ for the right invariant $L^2$ metric. This means $P(\nabla_XX)\in\X_\mu^{\rm ex}(M)$ for all $X\in\X_\mu^{\rm ex}(M)$. \begin{theorem}[\cite{HTV}]\label{stefan} The only Riemannian manifolds $M$ with the property that $\Diff_\mu^{\rm ex}(M)$ is a~totally geodesic subgroup of $\Diff_\mu(M)$ with the right invariant $L^2$ metric are twisted products $M=\mathbb R^k\x_\La F$ of a flat torus $\TT^k=\mathbb R^k/\Lambda$ and a connected oriented Riemannian manifold $F$ with $H^1(F,\mathbb R)=0$. \end{theorem} In particular the ideal f\/luid f\/low on the 2-torus preserves the property of having a stream function~\cite{AK} and the ideal f\/luid f\/low on the 3-torus preserves the property of having a vector potential. \subsection*{Superconductivity} Given a compact Riemannian manifold $M$, from the Hodge decomposition follows that $\X_\mu(M)=\X_\mu^{\rm ex}(M)\oplus\X_{\rm harm}(M)$. On a f\/lat torus the harmonic vector f\/ields are those with all components constant. In the setting of Section \ref{sec10}, the next proposition determines when is $\mathbb R\rtimes_{\om_\et}\X_\mu^{\rm ex}(M)$ totally geodesic in $\mathbb R\rtimes_{\om_\et}\X_\mu(M)$, for $M=\TT^3$ and $\et=-i_B\mu$. 
\begin{proposition}[\cite{V2}] The superconductivity equation \eqref{superconductivity} on the $3$-torus preserves the pro\-perty of having a vector potential if and only if the three components of the magnetic field $B$ are constant. \end{proposition} \begin{proof} Any exact divergence free vector f\/ield $X$ on the 3-torus admits a potential 1-form $\al$ with $i_X\mu=d\al$, hence $\int_{\TT^3}g(X\x B,Y)\mu=\int_{\TT^3}i_Yi_B\mu\wedge i_X\mu =\int_{\TT^3}i_{[Y,B]}\mu\wedge\al$. Then the total geodesicity condition, which in this case says that $P(X\x B)$ is exact divergence free for all $X$ exact divergence free, is equivalent to $[Y,B]=0$ for all harmonic vector f\/ields $Y$. This is further equivalent to the fact that the three components of the magnetic f\/ield $B$ are constant. \end{proof} \subsection*{Passive scalar motion}\label{11.3} On the trivial principal $\TT$ bundle $P=M\x\TT$ we consider the volume form $\tilde\mu=\mu\wedge d\th$. Noticing that $i_{(f,X)}\tilde\mu=i_X\mu\wedge d\th+f\mu$, we get the Lie algebra isomorphisms $\X_{\tilde\mu}(M\x\TT)^\TT\cong C^\oo(M)\rtimes\X_\mu(M)$ and $\X_{\tilde\mu}^{\rm ex}(M\x\TT)^\TT\cong C_0^\oo(M)\rtimes\X_\mu^{\rm ex}(M)$, where $C_0^\oo(M)$ is the subspace of functions with vanishing integral. From \cite{V3} we know that the group of equivariant volume preserving dif\/feomorphisms is totally geodesic in the group of volume preserving dif\/feomorphisms and from Theorem \ref{stefan} we know that the group of exact volume preserving dif\/feomorphisms of a torus is totally geodesic in the group of volume preserving dif\/feomorphisms, hence by Remark \ref{cap} we obtain that for $M=\TT^2$ the subgroup $\Diff_{\tilde\mu}^{\rm ex}(M\x\TT)^\TT$ is totally geodesic in $\Diff_{\tilde\mu}(M\x\TT)^\TT$. This means that $C_0^\oo(M)\rtimes\X_\mu^{\rm ex}(M)$ is totally geodesic in $C^\oo(M)\rtimes\X_\mu(M)$ for $M=\TT^2$. 
In other words equation (\ref{star}), describing passive scalar motion, preserves the property of having a stream function if $f$ has zero integral at the initial moment. Moreover, $f$ will have zero integral at any moment. \section{Quasigeostrophic motion} Given a closed 1-form $\al$ on the compact symplectic manifold $(M,\si)$, the Roger cocycle on the Lie algebra $\X_\si^{\rm ex}(M)$ of Hamiltonian vector f\/ields on $M$ is \cite{Roger} \begin{gather*} \om_\al(H_f,H_g)=\int_Mf\al(H_g)\si^n. \end{gather*} Here $f$ and $g$ are Hamiltonian functions with zero integral for the Hamiltonian vector f\/ields $H_f$ and $H_g$. The integrability of the 2-cocycle $\omega_\alpha$ to a central extension of the group of Hamiltonian dif\/feomorphisms is an open problem. Partial results are given in \cite{Ismagilov2}. For $M=\TT^2$ the cocycle $\om_\al$ can be extended to a cocycle on the Lie algebra of symplectic vector f\/ields $\X_\si(\TT^2)$ by $\om_\al(\partial_x,\partial_y) =\om_\al(\partial_x,H_f)=\om_\al(\partial_y,H_f)=0$ \cite{Kirillov}. The extendability of $\om_\al$ to $\X_\si(M)$ for $M$ an arbitrary symplectic manifold is studied in \cite{V4}. To a divergence free vector f\/ield $X$ on the 2-torus one can assign a smooth function $\ps_X$ on the 2-torus uniquely determined by $X$ through $d\ps _X=i_X\si-\langle i_X\si\rangle$ and $\int_{\TT^2}\ps _X\si=0$. Here $\langle \ \rangle$ denotes the average of a 1-form on the torus: $\langle adx+bdy\rangle=(\int_{\TT^2}a\si)dx+(\int_{\TT^2}b\si)dy$. In particular $\ps_{H_f}=f$ whenever $f$ has zero integral. \begin{proposition}[\cite{V5}] The Euler equation for the $L^2$ scalar product on $\mathbb R\x_{\om_\al}\X_\si(\TT^2)$ is \begin{gather}\label{sharp} \partial_tu=-\nabla_uu-\ps _u\al^\sharp-\grad p, \end{gather} where the function $\ps _u$ is uniquely determined by $u$ through $d\ps _u=i_u\si-\langle i_u\si\rangle$ and $\int_{\TT^2}\ps _u\si=0$. 
\end{proposition} \begin{proof} To apply Corollary \ref{central} we compute the map $k$ corresponding to the cocycle $\om_\al$. Using the fact that $\om_\al(\partial_x,X)=\om_\al(\partial_y,X)=0$ for all $X\in\X_\si(\TT^2)$, we get \begin{gather*} \om_\al(u,X)=\om_\al(H_{\ps _u},X)=\int_{\TT^2}\ps _u\al(X)\si =\int_{\TT^2}g(\ps _u\al^\sharp,X)\si=\langle P(\ps _u\al^\sharp),X\rangle, \end{gather*} hence $k(u)=P(\ps _u\al^\sharp)$. Knowing also that $\ad(u)^\top u=P(\nabla_uu)$, we get (\ref{sharp}) as the Euler equation for $a=1$. \end{proof} \begin{proposition}[\cite{V5}]\label{totham} If the two components of the $1$-form $\al$ on $\TT^2$ are constant, then equa\-tion~\eqref{sharp} preserves the property of having a stream function, \ie $\mathbb R\x_{\om_\al}\X_\si^{\rm ex}(\TT^2)$ is totally geodesic in $\mathbb R\x_{\om_\al}\X_\si(\TT^2)$. In this case the restriction of \eqref{sharp} to Hamiltonian vector fields is \begin{gather}\label{hfhf} \partial_tH_\ps =-\nabla_{H_\ps }H_\ps -\ps \al^\sharp-\grad p. \end{gather} \end{proposition} \begin{proof} By Theorem \ref{stefan} on the 2-torus $P(\nabla_XX)$ is Hamiltonian for $X$ Hamiltonian, hence the totally geodesicity condition in this case is equivalent to the fact that $P(\ps _X\al^\sharp)$ is Hamiltonian for $X$ Hamiltonian. By Hodge decomposition this means $\ps _X\al^\sharp$ is orthogonal to the space of harmonic vector f\/ields, so \begin{gather*} \langle P(\ps _X\al^\sharp),Y\rangle =\int_{\TT^2}g(\ps _X\al^\sharp,Y)\si=\int_{\TT^2}\al(Y)\ps _X\si=0,\qquad\forall\; Y\text{ harmonic}. \end{gather*} On the torus the harmonic vector f\/ields $Y$ are the vector f\/ields with constant components and the functions $\ps _X$ have vanishing integral by def\/inition, so the expression above vanishes for all constant vector f\/ields $Y$ if the 1-form $\al$ has constant coef\/f\/icients. 
\end{proof} On the 2-torus with $\si=dx\wedge dy$ and $u=H_\ps $, the vorticity 2-form is $du^\flat=d(H_\ps)^\flat=(\De\ps)\si$, hence $\om=\De\ps$ is the vorticity function. Since $L_u(du^\flat)=L_{H_\ps }(\om\si)=(L_{H_\ps }\om)\si=\{\om,\ps\}\si$, the vorticity equation (\ref{vort}) written for the vorticity function $\om$ becomes \begin{gather*} \partial_t\om=-\{\om,\ps\}. \end{gather*} For $\al=\be dy$, $\be\in\mathbb R$, we have $d(\ps\al^\sharp)^\flat=d\ps\wedge\al=(\be\partial_x\ps)\si$. Hence the Euler equation (\ref{hfhf}) written for the vorticity function $\om=\De\ps$ with $\ps$ the stream function of $u$, is the equation for {\bf quasigeostrophic motion in $\be$-plane approximation} \cite{ZP,HZ} \begin{gather*} \partial_t\om=-\{\om,\ps\}-\be\partial_x\ps, \end{gather*} with $\be$ the gradient of the Coriolis parameter. \section{Central extensions of semidirect products}\label{13} Let $\g$ be a Lie algebra with scalar product $\langle \ , \ \rangle_\g$ and $V$ a $\g$-module with $\g$-action $b$ and $\g$-invariant scalar product $\langle \ , \ \rangle_V$. Each Lie algebra 1-cocycle $\al\in Z^1(\g,V)$ (\ie a linear map $\al:\g\to V$ which satisf\/ies $\al([X_1,X_2])=b(X_1)\al(X_2)-b(X_2)\al(X_1)$) def\/ines a 2-cocycle $\om$ on the semidirect product $V\rtimes\g$ \cite{OR}: \begin{gather}\label{or} \om((v_1,X_1),(v_2,X_2))=\langle\al(X_1),v_2\rangle_V-\langle\al(X_2),v_1\rangle_V. \end{gather} \begin{proposition}\label{p6} The Euler equation on the central extension $(\g\ltimes V)\x_\om\mathbb R$ with respect to the scalar product $\langle \ , \ \rangle_\g+\langle \ , \ \rangle_V$, written for curves $u$ in $\g$ and $f$ in $V$, is \begin{gather*} \frac{d}{dt} u =-\ad(u)^\top u+a\al^\top(f),\nonumber\\ \frac{d}{dt} f =b(u)f-a\al(u), \qquad a\in\mathbb R, \end{gather*} where $\al^\top:V\to\g$ is the adjoint of $\al:\g\to V$. 
\end{proposition} \begin{proof} The map $k\in L_{\rm skew}(V\rtimes\g)$ def\/ined by $\om$ is $k(v,X)=(\al(X),-\al^\top(v))$ because \begin{gather*} \om((v_1,X_1),(v_2,X_2))=\langle\al(X_1),v_2\rangle_V-\langle\al^\top(v_1),X_2\rangle_\g =\langle(\al(X_1),-\al^\top(v_1)),(v_2,X_2)\rangle_{V\rtimes\g}. \end{gather*} The result follows from Corollaries \ref{semidirect} and \ref{central}. \end{proof} \begin{remark}\label{ov} More generally, a 1-cocycle $\al$ on $\g$ with values in the dual $\g$-module $V^*$ def\/ines a~2-cocycle on $V\rtimes\g$ by \begin{gather*}\label{osienko} \om((v_1,X_1),(v_2,X_2))=(\al(X_1),v_2)-(\al(X_2),v_1), \end{gather*} where $( \ , \ )$ denotes the pairing between $V^*$ and $V$. \end{remark} \section[Stratified fluid]{Stratif\/ied f\/luid} Let $M$ be a compact Riemannian manifold with induced volume form $\mu$. Let $\al$ be a closed 1-form on $M$. Then $\al:\X(M)\to C^\oo(M)$ is a Lie algebra 1-cocycle with values in the canonical $\X(M)$-module $C^\oo(M)$. The $L^2$ scalar product is $\X_\mu(M)$-invariant, so $\al$ def\/ines by (\ref{or}) a~2-cocycle~$\om$ on the semidirect product Lie algebra $C^\oo(M)\rtimes\X_\mu(M)$: \begin{gather}\label{oror} \om((f_1,X_1),(f_2,X_2))=\int_Mf_2\al(X_1)\mu-\int_Mf_1\al(X_2)\mu. \end{gather} \begin{proposition} The Euler equation on $(C^\oo(M)\rtimes\X_\mu(M))\x_\om\mathbb R$ with $L^2$ scalar product is \begin{gather} \partial_tu =-\nabla_uu+af\al^\sharp-\grad p,\nonumber\\ \partial_tf =-L_uf-a\al(u),\qquad a\in\mathbb R,\label{our} \end{gather} with $\nabla$ the Levi-Civita covariant derivative and $\sharp$ the Riemannian lift. \end{proposition} \begin{proof} We apply Proposition~\ref{p6} for $\g=\X_\mu(M)$ and $V=C^\oo(M)$. In this case $b(X)f=-L_Xf$ and $\ad(X)^\top X=P\nabla_XX$. We compute $\langle\al^\top(f),X\rangle_\g=\int_Mf\al(X)\mu=\int_Mg(f\al^\sharp,X)\mu= \langle P(f\al^\sharp),X\rangle_\g$, for all $X\in\X_\mu(M)$, hence $\al^\top(f)= P(f\al^\sharp)$. 
\end{proof} Because $d(f\al^\sharp)^\flat=df\wedge\al$, the equation (\ref{our}) written for vorticity 2-form $\om=du^\flat$ becomes \begin{gather*} \partial_t \om =-L_u\om+df\wedge\al,\\ \partial_t f =-L_uf-a\al(u). \end{gather*} \begin{proposition}[\cite{V2}]\label{totgeod} Given a $2$-cocycle $\om$ determined via \eqref{oror} by the constant $1$-form $\al$ on the torus $M\!=\!\TT^2$, we have $(\X_\mu^{\rm ex}(M)\ltimes C^\oo_0(M))\x_\om\mathbb R$ is totally geodesic in \mbox{$(\X_\mu(M){\ltimes} C^\oo(M)){\x_\om}\mathbb R$}, where $C_0^\oo(M)$ is the subspace of functions with vanishing integral. \end{proposition} \begin{proof} We know from Section~\ref{14} that for $M=\TT^2$, $\X_\mu^{\rm ex}(M)\ltimes C^\oo_0(M)$ is totally geodesic in $\X_\mu(M)\ltimes C^\oo(M)$. But $\al(u)$ has zero integral for $u$ exact divergence free, $\al$ being closed. We have to make sure that $f\al^\sharp$ is orthogonal to the space of harmonic vector f\/ields for all $f$ with zero integral, \ie for all functions $f$ such that $f\mu$ is exact ($f\mu=d\nu$). But $\int_Mg(f\al^\sharp,Y)\mu =\int_M\al(Y)d\nu=-\int_ML_Y\al\wedge\nu=0$ because $L_Y\al=0$ for all harmonic vector f\/ields $Y$ on the 2-torus, $\al$ being a constant 1-form. \end{proof} Hence on the 2-torus, for constant $\al$ and initial conditions $u_0$ Hamiltonian vector f\/ield and~$f_0$ function with zero integral, $u$ will be Hamiltonian and $f$ will have zero integral at every time~$t$. The Hamiltonian vector f\/ield is $H_\ps=\partial_y\ps\partial_x-\partial_x\ps\partial_y$ and the Poisson bracket $L_{H_\ps}f=\{f,\ps\}$ is the Jacobian of $f$ and $\ps$. 
If $\al=-\be dy$ and $a=-1$ we get the equation for stream function $\ps$ and vorticity function $\om=\De\ps$: \begin{gather} \partial_t \om =-\{\om,\ps\}-\be\partial_x f,\nonumber\\ \partial_t f =-\{f,\ps\}+\be\partial_x\ps.\label{stratified} \end{gather} Let $\xi=g\frac{\rh-\rh_0}{\rh_0}$ be a buoyancy variable measuring the deviation of a density $\rh$ from a background value~$\rh_0$, with $g$ the gravity acceleration. The background stratif\/ication $\rh_0$ is assumed to be exponential, characterized by the constant Brunt--V\"ais\"al\"a frequency $N=(-g\frac{d\log\rh_0}{dy})^{\frac12}$. The equation for a {\bf stratif\/ied f\/luid in Boussinesq approximation} \cite{Zeitlin2} is the geodesic equation~(\ref{stratified}) for $\be=N$ constant and $f=N^{-1}\xi$: \begin{gather*} \partial_t\om =-\{\om,\ps\}-\partial_x\xi,\\ \partial_t\xi =-\{\xi,\ps\}+N^2\partial_x \ps. \end{gather*} When the Brunt--V\"ais\"al\"a frequency $N$ is an integer and $\xi$ has zero integral (at time zero), then the stratif\/ied f\/luid equation is a geodesic equation on a Lie group~\cite{V2}. \section[$H^1$ metrics]{$\boldsymbol{H^1}$ metrics} \subsection*{Camassa--Holm equation} The {\bf Camassa--Holm equation} \cite{CH} \begin{gather}\label{ch} \partial_t(u-u'')=-3uu'+2u'u''+uu''' \end{gather} is the geodesic equation for the right invariant metric on $\Diff(S^1)$ given by the $H^1$ scalar product $\langle X,Y\rangle =\int_{S^1}(XY+X'Y')dx=\int_{S^1}X(1-\partial_x^2)Ydx$ \cite{Kouranbaeva}. Indeed, one gets from \begin{gather*} \langle \ad(X)^\top Y,Z\rangle =\langle Y,X'Z-XZ'\rangle =\int_{S^1}(Y(X'Z-XZ')+Y'(X''Z-XZ''))dx\\ \phantom{\langle \ad(X)^\top Y,Z\rangle}{} =\int_{S^1}Z (2YX'+Y'X-2Y''X'-Y'''X)dx \end{gather*} that $\ad(X)^\top Y=(1-\partial_x^2)^{-1}(2YX'+Y'X-2Y''X'-Y'''X)$. Plugging $\ad(X)^\top X=(1-\partial_x^2)^{-1}(3XX'-2X'X''-XX''')$ into Euler's equation (\ref{euler}) one obtains the Camassa--Holm shallow water equation for $u:I\to C^\oo(S^1)$. 
Since $m=A(u)=u-u''$, the Hamiltonian form of the Camassa--Holm equation is \begin{gather*} \partial_tm=-um'-2u'm. \end{gather*} \begin{remark} Considering the right invariant $H^1$ metric on the Bott--Virasoro group (\ref{bott}), an extended Camassa--Holm equation is obtained \cite{Misiolek1} \begin{gather*} \partial_t(u-u'')=-3uu'+2u'u''+uu'''-2au''',\qquad a\in\mathbb R. \end{gather*} Indeed, the identity $\om(X,Y)=\langle k(X),Y\rangle$ for the Virasoro cocycle $\om(X,Y)=2\int_{S^1}X'''Ydx$ and the $H^1$ scalar product implies $k(X)=2(1-\partial_x^2)^{-1}X'''$. Now by Corollary \ref{central} the geodesic equation is the extended Camassa--Holm equation above. The homogeneous manifold $\Diff(S^1)/S^1$ is a coadjoint orbit of the Bott--Virasoro group. The {\bf Hunter--Saxton equation} describing weakly nonlinear unidirectional waves \cite{HS} \begin{gather*} \partial_tu''=-2u'u''-uu''' \end{gather*} is a geodesic equation on $\Diff(S^1)/S^1$ with the right invariant metric def\/ined by the scalar product $\langle X,Y\rangle=\int_{S^1}X'Y'dx$ \cite{KM2}. \end{remark} \subsection*{Higher dimensional Camassa--Holm equation} The {\bf higher dimensional Camassa--Holm equation} (also called EPDif\/f or averaged template matching equation) \cite{HMR,HM} is the geodesic equation for the right invariant $H^1$ metric \begin{gather}\label{def} \langle X,Y\rangle=\int_M(g(X,Y)+\al^2g(\nabla X,\nabla Y))\mu, \end{gather} on $\Diff(M)$ for compact $M$. Because $\nabla^*\nabla=\De+\Ric$, this scalar product can be rewritten with the help of the rough Laplacian $\De_{R}=\De+\Ric$ as $\langle X,Y\rangle=\int_M(g(X-\al^2\De_{R}X,Y)\mu$, so the momentum density of the f\/luid $m=A(u)$ is $m=(1-\al^2\De_{R})u$. It follows that the adjoint of $\ad(X)$ with respect to (\ref{def}) is conjugate by $1-\al^2\De_R$ to the adjoint of $\ad(X)$ with respect to the $L^2$ metric (\ref{el2}) computed to be (\ref{brasov}). 
Hence $(1-\al^2\De_R)\ad(X)^\top Y=(\nabla X)^\top(Y-\al^2\De_RY) +\nabla_X(Y-\al^2\De_RY)+(\div X)(Y-\al^2\De_RY)$. We get as geodesic equation the higher dimensional Camassa--Holm equation \begin{gather*} \partial_t(1-\al^2\De_{R})u=-u\div u+\al^2(\div u)\De_{R} u-\nabla_uu +\al^2\nabla_u(\De_{R} u)\\ \phantom{\partial_t(1-\al^2\De_{R})u=}{} -(\nabla u)^\top u+\al^2(\nabla u)^\top \De_{R} u. \end{gather*} In Hamiltonian form this equation is \begin{gather*} \partial_tm=-\nabla_um-(\nabla u)^\top m-(\div u)m \qquad \mbox{for}\quad m=A(u)=u-\al^2\De_R u. \end{gather*} In particular for $M=S^1$ we get the Camassa--Holm equation (\ref{ch}). When $M$ is a manifold with boundary and we put Neumann or mixed conditions on the boundary, then the $H^1$ scalar product (\ref{def}) has to be replaced by \begin{gather}\label{200} \langle X,Y\rangle=\int_M(g(X,Y)+2\al^2g(\Def X,\Def Y))\mu, \end{gather} where $\Def X=\frac12(\nabla X+(\nabla X)^\top)$ denotes the deformation $(1,1)$-tensor of $X$ \cite{GR2}. \subsection*{Averaged Euler equation} For a compact Riemannian manifold $M$ we consider the right invariant metric on the group $\Diff_\mu(M)$ of volume preserving dif\/feomorphisms given by the $H^1$ scalar product (\ref{def}) on vector f\/ields. The geodesic equation is the (Lagrangian) {\bf averaged Euler equation} \cite{MRS,Shkoller}, also called LAE-$\al$ equation: \begin{gather}\label{lambda} \partial_t(1-\al^2\De_R)u=-\nabla_u(1-\al^2\De_R)u +\al^2(\nabla u)^\top(\De_R u)-\grad p. \end{gather} Indeed, from $\langle\ad(X)^\top Y,Z\rangle=\int_Mg(Y-\al^2\De_RY,\nabla_ZX-\nabla_XZ)\mu =\int_Mg((\nabla X)^\top (Y-\al^2\De_RY)+\nabla_X(Y-\al^2\De_RY),Z)\mu$ we obtain that \begin{gather*} (1-\al^2\De_R)(\ad(X)^\top Y) =P((\nabla X)^\top Y+\nabla_X(1-\al^2\De_R)Y-\al^2(\nabla X)^\top(\De_RY)) \end{gather*} and we use Euler's equation (\ref{euler}) to get (\ref{lambda}). 
In Hamiltonian form this equation is \begin{gather*} \partial_tm=-\nabla_um-(\nabla u)^\top m-\grad p \end{gather*} for $m=A(u)=u-\al^2\De_R u$. As in the higher dimensional Camassa--Holm equation, when Neumann or mixed conditions on the boundary of $M$ are imposed, one has to consider the $H^1$ scalar product (\ref{200}). \section{Systems of two evolutionary equations} From \cite{Fuks} we know that a basis for $H^2(\mathfrak{X}(S^1),C^\oo(S^1))$ is represented by $\si(X,Y)=X'Y-XY'$ and the Virasoro cocycle $\om(X,Y)=\int_{S^1}(X'Y''-X''Y')dx\in\mathbb R\subset C^\oo(S^1)$; a basis for $H^2(\mathfrak{X}(S^1),\Om^1(S^1))$ is represented by the cocycles \begin{gather*} \si_1(X,Y)=XY''-X''Y,\qquad \om_1(X,Y)=X'Y''-X''Y'; \end{gather*} a basis for $H^2(\mathfrak{X}(S^1),\Om^2(S^1))$ is represented by the cocycles \begin{gather*} \si_2(X,Y)=X'''Y-XY''',\qquad \om_2(X,Y)=X'''Y'-X'Y'''. \end{gather*} Only the cocycles $\om$, $\om_1$ and $\om_2$ (whose expressions involve only derivatives of $X$ and $Y$) integrate to group cocycles \cite{OR}. The Euler equations for the $L^2$ or $H^1$ scalar product on the corresponding Abelian extensions provide systems of two equations, generalizing Burgers (\ref{burgers}) or Camassa--Holm (\ref{ch}) equation. We exemplify with the 2-cocycle $\si$ taking values in the module of functions on the circle. The Euler equations for the $L^2$ scalar product on $C^\oo(S^1)\rtimes\X(S^1)$ and on $C^\oo(S^1)\rtimes_\si\X(S^1)$ are \begin{gather*} \partial_tu =-3uu'-ff',\\ \partial_tf =-uf'-u'f, \end{gather*} and \begin{gather*} \partial_tu =-3uu'+uf'+2u'f-ff',\\ \partial_tf =-uf'-u'f. 
\end{gather*} The Euler equations for the $H^1$ scalar product on $C^\oo(S^1)\rtimes\X(S^1)$ and on $C^\oo(S^1)\rtimes_\si\X(S^1)$ are \begin{gather*} \partial_t(u-u'') =-3uu'+2u'u''+uu'''-ff'+f'f''',\\ \partial_t(f-f'') =-uf'-u'f+uf'''+u'f'', \end{gather*} and \begin{gather*} \partial_t(u-u'') =-3uu'+2u'u''+uu'''-2u'f-uf'+2u'f''+uf'''-ff'+f'f''',\\ \partial_t(f-f'') =-uf'-u'f+uf'''+u'f''. \end{gather*} One can consider central extensions of semidirect products of $\X(S^1)$ with modules of densities as in Remark~\ref{ov} \cite{OR}. For instance the 1-cocycle $\al(X)=X''$ on $\X(S^1)$ with values in $\Om^1(S^1)$, the module dual to $C^\oo(S^1)$, gives the 2-cocycle $\om((f_1,X_1),(f_2,X_2))=\int_{S^1}(X_1''f_2-X_2''f_1)dx$ on the semidirect product $C^\oo(S^1)\rtimes\X(S^1)$. The geodesic equation for the $L^2$ scalar product on the central extension $(C^\oo(S^1)\rtimes\X(S^1))\times_\om \mathbb R$ is \begin{gather*} \partial_tu =-3uu'-ff'-af'',\\ \partial_tf =-uf'-u'f-au'',\quad a\in\mathbb R. \end{gather*} \section{Conclusions} This survey article presents the formal deduction as geodesic equations on dif\/feomorphism groups with right invariant metrics of several PDE's of hydrodynamical type. Sometimes extensions of dif\/feomorphism groups by central or Abelian sugroups come into play and the corresponding Lie algebra 2-cocycles introduce additional terms to the geodesic equations. These equations are Hamiltonian equations too, possessing rich geometric structures. Some of them are completely integrable. But presenting these results is beyond the scope of this article. \LastPageEnding \end{document}
\begin{document} \title{Less Bacon More Threshold} \date{\today} \author{Craig Gidney} \email{[email protected]} \affiliation{Google Quantum AI, Santa Barbara, California 93117, USA} \author{Dave Bacon} \affiliation{Google Quantum AI, Seattle, Washington 98103, USA} \maketitle \begin{abstract} We give the Bacon-Shor code a threshold purely by deleting gates from its circuit. Specifically: we use lattice surgery to concatenate the Bacon-Shor code with itself using local planar connectivity, and observe that the resulting circuit is a subset of the circuit that would be used by a larger Bacon-Shor code. \end{abstract} \emph{The code written, circuits generated, and stats collected for this paper are available on Zenodo at \href{https://doi.org/10.5281/zenodo.7901729}{doi.org/10.5281/zenodo.7901729}~\cite{gidneybacondata}.} \section{Introduction} \label{sec:introduction} The Bacon-Shor code~\cite{bacon2006operator} is a quantum error correcting code built out of two-qubit parity measurements (``pair measurements'') on a planar grid of qubits. Each vertical edge of the grid corresponds to a $Z \otimes Z$ measurement. Each horizontal edge corresponds to an $X \otimes X$ measurement. The code's logical qubit is defined by the anticommuting observables formed by the product of $X$ operators along the top row versus the product of $Z$ operators along the left column. The stabilizer generators of the code are the products of adjacent rows of $X$ operators, and adjacent columns of $Z$ operators. The Bacon-Shor code was initially considered for its thermodynamic properties, but then attracted considerable attention due to its simplicity\cite{aliferis2007subsystem} and amenability to hardware. It has found applications in biased noise proposals~\cite{li2019compasslerpsurface, huang2020fault, napp2012optimal, brooks2013fault} and has been realized in experiment~\cite{li2018direct, egan2021fault}. 
However, the Bacon-Shor code has a major problem: it doesn't scale to arbitrarily low error rates. It has no threshold. For a fixed physical error rate, increasing the size of the code initially improves the logical error rate, but this improvement eventually stops and reverses. The underlying issue is that the stabilizers being compared by the code all run along the entire length of the grid. This length is increasing with the size of the code. As the size increases, forming the stabilizers involves combining more and more measurements. This makes the stabilizers noisier and noisier. Eventually, the stabilizer noise overwhelms the gains in code distance, and performance regresses. The lack of threshold in the Bacon-Shor code can be fixed in a variety of ways. For example, it can be fixed by mixing in small patches of surface code~\cite{li2019compasslerpsurface} or by concatenation of the code with itself~\cite{aliferis2007subsystem, cross2007comparative}. Our work focuses on obtaining a threshold using the simplest strategy possible - subtracting gates - requiring no additional connectivity or circuit overhead. The key underlying idea is that we can concatenate the Bacon-Shor code with itself by using lattice surgery, and the gates used during the lattice surgery are a subset of the gates that would be used by a non-concatenated Bacon-Shor code. This concatenation differs from the concatenation in~\cite{aliferis2007subsystem, cross2007comparative} because those constructions require long range connectivity to implement higher levels of concatenation. The paper proceeds as follows. First, \sec{construction} describes the lattice surgery pair measurement, the equivalence between concatenation and gate deletion, and the pattern of gates we deleted. Second, \sec{benchmark} benchmarks the construction, showing that it outperforms the normal Bacon-Shor code. Finally, \sec{conclusion} gives some closing remarks. 
\section{Construction} \label{sec:construction} Lattice surgery is a well established technique for performing pair measurements in the surface code~\cite{de2017zxlattice,fowler2018latticesurgery}. Lattice surgery also works on the Bacon-Shor code (see \fig{lattice_surgery})~\cite{poulsen2017lattice}. Placing two Bacon-Shor codes next to each other, and temporarily merging them into one larger code, performs a logical pair measurement. \begin{figure} \caption{ Lattice surgery in the Bacon-Shor code. When two Bacon-Shor logical qubits are next to each other, temporarily activating the pair measurements between them performs a logical pair measurement. This implementation satisfies all the rules required of a logical $XX$ pair measurement~\cite{mcewen2023relaxingsurface} \label{fig:lattice_surgery} \end{figure} An important distinction between surface code lattice surgery, and Bacon-Shor lattice surgery, is that Bacon-Shor lattice surgery incentivizes monogamy. In surface code lattice surgery, we want to involve every logical qubit in as many simultaneous surgeries as possible, because this makes the computation denser. In Bacon-Shor lattice surgery we would still like to make the computation as dense as possible, but there's a problem: the stabilizers running between the logical qubits are longer during a surgery. For example, if we simultaneously stitched a whole column of logical qubits, the stabilizers could get so long and noisy that the noise would become overwhelming and break fault tolerance. A simple way to avoid this catastrophe is to only involve each logical Bacon-Shor qubit in one surgery at a time. Lattice surgery gives us a logical pair measurement that only requires local planar connectivity. As long as there is a code distance where the logical pair measurement outperforms the physical pair measurements, arbitrarily good logical qubits can be created by concatenation. 
Therefore, by using lattice surgery to concatenate the Bacon-Shor code with itself, we have created a fault tolerant construction with a threshold. See \app{threshold} for a more detailed proof. If we produce a circuit implementing a Bacon-Shor code concatenated with itself using lattice surgery, we observe something interesting: the circuit is using a strict subset of the operations that would have been used by a non-concatenated Bacon-Shor code covering the same set of physical qubits. This is because the logical pair measurements are activated by including physical pair measurements between the logical qubits. In a non-concatenated Bacon-Shor code, these physical pair measurements would have simply always been included, instead of only being included while performing the logical pair measurement. Therefore we can switch our perspective and view our concatenated code construction as a construction that deletes gates from a Bacon-Shor circuit. To use the edge-deletion perspective, we have to specify the pattern of deletions. For this paper, we found that the following pattern works well: $$\text{Include}_{b,f}(e, t) = \left(t \equiv b \cdot \frac{4^{L(e, f) + 1}-1}{3} \pmod {4^{L(e, f) + 1}}\right)$$ In the above equation: $t$ is a circuit layer index, $e$ is the row index of vertical edges or column index of horizontal edges, $L(e,f)$ is the number of times $f$ divides into $e$, $f$ is the ``fractal pitch'' parameter that determines the code distance used when concatenating, and $b$ is an interleaving control parameter set to 0 for odd-index rows and 2 for even-odd rows and 1 for odd-index columns and 3 for even-index columns. The different $b$ values ensure qubits are only involved in one pair measurement at a time, at all levels of concatenation, assuming an odd fractal pitch. For each edge, at each time step, we evaluate $\text{Include}_{b,f}(e, t)$ and delete the edge from that layer if the function evaluates to false. 
An example pattern of removed measurements is shown in \fig{cut_pattern}. Given the pattern of edge inclusions, a maximal set of detectors can be computed using a union find data structure. When a column of horizontal edges (or row of vertical edges) is measured, initialize a union find data structure with each edge in its own set. Then, for each anticommuting edge that was included in any layer between the current layer and the last time the column (or row) was measured, perform a union between the two edges the anticommuting edge is touching. After this is complete, each disjoint set of edges in the data structure corresponds to a detector in the circuit. Given a disjoint set, its detector is formed by multiplying together the pair measurements of each edge in the set from the current layer and from the previous layer where this column or row was measured. Taking time slices of the resulting detectors reveals a fractal pattern of checked stabilizers, which varies over time, as shown in \fig{detslice}. Note that there are substantially more stabilizers being checked than there would be in a normal Bacon-Shor code, which is one reason to expect better performance. Importantly, although the pattern of stabilizers is more complex than the original pattern of Bacon-Shor stabilizers, they still have the property that errors in the circuit are graphlike (producing an even number of $X$-type and $Z$-type detection events in the bulk). The circuit can still be decoded by using matching. In fact, using existing open source tools, this process is entirely automated. Starting from a circuit annotated with the detectors and observables, Stim~\cite{gidney2021stim} can sample from this circuit and convert it into a decoding graph. Then, PyMatching~\cite{higgott2021pymatching,higgott2023sparseblossom} can use the decoding graph to predict whether the logical observables were flipped or not, given the sampled detection event data. 
\begin{figure} \caption{ The pattern of pair measurements over time, assuming a fractal pitch of 5. In each layer of the circuit, pair measurements are included (or deleted) depending on the annotated conditions for that edge's row or column. For example, the vertical pair measurements in the row to the right of the label $10 \bmod 16$ are only included in circuit layers where the layer index $t$ satisfies $t \equiv 10 \pmod{16} \label{fig:cut_pattern} \end{figure} \begin{figure} \caption{ Detector slice diagrams showing checked stabilizers of our construction, with a grid diameter of 27 and a fractal pitch of 3. The coming and going of the pair measurements at higher levels of concatenation causes variation over time. The checked stabilizers correspond to different codes at different time steps. } \label{fig:detslice} \end{figure} \section{Benchmarking} \label{sec:benchmark} We compared our construction to the normal Bacon-Shor using Monte-Carlo sampling backed by Stim~\cite{gidney2021stim} and PyMatching~\cite{higgott2021pymatching,higgott2023sparseblossom}. We built the circuits out of a purely dissipative gate set, with the allowed gates being pair measurements ($M_{XX}$ and $M_{ZZ}$), measurements ($M_X$ and $M_Z$), and resets ($R_{X}$ and $R_{Z}$). We used a uniform noise model specified in \app{noise_model}. For each circuit we sampled 100 million shots or 1000 errors, whichever came first. Our main numerical result is that, at a noise strength of $p=0.001$, the Bacon-Shor code stops improving at a code distance of 25 while our construction is still improving at distance 40 (see \fig{error_rate}). These simulated experimental results don't necessarily confirm that the construction has a threshold. The ripples in the curves (as the grid diameter goes in and out of phase with powers of the fractal pitch) make it hard to tell if the improvement is consistent or slowing. 
Regardless, this data clearly demonstrates that you can substantially outperform the normal Bacon-Shor code simply by deleting the right pattern of measurements. For a proof that the fractal construction has a threshold, see \app{threshold}. Note that the honeycomb code gets better performance from the same gate set using strictly sparser connectivity~\cite{hastings2021dynamically,paetznick2023honeycomb,gidney2022honeycombplanar,kesselring2022anyoncondense}. Before we finish we want to note something counter-intuitive we found, which the careful reader may have noticed as strange about the $\text{Include}_{b,f}(e, t)$ function defined in the previous section. The performance of the system was best when we held in the lattice surgery stitched configuration for 1 round, rather than multiple rounds (see \fig{error_rate_x} and \fig{error_rate_z}). This shouldn't be true in general, since holding for 1 round breaks the fault tolerance of the logical measurement. A single physical pair measurement error can invert a 1-round lattice surgery measurement. We think the reason that holding for 1 round works best is specific to our use case, due to our construction \emph{repeating} logical pair measurements which are \emph{waiting} for each other. Holding for 1 round results in less reliable measurements, but they are still being cross-checked across time, and they are happening faster. We don't know if this 1-round-works-best effect remains true for arbitrarily high levels of concatenation, but it was the clear winner for the sizes that we simulated. \begin{figure} \caption{ Logical error rate of the normal Bacon-Shor circuit, and the fractal edge-deleting variant introduced in this paper at various ``fractal pitches''. 
The error rate is the chance of an X error and/or Z error occurring, per round, derived from \fig{error_rate_x} \label{fig:error_rate} \end{figure} \section{Conclusion} \label{sec:conclusion} In this paper, we showed how to perform planar concatenation of the Bacon-Shor code by using lattice surgery. Our construction is equivalent to deleting gates from a larger Bacon-Shor code. This means that the performance of the Bacon-Shor code can be qualitatively improved, from boundedly good to arbitrarily good, purely by doing less. One interesting fact about our construction is that it inherently requires time dynamics. Pair measurements come and go in a fractal pattern, making it impossible to write down the construction as a static stabilizer code or gauge code. This is yet another example of how adding time dynamics, or understanding time dynamics, can improve quantum error correction~\cite{hastings2021dynamically,mcewen2023relaxingsurface}. It's also an example of the power of fractal order parameters in quantum error correction~\cite{yoshida2013exotic}. \section{Contributions} Both authors discussed ideas for using lattice surgery to concatenate the Bacon-Shor code under planar connectivity. Dave Bacon attempted to write code implementing the construction. Craig Gidney wrote code implementing the construction, and insisted on the title. \section{Noise Model} \label{app:noise_model} All circuits in this paper were simulated using the uniform noise model defined in \tbl{noise_model}. \begin{table}[H] \centering \begin{tabular}{|r|l|} \hline Noise channel & Probability distribution of effects \\ \hline $\text{MERR}_B(p)$ & $\begin{aligned} 1-p &\rightarrow M_{B} \\ p &\rightarrow M_{(-1 \cdot B)} \text{\;\;\;\;\;\emph{(i.e. 
measurement result is inverted)}} \end{aligned}$ \\ \hline $\text{XERR}(p)$ & $\begin{aligned} 1-p &\rightarrow I \\ p &\rightarrow X \end{aligned}$ \\ \hline $\text{ZERR}(p)$ & $\begin{aligned} 1-p &\rightarrow I \\ p &\rightarrow Z \end{aligned}$ \\ \hline $\text{DEP1}(p)$ & $\begin{aligned} 1-p &\rightarrow I \\ p/3 &\rightarrow X \\ p/3 &\rightarrow Y \\ p/3 &\rightarrow Z \end{aligned}$ \\ \hline $\text{DEP2}(p)$ & $\begin{aligned} 1-p &\rightarrow I \otimes I &\;\; p/15 &\rightarrow I \otimes X &\;\; p/15 &\rightarrow I \otimes Y &\;\; p/15 &\rightarrow I \otimes Z \\ p/15 &\rightarrow X \otimes I &\;\; p/15 &\rightarrow X \otimes X &\;\; p/15 &\rightarrow X \otimes Y &\;\; p/15 &\rightarrow X \otimes Z \\ p/15 &\rightarrow Y \otimes I &\;\; p/15 &\rightarrow Y \otimes X &\;\; p/15 &\rightarrow Y \otimes Y &\;\; p/15 &\rightarrow Y \otimes Z \\ p/15 &\rightarrow Z \otimes I &\;\; p/15 &\rightarrow Z \otimes X &\;\; p/15 &\rightarrow Z \otimes Y &\;\; p/15 &\rightarrow Z \otimes Z \end{aligned}$ \\ \hline \end{tabular} \caption{ Definitions of noise channels used to define noisy versions of gates in \tbl{noise_model}. } \label{tbl:noise_channels} \end{table} \begin{table}[H] \centering \begin{tabular}{|r|l|} \hline Ideal gate & Noisy gate \\ \hline $\text{Idle}$ & $\text{DEP1}(p)$ \\ \hline $R_X$ & $\text{ZERR}(p) \cdot R_X$ \\ $R_Z$ & $\text{XERR}(p) \cdot R_Z$ \\ \hline $M_X$ & $\text{DEP1}(p) \cdot \text{MERR}_X(p)$ \\ $M_Z$ & $\text{DEP1}(p) \cdot \text{MERR}_Z(p)$ \\ \hline $M_{XX}$ & $\text{DEP2}(p) \cdot \text{MERR}_{XX}(p)$ \\ $M_{ZZ}$ & $\text{DEP2}(p) \cdot \text{MERR}_{ZZ}(p)$ \\ \hline \end{tabular} \caption{ The uniform noise model used by simulations in this paper. \tbl{noise_channels} defines each noise channel. 
} \label{tbl:noise_model} \end{table} \section{Additional Data} \label{app:additional_data} \begin{figure} \caption{ Logical error rate of the X observable in Bacon-Shor circuits, at various ``fractal pitches'' and ``surgery hold factors''. The fractal pitch is the code distance where concatenation occurs. The surgery hold factor is how many times more rounds lattice surgery measurements are performed over, at each successive level of concatenation. We expected the best surgery hold factor to be the same as the fractal pitch, but for the parameters explored in this paper a surgery hold factor of 1 worked best. Highlights correspond to hypotheses with a likelihood within a factor of 1000 of the max likelihood hypothesis, given the sampled data. } \label{fig:error_rate_x} \end{figure} \begin{figure} \caption{ Logical error rate of the Z observable in Bacon-Shor circuits. Very similar to \fig{error_rate_x} \label{fig:error_rate_z} \end{figure} \begin{figure} \caption{ Logical error rate of an XX lattice surgery measurement between two distance 5 Bacon-Shor codes, as described in \app{threshold} \label{fig:error_rate_xx} \end{figure} \section{Threshold Proof} \label{app:threshold} \begin{figure} \caption{ A state machine that fault tolerant circuit blocks can satisfy. A fault is ``dangling" if it produces detection events on detectors comparing measurements at the end of one circuit block to measurements at the start of another circuit block. In the diagram, $p$ refers to the physical error rate and $s$ refers to the number of steps taken. The number of steps is analogous to the number of logical operations performed. This diagram captures the notion of a fault not spreading disastrously for concatenated codes, as formalized in \cite{aliferis2006threshold, aliferis2011thesis} \label{fig:threshold_machine} \end{figure} We prove our construction has a threshold by showing our logical operations satisfy the state machine shown in \fig{threshold_machine}. 
This state machine requires two faults to occur within one logical operation, or in two sequential logical operations, for a logical error to occur. For the proof, we use a digitized noise model where each operation either happens correctly, or faults entirely. This model is the correct one for concatenated codes, since higher levels, unlike the lowest level, may have highly correlated faults~\cite{aliferis2006threshold}. When an operations faults, it can arbitrarily simultaneously corrupt all the qubits it touches and all the measurement results it produces. The gate set we're using contains single-qubit initialization ($R_X$ and $R_Z$), single-qubit measurement ($M_X$ and $M_Z$), pair measurement ($M_{XX}$ and $M_{ZZ}$), and idling ($I$) where we assume idling takes as long as a pair measurement. We will show that the logical version of each of these operations meets the requirements of the state machine. We divide circuits into ``rounds'', where each round is composed of 4 layers of gates and measures the parity of every active edge exactly once. We'll use distance 5 Bacon-Shor, instead of distance 3, because when a pair measurement beaks it can corrupt two qubits and this would not be correctable at distance 3. We'll always use enough rounds for measurement errors to be correctable. Consider the $M_{XX}$ measurement shown in \fig{lattice_surgery}. Suppose it spends 1 round in the unstitched configuration (``before''), then 3 rounds in the stitched configuration (``during''), then 1 round in the unstitched configuration (``after''). This involves performing 215 $M_{XX}$ gates, 200 $M_{ZZ}$ gates, and 170 idle gates, for a total of 585 gates. Note that these physical gates are all local (see \fig{lattice_surgery}). By enumerating all possible failures on all possible gates during the logical $M_{XX}$, we find that any individual fault can be corrected, except for some faults on gates in the last round. 
We classify faults occurring in the last round as dangling faults, which will be passed along to the next logical operation on one of the logical qubits. When correcting the detection events within a logical operation, any detection events which may have been produced by a dangling fault are ignored and left for the next logical operation. That operation is responsible for detecting and correcting the dangling fault. Note that this involves comparing to the measurements at the end of this operation (the previous operation, from the perspective of the next operation) in order to detect and correct any dangling faults. We can also check by enumeration that the $M_{XX}$ circuit has the property that it will correct any dangling faults arriving into it as input, for each possible combination of previous operations. With this done, we've confirmed the $M_{XX}$ logical gate behaves at least as well as the state machine in \fig{threshold_machine}. To experimentally demonstrate that the logical pair measurement can actually correct any individual error, we performed simulated Monte-Carlo sampling. The results are shown in \fig{error_rate_xx}. It confirms the logical error rate is quadratically suppressed compared to the physical error rate, under circuit noise. Because the $M_{XX}$ gate is our most complicated gate, showing that it satisfies the necessary conditions is the bulk of the proof. The analysis of the $M_{ZZ}$ gate is identical, but with $X$ replaced by $Z$. The logical $I$, $R_X$, and $R_Z$ gates are the same idea, but on smaller chunks of circuit. The main detail left is that the logical $M_X$ and $M_Z$ gates are not allowed to leave dangling faults. We can show they have these properties by enumerating all possible single faults. All of our operations behave at least as well as a state machine whose logical error is quadratic in the physical error rate and linear in the number of steps. 
Recall that the edge-deletion construction presented in this paper is equivalent to concatenating the Bacon-Shor code using lattice surgery. By concatenating, we can repeatedly quadratically suppress error. At a sufficiently low initial error rate, this will arbitrarily reduce the error. Therefore our construction has a threshold. That said, something we consider lacking about this proof is that it's done in terms of concatenation, instead of in terms of the problem the decoder is actually solving in our simulations. The decoder is not told about the concatenated structure, it's just given a single matching graph problem and decodes it like any other matching graph problem. In particular, based on this concatenation-style proof, you wouldn't predict that holding the internal lattice surgery measurements for one round would outperform holding them for multiple rounds. Holding for one round violates one of the conditions of the proof, but we see in simulation that holding for one round performs better. If the proof was instead in terms of, say, percolation over the fractal structure of the matching graph, then that proof would maybe explain this otherwise surprising behavior. \end{document}
\begin{document} \title{Convex duality in nonlinear optimal transport} \begin{abstract} This article studies problems of optimal transport, by embedding them in a general functional analytic framework of convex optimization. This provides a unified treatment of a large class of related problems in probability theory and allows for generalizations of the classical problem formulations. General results on convex duality yield dual problems and optimality conditions for these problems. When the objective takes the form of a convex integral functional, we obtain more explicit optimality conditions and establish the existence of solutions for a relaxed formulation of the problem. This covers, in particular, the mass transportation problem and its nonlinear generalizations. \end{abstract} \noindent\textbf{Keywords.} mass transport; martingale transport; Schr\"odinger problem; convex duality; integral functionals \newline \newline \noindent\textbf{AMS subject classification codes.} 46N10, 46N30 \section{Introduction} Let $S_t$, $t=0,\ldots,T$ be Polish spaces and $S=S_0\times\cdots\times S_T$. Let $M_t$ and $M$ be spaces of $\mathbb{R}^d$-valued Borel measures on $S_t$ and $S$, respectively, and consider the optimization problem \begin{equation}\label{d}\tag{D} \mathop{\rm minimize}\limits\quad \sum_{t=0}^TG_t^*(\lambda_t) + H^*(\lambda) \quad\mathop{\rm over}\ \lambda\in M, \end{equation} where $G^*_t$ and $H^*$ are convex functions on $M_t$ and $M$, respectively and $\lambda_t$ is the marginal of $\lambda$ on $S_t$. The above covers a wide range of optimization problems encountered in probability theory and finance. 
In particular, when $T=d=1$, $G^*_t=\delta_{\{\mu_t\}}$ and\footnote{Given a set $C$, its {\em indicator function} $\delta_C$ takes the value $0$ on $C$ and $+\infty$ outside of $C$.} $H^*(\lambda)=\int_S cd\lambda+\delta_{M_+}(\lambda)$ for given $\mu_t\in M_t$ and a lower semicontinuous nonnegative function $c$, we cover the classical Monge--Kantorovich mass transportation problem. Choosing $H^*=\delta_{\cal L}ambda$ for a closed convex set ${\cal L}ambda\subset M$ of probability measures, we obtain the problem from Strassen~\cite{str65} of finding probability measures with given marginals. When $H^*(\lambda)$ is the entropy relative to a given reference measure, we recover the classical Schr\"odinger problem; see e.g.\ \cite{fg97,leo14} and the references therein. Problems where the effective domain of $H^*$ is contained in the set of martingale measures have been recently proposed in mathematical finance e.g.\ in \cite{bhp13}. Allowing for more general choices of $G^*_t$ is relevant e.g.\ in economic applications where $\lambda_t$ is not necessarily fixed but can react to demand with an increasing marginal costs of production. In the case of finite $S$, such problems have been extensively studied in \cite{roc84}. In the financial context of \cite{bhp13}, more general convex functionals $G_t^*$ arise naturally when price quotes for derivatives come with bid-ask spreads and finite quantities. This paper develops a duality theory for \eqref{d} by embedding it in the general conjugate duality framework of Rockafellar~\cite{roc74}. This provides a unified treatment of a wide range of problems in deriving optimality conditions and criteria for the existence of optimal solutions. The duality approach yields simplified proofs and generalizations of many classical results in applied probability. 
As examples, we extend some well-known results on the existence of probability measure with given marginals, on the Schr\"odinger problem and on model-free superhedging of financial derivatives. Our main theorem on problem \eqref{d} yields extensions of the main results of \cite{str65}, \cite{fg97} and \cite{bhp13} to models with general marginal functionals $G_t^*$. When the functions $G^*_t$ and $H^*$ have the additional structure of integral functionals, the optimality conditions allow for pointwise characterizations and the problem dual to \eqref{d} allows for a relaxation where the optimum is attained under fairly general conditions. Our existence results extend the existing results on the dual of the Monge--Kantorovich problem to a wider class of problems. In particular, we obtain a necessary and sufficient conditions for optimal transportation plans in mass transportation with capacity constraints. We obtain a similar result for the Schr\"odinger problem which also seems new. This paper combines techniques from convex analysis, measure theory and the theory of integral functionals of continuous functions. The general duality results are derived from the functional analytic framework of \cite{roc74} while the theory of integral functionals allows for a more explicit form of optimality conditions and for a relaxation of the problem dual to \eqref{d}. The generality of our setting requires an extended conjugacy theorem for integral functionals proved in the appendix. The attainment of the minimum in the dual of \eqref{d} is established by borrowing techniques from convex stochastic optimization \cite{pp12}. \section{Conjugate duality}\label{sec:cd} This section derives \eqref{d} as a dual problem of a convex optimization problem on a Banach space of continuous functions. 
In some applications it is convenient to allow for unbounded continuous functions so we will follow \cite{str65} and allow for continuous functions that become bounded when scaled by a possibly unbounded continuous function. Given a continuous $\psi_t:S_t\to[1,\infty)$, \[ C_t:=\{x_t\in C(S_t;\mathbb{R}^d)\mid x_t/\psi_t\in C_b(S_t;\mathbb{R}^d)\} \] is a Banach space under the norm $\|x_t\|_{C_t}:=\|x_t/\psi_t\|_{C_b(S_t;\mathbb{R}^d)}$, where $C_b(S_t;\mathbb{R}^d)$ is the space of bounded continuous functions with the supremum norm. The space $M_t$ of $\mathbb{R}^d$-valued finite Borel measures under which $\psi_t$ is integrable may be identified with a linear subspace of the norm dual $C_t^*$ of $C_t$. Indeed, for every $\lambda_t\in M_t$, \[ x_t\mapsto \int_{S_t} x_td\lambda_t:=\sum_{i=1}^d\int_{S_t} x^i_td\lambda^i_t=\sum_{i=1}^d\int_{S_t} x^i_t/\psi_td(\psi_t\lambda^i_t) \] is a continuous linear functional on $C_t$. If $S_t$ is compact, then Riesz representation (see e.g.\ \cite[Theorem~7.10.4]{bog7}) implies that $C_t^*=M_t$ but, in general, the inclusion $M_t\subseteq C_t^*$ may be strict. Similarly, defining \[ \psi(s):=\sum_{t=0}^T\psi_t(s_t), \] the space $M$ of finite $\mathbb{R}^d$-valued Borel measures on $S$ under which $\psi$ is integrable is a linear subspace of the Banach dual of \[ C:=\{u\in C(S;\mathbb{R}^d)\mid u/\psi\in C_b(S;\mathbb{R}^d)\}. \] When $\psi_t$ are bounded, we have $C_t=C_b(S_t;\mathbb{R}^d)$ and $C=C_b(S;\mathbb{R}^d)$ the duals of which contain all finite $\mathbb{R}^d$-valued Borel measures on $S_t$ and $S$, respectively. 
Let $G_t$ be a proper convex function on $C_t$, $t=0,\ldots,T$, let $H$ be a proper convex function on $C$, and consider the problem \begin{equation*}\label{p}\tag{P} \begin{alignedat}{2} &\mathop{\rm minimize}\limits\quad & & \sum_{t=0}^TG_t(x_t) + H\left(-\sum_{t=0}^Tx_t\mathop{\text{\scriptsize $\circ$}} \pi_t\right)\quad\mathop{\rm over}\ x\in\prod_{t=0}^T C_t, \end{alignedat} \end{equation*} where $x=(x_t)_{t=0}^T$ and $\pi_t(s):=s_t$. The general duality results below depend on the properties of the {\em optimum value function}. \[ \varphi(u) := \inf_x\left\{\sum_{t=0}^TG_t(x_t) + H\left(u-\sum_{t=0}^Tx_t\mathop{\text{\scriptsize $\circ$}} \pi_t\right)\right\} \] defined on $C$. Throughout, we will endow the dual space $C^*$ of $C$ by the weak*-topology. The spaces $C$ and $C^*$ are then in separating duality under the natural bilinear form \[ \langle u,\lambda\rangle:=\lambda(u). \] Similarly for $C_t^*$. It turns out that the conjugate \[ \varphi^*(\lambda):=\sup_{u\in C}\{\langle u,\lambda\rangle-\varphi(u)\} \] of $\varphi$ can be expressed as \[ \varphi^*(\lambda)=\sum_{t=0}^T G_t^*(\lambda_t) + H^*(\lambda), \] where $G_t^*$ is the conjugate of $G_t$, $H^*$ is the conjugate of $H$ and $\lambda_t\in C_t^*$ denotes the continuous linear functional $x_t\mapsto\langle x_t\mathop{\text{\scriptsize $\circ$}} \pi_t,\lambda\rangle$ on $C_t$, the $t$-th {\em marginal} of $\lambda$. The infimum of $\varphi^*$ over $C^*$ equals $-\varphi^{**}(0)$ so if $\varphi$ is lower semicontinuous and the optimum value $\inf\eqref{p}$ of \eqref{p} is finite, then the biconjugate theorem implies that $-\inf\eqref{p}$ equals the optimum value of \begin{equation}\label{dr}\tag{DR} \mathop{\rm minimize}\limits\quad \sum_{t=0}^T G_t^*(\lambda_t) + H^*(\lambda)\quad\mathop{\rm over}\ \lambda\in C^*. \end{equation} This may be viewed as a ``relaxation'' of \eqref{d} from the space $M$ of Borel measures to all of $C^*$. 
Clearly, if $\mathop{\rm dom}\varphi^*\subseteq M$, then \eqref{dr} coincides with \eqref{d}. The following lemma gives a sufficient condition for this. It is a simple extension of \cite[Lemma~4.10]{leo6} that was formulated for $T=1$ and $\psi_t\equiv 1$. \begin{lemma}\label{lem:radon} If $\mathop{\rm dom}\varphi^*\subset C^*_+$ and $\mathop{\rm dom} G_t^*\subseteq M_t$ for each $t=0,\ldots,T$, then $\mathop{\rm dom}\varphi^*\subseteq M$. \end{lemma} \begin{proof} By \cite[Theorem 7.10.6]{bog7}, $\lambda\in \mathop{\rm dom}\varphi^*$ is a Radon measure (since $S$ is Polish, this is equivalent to being a Borel measure \cite[Theorem 7.1.7]{bog7}) if and only if, for every $\epsilon>0$, there exists a compact $K\subset S$ such that if $u\in C_b$ is zero on $K$, then $|\langle u,\lambda\rangle| \le \epsilon \|u\|$. Let $\epsilon>0$. By assumption, $\lambda_t\in M_t$ and they are nonnegative since $\mathop{\rm dom}\varphi^*\subset C^*_+$. By \cite[Theorem~7.1.7]{bog7}, there exist compact sets $K_t$ such that $\lambda_t(K_t^C)<\epsilon/(T+1)$. Let $u \in C_b$ be zero on $\prod K_t$. Since $\lambda$ is an additive set function, and $|u|\le 1_{(\prod K_t)^C}\|u\|_{C_b}$, \begin{align*} |\langle u, \lambda \rangle | &\le \int 1_{(\prod K_t)^C}\|u\| d\lambda\\ &= \lambda\left(\bigcup \pi_t^{-1}(K_t)^C\right)\|u\|\\ &\le \sum_t \lambda \left(\pi_t^{-1}(K_t)^C\right)\|u\|\\ &\le \sum_t \lambda_t\left(K_t^C\right)\|u\|\\ &= \epsilon\|u\| \end{align*} which completes the proof. \end{proof} The set of relaxed dual solutions coincides with the {\em subdifferential} $\partial\varphi(0)$ of $\varphi$ at the origin. If $\partial\varphi(0)$ is nonempty, then $\varphi$ is closed at the origin and there is no duality gap. The following result gives a sufficient condition for the existence in \eqref{dr}. 
It involves the domain \[ \mathop{\rm dom}\varphi = \mathop{\rm dom} H + \{\sum_{t=0}^Tx_t\mathop{\text{\scriptsize $\circ$}}\pi_t\,|\,x_t\in\mathop{\rm dom} G_t\} \] of the optimum value function of \eqref{p} \begin{theorem}\label{thm:dual} If $G_t$ and $H$ be proper lsc functions such that the set \[ \bigcup_{\alpha>0}\alpha\mathop{\rm dom}\varphi \] is a nonempty closed linear subspace of $C$, then the optimum in \eqref{dr} is attained, there is no duality gap and an $x$ solves \eqref{p} if and only if there is a $\lambda\in C^*$ such that \begin{align*} \partial G_t(x_t) &\ni \lambda_t,\quad t=0,\ldots,T,\\ \partial H(-\sum_{t=0}^Tx_t\mathop{\text{\scriptsize $\circ$}}\pi_t) &\ni \lambda, \end{align*} and then $\lambda$ solves \eqref{dr}. \end{theorem} \begin{proof} Problem \eqref{p} fits the conjugate duality framework of \cite{roc74} with $X=\prod_{t=0}^T C_t$, $U=C$ and \[ F(x,u) := \sum_{t=0}^TG_t(x_t) + H\left(u-\sum_{t=0}^Tx_t\mathop{\text{\scriptsize $\circ$}} \pi_t\right). \] The associated {\em Lagrangian} $L$ is the convex-concave function defined for each $x\in\prod_{t=0}^TC_t$ and $\lambda\in M$ by \begin{align*} L(x,\lambda) &:= \inf_u\{F(x,u) - \langle u,\lambda\rangle\}\\ &= \sum_{t=0}^TG_t(x_t) - \sum_{t=0}^T\langle x_t\mathop{\text{\scriptsize $\circ$}} \pi_t,\lambda\rangle - H^*(\lambda)\\ &= \sum_{t=0}^TG_t(x_t) - \sum_{t=0}^T\langle x_t,\lambda_t\rangle - H^*(\lambda). \end{align*} The conjugate of $F$ can thus be expressed for each $\theta\in\prod_tM_t$ and $\lambda\in M$ as \begin{align*} F^*(\theta,\lambda) &= \sup_x\{\langle x,\theta\rangle - L(x,\lambda)\}\\ &= \sup_x\{\sum_{t=0}^T\langle x_t,\theta_t\rangle - \sum_{t=0}^TG_t(x_t) + \sum_{t=0}^T\langle x_t,\lambda_t\rangle + H^*(\lambda)\}\\ &= \sum_{t=0}^T G_t^*(\lambda_t+\theta_t) + H^*(\lambda). \end{align*} Thus \[ \varphi^* (\lambda) = F^*(0,\lambda) = \sum_{t=0}^T G_t^*(\lambda_t) + H^*(\lambda). 
By \cite[Theorem~2.7.1(vii)]{zal2}, $\varphi$ is continuous at the origin relative to $\mathop{\rm aff}\mathop{\rm dom}\varphi$, so $\partial\varphi(0)\ne\emptyset$ by \cite[Theorem~2.4.12]{zal2}.
The following extends the existence result of \cite{str65} by allowing for more general conditions on the marginals. As usual, the {\em support function} of a set $D$ in a locally convex space $X$ is the lower semicontinuous convex function $\sigma_D$ on the dual space $V$ of $X$ given by \[ \sigma_D(v):=\sup_{x\in D}\langle x,v\rangle. \] \begin{theorem}\label{thm:str} Let ${\cal L}ambda\subset M$ and ${\cal L}ambda_t\subset M_t$ be weakly compact and convex. There exists $\lambda\in {\cal L}ambda$ with $\lambda_t\in{\cal L}ambda_t$ if and only if \begin{equation*} \begin{alignedat}{2} \sum_{t=0}^T\sigma_{{\cal L}ambda_t}(x_t) + \sigma_{{\cal L}ambda}\left(-\sum_{t=0}^Tx_t\right)\ge 0\qquad\forall x\in \prod_{t=0}^T C_t. \end{alignedat} \end{equation*} \end{theorem} \begin{proof} This fits Theorem~\ref{thm:dual} with $H=\sigma_{{\cal L}ambda}$ and $G_t=\sigma_{{\cal L}ambda_t}$. Indeed, by the biconjugate theorem (see e.g.~\cite[Theorem~5]{roc74}), we then have $H^*=\delta_{{\cal L}ambda}$ and $G_t^*=\delta_{{\cal L}ambda_t}$, so the objective of \eqref{d} is simply the indicator of the set \[ \{\lambda\in{\cal L}ambda\,|\,\lambda_t\in{\cal L}ambda_t\}. \] The existence is thus equivalent to the optimum value of \eqref{d} being equal to zero. Since ${\cal L}ambda$ is bounded, $\mathop{\rm dom}\varphi=C$, so the domain condition of the Theorem~\ref{thm:dual} is satisfied. Thus, there is no duality gap so $\inf\eqref{d}=0$ if and only if $\inf\eqref{p}=0$, which holds exactly when the condition in the statement holds. \end{proof} When $T=d=1$, ${\cal L}ambda$ is a subset of probability measures and ${\cal L}ambda_t=\{\mu_t\}$ for given probability measures $\mu_t$ on $S_t$, Theorem~\ref{thm:str} reduces to Theorem~7 of \cite{str65}. \subsection{Schr\"odinger problem}\label{ssec:sch0} Let $d=1$ and let $R\in M$ and $\mu_t\in M_t$ be probability measures. 
The associated {\em Schr\"odinger problem} is the convex minimization problem \begin{equation*}\label{sp} \begin{alignedat}{2} &\mathop{\rm minimize}\limits\quad & & \int_S\ln(d\lambda/dR)d\lambda \quad\mathop{\rm over}\ \lambda\in M_+(S)\\ &\mathop{\rm subject\ to}\quad & &\lambda\ll R,\quad\lambda_t=\mu_t\quad t=0,\ldots,T. \end{alignedat} \end{equation*} Such problems have been extensively studied in the literature; see e.g.~\cite{csi75} and the references there. This fits the format of \eqref{p} with $G_t(x_t)=\int_{S_t}x_td\mu_t$ and \[ H(u) = \ln\int_Se^udR. \] Indeed, $H$ is proper convex lsc function with the conjugate \[ H^*(\lambda) = \begin{cases} \int_S\ln(d\lambda/dR)d\lambda & \text{if $\lambda\in{\cal P}$,}\\ +\infty & \text{otherwise}, \end{cases} \] where ${\cal P}\subset M$ is the set of probability measures. The expression of the conjugate is derived e.g.\ in \cite[Section~3]{roc71} under the assumption that $S$ is a compact Hausdorff space and $\psi=1$. Combined with with Theorem~\ref{thm:if} below, the same argument works in the case of Polish $S$ and general $\psi$. Allowing for general proper lsc convex $G_t$, gives rise to the following generalized formulation of the Schr\"odinger problem \begin{equation}\label{spg} \begin{alignedat}{2} &\mathop{\rm minimize}\limits\quad & & \sum_{t=0}^TG_t^*(\lambda_t) + \int_S\ln(d\lambda/dR)d\lambda \quad\mathop{\rm over}\ \lambda\in M_+(S)\\ &\mathop{\rm subject\ to}\quad & &\lambda\ll R. \end{alignedat} \end{equation} This allows for situations where the marginals are not known exactly. Theorem~\ref{thm:dual} combined with Remark~\ref{rem:cq} gives the following. \begin{theorem}\label{thm:spd} Assume that there exist $x_t\in\mathop{\rm dom} G_t$ such that $\sum_{t=0}^Tx_t\ge\epsilon\psi$ for some $\epsilon>0$. 
Then the optimum in \eqref{spg} is attained and the optimum value coincides with the negative of the optimum value of \[ \mathop{\rm minimize}\limits\quad \sum_{t=0}^TG_t(x_t) + \ln\int_S\exp\left(-\sum_{t=0}^Tx_t\right)dR\quad\mathop{\rm over}\ x\in\prod_{t=0}^TC_t. \] \end{theorem} When $T=1$ and $G_t(x_t)=\int_{S_t}x_td\mu_t$, we recover the dual of the Schr\"odinger problem studied in \cite{leo14}. In Section~\ref{ssec:sch} below, we will associate \eqref{sp} with another dual problem for which the optimum is attained. This yields necessary and sufficient conditions for the minimizers of the Schr\"odinger problem. This provides a duality proof of the optimality conditions given in \cite[Theorem~3.43]{fg97}. \subsection{Model-independent superhedging} Let $d=1$, $S_t=\mathbb{R}^n$ and $\psi_t(s_t)=1+|s_t|$ for all $t$ and $H=\delta_{{\cal C}_{\hat u}}$, where \[ {\cal C}_{\hat u} := \{u\in C\,|\,\exists z\in{\cal N}:\ \hat u(s)+u(s)\le \sum_{t=0}^{T-1}z_t(s^t)\cdot\Delta s_{t+1}\} \]for an upper semicontinuous function $\hat u$ and \[ {\cal N}:=\{(z_t)_{t=0}^{T-1}\,|\,z_t\in{\cal L}^\infty(S^t;\mathbb{R}^n)\quad t=0,\ldots,T\}, \] where $S^t:=S_0\times\cdots\times S_t$. Problem \eqref{p} becomes \begin{equation}\label{sssh} \begin{alignedat}{2} &\mathop{\rm minimize}\limits\quad & & \sum_{t=0}^TG_t(x_t) \quad\text{over}\quad x\in\prod_{t=0}^TC_t,\ z\in{\cal N}\\ &\mathop{\rm subject\ to} & & \quad \hat u(s) \le \sum_{t=0}^{T-1}z_t(s^t)\cdot\Delta s_{t+1} + \sum_{t=0}^Tx_t(s_t)\quad\forall s. \end{alignedat} \end{equation} This can be interpreted as a problem of optimal superhedging $\hat u$ in a financial market where $G_t$ gives the cost of buying an $s_t$-dependent cash-flow $x_t$ paid out at time $t$ and the sum involving $z$ represents the gains from a self-financing trading strategy described by $z$. When \[ G_t(x_t)=\int_{S_t}x_td\mu_t \] for given probability measures $\mu_t$, we recover the superhedging problem studied in \cite{bhp13}. 
Nonlinear functions $G_t$ arise naturally in practice where one faces bid-ask spreads and price quotes are available only for finite quantities. We will denote the set of nonnegative {\em martingale measures} by \[ {\cal M}:=\{\lambda\in M_+\,|\, \int_S\sum_{t=0}^Tz_t(s^t)\cdot\Delta s_{t+1}d\lambda = 0\quad\forall z\in{\cal N}\}. \] \begin{lemma} Assume that $\hat u\le K\psi$ for some $K\in\mathbb{R}$. Then for $\lambda\in M$, the conjugate of $H$ can be expressed as \[ H^*(\lambda) = \begin{cases} -\int_S\hat ud\lambda & \text{if $\lambda\in{\cal M}$},\\ +\infty & \text{otherwise}. \end{cases} \] \end{lemma} \begin{proof} It is clear that $H^*(\lambda)=+\infty$ unless $\lambda\ge 0$. For $\lambda\ge 0$, \[ H^*(\lambda)\le\sup_{z\in{\cal N}}\int_S\sum_{t=0}^Tz_t(s^t)\cdot\Delta s_{t+1}d\lambda -\int_S\hat ud\lambda = \begin{cases} -\int_S\hat ud\lambda & \text{if $\lambda\in{\cal M}$},\\ +\infty & \text{otherwise}. \end{cases} \] On the other hand, \[ H^*\ge\sigma_\Gamma+\sigma_{{\cal C}^c_0}, \] where $\Gamma=\{u\in C\,|\,u\le-\hat u\}$ and \[ {\cal C}_0^c:=\{u\in C\,|\,\exists z\in\tilde{\cal N}:\ u(s)\le\sum_{t=0}^{T-1}z_t(s^t)\cdot\Delta s_{t+1}\} \] with $\tilde{\cal N}\subset{\cal N}$ denoting the continuous bounded strategies. When $\hat u\le K\psi$ for some $K\in\mathbb{R}$, then for $\lambda\ge 0$, \[ \sigma_\Gamma(\lambda) = -\int_S\hat ud\lambda, \] by Theorem~\ref{thm:if} below. By standard approximation arguments, $\sigma_{{\cal C}_0^c}=\delta_{\cal M}$ (see e.g.~\cite[page~435]{str65} or \cite[Lemma~2.3]{bhp13}). \end{proof} When $\mathop{\rm dom} G_t^*\subset M_t$ for all $t=0,\ldots,T$, the feasible dual solutions are in $M$, by Lemma~\ref{lem:radon}, so problem \eqref{d} can be written as \begin{equation}\label{dsssh} \mathop{\rm minimize}\limits\quad\sum_{t=0}^TG_t^*(\lambda_t) - \int_S\hat ud\lambda\quad\mathop{\rm over}\quad\lambda\in {\cal M}. \end{equation} Combining Theorem~\ref{thm:dual} with Remark~\ref{rem:cq} gives the following. 
\begin{theorem}\label{thm:sh} Assume that $\mathop{\rm dom} G_t^*\subset M_t$ for all $t=0,\ldots,T$, that $\hat u\le K\psi$ for some $K\in\mathbb{R}$ and that \eqref{sssh} remains feasible when $\hat u$ is increased by $\epsilon\psi$ for some $\epsilon>0$. Then the optimum in \eqref{dsssh} is attained and the optimum value coincides with the negative of the optimum value of \eqref{sssh}. \end{theorem} When $G_t(x_t)=\int x_td\mu_t$ for given $\mu_t\in M_t$, the feasibility condition is trivially satisfied and we recover Theorem~1.1 of \cite{bhp13}. In fact, Theorem~\ref{thm:sh} is slightly sharper than \cite[Theorem~1.1]{bhp13} since we obtain the absence of a duality gap for continuous functions $x_t$. We denote by $C^c$ the subset of convex functions in $C$. Allowing for unbounded continuous functions is essential here as the only bounded convex functions are the constant functions. The following corollary of Theorem~\ref{thm:sh} extends \cite[Theorem~8]{str65} on the existence of martingale measures with given marginals. \begin{corollary}\label{cor:exm} Let ${\cal L}ambda_t\subset M_t$ be weakly closed convex sets of probability measures. There exists $\lambda\in {\cal M}$ with $\lambda_t\in{\cal L}ambda_t$ if and only if \begin{equation*} \begin{alignedat}{2} \sum_{t=0}^T \sigma_{{\cal L}ambda_t}(w_t-w_{t+1}) \ge 0 \end{alignedat} \end{equation*} for all $w\in\prod_{t=0}^{T+1}C_t^c$ with $w_0\ge 0$ and $w_{T+1}=0$. \end{corollary} \begin{proof} Let $G_t=\sigma_{{\cal L}ambda_t}$ and $\hat u=0$ in Theorem~\ref{thm:sh}. Given an $x\in\prod_{t=0}^TC_t$, define $w_r\in C_r$ for $r=0,\ldots,T$ by \[ w_r(s_r):=\sum_{t=r}^Tx_t(s_r). \] If $w_0\ge 0$ and $w_r$ is convex for each $r$, then $x$ is feasible. 
Indeed, if for some $r$, \begin{equation}\label{constr}\tag{$H_r$} 0 \le \sum_{t=0}^{r-1}z_t(s^t)\cdot\Delta s_{t+1} + \sum_{t=0}^{r-1}x_t(s_t) + w_r(s_r)\quad\forall s\in S \end{equation} and we choose $-z_r(s^r)\in\partial w_{r+1}(s_r)$, then \[ 0\le z_r(s_r)\cdot\Delta s_{r+1} + w_{r+1}(s_{r+1})-w_{r+1}(s_r), \] which combined with \eqref{constr} gives $(H_{r+1})$. For $r=0$, \eqref{constr} simply means $w_0\ge 0$. On the other hand, since $\sigma_{{\cal L}ambda_t}$ are nondecreasing, it is optimal to choose $x_t$ so that $w_r$ are convex. Indeed, for $r=T$ this is clear as \eqref{constr} implies that the optimal $x_T$ is given as a pointwise supremum of affine functions of $s_T$ whose gradients are in ${\cal L}^\infty$. If $w_t$ is convex for $t>r$, then \eqref{constr} is necessary and sufficient for feasibility so it is optimal to choose $w_r$ as small as possible subject to \eqref{constr}, which again means that $w_r$ is convex. Moreover, since $z_t$ are bounded, $w_r\in C_r^c$. The optimum value thus equals that of \begin{equation*} \begin{alignedat}{2} &\mathop{\rm minimize}\limits\quad & & \sum_{t=0}^TG_t(w_t-w_{t+1}) \quad\mathop{\rm over}\quad w\in\prod_{t=0}^TC_t^c,\\ & \mathop{\rm subject\ to} & & \quad w_0\ge 0, \end{alignedat} \end{equation*} where $w_{T+1}:=0$. \end{proof} Note that if ${\cal L}ambda_t=\{\mu_t\}$ for each $t$, then \[ \sum_{t=0}^T \sigma_{{\cal L}ambda_t}(w_t-w_{t+1})=\sum_{t=0}^T\int_{\mathbb{R}^n} w_td(\mu_t-\mu_{t-1}), \] and Corollary~\ref{cor:exm} reduces to \cite[Theorem~8]{str65}, which says that there exists a martingale measure with marginals $\mu_t$ if and only if $\mu_t$ are in convex order. 
\section{Integral functionals}\label{sec:if} From now on, we assume extra structure on $G_t$ and $H$ that will \begin{enumerate} \item allow us to write the optimality conditions in a more explicit pointwise form, \item suggest a natural relaxation of problem \eqref{p} to a larger space of measurable functions where the infimum is more likely to be attained. \end{enumerate} More precisely, we assume that each $G_t$ is an integral functional of the form \[ G_t(x_t) = \int_{S_t}g_t(x_t(s_t),s_t)d\mu_t(s_t)+\delta_{C(D_t)}(x_t), \] where $\mu_t$ is a probability measure on $S_t$, $g_t$ is {\em a convex ${\cal B}(S_t)$-normal integrand\footnote{This means that the set-valued mapping $s_t\mapsto\{(x_t,\alpha)\in\mathbb{R}^d\times\mathbb{R}\,|\,g_t(x_t,s_t)\le\alpha\}$ is ${\cal B}(S_t)$-measurable and closed convex-valued; see e.g.\ \cite[Chapter~14]{rw98}.} on $\mathbb{R}^d$}, $D_t(s_t):=\mathop{\rm cl}\mathop{\rm dom} g_t(\cdot,s_t)$ and \[ C(D_t):=\{u\in C_t\,|\, u(s_t)\in D_t(s_t)\ \forall s_t\in S_t\} \] is the set of selections of $D_t$. Similarly, we assume that \[ H(u) = \int_Sh(u(s),s)d\mu(s)+\delta_{C(D)}(u) \] where $\mu$ is a probability measure on $S$, $h$ is a convex ${\cal B}(S)$-normal integrand on $\mathbb{R}^d$ and $D(s):=\mathop{\rm cl}\mathop{\rm dom} h(\cdot,s)$. We define a function $h^\infty$ on $\mathbb{R}^d\times S$ by setting $h^\infty(\cdot,s)$ equal to the recession function of $h(\cdot,s)$. Recall that the recession function of a lsc convex function $k$ is given by \[ k^\infty(x) := \sup_{\alpha>0}\frac{k(\bar x+\alpha x) - k(\bar x)}{\alpha}, \] which is independent of the choice $\bar x\in\mathop{\rm dom} k$; see \cite[Theorem~8.5]{roc70a}. By \cite[Exercise~14.54(a)]{rw98}, $h^\infty$ is a convex ${\cal B}(S)$-normal integrand on $\mathbb{R}^d$.
Recall that a set-valued mapping $D:S\rightrightarrows\mathbb{R}^d$ is {\em inner semicontinuous} (isc) if the inverse image under $D$ of every open set $O\subset\mathbb{R}^d$ is open in $S$, the inverse image being defined by \[ D^{-1}(O):=\{s\in S\,|\,D(s)\cap O\ne\emptyset\}. \] The following result characterizes the conjugate and the subdifferential of $H$. Its proof can be found in the appendix. Given a $\lambda\in M$, we denote its absolutely continuous and singular parts, respectively, with respect to $\mu$ by $\lambda^a$ and $\lambda^s$. The {\em normal cone} of $D(s)$ at a point $u$ is defined as the subdifferential of $\delta_{D(s)}$ at $u$. More explicitly, it is the closed convex cone $N_{D(s)}(u)$ given by \[ N_{D(s)}(u)= \{y\in\mathbb{R}^d\,|\,(u'-u)\cdot y\le 0\quad\forall u'\in D(s)\} \] for $u\in D(s)$ and $N_{D(s)}(u)=\emptyset$ for $u\notin D(s)$. \begin{theorem}\label{thm:if} Assume that $D(s):=\mathop{\rm dom} h(\cdot,s)$ is isc, $\mathop{\rm cl}\mathop{\rm dom} H=C(D)$ and that $H$ is finite and continuous at some $u\in C$. Then $H$ is a proper convex lsc function and the restriction to $M$ of its conjugate is given by \[ H^*(\lambda) = \int_S h^*(d\lambda^a/d\mu)d\mu + \int_S(h^*)^\infty(d\lambda^s/d|\lambda^s|)d|\lambda^s|. \] Moreover, $\lambda\in\partial H(u)\cap M$ if and only if \begin{align*} d\lambda^a/d\mu &\in\partial h(u)\quad\mu\text{-a.e.}\\ d\lambda^s/d|\lambda^s| &\in N_D(u)\quad|\lambda^s|\text{-a.e.} \end{align*} If $\mathop{\rm dom} H =C$, then $\mathop{\rm dom} H^*\subseteq \{\lambda \in M\mid \lambda\ll \mu\}$. \end{theorem} Combining Theorem~\ref{thm:if} with Lemma~\ref{lem:radon} gives the following. \begin{corollary}\label{cor:if} Assume that $H$ and $G_t$ all satisfy the assumptions of Theorem~\ref{thm:if} and that $\mathop{\rm dom} H=C$ or $\mathop{\rm dom} G_t=C_t$ for all $t=0,\ldots,T$.
Then $\mathop{\rm dom}\varphi^*\subset M$, $\lambda_t\ll\mu_t$ for all $\lambda\in\mathop{\rm dom}\varphi^*$ and the optimality conditions in Theorem~\ref{thm:dual} can be written as \begin{align*} d\lambda_t/d\mu_t &\in\partial g_t(x_t)\quad\mu_t\text{-a.e.}\quad t=0,\dots,T\\ d\lambda^a/d\mu &\in\partial h(-\sum_{t=0}^T x_t)\quad\mu\text{-a.e.}\\ d\lambda^s/d|\lambda^s| &\in N_D(-\sum_{t=0}^T x_t)\quad|\lambda^s|\text{-a.e.}. \end{align*} \end{corollary} The optimality conditions characterize the optimal primal-dual pairs of solutions but in many applications, the primal optimum is not attained in the space of continuous functions. This motivates a relaxation of the primal problem to a larger space where the optimal solutions are more likely to exist. \section{Relaxation of the primal problem}\label{sec:rel} In general, primal solutions do not exist in the space of continuous functions but we will establish the existence of solutions in a larger space of measurable functions when the functionals $G_t$ and $H$ are integral functionals as in Section~\ref{sec:if} above and $\mu_t$ is the $t$-th marginal of $\mu$. More precisely, we study the problem \begin{equation}\label{pr}\tag{$\text{PR}$} \mathop{\rm minimize}\limits\quad \int_S\left[\sum_{t=0}^T g_t(x_t) +h(-\sum_{t=0}^T x_t)\right]d\mu \quad\mathop{\rm over}\quad x\in\Phi, \end{equation} where \begin{align*} \Phi := \{x\in\prod_{t=0}^T{\cal L}^0_t\,|\, x_t\in D_t,\ -\sum_{t=0}^T x_t\in D\quad (\mu_t)_{t=0}^T\text{-a.e.}\}, \end{align*} where ${\cal L}^0_t:={\cal L}^0(S_t,{\cal B}(S_t);\mathbb{R}^d)$ and $(\mu_t)_{t=0}^T$-almost everywhere means that the property holds on a Cartesian product of sets of full measure on $S_t$. \begin{lemma}\label{lem:mut} If $A\in{\cal B}(S)$ occurs $(\mu_t)$-almost everywhere then $\mu(A)=1$. \end{lemma} \begin{proof} By definition, $A$ occurs $(\mu_t)$-almost everywhere if there exist $A_t\in{\cal B}(S_t)$ with $\mu_t(A_t)=1$ such that $\prod_{t=0}^T A_t\subset A$.
Noting that $\prod_{t=0}^T A_t = \bigcap_{t=0}^T\pi_t^{-1}(A_t)$, where $\pi_t$ is the projection $s\mapsto s_t$, we get \begin{align*} \mu((\prod_{t=0}^T A_t)^c) &= \mu\left(\bigcup_{t=0}^T(\pi_t^{-1}(A_t))^c\right)\le \sum_{t=0}^T\mu\left(\pi_t^{-1}(A_t^c))\right) = \sum_{t=0}^T\mu_t(A_t^c), \end{align*} where $\mu_t(A_t^c)=0$. \end{proof} Sufficient conditions for attainment of the minimum in \eqref{pr} will be given in Theorem~\ref{thm:exist} below. Clearly, the optimum value of \eqref{pr} minorizes that of \eqref{p}. To guarantee that the optimum value of \eqref{pr} is still greater than $-\inf\eqref{d}$ we will assume the following. \begin{assumption}\label{ass1} Feasible $x$ in \eqref{pr} and $\lambda$ in \eqref{d} satisfy \[ \int_S\left[\sum_{t=0}^T g_t(x_t)\right]d\mu<\infty\quad\text{and}\quad \int_Sh(-\sum_{t=0}^T x_t)d\mu<\infty \] and \[ \int_S\left[\sum_{t=0}^Tx_t\cdot\frac{d\lambda_t}{d\mu_t}\right]d\mu = \int_S\left[\sum_{t=0}^T x_t\cdot\frac{d\lambda}{d|\lambda|}\right]d|\lambda|. \] \end{assumption} Sufficient conditions for Assumption~\ref{ass1} will be given at the end of this section. The following statement shows that \eqref{pr} can indeed be considered as a valid dual to \eqref{d}. \begin{theorem}\label{thm:oc} Assume that the normal integrands $g_t$ and $h$ satisfy the conditions of Corollary~\ref{cor:if} and that Assumption~\ref{ass1} holds. Then \[ \inf\eqref{d}\le\inf\eqref{pr}\le\inf\eqref{p} \] and feasible solutions $x$ in \eqref{pr} and $\lambda$ in \eqref{d} are optimal with $\inf\eqref{pr}=-\inf\eqref{d}$ if and only if \begin{align*} d\lambda_t/d\mu_t &\in\partial g_t(x_t)\quad\mu_t\text{-a.e.}\quad t=0,\dots,T\\ d\lambda^a/d\mu &\in\partial h(-\sum_{t=0}^T x_t)\quad\mu\text{-a.e.}\\ d\lambda^s/d|\lambda^s| &\in N_D(-\sum_{t=0}^T x_t)\quad|\lambda^s|\text{-a.e.} \end{align*} \end{theorem} \begin{proof} Let $x$ and $\lambda$ be feasible in \eqref{pr} and \eqref{d}, respectively. 
By Lemma~\ref{lem:mut}, the condition $\lambda_t\ll\mu_t$ implies \[ x_t\in D_t,\ -\sum_{t=0}^T x_t\in D\quad \lambda\text{-a.e.} \] Thus, by Fenchel's inequality, \begin{align} g_t(x_t)+g_t^*(d\lambda_t/d\mu_t) &\ge x_t\cdot(d\lambda_t/d\mu_t)\quad\mu_t\text{-a.e.}\label{1}\\ h(-\sum_{t=0}^Tx_t)+ h^*(d\lambda^a/d\mu) & \ge (-\sum_{t=0}^T x_t)\cdot (d\lambda^a/d\mu)\quad\mu\text{-a.e.}\label{2}\\ (h^*)^\infty(d\lambda^s/d|\lambda^s|) & \ge (-\sum_{t=0}^T x_t)\cdot (d\lambda^s/d|\lambda^s|)\quad|\lambda^s|\text{-a.e.}.\label{4} \end{align} Summing up, \eqref{1} gives \[ \sum_{t=0}^Tg_t(x_t) + \sum_{t=0}^Tg_t^*(d\lambda_t/d\mu_t) \ge \sum_{t=0}^Tx_t\cdot(d\lambda_t/d\mu_t)\quad(\mu_t)\text{-a.e.}, \] where, by Lemma~\ref{lem:mut}, the inequality holds $\mu$-almost everywhere as well. Integrating, we get \[ \int_S\left[\sum_{t=0}^Tg_t(x_t)\right]d\mu + \int_S\left[\sum_{t=0}^Tg_t^*(d\lambda_t/d\mu_t)\right]d\mu \ge \int_S\left[\sum_{t=0}^Tx_t\cdot(d\lambda_t/d\mu_t)\right]d\mu \] On the other hand, \eqref{2} and \eqref{4} give \[ \int_S h(-\sum_{t=0}^Tx_t)d\mu + H^*(\lambda) \ge \int_S(-\sum_{t=0}^T x_t)\cdot\frac{d\lambda}{d|\lambda|}d|\lambda|. \] By the first part of Assumption~\ref{ass1}, the left hand sides of the above two inequalities are finite so \begin{multline*} \int_S\left[\sum_{t=0}^Tg_t(x_t) + h(-\sum_{t=0}^Tx_t)\right]d\mu + \sum_{t=0}^TG_t^*(\lambda_t) + H^*(\lambda) \\ \ge \int_S\left[\sum_{t=0}^Tx_t\cdot(d\lambda_t/d\mu_t)\right]d\mu + \int_S(-\sum_{t=0}^T x_t)\cdot\frac{d\lambda}{d|\lambda|}d|\lambda|, \end{multline*} where the right hand side vanishes by the second part of Assumption~\ref{ass1}. Thus, $-\inf\eqref{d}\le\inf\eqref{pr}$. The above also shows that this holds as an equality if and only if \eqref{1}--\eqref{4} hold as equalities almost everywhere, which in turn is equivalent to the subdifferential conditions in the statement; see e.g.~\cite[Theorem~23.5]{roc70a}. 
\end{proof} The following lemma gives sufficient conditions for the first part of Assumption~\ref{ass1}. \begin{lemma}\label{lem:ass1} Assume that there exist $\bar v\in{\cal L}^\infty$, $\beta\in{\cal L}^1$ and $\delta>0$ such that $g^*_t(\bar v(s),s)\le\beta(s)$ and $h^*(\bar v(s)+v,s)\le\beta(s)$ for $v\in\mathbb{R}^d$ with $|v|\le\delta$. Then, the first part of Assumption~\ref{ass1} holds. If in addition, $\mu=\prod_{t=0}^T\mu_t$, then $x_t\in{\cal L}^1_t$ for every feasible $x$ in \eqref{pr}. \end{lemma} \begin{proof} By Fenchel's inequality, \begin{align*} \sum_{t=0}^Tg_t(x_t,s_t) + h(-\sum_{t=0}^Tx_t,s) &\ge \sum_{t=0}^T[\bar v(s)\cdot x_t-g_t^*(\bar v(s),s)]\\ &\quad - (\bar v(s)+v)\cdot\sum_{t=0}^Tx_t - h^*(\bar v(s)+v,s)\\ &\ge -v\cdot \sum_{t=0}^Tx_t - (T+2)\beta(s). \end{align*} Since this holds for any $v\in\mathbb{R}^d$ with $|v|\le\delta$, the sum $\sum_{t=0}^Tx_t$ is $\mu$-integrable if $x$ is feasible in \eqref{pr}. By Fenchel's inequality again, \[ \sum_{t=0}^Tg_t(x_t)\ge \bar v(s)\cdot\sum_{t=0}^Tx_t-(1+T)\beta(s)\quad\text{and}\quad h(-\sum_{t=0}^Tx_t) \ge \bar v(s)\cdot\sum_{t=0}^Tx_t-\beta(s) \] so the first part of Assumption~\ref{ass1} is satisfied. If $\mu=\prod_{t=0}^T \mu_t$, then by Fubini's theorem, $\mu$-integrability of $\sum_{t=0}^Tx_t$ implies that each $x_t$ is $\mu_t$-integrable. \end{proof} The second part of Assumption~\ref{ass1} clearly holds when feasible solutions $x$ of \eqref{pr} have $x_t$ bounded. More generally, it holds if each $x_t$ is $\lambda_t$-integrable. This holds under all the assumptions of Lemma~\ref{lem:ass1}, when $d=1$ and feasible $\lambda$ satisfy $\lambda_t=\mu_t$. This last condition holds in problems with given marginals; see Section~\ref{sec:mt} below. In some problems it is essential not to require the integrability of $x_t$; see Section~\ref{ssec:sch} below. The following lemma addresses such situations but, interestingly, the argument only works when $T=d=1$. 
The idea for the proof is taken from that of \cite[Corollary~3.15]{fg97}. \begin{lemma}\label{lem:perp} If $T=d=1$ and feasible $\lambda$ satisfies $\lambda\in M_+$ and $\lambda_t=\mu_t$, then the first part of Assumption~\ref{ass1} implies the second part. \end{lemma} \begin{proof} The proof of Theorem~\ref{thm:oc} shows that when $T=d=1$, $\mu,\lambda\in M_+$ and $\lambda_t=\mu_t$, feasible solutions $x$ and $\lambda$ satisfy \[ \int_S\left[\sum_{t=0}^Tx_t\right]^+d\mu < \infty \quad\text{and}\quad \int_S\left[\sum_{t=0}^T x_t\right]^-d\lambda < \infty. \] Let $x_t^\nu$ be the pointwise projection of $x_t$ to the closed ball of radius $\nu$. We have \[ \int_S\left(\sum_{t=0}^Tx^\nu_t\right)d\mu=\sum_{t=0}^T\int_{S_t}x^\nu_td\mu_t=\sum_{t=0}^T\int_{S_t}x^\nu_td\lambda_t=\int_S\left(\sum_{t=0}^Tx^\nu_t\right)d\lambda \] and when $T=1$, \[ [\sum_{t=0}^Tx^\nu_t]^+\le[\sum_{t=0}^Tx_t]^+\quad\text{and}\quad[\sum_{t=0}^Tx^\nu_t]^-\le[\sum_{t=0}^Tx_t]^- \] so, by Fatou's lemma, \[ \int_S\left(\sum_{t=0}^Tx_t\right)d\mu \ge\limsup\int_S\left(\sum_{t=0}^Tx^\nu_t\right)d\mu\ge\liminf\int_S\left(\sum_{t=0}^Tx^\nu_t\right)d\lambda \ge\int_S\left(\sum_{t=0}^Tx_t\right)d\lambda. \] This implies \[ \int_S\left[\sum_{t=0}^Tx_t\right]^-d\mu < \infty \quad\text{and}\quad \int_S\left[\sum_{t=0}^T x_t\right]^+d\lambda < \infty, \] so the same argument gives the reverse inequality. \end{proof} \section{Existence of relaxed primal solutions}\label{sec:exist} We now turn to the existence of solutions in the relaxed problem \eqref{pr}. We start more abstractly by considering problems of the form \begin{equation}\label{pf}\tag{$\bar P$} \mathop{\rm minimize}\limits\quad \int_S f(x)d\mu \quad\mathop{\rm over}\quad x\in\Phi, \end{equation} where $f$ is a convex normal ${\cal B}(S)$-integrand on $\mathbb{R}^{(1+T)d}$ and \[ \Phi:= \left\{x\in\prod_{t=0}^T {\cal L}^0_t \;\middle|\; x \in \mathop{\rm cl}\mathop{\rm dom} f\quad (\mu_t)_{t=0}^T\text{-a.e.}\right\}.
\] Problem \eqref{pr} fits \eqref{pf} with \begin{equation}\label{fgh} f(x,s) = \sum_{t=0}^T g_t(x_t,s_t)+h(-\sum_{t=0}^Tx_t,s) \end{equation} under the following. \begin{assumption}\label{ass2} The set \[ \{x\in\mathbb{R}^{(1+T)d}\,|\,x_t\in\mathop{\rm rint} D_t(s_t),\ -\sum_{t=0}^Tx_t\in\mathop{\rm rint} D(s)\} \] is nonempty for every $s\in S$. \end{assumption} Indeed, by \cite[Propositions~14.44(d) and 14.45(a)]{rw98}, $f$ defined by \eqref{fgh} is a normal integrand, and, by \cite[Theorem~9.3]{roc70a}, Assumption~\ref{ass2} implies \[ \mathop{\rm cl}\mathop{\rm dom} f(\cdot,s) = \{x\,|\,x_t\in D_t(s),\ -\sum_{t=0}^Tx_t\in D(s)\}. \] Assumption~\ref{ass2} is automatically satisfied if $D_t(s_t)=\mathbb{R}^d$ for all $t$ since $\mathop{\rm rint} D(s)\ne\emptyset$, by \cite[Theorem~6.2]{roc70a}. Except for the filtration property, problem \eqref{pf} is similar to the general stochastic optimization problem studied in \cite{pp12}. The following variant of \cite[Theorem~2]{pp12} gives sufficient conditions for the existence of solutions in \eqref{pf}. Its proof uses \cite[Corollary~8.6.1]{roc70a} which says that if $x\in\mathbb{R}^{(1+T)d}$ is such that $f^\infty(x,s)\le 0$ and $f^\infty(-x,s)\le 0$, then $f(\bar x+x,s)=f(\bar x,s)$ for every $\bar x\in\mathop{\rm dom} f(\cdot,s)$. The Borel sigma-algebra generated on $S$ by the projection of $s$ to $s_t$ will be denoted by ${\cal F}_t$. The statements below involve the set \[ N:=\{x\in\mathbb{R}^{(1+T)d}\mid \sum x_t=0\}. \] \begin{theorem}\label{thm:exist} Assume that $\prod_{t=0}^T\mu_t\ll\mu$, there exists $m\in {\cal L}^1(S,{\cal F},\mu)$ such that \[ f(x,s)\ge m(s)\quad\forall x\in\mathbb{R}^{(1+T)d}\ \text{$\mu$-a.e.}, \] and that for every $s\in S$ \begin{equation}\label{eq:exist} \{x\in\mathbb{R}^{(1+T)d}\mid f^\infty(x,s)\le 0\} = N. \end{equation} Then \eqref{pf} has a solution. \end{theorem} \begin{proof} Let $x$ be feasible in \eqref{pf}. 
Since the set $N:=\{x\in\mathbb{R}^{(1+T)d}\mid \sum x_t=0\}$ is linear, condition \eqref{eq:exist} implies, by \cite[Corollary~8.6.1]{roc70a}, that $f(\cdot,s)$ is constant in the directions of $N$. Let \[ N_t:=\{x_t\in\mathbb{R}^d\mid \exists z\in\mathbb{R}^{(T-t)d}: (0,\dots,0,x_t,z_{t+1},\dots,z_T)\in N\}. \] The $s$-wise orthogonal projection $\tilde x_0$ of $x_0$ to $N_0$ has an extension $\tilde x\in {\cal L}^0({\cal F}_0;N)$ such that $x_0-\tilde x_0\in N_0^\perp$. Defining $\bar x^0:=x-\tilde x$, we have $f(\bar x^0)=f(x)$ everywhere in $S$ and $\bar x^0\in\mathop{\rm cl}\mathop{\rm dom} f$ $(\mu_t)_{t=0}^T$-almost everywhere. Repeating the argument for $t=1,\dots,T$, we arrive at an \[ \bar x^T\in \prod {\cal L}^0_t+ \sum_{t=0}^T {\cal L}^0({\cal F}_t;N) \] with $f(\bar x^T)=f(x)$ and $\bar x^T_t\in N_t^\perp$ everywhere in $S$ for all $t$ and $\bar x^T\in\mathop{\rm cl}\mathop{\rm dom} f$ $(\mu_t)_{t=0}^T$-almost everywhere. Let $(x^\nu)_{\nu=1}^\infty\subset\Phi$ such that $Ef(x^\nu)\le\inf\eqref{pf}+2^{-\nu}$. Since $f(x^\nu)$ is bounded in ${\cal L}^1$, Komlos' theorem gives the existence of a subsequence of convex combinations (still denoted by $(x^\nu)$) and $\beta\in {\cal L}^0$ such that $f(x^\nu)\le \beta$ almost surely. By the first paragraph, there exists $\bar x^\nu$ with $f(\bar x^\nu)=f(x^\nu)$ and $\bar x^\nu_t\in N_t^\perp$ everywhere in $S$ for all $t$, $\bar x^\nu\in\mathop{\rm cl}\mathop{\rm dom} f$ $(\mu_t)_{t=0}^T$-almost everywhere, and $\bar x^\nu\in \prod {\cal L}^0_t+ \sum_{t=0}^T {\cal L}^0({\cal F}_t;N)$. Thus $\bar x^\nu\in\{x\in {\cal L}^0\mid x\in\Gamma\ \text{a.e.}\}$, where \[ \Gamma(s):=\{x \in\mathbb{R}^{d(T+1)}\mid x_t\in N_t^\perp,\ f(x,s)\le \beta(s)\}. \] By Corollary~8.3.3 and Theorem~8.7 of \cite{roc70a}, the recession cone of $\Gamma(s)$ is given by $\Gamma^\infty(s)=\{x \mid x_t\in N_t^\perp, x\in N\}$. For $x\in \Gamma^\infty(s)$, we have $x_0\in N_0^\perp\cap N_0$, so $x_0=0$.
Repeating the argument for $t=1,\dots ,T$, we get that $x=0$ and so $\Gamma^\infty=\{0\}$ $\mu$-almost everywhere. By \cite[Theorem~8.4]{roc70a}, the sequence $(\bar x^\nu)$ is thus almost surely bounded. By Komlos' theorem, there exists a subsequence of convex combinations and $\bar x \in {\cal L}^0$ such that $(\bar x^\nu)\rightarrow \bar x$ $\mu$-almost everywhere. By Fatou's lemma, $\int f(\bar x)d\mu \le \liminf \int f(\bar x^\nu)d\mu\le\inf\eqref{pf}$. Since $\prod_{t=0}^T\mu_t\ll\mu$, the sequence $(\bar x^\nu)$ converges $\prod_{t=0}^T\mu_t$-almost everywhere. By Lemma~\ref{lem:css} below, $\bar x^\nu=\sum_{t=0}^T(\tilde x^\nu)^t$ for some $\mu_t$-almost everywhere converging $(\tilde x^\nu)^t\in{\cal L}^0({\cal B}(S_t),\mu_t)$, so $\bar x\in\mathop{\rm cl}\mathop{\rm dom} f$ $(\mu_t)_{t=0}^T$-almost everywhere. Let $\bar x^t$ be the limit of $(\tilde x^\nu)^t$. For every $t'$, \[ \hat x^{t'}:=(-\bar x^{t'}_0,\dots,-\bar x^{t'}_{t'-1},\sum_{t\ne t'}\bar x^{t'}_t,-\bar x^{t'}_{t'+1},\dots,-\bar x^{t'}_T) \] belongs to ${\cal L}^0(N)$, so $x:=\bar x+ \sum_{t'=0}^T \hat x^{t'}$ satisfies $f(x)=f(\bar x)$. We also have that $x\in{\cal P}hi$ so $x$ is optimal. \end{proof} \begin{remark} The conclusion of Theorem~\ref{thm:exist} still holds if $f$ is coercive in the sense that $\{x\,|\,f^\infty(x,s)\le 0\}=\{0\}$. In fact, the proof then simplifies considerably. A more general condition that covers both the condition of Theorem~\ref{thm:exist} as well as the coercivity condition is that there is a subset $J$ of the indices $\{0,\ldots,T\}$ such that \[ \{x\,|\,f^\infty(x,s)\le 0\}=\{x\,|\,\sum_{t\in J}x_t=0,\ x_t=0\quad \forall t\notin J\} \] for all $s\in S$. \end{remark} \begin{remark} When $f$ is given by \eqref{fgh}, we have \[ f^\infty(x,s) = \sum_{t=0}^Tg_t^\infty(x_t,s_t)+h^\infty(-\sum_{t=0}^Tx_t,s), \] by \cite[Theorems~9.3 and 9.5]{roc70a} as soon as $f$ is proper. \end{remark} The following lemma was used in the proof of Theorem~\ref{thm:exist}. 
For $T=1$, more general results can be found e.g.~in \cite{fg97}; see also \cite{rt97}. \begin{lemma}\label{lem:css} If $(x^\nu)\subset\sum_{t=0}^T {\cal L}^0({\cal F}_t,\prod_{t=0}^T\mu_t)$ converges $\prod_{t=0}^T\mu_t$-almost everywhere, then there exists $\mu_t$-almost everywhere converging sequences $((\tilde x^\nu)^t)\subset{\cal L}^0({\cal B}(S_t),\mu_t)$ such that $\sum (\tilde x^\nu)^t=x^\nu$. \end{lemma} \begin{proof} The statement is clearly valid for $T=0$. We proceed by induction on $T$. Let $(\sum_{t=0}^Tx^\nu_t)$ be a converging sequence in $\sum_{t=0}^T {\cal L}^0({\cal F}_t,\prod_{t=0}^T\mu_t)$ and let $A\subseteq S$ be the set where the convergence holds. Let $A_T(s_T):=\{s^{T-1}\,|\,(s^{T-1},s_T)\in A\}$. Since $(\prod_{t=0}^T \mu_t)(A)=1$, we have $\mu_T(\bar A_T)=1$, where $\bar A_T:=\{s_T\,|\,\prod_{t=0}^{T-1} \mu_t(A_T(s_T))=1\}$. Let $\bar s_T\in\bar A_T$, \begin{align*} (\tilde x^\nu)^T(s_T)&=(x^\nu)^T(s_T)-(x^\nu)^T(\bar s_T),\\ (\tilde x^\nu)^{T-1}(s_{T-1}) &=(x^\nu)^{T-1}(s_{T-1})+(x^\nu)^T(\bar s_T),\\ (\tilde x^\nu)^t&=x^\nu_t\qquad t=0,\ldots T-2, \end{align*} so that $\sum_t(x^\nu)^t = \sum_t(\tilde x^\nu)^t$. We have that $\sum_{t=0}^{T-1}(\tilde x^\nu)^t$ converges $\prod_{t=0}^{T-1}\mu_t$-almost everywhere and, by the induction hypothesis, there exist $\mu_t$-almost everywhere converging sequences $((\hat x^\nu)^t)\subset{\cal L}^0({\cal B}(S_t),\mu_t)$ such that $\sum_{t=0}^{T-1}(\hat x^\nu)^t=\sum_{t=0}^{T-1}(\tilde x^\nu)^t$. We also get that \[ \sum_{t=0}^T(x^\nu)^t -\sum_{t=0}^{T-1}(\tilde x^\nu)^t =(\tilde x^\nu)^T \] converges $\mu_T$-almost everywhere. This completes the induction argument. \end{proof} \section{Applications to problems with fixed marginals}\label{sec:mt} This section illustrates the results of the previous sections in the case of fixed marginals. 
More precisely, we will assume throughout that $d=1$ (the measures are scalar-valued), and that $G_t$ and $H$ are given in terms of integral functionals with $g_t(x_t,s_t)=x_t$ for each $t$ and $h(\cdot,s)$ nondecreasing. In this case, $g_t^*(\cdot,s_t)=\delta_{\{1\}}$ and $\mathop{\rm dom} H^*\subset C^*_+$ so the assumptions of Lemma~\ref{lem:radon} are satisfied and problem \eqref{d} can be written as \begin{equation}\label{mt} \begin{alignedat}{2} &\mathop{\rm minimize}\limits\quad & & H^*(\lambda) \quad\mathop{\rm over}\ \lambda\in M\\ &\mathop{\rm subject\ to}\quad & &\lambda_t=\mu_t\quad t=0,\ldots,T, \end{alignedat} \end{equation} while the relaxed primal problem \eqref{pr} becomes \begin{equation}\label{mtpr} \mathop{\rm minimize}\limits\quad \int_S\left[\sum_{t=0}^Tx_t + h(-\sum_{t=0}^T x_t)\right]d\mu \quad\mathop{\rm over}\quad x\in\Phi, \end{equation} where \[ \Phi = \{x\in\prod_{t=0}^T{\cal L}^0_t\,|\, -\sum_{t=0}^T x_t\in D\quad (\mu_t)_{t=0}^T\text{-a.e.}\}. \] Combining Theorems~\ref{thm:dual}, \ref{thm:oc} and \ref{thm:exist} gives the following. \begin{theorem}\label{thm:mt} Assume that $h$ satisfies the assumptions of Theorem~\ref{thm:if}, that $h^*(v,\cdot)$ is $\mu$-integrable for $v\in\mathbb{R}$ in a neighborhood of $1$ and that either $\mu=\prod_{t=0}^T\mu_t$ or $T=1$ and $\prod_{t=0}^T \mu_t\ll\mu$. Then the optima in \eqref{mt} and \eqref{mtpr} are attained, there is no duality gap and feasible solutions $x$ and $\lambda$ are optimal if and only if \begin{align*} d\lambda^a/d\mu &\in\partial h(-\sum_{t=0}^T x_t)\quad\mu\text{-a.e.},\\ d\lambda^s/d|\lambda^s| &\in N_D(-\sum_{t=0}^T x_t)\quad|\lambda^s|\text{-a.e.} \end{align*} If, in addition, $\mu=\prod_{t=0}^T\mu_t$, then $x_t\in{\cal L}^1_t$ for each feasible $x$ in \eqref{mtpr}.
\end{theorem} \begin{proof} Since $\mathop{\rm dom} G_t=C_t$ for all $t$ and $H$ is nondecreasing, Theorem~\ref{thm:dual} implies that the optimum in \eqref{mt} is attained and that there is no duality gap. To prove the attainment in \eqref{mtpr}, we apply Theorem~\ref{thm:exist} with \[ f(x,s)=\sum_{t=0}^Tx_t + h(-\sum_{t=0}^Tx_t,s). \] Assumption~\ref{ass2} holds trivially since $\mathop{\rm dom} g_t=\mathbb{R}^d$ for each $t$, so \eqref{mtpr} coincides with~\eqref{pf}. By the Fenchel inequality, \begin{equation}\label{fenchel} f(x,s) \ge (1-v) \sum_{t=0}^Tx_t - h^*(v,s), \end{equation} so the integrability condition implies that the lower bound in Theorem~\ref{thm:exist} holds with $m(s)= h^*(1,s)$. This also gives \[ f^\infty(x,s)\ge (1-v)\sum_{t=0}^Tx_t \] for $v$ in a neighborhood of $1$, so $f^\infty(x,s)\ge\epsilon|\sum_{t=0}^Tx_t|$ for some $\epsilon>0$. It follows that $f$ satisfies \eqref{eq:exist}. Thus, by Theorem~\ref{thm:exist}, the optimum in \eqref{mtpr} is attained. By Lemma~\ref{lem:ass1}, the integrability condition implies that the first part of Assumption~\ref{ass1} holds. If $T=1$, Lemma~\ref{lem:perp} implies that the second part of Assumption~\ref{ass1} is satisfied as well. If, on the other hand, $\mu=\prod_{t=0}^T\mu_t$, then, by Lemma~\ref{lem:ass1}, $x_t\in{\cal L}^1_t$ and the second part of Assumption~\ref{ass1} again holds. The rest now follows from Theorem~\ref{thm:oc} by observing that, when $g_t(x_t,s_t)=x_t$, the condition $d\lambda_t/d\mu_t\in\partial g_t(x_t)$ simply means that $\lambda_t=\mu_t$. \end{proof} \subsection{Monge--Kantorovich problem}\label{ssec:linear} Let $c$ be a measurable function on $S$ and let $h(u,s)=\delta_{(-\infty,c(s)]}(u)$.
In this case, \[ h^*(v,s)= \begin{cases} c(s)v & \text{if $v\ge 0$},\\ +\infty & \text{otherwise} \end{cases} \] and problem \eqref{mt} can be written as \begin{equation}\label{mk} \begin{alignedat}{2} &\mathop{\rm minimize}\limits\quad & & \int_S cd\lambda \quad\mathop{\rm over}\ \lambda\in M_+\\ &\mathop{\rm subject\ to}\quad & &\lambda_t=\mu_t\quad t=0,\ldots,T. \end{alignedat} \end{equation} When $T=1$, we recover the classical Monge--Kantorovich mass transportation problem; see e.g.\ \cite{acbbv3}, \cite{vil9}, \cite{leo6}, \cite{rr98} and their references. On the other hand, if $S_t$ coincide for all $t$, problem \eqref{mk} can be interpreted as the problem of finding a stochastic process $X=(X_t)_{t=0}^T$ such that $X_t$ has distribution $\mu_t$ and the expectation of $c(X)$ is minimized. It should be noted that \eqref{mk} depends on $\mu$ only through its marginals $\mu_t$. Thus, we choose \[ \mu=\prod_{t=0}^T\mu_t. \] Problem~\eqref{mtpr} becomes \begin{equation}\label{mkpr} \begin{alignedat}{2} &\mathop{\rm minimize}\limits\quad & & \int_S\sum_{t=0}^T x_td\mu\quad\mathop{\rm over}\quad x\in\Phi, \end{alignedat} \end{equation} where \[ \Phi = \{x\in\prod_{t=0}^T{\cal L}^0_t\,|\, -\sum_{t=0}^T x_t\le c\quad (\mu_t)_{t=0}^T\text{-a.e.}\}. \] Indeed, by Lemma~\ref{lem:mut}, $x\in\Phi$ implies $-\sum_{t=0}^Tx_t\in D$ $\mu$-almost everywhere so \[ \int_S\left[\sum_{t=0}^Tx_t + h(-\sum_{t=0}^T x_t)\right]d\mu = \int_S\sum_{t=0}^T x_td\mu. \] \begin{theorem} Assume that $c$ is lower semicontinuous and $\mu$-integrable with $c\ge K\psi$ for some $K\in\mathbb{R}$. Then the optima in \eqref{mk} and \eqref{mkpr} are attained, there is no duality gap and feasible solutions $\lambda$ and $x$ are optimal if and only if \[ \int_S\left(c+\sum_{t=0}^Tx_t\right)d\lambda=0.
\] Moreover, if $x$ is feasible in \eqref{mkpr}, then $x_t\in{\cal L}^1_t$ so the objective of \eqref{mkpr} can be written as \[ \int_S\sum_{t=0}^T x_td\mu = \sum_{t=0}^T\int_{S_t} x_td\mu_t. \] \end{theorem} \begin{proof} We now have $D(s)=\{u\in\mathbb{R}\,|\,u\le c(s)\}$ which is inner semicontinuous if and only if $c$ is lower semicontinuous; see \cite[Example~1.2*]{mic56}. The lower bound on $c$ implies that $h$ satisfies the assumptions of Theorem~\ref{thm:if}. Since $c$ is $\mu$-integrable, all the conditions of Theorem~\ref{thm:mt} are satisfied. The form of the optimality conditions follows simply by observing that now, $\partial h=N_D$. \end{proof} Instead of the lower bound $c\ge K\psi$, \cite[Theorem~5.10]{vil9} assumes the existence of $c_t\in{\cal L}^1_t$ such that $c\ge\sum_tc_t$. However, if there is no $K\in\mathbb{R}$ such that $c\ge K\psi$, then problem \eqref{p} is infeasible so the duality argument fails and, in particular, the first conclusion of \cite[Theorem~5.10]{vil9} does not hold. The function $c$ is integrable, in particular, if there exist $c_t\in{\cal L}^1_t$ such that $c\le\sum_tc_t$. This latter condition is assumed e.g.\ in \cite[Theorem~5.10]{vil9} in establishing the existence of solutions. \begin{remark} Feasibility of an $x$ means that the inequality constraint holds on a product set $A^x=A^x_0\times\cdots\times A^x_T$, where $\mu_t(A^x_t)=1$. Thus, every dual feasible solution $\lambda$ satisfies \[ \lambda((A^x)^c)\le\sum_{t=0}^T\lambda_t((A^x_t)^c) = \sum_{t=0}^T\mu_t((A^x_t)^c)=0. \] The optimality conditions thus imply that the optimal dual solutions $\lambda$ are supported by the sets \[ \Gamma_x:=\{s\in A^x\,|\,c(s)+\sum_{t=0}^Tx_t(s_t)\le 0\}, \] where $x$ runs through optimal primal solutions. 
The sets $\Gamma_x$ are {\em $c$-monotone} in the sense that \[ \sum_{i=1}^n c(s^i_0,\dots,s^i_T)\le \sum_{i=1}^n c(s^{P_0(i)}_0,\dots,s^{P_T(i)}_T) \] for any $(s^i_0,\dots,s^i_T)\in \Gamma_x$, $i=1,\dots n$ and any permutations $P_t$ of the indices $i$. Indeed, \[ \sum_{i=1}^n c(s^i_0,\dots,s^i_T) \le -\sum_{i=1}^n\sum_{t=0}^T x_t(s^i_t)= -\sum_{i=1}^n\sum_{t=0}^T x_t(s^{P_t(i)}_t) \le \sum_{i=1}^n c(s^{P_0(i)}_0,\dots,s^{P_T(i)}_T), \] where the last inequality follows from the feasibility of $x$ on $A^x$. This is a multivariate generalization of the $c$-cyclical monotonicity property studied e.g.\ in \cite{rr98} and \cite{vil9}. When $T=1$, it is known that a feasible $\lambda$ is optimal if it is concentrated on a $c$-monotone set. It would be natural to conjecture that this holds also for $T>1$. \end{remark} \subsection{Capacity constraints}\label{ssec:nonlinear} Let $c$ and $\phi$ be nonnegative measurable functions on $S$ and let \[ h(u,s)= \phi(s)[u-c(s)]^+. \] We get \[ h^*(v,s)= c(s)v + \delta_{[0,\phi(s)]}(v) \] so problem \eqref{mt} can be written as \begin{equation}\label{cc} \begin{alignedat}{2} &\mathop{\rm minimize}\limits\quad & & \int_Scd\lambda \quad\mathop{\rm over}\ \lambda\in M_+\\ &\mathop{\rm subject\ to}\quad & &\lambda\ll\mu,\quad\frac{d\lambda}{d\mu}\le\phi,\quad\lambda_t=\mu_t\quad t=0,\ldots,T. \end{alignedat} \end{equation} This models {\em capacity constraints} on the transport plan requiring $\lambda\le\phi\mu$, where the inequality is taken with respect to the natural order on $M$. Constrained variations of the Monge--Kantorovich problem are considered also in \cite[Chapter~7]{rr98}. What is called ``capacity constraints'' in \cite[Section~7.3]{rr98}, however, is different from the constraints of \eqref{cc}. 
In the case of finite $S$, problem~\eqref{cc} reduces to a network flow problem where the flow on each arc of the network is bounded from above by the value of $\phi$; see \cite{roc84} for a comprehensive study of linear and nonlinear network flow problems. Problem \eqref{mtpr} becomes \begin{equation}\label{ccpr} \begin{alignedat}{2} &\mathop{\rm minimize}\limits\quad & &\int_S\left[\sum_{t=0}^T x_t + \phi[\sum_{t=0}^Tx_t+c]^-\right]d\mu\quad\mathop{\rm over}\quad x\in\Phi, \end{alignedat} \end{equation} where \[ \Phi = \prod_{t=0}^T{\cal L}^0_t. \] Theorem~\ref{thm:mt} gives the following. \begin{theorem}\label{thm:cc} Assume that $\mu=\prod_{t=0}^T\mu_t$ and that $c$ and $\phi$ are $\mu$-integrable with $c\ge K\psi$ and $\phi\ge v$ for some $K\in\mathbb{R}$ and $v>1$. Then the optima in \eqref{cc} and \eqref{ccpr} are attained, there is no duality gap and feasible solutions $\lambda$ and $x$ are optimal if and only if \begin{align*} d\lambda/d\mu &= 0\quad\text{if}\quad -\sum_{t=0}^T x_t < c, \\ d\lambda/d\mu &\in [0,\phi]\quad \text{if}\quad -\sum_{t=0}^T x_t = c, \\ d\lambda/d\mu &=\phi\quad \text{if}\quad -\sum_{t=0}^T x_t > c. \end{align*} Moreover, if $x$ is feasible in \eqref{ccpr}, then $x_t\in{\cal L}^1_t$. \end{theorem} In the case of finite $S$, the optimality conditions in Theorem~\ref{thm:cc} correspond to the classical complementary slackness conditions in constrained network optimization problems; see \cite{roc84}. \subsection{Schr\"odinger problem}\label{ssec:sch} We now return to the Schr\"odinger problem \begin{equation*}\label{sp} \begin{alignedat}{2} &\mathop{\rm minimize}\limits\quad & & \int_S\ln(d\lambda/dR)d\lambda \quad\mathop{\rm over}\ \lambda\in M_+\\ &\mathop{\rm subject\ to}\quad & &\lambda\ll R,\quad\lambda_t=\mu_t\quad t=0,\ldots,T \end{alignedat} \end{equation*} studied in Section~\ref{ssec:sch0}.
We will derive optimality conditions and a dual problem under the assumption that there exists a feasible $\lambda$ equivalent to $R$. Denoting the feasible point by $\mu$ and $\phi:=d\mu/dR$, the problem can then be written as \begin{equation*} \begin{alignedat}{2} &\mathop{\rm minimize}\limits\quad & & \int_S\frac{d\lambda}{d\mu}\ln(\phi\frac{d\lambda}{d\mu})d\mu \quad\mathop{\rm over}\ \lambda\in M_+\\ &\mathop{\rm subject\ to}\quad & &\lambda\ll \mu,\quad\lambda_t=\mu_t\quad t=0,\ldots,T. \end{alignedat} \end{equation*} This fits the format of \eqref{mt} with $h(u,s)=\frac{e^u-1}{\phi(s)}$. Indeed, we have \[ h^*(v,s)= \begin{cases} v\ln(\phi(s)v) - v +1/\phi(s) & \text{if $v >0$},\\ 0 &\text{if $v=0$},\\ +\infty &\text{otherwise}, \end{cases} \] so that $(h^*)^\infty(\cdot,s)=\delta_{\{0\}}$ for all $s\in S$ and \[ H^*(\lambda)= \begin{cases} \int_S\frac{d\lambda}{d\mu}\ln(\phi\frac{d\lambda}{d\mu})d\mu & \text{if $\lambda\ll\mu$},\\ +\infty & \text{otherwise}. \end{cases} \] The relaxed primal problem becomes \begin{equation*}\label{sppr} \begin{alignedat}{2} &\mathop{\rm minimize}\limits\quad & & \int_S\left[\sum_{t=0}^Tx_t + \frac{\exp(-\sum_{t=0}^Tx_t)-1}{\phi}\right]d\mu\quad\mathop{\rm over}\quad x\in\prod_{t=0}^T{\cal L}^0_t. \end{alignedat} \end{equation*} Note that even when restricted to $x\in\prod C_t$, the objective is different from that in Theorem~\ref{thm:spd}. Theorem~\ref{thm:mt} gives the following. \begin{theorem}\label{thm:sp} Assume that $T=1$, $\prod_{t=0}^T\mu_t\ll R$ and that \eqref{sp} admits a feasible solution equivalent to $R$. Then the optimum in \eqref{sp} is attained and the optimal solutions $\lambda$ are characterized by the existence of an $x\in\prod_{t=0}^T{\cal L}^0_t$ such that \begin{align*} d\lambda/dR &=\exp(-\sum_{t=0}^Tx_t)\quad R\text{-a.e.}. 
\end{align*} If $\prod_{t=0}^T\mu_t$ is feasible and equivalent to $R$, then the same conclusion holds for any $T$ and, moreover, $x_t\in{\cal L}^1_t$ for feasible $x$ in \eqref{sppr}. \end{theorem} \begin{proof} Since $\mu\approx R$, the condition $\prod_{t=0}^T\mu_t\ll R$ means that $\prod_{t=0}^T\mu_t\ll\mu$. The feasibility of $\mu$ in \eqref{sp} (and the definition of $\phi$) implies that the integrability condition in Theorem~\ref{thm:mt} is satisfied. It is clear that $h$ satisfies the other conditions as well. The optimality conditions mean that $\lambda\approx\mu$ and \[ \frac{d\lambda}{d\mu}=\frac{\exp(-\sum_{t=0}^Tx_t)}{\phi}\quad\mu\text{-a.e.} \] which reduces to the one in the statement since $\mu\approx R$ and $\phi=d\mu/dR$. \end{proof} The necessity and sufficiency of the optimality condition in Theorem~\ref{thm:sp} were established for feasible solutions equivalent to $R$ in \cite[Theorem~3.43]{fg97} under the assumption that $R\ll\prod_{t=0}^T\mu_t$. Theorem~\ref{thm:sp} above gives the equivalence when $\prod_{t=0}^T\mu_t\ll R$ without assuming a priori the equivalence with $R$. The last statement of Theorem~\ref{thm:sp} seems new. An alternative condition for the integrability of $x_t$ is given in \cite[Proposition~1]{rt93}. Example~1 of \cite{rt93} shows that the integrability may fail without additional conditions. \section{Appendix} In this appendix we prove Theorem~\ref{thm:if}. The proof follows the arguments in \cite{per17} which in turn are based on those in \cite{roc71} and \cite{per14}. We reproduce the proofs here since we allow for unbounded scaling functions $\psi_t$ and we do not assume that $S$ is locally compact. Let $h$ be a convex normal ${\cal B}(S)$-integrand on $\mathbb{R}^d$, $\mu$ a nonnegative Radon measure on $S$ and let \[ I_h(u)=\int h(u)d\mu. 
\] \begin{theorem}\label{thm:app1} If $I_h$ is finite and continuous at some point on $C$, then $I_h$ is lsc and $I_h^*$ is proper and given by \[ I_h^*(\lambda)=\min_{\lambda'\in M}\{I_{h^*}(d\lambda'/d\mu)+\sigma_{\mathop{\rm dom} I_h}(\lambda-\lambda')\mid \lambda'\ll \mu\}. \] \end{theorem} \begin{proof} Defining the convex function $\bar I_h$ on $L^\infty$ by \[ \bar I_h(u)=\int h(u)d\mu, \] we have $I_h=\bar I_h\mathop{\text{\scriptsize $\circ$}} A$, where $A:C\rightarrow L^\infty(\mu)$ is the natural embedding. We equip $L^\infty$ with the essential supremum-norm. By \cite[Theorem~2]{roc71}, the continuity of $I_h$ at a point $\bar u$ implies that $\bar I_h$ is proper and continuous at $A \bar u$. Thus, by \cite[Theorem~19]{roc74}, \begin{align*} I_h^*(\lambda)&=\inf_{\theta\in (L^\infty)^*}\{\bar I^*_h(\theta) \mid A^*\theta =\lambda\}. \end{align*} By \cite[Theorem~1]{roc71}, the conjugate of $\bar I_h$ on $(L^\infty)^*$ can be expressed in terms of the Yosida-Hewitt decomposition $\theta=\theta^a+\theta^s$ as \[ \bar I_h^*(\theta)=I_{h^*}(d\theta^a/d\mu)+\sigma_{\mathop{\rm dom} \bar I_h}(\theta^s). \] We thus get \begin{align}\label{eq:roc71a} I_h^*(\lambda)&=\inf_{\theta\in (L^\infty)^*}\{I_{h^*}(d\theta^a/d\mu)+\sigma_{\mathop{\rm dom} \bar I_h}(\theta^s) \mid A^*(\theta^a+\theta^s) =\lambda\}. \end{align} It suffices to show that \begin{align}\label{eq:roc71} I_h^*(\lambda) &= \inf_{\tilde\theta\in (L^\infty)^*,\theta^a\ll\mu} \{I_{h^*}(d\theta^a/d\mu) + \sigma_{\mathop{\rm dom} \bar I_h}(\tilde\theta)\mid A^*(\theta^a+\tilde\theta)= \lambda\}. 
\end{align} Indeed, the formula in the statement follows by writing this as \begin{align*} I_h^*(\lambda)&=\inf_{\theta^a\ll\mu}\left\{I_{h^*}(d\theta^a/d\mu) + \inf_{\tilde \theta\in (L^\infty)^*}\{\sigma_{\mathop{\rm dom} \bar I_h}(\tilde\theta)\mid A^*\tilde \theta=\lambda-A^*\theta^a \}\right\}, \end{align*} and using the expression \[ \sigma_{\mathop{\rm dom} I_h}(\lambda-A^*\theta^a)=\inf_{\tilde \theta\in (L^\infty)^*}\{\sigma_{\mathop{\rm dom} \bar I_h}(\tilde \theta) \mid A^*\tilde\theta =\lambda-A^*\theta^a\}, \] which is obtained by applying \cite[Theorem~19]{roc74} to the function $\delta_{\mathop{\rm dom} I_h}=\delta_{\mathop{\rm dom} \bar I_h}\mathop{\text{\scriptsize $\circ$}} A$. To prove \eqref{eq:roc71}, let $\tilde\theta\in (L^\infty)^*$ such that $A^*(\theta^a+\tilde\theta)= \lambda$. For any $u\in C$, \begin{align*} \langle u,\lambda \rangle -I_h(u) &= \langle Au,\theta^a\rangle - \bar I_h(Au) + \langle u,A^*\tilde\theta\rangle, \end{align*} so taking supremum over $u\in\mathop{\rm dom} I_h$ gives \[ I_h^*(\lambda)\le I_{h^*}(d\theta^a/d\mu)+ \sigma_{\mathop{\rm dom}\bar I_h}(\tilde\theta). \] Minimizing over $\tilde\theta\in L^\infty(S)^*$ and $\theta^a\ll\mu$ such that $A^*(\theta^a+\tilde\theta)= \lambda$ gives \begin{align*} I_h^*(\lambda) &\le \inf_{\tilde\theta\in (L^\infty)^*,\theta^a\ll\mu} \{I_{h^*}(d\theta^a/d\mu) + \sigma_{\mathop{\rm dom}\bar I_h}(\tilde\theta)\mid A^*(\theta^a+\tilde\theta)= \lambda\}. \end{align*} The reverse inequality follows by noting that if we restrict $\tilde\theta$ to be purely singular with respect to $\mu$, we obtain the right hand side of \eqref{eq:roc71a}. \end{proof} \begin{theorem}\label{thm:app2} If $D$ is isc and $C(D)\ne\emptyset$, then for each $\lambda\in M$, \[ \sigma_{C_b(D)}(\lambda)=\int (h^*)^\infty(d\lambda/d|\lambda|)d|\lambda|. 
\] \end{theorem} \begin{proof} By Fenchel's inequality, \begin{align}\label{eq:fen1} \langle y,\lambda\rangle \le \int \sigma_{D}(d\lambda/d|\lambda|)d|\lambda| \end{align} for every $y\in C_b(D)$, so it suffices to show \[ \sup_{y\in C_b(D)} \langle y,\lambda\rangle\ge \int \sigma_{D}(d\lambda/d|\lambda|)d|\lambda|. \] We have, by \cite[Theorem 14.60]{rw98}, \begin{align*} \sup_{w\in L^\infty(\lambda;D)} \int wd\lambda =\int \sigma_{D}(d\lambda/d|\lambda|)d|\lambda|. \end{align*} Let $\tilde y\in C_b(D)$, \[ \alpha< \int \sigma_{D}(d\lambda/d|\lambda|)d|\lambda| \] and $w\in L^\infty(\lambda;D)$ be such that $\int wd\lambda>\alpha$. Fix $\epsilon>0$. By Lusin's theorem \cite[Theorem 7.1.13]{bog7}, there is an open $\tilde O\subset S$ such that $\int_{\tilde O} (|\tilde y|+ |w|)d|\lambda|<\epsilon/2$, $\tilde O^C$ is compact and $w$ is continuous relative to $\tilde O^C$. The mapping \[ \Gamma(s)= \begin{cases} w(s)\quad&\text{if } s\in \tilde O^C\\ D(s)\quad&\text{if } s\in \tilde O \end{cases} \] is isc convex closed nonempty-valued so that, by \cite[Theorem 3.1''\!']{mic56}, there is a continuous $\hat y$ on $S$ with $\hat y=w$ on $\tilde O^C$ and $\hat y\in D$ everywhere. Since $\hat y$ is continuous and bounded on $\tilde O^C$ which is compact, there is an open $\hat O$ such that $\hat y$ is bounded on $\hat O$. Since $\hat O^C$ is a countable intersection of open sets, we may choose $\hat O$ in a way that $\int_{\hat O\backslash \tilde O^C} |\hat y|d|\lambda|<\epsilon/2$. Since $\hat O$ and $\tilde O$ form an open cover of $S$ and since $S$ is normal, there is, by \cite[Theorem~36.1]{mun0}, a continuous partition of unity $(\hat \alpha,\tilde\alpha)$ subordinate to $(\hat O,\tilde O)$. 
Defining $y:=\hat \alpha \hat y+\tilde \alpha \tilde y$, we have $y\in C_b(D)$ and \begin{align*} \int yd\lambda &\ge \int_{\tilde O^C} wd\lambda-\int_{\hat O\backslash \tilde O^C} \hat\alpha |\hat y|d|\lambda|-\int_{\tilde O}\tilde\alpha |\tilde y|d|\lambda| \ge \alpha-\epsilon, \end{align*} which finishes the proof, since $\alpha<\int \sigma_{D}(d\lambda/d|\lambda|)d|\lambda|$ was arbitrary. \end{proof} \begin{theorem}\label{thm:ifb} Assume that $D(s):=\mathop{\rm dom} h(\cdot,s)$ is isc, $\mathop{\rm cl}\mathop{\rm dom} H=C_b(D)$ and that $H$ is finite and continuous at some $u\in C_b$. Then $H$ is a proper convex lsc function and the restriction to $M$ of its conjugate is given by \[ H^*(\lambda) = \int_S h^*(d\lambda^a/d\mu)d\mu + \int_S(h^*)^\infty(d\lambda^s/d|\lambda^s|)d|\lambda^s|, \] where $\lambda^s$ is the singular part of $\lambda\in M$ in its Lebesgue decomposition with respect to $\mu$. If $\mathop{\rm dom} H=C_b$, then $\mathop{\rm dom} H^*$ is contained in the set of Borel-measures absolutely continuous w.r.t.\ $\mu$. \end{theorem} \begin{proof} Since $\mathop{\rm int}\mathop{\rm dom} I_h\cap C_b(D)\ne\emptyset$, \cite[Theorem 20]{roc74} gives \begin{align*} H^*(\lambda)=(I_h+\delta_{C_b(D)})^*(\lambda) &=\min_{\lambda''}\{ I_h^*(\lambda-\lambda'')+\sigma_{C_b(D)}(\lambda'')\}. \end{align*} Thus, by Theorem~\ref{thm:app1}, \begin{align*} &H^*(\lambda)\\ &=\min_{\lambda''}\{\min_{\lambda'}\{I_{h^*}(d\lambda'/d\mu)+\sigma_{\mathop{\rm dom} I_h}(\lambda-\lambda'-\lambda'')\mid \lambda'\ll \mu\}+\sigma_{C_b(D)}(\lambda'')\}\\ &=\min_{\lambda'}\left\{I_{h^*}(d\lambda'/d\mu)+\min_{\lambda''}\{\sigma_{\mathop{\rm dom} I_h}(\lambda-\lambda'-\lambda'')+\sigma_{C_b(D)}(\lambda'')\}\;\middle|\; \lambda'\ll \mu\right\}. 
\end{align*} Since $\mathop{\rm int}\mathop{\rm dom} I_h\cap C_b(D)\ne\emptyset$, \cite[Theorem 20]{roc74} again gives \begin{align*} \sigma_{\mathop{\rm dom} H}(\lambda) &=\min_{\lambda''}\{ \sigma_{\mathop{\rm dom} I_h}(\lambda-\lambda'')+\sigma_{C_b(D)}(\lambda'')\}. \end{align*} Since, by assumption, $C_b(D)=\mathop{\rm cl} \mathop{\rm dom} H =\mathop{\rm cl}(\mathop{\rm dom} I_h\cap C_b(D))$, the left side equals $\sigma_{C_b(D)}(\lambda)$. Thus \begin{align}\label{eq:prf} H^*(\lambda)&=\min_{\lambda'}\{I_{h^*}(d\lambda'/d\mu)+\sigma_{C_b(D)}(\lambda-\lambda')\mid \lambda'\ll \mu\}. \end{align} For $\lambda\in M$, Theorem~\ref{thm:app2} now gives \begin{align*} H^*(\lambda)&=\min_{\lambda'}\{\int h^*(d\lambda'/d\mu)d\mu+\int (h^*)^\infty(d(\lambda-\lambda')/d\mu)d\mu\}\\ &\quad+\int (h^*)^\infty(d(\lambda^s)/d|\lambda^s|)d|\lambda^s|. \end{align*} By \cite[Corollary 8.5.1]{roc70a}, the last minimum is attained at $d\lambda'/d\mu=d\lambda/d\mu$, so the last expression equals $J_{h^*}(\lambda)$. If $\mathop{\rm dom} H= C_b$, \eqref{eq:prf} implies the claim. \end{proof} \begin{proof}[Proof of Theorem~\ref{thm:if}] Defining $\tilde h(u,s)= h(\psi(s) u,s)$, $\tilde D(s)=\mathop{\rm cl} \mathop{\rm dom} \tilde h(s)$ and \[ \tilde H(u) := I_{\tilde h}(u)+\delta_{C_b(\tilde D)}, \] on $C_b$, we get \begin{align*} H^*(\lambda) &=\sup_{u\in C}\{\langle u,\lambda\rangle -H(u)\}\\ &=\sup_{u\in C_b}\{ \langle u,\psi\lambda\rangle - H(\psi u)\}\\ &=\tilde H^*(\psi\lambda). \end{align*} Clearly, $\tilde D(s)=\{u \mid \psi(s)u \in \mathop{\rm dom} h(s)\}$. By \cite[Proposition 2.2]{mic56}, $D$ is isc if and only if $\tilde D$ is isc. It is thus clear that $H$ satisfies the assumptions of Theorem~\ref{thm:if} if and only if $\tilde H$ satisfies those of Theorem~\ref{thm:ifb}. Since $\tilde h^*(y,s)= h^*(y/\psi(s),s)$, an application of Theorem~\ref{thm:ifb} to $\tilde H^*(\psi\lambda)$ gives the expression for $H^*$ in the statement. 
As to the subdifferential formulas, we have $\lambda\in\partial H(u)\cap M$ if and only if $H(u)+J_{h^*}(\lambda)=\langle u,\lambda\rangle$. For any $u\in\mathop{\rm dom} H$ and $\lambda\in M$, we have Fenchel's inequalities \begin{align*} h(u)+h^*(d\lambda^a/d\mu)&\ge u\cdot(d\lambda^a/d\mu)\quad\mu\text{-a.e.,}\\ (h^*)^\infty(d\lambda^s/d|\lambda^s|) &\ge u\cdot (d\lambda^s/d|\lambda^s|)\quad|\lambda^s|\text{-a.e.}, \end{align*} which hold as equalities if and only if $H(u)+J_{h^*}(\lambda)=\langle u,\lambda\rangle$. These equalities are equivalent to the given pointwise subdifferential conditions. Since $\mathop{\rm dom} H^*=\{\lambda \in C^*\mid \psi\lambda \in \mathop{\rm dom} \tilde H^*\}$, the last claim follows from that of Theorem~\ref{thm:ifb}. \end{proof} \end{document}
\begin{document} \frenchspacing \allowdisplaybreaks[4] \title{\bfseries Lévy Langevin Monte Carlo} \author{David Oechsler\thanks{Technische Universit\"at Dresden, Institut f\"ur Mathematische Stochastik, Helmholtzstra{\ss}e 10, 01069 Dresden, Germany. \texttt{[email protected]}}~\thanks{Center of Scalable Data Analytics and Artificial Intelligence (ScDS.AI) Dresden/Leipzig, Germany}\; } \date{\today} \maketitle \begin{abstract}\noindent Analogue to the well-known Langevin Monte Carlo method, in this article we provide a method to sample from a target distribution \(\boldsymbol{\pi}\) by simulating a solution of a stochastic differential equation. Hereby, the stochastic differential equation is driven by a general Lévy process which - other than in the case of Langevin Monte Carlo - allows for non-smooth targets. Our method will be fully explored in the particular setting of target distributions supported on the half-line \((0,\infty)\) and a compound Poisson driving noise. Several illustrative examples conclude the article. \noindent MSC 2020: \emph{primary} 60G51; 60H10; 60G10 . \emph{secondary} 65C05. \noindent \emph{Keywords:} Langevin Monte Carlo, Lévy processes, stochastic differential equations, invariant distributions, limiting distributions. \end{abstract} \section{Introduction} Monte Carlo methods based on stationary Markov processes appear frequently in fields such as statistics, computer simulation and machine learning, and they have a variety of applications, for example in physics and biology, cf. \cite{bardenet2017markov,brooks2011handbook,kendall2005markov,FLI,welling2011bayesian}. These methods have in common that in order to sample from a target distribution \(\boldsymbol{\pi}\) one considers sample paths of certain Markov processes to approximate \(\boldsymbol{\pi}\).\\ \emph{Langevin Monte Carlo} (\textsc{lmc}) is one of these methods and it originates from statistical physics. 
It applies to absolutely continuous target distributions \(\boldsymbol{\pi}(\diff x)=\pi(x)\diff x\) with smooth density functions \(\pi:\mathds{R}^d\to\mathds{R}_+\), and its associated process \((X_t)_{t\geqslant0}\) is the so-called \emph{Langevin diffusion}, that is a strong solution of the stochastic differential equation (\textsc{sde}) \begin{align}\label{eq_LMC} \diff X_t = -\frac{\nabla \pi(X_t)}{\pi(X_t)}\diff t + \sqrt{2}\diff B_t, \end{align} where \((B_t)_{t\geqslant0}\) is a standard Brownian motion on \(\mathds{R}^d\), and \(\nabla\pi\) denotes the gradient of \(\pi\). For \textsc{LMC} to produce samples from \(\boldsymbol{\pi}\) it is required that \((X_t)_{t\geqslant0}\) is a unique strong solution for \eqref{eq_LMC} and \(\boldsymbol{\pi}\) is an invariant distribution for \((X_t)_{t\geqslant0}\), that is \begin{align}\label{eq_invmeas} \int_\mathds{R}\mathds{P}^x(X_t\in B)\boldsymbol{\pi}(\diff x)=\boldsymbol{\pi}(B)\quad \text{for all}\quad t\geqslant0, B\in\mathcal{B}(\mathds{R}). \end{align} However, for this to be the case it is only natural that assumptions must be made regarding \(\boldsymbol{\pi}\), e.g. that \(\nabla \pi\) exists in a suitable sense. Moreover, to sample from \(\boldsymbol{\pi}\) using \((X_t)_{t\geqslant0}\) it is essential that \((X_t)_{t\geqslant0}\) converges to \(\boldsymbol{\pi}\) in a suitable sense from any starting point \(x\in\operatorname{supp}\boldsymbol{\pi}\). As solutions \((X_t)_{t\geqslant0}\) of \eqref{eq_LMC} are almost surely continuous, \(\operatorname{supp}\boldsymbol{\pi}\) is necessarily connected for convergence to be even possible. For more on \textsc{lmc} see \cite{brooks2011handbook} or \cite{roberts1996exponential}. \\ Due to the constraints of \textsc{lmc} it is reasonable to ask whether one could construct similar methods by replacing the Brownian motion with a more general process. In this article we consider Lévy processes as driving noises. 
In particular, we are interested in the following question:\\ \noindent \textit{Given a distribution \(\boldsymbol{\pi}\) and a Lévy process \((L_t)_{t\geq0}\), can we choose a drift coefficient \(\phi\) such that we can sample from \(\boldsymbol{\pi}\) by simulation of a solution \((X_t)_{t\geqslant0}\) of \begin{align}\label{eq_sde} \diff X_t &= \phi(X_{t})\diff t + \diff L_t? \end{align}} \noindent There are various cases in the literature for which \textsc{sde}s of the form \eqref{eq_sde} are considered with Lévy processes as driving noises. In \cite{FLII} and \cite{FLI} a \emph{fractional Langevin Monte Carlo} (f\textsc{lmc}) method is introduced for which \((L_t)_{t\geq0}\) is an \(\alpha\)-stable process, and in \cite{eliazar}, several examples are produced for the case when the driving noise is a pure jump Lévy process. However, both of these studies are rather focused on practical aspects, disregarding some of the theoretical foundations. This will be discussed further in Remark \ref{rem_source}.\\ To thoroughly answer the above question it is essential to distinguish between the notions \emph{infinitesimally invariant distribution}, \emph{invariant distribution}, and \emph{limiting distribution}. These, and some more introductory notions and well-known facts can be found in Section \ref{sec_prel} of this article. After that, in Section \ref{sec_inva}, we investigate under which conditions a drift coefficient \(\phi\) exists such that \(\boldsymbol{\pi}\) is infinitesimally invariant for \((X_t)_{t\geqslant0}\). Clearly, there are cases for which this is not the case, think of discrete distributions, or distributions on a half-space while jumps can occur in all directions. Hence, a general answer can only exist under certain assumptions on the regularity of \(\boldsymbol{\pi}\) and the compatibility of \(\boldsymbol{\pi}\) and \((L_t)_{t\geq0}\). 
\\ In the same section, we then find a particular set of conditions under which \(\boldsymbol{\pi}\) is invariant and limiting for \((X_t)_{t\geqslant0}\). Various examples subsequently illustrate our results. Afterwards, in Section \ref{sec_prf}, we present the more technical aspects of the proofs, followed in Section \ref{sec_out} by a list of possible extensions with comments on the difficulties they might pose.\\ Methodologically we rely on the results in \cite{behmeoechsler} on invariant measures of Lévy-type processes, the Foster-Lyapunov methods originating in a series of articles by S.P. Meyn and R.L. Tweedie (\cite{MTI}-\cite{MTIII}), and standard techniques from the theory of ordinary differential equations. \section{Preliminaries}\label{sec_prel} Throughout this paper we denote by \(L^{p}(\mathds{R})\) and \(W^{k,p}(\mathds{R})\) the classic Lebesgue and Sobolev spaces, and by \(\mathcal{C}_\infty(\mathds{R})\) the space of continuous functions vanishing at infinity, i.e. functions \(f\in\mathcal{C}(\mathds{R})\) such that for all \(\varepsilon>0\) there exists a compact set \(K\subset\mathds{R}\) such that for all \(x\in\mathds{R}\setminus K\) it holds \(|f(x)|<\varepsilon\). \\ Let \(U\subseteq\mathds{R}\) be an open set. As usual, \(\mathcal{C}_c^\infty(U)\) denotes the space of test functions, i.e. smooth functions \(f\) with compact support \(\operatorname{supp} f\subset U\). Linear functionals \(T:\mathcal{C}_c^\infty(U)\to\mathds{R}\) that are continuous w.r.t. uniform convergence of all derivatives on compact subsets are called \emph{Schwartz distributions}. The \emph{distributional derivative} of a Schwartz distribution \(T\) is defined by \(T':\mathcal{C}_c^\infty(U)\to\mathds{R}, f\mapsto -T(f')\). 
If there exists \(N\in \mathds{N}_0\) such that for all compact sets \(K\subset U\) there exists \(c>0\) such that \begin{align*} |T(f)|\leqslant c\max\{|f^{(n)}|: n\leqslant N\} \end{align*} for all \(f\in\mathcal{C}_c^\infty(U)\) with \(\operatorname{supp} f\subset K\), then the smallest such \(N\) is called the \emph{order} of \(T\). If no such \(N\) exists, the order of \(T\) is set to \(\infty\). \paragraph{Markov processes and generators} Let \((X_t)_{t\geqslant0}\) be a Markov process in \(\mathds{R}\) on the probability space \((\Omega,\mathcal{F},\mathds{P})\). We denote \begin{align*} \mathds{P}^x(~\cdot~):=\mathds{P}(~\cdot~|X_0=x)\quad\text{and}\quad \mathds{E}^x[~\cdot~]:=\mathds{E}[~\cdot~|X_0=x] \end{align*} for all \(x\in\mathds{R}\). The \emph{pointwise generator} of \((X_t)_{t\geqslant0}\) is the pair \((\mathcal{A},\mathscr{D}(\mathcal{A}))\) defined by \begin{align*} \mathcal{A} f(x):=\lim_{t\downarrow0} \frac{\mathds{E}^x f(X_t) - f(x)}{t},\quad x\in\mathds{R},\quad f\in\mathscr{D}(\mathcal{A}) \end{align*} where \begin{align*} \mathscr{D}(\mathcal{A}):=\left\{f\in\mathcal{C}_\infty(\mathds{R}): \lim_{t\downarrow0} \frac{\mathds{E}^x f(X_t) - f(x)}{t} \text{ exists for all } x\in\mathds{R} \right\}. \end{align*} Further, denote by \(\mathcal{D}(\mathcal{G})\) the set of all functions \(f:\mathds{R}\to\mathds{R}\) for which there exists a measurable function \(g:\mathds{R}\to\mathds{R}\) such that for all \(x\in\mathds{R}\) and \(t>0\) it holds \begin{align*} \mathds{E}^xf(X_t) = f(x) + \mathds{E}^x\left[\int_0^t g(X_s)\diff s\right] \end{align*} and \begin{align*} \int_0^t\mathds{E}^x\left[ |g(X_s)|\right]\diff s <\infty. \end{align*} Setting \(\mathcal{G} f=g\) the pair \((\mathcal{G},\mathcal{D}(\mathcal{G}))\) is called the \emph{extended generator} of \((X_t)_{t\geqslant0}\). 
\paragraph{Lévy processes} A (one-dimensional) Lévy process \((L_t)_{t\geq0}\) is a Markov process with stationary and independent increments with characteristic exponent \(\varphi(\beta):=\ln\mathds{E}[\mathrm{e}^{i\beta L_1}]\) given by \begin{align*} \varphi(\beta)=i\gamma\beta - \frac12\sigma^2\beta^2 + \int_{\mathds{R}}\left(\mathrm{e}^{i\beta z}-1\right)\mu(\diff z) + \int_{\mathds{R}}\left(\mathrm{e}^{i\beta z}-1-i\beta z \mathds{1}_{\{|z|<1\}}\right)\rho(\diff z). \end{align*} Here, \(\gamma\in\mathds{R}\) is the \emph{location parameter}, \(\sigma^2\geqslant0\) is the \emph{Gaussian parameter}, and \(\mu\) and \(\rho\) are two measures on \(\mathds{R}\) such that \(\mu\{0\}=\rho\{0\}=0\) and \(\int_{\mathds{R}}(1\wedge|z|)\mu(\diff z)<\infty\) and \(\int_{\mathds{R}}(|z|\wedge|z|^2)\rho(\diff z)<\infty\), respectively. The measure \(\Pi=\mu+\rho\) is called the \emph{jump measure}. The triplet \((\gamma,\sigma^2,\Pi) =(\gamma,\sigma^2,\mu+\rho) \) is called the \emph{characteristic triplet} of \((L_t)_{t\geq0}\). Note that the decomposition of \(\Pi\) into \(\mu\) and \(\rho\) is not unique.\\ Further, denote by \begin{align*} \overline\mu(x)&=\begin{cases} \mu(x,\infty), &x>0,\\ \mu(-\infty,x), &x<0,\\ 0, & x=0, \end{cases} \end{align*} the \emph{integrated tail} of \(\mu\), and by \(\overline\mu_s(x):=\operatorname{sgn}(x)\overline\mu(x)\) the \emph{signed integrated tail} of \(\mu\). We similarly define the \emph{double integrated tail} of \(\rho\) by \begin{align*} \overline{\overline\rho}(x)&=\begin{cases} \int_{(x,\infty)}\rho(z,\infty)\diff z, &x>0,\\ \int_{(-\infty,x)}\rho(-\infty,z)\diff z, &x<0,\\ 0, & x=0. \end{cases} \end{align*} A Lévy process \((L_t)_{t\geq0}\) with \(\sigma^2=0,~\rho=0\) and \(|\mu|<\infty\) is called a \emph{compound Poisson process}. If, additionally, \(\operatorname{supp}\mu\subset\mathds{R}_+\) then \((L_t)_{t\geq0}\) is called a \emph{spectrally positive compound Poisson process}. 
\paragraph{Invariant measures and Harris recurrence} Let \((X_t)_{t\geqslant0}\) be a Markov process on \(\mathds{R}\) with open state space \(\mathcal O\subseteq\mathds{R}\) and with pointwise generator \((\mathcal{A},\mathscr{D}(\mathcal{A}))\). As mentioned in the introduction, a measure \(\boldsymbol{\pi}\) with \(\operatorname{supp}\boldsymbol{\pi}\subset\overline{\mathcal O}\) is called \emph{invariant} for \((X_t)_{t\geqslant0}\) if \eqref{eq_invmeas} holds. It is called \emph{infinitesimally invariant} for \((X_t)_{t\geqslant0}\) if \begin{align*} \int_\mathds{R} \mathcal{A} f(x)\boldsymbol{\pi}(\diff x) = 0\quad \text{for all } f\in\mathcal{C}_c^\infty(\mathcal O), \end{align*} and \(\boldsymbol{\pi}\) is called \emph{limiting} for \((X_t)_{t\geqslant0}\) if \(\boldsymbol{\pi}\) is a \emph{distribution}, i.e. \(|\boldsymbol{\pi}|=1\), and \begin{align*} \lim_{t\to\infty}\|\mathds{P}^x(X_t\in \cdot)-\boldsymbol{\pi}\|_{\mathrm{TV}}=0, \quad \text{for all } x\in\mathcal O, \end{align*} where \(\|\cdot\|_{\mathrm{TV}}\) denotes the total variation norm.\\ The process \((X_t)_{t\geqslant0}\) is called \emph{Harris recurrent} if there exists a non-trivial \(\sigma\)-finite measure \(a\) on \(\mathds{R}\) such that for all \(B\in\mathcal{B}(\mathds{R})\) with \(a(B)>0\) it holds \(\mathds{P}^x(\tau_B<\infty)=1\) where \(\tau_B:=\inf\{t\geqslant0: X_t\in B\}\). It is well-known (cf. \cite{MTIII}) that for any Harris recurrent Markov process \((X_t)_{t\geqslant0}\) an invariant measure \(\boldsymbol{\pi}\) exists which is unique up to multiplication with a constant. If \(\boldsymbol{\pi}\) is finite it can be normalized to be a distribution. In this case \((X_t)_{t\geqslant0}\) is called \emph{positive Harris recurrent}. \section{Lévy Langevin Monte Carlo}\label{sec_inva} Let \(\boldsymbol{\pi}\) be a probability distribution on \(\mathds{R}\), and let \((L_t)_{t\geq0}\) be a Lévy process on \(\mathds{R}\). 
Further, let \((X_t)_{t\geqslant0}\) be a solution of \begin{align*} \diff X_t = \phi(X_t)\diff t + \diff L_t. \end{align*} Can we choose \(\phi:\mathds{R}\to\mathds{R}\) in such a way that \(\boldsymbol{\pi}\) is limiting for \((X_t)_{t\geqslant0}\)? In the spirit of f\textsc{lmc} we call the sampling of \((X_t)_{t\geqslant0}\) in order to sample from \(\boldsymbol{\pi}\) \emph{Lévy Langevin Monte Carlo} (\textsc{llmc}).\\ As mentioned in the introduction a general answer to this question cannot be given without certain conditions on \(\boldsymbol{\pi}\) and \((L_t)_{t\geq0}\). Throughout, we assume that \(\boldsymbol{\pi}(\diff x)=\pi(x)\diff x\) is absolutely continuous, and that the Lévy process \((L_t)_{t\geq0}\) with characteristic triplet \((\gamma,\sigma^2,\Pi)\) is not purely deterministic, i.e. \(\sigma^2>0\) or \(\Pi\neq0\). Recall that \(\Pi=\mu+\rho\) with \(\mu\) and \(\rho\) as in Section \ref{sec_prel}.\\ Define \(\mathcal{E}:=\{x\in\mathds{R}: \pi(x)>0\}\) and assume either \(\mathcal{E}=\mathds{R}\) or some open half-line - without loss of generality we choose in this case \(\mathcal{E}=(0,\infty)\). This choice of \(\mathcal{E}\) is not a real restriction, as explained further in Section \ref{sec_out}. Additionally we assume the following: \begin{enumerate} \item[\textbf{(a1)}] If \(\mathcal{E}=(0,\infty)\), then \((L_t)_{t\geq0}\) is a spectrally positive compound Poisson process. \item[\textbf{(a2)}] If \(\mathcal{E}=(0,\infty)\), then there exists \(c>0\) such that \(\int_0^x\pi(z)\diff z\leqslant cx\pi(x)\) for all \(x\ll1\). \item[\textbf{(a3)}] If \((L_t)_{t\geq0}\) has paths of unbounded variation, then \(\pi\in W^{1,1}_\mathrm{loc}(0,\infty)\). \end{enumerate} \subsection{Infinitesimally invariant distributions} \begin{theorem}\label{thm_infinv} Let \((L_t)_{t\geq0}\) be a Lévy process in \(\mathds{R}\) with characteristic triplet \((\gamma,\sigma^2,\Pi)\) with \(\Pi=\mu+\rho\) as in Section \ref{sec_prel}. 
Let \(\boldsymbol{\pi}\) be a distribution on \(\mathds{R}\) such that \textbf{(a1)} - \textbf{(a3)} are fulfilled. Consider the \textsc{sde} \eqref{eq_sde} with \begin{align}\label{eq_drift_coeff} \phi(x):=\mathds{1}_{\mathcal{E}}(x)\left(\frac{\frac{1}{2}\sigma^2 \pi'(x)- \overline\mu_s *\pi(x)+ (\overline{\overline\rho}*\pi)'(x)}{\pi(x)}-\gamma\right). \end{align} Then \(\boldsymbol{\pi}\) is an infinitesimally invariant distribution of any solution \((X_t)_{t\geqslant0}\) of \eqref{eq_sde}. \end{theorem} The proof of Theorem \ref{thm_infinv} will be clearer if we point out the primary thoughts behind Assumptions \textbf{(a1)} - \textbf{(a3)} first. \begin{remark} Assumptions \textbf{(a1)} and \textbf{(a2)} make sure that the process \((X_t)_{t\geqslant0}\) stays in the open half-line \((0,\infty)\) if \(\mathcal{E}=(0,\infty)\). The former does so by allowing only upward jumps while the latter guarantees that \((X_t)_{t\geqslant0}\) cannot drift onto \(0\), as we will see in the proof below.\\ Clearly, Assumption \textbf{(a3)} becomes only relevant if \(\mathcal{E}=\mathds{R}\), and it ensures that our choice of the drift coefficient in \eqref{eq_drift_coeff} is well-defined. Note that it can be weakened if \(\sigma^2=0\) as \(\pi\in W^{1,1}_\mathrm{loc}(0,\infty)\) is sufficient but not necessary for \((\overline{\overline\rho}*\pi)'\) to be well-defined. However, since we discuss in this article mostly processes with paths of bounded variation we choose to omit various special cases for the sake of clarity. \end{remark} \begin{proof}[Proof of Theorem \ref{thm_infinv}] Denote by \(\mathcal O\subset\mathds{R}\) the state space of \((X_t)_{t\geqslant0}\). In order to show that \(\mathcal{O}=\mathcal{E}\) we prove that \(X_t\in\mathcal{E}\) for all \(t\geqslant0\) if \(X_0\in\mathcal{E}\). As this is trivially true for \(\mathcal{E}=\mathds{R}\) we show it only for \(\mathcal{E}=(0,\infty)\). 
\\ In this case \((L_t)_{t\geq0}\) is a spectrally positive compound Poisson process, by \textbf{(a1)}. Thus, \((X_t)_{t\geqslant0}\) cannot exit \(\mathcal{E}\) via jumps. We are going to show that \((X_t)_{t\geqslant0}\) cannot exit via drift either. If no jump of \((L_t)_{t\geq0}\) interrupts the path of \((X_t)_{t\geqslant0}\) then \(t\mapsto X_t\) is monotone decreasing and follows the autonomous differential equation \begin{align*} \begin{cases} \diff X_t=\phi(X_t)\diff t,\\ X_0=x>0, \end{cases} \end{align*} with \(\phi(x)=-\mathds{1}_{(0,\infty)}(x)\frac{\overline\mu_s*\pi(x)}{\pi(x)}\). Separation of variables yields that the time \(T\) it takes for \((X_t)_{t\geqslant0}\) to drift from \(x\) to \(x'\in[0,x]\) is given by \begin{align*} T= \int_{x'}^x \frac{\pi(z)}{\overline\mu_s*\pi(z)}\diff z. \end{align*} By \textbf{(a1)} and \textbf{(a2)}, \(\overline\mu_s*\pi(x)\leqslant cx\pi(x)\) for some constant \(c>0\), and thereby \begin{align}\label{eq_subor} \int_0^x \frac{\pi(z)}{\overline\mu_s*\pi(z)} \diff z =\infty \end{align} for all \(x>0\). Hence, \((X_t)_{t\geqslant0}\) cannot drift onto \(0\) in finite time. Therefore, \(\mathcal{O}=\mathcal{E}\) in this case as well.\\ We now return to the general case. A straightforward application of Itô's lemma and the Lévy-Itô decomposition, similar to \cite[Thm. 2.50]{schnurr2009symbol}, yields that for the pointwise generator \((\mathcal{A},\mathscr{D}(\mathcal{A}))\) of \((X_t)_{t\geqslant0}\) it holds \(\mathcal{C}_c^\infty(\mathcal O)\subset\mathscr{D}(\mathcal{A})\), and \begin{align*} \mathcal{A} f(x)&= (\phi(x)+\gamma)f'(x) + \frac12 \sigma^2 f''(x)\\ &\qquad+ \int_{\mathds{R}} (f(x+z)-f(x))\mu(\diff z)\\ &\qquad + \int_{\mathds{R}} (f(x+z)-f(x)- zf'(x))\rho(\diff z) \end{align*} for all \(f\in\mathcal{C}_c^\infty(\mathcal O)\). By \cite[Thm. 
4.2]{behmeoechsler} a measure \(\boldsymbol{\eta}\) is infinitesimally invariant for \((X_t)_{t\geqslant0}\) if \begin{align}\label{eq_iile} -((\phi+\gamma) \boldsymbol{\eta})' + \frac12\sigma^2 \boldsymbol{\eta}''- (\overline\mu_s*\boldsymbol{\eta})' + (\overline{\overline\rho}*\boldsymbol{\eta})'' = 0 \end{align} in the distributional sense w.r.t. \(\mathcal{C}_c^\infty(\mathcal O)\). Because of \(\mathcal{O}=\mathcal{E}\), simply inserting \(\boldsymbol{\pi}\) into \eqref{eq_iile} proves the claim. \end{proof} \subsection{Invariant distributions} In general, proving that an infinitesimally invariant distribution is an invariant distribution is hard. The best-case scenario is given when \((X_t)_{t\geqslant0}\) is a Feller process and the test functions constitute a core of the pointwise generator of \((X_t)_{t\geqslant0}\). In this case infinitesimally invariant and invariant are equivalent notions, cf. \cite{liggett}.\\ Although there exist easily verifiable conditions on the drift coefficient \(\phi\) and \((L_t)_{t\geq0}\) such that a solution of \eqref{eq_sde} is a Feller process (cf. \cite{kuhn}) these have some drawbacks. The fact that typically, \(\phi\) is required to be continuous and fulfills a linear growth condition, i.e. \(|\phi(x)|\leqslant C(1+|x|)\) for some \(C>0\), excludes many interesting cases. Moreover, even if \((X_t)_{t\geqslant0}\) is a Feller process, we are still left with the question whether the test functions form a core. The task of finding conditions for this to be the case is an open problem (cf. \cite{bottcher}) which has not yet been answered to the best of our knowledge. 
\begin{remark}\label{rem_source} Both the article \cite{eliazar} on Lévy Langevin dynamics and the original article \cite{FLI} on f\textsc{lmc} do not provide arguments as to why the considered target measures are invariant for the respective processes.\\ The chosen appraoch in both articles revolves around finding a stationary solution for Kolmogorov's forward equation of the underlying \textsc{sde} \eqref{eq_sde}. This equation, which is also known as Fokker-Planck equation, is inherently connected to invariant distributions as any weak stationary solution of it can be associated to an infinitesimally invariant measure of a solution \((X_t)_{t\geqslant0}\) of \eqref{eq_sde}. In the aforementioned articles it is suggested that the transition densities \(p(t,x,y)\) of \((X_t)_{t\geqslant0}\) defined via \begin{align*} \mathds{P}^x(X_t\in B)=\int_B p(t,x,y)\diff y \end{align*} solve the associated Kolmogorov forward equation. For many processes this is true, e.g. Feller diffusions (cf. \cite{kallenberg}) just to name one. However, for \textsc{sde}s \eqref{eq_sde} with general Lévy noises we were not able to find a reference with a rigorous proof of this claim. If it was indeed true, then any invariant measure \(\boldsymbol{\pi}\) of \((X_t)_{t\geqslant0}\) would necessarily be a stationary solution of Kolmogorov's forward equation. \\ Moreover, both articles are missing an argument as to why the stationary solution of Kolmogorov's forward equation is unique. Although in \cite{FLI} another article (cf. \cite{schertzer2001fractional}) is cited on this topic, in said reference uniqueness is merely argued heuristically but not proved. \end{remark} To ensure methodological rigor, we present in this section a different way of showing that \(\boldsymbol{\pi}\) is an invariant distribution of \((X_t)_{t\geqslant0}\), and as such even unique. 
We consider this approach in the special case of \(\mathcal{E}=(0,\infty)\) and \((L_t)_{t\geq0}\) being a compound Poisson process but similar results can be achieved in other frameworks by adjusting the individual steps in a suitable way. This will also be discussed in Section \ref{sec_exam} below.\\ \noindent Let us now briefly describe our setting. Denote by \begin{align*} \mathcal P:=\{(x_i)_{i\in\mathds{Z}}\subset (0,\infty)^\mathds{Z}:~& x_i<x_{i+1} \text{ for all }i\in\mathds{Z}, \text{ and}\\ &0 \text{ is the unique accumulation point of } (x_i)_{i\in\mathds{Z}}\} \end{align*} a set of partitions of the open interval \((0,\infty)\). We call a function \(f\in L^1_\mathrm{loc}(0,\infty)\) \emph{piecewise weakly differentiable} if there exists a partition \((x_i)_{i\in\mathds{Z}}\in \mathcal P\) such that \(f|_{(x_i,x_i+1)}\in W^{1,1}(x_i,x_{i+1})\) for all \(i\in\mathds{Z}\). Analogously, we call \(f\) \emph{piecewise Lipschitz continuous} if there exists a partition \((x_i)_{i\in\mathds{Z}}\in\mathcal P\) such that \(f|_{(x_i,x_i+1)}\) is Lipschitz continuous for all \(i\in\mathds{Z}\). \\ Let \((L_t)_{t\geq0}\) be a Lévy process in \(\mathds{R}\) and let \(\boldsymbol{\pi}(\diff x)=\pi(x)\diff x\) be an absolutely continuous distribution on \((0,\infty)\). Our assumptions are as follows: \begin{enumerate} \item[\textbf{(b1)}] \(\pi\) is a positive, piecewise weakly differentiable function, and there exist constants \(C,C',\alpha>0\) such that \(\lim_{x\to\infty}\pi(x)\mathrm{e}^{\alpha x}=C\), and \(\int_0^x \pi(z)\diff z\leqslant C' \pi(x)x\) for \(x\ll1\). \item[\textbf{(b2)}] \((L_t)_{t\geq0}\) is a spectrally positive compound Poisson process, i.e. a Lévy process with characteristic triplet \((0,0,\mu)\) such that \(\operatorname{supp}\mu\subset\mathds{R}_+\) and \(\int_0^\infty (1 \vee z)\mu(\diff z)<\infty\). \end{enumerate} Note that our standing assumptions \textbf{(a1)} - \textbf{(a3)} are direct consequences of \textbf{(b1)} and \textbf{(b2)}. 
In this setting, the drift coefficient given by \eqref{eq_drift_coeff} is reduced to \begin{align}\label{eq_driftcpn} \phi(x)= -\mathds{1}_{(0,\infty)}(x)\frac{\overline\mu_s*\pi(x)}{\pi(x)}, \end{align} and it is easy to see that \(\phi(x)\in(-\infty,0)\) for all \(x>0\). \\ Sometimes it will be advantageous to write \(L_t=\sum_{i=1}^{N_t}\xi_i\), where \((N_t)_{t\geqslant0}\) is a Poisson process with intensity \(|\mu|\) and \((\xi_i)_{i\in\mathds{N}}\) is a sequence of i.i.d. random variables distributed according to \(\mu/|\mu|\). Note that \(\mathds{E}\xi_1<\infty\) by \textbf{(b2)}.\\ With the following theorem we show that, under \textbf{(b1)} and \textbf{(b2)}, a solution \((X_t)_{t\geqslant0}\) of \eqref{eq_sde} has the unique invariant distribution \(\boldsymbol{\pi}\) if, additionally, one of the following two conditions is met: \begin{enumerate} \item[\textbf{(c1)}] There exists \(n\in\mathds{N}\) such that \(\operatorname{supp} \mu \subset(1/n,n)\), or \item[\textbf{(c2)}] \(\pi\) is piecewise Lipschitz continuous. \end{enumerate} \begin{theorem}\label{thm_cpn} Assume that \textbf{(b1)} and \textbf{(b2)} hold, and let \((X_t)_{t\geqslant0}\) be a solution of \eqref{eq_sde} with \(\phi\) as in \eqref{eq_driftcpn}. Then \begin{enumerate} \item \((X_t)_{t\geqslant0}\) is positive Harris recurrent, and \item any invariant distribution of \((X_t)_{t\geqslant0}\) is an infinitesimally invariant distribution of \((X_t)_{t\geqslant0}\). \end{enumerate} Additionally, if \textbf{(c1)} or \textbf{(c2)} are fulfilled, then \begin{enumerate} \item [(iii)] \(\pi\) is the unique invariant distribution of \((X_t)_{t\geqslant0}\). \end{enumerate} \end{theorem} The proof of Theorem \ref{thm_cpn} is presented in Section \ref{sec_prf}, and it is divided into several steps. The first assertion is shown by using the Foster-Lyapunov method of \cite{MTI} - \cite{MTIII}, while the second assertion is a simple application of \cite[Cor. 5.4]{behmeoechsler}. 
Under \textbf{(c1)} we show Theorem \ref{thm_cpn} (iii) via techniques from the theory of ordinary differential equations. If instead \textbf{(c2)} is true, we approximate \((X_t)_{t\geqslant0}\) by a sequence of processes fulfilling \textbf{(c1)} to prove the claim. \subsection{Limiting distributions} The natural follow-up question of Theorem \ref{thm_cpn} is whether existence and uniqueness of an invariant distribution \(\eta\) for \((X_t)_{t\geqslant0}\) implies that \(\eta\) is a limiting distribution. This property of \((X_t)_{t\geqslant0}\), i.e. the existence of a limiting distribution, is called \emph{ergodicity}.\\ As before, \(\boldsymbol{\pi}(\diff x)=\pi(x)\diff x\), \(\mathcal{E}=(0,\infty)\), and \((L_t)_{t\geq0}\) is a spectrally positive compound Poisson process. \begin{corollary}\label{cor_erg} Assume \textbf{(b1)} and \textbf{(b2)} hold, and let \((X_t)_{t\geqslant0}\) be a solution of \eqref{eq_sde} with \(\phi\) as in \eqref{eq_driftcpn}. Further assume that some skeleton chain of \((X_t)_{t\geqslant0}\) is irreducible, i.e. there exits \(\Delta>0\) such that for all \(B\in\mathcal{B}((0,\infty))\) with \(\lambda^{\mathrm{Leb}}(B)>0\) and all \(x\in(0,\infty)\) there exists \(n\in\mathds{N}\) such that \begin{align*} \mathds{P}^x(X_{n\Delta}\in B)>0. \end{align*} Then \((X_t)_{t\geqslant0}\) is \emph{ergodic}. \end{corollary} \begin{proof} Follows directly from Theorem \ref{thm_cpn} (i) and \cite[Thm. 6.1]{MTII}. \end{proof} \begin{lemma}[Irreducible skeleton chain]\label{lem_full} Assume \textbf{(b1)} and \textbf{(b2)} hold, and let \((X_t)_{t\geqslant0}\) be a solution of \eqref{eq_sde} with \(\phi\) as in \eqref{eq_driftcpn}. Additionally assume that \(\mu=\mu_1+\mu_2\) where \(\mu_1\) is arbitrary and \(\mu_2\) is absolutely continuous and such that \(\mu_2(I)>0\) for all open intervals \(I\subset(0,\infty)\). Then the 1-skeleton chain is irreducible, and \((X_t)_{t\geqslant0}\) is ergodic. 
\end{lemma} \begin{proof} Without loss of generality we assume \(\mu_1=0\) since otherwise we may simply condition on the event that the jumps are only sampled from \(\mu_2\). \\ Let \(B\in\mathcal{B}((0,\infty))\) with \(\lambda^{\mathrm{Leb}}(B)>0\). Our goal is to show that for all \(x\in(0,\infty)\) there exists \(n\in\mathds{N}\) such that \begin{align}\label{eq_ir} \mathds{P}^x(X_{n}\in B)>0. \end{align} It suffices to show \eqref{eq_ir} only for sets \(B\) for which \(\inf B>0\) since for arbitrary \(B\in\mathcal{B}(0,\infty)\) with \(\lambda^{\mathrm{Leb}}(B)>0\) there exist \(0<a<b\) such that \(\lambda^{\mathrm{Leb}}(B\cap(a,b))>0\). Moreover, it also suffices to just consider \(x<\inf B\) and \(n=1\). This is due to the fact that for arbitrary \(x\in(0,\infty)\) and \(m\in\mathds{N}\) we obtain \begin{align*} \mathds{P}^x(X_{m}\in B)\geqslant \mathds{P}^x(X_m\in B, N_{m-1}=0), \end{align*} where we recall that \((N_t)_{t\geqslant0}\) is the Poisson process counting the jumps of \((L_t)_{t\geq0}\), and therefore also of \((X_t)_{t\geqslant0}\).\\ As \(\phi(x)<0\) for all \(x>0\), we may choose \(m\) large enough such that \(X^x_{m-1}<\inf B\) on \(\{N_{m-1}=0\}\), and consider \(X^x_{m-1}\) as a new starting point.\\ Thus, let \(0<x<\inf B\). In the following we condition on the event that exactly one jump occurs until \(t=1\). Denote \(Y_t:=\left(X^x_t\big|N_1=1\right)\). It holds \begin{align*} \mathds{P}^x(X_1\in B)\geqslant c\mathds{P}^x(Y_1\in B) \end{align*} for some \(c>0\). Further, denote by \(T\in(0,1)\) the uniformly distributed time of the jump. We show that the joint cumulative distribution function \begin{align*} \mathds{P}(T\leqslant t, Y_1\leqslant y) \end{align*} is strictly monotone on \((0,1)\times (x,\infty)\) in both arguments. Let \(0<t<t'<1\) and \(y\in(x,\infty)\). We obtain \begin{align*} \mathds{P}(T\in(t,t'], Y_1\leqslant y)\geqslant \mathds{P}(T\in(t,t'], \xi_1\leqslant x-Y_{t-}) >0. 
\end{align*} Indeed, if \(T\in(t,t']\) and additionally \(\xi_1\leqslant x-Y_{t-}\), then \(Y_1\leqslant x<y\), and since \(Y_{t-}<x\) we get \(\mathds{P}(T\in(t,t'],\xi_1\leqslant x-Y_{t-})>0\).\\ Now, let \(t\in(0,1)\) and \(x<y<y'<\infty\). We note that for every \(t\in(0,1)\) there exists some interval \(I\subset (0,\infty)\) such that \(Y_1\in(y,y']\) if \(T=t\) and \(\xi_1\in I\). This is due to the fact that the paths of \((X_t)_{t\geqslant0}\) between two jumps are continuous and strictly decreasing. Moreover, since \(Y_1\) depends continuously on \(T\) and \(\xi_1\), there exists \(\mathds{V}\mathrm{ar}\,epsilon>0\) and an interval \(I'\subset(0,\infty)\) such that \(Y_1\in(y,y']\) if \(T\in(t-\mathds{V}\mathrm{ar}\,epsilon,t]\) and \(\xi_1\in I'\). Thus, \begin{align*} \mathds{P}(T\leqslant t, Y_1\in(y,y']) \geqslant\mathds{P}(T\in(t-\mathds{V}\mathrm{ar}\,epsilon,t],Y_1\in(y,y'])\geqslant \mathds{V}\mathrm{ar}\,epsilon\mu(I')>0 \end{align*} by the assumption on \(\mu\).\\ As both \(T\) and \(Y_1\) have clearly no atoms in \((0,1)\) and \((x,\infty)\), respectively, there exists a joint density function \(f_{(T,Y_1)}\) of \((T,Y_1)\) on \((0,1)\times(x,\infty)\) which is strictly positive. Hence \begin{align*} \mathds{P}^x(X_1\in B)\geqslant c\mathds{P}^x(T\in(0,1),Y_1\in B)=\int_{(0,1)\times B} f_{(T,Y_1)}(t,y)\diff t\diff y >0. \end{align*} This, together with Corollary \ref{cor_erg}, concludes the proof. \end{proof} \subsection{Examples}\label{sec_exam} In this section we illustrate Theorem \ref{thm_cpn} on various examples by sampling \eqref{eq_sde}. To this end, we first compute a realization of the path of the driving noise \((L_t)_{t\geq0}\). With \((L_t)_{t\geq0}\) being a compound Poisson process this is straight-forward. It remains to solve a (deterministic) differential equation which is then done via the classic Euler method. 
\noindent \begin{figure} \caption{\footnotesize Illustration of Example \ref{ex_dw} \label{fig1} \end{figure} \begin{figure} \caption{From left to right we see the target density function \(\pi\), a histogram of the sampled distribution with sample size \(N=50000\), and an exemplary sample path of \((X_t)_{t\geqslant0} \label{fig2} \end{figure} \begin{example}[double-well]\label{ex_dw} In \cite{FLI} is is pointed out that sampling from a target distribution \(\boldsymbol{\pi}(\diff x)=\pi(x)\diff x\) with two separated modes is challenging for classic \textsc{lmc}. The lower the values of \(\pi\) are between the modes the longer it takes on average for the continuous Langevin diffusion to move from one mode to the other. This issue can be circumvented by allowing jumps. Take \begin{align*} \pi(x)&=\exp\left\{\frac1{10}x(x-4)(x-6.02)(x-10)+0.5\right\}, \quad x>0, \end{align*} which is taken from \cite[Sec. 4]{FLI}, but shifted to the right such that both modes are contained in \((0,\infty)\). As driving noise we choose a Lévy process \((L_t)_{t\geq0}\) with characteristic triplet \((0,0,\mu)\) where \begin{align*} \mu(\diff x)&=\mathrm{e}^{-x}\diff x + \delta_4 + 2\delta_8. \end{align*} Clearly, conditions \textbf{(b1)}, \textbf{(b2)}, and \textbf{(c2)} are fulfilled. Thus, \(\boldsymbol{\pi}\) is invariant for a solution \((X_t)_{t\geqslant0}\) of \eqref{eq_sde} with \(\phi\) as in \eqref{eq_driftcpn} by Theorem \ref{thm_cpn}. Further, since Lemma \ref{lem_full} applies, \(\boldsymbol{\pi}\) is even limiting.\\ We demonstrate this example in Figure \ref{fig1}.\\ Note that, in general, there is no closed-form expression of \(\phi\) due to the convolution term appearing in its definition. Hence, we use numerical integration to evaluate \(\phi\) for this and the following two examples. 
\end{example} \begin{example}[non-smooth density]\label{ex_ns} Another important advantage of \textsc{llmc} compared to \textsc{lmc} and also f\textsc{lmc} of \cite{FLI} is the possibility to choose non-smooth target densities. Let, for example, \(\boldsymbol{\pi}(\diff x)=\pi(x)\diff x\) with \begin{align*} \pi(x)&=\mathrm{e}^{-0.5x}+\mathds{1}_{(2,4)}(x), \end{align*} and \((L_t)_{t\geq0}\) be a Lévy process with characteristic triplet \((0,0,\mu)\) where \begin{align*} \mu(\diff x)&=x^2\mathrm{e}^{-0.5x}\diff x + \delta_1. \end{align*} Let \((X_t)_{t\geqslant0}\) be a solution of \eqref{eq_sde} with \(\phi\) as in \eqref{eq_driftcpn}. As for the previous example, \textbf{(b1)}, \textbf{(b2)}, and \textbf{(c2)} are met, and \(\boldsymbol{\pi}\) is invariant and limiting for \((X_t)_{t\geqslant0}\) by Theorem \ref{thm_cpn} and Lemma \ref{lem_full}.\\ This, too, is displayed in Figure \ref{fig1}. \end{example} \begin{example}[\textsc{Dresden Frauenkirche}] To illustrate that our result also covers target densities with lots of detail we consider the density \(\pi\) as in Figure 2 (left) which represents the silhouette of the \textsc{Dresden Frauenkirche}, continued by an exponential tail. We manufactured \(\pi\) in a way such that \textbf{(b1)} is met. As driving noise we choose a spectrally positive Lévy process with characteristic triplet \((0,0,\mu)\) where \begin{align*} \mu(\diff x)=\mathds{1}_{\{x>0\}}\mathrm{e}^{-\frac{x^2}2}\diff x. \end{align*} Let \((X_t)_{t\geqslant0}\) be a solution of \eqref{eq_sde} with \(\phi\) as in \eqref{eq_driftcpn}. As for both prior examples, \(\boldsymbol{\pi}\) is clearly invariant and limiting for \((X_t)_{t\geqslant0}\). 
The density function, the sampled distribution and an exemplary sample path of \((X_t)_{t\geqslant0}\) can be seen in Figure \ref{fig2}.\\ Taking a closer look we see that the process \((X_t)_{t\geqslant0}\) slows down considerably upon entering the interval \((0,7.5)\) on which most of the mass of \(\boldsymbol{\pi}\) concentrates. In general, the drift coefficient takes on large values in areas of small mass and small values in areas of high mass. This stems from \(\pi\) appearing in the denominator of \eqref{eq_driftcpn}, and can be observed by inspecting the slopes of the sample path in Figure 2.\\ Moreover, the process becomes slower the closer it gets to the origin. On the one hand, this is due to Assumption \textbf{(b1)} (and \textbf{(a2)}, respectively), and ensures that \(0\) cannot be reached in finite time. On the other hand, this slowing down is caused by the convolution with the signed tail function \(\overline\mu_s\) in the nominator of \eqref{eq_driftcpn}.\\ This is reasonable: Because jumps go only upwards it is \emph{less likely} for \((X_t)_{t\geqslant0}\) to reach the area of the left half of the silhouette (approximately the interval \((0,3.75)\)) than to appear in the area of its right half. But since both sides are symmetrical the drift must compensate for that. \end{example} \section{Proof of Theorem \ref{thm_cpn}}\label{sec_prf} n the following, whenever constants \(C,C'\) or \(\alpha\) appear in the proof below, we mean the constants of Assumption \textbf{(b1)}. \subsubsection*{Proof of Theorem \ref{thm_cpn} (i): Positive Harris recurrence} The Foster-Lyapunov method of \cite{MTI} - \cite{MTIII} is tailored for processes which cover the whole real line. 
Hence, we consider the auxiliary process \(Y_t=s(X_t)\) where \(s:(0,\infty)\to\mathds{R}\) is a smooth strictly monotone function such that \(s(x)=\ln(x)\) for \(x\in(0,1-\varepsilon)\) and \(s(x)=x\) for \(x\in(1+\varepsilon,\infty)\) where \(0<\varepsilon<\mathrm{e}^{-1}\) is some constant. Clearly, \((X_t)_{t\geqslant0}\) is positive Harris recurrent if and only if \((Y_t)_{t\geqslant0}\) is positive Harris recurrent.\\ Central to this method are the so-called \emph{norm-like functions} whose precise definition needs additional notation: For \(m\in\mathds{N}\) denote \(O_m:=(\mathrm{e}^{-m},m)\) and choose \(h_m\in\mathcal{C}^\infty_c(\mathds{R})\) such that \(0\leqslant h_m\leqslant 1\), and with \(h_m(x)=1\) for all \(x\in O_m\), and \(h_m(x)=0\) for all \(x\in O_{m+1}^c\). Let \((\mathcal{X}^{(m)}_t)_{t\geqslant0}\) be the unique strong solution of \begin{align*} \diff \mathcal{X}^{(m)}_t = h_m(\mathcal{X}^{(m)}_{t-})\left(\phi(\mathcal{X}^{(m)}_{t-})\diff t + \diff L_t\right),\quad \mathcal{X}^{(m)}_0>0. \end{align*} We set \(\mathcal{Y}^{(m)}_t:=s(\mathcal{X}^{(m)}_t)\). Clearly, this construction implies that for all \(m\in\mathds{N}\) if \(\mathcal{X}^{(m)}_0\in O_m\) it holds \(\mathcal{Y}^{(m)}_t = Y_t\) for all \(t< T_m:=\inf\{s\geqslant0: |Y_s|\geqslant m\}\). For \(m\in\mathds{N}\) denote by \((\mathcal{G}_m,\mathcal{D}(\mathcal{G}_m))\) the extended generator of \((\mathcal{Y}^{(m)}_t)_{t\geqslant0}\).
\\ In the following, to make notation easier, we denote \(y:=s(x)\) if \(x\in(0,\infty)\) is given, and \(x:=s^{-1}(y)\) if \(y\in\mathds{R}\) is given. \begin{lemma}\label{lem_norm} Let \(f\in\mathcal{C}^1(\mathds{R})\) with \(f(y)\in[1+|y|,2+|y|]\) for all \(y\in\mathds{R}\), and \(f(y)=1+|y|\) for all \(|y|>\mathds{V}\mathrm{ar}\,epsilon\) for some \(\mathds{V}\mathrm{ar}\,epsilon>0\). Then \(f\) is norm-like w.r.t. \((Y_t)_{t\geqslant0}\).\\ Moreover, for all \(m\in\mathds{N}\) and \(y\in(-m,m)\) it holds \begin{align}\label{eq_Gmf} \mathcal{G}_m f(y)=\phi(x) f_0'(x) + \int_{(0,\infty)}(f_0(x+z)-f_0(x)) \mu(\diff z). \end{align} where \(f_0(x):=f(y)=f(s(x))\). \end{lemma} \begin{proof} Fix \(m\in\mathds{N}\). Itô's formula yields \begin{align}\label{eq_genito}\nonumber \mathds{E}^y[f(\mathcal{Y}^{(m)}_t)-f(y)] &= \mathds{E}^{x}[f_0(\mathcal{X}^{(m)}_t)-f_0(x)]\\\nonumber &=\mathds{E}^{x}(B_t)_{t\geqslant0}ig[\int_0^t h_m(\mathcal{X}^{(m)}_{s-}) \phi(\mathcal{X}^{(m)}_{s-})f_0'(\mathcal{X}^{(m)}_{s-})\diff s \\ &\quad +\int_{z\neq0}\int_0^t \left(f_0(\mathcal{X}^{(m)}_{s-}+h_m(\mathcal{X}^{(m)}_{s-})z)- f_0(\mathcal{X}^{(m)}_{s-})\right)\widetilde\mu(\cdot,\diff s,\diff z)(B_t)_{t\geqslant0}ig] \end{align} where \(\widetilde\mu\) is the jump measure of \((L_t)_{t\geq0}\). To verify whether the jump measure may be replaced by the compensator under the expectation, and to subsequently swap the order of integration, we need some estimates. Clearly, \(f_0(x+h_m(x)z)-f_0(x)=0\) for all \(z>0\), and \(x\notin[\mathrm{e}^{-m-1},m+1]\). On the other hand, for all \(x\in[\mathrm{e}^{-(m+1)},m+1]\) there exists \(M>0\) such that \begin{align*} |f_0(x+h_m(x)z)-f_0(x)| \leqslant M\vee z \end{align*} by the definition of \(f_0\). Hence, by \cite[Thm. 
2.21]{schnurr2009symbol} and the fact that \(\int_0^\infty (1\vee z) \mu(\diff z)<\infty\) by Assumption \textbf{(b2)}, \(\widetilde\mu~(\cdot,\diff s,\diff z)\) may be replaced by \(\diff s\mu(\diff z)\) under the expectation in \eqref{eq_genito}. \\ Applying Fubini's theorem and reversing the space transform, i.e. going back to \((\mathcal{Y}_t)_{t\geqslant0}\), we obtain \begin{align*} \mathds{E}^y[f(\mathcal{Y}^{(m)}_t)-f(y)] &=\mathds{E}^{x}(B_t)_{t\geqslant0}ig[\int_0^t h_m(\mathcal{X}^{(m)}_{s-}) \phi(\mathcal{X}^{(m)}_{s-})f_0'(\mathcal{X}_{s-})\diff s \\ &\quad +\int_0^t \int_{z\neq0}\left(f_0(\mathcal{X}^{(m)}_{s-}+h_m(\mathcal{X}^{(m)}_{s-})z)- f_0(\mathcal{X}^{(m)}_{s-})\right)\mu(\diff z)\diff s(B_t)_{t\geqslant0}ig]\\ &=\mathds{E}^{y} \left[\int_0^tg(\mathcal{Y}^{(m)}_{s-})\diff s\right] \end{align*} where \begin{align}\label{eq_Gmf2}\nonumber g(y)&:= h_m(x)\phi(x) f_0'(x)\\ &\qquad + \int_{z\neq0} (f_0(x+h_m(x)z)-f_0(x)) \mu(\diff z). \end{align} This function is clearly measurable. We observe that the integral term is continuous in \(y\) and vanishes for \(|y|\geqslant m+1\). Therefore, \(g\) is bounded and Tonelli's theorem is applicable yielding \begin{align*} \left|\mathds{E}^{y} \left[\int_0^tg\left(\mathcal{Y}^{(m)}_{s-}\right)\diff s\right]\right|\leqslant\int_0^t\mathds{E}^{y} \left[\left|g\left(\mathcal{Y}^{(m)}_{s-}\right)\right|\right]\diff s \leqslant \left\|g\right\|_\infty t <\infty \end{align*} for all \(y\in\mathds{R}\) and all \(t\geqslant0\). Hence, \(f\in\mathcal{D}(\mathcal{G}_m)\) for all \(m\in\mathds{N}\). This completes the proof as we observe that \eqref{eq_Gmf} follows from the definition of the extended generator and upon realizing that the representation in \eqref{eq_Gmf2} agrees with \eqref{eq_Gmf} for all \(y\in(-m,m)\). 
\end{proof} The second key ingredient of the Foster-Lyapunov method is the following: A set \(K\subset\mathds{R}\) is called \emph{petite} for a Markov process \((Y_t)_{t\geqslant0}\) if there exists a distribution \(a\) on \((0,\infty)\) and a non-trivial measure \(\mathds{V}\mathrm{ar}\,phi\) on \(\mathcal{B}(\mathds{R})\) such that for all \(y\in K\) and \(B\in\mathcal{B}(\mathds{R})\) \begin{align*} \int_{(0,\infty)} \mathds{P}^y(Y_t\in B)a(\diff t) \geqslant \mathds{V}\mathrm{ar}\,phi(B). \end{align*} \begin{lemma}\label{lem_pet} All compact sets \(K\subset\mathds{R}\) are petite for \((Y_t)_{t\geqslant0}\). \end{lemma} \begin{proof} We start with some helpful notation. For \(y\in\mathds{R}\) denote by \(q_y(\cdot)\) the solution of the autonomous differential equation \begin{align}\label{eq_auto} \begin{cases} q_y'=\phi(s^{-1}(q_y))s'(s^{-1}(q_y)),\\ q_y(0)=y. \end{cases} \end{align} Then \(q_y(t)\) represents the (deterministic) state \(Y_t^{y}\) under the assumption that no jump occurs in the time interval \([0,t]\), that is \(N_t=0\). The inverse function \(q_y^{-1}(y')\) exists due to \(\phi(x)<0\) for all \(x>0\). We note that it represents the time it takes to drift from \(y>0\) to \(y'\in(0,y)\), and is hence decreasing in \(y'\) and increasing in \(y\). \\ Let \(K\subset\mathds{R}\) be compact, without loss of generality assume \(K=[k_1,k_2]\). Let \(a(\diff t)=\mathrm{e}^{-t}\diff t\) and \(\mathds{V}\mathrm{ar}\,phi(\diff z)=c\mathds{1}_{(k_1-1,k_1)}(z)\diff z\) for some \(c>0\) which we are yet to choose. Let \(y\in K\) and \(B\in\mathcal{B}(\mathds{R})\). 
Using \(\mathds{P}(N_t=0)=\mathrm{e}^{-t|\mu|}\) we compute \begin{align*} \int_0^\infty \mathds{P}^y(Y_t\in B) a(\diff t) &\geqslant \int_0^\infty \mathds{1}_{q_y^{-1}(B)}(t) \mathrm{e}^{-t(1+|\mu|)}\diff t \\ &\geqslant\int_0^{q^{-1}_{y}(k_1-1)} \mathds{1}_{q_y^{-1}(B)}(t) \mathrm{e}^{-t(1+|\mu|)}\frac{q_y'(t)}{q_y'(t)}\diff t \\ &\geqslant\frac1{\sup\{|q_y'(t)|: t\leqslant q^{-1}_{y}(k_1-1)\}}\int_{k_1-1}^y \mathds{1}_B(z)\mathrm{e}^{-q_y^{-1}(z)(1+|\mu|)}\diff z\\ &\geqslant\frac1{\sup\{|q_y'(t)|: t\leqslant q^{-1}_{y}(k_1-1)\}}\int_{k_1-1}^{k_1} \mathds{1}_B(z)\mathrm{e}^{-q_y^{-1}(z)(1+|\mu|)}\diff z\\ &\geqslant \frac{\exp\{-q^{-1}_{k_2}(k_1-1)(1+|\mu|)\}}{\sup\{|q_y'(t)|: t\leqslant q^{-1}_{y}(k_1-1)\}}\int_{k_1-1}^{k_1} \mathds{1}_B(z)\diff z. \end{align*} For the third inequality we substituted \(z:=q_y(t)\) and used the fact that for all \(y,k_1\in\mathds{R}\) it holds \(\sup\{|q_y'(t)|: t\leqslant q^{-1}_{y}(k_1-1)\}<\infty\). Indeed, this is implied by \eqref{eq_auto}, and the properties of \(\phi\) and \(s\). The fourth inequality is due to the reduction of the area of integration while the fifth inequality uses the monotonicity properties of \(q^{-1}\) described above. Lastly, choosing \begin{align*} c:=\frac{\exp\{-q^{-1}_{k_2}(k_1-1)(1+|\mu|)\}}{\sup\{|q_y'(t)|~:~t~\leqslant~ q^{-1}_{y}(k_1-1)\}} \end{align*} finishes the proof. \end{proof} Finally, we are ready to prove the first claim of Theorem \ref{thm_cpn}. \begin{proof}[Proof of Theorem \ref{thm_cpn} (i)] We show that there exist some positive constants \(c,d>0\) and a closed petite set \(K\subset \mathds{R}\) such that \begin{align}\label{eq_fle} \mathcal{G}_mf(y)\leqslant -c + d\mathds{1}_K (y) \end{align} for all \(m\in\mathds{N}\) and \(y\in(-m,m)\). Then \cite[Thm. 
4.2]{MTIII} implies that \((Y_t)_{t\geqslant0}\) is positive Harris recurrent, and therefore, \((X_t)_{t\geqslant0}\) is positive Harris recurrent as well.\\ Clearly, the function \begin{align}\label{eq_genfun} \mathcal{G}_m f: y\mapsto \phi(x)f_0'(x) + \int_{(0,\infty)}(f_0(x+z)-f_0(x))\mu(\diff z) \end{align} is continuous, and bounded on \((-m,m)\). \\ Hence, \eqref{eq_fle} follows if we can show that \(\limsup_{y\to\pm\infty} \mathcal{G}_mf(y)\leqslant -c\) for some \(c>0\). We start with \(y\to+\infty\). Note that for \(y\gg1\) we have \(x=s^{-1}(y)=y\), and, on the one hand \(f_0'(y)=1\), and, on the other hand \(f_0(y+z)-f_0(y)=z\).\\ With Assumption \textbf{(b1)} we obtain \begin{align*} \limsup_{y\to+\infty} \phi(y)=-\liminf_{y\to+\infty}(\overline\mu_s*\pi(y))\frac{\mathrm{e}^{\alpha y}}C \leqslant -\liminf_{y\to+\infty} \frac1C\int_{(0,M)}\overline\mu_s(z)\pi(y-z)\mathrm{e}^{\alpha y}\diff z \end{align*} for all arbitrary, but fixed \(M>0\). Thus, also with Assumption \textbf{(b1)}, \begin{align*} \limsup_{y\to+\infty} \phi(y)f_0'(y) &\leqslant -\liminf_{y\to+\infty} \int_{(0,M)}\overline\mu_s(z)\mathrm{e}^{\alpha z}\diff z = -\int_{(0,M)}\overline\mu_s(z)\mathrm{e}^{\alpha z}\diff z \end{align*} Since \(M>0\) was arbitrary this yields \(\limsup_{y\to+\infty} \phi(y)f_0'(y)< \int_{(0,\infty)}\overline\mu_s(z)\diff z = - \mathds{E} \xi_1\). Now, for the second term of \eqref{eq_genfun} we observe that for \(x=y\gg1\) \begin{align*} \int_{(0,\infty)}(f_0(x+z)-f_0(x))\mu(\diff z)&= \int_{(0,\infty)}z \mu(\diff z) =\mathds{E}\xi_1. \end{align*} Consequently, there exists \(c>0\) such that \(\mathcal{G}_m f(y)< -c <0\) for \(y\gg1\).\\ Next, consider the behavior for \(y\to-\infty\), and start with the observation that for \(y\ll-1\) one has \(x=s^{-1}(y)=\mathrm{e}^y\), and \(f_0'(x)=\mathrm{e}^{-y}\). 
With the definition of \(\phi\) and Assumption \textbf{(b1)} we obtain \begin{align*} |\phi(\mathrm{e}^y)|\leqslant C'|\mu|\mathrm{e}^y \end{align*} for \(y\ll-1\). Therefore, \(\phi(\mathrm{e}^y)f_0'(\mathrm{e}^y)\) is bounded for \(y\ll-1\).\\ Finally, to find a suitable estimate for the second term of \eqref{eq_genfun} for \(y\ll-1\) we fix \(M>0\) such that \(\mu([M,\infty))>0\). Observe that for \(y\ll-1\) it holds \(f_0(\mathrm{e}^y+z)-f_0(\mathrm{e}^y)<0\) for all \(z\in(0,M)\). Further, there exists \(K>0\) such that \(f_0(\mathrm{e}^y+z)<M'+z\) for all \(z\in[M,\infty)\). We then compute \begin{align*} \int_{(0,\infty)}(f_0(\mathrm{e}^y+z)-f_0(\mathrm{e}^y)) \mu(\diff z)&= \int_{(0,M)}(f_0(\mathrm{e}^y+z)-f_0(\mathrm{e}^y)) \mu(\diff z)\\ &\quad +\int_{[M,\infty)}(f_0(\mathrm{e}^y+z)-f_0(\mathrm{e}^y)) \mu(\diff z)\\ &\leqslant \int_{[M,\infty)}(K+z)\mu(\diff z) - f_0(\mathrm{e}^y)\mu[M,\infty). \end{align*} As \(\int_{(0,\infty)}z\mu(\diff z)<\infty\) this implies \begin{align*} \lim_{y\to-\infty}\int_{(0,\infty)}(f_0(\mathrm{e}^y+z)-f_0(\mathrm{e}^y) \mu(\diff z)=-\infty, \end{align*} and therefore, \(\mathcal{G}_m f(y)< -c\) for \(y\ll-1\) with the same \(c\) as above. This completes the proof. \end{proof} \subsubsection*{Proof of Theorem \ref{thm_cpn} (ii): Invariant distributions are infinitesimally invariant} \begin{proof}[Proof of Theorem \ref{thm_cpn} (ii)] The claim follows from \cite[Cor. 5.4]{behmeoechsler} if we can show that \(\frac1t\left|\mathds{E}^xf(X_t)-f(x)\right|<\infty\) for all \(f\in\mathcal{C}_c^\infty(0,\infty)\) and all \(t\geqslant0\).\\ Analogously to the proof of Lemma \ref{lem_norm}, a straight-forward application of Itô's formula yields \begin{align*} \frac1t\left|\mathds{E}^xf(X_t)-f(x)\right| = \frac1t \left|\mathds{E}^x \int_0^t g(X_{s-})\diff s\right| \end{align*} for \(f\in\mathcal{C}_c^\infty(0,\infty)\), where \begin{align*} g(x)=\phi(x)f'(x)+\int_{(0,\infty)}(f(x+z)-f(x))\mu(\diff z). 
\end{align*} Clearly, if \(f\in\mathcal{C}_c^\infty(0,\infty)\), then \(g\) is bounded and it follows \begin{align*} \frac1t\left|\mathds{E}^xf(X_t)-f(x)\right|\leqslant \|g\|_\infty <\infty. \end{align*} \end{proof} \subsubsection*{Proof of Theorem \ref{thm_cpn} (iii): Uniqueness of the invariant distribution} For the third assertion we require one of the additional assumptions. As described above we start with Assumption \textbf{(c1)}, i.e. there exists \(n\in\mathds{N}\) such that \(\operatorname{supp}\mu\subset(1/n,n)\). \begin{proof}[Proof of Theorem \ref{thm_cpn} (iii) under \textbf{(c1)}] It has been shown in \cite[Thm. 4.2]{behmeoechsler} that any infinitesimally invariant measure \(\boldsymbol{\eta}\) of a solution \((X_t)_{t\geqslant0}\) of \eqref{eq_sde} necessarily solves the distributional equation \begin{align}\label{eq_1} -(\phi \boldsymbol{\eta}) ' + \mu * \boldsymbol{\eta} -|\mu|\boldsymbol{\eta}=0 \end{align} on \((0,\infty)\). To show that there exists only one probability distribution solving \eqref{eq_1} we first need some regularity properties for \(\phi\). A straight-forward calculation yields that for all \(x\geqslant0\) the representation \begin{align}\label{eq_phi1} \phi(x)=\frac{\int_0^x (\mu*\pi(z)-|\mu|\pi(z))\diff z}{\pi(x)} \end{align} holds. As \(\pi\) is piecewise weakly differentiable and \(\pi(x)>0\) for \(x>0\) it follows that \(1/\pi\) is piecewise weakly differentiable as well. Thus, \(\phi\) is piecewise weakly differentiable w.r.t. the same partition as \(\pi\), since the numerator of the right-hand side of \eqref{eq_phi1} is the primitive of a locally integrable function, and as such contained in \(W^{1,1}_\mathrm{loc}(0,\infty)\). 
Further, as \(\phi(x)<0\) for all \(x>0\) we infer that at least \(1/\phi\in L^1_\mathrm{loc}(0,\infty)\).\\ This property of \(\phi\) allows us to transform \eqref{eq_1} into \begin{align}\label{eq_2} \boldsymbol{\eta}'=\frac{\mu*\boldsymbol{\eta}-\phi'\boldsymbol{\eta}-|\mu|\boldsymbol{\eta}}{\phi}. \end{align} We note that the right-hand side of \eqref{eq_2} defines a Schwartz distribution (cf. \cite[Lem. 2.2]{behmeoechsler}) if \(\boldsymbol{\eta}\) is a real-valued Radon measure which we can assume as we are only looking for solutions which are probability distributions. More importantly, in this case the right-hand side of \eqref{eq_2} is even a Schwartz distribution of order \(0\), i.e. it can be identified with some real-valued Radon measure on \((0,\infty)\).\\ In summary, the distributional derivative of \(\boldsymbol{\eta}\) can be identified with a real-valued Radon measure which implies that \(\boldsymbol{\eta}\) itself can be identified with a locally integrable function. But now, if we insert a locally integrable function \(\boldsymbol{\eta}\) into the right-hand side of \eqref{eq_2} we obtain a locally integrable function plus a discrete measure with atoms at the discontinuities of \(\phi\). Integrating on both sides of \eqref{eq_2} tells us that any solution of \eqref{eq_1} is piecewise weakly differentiable w.r.t. the same partition as \(\pi\).\\ Let \(n\in\mathds{N}\) such that \(\operatorname{supp} \mu\subset(1/n,n)\). To solve \eqref{eq_1} we integrate both sides and obtain \begin{align}\label{eq_3} -\phi\boldsymbol{\eta} + \overline\mu_s*\boldsymbol{\eta}=c_1 \end{align} for some \(c_1\in\mathds{R}\). Observe that for \(x\gg1\) we have \begin{align*} |\phi(x)| \leqslant |\mu|\int_{1/n}^n\frac{ \pi(x-z)}{\pi(x)}\diff z\leqslant |\mu|\mathrm{e}^{\alpha n}, \end{align*} by Assumption \textbf{(b1)}. Thus, \(\limsup_{x\to\infty}|\phi(x)|<\infty\). 
From Young's convolution inequality it follows that \(\|\overline\mu_s*\boldsymbol{\eta}\|_1\leqslant \|\overline\mu_s\|_1\|\boldsymbol{\eta}\|_1<\infty\). Thus, for any absolutely continuous measure \(\boldsymbol{\eta}\) the left-hand side of \eqref{eq_3} can be identified with an element of \(L^1(\mathds{R}_+)\). Consequently, \(c_1=0\).\\ Above we have seen that any probability distribution \(\boldsymbol{\eta}\) solving \eqref{eq_1} is absolutely continuous. Denoting by \(H(x):=\boldsymbol{\eta}((0,x])\) the cumulative distribution function of \(\boldsymbol{\eta}\) we thus know that the density function \(H'\) of \(\boldsymbol{\eta}\) is integrable. If we once again use the fact that \(\int_0^x (\mu*H'(z)-|\mu|H'(z))\diff z = \overline\mu_s*(H')(x)\), and that \(\mu*(H')(x)=0\) for \(x\in(0,1/n]\), we obtain from \eqref{eq_3} the equation \begin{align}\label{eq_4} H'(x) = -\frac{|\mu|H(x)}{\phi(x)}, \qquad x\in(0,1/n]. \end{align} Caratheodory's theorem (cf. \cite[Thm. 5.3]{hale}) implies that \eqref{eq_4} has for each initial value \(H(\varepsilon)=c_2\in\mathds{R}\) a unique solution. Clearly, for arbitrary but fixed \(c_2\in\mathds{R}\) the solution of \eqref{eq_4} is given by \(H(x)=\frac{c_2}{F(\varepsilon)}F(x)\) where \(F(x):=\boldsymbol{\pi}(0,x]\) is the cumulative distribution function of our target distribution \(\boldsymbol{\pi}\). This is easily seen with the definition of \(\phi\). \\ For \(x>1/n\) Equation \eqref{eq_3} reads \begin{align}\label{eq_5} H'(x)=\frac{\int_0^x\mu*(H')(z)\diff z-|\mu|H(x)}{\phi(x)}. \end{align} From Assumption \textbf{(c1)} it follows that for all \(0<x<b\) and any function \(f\in L^1(\mathds{R}_+)\) holds \begin{align*} \mu*f(x)=\mu*(f|_{\left[0,b-\frac1n\right]})(x). \end{align*} Consider Equation \eqref{eq_5} on \([m/n,(m+1)/n]\) for some \(m\in\mathds{N}\). As initial condition we assume \(H(x)=c_3F(x)\) for all \(x\in(0,m/n]\) and some \(c_3\in\mathds{R}\). 
This results in the equation \begin{align*} H'(x)=\frac{c_3\int_0^x\mu*\pi(z)\diff z-|\mu|H(x)}{\phi(x)}, \qquad x\in[m/n,(m+1)/n] \end{align*} for which Caratheodory's theorem again ensures a unique solution. Hence, by induction over \(m\) and subsequent normalization it follows that \(\boldsymbol{\pi}\) is the unique infinitesimally invariant distribution of \((X_t)_{t\geqslant0}\).\\ Finally, Theorem \ref{thm_cpn} (i), that is positive Harris recurrence, implies existence and uniqueness of an invariant distribution (cf. \cite[Sec. 4]{MTIII}). But this unique distribution must be \(\boldsymbol{\pi}\) due to Theorem \ref{thm_cpn} (ii). This proves the claim. \end{proof} \begin{proof}[Proof of Theorem \ref{thm_cpn} (iii) under \textbf{(c2)}] Assume \(\phi\) is piecewise locally Lipschitz continuous. In this case we show the assertion by approximating \((X_t)_{t\geqslant0}\) with a sequence of processes meeting Assumption \textbf{(c1)}. \\ Recall that \(L_t:= \sum_{i=0}^{N_t}\xi_i\), and define \begin{align*} L^{(n)}_t&:=\sum_{i=0}^{N_t}\xi_i\mathds{1}_{\left\{\frac1n<\xi_i<n\right\}} \end{align*} for all \(n\in\mathds{N}\). Evidently, \((L^{(n)}_t)_{t\geqslant0}\) is a Lévy process with characteristic triplet \((0,0,\mu^{(n)})\) where \(\mu^{(n)}(B)=\mu(B\cap(\frac1n,n))\) for all \(B\in\mathcal{B}(0,\infty)\). To avoid the trivial case we consider only \(n\in\mathds{N}\) large enough such that \(\operatorname{supp} \mu\cap (\frac1n,n)\neq\emptyset\).\\ Further denote \begin{align*} \phi_n(x)=-\frac{\overline\mu_s^{(n)}*\pi(x)}{\pi(x)}. \end{align*} Observe that for \(n\in\mathds{N}\) large enough and all \(x\in(0,\infty)\) it holds \begin{align*} |\phi(x)-\phi_n(x)|=\frac{(\overline\mu_s-\overline\mu_s^{(n)})*\pi(x)}{\pi(x)} \leqslant \mu((0,1/n]\cup[n,\infty)) \frac{\int_0^x \pi(z)\diff z}{\pi(x)}. \end{align*} With \(\pi\) being a probability density the numerator is bounded. 
Using Assumption \textbf{(b1)} and the fact that \(\pi\) is bounded away from zero on compact intervals in \((0,\infty)\) reveals that for all compact sets \(K\subset[0,\infty)\) there exists \(c>0\) such that \begin{align}\label{eq_estimatephi} |\phi(x)-\phi_n(x)| \leqslant c \mu((0,1/n]\cup[n,\infty)) \end{align} for all \(x\in K\). \\ For \(n\in\mathds{N}\) large enough we consider now solutions \((X^{(n)}_t)_{t\geqslant0}\) of the stochastic differential equations \begin{align}\label{eq_sde_aux} \diff X^{(n)}_t = \phi_n(X^{(n)}_{t-})\diff t + \diff L^{(n)}_t, \quad X^{(n)}_0\sim \boldsymbol{\pi}. \end{align} Note that, by construction, the processes \((X_t)_{t\geqslant0}\) and \((X^{(n)}_t)_{t\geqslant0}, n\in\mathds{N},\) are defined on the same probability space \((\Omega,\mathcal{A},\mathds{P})\), and that for all \(\omega\in\Omega\) the set of jump times of \((X_t^{(n)}(\omega))_{t\geqslant0}\) is a subset of the jump times of \((X_t(\omega))_{t\geqslant0}\).\\ The proof of Theorem \ref{thm_cpn} (iii) under \textbf{(c1)} implies that \((X^{(n)}_t)_{t\geqslant0}\) is a stationary process with invariant distribution \(\boldsymbol{\pi}\). Assume \(X_0=X_0^{(n)}\), and let us show that for all \(t\geqslant0\) it holds \(X^{(n)}_t\to X_t\) in law for \(n\to\infty\). This type of continuous dependence on the coefficients is well-known for the case when \(\phi\) is locally Lipschitz continuous and satisfies a linear growth condition, cf. \cite[Thm. IX.6.9]{jacod}. Unfortunately, these conditions are not necessarily fulfilled in our case, as we require merely piecewise Lipschitz continuity for \(\phi\). In the following, \(\mathbf{p}\in\mathcal{P}\) denotes a partition w.r.t. which \(\phi\) is piecewise Lipschitz continuous.\\ Fix \(\omega\in\Omega\) and \(T>0\), denote \(a:=X_0(\omega)\), and set \begin{align*} t_1:=\inf\{t\geqslant0: \Delta X_t(\omega)\neq0~~ \text{ or } ~~X_t(\omega)\in\mathbf{p}\}. 
\end{align*} Further, for \(t\geqslant0\) denote by \(q(t):=X_t(\omega)\) and \(q_n(t):=X^{(n)}_t(\omega)\) the paths of the respective processes. On the interval \([0,t_1)\), \(q\) and \(q_n\) are governed by the autonomous integral equations \begin{align*} q(t)=a+\int_0^{t}\phi(q(s))\diff s \end{align*} and \begin{align*} q_n(t)=a+\int_0^{t}\phi_n(q_n(s))\diff s, \end{align*} respectively. Hence, for all \(t\in[0,t_1)\) \begin{align*} |q(t)-q_n(t)| &\leqslant\int_0^{t_1} |\phi(q(s))-\phi_n(q_n(s))|\diff s\\ &\leqslant \int_0^{t_1}|\phi(q(s))-\phi(q_n(s))|+|\phi(q_n(s))-\phi_n(q_n(s))|\diff s\\ &\leqslant \int_0^{t_1} \ell|q(s)-q_n(s)| \diff s + c\mu((0,1/n]\cup[n,\infty)) \end{align*} for some constants \(\ell,c>0\). This is due to the estimate in \eqref{eq_estimatephi}, the fact that \(q\) is strictly decreasing, and to the piecewise Lipschitz continuity of \(\phi\). For the latter we note that \(\phi\leqslant\phi_n\) which implies \(q(t)\leqslant q_n(t)\) for all \(t\in[0,t_1]\). In other words, if \(q\) reaches a discontinuity of \(\phi\) at \(t_1\), i.e. \(q(t_1)\in\mathbf{p}\), then it reaches it ahead of \(q_n\) which allows the estimate above. \\ Grönwall's inequality (cf. \cite[Cor. I.6.6]{hale}) then yields for all \(t\in[0,t_1)\) \begin{align}\label{eq_conv1} |q(t)-q_n(t)|\leqslant c\mu((0,1/n]\cup[n,\infty)) \left(1+\int_0^{t_1}\mathrm{e}^{\ell(t-s)}\diff s\right) \end{align} which vanishes for \(n\to\infty\).\\ Our strategy is now to iterate this step until we surpass the time \(T\). By design there are two cases: \(q\) either jumps at \(t_1\) or hits a discontinuity of \(\phi\). Note that it can be ruled out that both events occur at the same time as the probability of this happening is zero. In the same way we exclude \(q\) jumping onto a discontinuity of \(\phi\) because \(q\) is strictly decreasing, jumps are space homogeneous, and the set of discontinuities of \(\phi\) has no accumulation points in \((0,\infty)\).\\ The first case, i.e. 
\(q\) jumps at \(t_1\), is simple. Clearly, there exists \(N\in\mathds{N}\) such that for all \(n>N\) it holds \(\Delta L_{t_1}=\Delta L_{t_1}^{(n)}\). Consequently, for all \(\varepsilon>0\) there exists \(N'\in\mathds{N}\) such that for all \(n>N'\) it holds \(|q(t_1)-q_n(t_1)|<\varepsilon\). Choosing \(\varepsilon\) small enough ensures that \(q\) and \(q_n\) both jump into the same interval \((x_i,x_{i+1})\) of the partition w.r.t. which \(\phi\) is piecewise Lipschitz continuous. Let \begin{align}\label{def_t2} t_2:=\inf\{t\geqslant t_1: \Delta X_t(\omega)\neq0~~ \text{ or }~~ X_t(\omega)\in\mathbf{p}\}. \end{align} For large enough \(n\in\mathds{N}\) we obtain \begin{align*} |q(t)-q_n(t)|\leqslant \varepsilon + \int_{t_1}^{t_2} \ell|q(s)-q_n(s)| \diff s + c\mu((0,1/n]\cup[n,\infty)) \end{align*} for all \(t\in[t_1,t_2)\), and some (possibly different) constants \(\ell,c>0\). Applying Grönwall's inequality again concludes this iteration step.\\ For the second case, i.e. if \(q\) hits a discontinuity of \(\phi\) at \(t_1\), we argue differently. We denote by \begin{align*} t_2(n):=\inf\{t\geqslant t_1: q_n(t)=q(t_1)\} \end{align*} the time at which \(q_n\) also reaches the discontinuity of \(\phi\) at \(q(t_1)\). We require some observations: First, \(t_2(n)<\infty\) for all \(n\in\mathds{N}\) large enough since \(\phi_n<0\) is bounded away from zero on compact sets \(K\subset(0,\infty)\). Second, \(q_n(t_1)\to q(t_1)\) for \(n\to\infty\) due to \eqref{eq_conv1} and the fact that in this case \(q\) and \(q_n\) are continuous on \([0,t_1]\). Third, from \(\phi_n\leqslant \phi_{n-1}<0\) for all \(n\) large enough it follows that \(q_n'\leqslant q_{n-1}'<0\) on \([t_1,t_2(n)]\).\\ Thus, \(t_2(n)\to t_1\) for \(n\to\infty\). Further, also by the continuity of \(q\), it holds \(q(t_2(n))\to q(t_1)\) for \(n\to\infty\). 
Hence, for every \(\varepsilon>0\) we can choose \(N\in\mathds{N}\) such that for all \(n>N\) \begin{align*} |q(t_2(n))-q_n(t_2(n))|=|q(t_2(n))-q(t_1)|<\varepsilon. \end{align*} Using the above definition \eqref{def_t2} of \(t_2\) we obtain for all \(t\in[t_2(n),t_2)\) \begin{align*} |q(t)-q_n(t)|\leqslant \varepsilon' + \int_{t_2(n)}^{t_2} \ell|q(s)-q_n(s)| \diff s + c\mu((0,1/n]\cup[n,\infty)). \end{align*} Note that on \([t_2(n),t_2)\) both \(q\) and \(q_n\) act on the same interval \((x_i,x_{i+1})\) of the partition \(\mathbf{p}\) w.r.t. which \(\phi\) is piecewise Lipschitz continuous. Grönwall's inequality then shows that for all \(t\in[t_2(n),t_2)\) it holds \(q_n(t)\to q(t)\) for \(n\to\infty\). But because \(q_n\) is continuous on \([0,t_2)\) and \(t_2(n)\to t_1\) for \(n\to\infty\), this property extends to \([t_1,t_2)\).\\ Finally, iteration and the fact that \(T\) and \(\omega\) have been chosen arbitrarily yields \(X^{(n)}_t\to X_t\) almost surely for \(n\to\infty\) and all \(t\geqslant0\). This implies weak convergence, and therefore, \(X_t\sim\boldsymbol{\pi}\) for all \(t\geqslant0\), i.e. \(\boldsymbol{\pi}\) is invariant for \((X_t)_{t\geqslant0}\). By Theorem \ref{thm_cpn} (i), \((X_t)_{t\geqslant0}\) is positive Harris recurrent, and hence, \(\boldsymbol{\pi}\) is the unique invariant distribution of \((X_t)_{t\geqslant0}\). \end{proof} \section{Outlook}\label{sec_out} It is only natural to try extending Theorem \ref{thm_infinv} and Theorem \ref{thm_cpn} to more general settings, e.g. higher dimensions, more complicated driving noises, or target measures with disconnected supports, heavy tails or atoms. We believe that for many of these cases similar methods to the ones we employed here yield similar results. Generally speaking, one only has to show that the process in question is positive Harris recurrent, and that there exists a unique infinitesimally invariant distribution. 
The remaining steps are in most instances trivial or at least easy to prove under mild conditions.\\ In the following we briefly comment on the problems one faces when trying this approach on some of the more general cases. \paragraph*{Jump measures with heavy tails} The restriction to light tailed jumps, that is when \(\mathds{E}\xi_1<\infty\), is only needed in the proof of Theorem \ref{thm_cpn} (i). It is not entirely clear whether additional conditions are necessary to show positive Harris recurrence in the case of heavy tailed jumps, i.e.\ \(\mathds{E}\xi_1=\infty\). Yet, as heavy tailed jumps imply larger (negative) values of the drift coefficient \(\phi\) by the definition in \eqref{eq_driftcpn}, searching for a more sophisticated norm-like function is the most promising approach. \paragraph*{Subordinators as driving noise} In Assumption \textbf{(a1)} we required \((L_t)_{t\geq0}\) to be a spectrally positive compound Poisson process if \(\mathcal{E}=(0,\infty)\). In Theorem \ref{thm_infinv} one might also wish to allow (pure jump) subordinators for \((L_t)_{t\geq0}\), i.e. Lévy processes with characteristic triplet \((0,0,\mu)\) for which \(\operatorname{supp}\mu\subset\mathds{R}_+\) and \(\int_{(0,\infty)} (1\wedge z)\mu(\diff z)<\infty\).\\ However, in this case \eqref{eq_subor} in the proof of Theorem \ref{thm_infinv} does in general not hold. Thus, one needs to find a different way of showing that \(0\notin\mathcal{O}\), e.g. by proving that \((\ln(X_t))_{t\geqslant0}\) does not explode. Moreover, uniqueness of the infinitesimally invariant distribution, that is Theorem \ref{thm_cpn} (iii), has to be shown differently. This is because there exists no subinterval of \((0,\infty)\) that cannot be reached by jumps, and because a non-zero amount of jumps does occur almost surely during every time interval. Thus, the proofs of Theorem \ref{thm_cpn} (iii) under \textbf{(c1)} and \textbf{(c2)}, respectively, do not apply in this case. 
\paragraph*{Target distributions with full support} One might wish to extend the results of Theorem \ref{thm_cpn} for the case when \(\mathcal{E}=\mathds{R}\). However, just like in the previous paragraph, the approach used for the proof of Theorem \ref{thm_cpn} (iii) fails due to the fact that there exists no subinterval of \(\mathds{R}\) that cannot be reached by jumps. Therefore, the proof of the uniqueness of the solution of \eqref{eq_iile} requires different arguments. \paragraph*{Target measures with disconnected supports} Allowing only \(\mathcal{E}=\mathds{R}\) or \(\mathcal{E}=(0,\infty)\) seems restrictive as the ability to cross gaps is one of the main advantages of the presence of jumps. Intending to allow disconnected supports of the target measure \(\boldsymbol{\pi}\) one has to assume three things: \begin{enumerate} \item \(\mathcal{E}\) is an open set, \item jumps in both directions are possible, i.e. \(\Pi((-\infty,0))>0\) and \(\Pi((0,\infty))>0\), \item jumps can only land in \(\mathcal{E}\), i.e. \(\mathcal{E}+\operatorname{supp}\Pi\subseteq\mathcal{E}\). \end{enumerate} With those three assumptions one can show that, apart from \(\mathcal{E}=\mathds{R}\) and \(\mathcal{E}\) being some half-line, the only option is that \(\mathcal{E}\) is periodic, that is there exists \(p>0\) such that \(\mathcal{E}+p=\mathcal{E}\).\\ However, if \(\mathcal{E}\neq\mathds{R}\) is periodic and \((X_t)_{t\geqslant0}\) with state space \(\mathcal{O}=\mathcal{E}\) solves \eqref{eq_sde}, then \((X_t)_{t\geqslant0}\) cannot be positive Harris recurrent - regardless of the drift coefficient \(\phi\). The reason for this is simple: The jumps of \((X_t)_{t\geqslant0}\) are space-homogeneous, and \(\mathcal{E}\) consists of countably many intervals of the same length that can only be connected by jumps. Thus, the mass of an invariant measure concentrated on each of these segments is the same. Therefore, no invariant measure can be finite (apart from the trivial measure). 
\paragraph*{Target measures with atoms} An invariant measure \(\boldsymbol{\pi}\) with \(\boldsymbol{\pi}(\{x_0\})>0\) for one or more \(x_0\in\mathds{R}\) can only be achieved by a solution \((X_t)_{t\geqslant0}\) of \eqref{eq_sde} if \((X_t)_{t\geqslant0}\) comes to a halt at \(x_0\). One possible solution might be to set \(\phi(x_0)=0\). At least heuristically this makes sense considering that the denominator in the original definition \eqref{eq_drift_coeff} of \(\phi\) is the density function of \(\boldsymbol{\pi}\). \\ However, extending Theorem \ref{thm_cpn} to this case needs a new idea since the current proof relies on the fact that any solution of \eqref{eq_iile} can be associated to a locally integrable function. \paragraph*{Target measures with arbitrary tails} By Assumption \textbf{(b1)}, we require \(\boldsymbol{\pi}\) to have an exponential tail. This is mostly needed in the proof of Theorem \ref{thm_cpn} (i). As with heavy tailed jumps, using a more sophisticated norm-like function will most likely enable us to consider target measures \(\boldsymbol{\pi}\) for which only \(|\pi(x)|\leqslant c\mathrm{e}^{-\alpha x}\) for all \(x\gg1\) and some constants \(c,\alpha>0\). \\ In case \(\boldsymbol{\pi}\) has a heavy tail, that is when \(|\pi(x)|\geqslant c x^{-(1+\alpha)}\) for all \(x\gg1\) and some constants \(c,\alpha>0\), it is not clear whether \((X_t)_{t\geqslant0}\) is positive Harris recurrent or not. \paragraph*{Higher dimensions} Theorem \ref{thm_infinv} can be extended easily to target measures \(\boldsymbol{\pi}\) on \((\mathds{R}^d,\mathcal{B}(\mathds{R}^d))\) and \(d\)-dimensional driving noises \((L_t)_{t\geq0}\) with \(d\geqslant2\). Simply use the multi-dimensional counterparts (cf. 
\cite{behmeoechsler}) to all occurring terms in the definition \eqref{eq_drift_coeff} of the drift coefficient, and make sure that jumps can only land in \(\mathcal{E}\), and that \((X_t)_{t\geqslant0}\) cannot drift onto \(\partial \mathcal{E}\).\\ However, Equation \eqref{eq_iile} becomes a partial differential equation in the multi-dimensional case. Thus, Theorem \ref{thm_cpn} cannot be extended with the same approach. \paragraph*{Lévy-type driving noise} A solution to some of the problems mentioned above, e.g. disconnected supports or atoms, might be to select space dependent driving noises. For the case when \((L_t)_{t\geq0}\) is a Lévy-type process (for details see \cite{bottcher}) \cite{behmeoechsler} provides the required framework for defining the drift coefficient. Just like with higher dimensions extending Theorem \ref{thm_infinv} to this setting is feasible while the extension of Theorem \ref{thm_cpn} might require a new approach. \section{Acknowledgments}\label{sec_ack} I would like to thank Anita Behme for her advice and I am very grateful for her helpful comments and suggestions. \nocite{protter} \nocite{behmeoechsler} \nocite{MTI} \nocite{MTII} \nocite{MTIII} \nocite{eliazar} \end{document}
\begin{document} \title{Continuous-variable gate decomposition for the Bose-Hubbard model} \author{Timjan Kalajdzievski} \author{Christian Weedbrook} \author{Patrick Rebentrost} \affiliation{Xanadu, 372 Richmond St W, Toronto, M5V 2L7, Canada} \begin{abstract} In this work, we decompose the time-evolution of the Bose-Hubbard model into a sequence of logic gates that can be implemented on a continuous-variable photonic quantum computer. We examine the structure of the circuit that represents this time-evolution for one-dimensional and two-dimensional lattices. The elementary gates needed for the implementation are counted as a function of lattice size. We also include the contribution of the leading dipole interaction term which may be added to the Hamiltonian, and its corresponding circuit. \end{abstract} \maketitle \section{INTRODUCTION} Quantum simulation of physical systems constitutes an important application for early quantum computing devices \cite{Feyn, Lloyd1, Lloyd2}. A quantum computer can be used for the purpose of observing properties of that system which may be hard to obtain from direct experiments or classical computing. For example, such simulation may be used to determine the ground state energies of certain molecules or to simulate systems of molecules, which can be difficult to determine using a classical computer \cite{MolGrndState,Whitfield2011, HSimChem}. Usually, the starting point is a reasonable model for the Hamiltonian of the physical system and mapping of that Hamiltonian into the degrees of freedom of the quantum simulator. Once a suitable mapping from the physical system has been found, the Hamiltonian time evolution operator is simulated by applying specific operations on the quantum device. The domain of Hamiltonian simulation examines the efficient implementation of Hamiltonians by considering their properties such as locality or sparsity. 
Often such simulation can be performed efficiently, that is polylogarithmically in the size of the Hilbert space and close to linear in the simulation time. For qubit quantum computers, such Hamiltonian simulations have been discussed in detail in \cite{HsimWalk, Berry2007, Berry2010, HSimChilds, Jordan2012, HSimChilds2, HSimSpectral, Childs2017, Haah2018}. The Bose-Hubbard model has been studied extensively, describing a system of bosonic particles trapped in an optical lattice \cite{dipole}. This model is simulated using various methods such as quantum Monte Carlo simulations \cite{BHSpace, BHSim1, BHSim2, BHSim3, BHSim4}. The purpose of most of these simulations has been to examine state transitions between a superfluid and a Mott insulator \cite{BHSim1, BHSim3, BHSim4, dipole, BHSim5}. The Bose-Hubbard model also has applications in examining the generation of entanglement \cite{entangle} and the creation of quantum magnetic insulators \cite{qmagnets}. It has been shown that the one-dimensional Bose-Hubbard model may be easily simulated classically \cite{1d, BH1d}. However, the general problem of finding the ground state of a quantum system, including the Bose-Hubbard quantum system, is QMA-complete, and simulating the time evolution operator is BQP-complete when formalized as a decision problem \cite{qma1, qma2, qma3, Haah2018}. This means that there exists an efficient quantum algorithm that can accurately determine whether or not a given output was one produced from the Bose-Hubbard system, whereas it is believed that no such efficient classical algorithm exists. Here, efficient means that the algorithm scales as a polynomial in the size of the system. A photonic continuous variable (CV) quantum computer utilizes the infinite-dimensional Hilbert space of the light field and can provide resource advantages compared to qubit quantum computers \cite{ChrisOverview}. 
Other advantages include room temperature computations and large-scale entanglement generation through the use of squeezers and low-cost components such as beam splitters and phase shifters \cite{Yokoyama2013}. Hamiltonian simulation can also be adapted for these continuous variable systems \cite{decompose}. In this work, we discuss the simulation of the Bose-Hubbard Hamiltonian on a CV quantum computer. For the Bose-Hubbard Hamiltonian, we show that a CV system allows for a straightforward mathematical decomposition into the required logic gates, as well as a circuit topology that allows for advantages in implementation. We present the exact resource counts required to simulate the Bose-Hubbard Hamiltonian on a CV quantum computer. We consider the standard tunneling and on-site interaction terms of the Hamiltonian and also the addition of a dipole interaction term. We use Baker-Campbell-Hausdorff expansions in order to arrive at an elementary set of gates which are exponentials of powers of the position operator. This involves at most cubic and quartic single-mode gates. We also present the circuits that implement 1-D and 2-D Bose-Hubbard models of variable sizes. This paper is structured as follows. Section~\ref{Hamiltonian} presents the Hamiltonian and Sec.~\ref{Gate Decomposition} presents the standard relations for the decomposition of exponential polynomials of the position and momentum operators. In Sec.~\ref{Dipole Term}, we discuss the additional dipole term to the Hamiltonian. In Sec.~\ref{Circuit Implementations and Gate Counts}, we discuss the circuit implementations and the gate counts, as well as optical implementations of the gates and some potential sources of errors. In Sec.~\ref{Discussion} we offer a discussion and conclusion. \section{BOSE-HUBBARD HAMILTONIAN} \label{Hamiltonian} \begin{figure} \caption{(a) A visualization of the effects of the terms in the Bose-Hubbard Hamiltonian. 
Here, $J$ is the tunneling coefficient which dictates the movement of particles from one site to a neighboring site, $U$ is the on-site interaction between two particles, and $V_{\rm{dip}}$ is the nearest-neighbor dipole interaction. (b) A one-dimensional lattice with four sites.} \label{BHFig} \end{figure} The Bose-Hubbard Hamiltonian describes a system of bosonic particles trapped in an optical lattice of $N$ sites. Using notation from \cite{dipole}, it is given by \begin{equation} \label{eqHamiltonian} H = -\frac{J}{2}\sum_{\{i,j\}}\hat{a}^{\dagger}_{i}\hat{a}_{j} + \frac{U}{2}\sum_{i=1}^N\hat{n}_{i}(\hat{n}_{i} - 1), \end{equation} where the two terms with the factors $J$ and $U$ represent the tunneling of a particle in one site to a neighboring site, and the on-site interaction, respectively (see Fig.~\ref{BHFig} for a schematic). The bosonic creation (annihilation) operators are given by $\hat a^\dagger_i$ ($\hat a_i$) and the number operator is $\hat{n}_{i}=\hat{a}^{\dagger}_{i}\hat{a}_{i}$. The sum $\sum_{\{i,j\}}$ spans neighboring sites. Additional terms may be added to the Hamiltonian which come from dipole interactions \cite{dipole}. Methods to perform a gate decomposition for some of these terms are discussed in the appendix, but first the terms in Eq.~(\ref{eqHamiltonian}) are examined in detail. The objective of this work is to find an appropriate implementation of quantum gates which can be used to simulate the evolution of this Hamiltonian $e^{itH}$ for a time $t$. In order to do this, $e^{itH}$ is decomposed into more elementary time evolution operators. \section{GATE DECOMPOSITION} \label{Gate Decomposition} Note that the $J$ terms as well as part of the $U$ terms are of Gaussian order, therefore they may be efficiently implemented with linear optics. The non-Gaussian $U$ term may be further broken down into single-mode quadrature operations of quartic order, as well as Gaussian operations. This decomposition is now examined more precisely. 
First, the operators $\hat{a}^{\dagger}_{i}$, $\hat{a}_{i}$ and $\hat n_i$ are expanded in terms of position operators $\hat{x}_{i}$ and momentum operators $\hat{p}_{i}$ via \begin{eqnarray} \hat a_i &=& \hat x_i+i \hat p_i \nonumber ,\\ \hat a^\dagger_i &=& \hat x_i-i \hat p_i ,\\ \hat a^{\dagger}_i \hat a_i &=& \hat x_i^2 + \hat p_i^2+i[\hat{x}_{i}, \hat{p}_{i}] \nonumber . \end{eqnarray} In addition, the operators observe the commutator relation $[\hat{x}_{i}, \hat{p}_{i}] = i/2$. Considering these relations and neglecting a constant energy shift an expanded Hamiltonian is then written as \begin{multline} H = -J\sum_{\{i,j\}:i<j}\left(\hat{x}_{i}\hat{x}_{j} + \hat{p}_{i}\hat{p}_{j} \right)+ \\ \frac{U}{2}\sum_{i}\big(\left(\hat{x}^{4}_{i} + \hat{x}^{2}_{i}\hat{p}^{2}_{i} +\hat{p}^{2}_{i}\hat{x}^{2}_{i} + \hat{p}^{4}_{i} - \hat{x}^{2}_{i} - \hat{p}^{2}_{i} \right) + \\ \left(- \hat{x}^{2}_{i} - \hat{p}^{2}_{i} \right) \big). \end{multline} We can simplify $\hat{x}^{2}_{i}\hat{p}^{2}_{i} +\hat{p}^{2}_{i}\hat{x}^{2}_{i}$ with a relation from \cite{decompose} \begin{equation} \hat{x}^{2}_{i}\hat{p}^{2}_{i} +\hat{p}^{2}_{i}\hat{x}^{2}_{i} = -\frac{4}{9}i[\hat{x}^{3}_{i},\hat{p}^{3}_{i}]. \end{equation} As the time evolution to be simulated is $e^{itH}$, we can use the Lie product formula \cite{Childs2017} for sums of operators $H=\sum_{j=1}^N H_j$, \begin{equation}\label{eqLieProduct} e^{i t \sum_{j=1}^N H_j} = \left( \prod_ {j=1}^N e^{i t H_j/K} \right)^K + \mathcal R, \end{equation} where the choice of $K$ controls the size of the remainder $\mathcal R$ and thus gives the accuracy of the decomposition. The size of the remainder can be bounded by \cite{Childs2017} \begin{equation} \label{eqErrorBound} \Vert \mathcal R \Vert = O\left( \frac{N^2 t^2 \Lambda^2}{K} \right), \end{equation} where $\Lambda:=\max_{j}\Vert H_j \Vert$ is the largest Hamiltonian norm. We discuss the choice of $K$ in Sec.~\ref{subsectionErrors} below. 
In our case, we can write \begin{multline} e^{itH} = \Bigg(\prod_{\{i,j\}:i<j} e^{-i\frac{t}{K}J\hat{x}_{i}\hat{x}_{j}}e^{-i\frac{t}{K}J\hat{p}_{i}\hat{p}_{j}} \\ \prod_{i} e^{i\frac{t}{K}\frac{U}{2}\hat{x}^{4}_{i}}e^{\frac{t}{K}\frac{2U}{9}[\hat{x}^{3}_{i},\hat{p}^{3}_{i}]}e^{i\frac{t}{K}\frac{U}{2}\hat{p}^{4}_{i}} e^{-i\frac{t}{K}U\hat{x}^{2}_{i}}e^{-i\frac{t}{K}U\hat{p}^{2}_{i}} \Bigg)^{K} \\ + \mathcal{R}. \end{multline} The largest Hamiltonian norm here is at most $\Lambda = O({\rm poly} (J,U) )$, taken to be $O(1)$, as all terms involve the position and momentum operators \cite{decompose}. We can rotate every momentum operator into the position basis by a Fourier transform, denoted by $\mathcal{F}_{i}$ for mode $i$. For every polynomial $g$ we have \begin{equation} g(\hat{p}_i) = g(\mathcal{F}_{i}\hat{x}_i\mathcal{F}^{\dagger}_{i}) = \mathcal{F}_{i}g(\hat{x}_i)\mathcal{F}^{\dagger}_{i}. \end{equation} In addition, we can use commutator simulation via the relation \cite{decompose} \begin{equation}\label{eqCommutatorSim} e^{[A,B]\tau^{2}}=e^{iB\tau}e^{iA\tau}e^{-iB\tau}e^{-iA\tau}e^{iB\tau}e^{iA\tau}e^{-iB\tau}e^{-iA\tau} + O(\tau^4), \end{equation} to partition $e^{\frac{t}{K}\frac{2U}{9}[\hat{x}^{3}_{i},\hat{p}^{3}_{i}]}$ into terms involving $e^{i\left(\frac{t}{K}\frac{2U}{9}\right)^{1/2}\hat{x}^{3}_{i}}$ and $\mathcal{F}_{i}e^{i\left(\frac{t}{K}\frac{2U}{9}\right)^{1/2}\hat{x}^{3}_{i}}\mathcal{F}^{\dagger}_{i}$. Note that in Eq.~(\ref{eqCommutatorSim}), $\tau$ is proportional to $(t/K)^{1/2}$, thus the error is proportional to $(t/K)^{2}$. 
The final expanded form of the time-evolution operator is given by \begin{multline}\label{eqTimeEvolution} e^{itH} = \Bigg(\prod_{\{i,j\}:i<j} e^{-i\frac{t}{K}J\hat{x}_{i}\hat{x}_{j}}\mathcal{F}_{i}\mathcal{F}_{j}e^{-i\frac{t}{K}J\hat{x}_{i}\hat{x}_{j}}\mathcal{F}^{\dagger}_{j}\mathcal{F}^{\dagger}_{i} \\ \prod_{i} e^{i\frac{t}{K}\frac{U}{2}\hat{x}^{4}_{i}} \\ \mathcal{F}_{i}e^{i\left(\frac{t}{K}\frac{2U}{9}\right)^{1/2}\hat{x}^{3}_{i}}\mathcal{F}^{\dagger}_{i} e^{i\left(\frac{t}{K}\frac{2U}{9}\right)^{1/2}\hat{x}^{3}_{i}}\mathcal{F}_{i}e^{-i\left(\frac{t}{K}\frac{2U}{9}\right)^{1/2}\hat{x}^{3}_{i}} \\ \mathcal{F}^{\dagger}_{i} e^{-i\left(\frac{t}{K}\frac{2U}{9}\right)^{1/2}\hat{x}^{3}_{i}}\mathcal{F}_{i}e^{i\left(\frac{t}{K}\frac{2U}{9}\right)^{1/2}\hat{x}^{3}_{i}}\mathcal{F}^{\dagger}_{i} e^{i\left(\frac{t}{K}\frac{2U}{9}\right)^{1/2}\hat{x}^{3}_{i}}\\ \mathcal{F}_{i}e^{-i\left(\frac{t}{K}\frac{2U}{9}\right)^{1/2}\hat{x}^{3}_{i}}\mathcal{F}^{\dagger}_{i} e^{-i\left(\frac{t}{K}\frac{2U}{9}\right)^{1/2}\hat{x}^{3}_{i}} \\ \mathcal{F}_{i}e^{i\frac{t}{K}\frac{U}{2}\hat{x}^{4}_{i}}\mathcal{F}^{\dagger}_{i} e^{-i\frac{t}{K}U\hat{x}^{2}_{i}}\mathcal{F}_{i}e^{-i\frac{t}{K}U\hat{x}^{2}_{i}}\mathcal{F}^{\dagger}_{i} \Bigg)^{K} \\ + O(\mathcal R). \end{multline} The error term that arises from Eq.~(\ref{eqCommutatorSim}) accumulates $K$ times, thus the contribution to the error in the final expression is proportional to $K\cdot \frac{t^2}{K^2} = \frac{t^2}{K}$ and can be absorbed into the existing error term. \section{DIPOLE TERM} \label{Dipole Term} In case there is a dipole interaction between the bosons in the lattice an additional term may be added to the Hamiltonian \cite{dipole}, that is given by \begin{equation}\label{eqHamiltonianDipole} H_{nn} = V_{\rm{dip}} \sum_{\{i,j\}:i<j}\hat{n}_{i}\hat{n}_{j}, \end{equation} where this term is of leading order in dipole contributions, and corresponds to a nearest neighbor interaction. 
Other terms in dipole contributions are briefly examined in the appendix. Following the procedure from before, we can expand the Hamiltonian in terms of $\hat{p}$ and $\hat{x}$ operators, then rotate the $\hat{p}$s into $\hat{x}$s and decompose into single-mode quartic gates. Again to error $ O(N^2 t^2 /K)$, the sequence of gates includes the sequence of four Gaussian gates given by \begin{equation}\label{eqDip1} e^{-i\frac{t}{K}\frac{V_{\rm{dip}}}{2}\hat{x}^{2}_{i}}\mathcal{F}_{i}e^{-i\frac{t}{K}\frac{V_{\rm{dip}}}{2}\hat{x}^{2}_{i}}\mathcal{F}^{\dagger}_{i}e^{-i\frac{t}{K}\frac{V_{\rm{dip}}}{2}\hat{x}^{2}_{j}}\mathcal{F}_{j}e^{-i\frac{t}{K}\frac{V_{\rm{dip}}}{2}\hat{x}^{2}_{j}}\mathcal{F}^{\dagger}_{j}, \end{equation} and four quartic terms given by \begin{multline}\label{eqDip2} e^{i\frac{t}{K}V_{\rm{dip}}\hat{x}^{2}_{i}\hat{x}^{2}_{j}}\mathcal{F}_{j}e^{i\frac{t}{K}V_{\rm{dip}}\hat{x}^{2}_{i}\hat{x}^{2}_{j}}\mathcal{F}^{\dagger}_{j}\mathcal{F}_{i}e^{i\frac{t}{K}V_{\rm{dip}}\hat{x}^{2}_{i}\hat{x}^{2}_{j}}\mathcal{F}^{\dagger}_{i}\\ \mathcal{F}_{i}\mathcal{F}_{j}e^{i\frac{t}{K}V_{\rm{dip}}\hat{x}^{2}_{i}\hat{x}^{2}_{j}}\mathcal{F}^{\dagger}_{j}\mathcal{F}^{\dagger}_{i}. \end{multline} Each of these two-mode quartic operators involving $\hat{x}^{2}_{i}\hat{x}^{2}_{j}$ can be decomposed into single-mode quartics and two-mode Gaussian operators using $12 \hat{x}^{2}_{i}\hat{x}^{2}_{j}=(\hat{x}_{i}-\hat{x}_{j})^4 + (\hat{x}_{i}+\hat{x}_{j})^4 - 2\hat{x}_{i}^4 -2\hat{x}_{j}^4$. In addition we employ, $e^{i \hat p_i x_j} f(\hat x_i) e^{-i \hat p_i x_j} = f(\hat x_i + x_j) $ for appropriate functions $f(x)$. 
This leads to the following relation \begin{multline} \label{eqTwoModeQuartic} e^{i\frac{t}{K}V_{\rm{dip}}\hat{x}^{2}_{i}\hat{x}^{2}_{j}} = \mathcal{F}_{i}e^{i2\hat{x}_{i}\hat{x}_{j}}\mathcal{F}^{\dagger}_{i}e^{i\frac{t}{K}\frac{V_{\rm{dip}}}{12}\hat{x}^{4}_{i}}\mathcal{F}_{i}e^{-i4\hat{x}_{i}\hat{x}_{j}}\mathcal{F}^{\dagger}_{i} \\ e^{i\frac{t}{K}\frac{V_{\rm{dip}}}{12}\hat{x}^{4}_{i}}\mathcal{F}_{i}e^{i2\hat{x}_{i}\hat{x}_{j}}\mathcal{F}^{\dagger}_{i}e^{-i\frac{t}{K}\frac{V_{\rm{dip}}}{6}\hat{x}^{4}_{i}}e^{-i\frac{t}{K}\frac{V_{\rm{dip}}}{6}\hat{x}^{4}_{j}}. \end{multline} \section{CIRCUIT IMPLEMENTATIONS AND GATE COUNTS} \label{Circuit Implementations and Gate Counts} In this section, we show the quantum circuits implementing the time evolution of the Bose-Hubbard model. We start by examining the circuit for a one-dimensional four-node lattice, and then examine the additional circuit of the dipole interaction term. We then generalize to two-dimensional lattices of size $n\times n$. We first introduce several elementary operations. The following single-mode gates are used \begin{eqnarray} \mbox{ \Qcircuit @C=.5em @R=0em @!R { & \gate{P(t)} & \qw & \push{\rule{.3em}{0em}=\rule{.3em}{0em}} & \qw &\gate{e^{it\hat{x}^2}} &\qw }} \nonumber \\ \mbox{ \Qcircuit @C=.5em @R=0em @!R { & \gate{V(t)} & \qw & \push{\rule{.3em}{0em}=\rule{.3em}{0em}} & \qw &\gate{e^{it\hat{x}^3}} &\qw } }\\ \mbox{ \Qcircuit @C=.5em @R=0em @!R { & \gate{Q(t)} & \qw & \push{\rule{.3em}{0em}=\rule{.3em}{0em}} & \qw &\gate{e^{it\hat{x}^4}} &\qw } } \nonumber \end{eqnarray} where $P$ is a quadratic (shearing) gate consisting of the optical elements of squeezing and rotations; $V$ is the cubic phase gate; and $Q$ is the quartic gate. 
The two-mode Cz, or C-PHASE, gate is given by \begin{equation} \mbox{ \Qcircuit @C=.7em @R=.5em @!R { & \ctrl{2} & \qw &&&& \multigate{2}{e^{ig\hat{x}_1 \hat x_2}} & \qw & \\ & \rstick{g} &&& \push{\rule{.3em}{0em}=\rule{.3em}{0em}}&&&&& \\ & \ctrl{-2} & \qw &&&& \ghost{e^{ig\hat{x}_1 \hat x_2}} & \qw & } } \end{equation} where we use a more generalized version of the Cz gate with a tunable (strength) parameter $g$. \begin{widetext} \subsection{1-D Lattice Circuits} To present an example circuit for a single time step as in Eq.~(\ref{eqTimeEvolution}), we consider a 1-D lattice with four nodes as in Fig.~\ref{BHFig}(b). The circuit is given by \begin{equation} \mbox{\Qcircuit @C=1em @R=.7em { & \multigate{1}{J} & \qw & \qw & \qw & \qw & \gate{U} & \qw \\ & \ghost{J} & \multigate{1}{J} & \qw & \qw & \qw & \gate{U} & \qw\\ & \qw & \ghost{J} & \multigate{1}{J} & \qw &\qw & \gate{U} & \qw \\ & \qw & \qw & \ghost{J} & \qw & \qw & \gate{U} & \qw }} \end{equation} Here, the gate $J$ is given by \begin{equation}\label{eqGateJ} \mbox{ \Qcircuit @C=.7em @R=.5em @!R { & \multigate{2}{J(g)} & \qw & & & \gate{\mathcal{F}^\dagger} & \ctrl{2} & \gate{\mathcal{F}} & \ctrl{2} & \qw \\ &&& \push{\rule{.3em}{0em}=\rule{.3em}{0em}} &&& \rstick{g} && \rstick{g}& \\ & \ghost{J(g)} & \qw & & & \gate{\mathcal{F}^\dagger} & \ctrl{-2} & \gate{\mathcal{F}} & \ctrl{-2} & \qw } } \end{equation} The Cz gate is performed in between each pair of Fourier transform gates and $g$ is taken to be $g=tJ/K=:g_J$. 
To simplify the $U$ gate we introduce a series of cubic and Fourier transform gates notated by $C$, given by the circuit \begin{equation} \mbox{ \Qcircuit @C=.5em @R=0em @!R { & \gate{C(t)} & \qw & \push{\rule{.3em}{0em}=\rule{.3em}{0em}} & & \gate{V(t)} & \gate{\mathcal{F}^{\dagger}} & \gate{V(t)} & \gate{\mathcal{F}} & \qw } } \end{equation} The $U$ gate is then given by the circuit, with $g_U=\frac{tU}{K}$ and $g_C=(\frac{t}{K}\frac{2U}{9})^{1/2}$, \begin{equation} \mbox{ \Qcircuit @C=1em @R=.7em { & \gate{U(g_U,g_C)} & \qw & \push{\rule{-.3em}{0em}=\rule{.5em}{0em}} & \gate{\mathcal{F}^{\dagger}} & \gate{P\left(g_U\right)} & \gate{\mathcal{F}} & \gate{P\left(g_U\right)} & \gate{\mathcal{F}^{\dagger}} & \gate{Q\left(\frac{g_U}{2 }\right)} & \gate{\mathcal{F}} & \gate{C(g_C)^4}& \gate{Q\left(\frac{g_U}{2 }\right)} & \qw } } \end{equation} The gates of each type needed for this circuit will be denoted in the form $(\mathcal{F}, P, V, Q, {\rm Cz})$. In the present case, we have $(\mathcal{F}, P(g_U), V(g_C), Q(g_U/2), {\rm Cz}(g)) = (60, 8, 32, 8, 6)$. Thus, for one time step, we need 60 Fourier gates, 8 quadratic gates (squeezers and rotations), 32 cubic gates, 8 quartic gates, and 6 Cz gates, with the given gate times $g, g_U,$ and $g_C$. \subsection{Circuit For Dipole Term} The additional dipole term may also be implemented in a circuit for a single time step in a 1-D lattice of $4$ nodes.
This circuit is given by \begin{equation} \mbox{ \Qcircuit @C=1em @R=.7em { & \multigate{1}{V_{nn}} & \qw & \qw & \qw \\ & \ghost{V_{nn}} & \multigate{1}{V_{nn}} & \qw & \qw \\ & \qw & \ghost{V_{nn}} & \multigate{1}{V_{nn}} & \qw \\ & \qw & \qw & \ghost{V_{nn}} & \qw }} \end{equation} To expand the $V_{nn}$ gate we introduce the decomposition of the two-mode quartic gate in Eq.~(\ref{eqTwoModeQuartic}) notated by $W$, which has the circuit \begin{align} &\Qcircuit @C=.5em @R=1.2em @!R { & \multigate{2}{W} & \qw \\ &&& \push{\rule{.3em}{0em}=\rule{.3em}{0em}} \\ &\ghost{W} & \qw } \! \! \! \! \! \! \! \! \! \! \! \! \! \! \! \! \! \! \! \! \! \! \! \! \! \! \! \! \! \! \! \! \! \! \! \! \! \! &\Qcircuit @C=.5em @R=.5em @!R { & \gate{Q\left(\frac{g_V}{3}\right)} & \gate{\mathcal{F}^\dagger} & \ctrl{1} & \gate{\mathcal{F}} & \gate{Q\left(\frac{g_V}{6}\right)} & \gate{\mathcal{F}^\dagger} & \ctrl{1} & \gate{\mathcal{F}} & \gate{Q\left(\frac{g_V}{6}\right)} & \gate{\mathcal{F}^\dagger} & \ctrl{1} & \gate{\mathcal{F}} & \qw \\ & & & \rstick{2} & & & & \rstick{-4} & & & & \rstick{2} & & \\ & \gate{Q\left(\frac{g_V}{3}\right)} & \qw & \ctrl{-1} & \qw & \qw & \qw & \ctrl{-1} & \qw & \qw & \qw & \ctrl{-1} & \qw & \qw } \end{align} Here, we take $g_V=tV_{\rm{dip}}/2K$. 
The $V_{nn}$ gate is then given by \begin{equation} \mbox{ \Qcircuit @C=1em @R=.7em { & \gate{P(g_V)} & \gate{\mathcal{F}^{\dagger}} & \gate{P(g_V)} & \gate{\mathcal{F}} & \multigate{1}{W} & \qw & \multigate{1}{W} & \qw & \gate{\mathcal{F}^{\dagger}} & \multigate{1}{W} & \gate{\mathcal{F}} & \gate{\mathcal{F}^{\dagger}} & \multigate{1}{W} & \gate{\mathcal{F}} & \qw \\ & \gate{P(g_V)} & \gate{\mathcal{F}^{\dagger}} & \gate{P(g_V)} & \gate{\mathcal{F}} & \ghost{W} & \gate{\mathcal{F}^{\dagger}} & \ghost{W} & \gate{\mathcal{F}} & \qw & \ghost{W} & \qw & \gate{\mathcal{F}^{\dagger}} & \ghost{W} & \gate{\mathcal{F}} & \qw } } \end{equation} Using a similar gate count notation we used previously, the dipole part of the circuit for the 1-D lattice will have a gate count of $(\mathcal{F}, P(g_V), Q\left(\frac{g_V}{3}\right), Q\left(\frac{g_V}{6}\right), {\rm Cz}(2), {\rm Cz}(-4)) = (108, 12, 24, 24, 24, 12)$. This means the total circuit including all of the $U$ and $J$ terms will have a gate count of $(\mathcal{F}, P(g_U), P(g_V), V(g_C), Q\left(\frac{g_U}{2}\right), Q\left(\frac{g_V}{3}\right), Q\left(\frac{g_V}{6}\right), {\rm Cz}(g), {\rm Cz}(2), {\rm Cz}(-4))= (168, 8, 12, 32, 8, 24, 24, 6, 24, 12)$ for a single time step. \subsection{2-D Lattice Circuits} In this section, we discuss two-dimensional lattices of size $n \times n$. First, consider a $2\times2$ lattice with four total nodes as in Fig.~\ref{BHFig}(c). 
The circuit has the form \begin{equation} \Qcircuit @C=1em @R=.7em { & \multigate{1}{J} & \gate{J} \qwx[2] & \qw & \qw & \gate{U} & \multigate{1}{V_{nn}} & \gate{V_{nn}} \qwx[2] & \qw & \qw & \qw \\ & \ghost{J} & \qw & \gate{J} \qwx[2] & \qw & \gate{U} & \ghost{V_{nn}} & \qw & \gate{V_{nn}} \qwx[2] & \qw & \qw \\ & \qw & \gate{} & \qw & \multigate{1}{J} & \gate{U} & \qw & \gate{} & \qw & \multigate{1}{V_{nn}} & \qw \\ & \qw & \qw & \gate{} & \ghost{J} & \gate{U} & \qw & \qw & \gate{} & \ghost{V_{nn}} & \qw } \label{Lattice2x2} \end{equation} \end{widetext} Here, we have introduced a new notation for the two-mode $J$ and $V_{nn}$ gates over two non-neighboring wires. This can be implemented on a circuit with only nearest neighbor coupling by swapping neighboring modes, applying the $J$ or $V_{nn}$ gates and then swapping back. For example, \begin{equation} \mbox{ \Qcircuit @C=.5em @R=0em @!R { &&&& \lstick{1} & \gate{J} \qwx[2] & \qw & & & \lstick{1} & \qw & \multigate{1}{J} & \qw & \qw \\ &&&& \lstick{2} & \qw & \qw & \push{\rule{.3em}{0em}=\rule{.3em}{0em}} & & \lstick{2} & \qswap & \ghost{J} & \qswap & \qw \\ &&&& \lstick{3} & \gate{} & \qw & & & \lstick{3} & \qswap \qwx & \qw & \qswap \qwx & \qw } } \end{equation} Note that the square box on the circuit indicates the other qumode that is being acted upon. This can similarly be done for an $n\times n$ lattice where, if the Bose-Hubbard model has nearest neighbor couplings, at most $n$ swaps are needed on either side of a gate. For an $n\times n$ lattice the first part of the circuit, which is the nearest neighbor pattern involving the $J$ gates, is given in Appendix \ref{appendixCircFull}. We now show the final gate count for the $n\times n$ lattice. Following our notation as before, we also include a count for the number of swaps needed. 
For each $J$ gate the count is $(\mathcal{F},\rm{Cz}(g)) = (4, 2)$, and for the $n\times n$ lattice there are $2(n^{2}-n)$ $J$ gates and $2(n^{3}-n^{2})$ swaps, which gives us a gate count of $(\mathcal{F},\rm{Cz}(g), \text{SWAP}) = \left(8(n^{2}-n), 4(n^{2}-n), 2(n^{3}-n^{2})\right)$. As shown above, each $U$ gate has a count of $(\mathcal{F}, P(g_U), V(g_C), Q\left(\frac{g_U}{2 }\right)) = (12, 2, 8, 2)$ and in the lattice we have $n^{2}$ of them, giving a total count for the $U$ gates of $(\mathcal{F}, P(g_U), V(g_C), Q\left(\frac{g_U}{2 }\right)) = (12n^{2}, 2n^{2}, 8n^{2}, 2n^{2})$. Finally, each $V_{nn}$ gate has a count of $(\mathcal{F}, P(g_V), Q\left(\frac{g_V}{3}\right), Q\left(\frac{g_V}{6}\right), {\rm Cz}(2), {\rm Cz}(-4)) = (36, 4, 8, 8, 8, 4)$, and in the lattice the $V_{nn}$ gates follow the same pattern as the $J$ gates, so we have a total contribution from the $V_{nn}$ gates of $(\mathcal{F}, P(g_V), Q\left(\frac{g_V}{3}\right), Q\left(\frac{g_V}{6}\right), {\rm Cz}(2), {\rm Cz}(-4), \text{SWAP}) = \big(72(n^{2}-n), 8(n^{2}-n), 16(n^{2}-n), 16(n^{2}-n), 16(n^{2}-n),$ $8(n^{2}-n), 2(n^3 - n^2)\big)$. Therefore, the final gate count for our $n\times n$ lattice is \begin{multline} (\mathcal{F}, P(g_U), P(g_V), V(g_C), Q\left(\frac{g_U}{2}\right), Q\left(\frac{g_V}{3}\right), \\ Q\left(\frac{g_V}{6}\right), {\rm Cz}(g), {\rm Cz}(2), {\rm Cz}(-4), \text{SWAP}) = \\ \big(92n^{2}-80n, 2n^2, 8(n^{2}-n), 8n^2, 2n^2, 16(n^{2}-n), \\ 16(n^{2}-n), 4(n^{2}-n), 16(n^{2}-n), 8(n^{2}-n), 4(n^{3}-n^{2})\big). \end{multline} Note that this is the gate count for each time step of length $t/K$ in the series of gates simulating $e^{iHt}$, as in Eq.~(\ref{eqTimeEvolution}) and Eqs.~(\ref{eqDip1}) to (\ref{eqTwoModeQuartic}).
\subsection{Optical Implementation} Note that the Gaussian elements of the circuits outlined in the previous sections can be implemented deterministically with linear optics whereas the higher-order gates are more complex and contain probabilistic elements. In this section, we briefly discuss the optical implementation of the various gates for completeness and note that the reader can find more information in the following citations. First, we examine the $J$ gate as in Eq.~(\ref{eqGateJ}). This circuit element consists of Fourier transforms and Cz gates which are single-mode Gaussian and multi-mode Gaussian operations and as such can be implemented with linear optics. Ref.~\cite{Gu2009} shows that any single-mode Gaussian operation can be implemented using rotations (or phase shifts), displacements, and either squeezing or shearing (squeezing and rotations). On the other hand, multi-mode Gaussian operations require the use of beam splitters. For the $J$ gate, the Fourier transforms are implemented simply with rotations of $\frac{\pi}{2}$, whereas the Cz gates require squeezers and the multi-mode transformation of beam splitters. More precisely, the Cz gate is given by \cite{linear} \begin{widetext} \begin{equation} \mbox{ \Qcircuit @C=.5em @R=.5em @!R { & \ctrl{1} & \qw & & & \gate{S} & \multigate{2}{BS} & \qw \\ &&&\push{\rule{.3em}{0em}=\rule{.3em}{0em}} &&&&& \\ & \ctrl{-1} & \qw & & & \gate{S} & \ghost{BS} & \qw } } \end{equation} where squeezing is denoted by $S$ gates, and beam splitters by $BS$ gates. The tunable Cz gate will have similar components but needs the squeezing parameters and beam splitting ratios to be changed to fit our choice of $g_J=tJ/K$ \cite{GraphCalc}. 
Using the implementation for the tunable Cz gate along with the rotations that comprise the Fourier transforms, the $J$ gate can be optically implemented in the following way \begin{equation} \mbox{ \Qcircuit @C=.7em @R=.5em @!R { & \multigate{2}{J(g)} & \qw & & & \gate{R(-\frac{\pi}{2})} & \gate{S(g)} & \multigate{2}{BS} & \gate{R(\frac{\pi}{2})} & \gate{S(g)} & \multigate{2}{BS} & \qw \\ &&& \push{\rule{.3em}{0em}=\rule{.3em}{0em}} &&& &&& \\ & \ghost{J(g)} & \qw & & & \gate{R(-\frac{\pi}{2})} & \gate{S(g)} & \ghost{BS} & \gate{R(\frac{\pi}{2})} & \gate{S(g)} & \ghost{BS} & \qw } } \end{equation} \end{widetext} In order to implement higher-order gates we require more than the set of optical elements discussed above. The cubic phase operators denoted by the $V(t)$ gates in our circuits, are an example of these higher-order operations. To implement the cubic phase gate we add to our set of optical elements a photon counting measurement, which introduces the non-linearity needed. The full implementation then involves a displaced two-mode squeezed state for which $\hat{R}^\dagger \hat n \hat{R}$ (photon counting in a rotated basis) is measured on one arm. The desired cubic operation is then collapsed onto the second unmeasured mode \cite{Gottesman2001}. This is followed by a squeezing correction conditioned on the outcome of the photon number resolving detector followed by gate teleportation (all Gaussian elements). The initial two-mode squeezed state can be implemented with squeezing, beam splitters, and a phase shift which are all linear elements. For the optical implementation of our quartic gates ($Q$ gates in our circuits) we note that they may be expressed as a series of cubic gates by approximating in terms of commutators and using commutator simulation such as in Eq.~(\ref{eqCommutatorSim}) \cite{decompose, Marshall2015}. Thus, for quartic operations we do not need to add anything to our set of linear optical components other than multiple photon counting. 
Note that in the case where a Kerr interaction is available, given by $e^{it\hat{n}_{i}^{2}}$, it may be used to directly implement the non-linear parts of the $U$ gates \cite{decompose, Braunstein2005, Brod2016}. \subsection{A Note On Errors} \label{subsectionErrors} When performing our gate decomposition and analyzing the makeup of our example circuits, note that all gate counts are given for a single Trotter time step. Let the desired accuracy of simulating $e^{itH}$ be given by $\epsilon$. The accuracy is dependent on the choice of number of time slices $K$, the total simulation time $t$, and the number of sites $N$. From Eq.~(\ref{eqErrorBound}), we can determine $K$ to achieve a given accuracy. Such a $K$ is given by \begin{equation} K= O \left (\frac{N^2 t^2}{\epsilon} \right ). \end{equation} The commutator simulation from Eq.~(\ref{eqCommutatorSim}) contributes at most in the same order as the sum formula Eq.~(\ref{eqErrorBound}). Our final product of operations for the Bose-Hubbard Hamiltonian is raised to the power of $K$, therefore we must repeat each circuit presented in this work $K$ times in order to get the desired error of $\epsilon$. Another important source of error to discuss is the effect of finite squeezing. As discussed in the previous section, the optical implementation of the gates in our circuits will require the use of squeezing. In any experimental setup the squeezing will be finite and the end result will be dependent on a squeezing factor $s$ \cite{Menicucci2006, Gu2009}. For example, consider an optical implementation of the cubic phase gate where a photon counting measurement is made on a displaced two-mode squeezed state. To construct the two-mode squeezed state, two squeezed states, which ideally are zero-momentum eigenstates, are entangled. However, realistically the quadratures can only be finitely squeezed, in, for example, the momentum quadrature \begin{equation} \ket{0}_p \rightarrow \int dp\ e^{-(p)^{2}/(2s)} \ket{p}.
\end{equation} The cubic phase gate is then modulated by a Gaussian envelope with zero mean and variance that depends on the squeezing factor $s$ \cite{Menicucci2006}. The result of this is a distortion effect which is inversely proportional to the amount of squeezing applied. In general, there are other experimental errors due to imperfections in the implementation. For example, actual photonic devices exhibit noisy state preparation, lossy interferometers and noisy and inefficient detectors. However, we leave such analysis for future work. \section{DISCUSSION AND CONCLUSION} \label{Discussion} In this work, we have performed a decomposition of time-evolution under a bosonic Hamiltonian, namely the Bose-Hubbard Hamiltonian, into a set of elementary logic gates. Using our series of gates per-unit-time, we have presented a direct circuit implementation for a photonic quantum computer. The circuits discussed include a simple four-node, one-dimensional optical lattice for the Bose-Hubbard model and general two-dimensional lattices of size $n\times n$. Our final gate count is represented in terms of the number of gates of each type in our elementary set. For simulating the time-evolution of a Bose-Hubbard model to time $t$ and to error $\epsilon$ for an $n\times n$ lattice, the required number of gates is given by $(\mathcal{F}, P(g_U), P(g_V), V(g_C), Q\left(\frac{g_U}{2}\right), Q\left(\frac{g_V}{3}\right),$ $ Q\left(\frac{g_V}{6}\right), {\rm Cz}(g), {\rm Cz}(2), {\rm Cz}(-4), \text{SWAP}) = K \big(92n^{2}-80n, $ $2n^2, 8(n^{2}-n), 8n^2, 2n^2, 16(n^{2}-n), $ $16(n^{2}-n), 4(n^{2}-n), 16(n^{2}-n), 8(n^{2}-n), 4(n^{3}-n^{2})\big)$, where $K=O(n^2 t^2/\epsilon)$. The number of applications of each gate scales as a polynomial of the size of the lattice. Even a small two-dimensional Bose-Hubbard model may be hard to simulate classically \cite{1d}.
However, nearer-term photonic quantum computers may allow an implementation for a size $n$ where classical simulation is hard. The tunable parameters $J$, $U$, and $V_{\rm{dip}}$ also allow a proof-of-principle experiment where we allow $V_{\rm{dip}}$ and $U$ to be much smaller than $J$. In the limit of infinitely small $U$ and $V_{\rm{dip}}$ terms, the circuit is fully Gaussian and implementable using only linear optics \cite{linear}, while at the same time being efficiently simulable classically. One can then systematically introduce non-linear gates to go beyond the classically simulable regime of the Bose-Hubbard model. This would allow photonic quantum computers with a limited number of non-linear gates to be used for the simulation of such a physical system. It is also important to note that the dipole interaction term $H_{nn}$ used here is the leading term and additional higher-order terms may be added \cite{dipole}. Implementing these higher-order terms in a CV system can be the subject of future work. Another interesting problem would be to find ways to decrease the final gate count. In the previous section, we used beam splitters to allow us to apply gates to two modes that are not neighbors in our circuit. Advances in photonic integrated circuit (PIC) design may remove the need for these beam splitters by using various topological techniques, such as crossing of photonic waveguides \cite{Nic2017}. Furthermore, the field of gate optimization in qubits is established but has yet to be established for CV systems. Therefore clever optimization tricks for CV systems for particular algorithms could also be constructed to reduce gate counts. The experimental implementation of higher dimensional gates is also potentially difficult and may require additional consideration. It is possible that it would be more useful to represent our quartic gates in terms of cubic gates and remove quartic gates from our elementary set \cite{decompose}.
These higher dimensional gates may also require a feed forward implementation when decomposed in terms of lower dimensions \cite{nonlin}. It is also important to note that the procedure used in this work can be extended to other similar Hamiltonians. An efficiently simulable subclass of the Hamiltonian discussed here is the bosonic tight-binding Hamiltonian \cite{TBinding} with applications in condensed matter and solid state physics. The tight-binding Hamiltonian coupled to a bath of harmonic oscillators appears also in the study of exciton dynamics in photosynthetic complexes \cite{Exciton}. Simulating such systems can provide another application for continuous-variable photonic quantum processors. \section*{ACKNOWLEDGMENTS} We would like to thank Pierre-Luc Dallaire-Demers, Tom Bromley, Zachary Vernon, and Nicolas Quesada for interesting discussions and helpful suggestions. \onecolumngrid \appendix \section{J Gate Circuit For 2D Lattice of Arbitrary Size} \label{appendixCircFull} The following circuit diagram is for the $J$ terms of the Bose-Hubbard Hamiltonian in Eq.~(\ref{eqHamiltonian}) applied to a two-dimensional, $n\times n$ lattice, cf. Sec. V.C. The dipole interaction term as in Eq.~(\ref{eqHamiltonianDipole}) will also have the same pattern, but will have gates notated with $V_{nn}$. 
\Qcircuit @C=1em @R=.7em { &&&\lstick{1} & \multigate{1}{J} & \gate {J} \qwx[8] & \qw & \qw & \qw & \qw & \qw \\ &&&\lstick{2} & \ghost{J} & \qw & \multigate{1}{J} & \gate{J} \qwx[8] & \qw & \qw & \qw &&\cdots \\ &&&\lstick{3} & \qw & \qw & \ghost{J} & \qw & \multigate{1}{J} & \gate{J} \qwx[8] & \qw \\ &&&\lstick{4} & \qw & \qw & \qw & \qw & \ghost{J} & \qw & \qw \\ &&&&&& \cdot \\ &&&&&& \cdot \\ &&&&&& \cdot \\ \\ &&&\lstick{n+1} & \qw & \gate{} & \qw & \qw & \qw & \qw & \qw \\ &&&\lstick{n+2} & \qw & \qw & \qw & \gate{} & \qw & \qw & \qw &&\cdots&& \\ &&&\lstick{n+3} & \qw & \qw & \qw & \qw & \qw & \gate{} & \qw \\ &&&&&&& \cdot \\ &&&&&&& \cdot \\ &&&&&&& \cdot \\ } \Qcircuit @C=1em @R=.7em { &&&&&&&&&&& \lstick{2n+1} & \multigate{1}{J} & \gate{J} \qwx[9] & \qw & \qw & \qw & \qw & \qw \\ &&&&&\cdots&&&&&& \lstick{2n+2} & \ghost{J} & \qw & \multigate{1}{J} & \gate{J} \qwx[9] & \qw & \qw & \qw &&\cdots \\ &&&&&&&&&&& \lstick{2n+3} & \qw & \qw & \ghost{J} & \qw & \multigate{1}{J} & \gate{J} \qwx[9] & \qw \\ &&&&&&&&&&& \lstick{2n+4} & \qw & \qw & \qw & \qw & \ghost{J} & \qw & \qw \\ \\ &&&&&&&&&&&&&& \cdot \\ &&&&&&&&&&&&&& \cdot \\ &&&&&&&&&&&&&& \cdot \\ \\ &&&&&&&&&&& \lstick{3n+1} & \qw & \gate{} & \qw & \qw & \qw & \qw & \qw \\ &&&&&\cdots&&&&&& \lstick{3n+2} & \qw & \qw & \qw & \gate{} & \qw & \qw & \qw &&\cdots \\ &&&&&&&&&&& \lstick{3n+3} & \qw & \qw & \qw & \qw & \qw & \gate{} & \qw \\ } \Qcircuit @C=1em @R=.7em { &&&&&&&&&&&\cdot&&&&&& \cdot \\ &&&&&&&&&&&&\cdot&&&&& \cdot \\ &&&&&&&&&&&&&\cdot&&&& \cdot \\ \\ &&&&&&&&&&&&&&&& \gate{J} \qwx[8] & \qw & \qw & \qw & \qw & \qw & \rstick{n^{2}-n-2} \\ &&&&&&&&&&&&&\cdots&&& \qw & \qw & \gate{J} \qwx[8] & \qw & \qw & \qw & \rstick{n^{2}-n-1} \\ &&&&&&&&&&&&&&&& \qw & \qw & \qw & \qw & \gate{J} \qwx[8] & \qw & \rstick{n^{2}-n} \\ \\ &&&&&&&&&&&&&&&&& \cdot \\ &&&&&&&&&&&&&&&&& \cdot \\ &&&&&&&&&&&&&&&&& \cdot \\ \\ &&&&&&&&&&&&&&&& \gate{} & \multigate{1}{J} & \qw & \qw & \qw & \qw & \rstick{n^{2}-2} \\ 
&&&&&&&&&&&&&\cdots&&& \qw & \ghost{J} & \gate{} & \multigate{1}{J} & \qw & \qw & \rstick{n^{2}-1} \\ &&&&&&&&&&&&&&&& \qw & \qw & \qw & \ghost{J} & \gate{} & \qw & \rstick{n^{2}} } \section{Extended terms of Hamiltonian} Using notation from \cite{dipole}, the occupation-induced one-particle tunneling and the nearest-neighbor pair tunneling terms of the Bose-Hubbard Hamiltonian are respectively given by \begin{equation} \label{eqHamiltonianExt} H = -T\sum_{\{i,j\}}\hat{a}^{\dagger}_{i}(\hat{n}_{i} + \hat{n}_{j})\hat{a}_{j} + \frac{P}{2}\sum_{\{i,j\}}\hat{a}^{\dagger}_{i}\hat{a}^{\dagger}_{i}\hat{a}_{j}\hat{a}_{j}. \end{equation} Through the use of unitary conjugation with squeezing and displacement operations, the goal is to approximate the terms of the Hamiltonian by one or more gates for which a decomposition is known. This example illustrates a general method which can also be used to find a decomposition for other unitary operations. The method is inspired by techniques used in \cite{MabuchiKerr}. Squeezing has the effect of multiplying $\hat{x}$ and $\hat{p}$ operators by a constant \begin{align} & S(\log\lambda)\hat{x}_{i}S^{\dagger}(\log\lambda) = \lambda \hat{x}_{i}, \\ & S(\log\lambda)\hat{p}_{i}S^{\dagger}(\log\lambda) = \lambda^{-1} \hat{p}_{i}, \end{align} and displacement has the effect of adding a constant (which will be labeled $\alpha$) \cite{Strawberry}. Applying both of these to the annihilation and creation operators maps them to a new effective operation \begin{equation} \hat{a}^{\dagger} \rightarrow \hat{a}^{\dagger}_{\text{eff}} = \lambda \hat{x} - i\lambda^{-1}\hat{p} + \alpha. \end{equation} Applying this to the $P$ term of the Hamiltonian in Eq.~(\ref{eqHamiltonianExt}) first and then exponentiating in order to perform Hamiltonian simulation gives \begin{equation} U_{P}^{\text{eff}} = e^{-i\tau H_{P}^{\text{eff}}} = e^{-i\tau \frac{P}{2}\sum_{\{i,j\}}(\hat{a}^{\dagger}_{i, \text{eff}})^{2}(\hat{a}_{j, \text{eff}})^{2}}.
\end{equation} The exponent can then be expanded in terms of $\hat{x}$ and $\hat{p}$, and simplified by grouping in terms of overall power of operators and constants $\lambda$ and $\alpha$ \begin{align} (\hat{a}^{\dagger}_{i, \text{eff}})^{2} (\hat{a}_{j, \text{eff}})^{2} &= \\ \nonumber & \; \; \left(\lambda^{2}\hat{x}_{i}^{2} - 2i\hat{x}_{i}\hat{p}_{i} + 2\lambda\alpha\hat{x}_{i} - \lambda^{-2}\hat{p}_{i}^{2} - 2i\alpha\lambda^{-1}\hat{p}_{i} + \alpha^{2} \right) \left(\lambda^{2}\hat{x}_{j}^{2} + 2i\hat{x}_{j}\hat{p}_{j} + 2\lambda\alpha\hat{x}_{j} - \lambda^{-2}\hat{p}_{j}^{2} + 2i\alpha\lambda^{-1}\hat{p}_{j} + \alpha^{2} \right) \\ \nonumber & = \big( \lambda^{4}\hat{x}_{i}^{2}\hat{x}_{j}^{2} + 2i\lambda^{2}\hat{x}_{i}^{2}\hat{x}_{j}\hat{p}_{j} - \hat{x}_{i}^{2}\hat{p}_{j}^{2} - 2i\lambda^{2}\hat{x}_{j}^{2}\hat{x}_{i}\hat{p}_{i} + 4\hat{x}_{i}\hat{p}_{i}\hat{x}_{j}\hat{p}_{j} + 2i\lambda^{-2}\hat{p}_{j}^{2}\hat{x}_{i}\hat{p}_{i} - \hat{p}_{i}^{2}\hat{x}_{j}^{2} - 2i\lambda^{-2}\hat{p}_{i}^{2}\hat{x}_{j}\hat{p}_{j}\\ \nonumber & \; \; + \lambda^{-4}\hat{p}_{i}^{2}\hat{p}_{j}^{2} + 2\alpha\lambda^{3}\hat{x}_{i}^{2}\hat{x}_{j} + 2i\alpha\lambda\hat{x}_{i}^{2}\hat{p}_{j} - 4i\alpha\lambda\hat{x}_{j}\hat{x}_{i}\hat{p}_{i} + 4\alpha\lambda^{-1}\hat{p}_{j}\hat{x}_{i}\hat{p}_{i} + 2\alpha\lambda^{3}\hat{x}_{i}\hat{x}_{j}^{2} + 4i\alpha\lambda\hat{x}_{i}\hat{x}_{j}\hat{p}_{j}\\ \nonumber & \; \; - 2\alpha\lambda^{-1}\hat{x}_{i}\hat{p}_{j}^{2} - 2\alpha\lambda^{-1}\hat{x}_{j}\hat{p}_{i}^{2} - 2i\alpha\lambda^{-3}\hat{p}_{i}^{2}\hat{p}_{j} - 2i\alpha\lambda\hat{p}_{i}\hat{x}_{j}^{2} + 4\alpha\lambda^{-1}\hat{p}_{i}\hat{x}_{j}\hat{p}_{j} + 2i\alpha\lambda^{-3}\hat{p}_{i}\hat{p}_{j}^{2}+\alpha^{2}\lambda^{2}\hat{x}_{i}^{2} \\ \nonumber & \; \; - 2i\alpha^{2}\hat{x}_{i}\hat{p}_{i} + 4\alpha^{2}\lambda^{2}\hat{x}_{i}\hat{x}_{j} + 4i\alpha^{2}\hat{x}_{i}\hat{p}_{j} - \lambda^{-2}\alpha^{2}\hat{p}_{i}^{2} - 4i\alpha^{2}\hat{p}_{i}\hat{x}_{j} +
4\alpha^{2}\lambda^{-2}\hat{p}_{i}\hat{p}_{j} + \alpha^{2}\lambda^{2}\hat{x}_{j}^{2} + 2i\alpha^{2}\hat{x}_{j}\hat{p}_{j} \\ \nonumber & \; \; - \alpha^{2}\lambda^{-2}\hat{p}_{j}^{2} + 2\lambda\alpha^{3}\hat{x}_{i} - 2i\alpha^{3}\lambda^{-1}\hat{p}_{i} + 2\lambda\alpha^{3}\hat{x}_{j} + 2i\alpha^{3}\lambda^{-1}\hat{p}_{j} + \alpha^{4} \big). \end{align} Summing over all neighboring lattice sites implies that for a specific $i,j$ there will also be a term with swapped indices. Therefore, terms with alternate indices may be combined or canceled \begin{align} H_{P}^{\text{eff}} &= \frac{P}{2}\sum_{\{i,j\}}\big( \lambda^{4}\hat{x}_{i}^{2}\hat{x}_{j}^{2} - 2\hat{x}_{i}^{2}\hat{p}_{j}^{2} + 4\hat{x}_{i}\hat{p}_{i}\hat{x}_{j}\hat{p}_{j} + \lambda^{-4}\hat{p}_{i}^{2}\hat{p}_{j}^{2} + 4\alpha\lambda^{3}\hat{x}_{i}^{2}\hat{x}_{j} + 8 \alpha\lambda^{-1}\hat{p}_{j}\hat{x}_{i}\hat{p}_{i} - 4\alpha\lambda^{-1}\hat{x}_{i}\hat{p}_{j}^{2} \\ \nonumber & \; \; + 2\alpha^{2}\lambda^{2}\hat{x}_{i}^{2} + 4\alpha^{2}\lambda^{2}\hat{x}_{i}\hat{x}_{j} + 4\alpha^{2}\lambda^{-2}\hat{p}_{i}\hat{p}_{j} - 2\alpha^{2}\lambda^{-2}\hat{p}_{i}^{2} + 4\alpha^{3}\lambda\hat{x}_{i} + \alpha^{4} \big) \\ \nonumber &= \frac{\lambda^{4}P}{2}\sum_{\{i,j\}}\big(\hat{x}_{i}^{2}\hat{x}_{j}^{2} + 4\alpha\lambda^{-1}\hat{x}_{i}^{2}\hat{x}_{j} + 2\alpha^{2}\lambda^{-2}\hat{x}_{i}^{2} + 4\alpha^{2}\lambda^{-2}\hat{x}_{i}\hat{x}_{j} + 4\alpha^{3}\lambda^{-3}\hat{x}_{i} + \mathcal{O}(\lambda^{-4}) \big), \end{align} thus, to leading order $U_{P}^{\text{eff}} \approx e^{-i\tau \frac{\lambda^{4}P}{2}\sum_{\{i,j\}}\hat{x}_{i}^{2}\hat{x}_{j}^{2}}$. 
The original unitary operator to be decomposed can be expressed in terms of the effective operator \begin{equation} U_{P} = S_{i}(\log\lambda)S_{j}(\log\lambda)D_{i}(\alpha)D_{j}(\alpha)U_{P}^{\text{eff}}D_{j}^{\dagger}(\alpha)D_{i}^{\dagger}(\alpha)S_{j}^{\dagger}(\log\lambda)S_{i}^{\dagger}(\log\lambda), \end{equation} where all of the operators on the right-hand side can now be decomposed to leading order in $\lambda$. Note that the power of $\alpha$ increases when the overall order of the operators in each term decreases, and the power of $\lambda$ increases when there is a greater contribution of $\hat{x}$ operators over $\hat{p}$ operators. Choosing correctly $\alpha$ and $\lambda$ allows certain terms to have a much greater contribution than others \cite{MabuchiKerr}. For the $P$ term of the Hamiltonian, since the overall order is only four and fourth-order operators are not necessarily hard to decompose, the displacement operation can be left out to get a similar result: \begin{align} H_{P}^{\text{eff}} & = \frac{P}{2}\sum_{\{i,j\}}\big(\lambda^{4}\hat{x}_{i}^{2}\hat{x}_{j}^{2} + 4\hat{x}_{i}\hat{p}_{i}\hat{x}_{j}\hat{p}_{j} + \lambda^{-4}\hat{p}_{i}^{2}\hat{p}_{j}^{2} \big) = \frac{\lambda^{4}P}{2}\sum_{\{i,j\}}\big(\hat{x}_{i}^{2}\hat{x}_{j}^{2} + \mathcal{O}(\lambda^{-4}) \big). \end{align} To summarize the method, first map to effective operators by using squeezing and displacement (or just squeezing if overall order isn't an issue). Next, expand the expression in terms of quadrature operators $\hat{x}$ and $\hat{p}$, and simplify if the structure of the problem allows it. Then organize terms in order of power of $\lambda$, and as $\lambda \rightarrow \infty $ the leading order terms will have a much larger contribution. Choosing $\alpha$ to be a function of $\lambda$ can allow for lower order terms in powers of $\hat{x}$ and $\hat{p}$ to have a greater contribution as well. 
Finally, the original operator can be expressed as the effective operator with the opposite squeezing and displacement applied to it. This process can also be used for the $T$ term of the Bose-Hubbard Hamiltonian in Eq.~(\ref{eqHamiltonian}). With just squeezing (and no displacement) the effective term is given by \begin{align} H_{T}^{\text{eff}} = -T\sum_{\{i,j\}}\big( \left(\lambda\hat{x}_{i} - i\lambda^{-1}\hat{p}_{i} \right)^{2} \left(\lambda\hat{x}_{i} + i\lambda^{-1}\hat{p}_{i} \right) \left(\lambda\hat{x}_{j} + i\lambda^{-1}\hat{p}_{j} \right) \\ \nonumber + \left(\lambda\hat{x}_{i} - i\lambda^{-1}\hat{p}_{i} \right)\left(\lambda\hat{x}_{j} - i\lambda^{-1}\hat{p}_{j} \right)\left(\lambda\hat{x}_{j} + i\lambda^{-1}\hat{p}_{j} \right)^{2} \big). \end{align} After expanding and making use of similar symmetries that arose in the $P$ term, we can simplify to get \begin{align} H_{T}^{\text{eff}} & = -T\sum_{\{i,j\}}\big( 2\lambda^{4}\hat{x}_{i}^{3}\hat{x}_{j} - 4\lambda^{2}\hat{x}_{j}\hat{x}_{i} - \hat{x}_{i}^{2}\hat{p}_{i}\hat{p}_{j} - \hat{p}_{i}^{2}\hat{x}_{i}\hat{x}_{j} - \hat{x}_{i}\hat{x}_{j}\hat{p}_{j}^{2} - \hat{p}_{i}\hat{p}_{j}\hat{x}_{j}^{2} \\ \nonumber & + 2\hat{x}_{i}\hat{p}_{i}\hat{x}_{i}\hat{p}_{j} + 2\hat{x}_{i}\hat{p}_{i}\hat{x}_{j}\hat{p}_{i} + 2\hat{x}_{i}\hat{p}_{j}\hat{x}_{j}\hat{p}_{j} + 2\hat{x}_{j}\hat{p}_{i}\hat{x}_{j}\hat{p}_{j} -2\lambda^{-2}\hat{p}_{i}\hat{p}_{j} + 2\lambda^{-4}\hat{p}_{i}^{3}\hat{p}_{j} \big) \\ \nonumber \\ \nonumber & = -T\lambda^{4}\sum_{\{i,j\}}\big(2\hat{x}_{i}^{3}\hat{x}_{j} - 4\lambda^{-2}\hat{x}_{j}\hat{x}_{i} + \mathcal{O}(\lambda^{-4}) \big). \end{align} Therefore, to leading order $U_{T}^{\text{eff}} \approx e^{i\tau T \lambda^{4}\sum_{\{i,j\}}2\hat{x}_{i}^{3}\hat{x}_{j}}$. Both $U_{P}^{\text{eff}}$ and $U_{T}^{\text{eff}}$ can be decomposed using techniques from \cite{ExactDecomp}. \end{document}
\begin{document} \title{Geodesic distance for right-invariant metrics on diffeomorphism groups: critical Sobolev exponents} \author{Robert L.~Jerrard\footnote{Department of Mathematics, University of Toronto.} \,and Cy Maor\footnotemark[1]} \date{} \maketitle \begin{abstract} We study the geodesic distance induced by right-invariant metrics on the group $\operatorname{Diff}_c(\mathcal{M})$ of compactly supported diffeomorphisms of a manifold $\mathcal{M}$, and show that it vanishes for the critical Sobolev norms $W^{s,n/s}$, where $n$ is the dimension of $\mathcal{M}$ and $s\in(0,1)$. This completes the proof that the geodesic distance induced by $W^{s,p}$ vanishes if $sp\le n$ and $s<1$, and is positive otherwise. The proof is achieved by combining the techniques of two recent papers --- \cite{JM18} by the authors, which treated the subcritical case, and \cite{BHP18} of Bauer, Harms and Preston, which treated the critical 1-dimensional case. \end{abstract} \section{Introduction, preliminaries and main result}\label{sec_preliminaries} The geometry of different diffeomorphism groups (e.g., compactly-supported, symplectic, volume-preserving) with respect to various right-invariant metrics has a long history (see, e.g., \cite{ER91,EP93,MM05,BHP18}). One of the basic questions about these geometries is whether the geodesic distance induced by a given norm on the associated Lie algebra of the group actually generates a metric space structure on the group. This may fail if two distinct diffeomorphisms can be connected with paths of arbitrarily short lengths. In this paper, we complete the full characterization of this vanishing geodesic distance phenomenon on the group of compactly-supported diffeomorphisms of a manifold, with respect to Sobolev norms $W^{s,p}$ on its Lie algebra of vector fields. 
This study started in \cite{MM05}, and continued in \cite{BBHM13,BBM13}, where (among other results) the threshold $s=1/p$ between positive and vanishing geodesic distance was identified for one-dimensional manifolds. In a recent paper \cite{BHP18} it was shown that the geodesic distance vanishes in this critical space, completing the characterization in the one-dimensional case. Virtually simultaneously with \cite{BHP18}, in \cite{JM18} the authors identified the critical space in the $n$-dimensional case, namely $s=\min(n/p,1)$, leaving the case $sp= n$, $s<1$ open. In this paper we combine the techniques of \cite{BHP18,JM18} to show that the geodesic distance vanishes in this case, thus completing the classification of vanishing geodesic distance phenomenon for compactly-supported diffeomorphisms. \paragraph{Setting} Let $(\mathcal{M},\mathfrak{g})$ be a Riemannian manifold of \varepsilonmph{bounded geometry}; that is, $(\mathcal{M},\mathfrak{g})$ has a positive injectivity radius and all the covariant derivatives of the curvature are bounded: $\|\nabla^i R\|_\mathfrak{g} < C_i$ for $i\mathfrak{g}e 0$. We denote by $\operatorname{Diff}c(\mathcal{M})$ the group of compactly supported diffeomorphisms of $\mathcal{M}$, that is the diffeomorphisms $\varphi$ for which the closure of $\{\varphi(x)\ne x\}$ is compact, and by $\Gamma_c(T\mathcal{M})$ the Lie-algebra of compactly supported vector fields on $\mathcal{M}$, the tangent space of $\operatorname{Diff}c(\mathcal{M})$ at the identity. Given a norm $\|\cdot\|_A$ on $\Gamma_c(T\mathcal{M})$, the length of a smooth path $\varphi:[0,1]\to \operatorname{Diff}c(\mathcal{M})$ is defined by \[ \operatorname{length}_A\varphi = \int_0^1 \|u_t\|_A\, dt, \qquad u_t := \partial_t \varphi_t \circ \varphi_t^{-1}. \] Note that from the vector fields $\{u_t\}_{t\in [0,1]}$, and the initial condition $\varphi_0$, the path $\varphi$ can be recovered via standard ODE theory. 
The above formula for lengths induces the \textbf{geodesic distance} between $\varphi_0,\varphi_1\in \operatorname{Diff}c(\mathcal{M})$ in a standard way by \[ \operatorname{dist}_A(\varphi_0,\varphi_1) := \inf\BRK{\operatorname{length}_A \varphi \,\,:\,\, \varphi:[0,1]\to \operatorname{Diff}c(\mathcal{M}), \, \varphi(0) = \varphi_0, \, \varphi(1) = \varphi_1}. \] Note that $\operatorname{dist}_A$ forms a semi-metric on $\operatorname{Diff}c(\mathcal{M})$, that is, it satisfies the triangle inequality but may fail to be positive. This paper is concerned exactly with this phenomenon --- for which Sobolev norms (defined below) the geodesic distance induces a metric space structure on $\operatorname{Diff}c(\mathcal{M})$. $\operatorname{dist}_A$ is, in fact, the geodesic distance of the \textbf{right-invariant Finsler metric} on $\operatorname{Diff}c(\mathcal{M})$ induced by $\|\cdot\|_{A}$, which is defined as \[ \|X\|_{\varphi,A} := \|X\circ \varphi^{-1}\|_A \] for every $\varphi\in \operatorname{Diff}c(\mathcal{M})$ and $X\in T_\varphi \operatorname{Diff}c(\mathcal{M})$. If $\|\cdot\|_A$ is induced by an inner-product, it defines a Riemannian metric on $\operatorname{Diff}c(\mathcal{M})$ in a similar manner; many well-known PDEs are, in fact, the geodesic equations of such Riemannian metrics. See \cite{BBHM13} for more details. The right-invariance is inherited by $\operatorname{dist}_A$, as summarized in the following lemma: \begin{lemma}[Right-invariance] \label{lem:right_invariance} For $\psi,\varphi_0,\varphi_1\in \operatorname{Diff}c(\mathcal{M})$, we have \[ \operatorname{dist}_A(\varphi_0 \circ \psi, \varphi_1 \circ \psi) = \operatorname{dist}_A(\varphi_0,\varphi_1). 
\] In particular, \[ \operatorname{dist}_A(\operatorname{Id},\psi) = \operatorname{dist}_A(\operatorname{Id},\psi^{-1}), \] and \[ \operatorname{dist}_A(\operatorname{Id},\varphi_1 \circ \varphi_0) \le \operatorname{dist}_A(\operatorname{Id}, \varphi_1) + \operatorname{dist}_A(\operatorname{Id}, \varphi_0). \] \varepsilonnd{lemma} \begin{proof} See \cite[Lemma~2.1]{JM18}. \varepsilonnd{proof} In this paper we are interested in fractional Sobolev $W^{s,p}$-norms, defined as follows: \begin{definition} \label{def:fractional_Sobolev} For $0<s<1$ and $1\le p<\infty$, the $W^{s,p}$-norm of a function $f\in L^p(\mathbb{R}^n)$ is given by \[ \|f\|_{s,p}^p = \| f\|_{L^p}^p + \int_{\mathbb{R}^n}\int_{\mathbb{R}^n} \frac {|f(x)-f(y)|^p}{|x-y|^{n+sp}}\, dx\,dy . \] \varepsilonnd{definition} Given a Riemannian manifold $(\mathcal{M},\mathfrak{g})$ of bounded geometry, this norm can be extended to $\Gamma_c(T\mathcal{M})$ using a trivialization by normal coordinate patches on $\mathcal{M}$ (see \cite[Section~2.2]{BBM13} for details). We will denote the induced geodesic distance on $\operatorname{Diff}_c(\mathcal{M})$ by $\operatorname{dist}_{s,p}$. Different choices of charts result in equivalent metrics, and therefore the question of vanishing geodesic distance is independent of these choices. Instead of using Definition~\ref{def:fractional_Sobolev} directly, we will bound the $W^{s,p}$-norm using the following interpolation inequalities: \begin{proposition}[fractional Gagliardo--Nirenberg interpolation inequalities] \label{pn:GN_inequality} Assume that $1<p<\infty$. For every $f\in W^{1,p}(\mathbb{R}^n)$ and $s\in (0,1)$, \[ \| f\|_{s,p} \le C_{s,p,n} \|f\|_{L^p}^{1-s} \|f\|_{1,p}^s\, , \quad\mbox{ where }\ \ \|f\|_{1,p}^p := \|f\|_{L^p}^p+ \|df\|_{L^p}^p, \] and \[ \|f\|_{s,p} \le C_{s,p,n} \|f\|_{1,sp}^s \|f\|_{L^\infty}^{1-s}, \quad\mbox{ assuming }\ \ sp>1. \] \varepsilonnd{proposition} For a proof, see \cite[Corollary~3.2]{BM01}. 
These are the only properties of the $W^{s,p}$-norm that will be used in this paper. \paragraph{Main results} The main result of this paper is the following: \begin{theorem}\label{main_thm} Let $(\mathcal{M},\mathfrak{g})$ be an $n$-dimensional Riemannian manifold of bounded geometry, and $p\in(n,\infty)$. Then $\operatorname{dist}_{n/p,p}(\varphi_0,\varphi_1)= 0$ whenever $\varphi_0,\varphi_1$ belong to the same path-connected component of $\operatorname{Diff}c(\mathcal{M})$. \varepsilonnd{theorem} Combining this result with previous results, which are summed up in \cite[Theorem~2.4]{JM18}, we obtain the following full characterization of the vanishing geodesic distance phenomenon on compactly supported diffeomorphism groups: \begin{theorem} Let $(\mathcal{M},\mathfrak{g})$ be an $n$-dimensional Riemannian manifold of bounded geometry. Then for any $p\in[1,\infty)$, the induced $W^{s,p}$-geodesic distance vanishes on any path-connected component of $\operatorname{Diff}c(\mathcal{M})$ if $sp\le n$ and $s<1$, and is strictly positive otherwise. \varepsilonnd{theorem} When $s>n/p$, then the Sobolev embedding $W^{s,p}\subset L^\infty$ implies that for every path $\{\varphi_t\}_{t\in[0,1]}$ between $\varphi_0,\varphi_1\in\operatorname{Diff}c(\mathcal{M})$, and every $x\in \mathcal{M}$, \[ |\varphi_1(x)-\varphi_0(x)| \le \int_0^1 \left| \partial_t\varphi_t(x)\right|\, dt \le \int_0^1 \| u_t\|_\infty\, dt \le C\int_0^1 \| u(t)\|_{s,p} dt = C\operatorname{length}_{s,p}\varphi, \] hence it is impossible to transport even a single point at a low cost. On the other hand, when $sp \le n$, one expects to be able to transport small volumes over large distances at a small cost, using vector fields $u_t$ with $\|u_t\|_\infty \approx 1$ but $\|u_t\|_{s,p}\ll 1$. Indeed, such vector fields are at the heart of all vanishing geodesic distance constructions on $\operatorname{Diff}c(\mathcal{M})$ \cite{MM05,BBHM13,BHP18,JM18}. 
The main difficulty in proving Theorem~\ref{main_thm}, compared with the subcritical case $s<\min\BRK{n/p,1}$ proved in \cite{JM18}, is that such vector fields are quite rigid in the critical case $sp=n$. In the subcritical case, on the other hand, any function $f\in W^{s,p}(\mathbb{R}^n)$ can be rescaled $f_\lambda(x) := f( x/\lambda )$ with $\lambda \ll 1$ to obtain a function with the same $L^\infty$-norm but arbitrary small $W^{s,p}$-norm. This rigidity in the critical case makes it difficult to control the endpoint of a path $\varphi_t$ starting at $\varphi_0$ and flowing along a vector field $u_t$ with these properties, and therefore it is difficult to construct arbitrary short paths between two {\varepsilonm fixed} diffeomorphisms $\varphi_0,\varphi_1$. In \cite{BHP18}, this problem is circumvented by using the notion of {\varepsilonm displacement energy} defined in \cite{EP93}. As described in the next section, they show that the geodesic distance vanishes if there exists an open set with zero displacement energy --- that is, if it is possible to transport the set so it does not intersect itself, for an arbitrary small cost.\footnote{Similar observations (in the context of contactomophorisms) also appear in \cite{She17}.} This enabled them to prove Theorem~\ref{main_thm} in the one-dimensional case. In this paper we combine this approach of using the displacement energy with the ideas used in \cite{JM18} to construct short paths in the subcritical case, to prove the vanishing of the geodesic distance in the critical case in every dimension. The condition $s<1$ in Theorem~\ref{main_thm} is related to change, rather than transportation, of volumes. That is, when $s\mathfrak{g}e 1$ the $W^{s,p}$-norm detects any volume change, whereas when $s<1$ it is possible to have significant volume changes at a small cost, provided that no point moves very far. When $n>1$, this plays an important role in constructing short paths, as will be clear from the proof. 
Theorem~\ref{main_thm} is stronger than the main theorem of \cite{JM18}, as the latter proves vanishing geodesic distance only in the subcritical case. Moreover, the proof of Theorem~\ref{main_thm} is significantly shorter, due to the fact that it is no longer necessary to control the endpoints of the short paths considered. On the other hand, the proof of \cite{JM18}, being more direct, has the advantage of showing explicitly how two diffeomorphisms can be connected with arbitrarily short paths, so in some sense it is more revealing or instructive. \section{Displacement energy} \begin{definition} \label{def:displacement_energy} The \textbf{displacement energy} of a set $V\subset \mathcal{M}$ with respect to the $W^{s,p}$-induced geodesic distance is defined by \[ E(V) := \inf\BRK{\operatorname{dist}_{s,p}(\operatorname{Id}, \varphi) \,:\, \varphi\in \operatorname{Diff}_c(\mathcal{M}), \varphi(V)\cap V = \emptyset }. \] \end{definition} In this section we use \cite[Theorem~1]{BHP18} (see also \cite[Remark~7]{She17}, both generalize results of \cite{EP93}), to show that the $W^{s,p}$-geodesic distance vanishes if and only if there exists an open set $V\subset \mathcal{M}$ with $E(V) = 0$. We start with the following lemma (which is almost identical to Step 2 in the proof of \cite[Theorem~2]{BHP18}): \begin{lemma} \label{lem:Lipschitz} For every $s\in (0,1)$ and $p\in[1,\infty)$ and for every $\varphi\in \operatorname{Diff}_c(\mathcal{M})$, the left multiplication operator $L_\varphi\colon \operatorname{Diff}_c(\mathcal{M})\to \operatorname{Diff}_c(\mathcal{M})$, $L_\varphi(\psi) = \varphi\circ \psi$ is smooth and Lipschitz with respect to $\operatorname{dist}_{s,p}$. \end{lemma} \begin{proof} The smoothness of $L_\varphi$ is obvious. We now prove that it is Lipschitz. First, let $X\in \Gamma_c(T\mathcal{M})$. 
Then \[ \|dL_\varphi X\|_{\varphi,W^{s,p}} = \| dL_\varphi X \circ \varphi^{-1}\|_{s,p} = \|(d\varphi(X))\circ \varphi^{-1}\|_{s,p} \le C_\varphi \|X\|_{s,p}, \] for some $C_\varphi>0$, by the continuity of multiplications and compositions, see Theorems 4.2.2 and 4.3.2 in \cite{Tri92}. Now, let $\psi_0,\psi_1 \in \operatorname{Diff}_c(\mathcal{M})$, and let $\Psi\colon[0,1]\to \operatorname{Diff}_c(\mathcal{M})$ be a path between them. Then $\varphi\circ \Psi$ is a path between $\varphi\circ \psi_0$ and $\varphi\circ \psi_1$, and \[ \begin{split} \operatorname{dist}_{s,p}(\varphi\circ \psi_0,\varphi\circ \psi_1) &\le \int_0^1 \|\partial_t(\varphi\circ \Psi)\|_{\varphi\circ \Psi,W^{s,p}} \,dt = \int_0^1 \|dL_\varphi \partial_t\Psi\|_{\varphi\circ \Psi,W^{s,p}} \,dt \\ & = \int_0^1 \|dL_\varphi (\partial_t\Psi\circ \Psi^{-1})\|_{\varphi,W^{s,p}} \,dt \le C_\varphi \int_0^1 \|\partial_t\Psi\circ \Psi^{-1}\|_{s,p} \,dt. \end{split} \] Taking the infimum on $\Psi$ we obtain \[ \operatorname{dist}_{s,p}(\varphi\circ \psi_0,\varphi\circ \psi_1) \le C_\varphi \operatorname{dist}_{s,p}(\psi_0,\psi_1), \] which completes the proof. \end{proof} Denote by $\operatorname{Diff}_0(\mathcal{M})$ the connected component of the identity, i.e., all diffeomorphisms in $\operatorname{Diff}_c(\mathcal{M})$ for which there exists a curve between them and $\operatorname{Id}$. $\operatorname{Diff}_0(\mathcal{M})$ is a simple group \cite{Eps70}. This fact, together with Lemma~\ref{lem:Lipschitz}, and the fact that $\operatorname{Diff}_c(V)$ is non-Abelian for any open $V$, implies that the following corollary of \cite[Theorem~1]{BHP18} holds: \begin{proposition} \label{prop:displacement_energy} There exists $\varphi\in \operatorname{Diff}_0(\mathcal{M})$, $\varphi\ne \operatorname{Id}$, such that $\operatorname{dist}_{s,p}(\operatorname{Id},\varphi) = 0$ if and only if there exists an open set $V$ such that $E(V)=0$. 
If such $\varphi$ exists, then $\operatorname{dist}_{s,p}$ is identically zero on $\operatorname{Diff}_0(\mathcal{M})$. \varepsilonnd{proposition} \section{Proof of Theorem~\ref{main_thm}} The case $n=1$, $p=2$ was proved in \cite[Theorem~2]{BHP18}. Their proof holds for every $p>1$, so here we prove for the case $n>1$. It is enough to prove the result for $\mathbb{R}^n$ --- indeed, for a general manifold of bounded geometry $(\mathcal{M},\mathfrak{g})$, one can embed the following $\mathbb{R}^n$ construction into a single coordinate chart, used in the definition of the induced $W^{s,p}$-geodesic distance on $\mathcal{M}$. Since we will often split $\mathbb{R}^n = \mathbb{R}\times \mathbb{R}^{n-1}$, it is convenient to write $m=n-1$. We will denote the standard coordinates on $\mathbb{R}^n$ by $(x,y)$, where $x\in \mathbb{R}$ and $y\in \mathbb{R}^m$. In the following lemma we construct functions $\xi_k\in W^{n/p,p}(\mathbb{R}^n)$, with $\|\xi_k\|_\infty = 1$ and $\|\xi_k\|_{n/p,p}\to 0$, for $p>n$. That is, we bound the capacity of small balls in the critical Sobolev space $W^{n/p,p}(\mathbb{R}^n)$. \begin{lemma}\label{lem:zero_cap} Let $sp= n>1$, $s<1$, and let $(\lambda_k)_{k\in \mathbb{N}}$ be a sequence of positive numbers, $\lambda_k\ll e^{-k^p}$. Then there exists a sequence $(\xi_k)_{k\in \mathbb{N}}$ of functions $\xi_k:\mathbb{R}^n\to [0,1]$ such that \begin{enumerate} \item $\xi_k\varepsilonquiv 1$ on $[-\lambda_k,\lambda_k]^n$ \item $\operatorname{supp} \xi_k \subset [-1,1]^n$ \item $k^{n-1}\|\xi_k\|_{s,p} \to 0$. \varepsilonnd{enumerate} \varepsilonnd{lemma} \begin{proof} Let $r_k = \sqrt{n}\lambda_k$, so that $[-\lambda_k,\lambda_k]^n$ is contained in a ball of radius $r_k$. Consider the function \[ \xi_k(x) = \begin{cases} 1 & |x|\le r_k \\ \frac{\log(1/|x|)}{\log(1/r_k)} & |x|\in (r_k,1) \\ 0 & |x|\mathfrak{g}e 1. 
\varepsilonnd{cases} \] Then \[ \| \xi_k\|_{L^n}^n \le |B_1(0)| = C(n) \] and $|d\xi_k|\le C \log(1/r_k)^{-1}/|x|$ for $|x|\in (r_k,1)$, and therefore \[ \|d\xi_k \|_{L^n}^n \le C\log(1/r_k)^{1-n}. \] Hence \[ \|\xi_k\|^n_{W^{1,n}} \le C\log(1/r_k)^{1-n}. \] Therefore, by Proposition~\ref{pn:GN_inequality}, we have \[ \|\xi_k\|_{W^{n/p,p}} \le C \|\xi_k\|_{W^{1,n}}^{n/p} \|\xi_k\|_{L^\infty}^{1-n/p} \le C\log(1/r_k)^{(1-n)/p} \ll k^{(1-n)}. \] \varepsilonnd{proof} Note that the above calculation is not optimal (one expects to be able to obtain $\|\xi_k\|_{{n/p},p}^p\approx\log(1/\lambda_k)^{1-p}$), but this simple construction is sufficient for our purposes. \paragraph{General strategy of the proof:} We now proceed to the proof of Theorem~\ref{main_thm}. We prove it using Proposition~\ref{prop:displacement_energy}: we show that there exists an open set $U\subset \mathbb{R}^n$ whose displacement energy with respect to the $W^{n/p,p}$ norm is zero. That is, we show that there exists a sequence $\Phi_k\in \operatorname{Diff}c(\mathbb{R}^n)$ such that $\Phi_k(U) \cap U= \varepsilonmptyset$ and $\operatorname{dist}_{s,p} (\operatorname{Id},\Phi_k) \to 0$. Specifically, we show this for the open set $U=(0,1)^n$. In the rest of this section we construct these diffeomorphisms $\Phi_k$. \paragraph{A sketch of the construction of the diffeomorphisms $\Phi_k$:} Fix $k\in \mathbb{N}$. We consider $(0,1)^m$ as a union of sets $L_I$, $I=1,\ldots 2^m$, each $L_I$ is a union of $\approx k^m$ disjoint cubes of diameter $\approx 1/k$. The main part of the proof consists of constructing diffeomorphisms $\Phi_k^I = (\phi_k^I(x,y) ,y)$, which satisfy \[ \lim_{k\to \infty}\operatorname{dist}_{s,p}(\operatorname{Id}, \Phi_k^I) = 0, \qquad \phi_k^I(x,y)\mathfrak{g}e x \qquad \text{and} \qquad \Phi_k^I ((0,1) \times L_I) \cap (0,1)^n = \varepsilonmptyset. \] We then have that $\Phi_k = \Phi_k^{2^m} \circ \ldots \circ \Phi_k^1$ is the desired map. 
The construction of $\Phi_k^I$ is carried out in three stages: \[ \Phi_k^I := \Psi_I^{-1} \circ \Theta_I \circ \Psi_I, \] where $\Psi_I$ and $\Theta_I$ (whose dependence of $k$ is omitted in order to simplify the notation) are as follows: \begin{enumerate} \item $\Psi_I(x,y) = (x,\psi_I(x,y)) $, squeezes each cube in $L_I$ to diameter $\lambda_k \ll e^{-k^p}$. Since $s<1$, this can be obtained at a small cost. \item $\Theta_I(x,y) = (\theta_I(x,y) ,y)$ satisfies $\theta_I(0,y) = 1$ whenever $y$ is in one of the squeezed cubes. Since $sp=n$, such a transport is possible at a low cost, but only if the volume of the transported points at every time is small enough; this is the reason for the squeezing stage. $\theta_I$ is constructed (roughly) by flowing along translations of the vector field $u_t(x,y) = \xi_k(x-t,y)$, where $\xi_k$ are the maps constructed in Lemma~\ref{lem:zero_cap}. \varepsilonnd{enumerate} This scheme of splitting--squeezing--transporting--expanding is similar to the constructions in \cite{JM18}. Since here we do not need to control the endpoint of the flow (just to transport $(0,1)^n$ away from itself), the transporting stage $\Theta_I$ is much simpler compared to \cite{JM18}. On the other hand, the squeezing stage is somewhat more elaborate: In order for the norm of $\xi_k$ to be small, its support, which is a cube of diameter $\lambda_k$, needs to be small enough; in the subcritical case, it is enough to have $\lambda_k$ decay faster than any polynomial (in \cite{JM18} it is $\lambda_k \approx k^{-\log k}$), whereas here, in the critical case, we should have $\lambda_k \ll e^{-k^p}$, in view of Lemma~\ref{lem:zero_cap}. 
Using the same squeezing strategy (i.e., same flow) as in \cite{JM18} for $\lambda_k \ll e^{-k^p}$ results in a path from $\operatorname{Id}$ to a squeezing diffeomorphism $\Psi_I$ whose length is unbounded when $k\to \infty$ (as shown below), and so we need to alter this path in order to show that $\operatorname{dist}(\operatorname{Id},\Psi_I)$ tends to zero. \paragraph{A detailed construction of the diffeomorphisms $\Phi_k$:} We now construct $\Phi_k$ in full detail, and prove that $\operatorname{dist}_{s,p} (\operatorname{Id},\Phi_k) \to 0$. Henceforth, all limits and asymptotic notations such as $o(1)$ are with respect to the limit $k\to \infty$. \paragraph{Step I: splitting the cube into strips} Fix $k\in \mathbb{N}$. We partition the lattice $\frac{1}{k}\mathbb{Z}^m \subset \mathbb{R}^m$ into $2^m$ copies of $\frac{2}{k}\mathbb{Z}^m$: \[ \frac 2k\mathbb{Z}^m,\, \frac2k\mathbb{Z}^m + \frac{e_1}k,\, \ldots,\, \frac2k\mathbb{Z}^m + \sum_{i=1}^m \frac {e_i}k, \] where $\{e_i\}_{i=1}^m$ is the standard basis of $\mathbb{R}^m$. We index the different lattices as $Z_I$, $I\in \mathbb{Z}_2^m$, ordered by \[ (0,\ldots,0), (1,0,\ldots,0), (0,1,0,\ldots,0), \ldots, (0,1,1,\ldots,1), (1,\ldots,1). \] Sometimes we will denote the indices by $1,\ldots, 2^m$ according to this order. For each $I\in \mathbb{Z}^m_2$, denote \[ L_I := \brk{Z_I + \Brk{-\frac{1}{2k},\frac{1}{2k}}^m}\cap [0,1]^m. \] Note that $\cup L_I = [0,1]^m$. For $y\in \mathbb{R}^m$, we will write \[ [y]_I := \mbox{ the closest point in $Z_I$ to $y$,} \] when a unique such point exists (such as when $y\in L_I$). \paragraph{Step II: squeezing the strips} Fix $1\le I \le 2^m$, and an auxiliary constant $\beta\in (0,1-s)$. 
We now construct a diffeomorphism $\Psi_I\in \operatorname{Diff}c(\mathbb{R}^n)$, $\Psi_I(x,y) = (x,\psi_I(x,y))$, with \begin{equation} \label{eq:squeezing_cost} \operatorname{dist}_{s,p}(\Psi_I,\operatorname{Id}) = o(1), \varepsiloneq such that \begin{equation} \label{eq:squeezing} \psi_I(x,y) = 2k\lambda_k(y-[y]_I) + [y]_I \varepsiloneq for every $x\in [0,1]$ and $y\in L_I$, and with \begin{equation} \label{eq:lambda_k_bound} \lambda_k \ll \varepsilonxp(-\varepsilonxp(\beta k^\beta)) \ll \varepsilonxp(-k^p). \varepsiloneq In particular, for every $x\in[0,1]$, \begin{equation} \label{eq:squeezed_strips} \psi_I\brk{\BRK{x}\times L_I} = (Z_I + [-\lambda_k,\lambda_k]^m) \cap [0,1]^m =: \tL{I}. \varepsiloneq We construct the squeezing in two stages $\Psi_I = \Psi_I^2 \circ \Psi_I^1$. We show the construction for $I=1$; for $I\ne 1$ the construction is obtained by translating the $I=1$ case. We start by constructing $\Psi_1^1$. Let $u\in C_c^\infty((-1,1)^m;\mathbb{R}^m)$, such that $u(y) = -y$ for $y\in [-1/2,1/2]^m$, and extend it to a $2\mathbb{Z}^m$-periodic function on $\mathbb{R}^m$. Let $\chi\in C_c^\infty(\mathbb{R}^n)$ such that $\chi\varepsilonquiv 1$ on $[0,1]^n$. Define $u_k^1(x,y) := \frac{\varepsilonta_k}{k} u(ky)\chi(x,y)$, where $\varepsilonta_k\mathfrak{g}g 1$ will be fixed below. In particular, $u_k^1(x,y) = -\varepsilonta_k (y-[y]_1)$ for $x\in [0,1]$ and $y\in L_1$. Note that \[ \|u_k^1\|_{L^p} \lesssim \|u_k^1\|_{L^\infty} \lesssim \varepsilonta_k/k, \qquad \|d u_k^1\|_{L^p} \lesssim \|d u_k^1\|_{L^\infty} \lesssim \varepsilonta_k. \] Therefore, by Proposition~\ref{pn:GN_inequality} we have \begin{equation} \label{eq:squeezing_cost_1} \|u_k^1\|_{s,p} \lesssim \frac{\varepsilonta_k^{1-s}}{k^{1-s}} \varepsilonta_k^s = \frac{\varepsilonta_k}{k^{1-s}} = o(1), \varepsiloneq where the last equality holds if we choose $\varepsilonta_k = k^\beta \ll k^{1-s}$ (recall that $\beta<1-s$). 
Let $\psi^1(t,x,y)$ be the solution of \[ \partial_t \psi^1 = u_k^1(x,\psi^1), \qquad \psi^1(0,x,y) = y. \] Define $\psi^1_1(x,y) := \psi^1(1,x,y)$, and $\Psi^1_1(x,y) := (x,\psi^1_1(x,y))$. A direct calculation shows that for $(x,y)\in [0,1]\times [-1/2k, 1/2k]^m$, $\psi^1_1(x,y) = y e^{-\varepsilonta_k}$, so by periodicity and the fact that $\chi\varepsilonquiv 1$ on $[0,1]^n$, \begin{equation} \label{eq:squeezing_first_stage} \psi_1^1(x,y) = e^{-\varepsilonta_k}(y-[y]_1) + [y]_1 \varepsiloneq for every $x\in [0,1]$ and $y\in L_1$. Denote, for $x\in [0,1]$, \[ \bar{L}_1 := \psi_1^1(\{x\}\times L_1). \] $\bar{L}_1$ is independent of $x$, and consists of $\approx k^m$ cubes of diameter $\approx \varepsilonxp(-\varepsilonta_k)/k \ll \varepsilonxp(-k^\beta)$. Also, note that \varepsilonqref{eq:squeezing_cost_1} implies that \[ \operatorname{dist}_{s,p}(\Psi^1_1,\operatorname{Id}) = o(1). \] Note that we cannot choose $\varepsilonta_k$ to be large enough such that $\bar{L}_1$ consists of cubes of diameter $\ll \varepsilonxp(-k^p)$, which is our ultimate goal here; indeed, this would require $\varepsilonta_k \approx k^p \mathfrak{g}g k^{1-s}$, which violates \varepsilonqref{eq:squeezing_cost_1}. However, once we squeeze $L_1$ into $\bar{L}_1$, we can start a new squeezing stage that only squeezes $\bar{L}_1$. That is, instead of having a vector field $u$ that satisfies $u(x,y) = -\alpha(y-[y]_1)$ for $y\in L_1$ (where $\alpha>0$ is a constant), we only need this to hold for $y\in \bar{L}_1$. Since $\bar{L}_1$ is much smaller than $L_I$, we can have a much larger squeeze factor $\alpha$, while keeping the norm of $u$ small. This second squeezing stage that is described below. We denote the second squeezing stage $\Psi^2_1 = (x,\psi_1^2(x,y))$. 
Again, we define $\psi_1^2(x,y) = \psi^2(1,x,y)$, where $\psi^2(t,x,y)$ is the solution of \[ \partial_t \psi^2 = u_k^2(x,\psi^2), \qquad \psi^2(0,x,y) = y, \] for $u_k^2(x,y)$ that satisfies $u_k^2(x,y) = -\alpha_k(y-[y]_1)$ for $x\in [0,1]$, $y\in \bar{L}_1$, and $\alpha_k\mathfrak{g}g 1$ that will be fixed below. Since $\bar{L}_1$ consists of cubes of diameter $\ll \varepsilonxp(-k^\beta)$, we can choose $u_k^2$ such that \[ \|u_k^2\|_p \lesssim \|u_k^2\|_\infty \ll \alpha_k\varepsilonxp(-k^\beta), \qquad \|u_k^2\|_p \lesssim\|du_k^2\|_\infty \approx \alpha_k. \] Choosing $\alpha_k = \varepsilonxp(\beta k ^\beta)$, we obtain, since $\beta<1-s$, that \[ \|u_k^2\|_{s,p} \ll \alpha_k \varepsilonxp(-(1-s)k^\beta) = o(1). \] In particular we have that \[ \operatorname{dist}_{s,p}(\Psi^2_1,\operatorname{Id}) = o(1). \] It follows that $\psi_1^2(x,\cdot)$ squeezes $\bar{L}_1$ by a factor of $\varepsilonxp(-\alpha_k) = \varepsilonxp(-\varepsilonxp(\beta k^\beta))$, that is \begin{equation} \label{eq:squeezing_second_stage} \psi_1^2(x,y) = \varepsilonxp(-\varepsilonxp(\beta k^\beta))(y-[y]_1) + [y]_1 \varepsiloneq for every $x\in [0,1]$ and $y\in \bar{L}_1$. Therefore $\Psi_1 = \Psi_1^2 \circ \Psi_1^1$ squeezes $L_1$ such that \varepsilonqref{eq:squeezing}-\varepsilonqref{eq:lambda_k_bound} hold, with $\lambda_k = \varepsilonxp(-\varepsilonxp(\beta k^\beta)- k^\beta)/2k$. By Lemma~\ref{lem:right_invariance}, we have \[ \operatorname{dist}_{s,p}(\Psi_1,\operatorname{Id}) \le \operatorname{dist}_{s,p}(\Psi^2_1,\operatorname{Id}) +\operatorname{dist}_{s,p}(\Psi^1_1,\operatorname{Id}) = o(1), \] as required. \paragraph{Step III: Flowing the squeezed strips} Recall that by \varepsilonqref{eq:squeezed_strips}, $\lambda_k \ll e^{-k^p}$ is the width of the squeezed strips $\tL{I}$ defined by \varepsilonqref{eq:squeezed_strips}, and let $\xi_k$ be the function associated with $\lambda_k$ as defined in Lemma~\ref{lem:zero_cap}. 
Define \[ \xi_k^I(x,y) := \sum_{z\in Z_I\cap [0,1]^m} \xi_k(x,y-z ) \] and \[ v_k(t,x,y) = \xi_k^I(x-t,y). \] Note that\footnote{The righthand side inequality in \varepsilonqref{eq:cost_transport_vector_field} is the reason we need $\lambda_k$ to be so small, which is achieved by the two-stage squeezing. In the subcritical case $sp<n$, the $W^{s,p}$-capacity of small balls is much smaller, hence $\lambda_k$ can be larger (that is, the results of Lemma~\ref{lem:zero_cap} hold for larger values of $\lambda_k$), and then the one-stage squeezing used in \cite{JM18} suffices.} \begin{equation} \label{eq:cost_transport_vector_field} \|v_k(t,\cdot)\|_{s,p} = \|\xi_k^I\|_{s,p} \le \sum_{z\in Z_I\cap [0,1]^m} \|\xi_k\|_{s,p} \lesssim k^m \|\xi_k\|_{s,p} = o(1). \varepsiloneq Let $\theta_I(t,x,y)$ be the solution of \[ \partial_t\theta_I = v_k(t,\theta_I,y), \qquad \theta_I(0,x,y) = x, \] and define $\Theta_I(t,x,y) = (\theta_I(t,x,y),y)$. Denote $\theta_I(x,y) := \theta_I(1,x,y)$ and $\Theta_I(x,y) := \Theta_I(1,x,y)$. Note that for $y\in \tL{I}$, we have $\xi_k^I(0,y) \mathfrak{g}e 1$, and therefore $\theta_I(t,0,y) \mathfrak{g}e t$. Since $\theta_I(t,x',y) > \theta_I(t,x,y)$ whenever $x'>x$, we have that \begin{equation} \label{eq:transport} \theta_I(x,y) > \theta_I(0,y) \mathfrak{g}e 1, \qquad \text{for every $x>0$ and $y\in \tL{I}$.} \varepsiloneq Note also that since $\xi_k\mathfrak{g}e 0$, we have that \begin{equation} \label{eq:transport_2} \theta_I(x,y) \mathfrak{g}e x, \qquad \text{for every $(x,y)$.} \varepsiloneq Finally, \varepsilonqref{eq:cost_transport_vector_field} implies that \begin{equation} \label{eq:cost_transport} \operatorname{dist}_{s,p}(\Theta_I,\operatorname{Id}) = o(1). \varepsiloneq \paragraph{Step IV: conclusion of the proof} Now, define \[ \Phi_k := \Phi_k^{2^m} \circ \ldots \circ \Phi_k^1, \qquad \Phi_k^I := \Psi_I^{-1} \circ \Theta_I \circ \Psi_I. 
\] Note that $\Phi_k$ and $\Phi_k^I$ only change the $x$ coordinates; therefore, we write \[ \Phi_k(x,y) = (\phi_k(x,y),y), \qquad \Phi_k^I(x,y) = (\phi_k^I(x,y),y). \] Estimates \varepsilonqref{eq:squeezing_cost} and \varepsilonqref{eq:cost_transport}, together with Lemma~\ref{lem:right_invariance} imply that \[ \operatorname{dist}_{s,p}(\operatorname{Id}, \Phi_k) = o(1). \] We now claim that $\Phi_k(U) \cap U = \varepsilonmptyset$. This will complete the proof as it shows that the displacement energy of $U$ is zero, since $\Phi_k(U) \cap U = \varepsilonmptyset$ implies that $E(U) \le \operatorname{dist}_{s,p}(\operatorname{Id},\Phi_k)$ and the righthand side tends to zero. Let $(x,y)\in U$. In particular, $y\in L_I$ for some $I$. Therefore, $\psi_I(x,y) \in \tL{I}$, and therefore, since $x>0$, we have from \varepsilonqref{eq:transport}-\varepsilonqref{eq:transport_2} that \[ \phi_k(x,y) \mathfrak{g}e \phi_k^I(x,y) = \theta_I(x,\psi_I(x,y)) > 1, \] hence $\Phi_k(x,y) \notin U$, hence $\Phi_k(U) \cap U = \varepsilonmptyset$. \paragraph{Acknowledgements} We are grateful to Martin Bauer, Philipp Harms and Stephen Preston for introducing us their paper and the notion of displacement energy. This work was partially supported by the Natural Sciences and Engineering Research Council of Canada under operating grant 261955. {\footnotesize \providecommand{\bysame}{\leavevmode\hbox to3em{\hrulefill}\thinspace} \providecommand{\mathcal{M}R}{\relax\ifhmode\unskip\space\fi MR } \providecommand{\mathcal{M}Rhref}[2]{ \href{http://www.ams.org/mathscinet-getitem?mr=#1}{#2} } \providecommand{\href}[2]{#2} \begin{thebibliography}{BBHM13} \bibitem[BBHM13]{BBHM13} M.~Bauer, M.~Bruveris, P.~Harms, and P.W.~Michor, \varepsilonmph{Geodesic distance for right invariant {Sobolev} metrics of fractional order on the diffeomorphism group}, Ann Glob Anal Geom \textbf{44} (2013), no.~1, 5--21. \bibitem[BBM13]{BBM13} M.~Bauer, M.~Bruveris, and P.W. 
Michor, \varepsilonmph{Geodesic distance for right invariant {Sobolev} metrics of fractional order on the diffeomorphism group {II}}, Ann Glob Anal Geom \textbf{44} (2013), no.~4, 361--368. \bibitem[BHP18]{BHP18} M.~Bauer, P.~Harms, and S.C. Preston, \varepsilonmph{Vanishing distance phenomena and the geometric approach to {SQG}}, \url{https://arxiv.org/abs/1805.04401}, 2018. \bibitem[BM01]{BM01} H.~Brezis and P.~Mironescu, \varepsilonmph{{Gagliardo-Nirenberg}, composition and products in fractional {Sobolev} spaces}, Journal of Evolution Equations \textbf{1} (2001), no.~4, 387--404. \bibitem[EP93]{EP93} Y.~Eliashberg and L.~Polterovich, \varepsilonmph{Bi-invariant metrics on the group of {Hamiltonian} diffeomorphisms}, International Journal of Mathematics \textbf{04} (1993), no.~05, 727--738. \bibitem[Eps70]{Eps70} D.~B.~A. Epstein, \varepsilonmph{The simplicity of certain groups of homeomorphisms}, Compositio Mathematica \textbf{22} (1970), no.~2, 165--173 (eng). \bibitem[ER91]{ER91} Y.~Eliashberg and T.~Ratiu, \varepsilonmph{The diameter of the symplectomorphism group is infinite.}, Inventiones Mathematicae \textbf{103} (1991), no.~2, 327--340. \bibitem[JM19]{JM18} R.L.~Jerrard and C.~Maor, \varepsilonmph{Vanishing geodesic distance for right-invariant {Sobolev} metrics on diffeomorphism groups}, Ann Glob Anal Geom \textbf{55} (2019), no.~4, 631--656. \bibitem[MM05]{MM05} P.W. Michor and D.~Mumford, \varepsilonmph{Vanishing geodesic distance on spaces of submanifolds and diffeomorphisms}, Doc. Math. \textbf{10} (2005), 217--245. \bibitem[She17]{She17} E.~Shelukhin, \varepsilonmph{The {Hofer} norm of a contactomorphism}, Journal of Symplectic Geometry \textbf{15} (2017), no.~4, 1173--1208. \bibitem[Tri92]{Tri92} H.~Triebel, \varepsilonmph{{Theory of Function Spaces II}}, Monographs in Mathematics, vol.~84, Birkh\"auser Basel, 1992. \varepsilonnd{thebibliography} } \varepsilonnd{document}
\begin{document} \author{Trevor M.\ Wilson} \title{The large cardinal strength of Weak Vop\v{e}nka's Principle} \address{Department of Mathematics\\Miami University\\Oxford, Ohio 45056\\USA} \email{[email protected]} \urladdr{https://www.users.miamioh.edu/wilso240} \begin{abstract} We show that Weak Vop\v{e}nka's Principle, which is the statement that the opposite category of ordinals cannot be fully embedded into the category of graphs, is equivalent to the large cardinal principle Ord is Woodin, which says that for every class $C$ there is a $C$-strong cardinal. Weak Vop\v{e}nka's Principle was already known to imply the existence of a proper class of measurable cardinals. We improve this lower bound to the optimal one by defining structures whose nontrivial homomorphisms can be used as extenders, thereby producing elementary embeddings witnessing $C$-strongness of some cardinal. \end{abstract} \maketitle \section{Introduction} We work in the second-order set theory GB $+$ AC, meaning G\"{o}del--Bernays set theory with the Axiom of Choice for sets. This theory allows us to deal with arbitrary classes and Ord-sequences of structures. Because every model of ZFC together with its definable classes forms a model of GB $+$ AC, the results of this paper also hold in ZFC for definable classes as a special case. A \emph{graph} is a structure $\langle G; E\rangle$ where $G$ is a set and $E$ is a binary relation on $G$. A \emph{homomorphism} of graphs $\langle G; E\rangle \to \langle G'; E'\rangle$ is a function $h : G \to G'$ such that for all $\langle x_1,x_2\rangle \in E$ we have $\langle h(x_1),h(x_2)\rangle \in E'$. Sometimes we write just $G$ for a graph instead of $\langle G; E\rangle$. \emph{Vop\v{e}nka's Principle} (VP) states that the category of graphs has no large discrete full subcategory, or in other words that for every proper class of graphs there is some non-identity homomorphism among the graphs in that class.
Ad\'{a}mek, Rosick\'{y}, and Trnkov\'{a} \cite[Lemma 1]{AdaRosTrnLimitClosed} showed that VP is equivalent to the statement that the category $\mathrm{Ord}$ does not fully embed into the category of graphs, and they defined \emph{Weak Vop\v{e}nka's Principle} (WVP) as the dual statement that the opposite category $\text{Ord}^\text{op}$ does not fully embed into the category of graphs. More explicitly: \begin{defn}[\cite{AdaRosTrnLimitClosed}] \emph{Weak Vop\v{e}nka's Principle} (WVP) says that no sequence of graphs $\langle G_\alpha : \alpha \in \mathrm{Ord}\rangle$ has both of the following properties: whenever $\alpha \le \alpha'$ there is a unique homomorphism $G_{\alpha'} \to G_{\alpha}$ and whenever $\alpha < \alpha'$ there is no homomorphism $G_\alpha \to G_{\alpha'}$. \end{defn} Ad\'{a}mek, Rosick\'{y}, and Trnkov\'{a} \cite[Lemma 2]{AdaRosTrnLimitClosed} observed that WVP follows from VP (hence the name ``weak''). They also noted that it implies the existence of a proper class of measurable cardinals. The main result of this article is a substantial improvement to this large-cardinal lower bound, leading to an equivalence. A variant of WVP is obtained by removing uniqueness from the definition: \begin{defn}[\cite{AdaRosInjectivity}] \emph{Semi-Weak Vop\v{e}nka's Principle} (SWVP) says that no sequence of graphs $\langle G_\alpha : \alpha \in \mathrm{Ord}\rangle$ has both of the following properties: whenever $\alpha \le \alpha'$ there is a homomorphism $G_{\alpha'} \to G_{\alpha}$ and whenever $\alpha < \alpha'$ there is no homomorphism $G_\alpha \to G_{\alpha'}$. \end{defn} We have VP $\implies$ SWVP $\implies$ WVP, where the first implication is due to Ad\'{a}mek and Rosick\'{y} \cite{AdaRosInjectivity} and the second implication is trivial. Wilson \cite{WilWeakVopenka} proved that SWVP and WVP are equivalent to each other and are both strictly weaker than VP, in fact strictly weaker than the existence of a supercompact cardinal.
In this article, we will show that the large cardinal principle ``Ord is Woodin'' (defined below) implies SWVP and follows from WVP. Since SWVP trivially implies WVP, this gives the precise large cardinal strength of both principles. It also gives another proof of their equivalence to each other. Before stating the definition of this large cardinal principle, we briefly review some set-theoretic terminology. Recall that a class $M$ is called \emph{transitive} if every element of $M$ is a subset of $M$. Important examples of transitive sets are $V_\alpha$, $\alpha\in \mathrm{Ord}$, defined recursively by $V_0 = \emptyset$, $V_{\alpha+1} = \powerset(V_\alpha)$, and $V_\lambda = \bigcup_{\alpha < \lambda} V_\alpha$ if $\lambda$ is a limit ordinal. In other words, $V_\alpha$ is the set of all sets of rank less than $\alpha$. The class of all sets, $V$, is equal to $\bigcup_{\alpha \in \mathrm{Ord}} V_\alpha$ by the axiom of foundation. The \emph{critical point} of an elementary embedding $j : \langle V; \in \rangle \to \langle M; \in \rangle$, denoted by $\crit(j)$, is the least ordinal $\kappa$ such that $j(\kappa) \ne \kappa$. If $j$ is not the identity, then it has a critical point $\kappa$ equal to the least rank of any set that is moved by $j$. Moreover, $\kappa$ is a cardinal and $j(\kappa) > \kappa$. \begin{defn}\label{defn:weakly-beta-C-strong} Let $C$ be a class, let $\kappa$ be a cardinal, and let $\beta$ be an ordinal. We say that $\kappa$ is \emph{$\beta$-$C$-strong} if there is a transitive class $M$ and an elementary embedding \[j : \langle V; \in \rangle \to \langle M; \in \rangle\] such that \begin{enumerate} \item\label{item:crit} $\crit(j) = \kappa$, \item\label{item:V-beta} $V_\beta \subset M$, and \item\label{item:coherence} $j(C \cap V_\beta) \cap V_\beta = C \cap V_\beta$. 
\end{enumerate} \end{defn} Because condition \eqref{item:crit} implies that $\kappa$ is a measurable cardinal, we could omit from the definition the explicit requirement that $\kappa$ is a cardinal. Note that if $\kappa$ is $\beta$-$C$-strong then it is $\beta'$-$C$-strong for every ordinal $\beta' < \beta$, as witnessed by the same elementary embedding $j$. We will sometimes write condition \eqref{item:coherence} in the abbreviated form $j(C) \cap V_\beta = C \cap V_\beta$, where for every class $C$ the class $j(C)$ is defined as $\bigcup_{\alpha \in \mathrm{Ord}} j(C\cap V_\alpha)$. This produces an equivalent definition because $j(C)$ and $j(C\cap V_\beta)$ have the same intersection with $V_\beta$. Many authors include a fourth condition $j(\kappa) > \beta$ in the definition of $\beta$-$C$-strong. This condition is not relevant to WVP, so it will not be used in this article. Adding this condition results in an equivalent definition of $\beta$-$C$-strong when $\beta$ is a successor ordinal,\footnote{See Kanamori \cite[Exercise 26.7(b)]{KanHigherInfinite} for the case $C = \emptyset$, which is based on the argument of Kunen \cite{KunElementary} for $\beta$-supercompactness and easily generalizes to an arbitrary class $C$.} so our omission of it does not affect the global notion of $C$-strongness that we define next. \begin{defn} Let $C$ be a class and let $\kappa$ be a cardinal. We say that $\kappa$ is \emph{$C$-strong} if it is $\beta$-$C$-strong for every ordinal $\beta$. \end{defn} We may now define the large cardinal notion that we will show is equivalent to WVP. It is a standard (though lesser-used) variation of the definition of ``Woodin cardinal'': \begin{defn} \emph{Ord is Woodin} means that for every class $C$ there is a $C$-strong cardinal. \end{defn} The main result of this article is the following. \begin{thm}[GB + AC] \label{thm:main} The following statements are equivalent. \begin{enumerate} \item\label{item:Ord-is-Woodin} Ord is Woodin. 
\item\label{item:SWVP} SWVP. \item\label{item:WVP} WVP. \end{enumerate} \end{thm} The proof that Ord is Woodin implies SWVP is relatively easy, so we give it now. \begin{proof}[Proof of \eqref{item:Ord-is-Woodin} $\implies$ \eqref{item:SWVP}] Assume that Ord is Woodin, let $G = \langle G(\alpha) : \alpha \in \mathrm{Ord} \rangle$ be a sequence of graphs, and assume that whenever $\alpha \le \alpha'$ there is a homomorphism $G(\alpha') \to G(\alpha)$. It will suffice to show that there is an ordinal $\kappa$ and a homomorphism $G(\kappa) \to G(\kappa+1)$. Because Ord is Woodin, there is a $G$-strong cardinal $\kappa$. Take $\beta$ sufficiently large that the ordered pair $\langle \kappa+1, G(\kappa+1)\rangle$, which is an element of the class function $G$, is also an element of $V_\beta$. Because $\kappa$ is $\beta$-$G$-strong, there is a transitive class $M$ and an elementary embedding \[j : \langle V; \in \rangle \to \langle M; \in \rangle\] such that $\crit(j) = \kappa$, $V_\beta \subset M$, and $j(G) \cap V_\beta = G \cap V_\beta$. Then $\langle \kappa+1, G(\kappa+1)\rangle$ is also an element of the class function $j(G)$ (which is a sequence of graphs in $M$,) meaning \[ j(G)(\kappa+1) = G(\kappa+1).\] The elementarity of $j$ implies that the restriction $j \restriction G(\kappa)$ is a homomorphism $G(\kappa) \to j(G(\kappa))$, and also implies that $j(G(\kappa)) = j(G)(j(\kappa))$, so what we have is a homomorphism \[j \restriction G(\kappa) : G(\kappa) \to j(G)(j(\kappa)).\] By our assumption on the existence of ``backward'' homomorphisms from later graphs to earlier ones, the elementarity of $j$, and the fact that $j(\kappa) > \kappa+1$, there is a homomorphism $h$ in $M$ from $j(G)(j(\kappa))$ to $j(G)(\kappa+1)$. 
The fact that $h$ is a homomorphism is absolute between $M$ and $V$, and we have $j(G)(\kappa+1) = G(\kappa+1)$ as mentioned previously, so what we have is a homomorphism \[h : j(G)(j(\kappa)) \to G(\kappa+1).\] Then the composition $h \circ (j \restriction G(\kappa))$ is a homomorphism $G(\kappa) \to G(\kappa+1)$, as desired. \end{proof} To complete the proof of Theorem \ref{thm:main}, because SWVP trivially implies WVP it remains to prove that WVP implies Ord is Woodin. This will be done in Section \ref{section:WVP-implies-Woodin} (except the proof of Lemma \ref{lem:every-extender-is-derived}, which will be done in Section \ref{section:every-extender-is-derived}.) In this proof, we will not work directly with graphs. We will use the fact that for any signature with countably many finitary relation (or partial operation) symbols, the category of structures in this signature fully embeds into the category of graphs by Hedrl\'{\i}n and Pultr \cite{HedPulFullEmbeddings}. The principles VP, WVP, and SWVP can therefore be taken to apply to such general structures instead of graphs. The key concept in the proof that WVP implies Ord is Woodin will be a certain notion of extender. Various definitions of ``extender'' can be found in the literature. They fall into two main types, which are essentially equivalent. One type of extender, which we will not use in this article, is a family of ultrafilters whose ultrapower embeddings form a directed system: see for example Kanamori \cite[Section 26]{KanHigherInfinite}. Instead we will use the other type of extender, which is a function on power sets that preserves certain structure (and can therefore be thought of as a kind of homomorphism): see for example Neeman \cite[Definition 2.1]{NeeMitchellOrder}. To keep this article simple and self-contained, we will define the most convenient structures on power sets whose homomorphisms suffice for our purposes. 
We will not rely on any pre-existing definitions or theorems about extenders. Once Theorem \ref{thm:main} is proved, it can be applied locally to give a characterization of Woodin cardinals. Recall that a \emph{Woodin cardinal} is an inaccessible cardinal $\delta$ such that for every set $C \subset V_\delta$ there is a cardinal $\kappa < \delta$ that is $\beta$-$C$-strong for all $\beta < \delta$. For an inaccessible cardinal $\delta$, the two-sorted structure $\langle V_\delta, V_{\delta+1}; \in \rangle$ satisfies GB + AC, where the elements of $V_\delta$ are regarded as sets of the structure and the elements of $V_{\delta+1}$ (subsets of $V_\delta$) are regarded as classes of the structure. It is not hard to see that an inaccessible cardinal $\delta$ is Woodin if and only if the structure $\langle V_\delta, V_{\delta+1}; \in \rangle$ satisfies ``Ord is Woodin.'' Applying Theorem \ref{thm:main} in this structure immediately yields the following consequence. \begin{cor}[ZFC] \label{cor:Woodin} If $\delta$ is inaccessible, then the following statements are equivalent. \begin{enumerate} \item\label{item:delta-is-Woodin} $\delta$ is a Woodin cardinal. \item\label{item:SWVP-below-delta} There is no sequence of graphs $\langle G_\alpha : \alpha <\delta\rangle$ such that each graph $G_\alpha$ has cardinality less than $\delta$, whenever $\alpha \le \alpha' < \delta$ there is a homomorphism $G_{\alpha'} \to G_{\alpha}$, and whenever $\alpha < \alpha' < \delta$ there is no homomorphism $G_\alpha \to G_{\alpha'}$. \item\label{item:WVP-below-delta} There is no sequence of graphs $\langle G_\alpha : \alpha <\delta\rangle$ such that each graph $G_\alpha$ has cardinality less than $\delta$, whenever $\alpha \le \alpha' < \delta$ there is a unique homomorphism $G_{\alpha'} \to G_{\alpha}$, and whenever $\alpha < \alpha' < \delta$ there is no homomorphism $G_\alpha \to G_{\alpha'}$. 
\end{enumerate} \end{cor} \section{WVP implies Ord is Woodin}\label{section:WVP-implies-Woodin} Assume GB + AC. For a set $X$ we write $X^{\mathord{<}\omega}$ for the set of all finite-length sequences of elements of $X$, and for $k<\omega$ we write $X^k$ for the set of all $k$-length sequences of elements of $X$. We use the symbol $\powerset$ for the power set operation. \begin{defn} For a set $X$ and natural numbers $j$, $k$, and $i_1,\ldots,i_j$ such that $1 \le i_1,\ldots,i_j \le k$, we define the function $\Proj_{k, \langle i_1,\ldots, i_j \rangle} : X^k \to X^j$ by \begin{align*} & \Proj_{k, \langle i_1,\ldots, i_j \rangle}(\langle x_1, \ldots, x_k \rangle) = \langle x_{i_1}, \ldots, x_{i_j} \rangle. \end{align*} (The notation $\Proj$ is intended to indicate a vector of coordinate projections.) \end{defn} The following structures are designed in such a way that homomorphisms between them will correspond to elementary embeddings with domain $\langle V ; \in\rangle$. \begin{defn} A \emph{$\mathscr{P}$-structure} is a structure \[\mathscr{P}_X = \big\langle \powerset(X^{\mathord{<}\omega}); \mathord{\cap}, \mathord{-}, X^k, \WF, \Proj^{-1}_{k, \langle i_1,\ldots,i_j \rangle}, \BP_k \big\rangle_{j,k<\omega \text{ and } 1 \le i_1,\ldots,i_j \le k}\] where $X$ is a transitive set, with the following operations and relations. \begin{enumerate} \item $\cap$ is the binary operation of intersection. \item $-$ is the unary operation of complementation. \item $X^k$ is a constant. \item $\WF$ is the unary relation on $\powerset(X^{\mathord{<}\omega})$ consisting of all sets $A \subset X^{\mathord{<}\omega}$ that are wellfounded under $\supsetneq$, the reverse of the proper initial segment relation.
\item $\Proj^{-1}_{k, \langle i_1,\ldots,i_j \rangle}$ is the function $\powerset(X^j) \to \powerset(X^k)$, considered as a partial unary operation on $\powerset(X^{\mathord{<}\omega})$, that is the inverse image function of $\Proj_{k, \langle i_1,\ldots,i_j \rangle}$: \[ \Proj^{-1}_{k, \langle i_1,\ldots, i_j \rangle}(A) = \big\{\langle x_1, \ldots, x_k\rangle \in X^k : \langle x_{i_1}, \ldots, x_{i_j} \rangle \in A\big\}.\] \item $\BP_k$ (for \emph{bounded projection}) is the function $\powerset(X^{k+1}) \to \powerset(X^{k+1})$, considered as a partial unary operation on $\powerset(X^{\mathord{<}\omega})$, defined by \[\BP_k(A) = \big\{ \langle x_1, \ldots, x_{k+1} \rangle \in X^{k+1}: \exists z \in x_{k+1}\; \langle x_1,\ldots, x_{k},z \rangle \in A \big\}. \] \end{enumerate} \end{defn} Our main results will only use the structures $\mathscr{P}_{V_\beta}$ for ordinals $\beta$, but we may as well allow $X$ to be an arbitrary transitive set in the definition. Note that for every set $A \subset X^{\mathord{<}\omega}$, the following statements are equivalent by DC: \begin{itemize} \item $\WF(A)$ fails. \item There is an infinite chain $a_1 \subsetneq a_2 \subsetneq a_3 \subsetneq \cdots$ of elements of $A$. \item There is a sequence $f \in X^\omega$ such that $f\restriction n \in A$ for infinitely many $n < \omega$. \end{itemize} The bounded projection operator $\BP_k$ is called ``bounded'' because of the bounded existential quantifier $\exists z \in x_{k+1}$ in the definition. Note that these operators $\BP_k$ depend on the structure of $X$ as a material set, meaning $\langle X; \in \rangle$. A \emph{homomorphism} of $\mathscr{P}$-structures is a homomorphism of structures in the usual sense: it is a function that preserves (commutes with) the operations and partial operations, and preserves the relation $\WF$, but does not necessarily preserve failure of $\WF$. 
Because all boolean operations are generated by $\cap$ and $-$, every homomorphism of $\mathscr{P}$-structures is a homomorphism of boolean algebras, and in particular preserves the subset relation $\subset$. \begin{rem} In the usual terminology of extenders, the properties of preservation of $\WF$, $\Proj^{-1}$, and $\BP$ are called \emph{countable completeness}, \emph{coherence}, and \emph{normality} respectively. \end{rem} The homomorphisms given by the following lemma will be called \emph{trivial homomorphisms}. Although they carry no information, their existence will be crucial for our application because they will provide the ``backward'' homomorphisms in the definition of WVP. \begin{lem}\label{lem:trivial-hom} For all transitive sets $X$ and $Y$ such that $Y \subset X$, the function $h : \powerset(X^{\mathord{<}\omega}) \to \powerset(Y^{\mathord{<}\omega})$ defined by $h(A) = A \cap Y^{\mathord{<}\omega}$ is a homomorphism from $\mathscr{P}_X$ to $\mathscr{P}_Y$. \end{lem} \begin{proof} Clearly $h$ is a boolean algebra homomorphism, $h(X^k) = Y^k$ for all $k < \omega$, and $h$ preserves the unary relation $\WF$. Preservation of the unary partial operation $\Proj^{-1}_{k, \langle i_1,\ldots,i_j \rangle}$ follows from closure of $Y^{\mathord{<}\omega}$ under the function $\Proj_{k, \langle i_1,\ldots,i_j\rangle}$. To verify preservation of the unary partial operation $\BP_k$ we must show that for all $A \subset X^{\mathord{<}\omega}$ and $y_1,\ldots, y_{k+1}\in Y$, \[ \langle y_1,\ldots, y_{k+1}\rangle \in \BP_k(A) \iff \langle y_1,\ldots, y_{k+1}\rangle \in \BP_k(A \cap Y^{\mathord{<}\omega}).\] In other words, we must show that \[ \exists z \in y_{k+1} \; \langle y_1,\ldots, y_k,z\rangle \in A \iff \exists z \in y_{k+1} \; \langle y_1,\ldots, y_k,z\rangle \in A \cap Y^{\mathord{<}\omega}.\] This follows from transitivity of $Y$: if $y_{k +1}\in Y$ and $z \in y_{k+1}$, then $z \in Y$. (Here we rely on the ``boundedness'' of the bounded projection operator.) 
\end{proof} Nontrivial homomorphisms of $\mathscr{P}$-structures can be obtained from elementary embeddings of the set-theoretic universe $V$ by the usual ``derived extender'' method: \begin{lem}\label{lem:derived-extender} Let $j : \langle V; \in \rangle \to \langle M; \in \rangle$ be an elementary embedding for some transitive class $M$. For all transitive sets $X$ and $Y$ such that $Y \subset j(X)$, the function $h : \powerset(X^{\mathord{<}\omega}) \to \powerset(Y^{\mathord{<}\omega})$ defined by $h(A) = j(A) \cap Y^{\mathord{<}\omega}$ is a homomorphism from $\mathscr{P}_X$ to $\mathscr{P}_Y$. \end{lem} \begin{proof} Because $j$ is elementary and the $\mathscr{P}$-structure $\mathscr{P}_X$ is uniformly definable from $X$, the restriction $j \restriction \mathcal{P}(X^{\mathord{<}\omega})$ is a homomorphism from $\mathscr{P}_X$ to $(\mathscr{P}_{j(X)})^M$. Here $(\mathscr{P}_{j(X)})^M$ means the structure $\mathscr{P}_{j(X)}$ as it is defined in $M$. Note that $j(X)$ is a transitive set, $(j(X)^{\mathord{<}\omega})^M = j(X)^{\mathord{<}\omega}$, and $\powerset(j(X)^{\mathord{<}\omega})^M \subset \powerset(j(X)^{\mathord{<}\omega})$ where the power set is computed in $M$. It follows that $(\mathscr{P}_{j(X)})^M$ is a substructure of $\mathscr{P}_{j(X)}$ because the definitions of the operations and relations are easily seen to be absolute for transitive models of ZFC. (In particular, every instance of the $\WF$ relation that holds in $M$ holds also in $V$, because it can be ``certified'' by an ordinal-valued rank function in $M$ and the ordinals of $M$ are the true ordinals.) By composing $j \restriction \mathcal{P}(X^{\mathord{<}\omega})$ with the inclusion homomorphism from $(\mathscr{P}_{j(X)})^M$ to $\mathscr{P}_{j(X)}$, we may therefore consider it as a homomorphism from $\mathscr{P}_X$ to $\mathscr{P}_{j(X)}$. 
The function $h$ is the composition of the homomorphism $j \restriction \mathcal{P}(X^{\mathord{<}\omega}) : \mathscr{P}_X \to \mathscr{P}_{j(X)}$ with the trivial homomorphism $\mathscr{P}_{j(X)} \to \mathscr{P}_Y$ that exists by Lemma \ref{lem:trivial-hom} because $Y \subset j(X)$, so it is a homomorphism. \end{proof} A homomorphism $h$ given by an elementary embedding $j : \langle V; \in \rangle \to \langle M; \in \rangle$ as in Lemma \ref{lem:derived-extender} is said to be \emph{derived from $j$}. We will need the following result, which states that every homomorphism of $\mathscr{P}$-structures can be realized as a derived homomorphism. It will be proved in the next section, where we will build $M$ and $j$ from $h$ using a standard ``ultrapower'' (also called ``term model'') construction. \begin{lem}\label{lem:every-extender-is-derived} Let $X$ and $Y$ be transitive sets and let $h : \mathscr{P}_X \to \mathscr{P}_Y$ be a homomorphism. Then there is a transitive class $M$ and an elementary embedding $j : \langle V; \in \rangle \to \langle M; \in \rangle$ such that $Y \subset j(X)$ and $h(A) = j(A) \cap Y^{\mathord{<}\omega}$ for all $A \subset X^{\mathord{<}\omega}$. \end{lem} The above definitions and results about $\mathscr{P}$-structures would suffice to obtain a strong cardinal (meaning a $C$-strong cardinal for $C = \emptyset$) from WVP, but to obtain a $C$-strong cardinal for an arbitrary class $C$ we will need a slight addition to the notion of $\mathscr{P}$-structure: \begin{defn} A \emph{pointed $\mathscr{P}$-structure} is a $\mathscr{P}$-structure with an additional constant: \[\mathscr{P}_{X,c} = \big\langle \powerset(X^{\mathord{<}\omega}); \mathord{\cap}, \mathord{-}, X^k, \WF, \Proj^{-1}_{k, \langle i_1,\ldots,i_j \rangle}, \BP_k, c^{\mathord{<}\omega}\big\rangle_{j,k<\omega \text{ and } 1 \le i_1,\ldots,i_j \le k}\] where $X$ is a transitive set and $c \subset X$.
\end{defn} The notion of homomorphism for pointed $\mathscr{P}$-structures is defined in the usual way, so a homomorphism $\mathscr{P}_{X,c} \to \mathscr{P}_{Y,d}$ is a homomorphism $h: \mathscr{P}_{X} \to \mathscr{P}_{Y}$ such that $h(c^{\mathord{<}\omega}) = d^{\mathord{<}\omega}$. In particular, for a class $C$, a homomorphism $\mathscr{P}_{X,C \cap X} \to \mathscr{P}_{Y, C\cap Y}$ is a homomorphism $h: \mathscr{P}_{X} \to \mathscr{P}_{Y}$ such that $h((C\cap X)^{\mathord{<}\omega}) = (C \cap Y)^{\mathord{<}\omega}$. We may now complete the proof of Theorem \ref{thm:main} by showing that if WVP holds, then Ord is Woodin. Assume that Ord is not Woodin, meaning that for some class $C$ there is no $C$-strong cardinal. Define the class of ordinals \[ S = \{ \beta \in \mathrm{Ord} : \forall \kappa < \beta \,(\text{$\kappa$ is not $\beta$-$C$-strong})\}.\] \begin{claim} $S$ is a proper class. \end{claim} \begin{proof} For every ordinal $\kappa$, we may define $f(\kappa)$ to be the least ordinal $\beta$ such that $\kappa$ is not $\beta$-$C$-strong, which exists because $\kappa$ is not $C$-strong. (If $\kappa$ is not a measurable cardinal, then $\beta = 0$ works here.) Then $S$ contains the class of all closure points of the function $f : \mathrm{Ord} \to \mathrm{Ord}$, which is a closed unbounded class of ordinals. \end{proof} We may therefore enumerate $S$ in strictly increasing order as $S = \{ \beta(\xi) : \xi \in \mathrm{Ord}\}$ and define an Ord-sequence of pointed $\mathscr{P}$-structures $\big\langle \mathscr{P}_{V_{\beta(\xi)}, C \cap V_{\beta(\xi)}} : \xi \in \mathrm{Ord}\big\rangle.$ As mentioned in the introduction, WVP equivalently applies to more general structures such as pointed $\mathscr{P}$-structures instead of graphs, so it remains to prove the following claim. \begin{claim} $\big\langle \mathscr{P}_{V_{\beta(\xi)}, C \cap V_{\beta(\xi)}} : \xi \in \mathrm{Ord}\big\rangle$ is a counterexample to WVP. 
\end{claim} \begin{proof} If not, then because we have trivial ``backward'' homomorphisms given by Lemma \ref{lem:trivial-hom}, there must also be some nontrivial homomorphism \[h : \mathscr{P}_{V_{\alpha}, C \cap V_{\alpha}} \to \mathscr{P}_{V_{\beta}, C \cap V_{\beta}}\] for some ordinals $\alpha, \beta \in S$. By ``nontrivial'' we mean that either $\alpha < \beta$, or else $\alpha \ge \beta$ and $h(A) \ne A \cap V_\beta^{\mathord{<}\omega}$ for some set $A \subset V_\alpha^{\mathord{<}\omega}$. Considering $h$ as a homomorphism from $\mathscr{P}_{V_{\alpha}}$ to $\mathscr{P}_{V_\beta}$, by Lemma \ref{lem:every-extender-is-derived} there is a transitive class $M$ and an elementary embedding $j : \langle V; \in \rangle \to \langle M; \in \rangle$ such that $h$ is derived from $j$, meaning $V_\beta \subset j(V_{\alpha})$ and $h(A) = j(A) \cap V_\beta^{\mathord{<}\omega}$ for all $A \subset V_{\alpha}^{\mathord{<}\omega}$. Moreover, because $h$ is a homomorphism of pointed $\mathscr{P}$-structures it preserves the additional constant for $C$, meaning $h((C \cap V_\alpha)^{\mathord{<}\omega}) = (C \cap V_\beta)^{\mathord{<}\omega}$. It follows that $j(C \cap V_\alpha) \cap V_\beta = C \cap V_\beta$. We will obtain a contradiction to $\beta \in S$ by showing that $j$ witnesses the definition of $\beta$-$C$-strongness for some $\kappa < \beta$. First, note that $j$ moves some element of $V_\beta$. If $\alpha < \beta$ then this follows from the fact that $V_\beta \subset j(V_{\alpha})$. If $\alpha \ge \beta$ then this follows from the nontriviality assumption that $h(A) \ne A \cap V_\beta^{\mathord{<}\omega}$ for some set $A \subset V_\alpha^{\mathord{<}\omega}$, which implies that $j(A) \cap V_\beta^{\mathord{<}\omega} \ne A \cap V_\beta^{\mathord{<}\omega}$, so $j$ moves some element of $V_\beta^{\mathord{<}\omega}$. Therefore $\crit(j)$ exists and is less than $\beta$, so we may define $\kappa = \crit(j)$. 
Second, note that the condition $V_\beta \subset j(V_{\alpha})$ implies $V_\beta \subset M$ because $j(V_{\alpha}) \in M$ and $M$ is transitive. Third and finally, note that the $C$-preservation condition $j(C \cap V_\alpha) \cap V_\beta = C \cap V_\beta$ implies $j(C \cap V_\beta) \cap V_\beta = C \cap V_\beta$ because $j(\alpha) \ge \beta$. Therefore $\kappa$ is $\beta$-$C$-strong, which is a contradiction. \end{proof} \section{Proof of Lemma \ref{lem:every-extender-is-derived}}\label{section:every-extender-is-derived} Let $X$ and $Y$ be transitive sets and let $h : \mathscr{P}_X \to \mathscr{P}_Y$ be a homomorphism. We want to show there is a transitive class $M$ and an elementary embedding $j : \langle V; \in \rangle \to \langle M; \in \rangle$ such that the homomorphism $\mathscr{P}_X \to \mathscr{P}_Y$ derived from $j$ is equal to $h$, meaning that $Y \subset j(X)$ and $h(A) = j(A) \cap Y^{\mathord{<}\omega}$ for all $A \subset X^{\mathord{<}\omega}$. Our structure $\langle M; \in\rangle$ will be obtained by a standard ``ultrapower'' (also called ``term model'') construction, similar to Neeman \cite{NeeMitchellOrder} or Zeman \cite{ZemInnerModels}. As a first approximation, we build a structure $\langle M^*; \in^*, =^*\rangle$, in which the symbols $\in$ and $=$ are interpreted as binary relations $\in^*$ and $=^*$ respectively, rather than true membership and equality. \begin{defn} $M^* = \{ \langle k, b, f \rangle : k < \omega \text{ and } b \in Y^k \text{ and } f : X^k \to V\}$. The binary relations $\in^*$ and $=^*$ on $M^*$ are defined by \begin{align*} \langle k_1, b_1, f_1 \rangle \in^* \langle k_2, b_2, f_2 \rangle &\iff b_1 b_2 \in h\big(\big\{a_1 a_2 : f_1(a_1) \in f_2(a_2)\big\}\big),\\ \langle k_1, b_1, f_1 \rangle =^* \langle k_2, b_2, f_2 \rangle &\iff b_1 b_2 \in h\big(\big\{a_1 a_2 : f_1(a_1) = f_2(a_2)\big\}\big), \end{align*} where juxtaposition (as in $a_1a_2$ and $b_1b_2$) denotes concatenation of finite sequences. 
\end{defn} Note that in definitions of sets like $\{a_1 a_2 : f_1(a_1) \in f_2(a_2)\}$, we implicitly assume the condition $a_i \in X^{k_i}$ that is required to make sense of the expression $f_i(a_i)$. Next we prove a version of \L o\'{s}'s theorem for the structure $\langle M^*; \in^*, =^*\rangle$. Here ``formula'' means a formula in the first order language with two binary relation symbols $\in$ and $=$. \begin{claim}\label{claim:Los} For every formula $\varphi$ and every $n<\omega$ sufficiently large that all free variables of $\varphi$ are contained in the set $\{v_1,\ldots,v_n\}$, the following statements are equivalent for all elements $\langle k_1,b_1,f_1 \rangle,\ldots,\langle k_n,b_n,f_n\rangle$ of $M^*$: \begin{enumerate} \item $\langle M^*; \in^*, =^* \rangle \models \varphi\big[ \langle k_1,b_1,f_1\rangle, \ldots, \langle k_n,b_n,f_n \rangle\big]$. \item $b_1 \cdots b_n \in h\big(\big\{ a_1 \cdots a_n : \varphi[f_1(a_1),\ldots,f_n(a_n)]\big\}\big)$.\footnote{Here by $\varphi[f_1(a_1),\ldots,f_n(a_n)]$ we mean $\langle V; \in, =\rangle \models \varphi[f_1(a_1),\ldots,f_n(a_n)]$.} \end{enumerate} Moreover, there is an elementary embedding \[j^* : \langle V; \in , =\rangle \to \langle M^*; \in^*, =^*\rangle\] defined by $ j^*(p) = \langle 0, \Diamond, c_p \rangle $ where $\Diamond$ is the empty sequence and for every $p \in V$ the constant function $c_p : \{\Diamond\} \to V$ is defined by $c_p(\Diamond) = p$. \end{claim} \begin{proof} The first part is proved by induction on formulas. For the first base case, assume that $\varphi$ is the formula $v_i \in v_j$ and let $n \ge \max\{i,j\}$. 
Then by definition of $\in^*$ we have \begin{align*} &\langle M^* ; \in^*, =^* \rangle \models \varphi\big[\langle k_1,b_1,f_1\rangle,\ldots,\langle k_n,b_n,f_n\rangle\big]\\ \iff &\langle k_i, b_i, f_i\rangle \in^* \langle k_j, b_j, f_j \rangle\\ \iff &b_ib_j \in h\big(\big\{ a_ia_j : f_i(a_i) \in f_j(a_j)\big\}\big)\\ \iff &b_1\cdots b_n \in h\big(\big\{ a_1\cdots a_n : f_i(a_i) \in f_j(a_j)\big\}\big) && (\Proj^{-1})\footnotemark\\ \iff &b_1 \cdots b_n \in h\big(\big\{ a_1 \cdots a_n : \varphi[f_1(a_1),\ldots,f_n(a_n)]\big\}\big). \end{align*} \footnotetext{This notation means that we use the fact that $h$ preserves some $\Proj^{-1}$ operator in this step. More specifically, the operator $\Proj^{-1}_{k_1 + \cdots + k_n, \langle k_1 + \cdots + k_{i-1}+1, \ldots, k_1 + \cdots + k_i, k_1 + \cdots + k_{j-1}+1, \ldots, k_1 + \cdots + k_j \rangle}$.}The second base case (where $\varphi$ is the formula $v_i = v_j$) is similar, using the definition of $=^*$ instead of the definition of $\in^*$. Now assume that $\varphi$ is $\neg \psi$ where the claim holds for $\psi$. Then letting $k = k_1 + \cdots + k_n$, \begin{align*} &\langle M^*; \in^*, =^* \rangle \models \varphi\big[ \langle k_1,b_1,f_1\rangle, \ldots, \langle k_n,b_n,f_n \rangle\big]\\ \iff & \langle M^*; \in^*, =^* \rangle \not\models \psi\big[ \langle k_1,b_1,f_1\rangle, \ldots, \langle k_n,b_n,f_n \rangle\big]\\ \iff & b_1 \cdots b_n \in Y^k \setminus h\big(\big\{ a_1 \cdots a_n : \psi[f_1(a_1),\ldots,f_n(a_n)]\big\}\big)\\ \iff & b_1 \cdots b_n \in h\big( X^k \setminus \big\{ a_1 \cdots a_n : \psi[f_1(a_1),\ldots,f_n(a_n)]\big\}\big)\\ \iff & b_1 \cdots b_n \in h\big(\big\{ a_1 \cdots a_n : \varphi[f_1(a_1),\ldots,f_n(a_n)]\big\}\big), \end{align*} using preservation of boolean operations and the fact that $h(X^k) = Y^k$. 
If $\varphi$ is $\psi_1 \wedge \psi_2$ where the claim holds for $\psi_1$ and $\psi_2$, then the claim holds for $\varphi$ by an entirely straightforward argument using preservation of $\cap$. Finally, assume that $\varphi$ is obtained by existential quantification from a formula $\psi$ for which the claim holds. For simplicity of notation, we assume that $n = 2$ and $\varphi$ is $\exists v_3 \psi$. (The general case can be proved similarly, with only notational complications.) Then we have \begin{align*} &\langle M^*; \in^*, =^* \rangle \models \varphi\big[\langle k_1,b_1,f_1\rangle, \langle k_2,b_2,f_2\rangle \big]\\ \implies &\langle M^*; \in^*, =^* \rangle \models \psi\big[\langle k_1,b_1,f_1 \rangle, \langle k_2,b_2,f_2\rangle, \langle k_3,b_3,f_3\rangle \big] \text{ f.s.\ $\langle k_3,b_3,f_3\rangle \in M^*$}\\ \implies & b_1 b_2 b_3 \in h\big(\big\{ a_1 a_2 a_3: \psi[f_1(a_1),f_2(a_2),f_3(a_3)]\big\}\big)\\ \implies & b_1 b_2 b_3 \in h\big(\big\{ a_1 a_2 a_3: \varphi[f_1(a_1),f_2(a_2)]\big\}\big) && (\subset)\\ \implies & b_1 b_2 \in h\big(\big\{ a_1 a_2: \varphi[f_1(a_1),f_2(a_2)]\big\}\big), && (\Proj^{-1})\footnotemark \end{align*} \footnotetext{More specifically, $\Proj^{-1}_{k_1 + k_2 + k_3, \langle 1,\ldots,k_1+k_2 \rangle}$.}and conversely, letting $k_3 = k_1 + k_2$ and $b_3 = b_1 b_2$, we have \begin{align*} &b_1 b_2 \in h\big(\big\{ a_1 a_2 : \varphi[f_1(a_1),f_2(a_2)]\big\}\big)\\ \implies &b_1 b_2 \in h\big(\big\{ a_1 a_2 : \psi[f_1(a_1),f_2(a_2),f_3(a_1a_2)]\big\}\big) \text{ f.s.\ $f_3 : X^{k_3} \to V$} && \text{(AC)}\\ \implies &b_1 b_2 b_1 b_2 \in h\big(\big\{ a_1 a_2 a_1 a_2: \psi[f_1(a_1),f_2(a_2),f_3(a_1a_2)]\big\}\big) && (\Proj^{-1})\footnotemark \\ \implies &b_1 b_2 b_3 \in h\big(\big\{ a_1 a_2 a_3: \psi[f_1(a_1),f_2(a_2),f_3(a_3)]\big\}\big) && (\subset)\\ \implies &\langle M^*; \in^*, =^* \rangle \models \psi\big[\langle k_1,b_1,f_1\rangle, \langle k_2,b_2,f_2\rangle, \langle k_3,b_3,f_3\rangle \big]\\ \implies &\langle M^*; \in^*, =^* 
\rangle \models \varphi\big[ \langle k_1,b_1,f_1\rangle, \langle k_2,b_2,f_2\rangle \big], \end{align*} where in the step labeled AC we use the Axiom of Choice to produce a function $f_3$ choosing witnesses for the existential quantifier whenever they exist. \footnotetext{More specifically, $\Proj^{-1}_{k_3, \langle 1,\ldots,k_3,1,\ldots,k_3\rangle}$.} For the ``moreover'' part, let $p_1,\ldots,p_n \in V$. Then we have \begin{align*} &\langle M^*; \in^*, =^*\rangle \models \varphi[j^*(p_1),\ldots,j^*(p_n)]\\ \iff & \langle M^*; \in^*, =^*\rangle \models \varphi\big[ \langle 0, \Diamond, c_{p_1}\rangle,\ldots, \langle 0, \Diamond, c_{p_n}\rangle\big]\\ \iff & \Diamond \cdots \Diamond \in h\Big(\Big\{\Diamond \cdots \Diamond : \varphi \big[c_{p_1}(\Diamond),\ldots,c_{p_n}(\Diamond)\big]\Big\}\Big)\\ \iff & \Diamond \in h\big(\big\{\Diamond : \varphi[p_1,\ldots,p_n]\big\}\big)\\ \iff & \varphi[p_1,\ldots,p_n], \end{align*} because $h(\emptyset) = \emptyset$ and $h(\{\Diamond\}) = h(X^0) = Y^0 = \{\Diamond\}$. \end{proof} The existence of the elementary embedding $j^*$ implies that the structure $\langle M^*; \in^*, =^* \rangle$ is elementarily equivalent to $\langle V; \in, = \rangle$. It follows that the relation $=^*$ is an equivalence relation and that the relation $\in^*$ is invariant under $=^*$. We may therefore define the quotient of the structure $\langle M^*; \in^*, =^* \rangle$ by the relation $=^*$ to obtain a structure that interprets the equality symbol as true equality: \begin{defn} $\langle M'; \in', = \rangle$ is the quotient structure $\langle M^*; \in^*, =^* \rangle / =^*$. \end{defn} Then we have \[M'= \big\{ [k, b, f] : k < \omega \text{ and } b \in Y^k \text{ and } f: X^k \to V\big\},\] where $[k,b,f]$ denotes the equivalence class\footnote{We only include representatives of minimal rank in order to ensure that $[k, b,f]$ is a set (Scott's trick).} of $\langle k, b, f \rangle$ under $=^*$ in $M^*$. 
A straightforward induction on formulas shows that the quotient function from $\langle M^*; \in^*,=^*\rangle$ to $\langle M'; \in', = \rangle$ given by $\langle k, b, f\rangle \mapsto [k, b, f]$ preserves the truth values of all formulas, so we may restate \L o\'{s}'s theorem (Claim \ref{claim:Los}) for the quotient structure $\langle M'; \in'\rangle$ as follows. (The equality symbol will henceforth always be interpreted as true equality. We will drop it from our notation for the structure, but it may still be used in formulas.) \begin{claim}\label{claim:Los2} For every formula $\varphi$ and every $n<\omega$ sufficiently large that all free variables of $\varphi$ are contained in the set $\{v_1,\ldots,v_n\}$, the following statements are equivalent for all elements $[ k_1,b_1,f_1 ],\ldots,[ k_n,b_n,f_n]$ of $M'$: \begin{enumerate} \item $\langle M'; \in' \rangle \models \varphi\big[ [ k_1,b_1,f_1], \ldots, [ k_n,b_n,f_n ]\big]$. \item $b_1 \cdots b_n \in h\big(\big\{ a_1 \cdots a_n : \varphi[f_1(a_1),\ldots,f_n(a_n)]\big\}\big)$. \end{enumerate} Moreover, there is an elementary embedding \[j' : \langle V; \in \rangle \to \langle M'; \in'\rangle\] defined by $ j'(p) = [ 0, \Diamond, c_p ] $ where $\Diamond$ is the empty sequence and for every $p \in V$ the constant function $c_p : \{\Diamond\} \to V$ is defined by $c_p(\Diamond) = p$. \end{claim} The next step is to replace the structure $\langle M'; \in'\rangle$ with an isomorphic structure $\langle M; \in\rangle$ where $M$ is a transitive set and $\in$ is the true membership relation. We will do this using the Mostowski collapse. The existence of the elementary embedding $j'$ implies that the structure $\langle M'; \in'\rangle$ satisfies the Axiom of Extensionality, so to show that its Mostowski collapse exists, it remains to verify the following two claims. \begin{claim}\label{claim:well-founded} The relation $\in'$ on $M'$ is well-founded. 
\end{claim} \begin{proof} Here we use the fact that our homomorphism $h : \mathscr{P}_X \to \mathscr{P}_Y$ preserves the relation $\WF$. Suppose toward a contradiction that $\in'$ is not well-founded. Then by DC there is an infinite decreasing sequence \[ [ k_1,b_1,f_1 ] \ni' [ k_2,b_2,f_2 ] \ni' [ k_3,b_3,f_3 ] \ni' \cdots,\] so for all $n < \omega$ we have \[ b_1 \cdots b_n \in h(\{a_1\cdots a_n : f_{n}(a_{n}) \in \cdots \in f_1(a_1)\})\] by \L o\'{s}'s theorem (Claim \ref{claim:Los2}). Because $h$ preserves $\subset$ it follows that for all $n<\omega$, \[ b_1 \cdots b_n \in h(A) \text{ where } A = \bigcup_{n <\omega} \{a_1\cdots a_n : f_{n}(a_{n}) \in \cdots \in f_1(a_1)\}.\] Therefore $h(A)$ contains the infinite chain $\{b_1 \cdots b_n : n <\omega\}$, and because $h$ preserves $\WF$ it follows that $A$ also contains some infinite chain. However, an infinite chain in $A$ would produce an infinite decreasing $\in$-sequence in $V$, contradicting the well-foundedness of $\in$. \end{proof} \begin{claim}\label{claim:set-like} The relation $\in'$ on $M'$ is set-like. \end{claim} \begin{proof} Let $[k_2, b_2, f_2] \in M'$. We will show there is only a set (rather than a proper class) of elements $[k_1, b_1, f_1] \in M'$ such that $[k_1,b_1, f_1] \in' [k_2,b_2, f_2]$. Given $[k_1,b_1, f_1] \in' [k_2,b_2, f_2]$, we will define a ``small'' approximation $\bar{f}_1$ to $f_1$ that is bounded by $f_2$ in a certain sense. Namely, we define the function $\bar{f}_1 : X^{k_1} \to V$ by \[\bar{f}_1(a_1) = \begin{cases} f_1(a_1) & \text{if $f_1(a_1) \in \bigcup\ran(f_2)$,}\\ \emptyset & \text{otherwise.} \end{cases}\] (There is nothing special about $\emptyset$ here; we could use any other fixed value.) There is only a set of possibilities for $\bar{f}_1$ because of the restriction on its range, so it suffices to show that $[k_1,b_1,f_1]$ is equal to $[k_1,b_1, \bar{f}_1]$. 
Indeed, we have \begin{align*} [k_1,b_1,f_1] \in' [k_2,b_2,f_2] \implies & b_1b_2 \in h\big(\big\{a_1a_2 : f_1(a_1) \in f_2(a_2)\big\}\big)\\ \implies & b_1b_2 \in h\big(\big\{a_1a_2 : f_1(a_1) = \bar{f}_1(a_1)\big\}\big) && (\subset)\\ \implies & b_1b_1 \in h\big(\big\{a_1a_1 : f_1(a_1) = \bar{f}_1(a_1)\big\}\big) && (\Proj^{-1})\footnotemark\\ \implies & b_1b_1 \in h\big(\big\{a_1\bar{a}_1 : f_1(a_1) = \bar{f}_1(\bar{a}_1)\big\}\big) && (\subset)\\ \implies &[k_1,b_1,f_1] = [k_1,b_1, \bar{f}_1], \end{align*} where $\bar{a}_1$ denotes an arbitrary element of $X^{k_1}$ not necessarily equal to $a_1$. \end{proof} \footnotetext{More specifically, $\Proj^{-1}_{k_1+k_2, \langle 1, \ldots, k_1,1,\ldots, k_1\rangle}$.} We may therefore define the Mostowski collapse: \begin{defn} $\langle M; \in\rangle$ is the Mostowski collapse of $\langle M'; \in' \rangle$. \end{defn} Then we have \[M = \big\{ \llbracket k, b, f\rrbracket : k < \omega \text{ and } b \in Y^k \text{ and } f: X^k \to V\big\},\] where $\llbracket k,b,f\rrbracket$ denotes the image of $[k,b,f]$ under the Mostowski collapse function. Because the Mostowski collapse function is an isomorphism, we may restate \L o\'{s}'s theorem (Claim \ref{claim:Los2}) for the collapsed structure $\langle M; \in \rangle$ as follows. \begin{claim}\label{claim:Los3} For every formula $\varphi$ and every $n<\omega$ sufficiently large that all free variables of $\varphi$ are contained in the set $\{v_1,\ldots,v_n\}$, the following statements are equivalent for all elements $\llbracket k_1,b_1,f_1 \rrbracket,\ldots,\llbracket k_n,b_n,f_n\rrbracket$ of $M$: \begin{enumerate} \item $\langle M; \in\rangle \models \varphi\big[ \llbracket k_1,b_1,f_1\rrbracket, \ldots, \llbracket k_n,b_n,f_n \rrbracket\big]$. \item $b_1 \cdots b_n \in h\big(\big\{ a_1 \cdots a_n : \varphi[f_1(a_1),\ldots,f_n(a_n)]\big\}\big)$. 
\end{enumerate} Moreover, there is an elementary embedding \[j : \langle V; \in \rangle \to \langle M; \in\rangle\] defined by $ j(p) = \llbracket 0, \Diamond, c_p \rrbracket$ where $\Diamond$ is the empty sequence and for every $p \in V$ the constant function $c_p : \{\Diamond\} \to V$ is defined by $c_p(\Diamond) = p$. \end{claim} It remains to prove that the homomorphism derived from this elementary embedding $j$ is equal to $h$. We will need the following claim. (Note that the claim immediately implies $Y \subset M$, which is part of our desired conclusion): \begin{claim} For all $y \in Y$, we have $y = \llbracket 1, \langle y \rangle, \pi\rrbracket$ where $\pi : X^1 \to X$ is the trivial projection function defined by $\pi(\langle x \rangle) = x$. \end{claim} \begin{proof} Here we use the fact that our homomorphism $h$ preserves the bounded projection operators $\BP_k$. The proof is by $\in$-induction on $y \in Y$.\footnote{For a similar argument pertaining to the other type of extender, see Martin and Steel \cite[Lemma 1.5]{MarSteProjectiveDeterminacy}.} Let $y \in Y$ and assume that $y' = \llbracket 1, \langle y' \rangle, \pi\rrbracket$ for all $y' \in y$. This implies that $y \subset M$. Also, we have $\llbracket 1, \langle y \rangle, \pi\rrbracket \subset M$ by transitivity of $M$. 
For every element $\llbracket k,b,f\rrbracket$ of $M$, we have \begin{align*} \llbracket k,b,f\rrbracket \in \llbracket 1, \langle y \rangle, \pi\rrbracket \iff & b \langle y \rangle \in h\big(\big\{ a \langle x \rangle : f(a) \in \pi(\langle x\rangle)\big\}\big)\\ \iff & b \langle y \rangle \in h\big(\big\{ a \langle x \rangle : f(a) \in x\big\}\big)\\ \iff & b \langle y \rangle \in h\big(\BP_k\big(\big\{ a \langle x \rangle : f(a) = x\big\}\big)\big)\\ \iff & b \langle y \rangle \in \BP_k\big(h\big(\big\{ a \langle x \rangle : f(a) = x\big\}\big)\big)\\ \iff & (\exists y' \in y)\; b \langle y' \rangle \in h\big(\big\{ a \langle x \rangle : f(a) = x\big\}\big)\\ \iff & (\exists y' \in y)\; b \langle y' \rangle \in h\big(\big\{ a \langle x \rangle : f(a) = \pi(\langle x \rangle)\big\}\big)\\ \iff & (\exists y' \in y)\; \llbracket k,b,f\rrbracket = \llbracket 1, \langle y' \rangle, \pi\rrbracket\\ \iff & (\exists y' \in y)\; \llbracket k,b,f\rrbracket = y'\\ \iff & \llbracket k,b,f\rrbracket \in y, \end{align*} so the sets $\llbracket 1, \langle y \rangle, \pi\rrbracket$ and $y$ are subsets of each other and are therefore equal. \end{proof} Finally we will show that $h$ is derived from $j$: \begin{claim} For all $A \subset X^{\mathord{<}\omega}$, we have $h(A) = j(A) \cap Y^{\mathord{<}\omega}$. \end{claim} \begin{proof} Let $A \subset X^{\mathord{<}\omega}$. 
Then $h(A) \subset Y^{\mathord{<}\omega}$ and for all $\langle y_1,\ldots,y_k \rangle \in Y^{\mathord{<}\omega}$ we have \begin{align*} &\langle y_1,\ldots,y_k \rangle \in j(A)\\ \iff &\langle M; \in \rangle \models \langle y_1,\ldots,y_k \rangle \in j(A)\\ \iff &\langle M; \in \rangle \models \big\langle \llbracket 1, \langle y_1 \rangle, \pi\rrbracket,\ldots, \llbracket 1, \langle y_k \rangle, \pi\rrbracket \big\rangle \in \llbracket 0, \Diamond, c_A\rrbracket\\ \iff & \langle y_1 \rangle \cdots \langle y_k \rangle \Diamond \in h\Big(\Big\{ \langle x_1 \rangle \cdots \langle x_k \rangle \Diamond: \big\langle \pi(\langle x_1\rangle), \ldots, \pi(\langle x_k\rangle) \big\rangle \in c_A(\Diamond) \Big\} \Big)\\ \iff &\langle y_1, \ldots, y_k \rangle \in h\big(\big\{\langle x_1,\ldots, x_k \rangle : \langle x_1,\ldots, x_k \rangle \in A \big\} \big) \\ \iff & \langle y_1, \ldots, y_k \rangle \in h(A). && \qedhere \end{align*} \end{proof} Applying this claim to the set $A = X^{\mathord{<}\omega}$ yields $h(X^{\mathord{<}\omega}) = j(X^{\mathord{<}\omega}) \cap Y^{\mathord{<}\omega}$. On the other hand, because $h$ is a boolean homomorphism we have $h(X^{\mathord{<}\omega}) = Y^{\mathord{<}\omega}$. It follows that $Y^{\mathord{<}\omega} \subset j(X^{\mathord{<}\omega})$ and therefore $Y \subset j(X)$, completing the proof of Lemma \ref{lem:every-extender-is-derived}. \section{Acknowledgments} The author thanks Joan Bagaria for several corrections to an earlier version of this paper, and Martin Zeman and Takehiko Gappo for helpful conversations about extenders. \end{document}
\begin{document} \title[Reducibility of quantum harmonic oscillator on ${\Bbb R}^d$]{Reducibility of quantum harmonic oscillator on ${\Bbb R}^d$ with differential and quasi-periodic in time potential} \author{ Zhenguo Liang and Zhiguo Wang} \address {School of Mathematical Sciences and Key Lab of Mathematics for Nonlinear Science, Fudan University, Shanghai 200433, China} \email{[email protected]} \address {School of Mathematical Sciences, Soochow University, Suzhou 215006, China} \email{[email protected]} \thanks{The first author was partially supported by NSFC grants 11371097, 11571249; the second author was partially supported by NSFC grants 11571249, 11671192.} \date{} \begin{abstract} We improve the results by Gr\'ebert and Paturel in \cite{GP} and prove that a linear Schr\"odinger equation on ${\Bbb R}^d$ with harmonic potential $|x|^2$ and small $t$-quasiperiodic potential as $$ {\rm i}u_t - \Delta u+|x|^2u+\varepsilon V(\omega t,x)u=0, \ (t,x)\in {\Bbb R}\times{\Bbb R}^d $$ reduces to an autonomous system for most values of the frequency vector $\omega\in{\Bbb R}^n$. The new point is that the potential $V(\theta,\cdot )$ is only in ${\mathcal{C}^{\beta}}({\Bbb T}^n, \mathcal{H}^{s}({\Bbb R}^d))$ with $\beta$ large enough. As a consequence any solution of such a linear PDE is almost periodic in time and remains bounded in some suitable Sobolev norms.\\ \noindent \textsc{Keywords}. quantum harmonic oscillator, finitely differentiable, pure-point spectrum, KAM, \\ \indent\quad\quad\quad\quad reducibility\\ \noindent \textsc{Mathematics Subject Classification numbers}. 
35P05, 37K55, 81Q15 \end{abstract} \maketitle \section{Introduction}\label{introduction} \subsection{State of Reducibility Problem and Main Results}\label{s1.1} We consider the following nonautonomous linear equation in ${\Bbb R}^d$ \begin{eqnarray}\label{HOeq} {\rm i}u_t - \Delta u+|x|^2u+\varepsilon V(\omega t,x)u=0, \ u=u(t,x),\ (t,x)\in {\Bbb R}\times{\Bbb R}^d. \end{eqnarray} Here $\varepsilon>0$ is a small parameter and the frequency vector $\omega$ of the forced oscillator is regarded as a parameter in $D_0=[0,2\pi]^n\subset {\Bbb R}^n$. The function $V$ is a real multiplicative potential which is quasiperiodic in time. Namely, $V$ is a continuous function of $(\theta,x)\in{\Bbb T}^n\times{\Bbb R}^d$. We assume $V(z,\cdot)\in {\mathcal{C}^{\beta}}({\Bbb R}^n, \mathcal{H}^{s}({\Bbb R}^d))$, a space which will be defined in the following (see Definition \ref{Cbetaspace}).\\ \indent As in the usual reducibility results we consider the previous equation as a linear non-autonomous equation in the complex Hilbert space $L^2({\Bbb R}^d)$ and we prove that it reduces to an autonomous system for most values of the frequency vector $\omega$.\\ \indent Similarly to Gr\'ebert and Paturel \cite{GP}, we introduce some notations. Let $T:=-\Delta+|x|^2=-\Delta+x_1^2+\cdots+x_d^2$ be the d-dimensional quantum harmonic oscillator. Its spectrum is the sum of $d$ copies of odd integers, i.e., the spectrum of $T$ equals $\widehat{\mathcal{E}}:=\{d,d+2,d+4,\cdots\}$. 
For $j\in\widehat{\mathcal{E}}$, we denote the associated eigenspace by $ {E}_j$ whose dimension is $$d_j:=card\{(i_1,\cdots,i_d)\in(2{\Bbb N}-1)^d\ |\ i_1+\cdots+i_d=j\}\leq j^{d-1}.$$ We denote $\{\Phi_{j,l},\ l=1,2,\cdots,d_j\}$, the basis of $ {E}_j$ obtained by $d-$tensor product of Hermite functions: $\Phi_{j,l}=\varphi_{i_1}\otimes\cdots\otimes\varphi_{i_d}$ for some choice of ${i_1}+\cdots+{i_d}=j.$ Then setting $$\mathcal{E}:=\{(j,l)\in\widehat{\mathcal{E}}\times{\Bbb N}\ |\ l=1,\cdots,d_j\}.$$ $(\Phi_{a})_{a\in\mathcal{E}} $ is a basis of $L^2({\Bbb R}^d)$ and denote $w_{j,l}:=j\ {\rm for}\ (j,l)\in\mathcal{E}$. We have \begin{equation}\label{eigenfunction1} T\Phi_{a}=w_a\Phi_{a},\ a\in\mathcal{E}. \end{equation} We define in $\mathcal{E}$ an equivalence relation $a\sim b\Leftrightarrow w_a=w_b$ and denote by $[a]$ the equivalence class associated to $a\in\mathcal{E}.$ Note that $card\ [a]\leq w_a^{d-1}.$\\ \indent Let $s\geq 0$ be an integer we define $$ \mathcal{H}^s:=\{u\in \mathcal{H}^s({\Bbb R}^d,{\Bbb C})\ |\ x\mapsto x^{\alpha_1}\partial_x^{\alpha_2}u \in L^{2}({\Bbb R}^d)\ {\rm for\ any}\ \alpha_1,\alpha_2\in{\Bbb N}^d,\ 0\leq|\alpha_1|+|\alpha_2|\leq s \}.$$ We need to point it out that $\mathcal{H}^{s}$ is the form domain of $T^s$ and the domain of $T^{\frac{s}{2}}$ and this allows us to extend the definition of $\mathcal{H}^{s}$ to any nonnegative real values of $s$ (see Delort \cite{Del14}). \\ \indent To a function $u\in\mathcal{H}^s$ we associate the sequence $\xi$ of its Hermite coefficients by the formula $u(x)=\sum_{a\in\mathcal{E}}\xi_a\Phi_{a}(x).$ Then we define $\ell_{s}^2:=\{(\xi)_{a\in\mathcal{E}}\ |\ \sum_{a\in\mathcal{E}}w_a^s|\xi_a|^2 <\infty\}$, and for $s\geq0$, $u\in\mathcal{H}^s\Leftrightarrow\xi\in\ell_{s}^2$. Then we endow both spaces with the norm $\|u\|_s=\|\xi\|_s=(\sum_{a\in\mathcal{E}}w_a^s|\xi_a|^2)^{\frac12}$. 
If $s$ is a nonnegative integer, we will use the fact that the norm on $\mathcal{H}^s$ are equivalently defined as $\|T^{\frac{s}{2}}f\|_{L^{2}({\Bbb R}^d)}$ and $\sum\limits_{0\leq|\alpha_1|+|\alpha_2|\leq s}\|x^{\alpha_1}\partial_x^{\alpha_2}f\|_{ L^{2}({\Bbb R}^d)}. $\\ To introduce the main result we introduce some notations and definitions. \begin{Definition}\label{Cbetaspace} Assume that $X$ is a complex Banach space with the norm $\|\cdot \|_{X}$. Let $\mathcal{C}^{b}({\Bbb R}^n,X)$, $0<b<1$, be the space of H\"older continuous functions $f : {\Bbb R}^n\rightarrow X$ with the norm $$\|f\|_{\mathcal{C}^{b}({\Bbb R}^n, X)} : = \sup\limits_{0<|z_1-z_2|<2\pi}\frac{\|f(z_1)-f(z_2)\|_{X}}{{|z_1-z_2|^{b}}}+\sup\limits_{z\in {\Bbb R}^n} \|f(z)\|_{X}.$$ If $b=0$, then $\|f\|_{\mathcal{C}^{b}({\Bbb R}^n, X)}$ denotes the sup-norm. For $\beta=[\beta]+b$ with $0\leq b<1$, we denote by ${\mathcal{C}^{\beta}}({\Bbb R}^n, X)$ the space of functions $f: {\Bbb R}^n\rightarrow X$ with H\"older continuous partial derivatives and $\partial^{\alpha} f\in \mathcal{C}^{b}({\Bbb R}^n, X_{\alpha})$ for all multi-indices $\alpha=(\alpha_1, \cdots, \alpha_n)\in {\Bbb N}^n$, where $|\alpha| : = |\alpha_1|+\cdots+|\alpha_n| \leq \beta$ and $X_{\alpha}=\mathfrak{L}(\prod\limits_{i=1}^{|\alpha|}Y_i, X)$ with the standard norm and $Y_i : ={\Bbb R}^n$, $i=1, \cdots, |\alpha|$. We define the norm $ \|f\|_{\mathcal{C}^{\beta}({\Bbb R}^n, X)} := \sum\limits_{|\alpha|\leq \beta}\|\partial^{\alpha}f\|_{\mathcal{C}^{b}({\Bbb R}^n, X_{\alpha})}. $ If a function $f$ has a finite norm $\|f\|_{\mathcal{C}^{\beta}({\Bbb R}^n, X)}$, then we call $f\in {\mathcal{C}^{\beta}}({\Bbb R}^n, X)$. \\ \indent Denote by $\mathcal{C}^{\beta}({\Bbb T}^n, X)$ the space of all functions $f\in \mathcal{C}^{\beta}({\Bbb R}^n, X)$ that are of period $2\pi$ in all variables. We define $\|f\|_{\mathcal{C}^{\beta}({\Bbb T}^n, X)} := \|f\|_{\mathcal{C}^{\beta}({\Bbb R}^n, X)} $. 
\end{Definition} {\begin{Definition}\label{def1.1} A real potential $V:{\Bbb T}^n\times{\Bbb R}^d\ni(\theta,x)\mapsto V(\theta,x)$ is called $(s,\beta)-$admissible if $V(\theta, x)\in \mathcal{C}^{\beta}({\Bbb T}^n, \mathcal{H}^s({\Bbb R}^d))$ with a finite norm, namely, $\|V(\theta, \cdot )\|_{\mathcal{C}^{\beta}({\Bbb T}^n, \mathcal{H}^{s}({\Bbb R}^d))}\leq C$, where \begin{equation*}\left\{ \begin{array}{cc} s\geq0,& d=1,\\ s>2(d-2), & d\geq2, \end{array}\right. \end{equation*} and the constant $C$ depends on $s, \beta, n$ and $d$. \end{Definition}} Set $\gamma_1= n+d+2,\ \gamma_2=\frac{\alpha }{4+d+2\alpha }$(depending only on $s$ and $d$) and $\alpha$ given by (\ref{alpha}), we have {\begin{Theorem}\label{quantumth} Assume that the potential $V: {\Bbb T}^n\times {\Bbb R}^d\ni (\theta,x)\mapsto{\Bbb R}$ is $(s,\beta)-$admissible. There exists $\delta$ satisfying $0<\delta<\frac{\gamma_2}{24}$ and $\varepsilon_*(\beta,n,s,d,\delta)>0$, if $0<\varepsilon<\varepsilon_*$, $\beta>\max\{9(2+\frac{d}{\alpha})\frac{\gamma_1}{\gamma_2-24\delta},\ 9n,\ 12(d+1)\}$, then there exists $D_\varepsilon\subset D_0$ with ${\rm Meas}(D_0\setminus D_\varepsilon)\leq c(\beta,n,d,s,\delta)\varepsilon^{\frac{3\delta}{2+\frac{d}{\alpha}}}$, such that for all $\omega\in D_\varepsilon$ the linear Schr\"odinger equation (\ref{HOeq}) reduces to a linear autonomous equation in the space $\mathcal{H}^{s'}$ with $1\leq s'\leq \max\{s,1\}$.\\ \indent More precisely, for $\omega\in D_\varepsilon$, there exist a linear isomorphism $\Psi_\omega^\infty(\theta)\in\mathfrak{L}(\mathcal{H}^{s'})$ for $0\leq s'\leq s$, unitary on $L^2(R^d)$, where $\Psi_\omega^\infty(\theta)\in \mathcal{C}^\mu({\Bbb T}^n,\mathfrak{L}(\mathcal{H}^{s'}))$ for $0\leq s'\leq s$ with $\mu\notin{\Bbb Z}$ and $\mu\leq\frac{2}{9}\beta,$ and a bounded Hermitian operator $W=W_{\omega,\varepsilon}\in\mathfrak{L}(\mathcal{H}^{s'})$ 
such that $t\mapsto u(t,\cdot)\in\mathcal{H}^{s'}$ with $1\leq s'\leq \max\{s,1\}$ satisfies (\ref{HOeq}) if and only if $t\mapsto v(t,\cdot)= \Psi_\omega^\infty(\omega t)u(t,\cdot) $ satisfies the autonomous equation \begin{equation*}\label{reducedeq} \mathrm{i}\partial_t v -\Delta v+|x|^2 v+\varepsilon W(v)=0. \end{equation*} Furthermore, for $0\leq s'\leq s$, $$\|\Psi_\omega^{\infty}(\theta)-id\|_{\mathcal{C}^{\mu}({\Bbb T}^n, \mathfrak{L}(\mathcal{H}^{s'},\mathcal{H}^{s'+2\alpha}))}\leq C \varepsilon^{\frac{3}{2\beta}(\frac{2}{9}\beta-\mu)},\ (\theta,\omega)\in{\Bbb T}^n\times D_\varepsilon.$$ On the other hand, the infinite matrix $(W_{a}^b)_{a,b\in\mathcal{E}}$ of the operator $W$ written in the Hermite basis ($W_{a}^b=\int_{{\Bbb R}^d} \Phi_a W(\Phi_b)dx$) is block diagonal, i.e. $W_{a}^b=0\ {\rm if}\ w_a\neq w_b.$ Denote $(\langle V \rangle_{a}^b)_{a,b\in\mathcal{E}}$ be the corresponding infinite matrix of the operator $\langle V \rangle(x)=\frac{1}{(2\pi)^n}\int_{{\Bbb T}^n}V(\theta,x)d\theta$ written in the Hermite basis, we have $$\|(W_{a}^b)_{a,b\in\mathcal{E}}-\Pi((\langle V \rangle_{a}^b)_{a,b\in\mathcal{E}})\|_{\mathfrak{L}(\ell_{s'}^2)}\leq c\varepsilon^{\frac12}$$ for $0\leq s'\leq s$, where $\Pi$ is the projection on the diagonal blocks. \end{Theorem}} \begin{Remark} Comparing with Theorem 1.2 in \cite{GP} we prove the reducibility theorem in the space $\mathcal{H}^{s'}$ with $1\leq s'\leq \max\{s,1\}$, not in the energy space $ \mathcal{H}^{1}$. See Lemma \ref{convergence04} for details. \end{Remark} As a consequence of Theorem \ref{quantumth}, we prove the following corollary concerning the solutions of (\ref{HOeq}). \begin{Corollary}\label{coro01} Assume all the assumptions in Theorem \ref{quantumth} hold. 
Let $1\leq s'\leq \max\{s,1\}$ and let $u_0\in\mathcal{H}^{s'}$, then there exists $\varepsilon_*>0$ such that for $0<\varepsilon<\varepsilon_*$ and $\omega\in D_\varepsilon$ (in Theorem \ref{quantumth}), there exists a unique solution $u\in \mathcal{C}({\Bbb R},\mathcal{H}^{s'})$ of (\ref{HOeq}) such that $u(0)=u_0$. Moreover, $u$ is almost periodic in time and satisfies\\ \begin{equation*}\label{utnorm} (1-c\varepsilon) \|u_0\|_{\mathcal{H}^{s'}}\leq \|u(t)\|_{\mathcal{H}^{s'}}\leq(1+c\varepsilon )\|u_0\|_{\mathcal{H}^{s'}},\ \forall \ t\in{\Bbb R} \end{equation*} with some $c=c(s',s,d).$ \end{Corollary} Consider on $L^2({\Bbb T}^n)\otimes L^2({\Bbb R}^d)$ the Floquet Hamiltonian operator \begin{equation*}\label{floq} K:=-\mathrm{i}\sum_{k=1}^n \omega_k\frac{\partial}{\partial\theta_k}-\Delta+|x|^2+\varepsilon V(\theta,x), \end{equation*} we have \begin{Corollary}\label{coro02} Assume all the assumptions in Theorem \ref{quantumth} hold. Then there exists $\varepsilon_*>0$ such that for $0<\varepsilon<\varepsilon_*$ and $\omega\in D_\varepsilon$, the spectrum of the Floquet operator $K$ is pure point. \end{Corollary} \subsection{Related results.} The equations (\ref{HOeq}) can be generalized into a time-dependent Schr\"odinger equation \begin{eqnarray}\label{NLStime3} {\rm i} \partial_{t}\zeta(t)=(\mathcal{A}+\varepsilon \mathcal{B}(\omega t))\zeta(t), \end{eqnarray} where $\mathcal{A}$ is a positive self-adjoint operator on a separable Hilbert space $\mathcal{H}$ and the perturbation $\mathcal{B}$ is an operator-valued function from ${\Bbb T}^n$ into the space of symmetric operators on $\mathcal{H}$. Our aim is to show that for sufficiently small $\varepsilon$, and for $\omega$ belonging to a set of large measure, there exists a unitary transformation which conjugates Eq. (\ref{NLStime3}) to a time independent equation. If this is true, we will call Eq. (\ref{NLStime3}) \textsl{reducible}. From the reducibility of Eq. 
(\ref{NLStime3}) and relative properties of the transformation we can easily prove the boundedness of the Sobolev norms and pure point spectrum of the relative Floquet operator, which is defined by \begin{eqnarray*} K_{F} : = -{\rm i}\omega \cdot \partial_{\theta}+ \mathcal{A} +\varepsilon \mathcal{B}(\theta)\quad {\rm on}\ \mathcal{H} \otimes L^2({\Bbb T}^n). \end{eqnarray*} It has been proved in \cite{BEL, BLE, DS, DSV, GY00, H, JOY, N} that the Floquet operator $K_{F}$ is of pure point spectra or no absolutely continuous spectra where $\mathcal{B}$ is bounded. When $\mathcal B$ is unbounded, the first result was obtained by Bambusi and Graffi \cite{BG} where they considered the time dependent Schr\"odinger equation \begin{eqnarray*}\label{NLStime1} {\rm i}\partial_{t} \psi(x,t) = H(t) \psi(x,t), x\in {\Bbb R}; \qquad H(t) : = -\frac{d^2}{dx^2} + Q(x) +\varepsilon V(x,\omega t),\ \varepsilon\in {\Bbb R}, \end{eqnarray*} where $Q(x) \sim |x|^{2\alpha}$ with $\alpha>1$ as $|x|\rightarrow \infty$ and $|V(x,\theta)||x|^{-\beta}$ is bounded as $|x|\rightarrow \infty$ for some $\beta<\alpha-1$. This entails the pure-point nature of the spectrum of the Floquet operator \begin{eqnarray*}\label{floquetspectrum1} K_{F} : = -{\rm i}\omega \cdot \partial_{\theta}-\frac{d^2}{dx^2}+Q(x)+\varepsilon V(x,\theta), \end{eqnarray*} on $L^2({\Bbb R}) \otimes L^2({\Bbb T}^n)$ for $\varepsilon$ small. Liu and Yuan \cite{LiuYuan0} solved the case when $\beta\leq \alpha-1$. Very recently Bambusi \cite{Bam1,Bam2} solved the case when $\beta<\alpha+1$ under some additional assumptions. \\ \indent For 1-d quantum harmonic oscillator the main difficulty encountered by the traditional KAM method seems to be the eigenvalue spacing for the unperturbed operator does not grow. 
In \cite{EV} Enss and Veselic proved that, if $\omega$ is rational, the Floquet operator relative with the 1-d quantum harmonic oscillator has pure point spectrum when the perturbing potential $V$ is bounded and has sufficiently fast decay at infinity. In \cite{Com87} Combescure obtained the reducibility under time periodic, spatially localized perturbation. In \cite{Wang} Wang proved the spectrum of the Floquet operator $K$ is pure point for the quasiperiodic case where the perturbing potential has \textit{exponential decay}. Greb\'ert and Thomann \cite{GT} improved the results in \cite{Wang} from exponential decay to \textit{polynomial decay}. In \cite{WLiang} we extended the results in \cite{GT} from polynomial decay to \textit{logarithmic decay}. Quite recently, in \cite{Bam1,Bam2} Bambusi dealt with the unbounded perturbation case for 1d harmonic oscillators. For example he can deal with the case $-\partial_{xx}+x^2+\varepsilon x a_1(\omega t)- {\rm i} a_2(\omega t)\varepsilon \partial_{x}$. As Bambusi \cite{Bam1} pointed it out that his results didn't contradict with the interesting counterexamples in \cite{Del14} and \cite{GY00}. \\ \indent The results about the reducibility for higher spatial dimension are very few. In \cite{EK0} Eliasson and Kuksin obtained the reducibility for the Schr\"odinger equation on ${\Bbb T}^d$. In \cite{GP} Gr\'{e}bert and Paturel firstly obtained the reducibility for any dimensional harmonic oscillator on ${\Bbb R}^d$ under the temporal quasiperiodic and analytic perturbation. In this paper we will generalize the results in \cite{GP} from temporal analytic perturbations to differential perturbations. \\ \indent Very recently, Bambusi, Greb\'ert, Maspero and Robert \cite{BaGrMaRo} proved a reducibility result for a quantum harmonic oscillator in any dimension perturbed by a linear operator which is a polynomial of degree two in $x_j, -{\rm i\partial_j}$ with coefficients being real analytic in $\theta\in {\Bbb T}^n$. 
The proof depends on the following key fact: for polynomial Hamiltonians of degree at most 2 the correspondence between classical and quantum mechanics is exact (see also \cite{HLS}). But the reducibility problem keeps open for the quantum oscillator in arbitrary dimension with more general unbounded perturbations (see \cite{BaGrMaRo}, page 2). \\ \subsection{Brief description of the setting and main ideas of the proof.} We use the notations introduced in \cite{GP}. In phase space $(u,\bar{u})\in\mathcal{H}^0\times\mathcal{H}^0$ endowed with the symplectic structure $\mathrm{i}du\wedge d\bar{u}$, equation (\ref{HOeq}) is Hamiltonian with \begin{eqnarray}\label{HOfun} H=h(u,\bar{u})+\varepsilon p(\omega t,u,\bar{u}), \end{eqnarray} where $$h(u,\bar{u})=\int_{{\Bbb R}^d}(u_x\bar{u}_x+|x|^2u\bar{u})dx,\ \ \ \ \ \ p(\omega t,u,\bar{u})=\int_{{\Bbb R}^d} V(\omega t,x)u\bar{u}dx.$$ Expanding $u$ and $\bar{u}$ on the Hermite basis, $u(x)=\sum_{a\in\mathcal{E}}\xi_a\Phi_a(x),\ \bar{u}(x)=\sum_{a\in\mathcal{E}}\eta_a\Phi_a(x)$, the phase space $(u,\bar{u})\in\mathcal{H}^0\times\mathcal{H}^0$ becomes the phase space $(\xi,\eta)\in Y_0$ (for the definition of $Y_{s}$ see Subsection \ref{phase}). We endow $Y_0$ with the symplectic structure $\mathrm{i}d\xi\wedge d\eta$. In this setting, (\ref{HOfun}) reads as \begin{equation}\label{hameq000} H(t,\xi,\eta)= \sum\limits_{ a\in\mathcal{E}} w_a\xi_a\eta_a+ \varepsilon {p}_\omega(t,\xi,\eta) \end{equation} where $ {p}_\omega(t,\xi,\eta)=\langle \xi,P(\omega t)\eta\rangle=\sum_{a,b\in\mathcal{E}}P_a^b(\omega t)\xi_a\eta_b, $ which is quadratic in $(\xi,\eta)$ with \begin{eqnarray}\label{Pijform} P_a^b(\omega t)=\int_{{\Bbb R}^d} V(\omega t,x)\Phi_a(x)\Phi_b(x)dx,\ \ a,b\in\mathcal{E}. 
\end{eqnarray} Therefore, the reducibility problem of system (\ref{HOeq}) is equivalent to the reducibility problem for the Hamiltonian system \begin{eqnarray} \left\{\begin{array}{c} \dot{\xi}_a=-\mathrm{i}w_a\xi_a-\mathrm{i}\varepsilon (P^T(\omega t)\xi)_a,\\ \dot{\eta}_a =\ \ \mathrm{i}w_a\eta_a+\mathrm{i}\varepsilon (P(\omega t)\eta)_a, \end{array}\right.\ a\in\mathcal{E}\label{hs00} \end{eqnarray} associated to the non-autonomous quadratic Hamiltonian function (\ref{hameq000}). We will give a general reducibility result in Subsection \ref{s2.4} which can be applied to system (\ref{hs00}); the proof is based on KAM theory. We remark that KAM theory is by now well developed for nonlinear Hamiltonian PDEs in the 1-d context. See \cite{BBP2, GY1, KLiang, KP, Ku0, Ku1, Ku2, LZ, LY1, LiuYuan, P2, ZGY} for 1-d KAM results. Compared with the 1-d case, KAM results for multidimensional PDEs are relatively few. Refer to \cite{EGK, EK, GP1, GXY, GY2, PX} for n-d results. See \cite{Berti} for an almost complete picture of recent KAM theory. \noindent\emph{Highlights.} By introducing $\theta=\omega t$, system (\ref{hs00}) is equivalent to an autonomous system with Hamiltonian \begin{eqnarray}\label{hamit} \mathcal{H}(\theta,y,\xi,\eta) =\sum\limits_{j=1}^n\omega_jy_j+\langle\xi,N_0\eta\rangle + \varepsilon \langle\xi, P(\theta)\eta\rangle,\ \ \ \ \langle\xi,N_0\eta\rangle=\sum\limits_{a\in\mathcal{E}}w_a\xi_a\eta_a. \end{eqnarray} In \cite{GP} Gr\'{e}bert and Paturel assumed that the potential $V(\theta, \cdot)$ is real analytic with values in $\mathcal{H}^{s}({\Bbb R}^d)$ with $s>2(d-2)\geq 0$ when $d\geq 2$. Here we only discuss the higher-dimensional case for simplicity.
Then in Lemma 3.2 Gr\'{e}bert and Paturel \cite{GP} proved that $P( \theta)\in \mathcal{M}_{s,\alpha}(D_0, \sigma)$, where $\alpha>0$ is critical (see the definition of $\mathcal{M}_{s,\alpha}(D_0,\sigma)$ in Section \ref{section2}). In this paper we only assume that the potential $V(\theta, \cdot )\in \mathcal{C}^{\beta}({\Bbb T}^n, \mathcal{H}^s({\Bbb R}^d))$ with the same condition on $s$. Using the techniques from \cite{GP} and functional analysis (\cite{Ber}) we can prove that $P(\theta)\in {\mathcal{C}^{\beta}}({\Bbb T}^n, \mathcal{M}_{s,\alpha})$ (see Lemma \ref{L3.3}), which can be considered as a lemma parallel to Lemma 3.2 in \cite{GP}. \\ \indent Now our main problem is to establish a similar reducibility result for the Hamiltonian (\ref{hamit}) when $P(\theta)\in C^{\beta}({\Bbb T}^n, \mathcal{M}_{s,\alpha})$, and thus a smooth KAM theorem is needed here. We recall that the smoothing techniques were first introduced by Moser \cite{Moser1, Moser2} and developed later by many people; see Salamon and Zehnder \cite{SaZe}, P\"oschel \cite{Pos2}, Chierchia and Qian \cite{CQ}, Berti and Bolle \cite{BB14, BB13}, etc. An earlier reducibility result about a time-dependent Schr\"odinger operator with finitely differentiable unbounded perturbation was obtained by Yuan and Zhang in \cite{YZ13}. In \cite{Bam1} Bambusi's method of dealing with differentiable perturbations is closer to the classical proof in \cite{Sal04}. A significant difference between our paper and \cite{YZ13}, \cite{Bam1} is that we deal with matrix blocks, not single matrix elements. In the following proof we mostly follow the presentation of \cite{CQ} in the spirit of \cite{Sal04}, combined with the KAM method in \cite{GP}.
\\ \indent More precisely, we will introduce a sequence of analytic functions $P^{(\nu)}(\theta)\in \mathcal{M}_{s,\alpha}(D_0, \sigma_{\nu}),\ \nu=0,1,2,\cdots,$ with $P^{(\nu)}(\theta)\rightarrow P(\theta)$ for $\theta\in {\Bbb T}^n$ as $\sigma_\nu$ shrinks to 0 ($\nu\rightarrow\infty$); see Lemmas \ref{PP} and \ref{l4.1} for details.\\ \indent Thus, instead of considering the original function $\mathcal{H}$, in each KAM step we consider the analytic Hamiltonian function \begin{eqnarray*} H^{(\nu)}(\theta,y,\xi,\eta) =\sum\limits_{j=1}^n\omega_jy_j+\langle\xi,N_0\eta\rangle+ \varepsilon\langle\xi, P^{(\nu)}(\theta)\eta\rangle \end{eqnarray*} which is an approximation of (\ref{hamit}). We suppose that there exists a symplectic map $\Phi^\nu$ such that \begin{eqnarray*}\label{everyterm} H^{(\nu)}\circ \Phi^{\nu}=\sum\limits_{j=1}^n\omega_jy_j+\langle\xi,N_\nu\eta\rangle+ \langle\xi, P_{\nu}(\theta)\eta\rangle \end{eqnarray*} with the norm of $P_{\nu}(\theta)$ less than $\epsilon_{\nu}/2 $. Then in the $(\nu+1)^{th}$ step, we consider the Hamiltonian \begin{eqnarray*} H^{(\nu+1)}(\theta,y,\xi,\eta) &=&\sum\limits_{j=1}^n\omega_jy_j+\langle\xi,N_0\eta\rangle+ \varepsilon\langle\xi, P^{(\nu+1)}(\theta)\eta\rangle\\ &=&H^{(\nu)} +(H^{(\nu+1)}-H^{(\nu)}) \end{eqnarray*} which differs only slightly from $H^{(\nu)}$. By $\Phi^\nu$ we have \begin{eqnarray*} H^{(\nu+1)}\circ \Phi^{\nu}=H^{(\nu)}\circ \Phi^{\nu}+(H^{(\nu+1)}-H^{(\nu)})\circ \Phi^{\nu}. \end{eqnarray*} We shrink the radius of the analyticity domain from $\sigma_\nu$ to $\sigma_{\nu+1}=\sigma_\nu^{\frac{3}{2}}$ in order to prove that the norm of the additional quadratic perturbation term $(H^{(\nu+1)}-H^{(\nu)})\circ \Phi^{\nu}$ is less than $\epsilon_{\nu}/2$ too (see Lemma \ref{L4.5}).
Then from Proposition 4.1 (\cite{GP}) we can construct $\Phi_{\nu+1}$ such that \begin{eqnarray*} H^{(\nu+1)}\circ \Phi^{\nu+1}=H^{(\nu+1)}\circ \Phi^{\nu}\circ \Phi_{\nu+1}= h_{\nu+1}+\langle \xi,P_{\nu+1}(\theta)\eta\rangle, \end{eqnarray*} where $h_{\nu+1}$ is in normal form and the norm of $P_{\nu+1}$ is less than $ \epsilon_{\nu+1}/2$. Thus we can formulate the iteration lemma. Finally, letting $\nu\rightarrow \infty$, we obtain $\mathcal{H}\circ \Phi_{\omega}^{\infty}=\sum\limits_{j=1}^n\omega_jy_j+\langle\xi,N_{\infty}\eta\rangle$, where $(\xi,\eta)\in Y_{s'}$ with $1\leq s'\leq \max\{s,1\}$, and $N_\infty(\omega)=\lim_{\nu\rightarrow\infty}N_{\nu}(\omega)$ is in normal form with norm close to that of $N_0$. To carry out the above proof we need to show that $H^{(\nu)}\rightarrow \mathcal{H}$ and $\Phi^{\nu}\rightarrow \Phi_{\omega}^{\infty}$. Furthermore, from the estimate of $\Phi^{\nu}-\Phi^{\nu-1}$ and Lemma \ref{smoothinginverse} we deduce that $\Phi_\omega^\infty-id \in \mathcal{C}^{\mu}({\Bbb T}^n, \mathfrak{L}(Y_{s'}, Y_{s' +2\alpha}))$ for all $0\leq s'\leq s$, where $\mu\leq \frac{2}{9}\beta$ and $\mu$ is not an integer. \indent The paper is organized as follows: In Sect. 2 we state the abstract reducibility theorem: Theorem \ref{MainTheorem}. In Sect. 3 we prove Theorem \ref{quantumth}, Corollary \ref{coro01} and Corollary \ref{coro02}, which are direct consequences of Theorem \ref{MainTheorem}. In Sect. 4 we prove Theorem \ref{MainTheorem}. The section is split into a few subsections. Finally, the appendix contains some technical lemmas. \section{Reducibility Theorem for the Quantum Harmonic Oscillator in ${\Bbb R}^d$ with Quasiperiodic-in-Time Potential: Smooth Version.}\label{section2} \subsection{Setting}\label{phase} {\noindent\emph{Notations.}} Denote by ${\Bbb C}$, ${\Bbb R}$, ${\Bbb Z}$, ${\Bbb N}$ the sets of all complex numbers, real numbers, integers and nonnegative integers, respectively.
${\Bbb T}={\Bbb R}/ 2\pi{\Bbb Z}$. $\langle \cdot,\cdot\rangle$ is the standard scalar product in $\ell^2$, while $\langle f \rangle:=\frac{1}{(2\pi)^n}\int_{{\Bbb T}^n}f(\theta)d\theta$ denotes the mean value of $f$ on the torus ${\Bbb T}^n$. $|\cdot|$ will generally denote a supremum norm, with a notable exception: for a multi-index $k=(k_1,\cdots, k_n)\in{\Bbb Z}^n$, we denote $|k|=\sum_{i=1}^n |k_i|$. In the whole paper we use $\nu$ to stand for the KAM iteration step. \noindent\emph{Linear space.} Following the notations in Subsection \ref{s1.1}, for $s\geq0,$ we consider the complex weighted $\ell^2-$space $$\ell_{s}^2:=\{\xi=(\xi_a\in{\Bbb C})_{a\in\mathcal{E}}\big|\ \|\xi\|_s <\infty\}$$ with $\|\xi\|_s^2:=\sum\limits_{a\in\mathcal{E}} |\xi_a|^2w_a^{s}$ and $\ell_{0}^2$ is $\ell^2$. Then we define $$Y_s:=\ell_{s}^2\times\ell_{s}^2=\{\zeta=(\zeta_a=(\xi_a,\eta_a)\in{\Bbb C}^2)_{a\in\mathcal{E}}\big|\ \|\zeta\|_s<\infty\}$$ with $\|\zeta\|_s^2:=\sum\limits_{a\in\mathcal{E}} (|\xi_a|^2+|\eta_a|^2) w_a^{s}$.\\ \indent We provide the space $Y_s,\ s\geq0,$ with the symplectic structure $\mathrm{i}\sum_{a\in\mathcal{E}}d \xi_a\wedge d \eta_a$. To any smooth function $f(\xi,\eta)$ defined on a domain of $Y_s$, we associate the Hamiltonian system: \begin{eqnarray} \left\{\begin{array}{c} \dot{\xi} =-\mathrm{i}\nabla_{\eta}f(\xi,\eta)\\ \dot{\eta} =\ \ \mathrm{i}\nabla_{\xi}f(\xi,\eta) \end{array}\right.
\end{eqnarray} where $\nabla f=(\nabla_{\xi}f,\nabla_{\eta}f)^T$ is the gradient with respect to the scalar product in $Y_0.$\\ \indent For any smooth functions $f(\xi,\eta),\ g(\xi,\eta)$, the Poisson bracket of $f$, $g$ is given by \begin{eqnarray} \{f,g\}:= \mathrm{i}\sum_{a\in\mathcal{E}}\left(\frac{\partial f}{\partial\xi_a}\frac{\partial g}{\partial\eta_a}-\frac{\partial g}{\partial\xi_a}\frac{\partial f}{\partial\eta_a}\right). \end{eqnarray} \indent We also consider the extended phase space $\mathcal P_s:={\Bbb T}^n\times {\Bbb R}^{n}\times Y_s\ni (\theta, y, \xi,\eta)$. For smooth functions $f(\theta, y,\xi,\eta),\ g(\theta, y,\xi,\eta)$, the Poisson bracket is given by \begin{eqnarray} \{f,g\}:=\sum_{j=1}^n\left(\frac{\partial f}{\partial y_j}\frac{\partial g}{\partial\theta_j}-\frac{\partial g}{\partial y_j}\frac{\partial f}{\partial\theta_j}\right) + \mathrm{i}\sum_{a\in\mathcal{E}}\left(\frac{\partial f}{\partial\xi_a}\frac{\partial g}{\partial\eta_a}-\frac{\partial g}{\partial\xi_a}\frac{\partial f}{\partial\eta_a}\right). \end{eqnarray} \noindent\emph{Infinite matrices.} We denote by $\mathcal{M}_{s,\alpha}$ the set of infinite matrices $A:\mathcal{E}\times\mathcal{E}\rightarrow {\Bbb C}$ with the norm \begin{eqnarray*} |A|_{s,\alpha}:=\sup_{a,b\in\mathcal{E}}\left(w_aw_b\right)^{\alpha}\left\|A_{[a]}^{[b]}\right\|\left(\frac{\sqrt{\min(w_a,w_b)}+|w_a-w_b|}{\sqrt{\min(w_a,w_b)}}\right)^{s/2}<+\infty, \end{eqnarray*} where $A_{[a]}^{[b]}$ denotes the restriction of $A$ to the block $[a]\times[b]$ and $\|\cdot\|$ denotes the operator norm.
We also denote by $\mathcal{M}_{s,\alpha}^+$ the subspace of $\mathcal{M}_{s,\alpha}$ consisting of the infinite matrices $A$ such that \begin{eqnarray*} |A|_{s,\alpha+}:=\sup_{a,b\in\mathcal{E}}(w_aw_b)^{\alpha}\left(1+|w_a-w_b|\right)\left\|A_{[a]}^{[b]}\right\|\left(\frac{\sqrt{\min(w_a,w_b)}+|w_a-w_b|}{\sqrt{\min(w_a,w_b)}}\right)^{s/2}<+\infty. \end{eqnarray*} \indent From the definition we have the following simple facts. \begin{Lemma}\label{completed} $(\mathcal{M}_{s,\alpha}, |\cdot|_{s,\alpha})$ and $(\mathcal{M}_{s,\alpha}^+, |\cdot|_{s,\alpha+})$ are Banach spaces. \end{Lemma} \begin{Lemma}[\cite{GP},\ Lemma 2.1]\label{daishu01}Let $0<\alpha\leq1$ and $s\geq0$; then there exists a constant $c(\alpha,s)>0$ such that\\ i) If $A\in \mathcal{M}_{s,\alpha}$ and $B\in \mathcal{M}_{s,\alpha}^+$, then $AB$ and $BA$ belong to $\mathcal{M}_{s,\alpha}$ and \begin{eqnarray*} |AB|_{s,\alpha},\ |BA|_{s,\alpha}\leq c(\alpha,s)|A|_{s,\alpha}|B|_{s,\alpha+}. \end{eqnarray*} ii) If $A,B\in \mathcal{M}_{s,\alpha}^+$, then $AB$ belongs to $\mathcal{M}_{s,\alpha}^+$ and \begin{eqnarray*} |AB|_{s,\alpha+} \leq c(\alpha,s)|A|_{s,\alpha+}|B|_{s,\alpha+}. \end{eqnarray*} iii) If $A\in \mathcal{M}_{s,\alpha}$, then for any $t\geq1,\ A\in {\mathfrak{L}}(\ell^2_t,\ell^2_{-t})$ and \begin{eqnarray*} \|A\xi\|_{-t}\leq c(\alpha,s)|A|_{s,\alpha}\|\xi\|_t. \end{eqnarray*} iv) If $A\in \mathcal{M}_{s,\alpha}^+$, then $ A\in \mathfrak{L}(\ell_{s'}^2,\ell^2_{s'+2\alpha})$ for all $0\leq s'\leq s$ and \begin{eqnarray*} \|A\xi\|_{ s'+2\alpha}\leq c(\alpha,s)|A|_{s,\alpha+}\|\xi\|_{s'}; \end{eqnarray*} furthermore, $A\in\mathfrak{L}(\ell^2_{1},\ell^2_{1})$ and \begin{eqnarray*} \|A\xi\|_{1}\leq c(\alpha,s)|A|_{s,\alpha+}\|\xi\|_{1}. \end{eqnarray*} \end{Lemma} \noindent{\emph{Normal form.}} We introduce the following definitions.
\begin{Definition} A matrix $F:\ \mathcal{E}\times\mathcal{E}\rightarrow{\Bbb C}$ is called Hermitian if $F_{b}^a=\overline{F_{a}^b},\ a,b\in\mathcal{E}.$ \end{Definition} \begin{Definition} A matrix $N:\ \mathcal{E}\times\mathcal{E}\rightarrow{\Bbb C}$ is in normal form, and we denote $N\in\mathcal{NF},$ if\\ (i) $N$ is Hermitian,\\ (ii) $N$ is block diagonal, i.e., $N_{b}^a=0,$ for $w_a\neq w_b$. \end{Definition} \noindent{\emph{Quadratic form.}} To a matrix $Q=(Q_a^b)_{a,b\in\mathcal{E}}\in\mathfrak{L}(\ell_t^2,\ell_{-t}^2)$ we associate in a unique way a quadratic form $q(\xi,\eta)$ on $Y_t$ by the formula $q(\xi,\eta):=\langle\xi,Q\eta\rangle=\sum_{a,b\in\mathcal{E}}Q_{a}^b\xi_a\eta_b$, and the Poisson bracket reads $$\{q_1,q_2\}(\xi,\eta)=-\mathrm{i}\langle\xi,[Q_1,Q_2]\eta\rangle$$ where $[Q_1,Q_2]=Q_1Q_2-Q_2Q_1$ is the commutator of the two matrices $Q_1$ and $Q_2$. Moreover, if $Q\in \mathcal{M}_{s,\alpha}$ then $$\sup_{a,b\in\mathcal{E}}\left\|(\nabla_\xi\nabla_\eta q)_{[a]}^{[b]}\right\|\leq\frac{|Q|_{s,\alpha}}{(w_aw_b)^{\alpha}}\left(\frac{\sqrt{\min(w_a,w_b)}}{\sqrt{\min(w_a,w_b)}+|w_a-w_b|}\right)^{s/2}.$$ \noindent\emph{Parameter.} In this paper $\omega$ will play the role of a parameter belonging to $D_0=[0,2\pi]^n$. All the constructed functions will depend on $\omega$ with $\mathcal{C}^1$ regularity. When a function is only defined on a Cantor subset of $D_0$, the regularity is understood in the Whitney sense. \noindent\emph{A class of quadratic Hamiltonians.} Let $D\subset D_0,\ s\geq0,\ \alpha>0$ and $\sigma>0$. We denote by $\mathcal{M}_{s,\alpha}(D,\sigma)$ the set of mappings ${\Bbb T}^n_\sigma\times D\ni (\theta,\omega)\mapsto Q(\theta,\omega)\in \mathcal{M}_{s,\alpha}$ which are real analytic in $\theta\in {\Bbb T}^n_\sigma:=\left\{\theta\in{\Bbb C}^n\ \big|\ |{\rm Im}\, \theta|<\sigma\right\}$ and $\mathcal{C}^1$ in $\omega\in D$.
This space is equipped with the norm $$[Q]_{s,\alpha}^{D,\sigma}:=\sup_{\omega\in D,|{\rm Im}\, \theta|<\sigma,|k|=0,1}\left|\partial^k_\omega Q(\theta,\omega)\right|_{s,\alpha}.$$ In view of Lemma \ref{daishu01}, to a matrix $Q\in\mathcal{M}_{s,\alpha}(D,\sigma)$, we can associate the quadratic form on $Y_t$ with $t\geq 1$ $$q(\xi,\eta;\omega,\theta)=\langle\xi,Q(\omega,\theta)\eta\rangle$$ and we have $$|q(\xi,\eta;\omega,\theta)|\leq c(\alpha,s)[Q]_{s,\alpha}^{D,\sigma}\left\|(\xi,\eta)\right\|_t^2$$ for $(\xi,\eta)\in Y_t,\ \omega\in D,\ \theta\in{\Bbb T}^n_\sigma.$ \\ \indent The subspace of $ \mathcal{M}_{s,\alpha}(D,\sigma)$ formed by the $F(\theta,\omega)$ such that $\partial^k_\omega F(\theta,\omega)\in \mathcal{M}^+_{s,\alpha},\ |k|=0,1,$ is denoted by $ \mathcal{M}_{s,\alpha}^+(D,\sigma)$ and equipped with the norm $$[F]_{s,\alpha+}^{D,\sigma}:=\sup_{\omega\in D,|{\rm Im}\, \theta|<\sigma,|k|=0,1}\left|\partial^k_\omega F(\theta,\omega)\right|_{s,\alpha+}.$$ The subspace of $ \mathcal{M}_{s,\alpha}(D,\sigma)$ consisting of matrices that are independent of $\theta$ will be denoted by $\mathcal{M}_{s,\alpha}(D)$, and for $N\in \mathcal{M}_{s,\alpha}(D),$ $$[N]_{s,{\alpha}}^{D}:=\sup_{\omega\in D,|k|=0,1}|\partial^k_\omega N(\omega)|_{s,{\alpha}}.$$ From Lemma \ref{daishu01}, the following results hold. \begin{Lemma}\label{daishu}For $0<\alpha\leq1$ and $s\geq0$, there exists a constant $c(\alpha,s)>0$ such that\\ i) If $A\in \mathcal{M}_{s,\alpha}(D,\sigma)$ and $B\in \mathcal{M}_{s,\alpha}^+(D,\sigma)$, then $AB$ and $BA$ belong to $\mathcal{M}_{s,\alpha}(D,\sigma)$ and \begin{eqnarray*} [AB]_{s,\alpha}^{D,\sigma},\ [BA]_{s,\alpha}^{D,\sigma}\leq c(\alpha,s)[A]_{s,\alpha}^{D,\sigma}[B]_{s,\alpha+}^{D,\sigma}.
\end{eqnarray*} ii) If $A,B\in \mathcal{M}_{s,\alpha}^+(D,\sigma)$, then $AB$ belongs to $\mathcal{M}_{s,\alpha}^+(D,\sigma)$ and \begin{eqnarray*} [AB]_{s,\alpha+}^{D,\sigma} \leq c(\alpha,s)[A]_{s,\alpha+}^{D,\sigma}[B]_{s,\alpha+}^{D,\sigma}. \end{eqnarray*} Moreover, if $A\in \mathcal{M}_{s,\alpha}(D,\sigma)$ and $B\in \mathcal{M}_{s,\alpha}^+(D,\sigma)$ then $Ae^{B}$, $e^{B}A\in \mathcal{M}_{s,\alpha}(D,\sigma)$ and $e^{B}-Id\in \mathcal{M}_{s,\alpha}^+(D,\sigma)$, satisfying \begin{eqnarray}\label{exponorm01} [Ae^{B}]_{s,\alpha}^{D,\sigma},\ [e^{B}A]_{s,\alpha}^{D,\sigma}&\leq& [A]_{s,\alpha}^{D,\sigma}e^{c(\alpha,s)[B]_{s,\alpha+}^{D,\sigma}}. \\ \label{exponorm02} [e^{B}-Id]_{s,\alpha+}^{D,\sigma}&\leq& [B]_{s,\alpha+}^{D,\sigma}e^{c(\alpha,s )[B]_{s,\alpha+}^{D,\sigma}}. \end{eqnarray} \end{Lemma} \noindent\emph{Hamiltonian flow.} When $F$ depends smoothly on $\theta$, ${\Bbb T}^n\ni\theta\mapsto F(\theta)\in \mathcal{M}_{s,\alpha}^+$ with $0<\alpha\leq1$, we associate to $f=\langle\xi, F(\theta)\eta\rangle $ the symplectic transformation, generated by the time-1 map of $X_f$, on the extended phase space $\mathcal{P}_s: $ $$(\theta,y,\xi,\eta)\mapsto(\theta,\tilde{y},e^{-\mathrm{i}F^T}\xi,e^{\mathrm{i}F}\eta),$$ where $\tilde{y}=y+\langle \xi, \nabla_{\theta}F(\theta) \eta\rangle$. In the following we will never calculate $\tilde{y}$ explicitly, since the non-homogeneous Hamiltonian system (\ref{hs00}) is equivalent to the system (\ref{autohs}) in which the variable conjugated to $\theta$ is not involved. Thus, the above symplectic transformation is rewritten as a symplectic linear change of variables, restricted to $Y_0$, which is given by $(\xi,\eta)\mapsto(e^{-iF^T }\xi,e^{iF }\eta)$. It is well defined and invertible in $ Y_{0} $ as a consequence of Lemmas \ref{daishu01} and \ref{daishu}.
Recall that a necessary and sufficient condition for this map to preserve the symmetry $\eta=\bar{\xi}$ is $F^T(\theta)=\overline{F}(\theta)$ for $\theta\in{\Bbb T}^n$, i.e., $F$ is a Hermitian matrix. \noindent\emph{$\mathcal{C}^1$ norm of an operator in $\omega$.} Given $(\theta,\omega)\in{\Bbb T}_{\sigma}^n\times D$ and $\Phi(\theta,\omega)\in\mathfrak{L}(Y_s,Y_{s'})$ a $\mathcal{C}^1$ operator with respect to $\omega$ in the Whitney sense, we define the $C^1$ norm of $\Phi(\theta,\omega)$ with respect to $\omega$ by $$\|\Phi\|^*_{\mathfrak{L}(Y_s,Y_{s'})}=\sup_{(\theta,\omega)\in{\Bbb T}_{\sigma}^n\times D,\ |k|=0,1,\ \|\zeta\|_s\neq0}\frac{\|\partial_\omega^k\Phi (\theta,\omega)\zeta\|_{s'}}{\|\zeta\|_s}.$$ \subsection{The reducibility theorem}\label{s2.4} \noindent In this subsection we state an abstract reducibility theorem for quadratic $t$-quasiperiodic Hamiltonians of the form \begin{equation}\label{hameq} H(t,\xi,\eta)= \langle\xi, N_0\eta\rangle+ \varepsilon\langle\xi, P(\omega t)\eta\rangle, \quad (\xi,\eta)\in Y_0, \end{equation} and the associated Hamiltonian system is \begin{eqnarray} \left\{\begin{array}{c} \dot{\xi}=-\mathrm{i}N_0\xi-\mathrm{i}\varepsilon P^T(\omega t)\xi,\\ \dot{\eta}=\ \ \mathrm{i}N_0\eta+\mathrm{i}\varepsilon P (\omega t)\eta,\ \end{array}\right.\label{hameq00} \end{eqnarray} where $N_0={\rm diag}\{\lambda_a,\ a\in\mathcal{E}\}$ satisfies the following assumptions:\\ \textbf{Hypothesis A1 - Asymptotics.} There exist positive constants $c_0,\ c_1,\ c_2$ such that $$c_1 w_a\geq\lambda_a\geq c_2w_a \ {\rm and}\ |\lambda_a-\lambda_b|\geq c_0|w_a-w_b|,\ a,b\in\mathcal{E}.$$ \textbf{Hypothesis A2 - Second Melnikov condition in measure estimates.} There exist positive constants $\alpha_1,\alpha_2$ and $c_3$ such that the following holds: for each $0<\kappa<1/4$ and $K>0$ there exists a closed subset $ D':= D'(\kappa,K)\subset
D_0$ with ${\rm Meas}( D_0\setminus D')\leq c_3K^{\alpha_1}\kappa^{\alpha_2}$ such that for all $\omega\in D',$ $k\in {\Bbb Z}^n$ with $0<|k|\leq K$ and $a,b\in\mathcal{E}$ we have $$|\langle k,\omega\rangle +\lambda_a-\lambda_b|\geq \kappa(1+|w_a-w_b|).$$ \indent Then we have the following reducibility results. \begin{Theorem}\label{MainTheorem} Given a non-autonomous Hamiltonian (\ref{hameq}) with $ d\geq1$, we assume that $(\lambda_{a})_{a\in \mathcal{E}}$ satisfies Hypotheses A1-A2 and $P(\theta)\in \mathcal{C}^\beta({\Bbb T}^n,\mathcal{M}_{s,\alpha})$ with $s\geq0,\ \alpha>0$ and $\beta>\max\{9(2+\frac{d}{\alpha})\frac{\gamma_1}{\gamma_2-24\delta},\ 9n,\ 12(d+1)\}$ where $\gamma_1= \max\{\alpha_1, n+d+2\},\ \gamma_2=\frac{\alpha\alpha_2}{4+d+2\alpha\alpha_2}$, $\delta\in(0,\frac{\gamma_2}{24})$.\\ \indent Then there exists $\varepsilon_*(n,\beta,s,d,\delta )>0$ such that if $0<\varepsilon<\varepsilon_*(n,\beta,s,d,\delta )$, there exist\\ (i) a Cantor set $D_\varepsilon\subset D_0$ with ${\rm Meas}(D_0\setminus D_\varepsilon)\leq c(n,\beta,d,s,\delta)\varepsilon^{\frac{3\delta}{2+d/\alpha}}$;\\ (ii) a $\mathcal{C}^1$ family in $\omega\in D_\varepsilon$ (in the Whitney sense) of linear, unitary and symplectic coordinate transformations $\Phi_\omega^\infty(\theta): Y_0\rightarrow Y_0,\ \theta\in{\Bbb T}^n,\ \omega\in D_\varepsilon,$ of the form \begin{equation*}\label{transf} (\xi_+,\eta_+)\mapsto(\xi,\eta)=\Phi_\omega^\infty(\theta)(\xi_+,\eta_+)=( \overline{M}_\omega(\theta)\xi_+,{M}_\omega(\theta)\eta_+), \end{equation*} where $\Phi_{\omega}^{\infty}(\theta)-id \in \mathcal{C}^{\mu}({\Bbb T}^n, \mathfrak{L}(Y_{s'}, Y_{s'+2\alpha}))$ ($0\leq s'\leq s$, $\mu\leq\frac{2}{9}\beta$, $\mu\notin{\Bbb Z}$) and satisfies \begin{eqnarray*} \|\Phi_\omega^\infty-id\|_{\mathcal{C}^{\mu}({\Bbb T}^n, \mathfrak{L}(Y_{s'}, Y_{s'+2\alpha})) }&\leq& C(n,\beta, \mu, d, s)
\epsilon^{\frac{3}{2\beta}(\frac{2}{9}\beta-\mu)}; \end{eqnarray*} (iii) a $\mathcal{C}^1$ family of autonomous quadratic Hamiltonians in normal form $$H_\infty(\xi_+,\eta_+)=\langle\xi_+,N_\infty(\omega)\eta_+\rangle,\ \omega\in D_\varepsilon,$$ where $N_\infty(\omega)\in \mathcal{NF}$, in particular block diagonal (i.e. $N_{a}^b=0$ for $w_a\neq w_b$), and close to $N_0$, i.e. $[N_\infty(\omega)-N_0]_{s,\alpha}^{D_\varepsilon}\leq c(n,\beta,d,s)\varepsilon, $ such that $$H(t,\Phi_\omega^\infty(\omega t)(\xi_+,\eta_+))=H_\infty(\xi_+,\eta_+),\ t\in{\Bbb R},\ (\xi_+,\eta_+)\in Y_{s'},\ \omega\in D_\varepsilon,$$ where $1\leq s'\leq \max\{s,1\}$.\\ \end{Theorem} \begin{Remark} In fact, $\Phi_\omega^\infty(\theta)$ and its inverse are bounded operators from $Y_{1}$ into itself for any $s\geq 0$. \end{Remark} We prove Theorem \ref{MainTheorem} in Section \ref{s4}. \section{Application to the Quantum Harmonic Oscillator--Proof of Theorem \ref{quantumth}} In this section we prove Theorem \ref{quantumth} as a corollary of Theorem \ref{MainTheorem}. \subsection{Verification of the hypotheses} \begin{Lemma}\label{aspt} When $\lambda_a=w_a,\ a\in\mathcal{E},$ Hypothesis $\mathrm{A1}$ holds true with $c_0=c_1= c_2=1$. \end{Lemma} As in \cite{GP}, we have the following. \begin{Lemma}\label{aspt02} When $\lambda_a=w_a,\ a\in\mathcal{E},$ Hypothesis $\mathrm{A2}$ holds true with $D_0=[0,2\pi]^n$, $ \alpha_1=n+1,\ \alpha_2=1,\ \ c_3= c(n )$ and $$\ D':=\{\omega\in[0,2\pi]^n\big|\ |\langle k,\omega\rangle+j|\geq\kappa(1+|j|), {\rm\ for\ all\ j\in{\Bbb Z}\ and\ k\in{\Bbb Z}^n\setminus\{0\}}\}.$$ \end{Lemma} \begin{proof} Since $w_a-w_b\in{\Bbb Z}$, the claim follows by a straightforward computation. \end{proof} \begin{Lemma}\label{L3.3} Let $d\geq 1$. Suppose that the potential $V: {\Bbb T}^n\times {\Bbb R}^d\ni (\theta, x)\rightarrow {\Bbb R}$ is $(s,\beta)-$admissible.
Then there exists $\alpha=\alpha(d,s)>0$ (see (\ref{alpha})) such that the matrix function $P(\theta)$ defined by $$(P(\theta))_{a}^{b}=\int_{{\Bbb R}^d}V(\theta, x)\Phi_a(x)\Phi_b(x)dx,\qquad a,b \in \mathcal{E}, $$ belongs to ${\mathcal{C}^{\beta}}({\Bbb T}^n, \mathcal{M}_{s,\alpha})$. \end{Lemma} \begin{proof}We divide the proof into several steps.\\ \indent(a)\quad We show that \begin{equation*}\label{3.3.1} \sup_{\theta\in{\Bbb R}^n}|P(\theta)|_{s,\alpha}\leq C\left(d,s\right)\|V(\theta,\cdot)\|_{{\mathcal{C}^{\beta}}({\Bbb T}^n, \mathcal{H}^s)}. \end{equation*} Recall that for $a,b\in \mathcal{E}$, $$(P(\theta))_{a}^{b}=\int_{{\Bbb R}^d}V(\theta, x)\Phi_a(x)\Phi_b(x)dx. $$ To estimate $|P(\theta)|_{s,\alpha}$ by definition, we turn to estimating $$ \|P_{[a]}^{[b]}(\theta)\|=\sup_{\substack {\Psi_a\left(x\right)\in E_{w_a},\ \Psi_b\left(x\right)\in E_{w_b},\\ {\|\Psi_a\left(x\right)\|_{L^2({\Bbb R}^d)}}=\|\Psi_b\left(x\right)\|_{L^2({\Bbb R}^d)}=1}}\int_{{\Bbb R}^d}V(\theta, x)\Psi_a(x)\Psi_b(x)dx. $$ By a proof similar to that of Lemma 3.2 in \cite{GP}, \begin{eqnarray*} &&\left|\int_{{\Bbb R}^d} V \left(\theta,x\right) \Psi_a\left(x\right)\Psi_b\left(x\right)dx\right|\\ &\leq&\frac{C\left(d,s\right) }{\left(w_aw_b\right)^{\widetilde{\alpha}\left(p\right)/2}}\left(\frac{\sqrt{\min\left(w_a,w_b\right)}}{\sqrt{\min\left(w_a,w_b\right)}+|w_a-w_b|}\right)^{s/2}\| V\left(\theta,\cdot\right) \|_{\mathcal{H}^s\left({\Bbb R}^d\right)} \end{eqnarray*} where $\widetilde{\alpha}\left(p\right)= \frac{1}{12}$ if $d=1$, $\widetilde{\alpha}\left(p\right)= \frac{1}{3p}\left(p\geq\frac{10}{3}\right) $ if $d=2$, and $\widetilde{\alpha}\left(p\right)=\frac{1}{2}\left(\frac{d}{3p}-\frac{d-2}{6}\right)>0$ if $d>2$ and $\frac{2(d+3)}{d+1}< p<\frac{2d}{d-2}$. Set \begin{equation}\label{alpha} \alpha : = \frac{\widetilde{\alpha}\left(p\right)}{2}>0.
\end{equation} It follows that for $\theta\in{\Bbb R}^n$, \begin{equation*}\label{} |P(\theta)|_{s,\alpha}\leq C\left(d,s\right)\|V(\theta,\cdot)\|_{ \mathcal{H}^s}\leq C\left(d,s\right)\|V(\theta,\cdot)\|_{{\mathcal{C}^{\beta}}({\Bbb T}^n, \mathcal{H}^s)}. \end{equation*} (b)\quad We show that $P(z)\in \mathcal{C}^0({\Bbb R}^n, \mathcal{M}_{s,\alpha}).$ For any $z_1,z_2\in {\Bbb R}^n,\ a,b\in \mathcal{E}$, $$(P(z_1)-P(z_2))_{a}^{b}=\int_{{\Bbb R}^d}(V(z_1, x)-V(z_2, x))\Phi_a(x)\Phi_b(x)dx. $$ A discussion similar to the one above tells us that \begin{equation*}\label{3.3.2} |P(z_1)-P(z_2)|_{s,\alpha}\leq C\left(d,s\right)\|V(z_1, \cdot)-V(z_2,\cdot)\|_{ \mathcal{H}^s}. \end{equation*} It follows that $ |P(z_1)-P(z_2)|_{s,\alpha}\rightarrow0$ as $z_1\rightarrow z_2$, since $V(z, \cdot)\in{\mathcal{C}^{\beta}}({\Bbb R}^n, \mathcal{H}^s({\Bbb R}^d)).$\\ (c)\quad We show that $P(z)$ is Fr\'echet differentiable at each $z_0\in{\Bbb R}^n$ and that for $h\in{\Bbb R}^n,\ a,b\in \mathcal{E}$, $$(P'(z_0)h)_{a}^{b}=\int_{{\Bbb R}^d}\langle V'_z(z_0, x),\ h\rangle\Phi_a(x)\Phi_b(x)dx $$ satisfying \begin{equation}\label{3.3.3} \|P'(z_0)\|_{\mathfrak{L}({\Bbb R}^n, \mathcal{M}_{s,\alpha})}\leq C(d,s)\|V(z,\cdot)\|_{{\mathcal{C}^{\beta}}({\Bbb R}^n, \mathcal{H}^s)}. \end{equation} In fact, we define for $h\in{\Bbb R}^n,\ a,b\in \mathcal{E}$, $$(\mathcal{A}h)_{a}^{b}:=\int_{{\Bbb R}^d}\langle V'_z(z_0, x),\ h\rangle\Phi_a(x)\Phi_b(x)dx. $$ Clearly, $\mathcal{A}$ is a linear map on ${\Bbb R}^n$. Since $\langle V'_z(z_0, \cdot),\ h\rangle\in \mathcal{H}^s({\Bbb R}^d)$, from a discussion similar to that in (a) we obtain \begin{equation*}\label{} |\mathcal{A}h|_{s,\alpha}\leq C\left(d,s\right)\| V'_z(z_0, \cdot)\|_{ \mathfrak{L}({\Bbb R}^n, \mathcal{H}^s)}\|h\|\leq C\left(d,s\right)\|V(\theta,\cdot)\|_{{\mathcal{C}^{\beta}}({\Bbb R}^n, \mathcal{H}^s)}\|h\|.
\end{equation*} It follows that \begin{equation*}\label{3.3.4} \|\mathcal{A}\|_{\mathfrak{L}({\Bbb R}^n, \mathcal{M}_{s,\alpha})}\leq C(d,s)\|V(z,\cdot)\|_{{\mathcal{C}^{\beta}}({\Bbb R}^n, \mathcal{H}^s)}. \end{equation*} \indent Next we show that \begin{equation}\label{3.3.5} |P(z)-P(z_0)-\mathcal{A}(z-z_0)|_{s,\alpha}=o(|z-z_0|),\ z\rightarrow z_0. \end{equation} For $ a,b\in \mathcal{E}$, $$(P(z)-P(z_0)-\mathcal{A}(z-z_0))_{a}^{b}=\int_{{\Bbb R}^d}\left(V(z,x)-V(z_0,x)- V'_z(z_0, x)(z-z_0)\right)\Phi_a(x)\Phi_b(x)dx. $$ Since $V(z, \cdot)\in{\mathcal{C}^{\beta}}({\Bbb R}^n, \mathcal{H}^s({\Bbb R}^d)),$ it follows from Taylor's theorem that \begin{equation}\label{3.3.6} \|V(z,\cdot)-V(z_0,\cdot)- V'_z(z_0, \cdot)(z-z_0)\|_{ \mathcal{H}^s}=o(|z-z_0|),\ z\rightarrow z_0. \end{equation} From a discussion similar to the one above we obtain \begin{equation*}\label{} |P(z)-P(z_0)-\mathcal{A}(z-z_0)|_{s,\alpha}\leq C\left(d,s\right)\|V(z,\cdot)-V(z_0,\cdot)- V'_z(z_0, \cdot)(z-z_0)\|_{ \mathcal{H}^s}. \end{equation*} Combining this with (\ref{3.3.6}) we obtain (\ref{3.3.5}). Thus $P(z)$ is Fr\'echet differentiable at $z=z_0$ with $P'(z_0)=\mathcal{A}$, which satisfies (\ref{3.3.3}).\\ (d)\quad We show that $P(z)\in \mathcal{C}^1({\Bbb R}^n, \mathcal{M}_{s,\alpha}).$ Note that for any $z_1,z_2\in {\Bbb R}^n,\ a,b\in \mathcal{E}$, $$((P'(z_1)-P'(z_2))h)_{a}^{b}=\int_{{\Bbb R}^d}\langle V'_z(z_1, x)-V'_z(z_2,x),h\rangle\Phi_a(x)\Phi_b(x)dx. $$ Thus \begin{eqnarray*}\label{3.3.7} |(P'(z_1)-P'(z_2))h|_{s,\alpha}&\leq& C\left(d,s\right)\|\langle V'_z(z_1, \cdot)-V'_z(z_2,\cdot),h\rangle\|_{ \mathcal{H}^s}\\ &\leq& C\left(d,s\right)\| V'_z(z_1, \cdot)-V'_z(z_2,\cdot)\|_{ \mathfrak{L}({\Bbb R}^n, \mathcal{H}^s)}\|h\|. \end{eqnarray*} It follows that \begin{equation*}\label{} \|P'(z_1)-P'(z_2)\|_{\mathfrak{L}({\Bbb R}^n, \mathcal{M}_{s,\alpha})}\leq C(d,s)\| V'_z(z_1, \cdot)-V'_z(z_2,\cdot)\|_{\mathfrak{L}({\Bbb R}^n, \mathcal{H}^s)}.
\end{equation*} Since $V(z, \cdot)\in{\mathcal{C}^{\beta}}({\Bbb R}^n, \mathcal{H}^s({\Bbb R}^d))$, we have $ \| V'_z(z_1, \cdot)-V'_z(z_2,\cdot)\|_{\mathfrak{L}({\Bbb R}^n, \mathcal{H}^s)}\rightarrow0$ as $z_1\rightarrow z_2$, which means that $ \|P'(z_1)-P'(z_2)\|_{\mathfrak{L}({\Bbb R}^n, \mathcal{M}_{s,\alpha})}\rightarrow0$ as $z_1\rightarrow z_2$.\\ (e)\quad Inductively, we assume that $P(z)\in \mathcal{C}^m({\Bbb R}^n, \mathcal{M}_{s,\alpha}),$ $m\leq \beta-1$, with $$\left(P^{(m)}(z) (h_1,\cdots,h_m)\right)_{a}^{b}=\int_{{\Bbb R}^d}V_z^{(m)}(z,x) (h_1,\cdots,h_m)\Phi_a(x)\Phi_b(x)dx $$ satisfying \begin{equation}\label{3.3.8} \|P^{(m)}(z)\|_{\mathfrak{L}_m({\Bbb R}^n, \mathcal{M}_{s,\alpha})}\leq C(d,s)\|V(z,\cdot)\|_{{\mathcal{C}^{\beta}}({\Bbb R}^n, \mathcal{H}^s)}, \end{equation} where $\mathfrak{L}_m({\Bbb R}^n, \mathcal{M}_{s,\alpha})$ denotes the multilinear operator space $\mathfrak{L}(\underbrace{{\Bbb R}^n\times\cdots\times{\Bbb R}^n}_{m}, \mathcal{M}_{s,\alpha}).$ Then we show that $P(z)\in \mathcal{C}^{m+1}({\Bbb R}^n, \mathcal{M}_{s,\alpha})$ with $$\left(P^{(m+1)}(z) (h_1,\cdots,h_{m+1})\right)_{a}^{b}=\int_{{\Bbb R}^d}V_z^{(m+1)}(z,x) (h_1,\cdots,h_{m+1})\Phi_a(x)\Phi_b(x)dx$$ and that (\ref{3.3.8}) holds with $m$ replaced by $m+1$. We follow the method of steps (c) and (d), and divide the proof into two parts ($e_1$) and ($e_2$), respectively.\\ ($e_1$)\quad We show that $P^{\left(m\right)}\left(z\right)$ is Fr\'echet differentiable at $z=z_0$ and that for $a,b\in \mathcal{E}$, $$\left(P^{\left(m+1\right)}\left(z_0\right)(h_1,\cdots,h_{m+1})\right)_{a}^{b}=\int_{{\Bbb R}^d} V^{\left(m+1\right)}_z\left(z_0, x\right)(h_1,\cdots,h_{m+1})\Phi_a\left(x\right)\Phi_b\left(x\right)dx $$ with \begin{equation}\label{3.3.9} \|P^{\left(m+1\right)}\left(z_0\right)\|_{\mathfrak{L}_{m+1}\left({\Bbb R}^n, \mathcal{M}_{s,\alpha}\right)}\leq C\left(d,s\right)\|V\left(z,\cdot\right)\|_{{\mathcal{C}^{\beta}}\left({\Bbb R}^n, \mathcal{H}^s\right)}.
\end{equation} In fact, given $z\in{\Bbb R}^n$, we define for $h_1,\cdots,h_{m+1}\in{\Bbb R}^n,\ a,b\in \mathcal{E}$, $$\left(\mathcal{B}(z)\left(h_1,\cdots,h_{m+1}\right)\right)_{a}^{b}:=\int_{{\Bbb R}^d} V^{\left(m+1\right)}_z\left(z, x\right)\left(h_1,\cdots,h_{m+1}\right)\Phi_a\left(x\right)\Phi_b\left(x\right)dx. $$ Since $V^{\left(m+1\right)}_z\left(z, \cdot\right)\in \mathfrak{L}_{m+1}\left({\Bbb R}^n, \mathcal{H}^s\right)$, the map $\mathcal{B}(z)$ is clearly multilinear and $ V^{\left(m+1\right)}_z\left(z, \cdot\right)\left(h_1,\cdots,h_{m+1}\right)\in \mathcal{H}^s\left({\Bbb R}^d\right)$; hence $\mathcal{B}(z)\in \mathfrak{L}_{m+1}\left({\Bbb R}^n, \mathcal{M}_{s,\alpha}\right)$. Combining this with \begin{equation*}\label{} \sup_{z\in{\Bbb R}^n}\|V^{\left(m+1\right)}_z\left(z\right)\|_{\mathfrak{L}_{m+1}\left({\Bbb R}^n, \mathcal{H}^s\right)}\leq \|V\left(z,\cdot\right)\|_{{\mathcal{C}^{\beta}}\left({\Bbb R}^n, \mathcal{H}^s\right)} \end{equation*} and a discussion similar to the one above, we obtain \begin{eqnarray*}\label{} |\mathcal{B}(z)\left(h_1,\cdots,h_{m+1}\right)|_{s,\alpha}&\leq& C\left(d,s\right)\|V^{\left(m+1\right)}_z\left(z\right)\left(h_1,\cdots,h_{m+1}\right)\|_{ \mathcal{H}^s}\\ &\leq& C\left(d,s\right)\|V\left(z,\cdot\right)\|_{\mathcal{C}^{\beta}\left({\Bbb R}^n, \mathcal{H}^s\right)}\|h_{1}\|\cdots \|h_{m+1}\|. \end{eqnarray*} It follows that \begin{equation*}\label{3.3.10} \|\mathcal{B}\|_{\mathfrak{L}_{m+1}\left({\Bbb R}^n, \mathcal{M}_{s,\alpha}\right)}\leq C\left(d,s\right)\|V\left(z,\cdot\right)\|_{{\mathcal{C}^{\beta}}\left({\Bbb R}^n, \mathcal{H}^s\right)}. \end{equation*} Set $B=\mathcal{B}\left(z_0\right)$. Next we show that \begin{equation*}\label{3.3.11} |P^{\left(m\right)}\left(z\right)-P^{\left(m\right)}\left(z_0\right)-B\left(z-z_0\right)|_{s,\alpha}=o\left(|z-z_0|\right),\ z\rightarrow z_0.
\epsilonnd{equation*} For $ a,b\in \mathcal{E}$, \begin{eqnarray*}\langlebel{} &&\left(\left(P^{\left(m\right)}\left(z\right)-P^{\left(m\right)}\left(z_0\right)-B\left(z-z_0\right)\right)\left(h_1,\cdots,h_{m}\right)\right)_{a}^{b}\\ &=&\int_{{\Bbb R}^d}\left(V^{\left(m\right)}_z\left(z,x\right)-V^{\left(m\right)}_z\left(z_0,x\right)- V^{\left(m+1\right)}_z\left(z_0, x\right)\left(z-z_0\right)\right)\left(h_1,\cdots,h_{m}\right)\Phi_a\left(x\right)\Phi_b\left(x\right)dx. \epsilonnd{eqnarray*} Note $V\left(z, \cdot\right)\in{\mathcal{C}^{\beta}}\left({\Bbb R}^n, \mathcal{H}^s\left({\Bbb R}^d\right)\right),$ it follows from Taylor's theorem \begin{equation*}\langlebel{3.3.12} \|V^{\left(m\right)}_z\left(z,\cdot\right)-V^{\left(m\right)}_z\left(z_0,\cdot\right)- V^{\left(m+1\right)}_z\left(z_0, \cdot\right)\left(z-z_0\right)\|_{\mathfrak{L_{m+1}}\left({\Bbb R}^n, \mathcal{H}^s\right)}=o\left(|z-z_0|\right),\ z\rightarrow z_0. \epsilonnd{equation*} From a similar discussion as in (c) we obtain \begin{equation*}\langlebel{} |\left(P^{\left(m\right)}\left(z\right)-P^{\left(m\right)}\left(z_0\right)-B\left(z-z_0\right)\right)\left(h_1,\cdots,h_{m}\right)|_{s,\alpha}\leq C\left(d,s\right)o\left(|z-z_0|\right)\|h_{1}\|\cdots \|h_{m}\|,\ z\rightarrow z_0. \epsilonnd{equation*} Thus $P^{\left(m\right)}\left(z\right)$ is Fr\'echet differentiable and $P^{\left(m+1\right)}\left(z \right)=\mathcal{B}$ which satisfies (\ref{3.3.9}).\\ ($e_2$)\quad Since $$ \| V^{\left(m+1\right)}_z\left(z_1, \cdot\right)-V^{\left(m+1\right)}_z\left(z_2,\cdot\right)\|_{\mathfrak{L}_{m+1}\left({\Bbb R}^n, \mathcal{H}^s\right)}\rightarrow0\ {\rm as}\ z_1\rightarrow z_2$$ by $V\left(z, \cdot\right)\in{\mathcal{C}^{\beta}}\left({\Bbb R}^n, \mathcal{H}^s\left({\Bbb R}^d\right)\right)$. 
From a similar discussion in (d) we have \begin{eqnarray}\langlebel{3.3.13} \nonumber && \|P^{\left(m+1\right)}\left(z_1\right)-P^{\left(m+1\right)}\left(z_2\right)\|_{\mathfrak{L}_{m+1}\left({\Bbb R}^n, \mathcal{M}_{s,\alpha}\right)}\\ &\leq& C\left(d,s\right)\| V^{\left(m+1\right)}_z\left(z_1, \cdot\right)-V^{\left(m+1\right)}_z\left(z_2, \cdot\right)\|_{\mathfrak{L}_{m+1}\left({\Bbb R}^n, \mathcal{H}^s\right)}, \epsilonnd{eqnarray} which means that $P\left(z\right)\in \mathcal{C}^{m+1}\left({\Bbb R}^n, \mathcal{M}_{s,\alpha}\right)$ and \begin{equation*}\langlebel{} \|P^{\left(m+1\right)}\left(z\right)\|_{\mathfrak{L}_{m+1}\left({\Bbb R}^n, \mathcal{M}_{s,\alpha}\right)}\leq C\left(d,s\right)\|V\left(z,\cdot\right)\|_{{\mathcal{C}^{\beta}}\left({\Bbb R}^n, \mathcal{H}^s\right)}. \epsilonnd{equation*} (f)\quad Finally, consider $P\left(z\right)\in \mathcal{C}^{\beta}\left({\Bbb R}^n, \mathcal{M}_{s,\alpha}\right)$. Denote $b:=\beta-[\beta]$, we show that for any $1\leq l\leq[\beta]$ integer and $0<|z_1-z_2|<2\pi,$ \begin{equation}\langlebel{3.3.14} \frac{\|P^{\left(l\right)}\left(z_1\right)-P^{\left(l\right)}\left(z_2\right)\|_{\mathfrak{L}_{l}\left({\Bbb R}^n, \mathcal{M}_{s,\alpha}\right)}}{{|z_1-z_2|^{b}}}\leq \|V\left(z,\cdot\right)\|_{{\mathcal{C}^{\beta}}\left({\Bbb R}^n, \mathcal{H}^s\right)}. 
\end{equation} In fact, note that $V\left(z, \cdot\right)\in{\mathcal{C}^{\beta}}\left({\Bbb R}^n, \mathcal{H}^s\left({\Bbb R}^d\right)\right),$ from the definition we obtain that for $0<|z_1-z_2|<2\pi,$ $$\frac{\|V_z^{\left(l\right)}\left(z_1, \cdot\right)-V_z^{\left(l\right)}\left(z_2, \cdot\right)\|_{\mathfrak{L}_{l}\left({\Bbb R}^n, \mathcal{H}^s\right)}}{{|z_1-z_2|^{b}}}\leq \|V\left(z,\cdot\right)\|_{{\mathcal{C}^{\beta}}\left({\Bbb R}^n, \mathcal{H}^s\right)}.$$ Thus (\ref{3.3.14}) holds by (\ref{3.3.13}) for any $1\leq l\leq[\beta]$, which means that $P\left(z\right)\in \mathcal{C}^{\beta}\left({\Bbb R}^n, \mathcal{M}_{s,\alpha}\right)$ and $$\|P\left(z\right)\|_{{\mathcal{C}^{\beta}}\left({\Bbb R}^n, \mathcal{M}_{s,\alpha}\right)}\leq C\left(d,s\right)\|V\left(z,\cdot\right)\|_{{\mathcal{C}^{\beta}}\left({\Bbb R}^n, \mathcal{H}^s\right)}. $$ \end{proof} \subsection{Proof of Theorem \ref{quantumth}} Following the discussion in Subsection \ref{s1.1}, the Schr\"odinger equation (\ref{HOeq}) is equivalent to the Hamiltonian system with Hamiltonian (\ref{HOfun}). Expanding it on the Hermite basis $(\Phi_a)_{a\in\mathcal{E}}$, it is equivalent to the system governed by (\ref{hameq000}), which reads as (\ref{hs00}), a special case of system (\ref{hameq00}) with $\lambda_a=w_a$ and $P(\omega t)$ satisfying (\ref{Pijform}).
By the lemmas given above, if $V$ is $(s,\beta)$-admissible, we can apply Theorem \ref{MainTheorem} to (\ref{hs00}) with $\gamma_1= n+d+2,\ \gamma_2=\frac{\alpha }{4+d+2\alpha }$ and $\alpha$ given by (\ref{alpha}); this leads to Theorem \ref{quantumth}.\\ \indent More precisely, in the new coordinates given by Theorem \ref{MainTheorem}, $(\xi,\eta)=(\overline{ {M}}_\omega\xi_+, {M}_\omega\eta_+)$, system (\ref{hs00}) becomes autonomous and decomposes into blocks as follows: \begin{eqnarray*} \left\{\begin{array}{c} \dot{\xi}_{+,[a]}=-\mathrm{i} (\overline{{N}}_{\infty})_{[a]}\xi_{+,{[a]}},\\ \dot{\eta}_{+,[a]}= \ \ \mathrm{i}(N_{\infty})_{[a]}\eta_{+,{[a]}}, \end{array}\right.\ \ \ a\in \mathcal{E},\label{inftyeq} \end{eqnarray*} where $N_\infty=N_\infty(\omega)\in\mathcal{NF}$. Hence the solution starting from $(\xi_+(0),\eta_+(0))$ is given by $$(\xi_+(t),\eta_+(t))=(e^{-\mathrm{i}{t\overline{N}}_\infty }\xi_+(0),e^{ \mathrm{i}{t {N}}_\infty }\eta_+(0)),\ t\in{\Bbb R}.$$ Then the solution $u(t,x)$ of (\ref{HOeq}) corresponding to the initial data $u_0(x)=\sum_{a\in\mathcal{E}}\xi_a(0)\Phi_a(x)\in \mathcal{H}^{s'}$ with $1\leq s'\leq \max\{s,1\}$ is formulated by $u(t,x)=\sum_{a\in\mathcal{E}}\xi_a(t)\Phi_a(x)$ with \begin{eqnarray*}\label{xit} \xi(t)=\overline{M}_\omega(\omega t)e^{-\mathrm{i}{t\overline{N}}_\infty }M^{T}_\omega(0)\xi(0), \end{eqnarray*} where we use the fact $(\overline{ {M}}_\omega)^{-1}=M^{T}_\omega.$\\ \indent In other words, let us define the transformation $\Psi_\omega(\theta)\in\mathfrak{L}(\mathcal{H}^{s'}),\ 0\leq s'\leq s,$ by $$\Psi_\omega(\theta)(\sum_{a\in\mathcal{E}}\xi_a\Phi_a(x)):=\sum_{a\in\mathcal{E}}(M^T_\omega(\theta)\xi)_a\Phi_a(x)=\sum_{a\in\mathcal{E}}\xi_{+,a}\Phi_a(x).$$ From a straightforward computation (the proof is given in the Appendix), we have \begin{Lemma}\label{psismooth} For $0\leq s'\leq s$ and $\alpha>0$ given by
(\ref{alpha}), $$\|\Psi_\omega(\theta)-id\|_{\mathcal{C}^{\mu}({\Bbb T}^n, \mathfrak{L}(\mathcal{H}^{s'},\mathcal{H}^{s'+2\alpha}))}\leq C \varepsilon^{\frac{3}{2\beta}(\frac{2}{9}\beta-\mu)},$$ where $\mu$ is defined in Theorem \ref{MainTheorem}. \end{Lemma} \indent Moreover, $u(t,x)$ satisfies (\ref{HOeq}) if and only if $v(t,x)=\Psi_\omega(\omega t)u(t,x)$ satisfies the autonomous equation: $$i\partial_t v+(-\partial_{xx}+|x|^2)v+\varepsilon Wv=0,$$ where $W(\sum_{a\in\mathcal{E}}\xi_{a}\Phi_a(x))=\frac{1}{\varepsilon}\sum_{a\in\mathcal{E}}((N_\infty-N_0)^T\xi)_a\Phi_a(x)$. Denote by $(W_a^b)_{a,b\in\mathcal{E}}$ the infinite matrix of the operator $W$ written in the Hermite basis ($W_a^b=\int_{{\Bbb R}^d} W\Phi_a\Phi_bdx$); then $W $ is block diagonal. Denote by $\langle V \rangle(x):=\frac{1}{(2\pi)^n}\int_{{\Bbb T}^n}V(\theta,x)d\theta$ the mean value of $V$ on the torus, with $(\langle V \rangle_a^b)_{a,b\in\mathcal{E}}$ the corresponding infinite matrix, where $$\langle V \rangle_a^b= \int_{{\Bbb R}^d} \langle V \rangle \Phi_a\Phi_b dx =\frac{1}{(2\pi)^n}\int_{{\Bbb T}^n}P_a^b(\theta)d\theta=(P_a^b)^0$$ with $P_a^b(\theta)=\sum_{k\in{\Bbb Z}^n}(P_a^b)^k e^{ik\theta}.$ From (\ref{ntilde}) with $\nu=0$, $ (\widetilde{N}_0)_{[a]} =\frac{\varepsilon}{(2\pi)^n}\int_{{\Bbb T}^n} P^{(1)} _{[a]}(\theta)d\theta.$ It holds that \begin{eqnarray*} \|(\widetilde{N}_0)_{[a]}-\varepsilon P_{[a]}^0\|&=& \frac{\varepsilon}{(2\pi)^n}\left\|\int_{{\Bbb T}^n}(P^{(1)}(\theta)-P(\theta))_{[a]}d\theta\right\|\\ &\leq& c\varepsilon w_a^{-2\alpha}[P^{(1)}(\theta)-P(\theta)]^{{\Bbb T}^n,D_\varepsilon}_{s,\alpha}\\ &\leq& c\varepsilon w_a^{-2\alpha}{\sigma_1}^{\beta}= cw_a^{-2\alpha}\varepsilon \epsilon_1 \end{eqnarray*} by Lemma \ref{L4.7}.
On the other hand, from Lemma \ref{convergence01}, $[N_\infty-N_1]^{{\Bbb T}^n,D_\varepsilon}_{s,\alpha}\leq 2\epsilonpsilon_1.$ Thus, $$w_a^{ 2\alpha}\|(W-\langle V \rangle)_{[a]}\|\leq \frac{w_{a}^{2\alpha}}{\varepsilon}\|(N_\infty-N_0 -\widetilde{N}_{0 })_{[a]}\|+\frac{w_{a}^{2\alpha}}{\varepsilon}\|(\widetilde{N}_{0 }-\varepsilon P ^0)_{[a]}\| \leq c \varepsilon^{\frac12}. $$ Therefore, $$\|(W_{a}^b)_{a,b\in\mathcal{E}}-\Pi(\langle V \rangle_{a}^b)_{a,b\in\mathcal{E}}\|_{\mathfrak{L}(\epsilonll_{s'}^2)}\leq c\varepsilon^{\frac12}$$ for $0\leq s'\leq s$, where $\Pi$ is the projection on the diagonal blocks.\\ \indent The proofs of Corollary \ref{coro01} and \ref{coro02} are similar as \cite{GP}, we omit it for simplicity.\\ \section{Proof of Theorem \ref{MainTheorem}}\langlebel{s4} \indent In this section we will use a universal constant $C$ to simplify the proof, which depends on $n,\beta,d,s$ and is changing in the context. \\ \indent The system (\ref{hameq00}) is equivalent to the autonomous system: \begin{eqnarray} \left\{\begin{array}{c} \displaystyleot{\xi}_a=-\mathrm{i}\langlembda_a\xi_a-\mathrm{i}\varepsilon (P^T(\theta)\xi)_a,\\ \displaystyleot{\epsilonta}_a=\ \ \mathrm{i}\langlembda_a\epsilonta_a+\mathrm{i}\varepsilon (P(\theta)\epsilonta)_a,\ \ \\ \displaystyleot{y}= - \varepsilon\langle\xi, \nabla_{\theta} P(\theta)\epsilonta\rangle, \\ \displaystyleot{\theta}=\ \omega,\qquad\qquad\qquad\qquad\ \epsilonnd{array} a\in\mathcal{E}\right.\langlebel{autohs} \epsilonnd{eqnarray} with the Hamiltonian \begin{equation}\langlebel{autoH} \mathcal{H}(\theta,y,\xi,\epsilonta,\omega)= \sum\limits_{j=1}^n\omega_jy_j+\sum\limits_{a\in\mathcal{E}} \langlembda_a\xi_a\epsilonta_a+ \varepsilon\langle\xi, P(\theta)\epsilonta\rangle \epsilonnd{equation} in the extended phase space $\mathcal P_s:={\Bbb T}^n\times {\Bbb R}^{n}\times Y_s$, and $\langlembda_{a} $ satisfies Hypothesis $\mathrm{A1}-\mathrm{A2}$. 
\subsection{Analytic approximation to a ${\mathcal{C}^{\beta}}$ smooth function} \indent The ${\mathcal{C}^{\beta}}$ smooth Hamiltonian function (\ref{autoH}) can be approximated by a series of \emph{analytic} Hamiltonians \begin{equation*}\label{autoHnu} H^{(\nu)}(\theta,y,\xi,\eta,\omega)= \sum\limits_{j=1}^n\omega_jy_j+\sum\limits_{a\in\mathcal{E}} \lambda_a\xi_a\eta_a+ \varepsilon\langle\xi, P^{(\nu)}(\theta)\eta\rangle,\ \nu=1,2,\cdots, \end{equation*} where $P^{(\nu)}(\theta)$ will be given in the following. In order to extend the $\mathcal{C}^{\beta}$ function to a complex neighborhood of ${\Bbb T}^n$, we need the following well-known result. \\ \begin{Lemma}(Jackson, Moser and Zehnder)\label{smoothing} Let $X$ be a complex Banach space and $f\in {\mathcal{C}^{\beta}}({\Bbb R}^n, X)$ for some $\beta>0$ with finite ${\mathcal{C}^{\beta}}({\Bbb R}^n, X)$ norm. Let $\phi$ be a radially symmetric, $\mathcal{C}^{\infty}$ function, having as support the closure of the unit ball centered at the origin, where $\phi$ is completely flat and takes the value $1$, and let $K=\hat{\phi}$ be its Fourier transform. For all $\sigma>0$ define $$f_{\sigma}(z,\cdot):=S_{\sigma}f(z,\cdot)=K_{\sigma}\ast f=\frac{1}{\sigma^n}\int_{{\Bbb R}^n}K(\frac{z-y}{\sigma})f(y, \cdot)dy.$$ Then there exists a constant $C>0$ depending on $\beta, n$ and $X$ such that the following holds: for any $\sigma>0$, the function $f_{\sigma}(z)$ is a real analytic function from ${\Bbb C}^n$ to $X$ such that if $$\Delta_{\sigma}^{n} : = \left\{z\in {\Bbb C}^n\left| \right.
|{\Bbb I}m z_j|\leq \sigma, 1\leq j\leq n\right\}, $$ then for any $\nu\in {\Bbb N}^n$ such that $|\nu|\leq \beta$ one has \begin{eqnarray*} \sup\limits_{z\in \Delta_{\sigma}^n}\left\|\partial^{\nu}f_{\sigma}(z)-\sum\limits_{|k|\leq \beta-|\nu|}\frac{\partial^{k+\nu}f({\Bbb R}e z)}{k!}({\rm i}{\Bbb I}m z)^{k}\right\|_{X_{\nu}}\leq C\|f\|_{{\mathcal{C}^{\beta}}({\Bbb R}^n, X)}\sigma^{\beta-|\nu|}, \end{eqnarray*} and for all $0\leq \sigma_1\leq \sigma$, $$\sup\limits_{z\in \Delta_{\sigma_1}^n}\|\partial^{\nu}f_{\sigma}(z)-\partial^{\nu}f_{\sigma_1}(z)\|_{X_{\nu}}\leq C\|f\|_{{\mathcal{C}^{\beta}}({\Bbb R}^n, X)}\sigma^{\beta-|\nu|}.$$ The function $f_{\sigma}$ preserves periodicity (i.e.\ if $f$ is $T$-periodic in any of its variables $z_j$, so is $f_{\sigma}$). \end{Lemma} The same theorem was also used in \cite{Bam1}, \cite{YZ13}, etc. \\ \indent The converse statement of Lemma \ref{smoothing} holds only if $\mu$ is not an integer. A classical version of this converse result is due to Bernstein and relates the differentiability properties of a periodic function to quantitative estimates for an approximating sequence of trigonometric polynomials. In the following lemma, we suppose $X$ is a complex Banach space as above. \begin{Lemma}\label{smoothinginverse}Let $\ell\geq0$ and $n$ be a positive integer.
Then there exists a constant $c=c(\epsilonll,n)$ such that if $f:{\Bbb R}^n\rightarrow X$ is the limit of a sequence of real analytic maps $f_\nu(x)$ in the strips $|{\Bbb I}m x|\leq\sigma_\nu:=\sigma^{(\frac{3}{2})^\nu}$ with $0<\sigma\leq\frac{1}{4}$ and \begin{eqnarray*} f_0=0,\quad|f_\nu-f_{\nu-1}|_{X}\leq c\sigma_\nu^\epsilonll \epsilonnd{eqnarray*} for $|{\Bbb I}m x|\leq\sigma_\nu$, $\nu=1,2,\cdots,$ then $f\in \mathcal{C}^\mu({\Bbb R}^n,X)$ for every $\mu\leq\epsilonll$ which is not an integer and \begin{eqnarray*} |f|_{\mathcal{C}^{\mu}({\Bbb R}^n, X)}\leq \frac{4c(\epsilonll,n)}{\iota(1-\iota)}\sigma^{\epsilonll-\mu},\ 0<\iota:=\mu-[\mu]<1. \epsilonnd{eqnarray*} \epsilonnd{Lemma} For the proof see section \ref{appendix}.\\ \indent For our applications we choose $f(z) = P(z)$, $X=(\mathcal{M}_{s,\alpha},|\cdot |_{s,\alpha})$. From Lemma \ref{smoothing} we denote \begin{equation}\langlebel{moguangpz} S_{\sigma}P(z) = \sigma^{-n}\int_{{\Bbb R}^n}K(\frac{z-y}{\sigma})P(y)dy,\ z\in \Delta_{\sigma}^{n}. \epsilonnd{equation} From Lemma \ref{smoothing} again we have that for any $\sigma>0$, $S_{\sigma}P(z)$ is a real analytic function from ${\Bbb C}^n$ to $\mathcal{M}_{s,\alpha}$ such that for any $k\in {\Bbb N}^n$ satisfying $|k|\leq \beta$, one has \begin{eqnarray}\langlebel{smoothing1.1} \sup\limits_{z\in \Delta_{\sigma}^n}\|\partial^{k}S_{\sigma}P(z)-\sum\limits_{|m|\leq \beta-|k|}\frac{\partial^{m+k}P({\Bbb R}e z)}{m!}({\rm i}{\Bbb I}m z)^{m}\|_{X_{k}}\leq C\|P(\theta)\|_{{\mathcal{C}^{\beta}}({\Bbb T}^n, \mathcal{M}_{s,\alpha})}\sigma^{\beta-|k|}, \epsilonnd{eqnarray} and for all $0\leq \sigma'\leq \sigma $, \begin{eqnarray}\langlebel{smoothing1.2} \sup\limits_{z\in \Delta_{\sigma'}^n}\|\partial^{k}S_{\sigma }P(z)-\partial^{k}S_{\sigma'}P(z)\|_{X_{k}}\leq C\|P(\theta)\|_{{\mathcal{C}^{\beta}}({\Bbb T}^n, \mathcal{M}_{s,\alpha})}\sigma ^{\beta-|k|}. 
\epsilonnd{eqnarray} \begin{Remark} Since $S_{\sigma}P(z)$ preserves periodicity, we often write $S_{\sigma}P(\theta)$ instead of $S_{\sigma}P(z)$. Recall $P(\theta)$ is Hermitian and, from \cite{Sal04}, $K({\Bbb R}^n)\in {\Bbb R}$, then $S_{\sigma}P(\theta)$ is also Hermitian when $\theta\in {\Bbb T}^n$ by (\ref{moguangpz}). \epsilonnd{Remark} Suppose $0<\cdots<\sigma_{\nu}<\cdots<\sigma_{1}<\sigma_{0}.$ Then we can construct a series of analytic functions $\{S_{\sigma_\nu}P(\theta),\ \theta\in {\Bbb T}_{\sigma_\nu}^{n}\}_{\nu\in{\Bbb N}}$. From (\ref{smoothing1.2}) we have \begin{Lemma}\langlebel{PP001} For $ |{\Bbb I}m \theta|\leq\sigma_{\nu }$, $\nu=1,2,\cdots,$ \begin{eqnarray*} |S_{\sigma_{\nu }}P (\theta)-S_{\sigma_{\nu-1}}P(\theta)|_{s,\alpha}\leq C \sigma_{\nu-1}^{\beta}\|P(\theta)\|_{\mathcal{C}^{\beta}({\Bbb T}^n, \mathcal{M}_{s,\alpha})}\leq C \sigma_{\nu-1}^{\beta} . \epsilonnd{eqnarray*} \epsilonnd{Lemma} \begin{Lemma}\langlebel{PP002} For $ |{\Bbb I}m \theta|\leq\sigma_{0}\leq 1$, \begin{eqnarray}\langlebel{guanghua2-1} |S_{\sigma_{0}}P (\theta ) |_{s,\alpha}\leq C. \epsilonnd{eqnarray} \epsilonnd{Lemma} \begin{proof} From (\ref{smoothing1.1}), for $|{\Bbb I}m \theta|\leq\sigma_0\leq1$ we have $ |S_{\sigma_0}P(\theta)-\sum\limits_{|k|\leq \beta}\frac{\partial^{k}P({\Bbb R}e \theta)}{k!}({\rm i}{\Bbb I}m \theta)^{k}|_{s,\alpha}\leq C. $ Thus, $ |\sum\limits_{|k|\leq \beta}\frac{\partial^{k}P({\Bbb R}e \theta)}{k!}({\rm i}{\Bbb I}m \theta)^{k}|_{s,\alpha}\leq \sum_{|k|\leq \beta}|\partial^{k} P({\Bbb R}e \theta)|_{s,\alpha}|\sigma_0^{|k|}\leq C. $ Then we obtain (\ref{guanghua2-1}). 
\end{proof} \begin{Lemma}\label{L4.7} For $\theta\in {\Bbb T}^n$, $|S_{\sigma_{\nu}}P(\theta)-P(\theta)|_{s,\alpha}\leq C\sigma_{\nu}^{\beta}$,\ $\nu=0,1,2,\cdots.$ \end{Lemma} \begin{proof} From Lemma \ref{smoothing}, we have for $|{\Bbb I}m z|\leq \sigma_{\nu}$, \begin{eqnarray*} |S_{\sigma_{\nu}}P(z)-\sum\limits_{|k|\leq \beta}\frac{\partial^{k}P({\Bbb R}e z)}{k!}({\rm i} {\Bbb I}m z)^{k}|_{s,\alpha}\leq C\|P(\theta)\|_{{\mathcal{C}^{\beta}}({\Bbb T}^n, \mathcal{M}_{s,\alpha})}\sigma_{\nu}^{\beta}\leq C\sigma_{\nu}^{\beta}. \end{eqnarray*} On the other hand, if $z\in {\Bbb R}^n$, $\sum\limits_{0<|k|\leq \beta}\frac{\partial^{k}P({\Bbb R}e z)}{k!}({\rm i} {\Bbb I}m z)^{k}=0$. Thus for $\theta\in {\Bbb T}^n$ it follows that $|S_{\sigma_{\nu}}P(\theta)-P(\theta)|_{s,\alpha}\leq C\sigma_{\nu}^{\beta}$. \end{proof} In the following we will write $P^{(\nu)}(\theta) : = S_{\sigma_{\nu}}P(\theta)$ for simplicity. Combining all the above lemmas, we have \begin{Lemma}\label{PP} \begin{eqnarray} \nonumber [P(\theta)]_{s,\alpha}^{D_0}&\leq& C, \qquad \theta\in{\Bbb T}^n;\\ \nonumber [P^{(0)}(\theta)]_{s,\alpha}^{D_0,\sigma_{0}}&\leq& C;\\ \label{guanghua4} [P^{(\nu)} (\theta)-P(\theta)]_{s,\alpha}^{D_0}&\leq& C\sigma_\nu^{\beta}, \quad \theta\in {\Bbb T}^n;\\ \nonumber [P^{(\nu+1)} (\theta)-P^{(\nu)} (\theta)]_{s,\alpha}^{D_0,\sigma_{\nu+1}}&\leq& C\sigma_\nu^{\beta}. \end{eqnarray} \end{Lemma} \begin{Lemma}\label{l4.1} For $\theta\in {\Bbb T}^n$, \begin{eqnarray}\label{41two} P(\theta)=P^{(0)}(\theta)+\sum_{\nu=0}^\infty(P ^{(\nu+1)}(\theta)-P ^{(\nu)}(\theta)), \end{eqnarray} under the assumption \begin{eqnarray}\label{conditionsection4} {\rm B_1}.\qquad \sum\limits_{\nu=0}^{\infty}\sigma_{\nu}^{\beta}<\infty.
\epsilonnd{eqnarray} \epsilonnd{Lemma} \begin{proof} From Lemma \ref{PP} and (\ref{conditionsection4}), \begin{eqnarray*} |P^{(0)}(\theta)+\sum_{\nu=0}^\infty(P ^{(\nu+1)}(\theta)-P ^{(\nu)}(\theta))|_{s,\alpha}\leq |P^{(0)}(\theta)|_{s,\alpha}+\sum_{\nu=0}^\infty|P ^{(\nu+1)}(\theta)-P ^{(\nu)}(\theta)|_{s,\alpha}\leq C \epsilonnd{eqnarray*} for any $\theta\in {\Bbb T}^n$. Thus for $\theta\in {\Bbb T}^n$, $P^{(0)}(\theta)+\sum_{\nu=0}^\infty(P ^{(\nu+1)}(\theta)-P ^{(\nu)}(\theta))$ is an element in $(\mathcal{M}_{s,\alpha}, |\cdot |_{s,\alpha})$. From Lemma \ref{PP}, $P(\theta)$ is also an element in $(\mathcal{M}_{s,\alpha}, |\cdot |_{s,\alpha})$ for $\theta\in {\Bbb T}^n$. The equality (\ref{41two}) follows from (\ref{guanghua4}) and (\ref{conditionsection4}). \epsilonnd{proof} \subsection{Homological equation} The proof of Theorem \ref{MainTheorem} is followed by an iterative KAM procedure where in each step we will consider a homological equation of the form \begin{eqnarray*}\langlebel{homoeq} \omega\cdot\nabla_\theta F(\theta,\omega)-\mathrm{i}[N(\omega),F(\theta,\omega)]+Q(\theta,\omega)=\mathrm{remainder} \epsilonnd{eqnarray*} with $N(\omega), \omega\in D\subset D_0,$ in normal form close to $N_0=diag(\langlembda_a)_{a\in\mathcal{E}}$ and $Q\in \mathcal{M}_{s,\alpha}(D,\sigma).$ We can construct a solution $F\in \mathcal{M}_{s,\alpha}^+(D',\sigma')$, $0<\sigma'<\sigma$, $D' \subset D$ as in \cite{GP}. \begin{Proposition}[\cite{GP}, Proposition 4.1]\langlebel{pro4.1} Let $(\theta,\omega)\in {\Bbb T}^n_\sigma\times D$ with $0<\sigma\leq1,\ D\subset D_0$. Suppose $D\ni\omega\mapsto N(\omega)\in \mathcal{NF}$ be a $\mathcal{C}^1$ mapping that satisfies $[N-N_0]_{s,\alpha}^{D}\leq \frac{c_0}{4}$, $Q\in \mathcal{M}_{s,\alpha}(D,\sigma)$ Hermitian, $0<\kappa<\frac{c_0}{2}$ and $K\geq1$. 
Then for any $0<\sigma'<\sigma$ there exists a subset $D'=D'(\kappa,K)\subset D$, satisfying ${\rm Meas}(D\setminus D')\leq c(n,c_0,\alpha,\alpha_2)K^{\gamma_1}\kappa^{\gamma_2}$ with $\gamma_1=\max\{d+n+2,\alpha_1\},\ \gamma_2=\frac{\alpha\alpha_2}{4+d+2\alpha\alpha_2}$, and there exist $\widetilde{N}\in \mathcal{M}_{s,\alpha}(D')\cap\mathcal{NF},$ $F\in \mathcal{M}_{s,\alpha}^+{(D',\sigma')}$ and $R\in \mathcal{M}_{s,\alpha}{(D',\sigma')}$, $\mathcal{C}^1$ in $\omega$ and analytic in $\theta$, such that \begin{eqnarray*}\langlebel{homoeq01} \omega\cdot\nabla_\theta F(\theta,\omega)-\mathrm{i}[N(\omega),F(\theta,\omega)]=\widetilde{N}(\omega)-Q(\theta,\omega)+R(\theta,\omega) \epsilonnd{eqnarray*} for all $(\theta,\omega)\in {\Bbb T}^n_{\sigma'}\times D'$ and \begin{eqnarray*} \langlebel{homoF} [F]_{s,\alpha+}^{D',\sigma'}&\leq& \frac{c(n,d,s,\alpha)K^{1+d}}{\kappa^{2+d/\alpha}(\sigma -\sigma')^{n}} [Q]_{s,\alpha}^{D,\sigma};\\ \langlebel{homoN} [\widetilde{N}]_{s,\alpha}^{D'}&\leq& [Q]_{s,\alpha}^{D,\sigma};\\ \langlebel{homoR} [R]_{s,\alpha}^{D',\sigma'}&\leq& \frac{c(n,d,s,\alpha)K^{1+d/2}e^{-(\sigma-\sigma')K/2}}{\kappa^{1+d/(2\alpha)}(\sigma-\sigma')^n} [Q]_{s,\alpha}^{D,\sigma}. \epsilonnd{eqnarray*} Moreover, $\widetilde{N},$ $F(\theta) $ and $R(\theta) $ are Hermitian when $\theta\in{\Bbb T}^n$. \epsilonnd{Proposition} \subsection{The KAM Step.}\langlebel{KAMstep} As the KAM proof in \cite{CQ,YZ13}, we begin with the initial Hamiltonian $H^{(0)}=h+ q_0$ with $h=\sum\limits_{j=1}^n\omega_jy_j+\sum\limits_{a\in\mathcal{E}}\langlembda_a\xi_a\epsilonta_a:=\langle\omega,y\rangle+\langle\xi, N_0\epsilonta\rangle$, and $q_0= \langle \xi, Q_0 (\theta)\epsilonta\rangle$ with $Q_0=\varepsilon P^{(0)}\in \mathcal{M}_{s,\alpha} (D_0,\sigma_0) $. For simplicity we set $\sigma_0=1$, by Lemma \ref{PP}, \begin{eqnarray*}\langlebel{} [Q_0]_{s,\alpha}^{D_0,\sigma_{0}}=[\varepsilon P^{(0)}(\theta)]_{s,\alpha}^{D_0,\sigma_{0}}\leq C\varepsilon : = \frac12 \epsilonpsilon_0. 
\epsilonnd{eqnarray*} \indent In the $\nu$th step of the KAM scheme, we consider the Hamiltonian $H^{(\nu)}=h+ q_\nu$ defined on ${\Bbb T}^n_{\sigma_\nu}\times{\Bbb R}^n\times Y_s\times D_{0}$, where $q_\nu=\langle \xi,Q_\nu (\theta)\epsilonta\rangle$ with $Q_\nu=\varepsilon P^{(\nu)}$ analytic in ${\Bbb T}^n_{\sigma_\nu}$.\\ \indent We set $\Phi^{\nu}=\Phi^{\nu-1}\circ \Phi_{\nu}:\ {\Bbb T}^n_{\sigma_{\nu+1}}\times{\Bbb R}^n\times Y_s\rightarrow {\Bbb T}^n_{\sigma_{0}}\times{\Bbb R}^n\times Y_{s}$ for $\omega\in D_\nu$ with $\Phi^0=id$ and $\Phi_j(\theta,y,\xi,\epsilonta)=X_{f_j}^1(\theta,y,\xi,\epsilonta)=(\theta, \tilde{y}, e^{-\mathrm{i}F^T_{j}(\theta)}\xi,e^{\mathrm{i}F_{j}(\theta)}\epsilonta)$ which is generated by the time 1 map of Hamiltonian function $f_j=\langle\xi,F_j(\theta)\epsilonta \rangle$, $j=1,\cdots,\nu.$ Under $\Phi^j$, we suppose that $$H^{(j)}\circ \Phi^{j}=(h+q_j)\circ \Phi^{j}=h_{j}+p_{j},\ j=1,\cdots,\nu,$$ where $h_{j}(\theta,y,\xi,\epsilonta,\omega)=\langle\omega,y\rangle+\langle \xi,N_j(\omega)\epsilonta\rangle,$ $ p_j(\theta,y,\xi,\epsilonta,\omega)=\langle \xi,{P}_{j}(\theta,\omega)\epsilonta\rangle$ with $(\theta,y,\xi,\epsilonta,\omega)\in{\Bbb T}^n_{\sigma_{j+1}}\times{\Bbb R}^n\times Y_s\times D_{j}$, and for $j=1,\cdots,\nu,$ the following estimates hold \begin{eqnarray}\langlebel{homoP01} [P_j]_{s,\alpha}^{D_{j},\sigma_{j+1}}&\leq& \frac{1}{2}\epsilonpsilon_{j};\\ \langlebel{homoF01} [F_j]_{s,\alpha+}^{D_{j},\sigma_{j+1}}&\leq& c(n,d,s,\alpha) \epsilonpsilon_{j-1}^{\frac{13}{24}} ;\\ \langlebel{homoN01} [{N}_j-N_{j-1}]_{s,\alpha}^{D_{j-1},\sigma_j}&\leq& \epsilonpsilon_{j-1};\\ \|\Phi_{j}-id\|^*_{\mathfrak{L}(Y_{s'},Y_{s'+2\alpha})}&\leq& c(n,d,s,\alpha) \epsilonpsilon_{j-1}^{\frac{13}{24}},\ 0\leq s'\leq s;\\ {\rm Meas}(D_{j-1}\setminus D_j)&\leq& cK_{j-1}^{\gamma_1}\kappa_{j-1}^{\gamma_2}. 
\epsilonnd{eqnarray} In the $(\nu+1)$th step we consider $$H^{(\nu+1)}(\theta,y,\xi,\epsilonta,\omega)=h+q_{\nu+1}=H^{(\nu )}+(q_{\nu+1}-q_{\nu })$$ with $ (\theta,y,\xi,\epsilonta,\omega)\in {\Bbb T}^n_{\sigma_{\nu+1}}\times{\Bbb R}^n\times Y_s\times D_{\nu}.$ By $\Phi^\nu$ we have \begin{eqnarray}\langlebel{Hnu+1} H^{(\nu+1)}\circ \Phi^{\nu} =H^{(\nu )}\circ \Phi^{\nu}+(q_{\nu+1}-q_{\nu })\circ \Phi^{\nu}:=h_{\nu}+\tilde{p}_{\nu}, \epsilonnd{eqnarray} where $\tilde{p}_{\nu}= p_{\nu}+(q_{\nu+1}-q_{\nu})\circ \Phi^{\nu}:=\langle {\xi,\widetilde{P}}_{\nu}(\theta)\epsilonta\rangle$.\\ \indent We make some assumptions on parameters during the KAM iteration. For any $\nu=0,1,2,\cdots,$ the following assumptions hold:\\ \begin{eqnarray*} &&{\rm B2}.\qquad \sigma_{\nu+1}\leq \frac{1}{2}\sigma_\nu. \langlebel{canshu01} \\ &&{\rm B3}.\qquad \sigma_{\nu}=\epsilonpsilon_{\nu-1}^{t_1}, \kappa_{\nu}=\epsilonpsilon_{\nu}^{t_2},\ K_{\nu-1}^{d+1}\leq\epsilonpsilon_{\nu-1}^{-\frac{1}{8}}\ {\rm with} \ nt_1\leq\frac{1}{6},\ (2+d/\alpha)t_2\leq\frac{1}{6}. \langlebel{canshu02}\\ &&{\rm B4}.\qquad \epsilonpsilon_{\nu+1}=\epsilonpsilon_{\nu}^{\frac{3}{2}}. \langlebel{canshu03}\\ &&{\rm B5}.\qquad \beta\geq\frac{3}{2t_1}.\langlebel{canshu04}\\ &&{\rm B6}.\qquad e^{-\frac{1}{4}K_\nu\sigma_{\nu+1}}\leq\epsilonpsilon_\nu.\langlebel{canshu05} \epsilonnd{eqnarray*} \indent The explicit expressions of these parameters are given in Subsection {\ref{iteration}}. Under these assumptions, we have \begin{Lemma}\langlebel{psigmanuandnuminus1} Under Assumptions {\rm B2 - B5}, if $c(n,\beta,d,s)(1+ c(\alpha,s)\epsilonpsilon_{0}^{1/2})^2\varepsilon\leq\frac12$, then $\widetilde{P}_{\nu}$ is Hermitian when $\theta\in{\Bbb T}^n$ and $[\widetilde{P}_{\nu}]_{s,\alpha}^{D_\nu,\sigma_{\nu+1}}\leq\epsilonpsilon_{\nu}$. \epsilonnd{Lemma} We need the following two preparation lemmas for Lemma \ref{psigmanuandnuminus1}. 
\\ \indent Denote $B_\nu=e^{\mathrm{i}F_1}\cdots e^{\mathrm{i}F_\nu}$, where $B_\nu$ is defined on $(\theta,\omega)\in {\Bbb T}^n_{\sigma_{\nu+1}}\times D_{\nu}$. Note that $F_\nu, \nu=1,\cdots,$ are Hermitian matrices, thus \begin{equation}\langlebel{inverseM}B^{-1}_\nu(\theta)=\overline{B}^T_\nu(\theta), \ \theta\in{\Bbb T}^n. \epsilonnd{equation} \begin{Lemma}\langlebel{L4.6} If $ 4\epsilonpsilon_{0}^{1/24}\leq 1$, then $B_\nu-Id,\ B^{-1}_\nu-Id \in \mathcal{M}_{s,\alpha}^+{(D_{\nu},\sigma_{\nu+1})}$ and $$[B_\nu-Id]_{s,\alpha+}^{D_\nu,\sigma_{\nu+1}},\ [B^{-1}_\nu-Id]_{s,\alpha+}^{D_\nu,\sigma_{\nu+1}} \leq \epsilonpsilon_{0}^{\frac12}$$ under Assumptions {\rm B2-B4}. Moreover, for $0\leq \nu_1<\nu$, \begin{eqnarray*} [B_{\nu_1}-B_{\nu}]_{s,\alpha+}^{D_\nu,\sigma_{\nu+1}},\ [B^{-1}_{\nu_1}-B^{-1}_{\nu}]_{s,\alpha+}^{D_\nu,\sigma_{\nu+1}}\leq \epsilonpsilon_{\nu_1}^{\frac12}. \epsilonnd{eqnarray*} \epsilonnd{Lemma} \begin{proof} We only give the estimates on $[B_\nu-Id]_{s,\alpha+}^{D_\nu,\sigma_{\nu+1}}$ and $[B_{\nu_1}-B_{\nu}]_{s,\alpha+}^{D_\nu,\sigma_{\nu+1}}$. From Lemma \ref{daishu} and (\ref{homoF01}), we have \begin{eqnarray*} [e^{\mathrm{i}F_j}-Id]_{s,\alpha+}^{D_\nu,\sigma_{\nu+1}}\leq [F_j]_{s,\alpha+}^{D,\sigma}e^{c(\alpha,s )[F_j]_{s,\alpha+}^{D,\sigma}}\leq 2\epsilonpsilon_{j-1}^{\frac{13}{24}},\ j=1,\cdots,\nu, \epsilonnd{eqnarray*} where $e^{c(\alpha,s )\epsilonpsilon_{j-1}^{\frac{13}{24}}}\leq 2$. 
It follows that \begin{eqnarray*} &&[e^{\mathrm{i}F_1}e^{\mathrm{i}F_2}-Id]_{s,\alpha+}^{D_\nu,\sigma_{\nu+1}}\\ &\leq& [e^{\mathrm{i}F_1}-Id]_{s,\alpha+}^{D_\nu,\sigma_{\nu+1}}+[e^{\mathrm{i}F_2}-Id]_{s,\alpha+}^{D_\nu,\sigma_{\nu+1}}+ [(e^{\mathrm{i}F_1}-Id)(e^{\mathrm{i}F_2}-Id)]_{s,\alpha+}^{D_\nu,\sigma_{\nu+1}}\\ &\leq& 2\epsilon_{0}^{\frac{13}{24}} +2\epsilon_{1}^{\frac{13}{24}}+4c(\alpha,s)\epsilon_{0}^{\frac{13}{24}} \epsilon_{1}^{\frac{13}{24}} \leq 3\epsilon_{0}^{\frac{13}{24}} +3\epsilon_{1}^{\frac{13}{24}}. \end{eqnarray*} By induction, we obtain \begin{eqnarray}\label{mapit} [e^{\mathrm{i}F_1}\cdots e^{\mathrm{i}F_{\nu}}-Id]_{s,\alpha+}^{D_\nu,\sigma_{\nu+1}}\leq 3\epsilon_{0}^{\frac{13}{24}} +3\epsilon_{1}^{\frac{13}{24}}+\cdots+3\epsilon_{\nu-1}^{\frac{13}{24}} \leq \epsilon_{0}^{\frac{1 }{2 }} \end{eqnarray} by Assumption B4.\\ \indent Following a similar discussion as above, we also have for $0\leq \nu_1<\nu$, \begin{eqnarray}\label{mapit01} [e^{\mathrm{i}F_{\nu_1+1}}\cdots e^{\mathrm{i}F_{\nu}}-Id]_{s,\alpha+}^{D_\nu,\sigma_{\nu+1}}\leq C\epsilon_{\nu_1}^{\frac{13}{24}}. \end{eqnarray} \indent Note that for $0\leq \nu_1<\nu$, $B_{\nu_1}-B_{\nu}=B_{\nu_1}(Id-e^{\mathrm{i}F_{\nu_1+1}}\cdots e^{\mathrm{i}F_{\nu}})$. Thus \begin{eqnarray*} &&[B_{\nu_1}-B_{\nu}]_{s,\alpha+}^{D_\nu,\sigma_{\nu+1}}\\ &\leq& [(B_{\nu_1}-Id)(Id-e^{\mathrm{i}F_{\nu_1+1}}\cdots e^{\mathrm{i}F_{\nu}})]_{s,\alpha+}^{D_\nu,\sigma_{\nu+1}}+[Id-e^{\mathrm{i}F_{\nu_1+1}}\cdots e^{\mathrm{i}F_{\nu}}]_{s,\alpha+}^{D_\nu,\sigma_{\nu+1}}\\ &\leq& c(\alpha,s,n)\epsilon_0^{\frac12}\epsilon_{\nu_1}^{\frac{13}{24}}+ \epsilon_{\nu_1}^{\frac{13}{24}} \ \leq\ \epsilon_{\nu_1}^{\frac12}. \end{eqnarray*} by (\ref{exponorm01}), (\ref{exponorm02}), (\ref{mapit}) and (\ref{mapit01}).
\epsilonnd{proof} \begin{Lemma}\langlebel{L4.5} If $c(n,\beta,d,s)(1+ c(\alpha,s)\epsilonpsilon_{0}^{1/2})^2\varepsilon\leq\frac12$, then $[B^{-1}_\nu(Q_{{\nu+1}}-Q_{{\nu}})B_\nu]_{s,\alpha}^{D_\nu,\sigma_{\nu+1}}\leq\frac{1}{2}\epsilonpsilon_\nu$ under Assumptions {\rm B2-B5}. \begin{proof} Firstly, we consider $[(Q_{{\nu+1}}-Q_{{\nu}})B_\nu]_{s,\alpha}^{D_\nu,\sigma_{\nu+1}}$ where $$[ Q_{{\nu+1}}-Q_{{\nu}} ]_{s,\alpha}^{D_\nu,\sigma_{\nu+1}}=\varepsilon[ P^{(\nu+1)}-P^{(\nu)} ]_{s,\alpha}^{D_\nu,\sigma_{\nu+1}}\leq c(n,\beta,d,s)\varepsilon \sigma_\nu^{\beta}$$ by Lemma \ref{PP}. Then from Lemma \ref{daishu01} and Lemma \ref{L4.6}, it holds that \begin{eqnarray*} &&[(Q_{{\nu+1}}-Q_{{\nu}})B_\nu]_{s,\alpha}^{D_\nu,\sigma_{\nu+1}}\\ &\leq& [(Q_{{\nu+1}}-Q_{{\nu}})(B_\nu-Id)]_{s,\alpha}^{D_\nu,\sigma_{\nu+1}}+[Q_{{\nu+1}}-Q_{{\nu}}]_{s,\alpha}^{D_\nu,\sigma_{\nu+1}}\\ &\leq& c(n,\beta,d,s)(1+ c(\alpha,s)\epsilonpsilon_{0}^{1/2})\varepsilon \sigma_\nu^{\beta}, \epsilonnd{eqnarray*} and \begin{eqnarray*} &&[B^{-1}_\nu(Q_{{\nu+1}}-Q_{{\nu}})B_\nu]_{s,\alpha}^{D_\nu,\sigma_{\nu+1}}\\ &\leq& (1+c(\alpha,s)[B^{-1}_\nu-Id]_{s,\alpha+}^{D_\nu,\sigma_{\nu+1}}) [(Q_{{\nu+1}}-Q_{{\nu}})B_\nu]_{s,\alpha}^{D_\nu,\sigma_{\nu+1}}\\ &\leq& c(n,\beta,d,s)(1+ c(\alpha,s)\epsilonpsilon_{0}^{1/2})^2\varepsilon \sigma_\nu^{\beta} \leq\frac{1}{2}\epsilonpsilon_\nu \epsilonnd{eqnarray*} under Assumptions B2-B5. \epsilonnd{proof} \epsilonnd{Lemma} Note that $\widetilde{P}_{\nu}=P_{\nu}+B^{-1}_\nu(Q_{{\nu+1}}-Q_{{\nu}})B_\nu$, then it is easy to check that $\widetilde{P}_{\nu}$ is Hermitian. Lemma \ref{psigmanuandnuminus1} is obtained immediately from (\ref{homoP01}) and Lemma \ref{L4.5}. 
$\Box$ Go back to the Hamiltonian (\ref{Hnu+1}), we write $\widetilde{p}_{\nu}=\Gamma\widetilde{p}_{\nu}+r_{\nu}$ and $\widetilde{P}_{\nu}=\Gamma\widetilde{P}_{\nu}+R_{\nu}$ respectively, where \begin{eqnarray*} \Gamma\widetilde{P}_{\nu}(\theta,\omega):= \sum\limits_{|k|\leq K_{\nu}} \widetilde{P}^k_{\nu}(\omega)e^{{{\rm i}k\cdot\theta}},\qquad R_{\nu}(\theta,\omega):= \sum\limits_{|k|> K_{\nu}} \widetilde{P}^k_{\nu}(\omega)e^{{{\rm i}k\cdot\theta}}, \epsilonnd{eqnarray*} and define $ \widetilde{N}_{\nu}(\omega)\in \mathcal{NF} $ satisfying \begin{eqnarray}\langlebel{ntilde} \left(\widetilde{N}_{\nu}(\omega)\right)_{[a]}:= \left(\widetilde{P}^0_{\nu}(\omega)\right)_{[a]}. \epsilonnd{eqnarray} In the following we will use $\Phi_{\nu+1}=X_{f_{\nu+1}}^1$ with $f_{\nu+1}=\langle \xi,F_{\nu+1}(\theta)\epsilonta\rangle$ to put $\Gamma\widetilde{p}_{\nu}$ into normal form. Assume $f_{\nu+1}=\langle \xi,F_{\nu+1}(\theta)\epsilonta\rangle$ satisfying \begin{equation}\langlebel{homoeq03} \Gamma \tilde{p}_{\nu}-\langle\xi,\widetilde{N}_{\nu}\epsilonta\rangle+\{h_{\nu},\ f_{\nu+1}\}=0, \epsilonnd{equation} then under $\Phi_{\nu+1}(\xi,\epsilonta)=X_{f_{\nu+1}}^1(\xi,\epsilonta)=(e^{-\mathrm{i}F^T_{\nu+1}}\xi,e^{\mathrm{i}F_{\nu+1}}\epsilonta)$, we have $ H^{(\nu+1)}\circ \Phi^{\nu}\circ \Phi_{\nu+1} = h_{\nu+1}+p_{\nu+1}, $ where $h_{\nu+1}=\langle\omega,y\rangle+\langle \xi,N_{\nu+1}\epsilonta\rangle\ {\rm with}\ N_{\nu+1}=N_{\nu}+\widetilde{N}_{\nu}$, and $p_{\nu+1}=\langle \xi,{P}_{\nu+1}(\theta,\omega)\epsilonta\rangle$ with \begin{eqnarray}\langlebel{newP} P_{\nu+1}&=&-\mathrm{i}\int_0^1e^{-\mathrm{i}tF_{\nu+1} }[t\Gamma \widetilde{P}_{\nu}+(1-t)\widetilde{N}_\nu,F_{\nu+1}]e^{\mathrm{i}tF_{\nu+1} }dt+ e^{-\mathrm{i}F_{\nu+1} }R_{\nu}e^{\mathrm{i}F_{\nu+1} }. 
\end{eqnarray} Therefore, we obtain $$H^{(\nu+1)}\circ \Phi^{\nu+1}=h_{\nu+1}+p_{\nu+1}=\langle\omega,y\rangle+\langle\xi, N_{\nu+1}(\omega)\eta\rangle+\langle \xi,{P}_{\nu+1}(\theta,\omega)\eta\rangle.$$ In the following we will give the explicit estimates on $F_{\nu+1}$, $N_{\nu+1}$, $P_{\nu+1}$ and $\Phi_{\nu+1}-id$.\\ First of all, (\ref{homoeq03}) is equivalent to \begin{eqnarray}\label{homoequations} \omega\cdot\nabla_\theta F_{\nu+1}(\theta)-\mathrm{i}[N_{\nu},F_{\nu+1}(\theta)]+\Gamma\widetilde{P}_{\nu}(\theta)=\widetilde{N}_{\nu}+R_\nu(\theta). \end{eqnarray} From Proposition \ref{pro4.1} and Assumption {\rm B4}, we have from (\ref{homoequations}) \begin{eqnarray}\label{homon02} [N_\nu-N_0]_{s,\alpha}^{D_\nu}\leq\sum_{j=1}^\nu[N_j-N_{j-1}]_{s,\alpha}^{D_\nu}\leq\epsilon_0+\cdots+\epsilon_{\nu-1}\leq 2\epsilon_0\leq\frac{c_0}{4}. \end{eqnarray} It follows that \begin{eqnarray*}\label{homon02-1} \|\partial_\omega^j(N_\nu-N_0)_{[a]}\|w_a^{2\alpha}\leq 2\epsilon_0,\ j=0,1. 
\end{eqnarray*} From Proposition \ref{pro4.1}, together with (\ref{homon02}), Lemma \ref{psigmanuandnuminus1} and Assumptions {\rm B2-B6}, if $\kappa_\nu\leq\frac{c_0}{2}$ and $K_\nu\geq1$ then there exists a subset $D_{\nu+1}\subset D_\nu$ with ${\rm Meas}(D_\nu\setminus D_{\nu+1})\leq c K_\nu^{\gamma_1}\kappa_\nu^{\gamma_2}$, and there exist $\widetilde{N}_\nu\in \mathcal{M}_{s,\alpha}(D_{\nu+1})\cap\mathcal{NF},$ $F_{\nu+1}\in \mathcal{M}_{s,\alpha}^+{(D_{\nu+1},\sigma_{\nu+2})}$, $R_\nu\in \mathcal{M}_{\alpha}{(D_{\nu+1},\sigma_{\nu+2})}$, $\mathcal{C}^1$ in $\omega$ and analytic in $\theta$, such that (\ref{homoequations}) holds for all $(\theta,\omega)\in {\Bbb T}^n_{\sigma_{\nu+2}}\times D_{\nu+1}$ and \begin{eqnarray} \label{homoFn} [F_{\nu+1}]_{s,\alpha+}^{D_{\nu+1},\sigma_{\nu+2}}&\leq& \frac{c(n,d,s,\alpha)\epsilon_\nu K_\nu^{d+1}}{\kappa_\nu^{2+d/\alpha}(\sigma_{\nu+1}-\sigma_{\nu+2})^{n}}\leq c(n,d,s,\alpha)\epsilon_\nu^{\frac{13}{24}} ;\\ \label{homoNn} [\widetilde{N}_\nu]_{s,\alpha}^{D_{\nu+1}}&\leq& [\widetilde{P}_{\nu}]_{s,\alpha}^{D_{\nu},\sigma_{\nu+1}}\leq\epsilon_\nu.\\ \label{homoRn}[R_\nu]_{s,\alpha}^{D_{\nu+1},\sigma_{\nu+2}}&\leq& \frac{c(n,d,s,\alpha)\epsilon_\nu K_{\nu}^{1+d/2}e^{-(\sigma_{\nu+1}-\sigma_{\nu+2})K_{\nu}/2}}{\kappa_\nu^{1+d/2\alpha}(\sigma_{\nu+1}-\sigma_{\nu+2})^{n}}\leq \epsilon_{\nu+1}. \end{eqnarray} $\widetilde{N}_\nu,$ $F_{\nu+1}(\theta) $ and $R_\nu(\theta) $ are Hermitian when $\theta\in{\Bbb T}^n$. Moreover, from Lemmas \ref{daishu01}, \ref{daishu} and (\ref{homoFn}), \begin{Lemma}\label{map} If $4c(\alpha,s)\varepsilon^{\frac{1 }{24}}\leq 1$, then the symplectic map $\Phi_{\nu+1}(\theta,\omega)$ defined in ${\Bbb T}^n_{\sigma_{\nu+2}}\times D_{\nu+1}$ satisfies \begin{eqnarray*} \|\Phi_{\nu+1}-id\|^*_{\mathfrak{L}(Y_{s'},Y_{s'+2\alpha})}\leq 4c(\alpha,s)\epsilon_\nu^{\frac{13}{24}}\leq \epsilon_\nu^{\frac{1 }{2 }}. 
\end{eqnarray*} for all $0\leq s'\leq s$. \begin{proof} Note that $\Phi_{\nu+1}(\xi,\eta)=X_{f_{\nu+1}}^1(\xi,\eta)=(e^{-\mathrm{i}F^T_{\nu+1}}\xi,e^{\mathrm{i}F_{\nu+1}}\eta)$, where \begin{eqnarray*} \|\partial^k_\omega (e^{\mathrm{i}F_{\nu+1}}-Id)\eta\|_{s'+2\alpha} &\leq& c(\alpha,s)[e^{\mathrm{i}F_{\nu+1}}-Id]_{s,\alpha+}\|\eta\|_{s'}\\ &\leq& c(\alpha,s)[F_{\nu+1}]_{s,\alpha+}^{D_{\nu+1},\sigma_{\nu+2}}e^{c(\alpha,s )[F_{\nu+1}]_{s,\alpha+}^{D_{\nu+1},\sigma_{\nu+2}}} \|\eta\|_{s'}\\ &\leq& 2c(\alpha,s)\epsilon_\nu^{\frac{13}{24}}\|\eta\|_{s'} \end{eqnarray*} for $k=0,1$ and $0\leq s'\leq s$ by (\ref{homoFn}), Lemmas \ref{daishu01} and \ref{daishu}. Similarly, \begin{eqnarray*} \|\partial^k_\omega (e^{-\mathrm{i}F^T_{\nu+1}}-Id)\xi\|_{s'+2\alpha} \leq 2c(\alpha,s)\epsilon_\nu^{\frac{13}{24}}\|\xi\|_{s'} \end{eqnarray*} for $k=0,1$. Therefore, we have $ \|\Phi_{\nu+1}-id\|^*_{\mathfrak{L}(Y_{s'},Y_{s'+2\alpha})} \leq c(\alpha,s,n,d)\epsilon_\nu^{\frac{13}{24}}. \label{homoPhin} $ \end{proof} \end{Lemma} \noindent{\it Estimates on the new error term.} Recall $ p_{\nu+1} =\langle \xi,P_{\nu+1}(\theta)\eta\rangle $ with (\ref{newP}), we have \begin{Lemma}\label{pnu+1} Under Assumptions {\rm B2} - {\rm B6}, if $\varepsilon\leq c^{-1}(n,d,s,\alpha)\ll1$ then $P_{\nu+1}(\theta)$ is Hermitian when $\theta\in{\Bbb T}^n$ and $ [P_{\nu+1}]_{s,\alpha}^{D_{\nu+1},\sigma_{\nu+2}}\leq \frac12\epsilon_{\nu+1}. $ \end{Lemma} \begin{proof} It is easy to check that $P_{\nu+1}(\theta)$ is Hermitian from (\ref{newP}). 
The estimate is divided into two parts.\\ 1) From (\ref{exponorm01}), (\ref{canshu05}), (\ref{homoFn}) and (\ref{homoRn}), \begin{eqnarray*} [e^{-\mathrm{i}F_{\nu+1}}R_{\nu}e^{\mathrm{i}F_{\nu+1}}]_{s,\alpha}^{D_{\nu+1},\sigma_{\nu+2}} &\leq & e^{4c(\alpha)[F_{\nu+1}]_{s,\alpha+}^{D_{\nu+1},\sigma_{\nu+2}}}[R_{\nu}]_{s,\alpha}^{D_{\nu+1},\sigma_{\nu+2}} \\ &\leq & e^{4}[R_{\nu}]_{s,\alpha}^{D_{\nu+1},\sigma_{\nu+2}}\leq e^{4}\epsilon_\nu^{\frac{1 }{8}}\epsilon_\nu^{\frac{3 }{2}} \leq\frac{1}{4}\epsilon_{\nu}^{\frac32}. \end{eqnarray*} 2) For $0\leq t\leq1$, note that $\Gamma \widetilde{P}_{\nu}=\widetilde{P}_{\nu}-R_{\nu},$ \begin{eqnarray*} &&[e^{-\mathrm{i}tF_{\nu+1}}[t\Gamma \widetilde{P}_{\nu}+(1-t)\widetilde{N}_\nu,F_{\nu+1}]e^{\mathrm{i}tF_{\nu+1}}]_{s,\alpha}^{D_{\nu+1},\sigma_{\nu+2}}\\ &\leq &e^{4c(\alpha)[F_{\nu+1}]_{s,\alpha+}^{D_{\nu+1},\sigma_{\nu+2}}}[t\Gamma \widetilde{P}_{\nu}+(1-t)\widetilde{N}_\nu,F_{\nu+1}]_{s,\alpha}^{D_{\nu+1},\sigma_{\nu+2}}\\ &\leq &2c(\alpha)e^{4c(\alpha)[F_{\nu+1}]_{s,\alpha+}^{D_{\nu+1},\sigma_{\nu+2}}}[F_{\nu+1}]^{D_{\nu+1},\sigma_{\nu+2}}_{s,\alpha+} ([\Gamma \widetilde{P}_{\nu}]^{D_{\nu+1},\sigma_{\nu+2}}_{s,\alpha}+[\widetilde{N}_\nu]^{D_{\nu+1}}_{s,\alpha})\\ &\leq &4c(\alpha)e^{4}\epsilon_{\nu}^{\frac{1}{24}}\epsilon_{\nu}^{\frac32}\leq \frac{1}{4}\epsilon_\nu^{\frac32}\end{eqnarray*} by Lemma \ref{daishu}, (\ref{exponorm01}), (\ref{canshu05}), (\ref{homoFn}), (\ref{homoNn}), (\ref{homoRn}). \end{proof} \subsection{Iteration Lemma}\label{iteration} To iterate the KAM steps infinitely often we choose sequences for the pertinent parameters. 
Set $\epsilon_0=2\varepsilon c(n,\beta,d,s)$, $\sigma_0=1$ and suppose \begin{eqnarray} \epsilon_{\nu+1}=\epsilon_{\nu}^{\frac{3}{2}},\sigma_{\nu+1}=\epsilon_{\nu}^{\frac{3}{2\beta}}, \kappa_{\nu}=\epsilon_{\nu}^{\frac{1}{6(2+d/\alpha)}}, K_{\nu}=8|\ln \epsilon_{\nu}|\epsilon_{\nu}^{-\frac{3}{2\beta}}\label{canshudingyi} \end{eqnarray} for $\nu\geq 0$. From a straightforward computation in Subsection \ref{KAMstep} we have \begin{Lemma}[Iterative Lemma]\label{itelm} Let $0<\delta<\frac{\gamma_2}{24}$ and \begin{eqnarray} \beta>\beta_*=\max\{9(2+d/\alpha)\frac{\gamma_1}{\gamma_2-24\delta},\ 9n,\ 12(d+1)\}\label{betaxinzhi} \end{eqnarray} with $\gamma_1=\max\{d+n+2,\alpha_1\},\ \gamma_2=\frac{\alpha\alpha_2}{4+d+2\alpha\alpha_2}$. If $0<\varepsilon\leq\varepsilon _*(n,d,s,\delta)\ll 1$ then all the iteration series $\epsilon_{\nu}, \sigma_{\nu}, \kappa_{\nu}$ and $K_{\nu}$ satisfy Assumptions $\mathrm{B1} - \mathrm{B6}$, therefore we have the following:\\ Suppose that in ${\Bbb T}^n_{\sigma_{\nu+1}}\times{\Bbb R}^n\times Y_s\times D_\nu$, $$H^{(\nu)}\circ \Phi^{\nu}(\theta,y,\xi,\eta,\omega)=(h+q_\nu)\circ \Phi^{\nu}=h_{\nu}+p_{\nu},$$ where $\Phi^{\nu}=\Phi_1\circ \Phi_2\circ\cdots \Phi_{\nu}$ and $\Phi_{j}=X_{f_j}^1: Y_{s'}\rightarrow Y_{s'}$ for all $0\leq s'\leq s$, $\omega \in D_{j}$ and $\theta\in {\Bbb T}_{\sigma_{j+1}}$ and satisfies $$\|\Phi_{j}-id\|^*_{\mathfrak{L}(Y_{s'},Y_{s'+2\alpha})}\leq \epsilon_{j-1}^{\frac{1}{2}}, j=1, \cdots, \nu,$$ and $h_{\nu}=\langle \omega,y\rangle+\langle\xi,N_\nu \eta\rangle$ in normal form and $p_{\nu}=\langle \xi,{P}_{\nu}\eta\rangle$ where $N_{\nu}-N_0\in \mathcal{M}_{s,\alpha}(D_{\nu})$, $P_{\nu}\in \mathcal{M}_{s,\alpha}(D_{\nu},\sigma_{\nu+1})$ and the following estimates hold: \begin{eqnarray*} \nonumber {\rm Meas}(D_{j-1}\setminus D_{j}) &\leq& cK_{j-1}^{\gamma_1}\kappa_{j-1}^{\gamma_2};\\ 
\nonumber [F_{j}]_{s,\alpha+}^{D_{j}, \sigma_{j+1}}&\leq& c(n,d,s,\alpha) \epsilon_{j-1}^{\frac{13}{24}};\\ \nonumber [N_j-N_{j-1}]_{s,\alpha}^{D_{j-1},\sigma_j}&\leq& \epsilon_{j-1};\\ \nonumber [P_{j}]_{s,\alpha}^{D_{j}, \sigma_{j+1}}&\leq& \frac12\epsilon_{j}. \end{eqnarray*} Then there exist $ D_{\nu+1}\subset D_\nu$ and a mapping $\Phi_{\nu+1}=X_{f_{\nu+1}}^1: Y_{s'}\rightarrow Y_{s'}$ for all $0\leq s'\leq s$, $\omega\in D_{\nu+1},\ \theta\in {\Bbb T}_{\sigma_{\nu+2}}$, and $$H^{(\nu+1)}\circ \Phi^{\nu}\circ \Phi_{\nu+1}=(h+q_{\nu+1})\circ \Phi^{\nu+1}=h_{\nu+1}+p_{\nu+1}$$ and $h_{\nu+1}=\langle \omega,y\rangle+\langle\xi,N_{\nu+1} \eta\rangle$ in normal form and $p_{\nu+1}=\langle \xi,{P}_{\nu+1}\eta\rangle$ where $N_{\nu+1}-N_0\in \mathcal{M}_{s,\alpha}(D_{\nu+1})$, $P_{\nu+1}\in \mathcal{M}_{s,\alpha}(D_{\nu+1},\sigma_{\nu+2})$ and the same estimates hold for $j\leq\nu+1$.\\ \indent Moreover, since $N_{\nu}, P_{\nu}(\theta)$ are Hermitian when $\theta\in{\Bbb T}^n$, so are $N_{\nu+1}, P_{\nu+1}(\theta), F_{\nu+1}(\theta)$. \quad\quad$\Box$ \end{Lemma} \subsection{Transition to the limit and the proof of Theorem \ref{MainTheorem}} Set $ D_\varepsilon=\bigcap_{\nu=0}^\infty D_\nu$. From (\ref{canshudingyi}), (\ref{betaxinzhi}) and Lemma \ref{itelm} we have $${\rm Meas}(D_0\setminus D_\varepsilon)\leq\sum_{\nu=0}^\infty cK_{\nu}^{\gamma_1}\kappa_{\nu}^{\gamma_2}\leq c(n,d,s,\beta,\delta) \varepsilon^{\frac{3\delta}{2+d/\alpha}},$$ if $0<\varepsilon\leq \varepsilon_*(n,d,s,\beta,\delta)$. \\ \indent In the following we will show that \begin{Lemma}\label{L4.12} For $(\omega, \theta)\in D_{\varepsilon}\times {\Bbb T}^n$, $\{\Phi^{\nu}-id\}_\nu $ is a Cauchy sequence in $\mathfrak{L}(Y_{s'}, Y_{s'+2\alpha})$ for all $0\leq s'\leq s$. 
\end{Lemma} To prove this lemma we need to show that \begin{Lemma}\label{zhongjianguji} For $0\leq s'\leq s$, $0\leq {\nu}_1< \nu_2$ and $(\omega,\theta)\in D_{\varepsilon}\times {\Bbb T}^n$, $$\|\Phi_{\nu_1+1}\circ \Phi_{\nu_1+2}\circ \cdots \circ \Phi_{\nu_2}-id \|^*_{\mathfrak{L}(Y_{s'}, Y_{s'+2\alpha})}\leq C\epsilon_{\nu_1}^{\frac12}. $$ \end{Lemma} \begin{proof} Similar to the proof of Lemma \ref{map}, from Lemma \ref{daishu01} iv) and (\ref{mapit01}), for all $0\leq s'\leq s$, $(\theta,\omega)\in{\Bbb T}^n\times { D_\varepsilon} $ it holds that \begin{eqnarray*} &&\|\Phi_{\nu_1+1}\circ \Phi_{\nu_1+2}\circ \cdots \circ \Phi_{\nu_2}-id \|^*_{\mathfrak{L}(Y_{s'}, Y_{s'+2\alpha})}\\ &\leq& c(\alpha,s)\max\left\{[e^{-\mathrm{i}F^T_{\nu_1+1}}\cdots e^{-\mathrm{i}F^T_{\nu_2}}-Id]_{s,\alpha+}^{ D_\varepsilon},\ [e^{\mathrm{i}F_{\nu_1+1}}\cdots e^{\mathrm{i}F_{\nu_2}}-Id]_{s,\alpha+}^{ D_\varepsilon}\right\}\ \leq\ C \epsilon_{\nu_1}^{1/2}. \end{eqnarray*} \end{proof} From Lemma \ref{zhongjianguji}, let $\nu_1=0$ we have \begin{Corollary}\label{coro4.1} For all $0\leq s'\leq s$, $(\omega,\theta)\in D_{\varepsilon}\times {\Bbb T}^n$, $$\|\Phi^{\nu}-id\|^*_{\mathfrak{L}(Y_{s'}, Y_{s'+2\alpha})}\leq C\epsilon_0^{\frac12}.$$ \end{Corollary} \noindent\emph{Proof of Lemma \ref{L4.12}}.\ \ Recall that \begin{eqnarray*} \Phi^{\nu_1}-\Phi^{\nu_2}&=&\Phi^{\nu_1}\circ(\Phi_{\nu_1+1}\circ \Phi_{\nu_1+2}\circ \cdots \circ \Phi_{\nu_2}-id)\\ &=&(\Phi^{\nu_1}-id)\circ(\Phi_{\nu_1+1}\circ \Phi_{\nu_1+2}\circ \cdots \circ \Phi_{\nu_2}-id)+\Phi_{\nu_1+1}\circ \Phi_{\nu_1+2}\circ \cdots \circ \Phi_{\nu_2}-id. \end{eqnarray*} Then by Lemma \ref{zhongjianguji} and Corollary \ref{coro4.1} we have $ \|\Phi^{\nu_1}-\Phi^{\nu_2}\|^*_{\mathfrak{L}(Y_{s'}, Y_{s'+2\alpha})}\leq C\epsilon_{\nu_1}^{\frac12}. 
$ We recall that $\Phi^{\nu}-id$ is a map from ${\Bbb T}^n_{\sigma_{\nu+1}}\times D_{\varepsilon}$ to an operator in $\mathfrak{L}(Y_{s'}, Y_{s'+2\alpha})$. From Lemmas \ref{completed} and \ref{L4.12} we denote $\Phi_\omega^\infty-id =\lim\limits_{\nu\rightarrow \infty}(\Phi^{\nu}-id)$ which is a map from ${\Bbb T}^n\times D_{\varepsilon}$ to $\mathfrak{L}(Y_{s'}, Y_{s'+2\alpha})$. Furthermore, \begin{Lemma}\label{phiwuqiong} For $(\omega, \theta)\in D_{\varepsilon}\times {\Bbb T}^n$, $\Phi_\omega^\infty-id \in \mathcal{C}^{\mu}({\Bbb T}^n, \mathfrak{L}(Y_{s'}, Y_{s' +2\alpha}))$ for all $0\leq s'\leq s$, where $\mu\leq \frac{2}{9}\beta$ and is not an integer, \begin{eqnarray*} \|\Phi_\omega^\infty-id\|_{\mathcal{C}^{\mu}({\Bbb T}^n, \mathfrak{L}(Y_{s'}, Y_{s'+2\alpha})) }&\leq& \frac{c(n,\beta )}{\iota(1-\iota)} \epsilon_0^{\frac{3}{2\beta}(\frac{2}{9}\beta-\mu)},\ 0<\iota:=\mu-[\mu]<1. \end{eqnarray*} \end{Lemma} \begin{proof} $$\|\Phi^{\nu}-\Phi^{\nu-1}\|_{\mathfrak{L}(Y_{s'},Y_{s'+2\alpha})}\leq C\epsilon_{\nu-1}^{\frac{1}{2}}\leq C\sigma_{\nu+1}^{\frac{2\beta}{9}}.$$ From Lemma \ref{smoothinginverse}, we deduce that $\Phi_\omega^\infty-id\in\mathcal{C}^{\mu}({\Bbb T}^n,\mathfrak{L}(Y_{s'}, Y_{s'+2\alpha}))$ for every $\mu\leq \frac{2}{9}\beta$ which is not an integer and, \begin{eqnarray*} \|\Phi_\omega^\infty-id\|_{\mathcal{C}^{\mu}({\Bbb T}^n,\mathfrak{L}(Y_{s'}, Y_{s'+2\alpha}))}\leq \frac{c(n,\beta )}{\iota(1-\iota)} \sigma_1^{\frac{2}{9}\beta-\mu}= \frac{c(n,\beta )}{\iota(1-\iota)} \epsilon_0^{\frac{3}{2\beta}(\frac{2}{9}\beta-\mu)},\ 0<\iota:=\mu-[\mu]<1. \end{eqnarray*} \end{proof} \begin{Remark} In fact we can prove that $\Phi_{\omega}^{\infty}$ is also $C^1$ smooth in $\omega$ in Whitney sense and satisfies a similar estimation as above. 
\end{Remark} \begin{Lemma}\label{convergence} For any $y\in{\Bbb R}^n$,\ $(\xi,\eta)\in Y_{s'}$ with $1\leq s'\leq \max\{s,1\}$, $$\mathcal{H}\circ\Phi_\omega^\infty (\xi,\eta)=h_\infty(\xi,\eta):=\langle\omega,y\rangle+\langle\xi,N_\infty\eta\rangle,$$ uniformly for $\omega\in D_\varepsilon$ and $\theta\in{\Bbb T}^n$, where $N_\infty(\omega)=\lim_{\nu\rightarrow\infty}N_{\nu}(\omega)$ with $N_{\infty}(\omega)\in\mathcal{NF}$, uniformly on $ D_\varepsilon$, $\mathcal{C}^1$ Whitney smooth and $ [N_\infty-N_0]^{ D_\varepsilon}_{s,\alpha}\leq 2\epsilon_0.$ \end{Lemma} We need to prove a series of preparation lemmas. \begin{Lemma}\label{convergence01} For any $y\in{\Bbb R}^n$,\ $(\xi,\eta)\in Y_{\bar{s}}$ and $\bar{s}\geq 1$, $$\lim_{\nu\rightarrow\infty}(h_\nu+p_\nu)=h_\infty=\langle\omega,y\rangle+\langle\xi,N_\infty\eta\rangle,$$ uniformly on $ D_\varepsilon\times {\Bbb T}^n$ and $ [N_\infty-N_0]^{ D_\varepsilon}_{s,\alpha}\leq 2\epsilon_0.$ \end{Lemma} \begin{proof} For $\nu_1<\nu_2$, from Lemma \ref{itelm}, $ [N_{\nu_1}-N_{\nu_2}]_{s,\alpha}^{D_\varepsilon}\leq 2\epsilon_{\nu_1}. $ It follows that $N_\nu-N_0$ is a Cauchy sequence in the norm $[\cdot]_{s,\alpha}^{D_{\varepsilon, {\Bbb T}^n}}$ by (\ref{homon02}). We denote $N_\infty=\lim N_\nu$ by Lemma \ref{completed}. 
Clearly, for $\nu\geq0$, $$[N_\infty-N_{\nu}]^{ D_\varepsilon}_{s,\alpha}\leq \sum_{k=\nu}^\infty[\widetilde{N}_{k}]^{ D_\varepsilon}_\alpha\leq 2\epsilon_\nu.$$ Thus $$|\langle\xi,(N_\infty-N_\nu)\eta\rangle|\leq\|\xi\|_{\bar{s}}\|(N_\infty-N_\nu)\eta\|_{-\bar{s}}\leq c(\alpha,s)[N_\infty-N_\nu]_{s,\alpha}^{D_\varepsilon}\|\xi\|_{\bar{s}}\|\eta\|_{\bar{s}}\leq 2c(\alpha,s)\epsilon_{\nu}\|\xi\|_{\bar{s}}\|\eta\|_{\bar{s}},$$ which means that $\lim_\nu\langle\xi,N_\nu\eta\rangle=\langle\xi,N_\infty\eta\rangle.$ On the other hand, \begin{equation*}\label{pes}|p_\nu|\leq c(\alpha,s)[P_\nu]_{s,\alpha}^{D_\nu,\sigma_{\nu+1}}\|\xi\|_{\bar{s}}\|\eta\|_{\bar{s}}\leq \frac{1}{2} c(\alpha,s)\epsilon_\nu \|\xi\|_{\bar{s}}\|\eta\|_{\bar{s}}\rightarrow 0,\ \nu\rightarrow\infty. \end{equation*} \end{proof} \begin{Lemma}\label{App02} If $\epsilon_0 \ll 1,$ then for $0\leq s'\leq s$ and $(\theta,\omega)\in {\Bbb T}^n \times D_{\varepsilon}$ there exists $M_\omega(\theta)$ defined on $\mathfrak{L}(\ell^2_{s' },\ell^2_{s' })(0\leq s'\leq s)$ such that\\ i) $\|B_\nu-M_\omega\|_{\mathfrak{L}(\ell^2_{s'},\ell^2_{s'+2\alpha})},\ \|B_\nu^{-1}-M_\omega^{-1} \|_{\mathfrak{L}(\ell^2_{s'},\ell^2_{s'+2\alpha})}\leq c(\alpha,s,n)\epsilon_\nu^{\frac12};$ \\ ii) $\|M_\omega-Id\|_{\mathfrak{L}(\ell^2_{s'},\ell^2_{s'+2\alpha})},\ \|M_\omega^{-1} -Id\|_{\mathfrak{L}(\ell^2_{s'},\ell^2_{s'+2\alpha})}\leq c(\alpha,s,n)\epsilon_0^{\frac12}$. \end{Lemma} \begin{proof} From Lemmas \ref{daishu01} and \ref{L4.6}, for $0\leq \nu_1<\nu_2$, $\|B_{\nu_1}-B_{\nu_2}\|_{\mathfrak{L}(\ell^2_{s'},\ell^2_{s'+2\alpha})}\leq c(\alpha,s,n) \epsilon_{\nu_1}^{\frac12}$. 
It means that $\{B_\nu-Id\}$ is a Cauchy sequence in $\mathfrak{L}(\ell^2_{s'},\ell^2_{s'+2\alpha})$ and its limit is denoted by $M_\omega-Id$, which satisfies $\|B_\nu-M_\omega\|_{\mathfrak{L}(\ell^2_{s'},\ell^2_{s'+2\alpha})}\leq c(\alpha,s,n) \epsilon_\nu^{\frac12}$. Recall that from Lemma \ref{L4.6}, $ [B_\nu-Id]_{s,\alpha+}^{{\Bbb T}^n, D_{\varepsilon}}\leq \epsilon_0^{\frac12}. $ It follows that $\|B_\nu-Id\|_{\mathfrak{L}(\ell^2_{s'},\ell^2_{s'+2\alpha})}\leq c(\alpha,s,n)\epsilon_0^{\frac12}$. Set $\nu\rightarrow\infty,$ we obtain $$\|M_\omega-Id\|_{\mathfrak{L}(\ell^2_{s'},\ell^2_{s'+2\alpha})}\leq c(\alpha,s,n)\epsilon_0^{\frac12}.$$ The estimates on $B_\nu^{-1}$ and $M_\omega^{-1}$ are similar, we omit them for simplicity. \end{proof} It is easy to show ${M}_\omega^{-1}(\theta)={\overline{M}}^T_\omega(\theta)$ from (\ref{inverseM}) when $\theta\in{\Bbb T}^n$. Moreover, we can improve Lemma \ref{App02} to the following \begin{Lemma}\label{App03} If $\epsilon_0 \ll 1,$ then\\ i) $[B_\nu-M_\omega]_{s,\alpha+}^{{\Bbb T}^n, D_{\varepsilon}},\ [B_\nu^{-1}-M_\omega^{-1} ]_{s,\alpha+}^{{\Bbb T}^n, D_{\varepsilon}}\leq c(\alpha,s,n)\epsilon_\nu^{\frac12};$ \\ ii) $[M_\omega-Id]_{s,\alpha+}^{{\Bbb T}^n, D_{\varepsilon}},\ [M_\omega^{-1} -Id]_{s,\alpha+}^{{\Bbb T}^n, D_{\varepsilon}}\leq c(\alpha,s,n)\epsilon_0^{\frac12}$. \end{Lemma} \begin{Remark} Note that $M_\omega -Id\in \mathcal{M}_{s,\alpha}^+$ and $\alpha>0$, it follows from Lemma \ref{daishu01} that $M_\omega -Id\in \mathfrak{L}(\ell_{1}^2, \ell_{1}^2)$ and satisfies $$\|(M_\omega -Id)\xi\|_{1}\leq c(\alpha,s)[M_\omega -Id]^{{\Bbb T}^n, D_{\varepsilon}}_{s,\alpha+}\|\xi\|_{1}\leq c(\alpha,s,n)\epsilon_0^{\frac12}\|\xi\|_{1}, $$ for $(\theta,\omega)\in {\Bbb T}^n \times D_{\varepsilon}$. 
Thus for $(\theta,\omega)\in {\Bbb T}^n \times D_{\varepsilon}$ \begin{eqnarray}\label{Momega} \|M_\omega \xi\|_{1}\leq c(\alpha,s,n)\|\xi\|_{1}. \end{eqnarray} Similarly, for $(\theta,\omega)\in {\Bbb T}^n \times D_{\varepsilon}$, \begin{eqnarray}\label{Momegainverse} \|M_\omega^{-1} \xi\|_{1}\leq c(\alpha,s,n)\|\xi\|_{1}. \end{eqnarray} Denote $\Phi_\omega^\infty(\xi,\eta)=(\overline{M}_\omega\xi,M_\omega\eta)$. From (\ref{Momega}) and (\ref{Momegainverse}) $\Phi_\omega^\infty $ and its inverse are bounded operators from $Y_{1}$ into $Y_{1}$. \end{Remark} In the following we denote $U_{\nu} : = B_{\nu}^{-1}N_0 B_{\nu} $, $U_{\infty} : = M_{\omega}^{-1}N_0 M_{\omega} $, $V_{\nu} : = B_\nu^{-1}P^{(\nu)}B_\nu $ and $V_{\infty} : = \varepsilon M_{\omega}^{-1}P(\theta)M_{\omega} $ for simplicity. We will prove that \begin{Lemma}\label{convergence02} For any $y\in{\Bbb R}^n$,\ $(\xi,\eta)\in Y_{s'}$ with $1\leq s'\leq \max\{s,1\}$, \begin{equation}\label{phies} \lim_{\nu\rightarrow\infty}H^{(\nu)}\circ\Phi^\nu(\xi,\eta)=\mathcal{H}\circ\Phi_\omega^\infty(\xi,\eta), \end{equation} uniformly for $\omega\in D_\varepsilon$ and $\theta\in{\Bbb T}^n$. \end{Lemma} \begin{proof} From the definition, $\Phi^\nu(\xi,\eta)=(\overline{B_\nu}\xi,B_\nu\eta)$ and $(\overline{B_\nu})^T=B_\nu^{-1}$. Thus, \begin{eqnarray*} H^{(\nu)}\circ\Phi^\nu&=&\langle \omega,y\rangle+\langle \overline{B_\nu}\xi,(N_0+P^{(\nu)})B_\nu\eta \rangle\\ &=&\langle \omega,y\rangle+\langle \xi,B_\nu^{-1}(N_0+P^{(\nu)})B_\nu\eta \rangle\\ &=&\langle \omega,y\rangle+\langle \xi,U_\nu\eta \rangle+\langle \xi,V_\nu\eta \rangle. \end{eqnarray*} On the other hand, by a straightforward computation, we have \begin{eqnarray*} \mathcal{H}\circ\Phi_\omega^\infty&=&\langle \omega,y\rangle+\langle \xi,U_\infty\eta \rangle+\langle \xi,V_\infty\eta \rangle. 
\end{eqnarray*} Then (\ref{phies}) is proved by the following two lemmas. \end{proof} \begin{Lemma}\label{convergence03} For $(\xi,\eta)\in Y_{\bar{s}}$ with $\bar{s}\geq 1$, $$\lim_{\nu\rightarrow\infty}\langle \xi,(V_\nu-V_\infty)\eta \rangle=0,$$ uniformly for $\omega\in D_\varepsilon,\ \theta\in{\Bbb T}^n$. \end{Lemma} \begin{proof}Consider \begin{eqnarray*} V_\nu-V_\infty&=&B_\nu^{{-1}}P^{(\nu)}B_\nu-\varepsilon M_\omega^{-1} P {M_\omega}\\ &=&B_\nu^{{-1}}(P^{(\nu)}-\varepsilon P)B_\nu+\varepsilon (B_\nu^{{-1}}-M_\omega^{-1}) P B_\nu+\varepsilon B_\nu^{{-1}}P(B_\nu-{M_\omega})\\ &:=&I_1+I_2+I_3. \end{eqnarray*} We first estimate $I_1$. Note for $\theta\in{\Bbb T}^n,$ from Lemma \ref{l4.1}, \begin{eqnarray*} [P^{(\nu)}-\varepsilon P]_{s,\alpha}^{D_\varepsilon,{\Bbb T}^n}\leq\sum_{m=\nu}^\infty[P^{(m+1)}-P^{(m)}]_{s,\alpha}^{D_\varepsilon,{\Bbb T}^n}\leq c(n,\alpha,\beta)\varepsilon\epsilon_{\nu}. \end{eqnarray*} On the other hand, by Lemma \ref{L4.6} we have \begin{eqnarray}\label{binfty} [B_\nu^{{-1}}-Id]_{s,\alpha+}^{D_\varepsilon,{\Bbb T}^n},\ [B_\nu-Id]_{s,\alpha+}^{D_\varepsilon,{\Bbb T}^n}\leq \epsilon_0^{\frac{1}{2}}, \end{eqnarray} then, from Lemma \ref{daishu}, \begin{eqnarray*} [I_1]_{s,\alpha}^{D_\varepsilon,{\Bbb T}^n}&\leq& [(B_\nu^{{-1}}-Id)(P^{(\nu)}-\varepsilon P)(B_\nu-Id)]_{s,\alpha}^{D_\varepsilon,{\Bbb T}^n}+ [(B_\nu^{{-1}}-Id)(P^{(\nu)}-\varepsilon P)]_{s,\alpha}^{D_\varepsilon,{\Bbb T}^n}\nonumber\\ &+ &[(P^{(\nu)}-\varepsilon P)(B_\nu-Id)]_{s,\alpha}^{D_\varepsilon,{\Bbb T}^n}+ [ P^{(\nu)}-\varepsilon P ]_{s,\alpha}^{D_\varepsilon,{\Bbb T}^n}\nonumber\\ &\leq& \epsilon_{\nu}. 
\end{eqnarray*} For $I_2$, note that $[B_\nu^{{-1}}-M_\omega^{-1}]_{s,\alpha+}^{D_\varepsilon,{\Bbb T}^n}\leq\epsilon_\nu^{\frac{1}{2}}$ by Lemma \ref{App03}, thus \begin{eqnarray*} [I_2]_{s,\alpha}^{D_\varepsilon,{\Bbb T}^n}\leq [\varepsilon (B_\nu^{{-1}}-M_\omega^{-1}) P (B_\nu-Id)]_{s,\alpha}^{D_\varepsilon,{\Bbb T}^n}+[\varepsilon (B_\nu^{{-1}}-M_\omega^{-1}) P ]_{s,\alpha}^{D_\varepsilon,{\Bbb T}^n}\leq c(s,n,\alpha,\beta) \varepsilon \epsilon_{\nu}^{\frac{1}{2}} \end{eqnarray*} combined with (\ref{binfty}). Similarly, $ [I_3]_{s,\alpha}^{D_\varepsilon,{\Bbb T}^n}\leq c(s,n,\alpha,\beta) \varepsilon \epsilon_{\nu}^{\frac{1}{2}}. $ Finally, we have \begin{eqnarray}\label{VnuminusVinfty} [V_\nu-V_\infty]_{s,\alpha}^{D_\varepsilon,{\Bbb T}^n}\leq c(s,n,\alpha,\beta)\varepsilon \epsilon_{\nu}^{\frac{1}{2}}, \end{eqnarray} and $V_\nu-V_\infty\in\mathfrak{L}(\ell_{\bar{s}}^2,\ell_{-\bar{s}}^2)$. By Lemma \ref{daishu} and (\ref{VnuminusVinfty}) we obtain \begin{eqnarray*} |\langle \xi,(V_\nu-V_\infty)\eta\rangle|\leq\|\xi\|_{\bar{s}} \|(V_\nu-V_\infty)\eta\|_{-\bar{s}}\leq c(s,n,\alpha,\beta)\varepsilon \epsilon_{\nu}^{\frac{1}{2}}\|\xi\|_{\bar{s}}\|\eta\|_{\bar{s}}\rightarrow 0 \end{eqnarray*} as $\nu\rightarrow\infty$ uniformly for $\omega\in D_\varepsilon,\ \theta\in{\Bbb T}^n$. \end{proof} \begin{Lemma}\label{convergence04} For $(\xi,\eta)\in Y_{s'}$ with $1\leq s'\leq \max\{s,1\}$, $$\lim_{\nu\rightarrow\infty}\langle \xi,(U_\nu-U_\infty)\eta \rangle=0,$$ uniformly for $\omega\in D_\varepsilon,\ \theta\in{\Bbb T}^n$. 
\end{Lemma} \begin{proof} Consider \begin{eqnarray*} \langle \xi, (U_\nu-U_\infty)\eta\rangle &=&\langle \xi, (B_\nu^{{-1}}N_0 B_\nu- M_\omega^{-1} N_0 {M_\omega})\eta\rangle \\ &=& \langle \xi, (B_\nu^{{-1}}-M_\omega^{-1}) N_0 B_\nu \eta\rangle + \langle \xi, M_\omega^{-1}N_0({M_\omega} -B_\nu)\eta \rangle. \end{eqnarray*} \indent We estimate the first term. For $ {\eta'}\in\ell_{s'}^2$, $\|N_0\eta'\|_{s'-2}\leq c_1\|\eta'\|_{s'}$ by $\lambda_a\leq c_1w_a$. From (\ref{binfty}) and Lemma \ref{daishu01} we obtain $\|N_0B_{\nu}\eta\|_{s'-2}\leq c(\alpha, s, n)\|\eta\|_{s'}$. Therefore, \begin{eqnarray*} &&|\langle \xi,(B_\nu^{{-1}}-M_\omega^{-1}) N_0 B_\nu\eta \rangle|\\ &=&|\langle (\overline{B}_{\nu}-\overline{M}_\omega) \xi,N_0 B_\nu\eta \rangle|\\ {\rm \underline{from\ Cauchy\ and \ 1\leq s'\leq \max\{s,1\} }} &\leq & \|(\overline{B}_\nu-\overline{M}_\omega) \xi\|_{s'}\|N_0 B_\nu\eta\|_{s'-2}\\ {\rm \underline{Lemma\ \ref{App03},\ Lemma\ \ref{daishu01}}} &\leq& c(\alpha, s,n)\epsilon_\nu^{\frac{1}{2}}\|\xi\|_{s'}\| \eta\|_{s'}. \end{eqnarray*} Similarly, for $(\xi,\eta)\in Y_{s'}$, we have $|\langle \xi,M_\omega^{-1}N_0({M_\omega} -B_\nu)\eta \rangle| \leq c(\alpha, s,n) \epsilon_\nu^{\frac{1}{2}}\|\xi\|_{s'}\| \eta\|_{s'}$. The conclusion is clear now. \end{proof} \indent Combining with Lemma \ref{convergence01} and Lemma \ref{convergence02}, we finish the proof of Lemma \ref{convergence}.\qed \\ \indent From Lemma \ref{convergence} and the concrete form of $\Phi^{\infty}$, we obtain that $$(\omega t, \star, \overline{M}_{\omega}(\omega t)e^{-{\rm i}\overline{N}_{\infty}t}\xi_0, M_{\omega}(\omega t)e^{{\rm i} N_{\infty} t}\eta_0)$$ are the solutions of the Hamiltonian system (\ref{autohs}). 
Thus, $(\overline{M}_{\omega}(\omega t)e^{-{\rm i}\overline{N}_{\infty}t}\xi_0, M_{\omega}(\omega t)e^{{\rm i} N_{\infty} t}\eta_0)$ are clearly the solutions of the Hamiltonian system (\ref{hameq00}). We complete the proof of Theorem \ref{MainTheorem}. \qed \section{Appendix}\label{appendix} \subsection{Proof of Lemma \ref{psismooth}.} \begin{proof} Recall that $$ \Psi_\omega\left(\theta\right)\left(\sum_{a\in\mathcal{E}}\xi_a\Phi_a\left(x\right)\right)=\sum_{a\in\mathcal{E}}\left(M^T_\omega\left(\theta\right)\xi\right)_a\Phi_a\left(x\right).$$ From Theorem \ref{MainTheorem}, it is easy to show by definition $M^T_\omega-Id \in{\mathcal{C}^{\mu}\left({\Bbb T}^n, \mathfrak{L}\left(\ell^2_{s'}, \ell^2_{s'+2\alpha}\right)\right) }$ and \begin{eqnarray*} \|M^T_\omega -Id\|_{\mathcal{C}^{\mu}\left({\Bbb T}^n, \mathfrak{L}\left(\ell^2_{s'}, \ell^2_{s'+2\alpha}\right)\right) }&\leq& C\left(n,\beta, \mu, d, s\right) \epsilon^{\frac{3}{2\beta}\left(\frac{2}{9}\beta-\mu\right)}. \end{eqnarray*} (a)\quad From definition, \begin{eqnarray*} &&\|\Psi_\omega(\cdot)-id\|_{ \mathfrak{L}\left(\mathcal{H}^{s'},\mathcal{H}^{s'+2\alpha}\right)}=\sup_{\|u\|_{\mathcal{H}^{s'}}=1}\|\Psi_\omega(\cdot)u-u\|_{\mathcal{H}^{s'+2\alpha}}\\ &=&\sup_{\|\xi\|_{\ell^2_{s'}}=1} \|M^T_\omega\xi-\xi\|_{\ell^2_{s'+2\alpha}} = \|M^T_\omega-Id\|_{\mathfrak{L}\left(\ell^2_{s'},\ell^2_{s'+2\alpha}\right)}\leq C\left(n,\beta, \mu, d, s\right) \epsilon^{\frac{3}{2\beta}\left(\frac{2}{9}\beta-\mu\right)}. 
\end{eqnarray*} (b)\quad For $b=\mu-[\mu]\in\left(0,1\right)$, $z_1,z_2\in {\Bbb R}^{n}$ with $0<|z_1-z_2|<2\pi$, \begin{eqnarray*} &&\frac{\|\Psi_\omega\left(z_1\right)-\Psi_\omega\left(z_2\right)\|_{\mathfrak{L}\left(\mathcal{H}^{s'},\mathcal{H}^{s'+2\alpha}\right)}}{{|z_1-z_2|^{b}}}\\ &=&\frac{1}{{|z_1-z_2|^{b}}}\sup_{\|u\|_{\mathcal{H}^{s'}}=1}\|\Psi_\omega \left(z_1\right)u-\Psi_\omega\left(z_2\right)u\|_{ \mathcal{H}^{s'+2\alpha} }\\ &=&\frac{1}{{|z_1-z_2|^{b}}}\sup_{\|\xi\|_{\ell^2_{s'}}=1}\|M^T_\omega\left(z_1\right)\xi-M^T_\omega\left(z_2\right)\xi\|_{ \ell^2_{s'+2\alpha}}\\ &= &\frac{1}{{|z_1-z_2|^{b}}} \|M^T_\omega\left(z_1\right)-M^T_\omega\left(z_2\right)\|_{\mathfrak{L}\left(\ell^2_{s'},\ell^2_{s'+2\alpha}\right)} \end{eqnarray*} which shows that \begin{eqnarray*} \|\Psi_\omega-id\|_{\mathcal{C}^{b}\left({\Bbb T}^n, \mathfrak{L}\left(\mathcal{H}^{s'},\mathcal{H}^{s'+2\alpha}\right)\right)}= \|M^T_\omega-Id\|_{\mathcal{C}^{b}\left({\Bbb T}^n, \mathfrak{L}\left(\ell^2_{s'}, \ell^2_{s'+2\alpha}\right)\right) }\leq C\left(n,\beta, \mu, d, s\right) \epsilon^{\frac{3}{2\beta}\left(\frac{2}{9}\beta-\mu\right)}. \end{eqnarray*} (c)\quad Denote $\langle\mathcal{A}\left(z\right),h\rangle u:=\sum_{a\in\mathcal{E}}\left(\langle A\left(z\right),h\rangle\xi\right)_a\Phi_a\left(x\right)$ for $h\in{\Bbb R}^n$ where we use the notation $A:=\left(M^T_\omega-Id\right)'_z$ and $\xi\in \ell_{s'}^2$ and $0\leq s'\leq s$. Note $M_{\omega}^T-Id\in C^{\mu}({\Bbb T}^n, \mathfrak{L}(\ell_{s'}^2, \ell_{s'+2\alpha}^2)) $, it follows that for any $z\in {\Bbb T}^n$, $\mathcal{A}(z)\in \mathfrak{L}({\Bbb R}^n, \mathfrak{L}(\mathcal{H}^{s'}, \mathcal{H}^{s'+2\alpha}))$. 
This is because \begin{eqnarray} \|\mathcal{A}\|_{\mathfrak{L}\left({\Bbb R}^n, \mathfrak{L}\left(\mathcal{H}^{s'},\mathcal{H}^{s'+2\alpha}\right)\right)}&=&\sup_{\|u\|_{\mathcal{H}^{s'}}=1,\|h\|=1} \|\langle\mathcal{A}\left(z\right),h\rangle u\|_{ \mathcal{H}^{s'+2\alpha} }\nonumber\\ &=& \sup_{\|\xi\|_{\ell^2_{s'}}=1,\|h\|=1} \|\langle A\left(z\right),h\rangle\xi\|_{ \ell^2_{s'+2\alpha} }\nonumber\\ &= & \| A(z)\|_{\mathfrak{L}\left({\Bbb R}^n, \mathfrak{L}\left(\ell^2_{s'},\ell^2_{s'+2\alpha} \right)\right)}\label{dengnorm}\\ &\leq& \|M^T_\omega-Id\|_{\mathcal{C}^{b}\left({\Bbb T}^n, \mathfrak{L}\left(\ell^2_{s'}, \ell^2_{s'+2\alpha}\right)\right) }.\nonumber \end{eqnarray} Given $z_0\in{\Bbb T}^n$, \begin{eqnarray*} &&\|\Psi\left(z\right)-\Psi\left(z_0\right)-\langle\mathcal{A}\left(z_0\right),z-z_0\rangle\|_{ \mathfrak{L}\left(\mathcal{H}^{s'},\mathcal{H}^{s'+2\alpha}\right)}\\ &=&\sup_{\|u\|_{\mathcal{H}^{s'}}=1} \|\left(\Psi\left(z\right)-\Psi\left(z_0\right)-\langle\mathcal{A}\left(z_0\right),z-z_0\rangle\right) u\|_{ \mathcal{H}^{s'+2\alpha} }\\ &=&\sup_{\|\xi\|_{\ell^2_{s'}}=1} \|\left(M^T_\omega\left(z\right)-M^T_\omega\left(z_0\right)-\langle A\left(z_0\right),z-z_0\rangle\right)\xi\|_{ \ell^2_{s'+2\alpha} }\\ &=& \|M^T_\omega\left(z\right)-M^T_\omega\left(z_0\right)-\langle A\left(z_0\right),z-z_0\rangle\|_{ \mathfrak{L}\left(\ell^2_{s'},\ell^2_{s'+2\alpha} \right)} \end{eqnarray*} Note $M^T_\omega-Id \in{\mathcal{C}^{\mu}\left({\Bbb T}^n, \mathfrak{L}\left(\ell^2_{s'}, \ell^2_{s'+2\alpha}\right)\right) }$, then $$\|M^T_\omega\left(z\right)-M^T_\omega\left(z_0\right)-\langle A\left(z_0\right),z-z_0\rangle\|_{ \mathfrak{L}\left(\ell^2_{s'},\ell^2_{s'+2\alpha} \right)}=o\left(|z-z_0|\right),\ z\rightarrow z_0.$$ Therefore, $$\|\Psi\left(z\right)-\Psi\left(z_0\right)-\langle\mathcal{A}\left(z_0\right),z-z_0\rangle\|_{ 
\mathfrak{L}\left(\mathcal{H}^{s'},\mathcal{H}^{s'+2\alpha}\right)}=o\left(|z-z_0|\right),\ z\rightarrow z_0,$$ which shows that $\Psi\left(z\right)$ is Fr\'echet differentiable at $z_0.$ Moreover, following (b), we have \begin{eqnarray*} &&\sup_{\substack{\|u\|_{\mathcal{H}^{s'}}=1,\|h\|=1\\z_1,z_2\in {\Bbb R}^n, 0<|z_1-z_2|<2\pi}} \frac{1}{{|z_1-z_2|^{b}}} \|\langle \mathcal{A}\left(z_1\right)-\mathcal{A}\left(z_2\right),h\rangle u\|_{ \mathcal{H}^{s'+2\alpha} }\\ &=&\sup_{\substack{\|\xi\|_{\ell^2_{s'}}=1,\|h\|=1\\z_1,z_2\in {\Bbb R}^n, 0<|z_1-z_2|<2\pi}} \frac{1}{{|z_1-z_2|^{b}}}\|\langle {A}\left(z_1\right)- {A}\left(z_2\right),h\rangle \xi\|_{ \ell^2_{s'+2\alpha} }\\ &= &\sup_{ z_1,z_2\in {\Bbb R}^n, 0<|z_1-z_2|<2\pi} \frac{1}{{|z_1-z_2|^{b}}} \| {A}\left(z_1\right)- {A}\left(z_2\right)\|_{\mathfrak{L}\left({\Bbb R}^n,\mathfrak{L}\left(\ell^2_{s'},\ell^2_{s'+2\alpha}\right)\right)}. \end{eqnarray*} Combining with (\ref{dengnorm}), we have \begin{eqnarray*} \|\Psi_\omega(\cdot )-id\|_{\mathcal{C}^{1+b}\left({\Bbb T}^n, \mathfrak{L}\left(\mathcal{H}^{s'},\mathcal{H}^{s'+2\alpha}\right)\right)} = \|M^T_\omega(\cdot )-Id\|_{\mathcal{C}^{1+b}\left({\Bbb T}^n, \mathfrak{L}\left(\ell^2_{s'}, \ell^2_{s'+2\alpha}\right)\right) }\leq C\left(n,\beta, \mu, d, s\right) \epsilon^{\frac{3}{2\beta}\left(\frac{2}{9}\beta-\mu\right)} \end{eqnarray*} with $1+b\leq\mu.$ Inductively, we can show that \begin{eqnarray*} \|\Psi_\omega(\cdot)-id\|_{\mathcal{C}^{k+b}\left({\Bbb T}^n, \mathfrak{L}\left(\mathcal{H}^{s'},\mathcal{H}^{s'+2\alpha}\right)\right)} = \|M^T_\omega(\cdot )-Id\|_{\mathcal{C}^{k+b}\left({\Bbb T}^n, \mathfrak{L}\left(\ell^2_{s'}, \ell^2_{s'+2\alpha}\right)\right) }\leq C\left(n,\beta, \mu, d, s\right) \epsilon^{\frac{3}{2\beta}\left(\frac{2}{9}\beta-\mu\right)} \end{eqnarray*} with $k+b\leq\mu.$ Thus we finish the proof of Lemma \ref{psismooth}. 
\end{proof} \subsection{Proof of Lemma \ref{smoothinginverse}} \begin{proof} Following Salamon\cite{Sal04}, it is enough to consider the case $\mu=\ell$. Moreover, once the result has been established for $0<\ell<1$ it follows for $\ell>1$ by Cauchy's estimate. Therefore we assume $0<\iota=\mu=\ell<1$.\\ \indent Define $g_\nu=f_\nu-f_{\nu-1}$. Then $f=\sum g_\nu$ satisfies the estimate \begin{eqnarray*} |f|_{X}\leq c\sum_{\nu=1}^\infty\sigma_\nu^{\iota}=c\sum_{\nu=1}^\infty\sigma^{\iota(\frac{3}{2})^\nu}\leq \frac{2c}{\iota}\sigma^{\iota}, \end{eqnarray*} where we use the fact that $\sum_{\nu=1}^\infty\sigma^{\iota(\frac{3}{2})^\nu}\leq \sigma^{\iota}\sum_{\nu\geq0}(\frac{1}{2^\nu})^{\iota}\leq \frac{2 }{\iota}\sigma^{\iota}$ for $0<\sigma\leq1/4.$\\ \indent For $x,y\in{\Bbb R}^n$ with $\sigma<|x-y|\leq1$ this implies $ |f(x)-f(y)|_{X}\leq \frac{4c}{\iota}\sigma^{\iota}\leq \frac{4c}{\iota}|x-y|^{\iota}. $ In the case $0<|x-y|\leq\sigma$ there is an integer $N\geq0$ such that $\sigma_{N+1}<|x-y|\leq\sigma_N$. Following Cauchy's estimate, $|\partial_xg_\nu(u)|_{X}\leq c\sigma_\nu^{\iota-1}$ for every $u\in{\Bbb R}^n$, we have $ |g_\nu(x)-g_\nu(y)|_{X}\leq c\sigma_\nu^{\iota-1}|x-y|. $ We shall use this estimate for $\nu=1,2,\cdots,N$. For $\nu\geq N+1$ we use the trivial estimate $ |g_\nu(x)-g_\nu(y)|_{X}\leq 2c\sigma_\nu^{\iota}. $ Taking into account the inequalities we obtain that \begin{eqnarray*} |f(x)-f(y)|_{X}&\leq& \sum_{1\leq\nu\leq N}|g_\nu(x)-g_\nu(y)|_{X}+\sum_{\nu> N}|g_\nu(x)-g_\nu(y)|_{X}\\ &\leq& c|x-y|\sum_{1\leq\nu\leq N}\sigma_\nu^{\iota-1}+2c\sum_{\nu> N}\sigma_\nu^{\iota}\\ &\leq& c|x-y|\sigma_N^{\iota-1}\sum_{0\leq\nu\leq N-1}\left(\frac{1}{2^\nu}\right)^{1-\iota}+2c\sigma_{N+1}^{\iota}\sum_{\nu\geq0}\left(\frac{1}{2^\nu}\right)^{\iota}\\ &\leq& \frac{4c}{1-\iota}|x-y|\sigma_N^{\iota-1}+\frac{4c}{\iota}\sigma_{N+1}^{\iota}\\ &=& \frac{4c}{\iota(1-\iota)}|x-y|^{\iota}.
\end{eqnarray*} We finish the proof. \end{proof} \begin{thebibliography}{2017} \bibitem{BG} Bambusi, D., Graffi, S.: Time quasi-periodic unbounded perturbations of Schr\"odinger operators and KAM method. Commun. Math. Phys. \textbf{219}(2), 465-480(2001) \bibitem{Bam1} Bambusi, D.: Reducibility of 1-d Schr\"odinger equation with time quasiperiodic unbounded perturbations, I. Trans. AMS, To appear(2017); arXiv: 1606.04494 [math.DS](2016) \bibitem{Bam2} Bambusi, D.: Reducibility of 1-d Schr\"odinger equation with time quasiperiodic unbounded perturbations, II. Commun. Math. Phys. doi: 10. 1007/s00220-016-2825-2(2017) \bibitem{BBP2} Berti, M., Biasco, L., Procesi, M.: KAM for the reversible derivative wave equation. Arch. Rational Mech. Anal. \textbf{212}, 905-955(2014) \bibitem{BaGrMaRo} Bambusi, D., Gr\'{e}bert, B., Maspero, A., Robert, D.: Reducibility of the quantum harmonic oscillator in d-dimensions with polynomial time dependent perturbation, 1-d Schr\"odinger equation with time quasiperiodic unbounded perturbations. preprint, arXiv: 1702.05274v1 [math. AP](2017) \bibitem{BEL} Bellissard, J.: Stability and instability in quantum mechanics. { Trends and developments in the eighties}(Bielefeld, 1982/1983), 1-106. Singapore: World Scientific, 1985. \bibitem{Ber} Berger, M.: Nonlinearity and functional analysis. Lectures on nonlinear problems in mathematical analysis. Pure and Applied Mathematics. Academic Press, New York-London, 1977. \bibitem{Berti} Berti, M.: KAM for PDEs. Boll. Unione Mat. Ital. \textbf{9}, 115-142(2016) \bibitem{BB14} Berti, M., Bolle, P.: Sobolev quasiperiodic solutions of multidimensional wave equations with a multiplicative potential. Nonlinearity \textbf{25}, 2579-2613(2012) \bibitem{BB13} Berti, M., Bolle, P.: Quasi-periodic solutions with Sobolev regularity of NLS on ${\Bbb T}^d$ with a multiplicative potential. Eur. J. Math.
\textbf{15}, 229-286(2013) \bibitem{BLE} Blekher, P.M., Jauslin, H.R., Lebowitz, J.L.: Floquet spectrum for two-level systems in quasiperiodic time-dependent fields. J. Statist. Phys. \textbf{68}(1-2), 271-310(1992) \bibitem{CQ} Chierchia, L., Qian, D.: Moser's theorem for lower dimensional tori. J. Diff. Eqs. \textbf{206}, 55-93(2004) \bibitem{Com87} Combescure, M.: The quantum stability problem for time-periodic perturbations of the harmonic oscillator. Ann. Inst. H. Poincar\'e Phys. Th\'eor. \textbf{47}(1), 63-83(1987) \bibitem{Del14} Delort, J. M.: Growth of Sobolev norms for solutions of time dependent Schr\"odinger operators with harmonic oscillator potential. Comm. Partial Differential Equations, \textbf{39}(1), 1-33(2014) \bibitem{DS} Duclos, P., Stovicek, P.: Floquet Hamiltonians with pure point spectrum. Commun. Math. Phys. \textbf{177}, 327-347(1996) \bibitem{DSV} Duclos, P., Stovicek, P., Vittot, M.: Perturbation of an eigenvalue from a dense point spectrum: A general Floquet Hamiltonian. An. Inst. H. Poincar\'e Phys. Th\'eor. \textbf{71}, 241-301(1999) \bibitem{EGK} Eliasson, H. L., Gr\'{e}bert, B., Kuksin, S. B.: KAM for the nonlinear beam equation. Geom. Funct. Anal. \textbf{26}(6), 1588-1715(2016) \bibitem{EK} Eliasson, H. L., Kuksin, S. B.: KAM for the nonlinear Schr\"odinger equation. Ann. of Math. \textbf{172}, 371-435(2010) \bibitem{EK0} Eliasson, L.H., Kuksin, S.B.: On reducibility of Schr\"odinger equations with quasiperiodic in time potentials. Commun. Math. Phys. \textbf{286}(1), 125-135(2009) \bibitem{EV} Enss, V., Veselic, K.: Bound states and propagating states for time-dependent Hamiltonians. Ann IHP \textbf{39}(2), 159-191(1983) \bibitem{GY00} Graffi, S., Yajima, K.: Absolute continuity of the floquet spectrum for a nonlinearly forced harmonic oscillator. Commun. Math. Phys. 
\textbf{215}, 245-250(2000) \bibitem{GP} Gr\'{e}bert, B., Paturel, E.: On reducibility of quantum harmonic oscillator on ${\Bbb R}^d$ with quasiperiodic in time potential. preprint, arXiv: 1603.07455v1 [math.AP](2016) \bibitem{GP1} Gr\'{e}bert, B., Paturel, E.: KAM for the Klein Gordon equation on $\mathbb S^d$. Boll. Unione Mat. Ital. \textbf{9}(2), 237-288(2016) \bibitem{GT} Gr\'{e}bert, B., Thomann, L.: KAM for the quantum harmonic oscillator. Commun. Math. Phys. \textbf{307}, 383-427(2011) \bibitem{GXY} Geng, J., Xu, X., You, J.: An infinite dimensional KAM theorem and its application to the two dimensional cubic Schr\"odinger equation. Adv. Math. \textbf{226}, 5361-5402(2011) \bibitem{GY1} Geng, J., You, J.: A KAM theorem for one dimensional Schr\"{o}dinger equation with periodic boundary conditions. J. Diff. Eqs. \textbf{209}, 1-56(2005) \bibitem{GY2} Geng, J., You, J.: A KAM theorem for Hamiltonian partial differential equations in higher dimensional spaces. Commun. Math. Phys. \textbf{262}, 343-372(2006) \bibitem{HLS} Hagedorn, G., Loss, M., Slawny, J.: Non stochasticity of time-dependent quadratic Hamiltonians and the spectra of canonical transformations. J. Phys. A \textbf{19}(4), 521-531(1986) \bibitem{H} Howland, J.: Floquet operators with singular spectrum, I. An. Inst. H. Poincar\'e \textbf{49}, 309-323(1989) \bibitem{KLiang} Kappeler, T., Liang, Z.: A KAM theorem for the defocusing NLS equation with periodic boundary conditions. J. Diff. Eqs. \textbf{252}, 4068-4113(2012) \bibitem{KP} Kuksin, S.B., P\"oschel, J.: Invariant cantor manifolds of quasi-periodic oscillations for a nonlinear Schr\"odinger equation. Ann. of Math. \textbf{143}, 149-179(1996) \bibitem{Ku0} Kuksin, S.B.: Nearly integrable infinite-dimensional Hamiltonian systems. Lecture Notes in Mathematics \textbf{1556}, Springer-Verlag, Berlin, 1993. \bibitem{Ku1} Kuksin, S.B.: Analysis of Hamiltonina PDEs. Oxford University Press, Oxford, 2000. 
\bibitem{Ku2} Kuksin, S.B.: A KAM theorem for equations of the Korteweg-de Vries type. Rev. Math-Math Phys. \textbf{10}(3), 1-64(1998) \bibitem{LZ} Liang, Z.: Quasi-periodic solutions for 1D Schr\"odinger equations with the nonlinearity $|u|^{2p}u$. J. Diff. Eqs. \textbf{244}, 2185-2225(2008) \bibitem{LY1} Liang, Z., You, J.: Quasi-periodic solutions for 1D Schr\"odinger equations with higher order nonlinearity. SIAM J.Math. Anal. \textbf{36}, 1965-1990(2005) \bibitem{LiuYuan} Liu, J., Yuan, X.: A KAM theorem for Hamiltonian partial differential equations with unbounded perturbations. Comm. Math. Phys. \textbf{307}(3), 629-673(2011). \bibitem{LiuYuan0} Liu, J., Yuan, X.: Spectrum for quantum Duffing oscillator and small-divisor equation with large-variable coefficient. Comm. Pure Appl. Math. \textbf{63}(9), 1145-1172(2010) \bibitem{Moser1} Moser, J.: A rapidly convergent iteration method and non-linear partial differential equations I and II. Ann Scuola Norm, Sup. Pisa \textbf{20}, 265-315, 499-535(1966) \bibitem{Moser2} Moser, J.: On the construction of almost periodic solutions for ordinary differential equations. Proc. Intl. Conf. Funct. Anal. and Rel. Top. Tokyo, 60-67(1969) \bibitem{JOY} Joye, A.: Absence of absolutely continuous spectrum of Floquet operators. J. Statist. Phys. \textbf{75}(5-6), 929-952(1994) \bibitem{N} Nenciu, G.: Floquet operators without absolutely continuous spectrum. Ann. Inst. H. Poincar\'{e} \textbf{59}, 91-97(1993) \bibitem{Pos2} P\"oschel, J.: Invariante tori in differenzierbaren Hamiltonschen systemen. Bonner Mathematische Schriften \textbf{120} (1980) \bibitem{P2} P\"{o}schel, J.: A KAM theorem for some nonlinear partial differential equations. Ann. Sc. Norm. sup. Pisa CI. sci. \textbf{23}, 119-148(1996) \bibitem{PX} Procesi, M., Xu, X.: Quasi-t\"oplitz functions in KAM theorem. SIAM J. Math. Anal. \textbf{45}, 2148-2181(2013) \bibitem{Sal04} Salamon, D.A.: The Kolmogorov-Arnold-Moser theorem. Math. Phys. Electron. 
J., \textbf{10}, 3-37(electronic) (2004) \bibitem{SaZe} Salamon, D.A., Zehnder, E.: KAM theory in configuration space. Comment. Math. Helv. \textbf{64}, 84-132 (1989) \bibitem{Wang} Wang, W.: Pure point spectrum of the Floquet Hamiltonian for the quantum harmonic oscillator under time quasi-periodic perturbations. Commun. Math. Phys. \textbf{277}(2), 459-496(2008) \bibitem{WLiang} Wang, Z., Liang, Z.: Reducibility of 1d quantum harmonic oscillator perturbed by a quasiperiodic potential with logarithmic decay. Nonlinearity \textbf{30}, 1405-1448(2017) \bibitem{YZ13} Yuan, X., Zhang, K.: A reduction theorem for time dependent Schr\"odinger operator with finite differentiable unbounded perturbation. J. Math. Phys. \textbf{54}(5), 465-480(2013) \bibitem{ZGY} Zhang, J., Gao, M., Yuan, X.: KAM tori for reversible partial differential equations. Nonlinearity \textbf{24}, 1189-1228(2011) \end{thebibliography} \end{document}
\begin{document} \title{When will we have a quantum computer?} \author{M.I. Dyakonov} \affiliation{Laboratoire Charles Coulomb, Universit\'e Montpellier, CNRS, France} \maketitle {\bf 1. Introduction, historical background} The idea of quantum computing was first put forward in a rather vague form by the Russian mathematician Yuri Manin in 1980. In 1981, it was independently proposed by Richard Feynman. Realizing that (because of the exponential increase of the number of quantum states) computer simulations of quantum systems become impossible when the system is large enough, he advanced the idea that to make them efficient the computer itself should operate in the quantum mode: ``Nature isn\textsc{\char13}t classical and if you want to make a simulation of Nature, you\textsc{\char13}d better make it quantum mechanical, and by golly it\textsc{\char13}s a wonderful problem, because it doesn\textsc{\char13}t look so easy''. In 1985, David Deutsch formally described the universal quantum computer, as a quantum analogue of the universal Turing machine. The subject did not attract much attention until Peter Shor in 1994 proposed an algorithm that could factor very large numbers on an {\it ideal} quantum computer much faster compared to the conventional (classical) computer. This outstanding theoretical result has triggered an explosion of general interest in quantum computing and many thousands of research papers, mostly theoretical, have been and still continue to be published at an increasing rate. During the last 20 years one can hardly find an issue of any science digest magazine, or even of a serious physical journal, that does not address quantum computing. Quantum Information Centers are opening all over the globe, funds are generously distributed, and breathtaking perspectives are presented to the layman by enthusiastic scientists and journalists. 
Many researchers feel obliged to justify whatever research they are doing by claiming that it has some relevance to quantum computing. Computer scientists are proving and publishing new theorems related to quantum computers at a rate of {\it one article per day}. A huge number of proposals has been published for various physical objects that could serve as quantum bits, or qubits. As of September 25, 2018, Google gives 71,400,000 results for ``quantum computing'', and 331,000 results for ``quantum computing with'', and these numbers increase every day. The impression has been created that quantum computing - this modern version of the Holy Grail - is going to be the next technological revolution of the 21st century. When will we have useful quantum computers? The most optimistic experts say: ``in 10 years''; others predict 20 to 30 years (note that those expectations have remained unchanged during the last 20 years), and the most cautious ones say: ``not in my lifetime''. The present author belongs to the meager minority that has been answering ``not in any foreseeable future''\cite{Dyakonov}, and this point of view is explained below. {\bf The idea of quantum computing} is to store and process information in a way that is very different from that used in conventional computers, which basically operate with an assembly of on/off switches, physically realized as tiny transistors. At a given moment the state of the {\it classical} computer is described by a sequence ($\uparrow \downarrow \uparrow \uparrow \downarrow \uparrow \downarrow \downarrow$...), where $\uparrow$ and $\downarrow$ represent {\it bits} of information realized as the {\it on} and {\it off} states of individual transistors. With {\it N} transistors, there are $2^N$ different possible states of the computer. The computation process consists in a sequence of switching some transistors between their $\uparrow$ and $\downarrow$ states according to a prescribed program. 
In {\it quantum} computing one replaces the classical two-state element by a quantum element with two {\it basic} states, known as the quantum bit, or {\it qubit}. The simplest object of this kind is the electron internal angular momentum, spin, with the peculiar quantum property of having only two possible projections on {\it any} axis: +1/2 or $-$1/2 (in units of the Planck constant $\hbar$). For some chosen axis, we can again denote the two basic quantum states of the spin as $\uparrow$ and $\downarrow$. However, an {\it arbitrary} spin state is described by the wave function $\psi = a$$\uparrow$+ b$\downarrow$, where {\it a} and {\it b} are complex numbers, satisfying the normalization condition $|a|^2 + |b|^2 = 1$, so that $|a|^2$ and $|b|^2$ are the {\it probabilities} for the spin to be in the basic states $\uparrow$ and $\downarrow$ respectively. Unlike the classical bit, that can be only in {\it one} of the two states, $\uparrow$ or $\downarrow$, the qubit can be in a {\it continuum} of states defined by the quantum amplitudes {\it a} and {\it b}. Thus, in contrast to the classical bit, {\it\bf the qubit is a continuous object}. This property is often described by the rather mystical and frightening statement that the qubit can exist {\it simultaneously} in both of its $\uparrow$ and $\downarrow$ states. (This is like saying that a vector in the {\it x-y} plane directed at 45 degrees to the {\it x}-axis simultaneously points both in the {\it x-} and {\it y-}directions -- a statement that is true in some sense, but does not have much useful content.) Note that since {\it a} and {\it b} are complex numbers satisfying the normalization condition, and since the overall phase of the wave function is irrelevant, there remain two free parameters defining the state of a single qubit ({\bf exactly like for a classical vector} whose orientation in space is defined by two polar angles). This analogy does not apply any longer when the number of qubits is 2 or more. 
With two qubits, one has $2^2$ basic states: $\uparrow\uparrow, \uparrow\downarrow, \downarrow\uparrow$, and $\downarrow\downarrow$. Accordingly, they are described by the wave function $\psi=a\uparrow\uparrow+b\uparrow\downarrow+c\downarrow\uparrow+d\downarrow\downarrow$ with 4 complex amplitudes {\it a, b, c}, and {\it d}. In the general case of {\it N} qubits, the state of the system is described by $2^N$ complex amplitudes restricted by the normalization condition only. {\bf While the state of the classical computer with $N$ bits at any given moment coincides with {\it one} of its $2^N$ possible discrete states, the state of a quantum computer with $N$ qubits is described by the values of $2^N$ {\it continuous} variables, the quantum amplitudes}. This is at the origin of the supposed power of the quantum computer, but it is also the reason for its great fragility and vulnerability. The information processing is supposed to be done by applying unitary transformations (quantum gates), that change these amplitudes {\it a, b, c...} in a precise and controlled manner. The number of qubits needed to have a useful machine (i.e. one that can compete with your laptop in solving certain problems, such as factoring very large numbers by Shor\textsc{\char13}s algorithm) is estimated to be $10^3-10^5$. As a result, the number of continuous variables describing the state of such a quantum computer at any given moment is at least $2^{1000} \sim 10^{300}$, which is much, much greater than the number of particles in the whole Universe (only $\sim 10^{80}$)! At this point a normal engineer, or an experimentalist, loses interest. Indeed, possible errors in a classical computer consist in the fact that one or more transistors are switched off instead of being switched on, or vice versa. This certainly is an unwanted occurrence, but can be dealt with by relatively simple methods employing {\it redundancy}.
In contrast, accomplishing the Sisyphean task of keeping under control $10^{300}$ continuous variables is absolutely unimaginable. However, the QC theorists have succeeded in transmitting to the media and to the general public the belief that the feasibility of large-scale quantum computing has been {\it proven} via the famous threshold theorem: once the error per qubit per gate is below a certain value, indefinitely long quantum computation becomes feasible, at a cost of substantially increasing the number of qubits (the logical qubit is encoded by several physical qubits). Very luckily, the number of qubits increases {\it only polynomially} with the size of computation, so that the total number of qubits needed must increase from $N = 10^3$ to $N = 10^6-10^9$ only (with a corresponding increase of the atrocious number of $2^N$ continuous parameters defining the state of the whole machine!) \cite{Lidar}. In this context, Leonid Levin, professor of mathematics at Boston University, has made the following pertinent remark: {\it What thought experiments can probe the QC to be in the state described with the accuracy needed? I would allow to use the resources of the entire Universe, but not more!} {\bf 2. Expert panels: 2018 {\it vs} 2002} Seventeen years ago, in 2002, at the request of the Advanced Research and Development Activity (ARDA) agency of the United States government, a team of distinguished experts in quantum information established a roadmap \cite{arda} for quantum computing, with the following five- and ten-year goals: \\- encode a single qubit into the state of a logical qubit formed from several physical qubits; \\- perform repetitive error correction of the logical qubit; and \\- transfer the state of the logical qubit into the state of another set of physical qubits with high fidelity; \\- and by the year 2012, to implement a concatenated \cite{concat} error-correcting code. 
The 2007 goal requires ``something on the order of ten physical qubits and multiple logic operations between them'', while the 2012 goal ``requires on the order of 50 physical qubits, exercises multiple logical qubits through the full range of operations required for fault-tolerant QC in order to perform a simple instance of a relevant quantum algorithm''. While a benevolent jury could consider the first two of the 2007 goals to be partly achieved by now, the expectations for the third 2007 goal, and especially for the 2012 goal, are {\bf wildly off the mark}. So are some other predictions of the ARDA panel: ``As larger-scale quantum computers are developed over the next five and ten years, quantum simulation is likely to continue to be the application for which quantum computers can give substantial improvements over classical computation''. Very recently, in late 2018, another expert panel assembled by the U.S. National Academies of Science, Engineering and Medicine issued a detailed 205-page report discussing some of the challenges facing QC as a technology of practical value \cite{National}. The authors of the report state that {\it no quantum computer} will be capable of breaking cryptographic codes based on prime number factoring within the next decade, and do not provide any opinion on whether or not this will be possible in a more distant future. {\bf Experimental studies} related to the idea of quantum computing make only a tiny part of the huge QC literature. They represent the {\it nec plus ultra} of the modern experimental technique, they are extremely difficult and inspire respect and admiration. The goal of such proof-of-principle experiments is to show the possibility to realize the basic quantum operations, as well as to demonstrate some elements of quantum algorithms. The number of qubits used is below 10, usually from 3 to 5. Apparently, going from 5 qubits to 50 (the goal set by the ARDA Experts Panel roadmap for the year 2012!)
presents hardly surmountable experimental difficulties and the reasons for this should be understood. Most probably, they are related to the simple fact that $2^5$ = 32, while $2^{50}$ = 1,125,899,906,842,624. By contrast, the {\bf theory} of quantum computing, which largely dominates the literature, does not appear to encounter any substantial difficulties in dealing with millions of qubits. Various noise models are being considered, and it has been proved (under certain assumptions) that errors generated by ``local'' noise can be corrected by carefully designed and very ingenious methods, involving, among other tricks, massive parallelism: many thousands of gates should be applied simultaneously to different pairs of qubits and many thousands of measurements should be done simultaneously too. The ARDA Experts Panel also claimed: ``It has been established, under certain assumptions, that if a threshold precision per gate operation could be achieved, quantum error correction would allow a quantum computer to compute indefinitely''. Here, the key words are ``under certain assumptions'', however the distinguished experts did not address the crucial point of whether these assumptions can be realized in the physical world. I argue that they can\textsc{\char13}t. In the physical world, continuous quantities (be they voltages or the parameters defining quantum-mechanical wave functions) can neither be measured nor manipulated exactly. To a mathematician, this might sound absurd, but this is the unquestionable reality of the world we live in. Sure, discrete quantities, like the number of students in a classroom or the number of transistors in the ``on'' state, {\it can} be known exactly. And {\it this} makes the great difference between a classical computer and the hypothetical quantum computer. 
Indeed, all of the assumptions that theorists make about the preparation of qubits into a given state, the operation of the quantum gates, the reliability of the measurements, and so forth, cannot be fulfilled exactly. They can only be approached with some limited precision. So, the question is: What precision is required? With what exactitude must, say, the $\sqrt{2}$ (an irrational number that enters into many of the relevant quantum operations) be experimentally realized? Can it be approximated as 1.41 or as 1.41421356237? There are no clear answers to these and many similar crucial questions. An {\bf extremely important issue} is related to the energies of the $\uparrow$ and $\downarrow$ states. While the notion of {\it energy} is of primordial importance in all domains of physics, both classical and quantum, quite amazingly, it is not in the vocabulary of QC theorists. They implicitly assume that the energies of all $2^N$ states of an ensemble of qubits are {\it exactly equal}. Otherwise, the existence of an energy difference $\Delta E$ leads to oscillations of the quantum amplitudes with a frequency $\Omega = \Delta E/\hbar$, where $\hbar$ is the Planck constant, and this is a basic fact of quantum mechanics. (For example, one of the popular candidates for a qubit, the electron spin, will make a precession around the direction of the Earth\textsc{\char13}s magnetic field with a frequency of $\sim$ 1 MHz. Should the Earth\textsc{\char13}s magnetic field be shielded, and if yes, with what precision?) Whatever is the nature of qubits, some energy differences will necessarily exist because of stray fields, various interactions, etc. resulting in a chaotic dynamics of the whole system, which will completely disorganize the performance of the quantum machine. I am not aware of any studies of this very general problem. 
The problem of the accuracy required arises already at the first step, the preparation of the initial state of the quantum computer, which should be ($\uparrow \uparrow \uparrow ...$), or in conventional notation $|00000... >$, e.g. we start with all spins aligned in the {\it z}-direction, which will be the first task for the ``future quantum engineer''. However, where is the {\it z}-direction? Certainly, it can be defined arbitrarily, but only within a certain precision (like any continuous parameter). Aligning spins along this direction can also be done only approximately. So, instead of the desired $|00000... >$ state, inevitably we will have an admixture of all other states, hopefully with small amplitudes. The same question (again, without any answers) concerns quantum gates, that is our manipulations with the qubits required to perform a meaningful quantum calculation. For example, the theorist proposes us to flip the qubit, i.e. perform the operation $|0 > \rightarrow |1 >$ . Obviously, this again cannot be done exactly (especially, since the initial state $|0 >$ cannot be exact either), but the needed precision has not been established so far. {\bf 3. Quantum annealing} A completely different approach, initially started by the D-Wave company and now followed and developed by IBM, Google, Microsoft, and others, is based on using as qubits superconducting Josephson junctions at ultra-low dilution fridge temperatures. Depending on some parameters of the system, Josephson junctions can operate either as classical two-state bits (and classical computers using Josephson logic have been demonstrated), or as quantum bits. This is not going to be the quantum computer everyone was talking about for the past 20 years, it will not be able to factor large numbers by Shor\textsc{\char13}s algorithm or to efficiently search databases by Grover\textsc{\char13}s quantum algorithm. Rather, it is supposed to perform ``quantum annealing''. 
After initial preparation, any system, whether classical or quantum, at low temperature will relax to its ground state. Calculating the ground state of more or less complex quantum systems, either analytically or numerically, is usually impossible and this is what originally inspired Feynman's vague idea of quantum computing. Hence comes the idea of {\it simulating} a system of interacting qubits by an equivalent system of superconducting quantum circuits based on Josephson junctions. One does not do any quantum calculations by applying quantum gates, and quantum error correction is not needed either. One has just to measure the state of the system after annealing, more precisely, one can measure {\it some} of its $2^N$ parameters. Such an approach is perfectly reasonable. However, Google claims that the 72-qubit superconducting chip in a 10-millikelvin dilution refrigerator (note that such a system is described by $2^{72} \sim 10^{21}$ quantum amplitudes) will prove that quantum computers can beat classical machines, and thus demonstrate ``quantum supremacy''. This claim appears to be somewhat exaggerated. The chip in question is not going to be a {\it quantum computer}, it will be only a specific quantum system (which might be quite interesting on its own) defined by the way the Josephson junctions are interconnected. It is not entirely clear what will be the possible practical use of such systems. However, such modelling might provide some additional knowledge on the behavior of large and complicated quantum systems. Recently, a remarkable simulation of the Kosterlitz-Thouless phase transition was demonstrated in a network of Josephson superconducting rings arranged in a frustrated lattice \cite{King}. {\bf 4. Conclusion} The hypothetical quantum computer is a system with an unimaginable number of continuous degrees of freedom - the values of the $2^N$ quantum amplitudes with $N\sim 10^3 - 10^5$ . 
These values {\it cannot be arbitrary}, they should be under our control with a high precision (which has yet to be defined). In riding a bike, after some training, we learn to successfully control 3 degrees of freedom: the velocity, the direction, and the angle that our body makes with respect to the pavement. A circus artist manages to ride a one-wheel bike with 4 degrees of freedom. Now, imagine a bike having 1000 (if not $2^{1000}$!) joints that allow free rotations of their parts with respect to each other. Will anybody be capable of riding this machine? Thus, the answer to the question in the title is: As soon as physicists and engineers learn to control this number of degrees of freedom, which means - never! {\it About the author}: Mikhail Dyakonov received the PhD (1966) in theoretical physics from Ioffe Physico-Technical Institute in Saint Petersburg (Leningrad), USSR. He worked at Ioffe Institute until 1998 when he became professor at the University of Montpellier, France. He was elected an Honorary Member of Ioffe Institute in 2014. His fields of interest include physics of semiconductors, spin physics, and physics of 2D electrons. He is a recipient of the State prize of USSR, Beller lectureship award from the American Physical Society, and the Robin prize from the French Physical Society. His name is connected to several physical phenomena: Dyakonov-Perel spin relaxation mechanism, Dyakonov surface waves, Dyakonov-Shur instability. Together with V.I. Perel, he has predicted the Spin Hall Effect. \end{document}
\begin{document} \title{On the Pathwidth of \\Almost Semicomplete Digraphs} \author{Kenta Kitsunai\inst{1}\and Yasuaki Kobayashi\inst{2} \and Hisao Tamaki\inst{3}} \authorrunning{K.~Kitsunai, Y~Kobayashi, and H.~Tamaki} \institute{NTT DATA Corporation\\\email{[email protected]}\and Computer Center, Gakushuin University\\ \email{[email protected]} \and Department of Computer Science, Meiji University\\ \email{[email protected]}} \maketitle \begin{abstract} We call a digraph {\em $h$-semicomplete} if each vertex of the digraph has at most $h$ non-neighbors, where a non-neighbor of a vertex $v$ is a vertex $u \neq v$ such that there is no edge between $u$ and $v$ in either direction. This notion generalizes that of semicomplete digraphs which are $0$-semicomplete and tournaments which are semicomplete and have no anti-parallel pairs of edges. Our results in this paper are as follows. (1) We give an algorithm which, given an $h$-semicomplete digraph $G$ on $n$ vertices and a positive integer $k$, in $(h + 2k + 1)^{2k} n^{O(1)}$ time either constructs a path-decomposition of $G$ of width at most $k$ or concludes correctly that the pathwidth of $G$ is larger than $k$. (2) We show that there is a function $f(k, h)$ such that every $h$-semicomplete digraph of pathwidth at least $f(k, h)$ has a semicomplete subgraph of pathwidth at least $k$. One consequence of these results is that the problem of deciding if a fixed digraph $H$ is topologically contained in a given $h$-semicomplete digraph $G$ admits a polynomial-time algorithm for fixed $h$. \end{abstract} \section{Introduction} A {\em tournament} is a digraph obtained from a complete graph by orienting each edge. A {\em semicomplete digraph} generalizes a tournament, allowing each pair of distinct vertices to optionally have two edges in both directions between them. 
Tournaments and semicomplete digraphs are well-studied (see \cite{B-JG08}, for example) and have recently been attracting renewed interest in the following context. There are many problems on undirected graphs that admit polynomial time algorithms but have digraph counterparts that are NP-complete. For example, Robertson and Seymour \cite{RS95}, in their Graph Minors project, proved that the $k$ disjoint paths problem (and the $k$ edge-disjoint paths problem) can be solved in polynomial time for fixed $k$. On the other hand, digraph versions of these problems are NP-complete even for $k = 2$ due to Fortune, Hopcroft, and Wyllie \cite{FHW80}. Recently, Chudnovsky, Scott, and Seymour \cite{CSS15} showed that the $k$ directed disjoint paths problem can be solved in polynomial time for fixed $k$ if the digraph is restricted to be semicomplete. The edge-disjoint version of the problem is also polynomial time solvable on semicomplete digraphs, due to Fradkin and Seymour \cite{FS15}. The situation is similar for the topological containment problem, which asks if a given graph (digraph) contains a subgraph isomorphic to a subdivision of a fixed graph (digraph) $H$: the undirected version is polynomial time solvable due to the disjoint paths result and the directed version is NP-complete on general digraphs \cite{FHW80}, while the question on semicomplete digraphs is polynomial time solvable due to Fradkin and Seymour \cite{FS13} and moreover is fixed-parameter tractable due to Fomin and Pilipczuk \cite{FP13,Pilipczuk12}. In addition to these algorithmic results, some well-quasi-order results that are similar to the celebrated Graph Minors theorem of Robertson and Seymour \cite{RS04} have been proved on the class of semicomplete digraphs \cite{CS11,KS15}. These developments seem to suggest that the class of semicomplete digraphs is a promising stage for pursuing digraph analogues of the splendid outcomes, direct and indirect, from the Graph Minors project.
Given this progress on semicomplete digraphs, it is natural to look for more general classes of digraphs on which similar results hold. Indeed, the results on disjoint paths problems cited above are proved for some generalizations of semicomplete digraphs. The vertex-disjoint path algorithm given in \cite{CSS15} works for a digraph class called $d$-path dominant digraphs, which contains semicomplete digraphs ($d = 1$) and digraphs with multipartite underlying graphs ($d = 2$). The edge-disjoint path algorithm given in \cite{FS15} works for digraphs with independence number (of the underlying graph) bounded by some fixed integer. On the other hand, the results for topological containment in \cite{FS13,FP13,Pilipczuk12} are strictly for the class of semicomplete graphs. The {\em pathwidth} of digraphs, which plays an essential role in some of the above results, is defined as follows. Let $G$ be a digraph. A {\em path-decomposition} of $G$ is a sequence $(X_1, \ldots, X_m)$ of vertex sets $X_i \subseteq V(G)$, called {\em bags}, such that the following three conditions are satisfied: \begin{enumerate} \item $\bigcup_{1 \leq i \leq m} X_i = V(G)$, \item for each edge $(u, v)$ of $G$, $u \in X_i$ and $v \in X_j$ for some $i \geq j$, and \item for every $v \in V(G)$, the set $\{i \mid v \in X_i\}$ of indices of the bags containing $v$ forms a single integer interval. \end{enumerate} The first and the third conditions are the same as in the definition of the pathwidth of undirected graphs; the second condition, on each edge, is different and depends on the direction of the edge. Note that some authors, including the present authors in previous work in different contexts, reverse the direction of edges in this condition. We follow the convention of the papers cited above. 
As in the case of undirected graphs, the {\em width} of a path-decomposition $(X_1, \ldots, X_m)$ is $\max_{1 \leq i \leq m}|X_i| - 1$ and the {\em pathwidth} of $G$, denoted by ${\rm pw}(G)$, is the smallest integer $k$ such that there is a path-decomposition of $G$ of width $k$. Unlike for the pathwidth of undirected graphs, which is linear-time fixed-parameter tractable \cite{Bod96}, no FPT-time algorithm is known for computing the pathwidth of general digraphs: only XP-time algorithms (of running time $n^{O(k)}$) are known. The third author of the current paper proposed one in \cite{Tam11}, which was unfortunately flawed and has recently been corrected in \cite{KKKTT15} by the current and two more authors. Another XP algorithm is due to Nagamochi~\cite{Naga12}, which is formulated for a more general problem of optimizing linear layouts in submodular systems. In this paper, we consider another direction of generalizing semicomplete digraphs and study the pathwidth of digraphs in the generalized class. For non-negative integer $h$, we say that a simple digraph $G$ is $h$-semicomplete if each vertex of $G$ has at most $h$ non-neighbors, where a non-neighbor of vertex $v$ is a vertex $u$ distinct from $v$ such that there is no edge of $G$ between $u$ and $v$ in either direction. Thus, semicomplete digraphs are 0-semicomplete. Our main results are as follows. \begin{theorem} \label{thm:alg} There is an algorithm which, given an $h$-semicomplete digraph $G$ on $n$ vertices and a positive integer $k$, in $(h + 2k + 1)^{2k} n^{O(1)}$ time either constructs a path-decomposition of $G$ of width at most $k$ or concludes correctly that the pathwidth is larger than $k$. \end{theorem} This theorem generalizes the $k^{O(k)} n^2$ time result of Pilipczuk \cite{Pilipczuk12} on semicomplete digraphs. 
Compared on semicomplete digraphs, his algorithm has smaller dependence on $n$ (our $O(1)$ exponent on $n$ is naively 4), while the hidden constant in the exponent on $k$ can be large. \begin{theorem} \label{thm:comb} There is a function $f(h, k)$ on positive integers $h$ and $k$ such that each $h$-semicomplete digraph with pathwidth at least $f(h, k)$ has a semicomplete subgraph of pathwidth at least $k$. \end{theorem} The topological containment result in \cite{FS13} is based on two components. One is a combinatorial result that, for each fixed digraph $H$, there is a positive integer $k$ such that every semicomplete digraph $G$ of pathwidth larger than $k$ topologically contains $H$. The second component is a dynamic programming algorithm that, given a digraph $G$ on $n$ vertices together with a path-decomposition of width $k$ and a digraph $H$ on $r$ vertices with $s$ edges, decides if $G$ topologically contains $H$ in $O(n^{3(k + rs) + 4})$ time. Note that this algorithm does not require $G$ to be semicomplete. Theorem~\ref{thm:comb} enables us to generalize the first component to $h$-semicomplete digraphs and Theorem~\ref{thm:alg} gives us the path-decomposition to be used in the dynamic programming. Thus, we have the following theorem. \begin{theorem} For fixed positive integer $h$ and fixed digraph $H$, the problem of deciding if a given $h$-semicomplete digraph topologically contains $H$ can be solved in polynomial time. \end{theorem} We should remark that extending the FPT result of \cite{FP13,Pilipczuk12} in this direction using the approach of this paper appears difficult, as the FPT-time dynamic programming algorithm therein heavily relies on the strict semicompleteness of the input digraph. \subsubsection{Techniques} Our algorithm in Theorem~\ref{thm:alg} borrows the notion of separation chains from \cite{Pilipczuk12} but the algorithm itself is completely different from the one in \cite{Pilipczuk12}. 
The advantage of our algorithm is that it works correctly on general digraphs, in contrast to the one in \cite{Pilipczuk12} which is highly specialized for semicomplete digraphs. We need a property of $h$-semicomplete digraphs only in the analysis of the running time. Our algorithm is based on the one due to Nagamochi~\cite{Naga12} for the more general problem of finding an optimal linear layout for submodular systems. Informally, his algorithm applied to the pathwidth computation works as follows. Fix digraph $G$ and let $d^+(U)$ for each $U \subseteq V(G)$ denote the number of out-neighbors of $U$. The {\em width} of permutation $\pi$ of $V(G)$ is defined to be the maximum of $d^+(V(\pi'))$ where $\pi'$ ranges over all the prefixes of $\pi$ and $V(\pi')$ denotes the set of vertices in $\pi'$. The smallest integer $k$ such that there is a permutation of width $k$ is called the {\em vertex separation number} of $G$ and is equal to the pathwidth of $G$ \cite{YC08}. Thus, our goal is to decide, given $k$, if there is a permutation of $V(G)$ of width at most $k$. Nagamochi's algorithm is a combination of divide-and-conquer and branching from both sides of the permutation. For disjoint subsets $S$ and $T$ of $V(G)$, call a permutation $\pi$ of $V(G)$ an {\em $(S, T)$-permutation}, if it has a prefix $\pi'$ with $V(\pi') = S$ and a suffix $\pi''$ with $V(\pi'') = T$. A vertex set $X$ that minimizes $d^+(X)$ subject to $S \subseteq X \subseteq V(G) \setminus T$ is called a minimum {\em $(S, T)$-separator}. A crucial observation, based on the submodularity of the set function $d^+$, is the following. Let $X$ be a minimum $(S, T)$-separator. Then, if there is an $(S, T)$-permutation of width at most $k$ then there is such a permutation that is an $(S, V(G) \setminus X)$-permutation and an $(X, T)$-permutation at the same time. Thus if there is a minimum $(S, T)$-separator distinct from both $S$ and $V(G) \setminus T$, then we can divide the problem into two smaller subproblems.
When there is no minimum $(S, T)$-separator other than $S$ or $V(G) \setminus T$, we need to branch on vertices to add to $S$ or $T$. For general digraphs, the running time is $n^{2k + O(1)}$: we need to branch on $O(n)$ vertices from both sides, and the depth of branching is bounded by $k$, as the value $d^+(X)$ of the minimum separator $X$ increases at least by one after we branch from both sides. For $h$-semicomplete digraphs, we observe that the number of vertices $v$ such that $d^+(S \cup \{v\}) \leq k$ is at most $h + 2k + 1$ (see Proposition~\ref{prop:kb-bounded}) and therefore, we need to branch on at most $h + 2k + 1$ vertices when extending from $S$. Unfortunately, we do not have a similar bound on the number of vertices to branch on from the side of $T$. For example, if $|T| < k$, then $d^+(V(G) \setminus (T \cup \{v\})) \leq k$ for every $v \not\in T$ and therefore we need to branch on every vertex not in $T \cup S \cup {N^+}(S)$, where ${N^+}(S)$ denotes the set of out-neighbors of $S$. This asymmetry comes from the asymmetry inherent in the vertex separation number characterization: the width of a permutation $\pi$ in $G$ is not equal in general to the width of a reversal of $\pi$ in $G^{-1}$, the digraph obtained from $G$ by reversing all of its edges. We use separation chains \cite{Pilipczuk12} to give a symmetric characterization of pathwidth and formulate a variant of Nagamochi's algorithm which branches from each side on at most $(h + 2k + 1)$ vertices. This is how we get the running time stated in Theorem~\ref{thm:alg}. We remark that a similar result on cutwidth is an immediate corollary of the Nagamochi's result, since we have the desired symmetry in the definition of cutwidth: the cutwidth of a permutation $\pi$ in $G$ equals the cutwidth of the reversal of $\pi$ in $G^{-1}$. The scenario for the combinatorial result in Theorem~\ref{thm:comb} is rather straightforward. 
Given an $h$-semicomplete digraph $G$ of pathwidth at least $f(h, k)$, we complete it into a semicomplete digraph $G'$ on $V(G)$, which must have pathwidth at least $f(h, k)$. We then find an obstacle $T \subseteq V(G)$ in $G'$ for small pathwidth, of one of the types defined in \cite{Pilipczuk12}. Then we consider a random semicomplete subgraph $G''$ of $G$ and show that $G''$ inherits an obstacle $T'$ from $T$ with high probability such that the existence of $T'$ in $G''$ implies ${\rm pw}(G'') \geq k$. We need to overcome, however, some difficulties in carrying out this scenario. To be more specific, consider one type of obstacles, namely {\em degree tangles} \cite{Pilipczuk12}. An $(l, k)$-degree tangle of $G$ is a vertex set $T$ with $|T| = l$ such that $\max_{v \in T} d^+(v) - \min_{v \in T} d^+(v) \leq k$. In order for a degree tangle $T$ in $G'$ to give rise to a degree tangle $T'$ of the random subgraph $G''$, we need the out-degrees of vertices in $T'$ to ``shrink'' almost uniformly. To this end, we wish our sampling to be such that (1) each vertex $v \in V(G)$ is in $V(G'')$ with a fixed probability $p$ and (2) for each vertex set $S \subseteq V(G)$, the intersection $S \cap V(G'')$ has cardinality sharply concentrated around its expectation $p|S|$. The following theorem, which may be of independent interest, makes this possible: we apply this theorem to the complement of the underlying graph of $G$ with $d = h$. \begin{theorem} \label{thm:sample-indep} Let $G$ be an undirected graph on $n$ vertices with maximum degree $d$ or smaller. Let $p = \frac{1}{2d + 1}$. Then, it is possible to sample a set $I$ of independent vertices of $G$ so that ${\rm\bf Pr}(v \in I) = p$ for each $v \in V(G)$ and, for each $S \subseteq V(G)$, we have \begin{eqnarray*} {\rm\bf Pr}(|S \cap I| > p|S| + t) < \exp\left(-\frac{t^2}{9|S|}\right) \end{eqnarray*} and \begin{eqnarray*} {\rm\bf Pr}(|S \cap I| < p|S| - t) < \exp\left(-\frac{t^2}{9|S|}\right).
\end{eqnarray*} \end{theorem} Even with this sampling method, it is still not clear if we can have the desired ``uniform shrinking'' of out-degrees of the vertices in the degree tangle, since if the set $S$ of out-neighbors of a vertex has cardinality $\Omega(n)$, then the deviation of $|S \cap V(G'')|$ from its expectation $p|S|$ is necessarily $\Omega(\sqrt{n})$. To overcome this difficulty, we introduce several types of obstacles that are robust against random sampling and show that (1) if $G'$ has an obstacle of a type in \cite{Pilipczuk12} then it has a robust obstacle and (2) each robust obstacle in $G'$ indeed gives rise to a strong enough obstacle in $G''$ with high probability. A conference version of this paper will appear as \cite{KKT15}. The rest of this paper is organized as follows. In Section~\ref{sec:prelim} we define some notation. In Section~\ref{sec:alg}, we describe our algorithm and prove Theorem~\ref{thm:alg}. In Section~\ref{sec:comb}, we prove Theorem~\ref{thm:comb}, assuming Theorem~\ref{thm:sample-indep}. Finally in Section~\ref{sec:sample-indep}, we prove Theorem~\ref{thm:sample-indep}. \section{Notation} \label{sec:prelim} Digraphs in this paper are simple: there are no self-loops and, between each pair of distinct vertices, there is at most one edge in each direction. For digraph $G$, $V(G)$ denotes the set of vertices of $G$ and $E(G) \subseteq V(G) \times V(G)$ the set of edges of $G$. If $(u, v) \in E(G)$, then $v$ is an {\em out-neighbor} of $u$ and $u$ is an {\em in-neighbor} of $v$. For each $v \in V(G)$, we denote the set of in-neighbors of $v$ by $N^-_G(v) = \{u \mid (u, v) \in E(G)\}$ and write $N^-_G[v]$ for $N^-_G(v) \cup \{v\}$. For $U \subseteq V(G)$, we define $N^-_G[U] = \bigcup_{v \in U} N^-_G[v]$ and $N^-_G(U) = N^-_G[U] \setminus U$. We define the notation for out-neighbors ${N^+}$ similarly.
In this paper, the {\em in-degree} and {\em out-degree} of vertex $v$ in $G$, denoted by $d^-_G(v)$ and $d^+_G(v)$, respectively, count the in-neighbors and out-neighbors rather than the incoming and outgoing edges: $d^-_G(v) = |N^-_G(v)|$ and $d^+_G(v) = |N^+_G(v)|$; we also define $d^-_G(U) = |N^-_G(U)|$ and $d^+_G(U) = |N^+_G(U)|$ for $U \subseteq V(G)$. We omit the reference to $G$ from the above notation when it is clear from the context which digraph is meant. \section{Algorithm} \label{sec:alg} In this section, we describe the algorithm claimed in Theorem~\ref{thm:alg}, prove its correctness, and analyze its running time. As suggested in the introduction, our first task is to give a symmetric characterization of pathwidth to which Nagamochi's algorithm is adaptable. Let $G$ be a digraph. A pair $(A,B)$ of vertex sets of $G$ is a {\em separation} of $G$ if $A \cup B = V(G)$ and there is no edge from $A \setminus B$ to $B \setminus A$. The {\em order} of separation $(A, B)$ is $|A \cap B|$. For $S,T \subseteq V(G)$ such that $S \cap T = \emptyset$, separation $(A, B)$ is an {\em $S$--$T$ separation} if $S \cap B = \emptyset$ and $T \cap A = \emptyset$. We call an $S$--$T$ separation $(A, B)$ {\em trivial} if $B = V(G) \setminus S$ or $A = V(G) \setminus T$. An important role in our algorithm is played by a {\em minimum $S$--$T$} separation, which is defined to be an $S$--$T$ separation of the smallest order. Note that if a minimum $S$--$T$ separation is trivial, then it must be either $({N^+}[S],\ V(G) \setminus S)$ or $(V(G) \setminus T,\ {N^-}[T])$. As will be seen later, we may use non-trivial minimum $S$--$T$ separations to divide-and-conquer subproblems in our pathwidth computation. A sequence of separations $((A_0,B_0),(A_1,B_1),\ldots,(A_r,B_r))$ is a {\em separation chain} if $A_0 \subseteq A_1 \subseteq \ldots \subseteq A_r$ and $B_r \subseteq B_{r-1} \subseteq \ldots \subseteq B_0$.
The {\em order} of this separation chain is the maximum order of its member separations. We use operator $+$ for concatenating sequences of separations and for appending a separation to a sequence of separations: for sequences $C$ and $C'$ of separations and a separation $(A, B)$, $C$ + $C'$ is the concatenation of $C$ and $C'$, $(A, B) + C$ is the sequence $C$ preceded by $(A, B)$, and $C + (A, B)$ is the sequence $C$ followed by $(A, B)$. Let $C = ((A_0,B_0), (A_1, B_1),\ldots,(A_r,B_r))$ be a separation chain. We say that $C$ is {\em gapless} if, for every $0 < i \leq r$, either $|A_{i} \setminus A_{i - 1}| \leq 1$ or $|B_{i - 1} \setminus B_{i}| \leq 1$ holds. Note that this definition allows a repetition of an identical separation. We say that $C$ is an {\em $S$--$T$ chain}, if $B_0 = V(G) \setminus S$ and $A_r = V(G) \setminus T$, that is, both ends of $C$ are trivial $S$--$T$ separations. Note that every separation in an $S$--$T$ chain is an $S$--$T$ separation. As observed in \cite{Pilipczuk12}, \newline (1) if $(X_1,X_2,\ldots,X_r)$ is a path-decomposition of $G$ then $((A_0,B_0),(A_1,B_1),$ $\ldots,(A_r,B_r))$, where $A_i = \bigcup_{j \leq i}X_j$ and $B_i = \bigcup_{i<j}X_j$, is an $\emptyset$--$\emptyset$ chain in $G$, and \newline(2) if $((A_0,B_0),(A_1,B_1),\ldots,(A_r,B_r))$ is an $\emptyset$--$\emptyset$ chain in $G$, then $(W_1,W_2,\ldots,W_r)$, where $W_i = A_i \cap B_{i-1}$ for $1 \leq i \leq r$, is a path-decomposition of $G$. These observations lead to the following characterization of pathwidth by means of gapless separation chains. \begin{lemma}\label{lem:pathwidth_sc} Digraph $G$ has a path-decomposition of width $k$ if and only if it has a gapless $\emptyset$--$\emptyset$ chain of order $k$. \end{lemma} \begin{proof} Suppose $G$ has a path-decomposition $(X_1, X_2, \ldots, X_r)$ of width $k$.
We may assume that this path-decomposition is nice: $X_1 = X_r = \emptyset$ and, for $1 \leq i < r$, either $X_{i + 1} = X_i \cup \{v\}$ for some $v \in V(G) \setminus X_i$ or $X_{i + 1} = X_i \setminus \{v\}$ for some $v \in X_i$. If we set $A_i = \bigcup_{j \leq i}X_j$ and $B_i = \bigcup_{j > i}X_j$ for $0 \leq i \leq r$ as in observation (1), then $((A_0, B_0), (A_1, B_1),\ldots, (A_r, B_r))$ is a gapless $\emptyset$--$\emptyset$ chain. The order of this separation chain is $\max_{0 \leq i \leq r} |A_i \cap B_i| = \max_{1 \leq i \leq r - 1}|X_i \cap X_{i+1}| = k$. Conversely, suppose a gapless separation chain $((A_0, B_0), (A_1, B_1),\ldots, (A_r, B_r))$ of order $k$ is given. We set $X_i = A_i \cap B_{i - 1}$ for $1 \leq i \leq r$. Then, $(X_1, X_2, \ldots, X_r)$ is a path-decomposition by observation (2). Since our separation chain is gapless, we have either $|A_i \setminus A_{i-1}| \leq 1$ or $|B_{i-1} \setminus B_i| \leq 1$ for $1 \leq i \leq r$. In the former case, we have $|A_i \cap B_{i-1}| \leq |A_{i-1} \cap B_{i-1}|+1 = k+1$ and, in the latter case, we have $|A_i \cap B_{i-1}| \leq |A_i \cap B_i|+1 = k+1$. Therefore, the width of path-decomposition $(X_1, X_2, \ldots, X_r)$ is at most $k$ and hence $G$ has a path-decomposition of width $k$. \qed \end{proof} We say that a pair $(S, T)$ of vertex sets of $G$ is {\em $k$-admissible} if ${N^+}[S] \cap T = \emptyset$ (and hence $S \cap {N^-}[T] = \emptyset$), ${d^+}(S) \leq k$, and ${d^-}(T) \leq k$. It is clear that $(S, T)$ must be $k$-admissible in order for $G$ to have a gapless $S$--$T$ chain of order at most $k$. Our algorithm solves the following problem with parameter $k$: given digraph $G$ and a $k$-admissible pair $(S, T)$, compute a gapless $S$--$T$ chain of order at most $k$ if one exists and otherwise report the non-existence.
The algorithm in Theorem~\ref{thm:alg} applies this algorithm to $(S, T) = (\emptyset, \emptyset)$ and, if it returns an $\emptyset$--$\emptyset$ chain of order $k$, converts it to a path-decomposition of width at most $k$, using the proof of Lemma~\ref{lem:pathwidth_sc}. The following lemma provides the base case for our algorithm. \begin{lemma}\label{lem:base} If pair $(S,T)$ is $k$-admissible and satisfies $|V(G) \setminus (S \cup T)| \leq k + 1$ then $G$ has a gapless $S$--$T$ chain of order at most $k$. \end{lemma} \begin{proof} The proof is by induction on $|V(G) \setminus (S \cup T)|$. The base case is where $V(G) \setminus (S \cup T) = {N^+}(S) = {N^-}(T)$. The statement holds in this case, since the separation $({N^+}[S], {N^-}[T])$ alone forms a gapless $S$--$T$ chain. Since $(S, T)$ is $k$-admissible, the order of this separation chain is at most $k$. Therefore, the base case holds. Suppose that either $V(G) \setminus (S \cup T) \neq {N^+}(S)$ or $V(G) \setminus (S \cup T) \neq {N^-}(T)$. Consider the first case: we have some $v \not\in {N^+}[S] \cup T$. If we set $T' = T \cup \{v\}$, then as $v \not\in {N^+}(S)$, we have ${N^-}(T') \subseteq V(G) \setminus (S \cup T \cup \{v\})$ and hence we have $|{N^-}(T')| \leq k$. We also have ${N^+}[S] \cap T' = \emptyset$ since $v \not\in {N^+}(S)$. Therefore, $(S, T')$ is $k$-admissible. Moreover, we have $|V(G) \setminus (S \cup T')| < |V(G) \setminus (S \cup T)| \leq k + 1$. Therefore, we may apply the induction hypothesis to $(S, T')$ and have a gapless $S$--$T'$ chain $C'$ of order at most $k$. Let $(A, B)$ be the last separation of $C'$. Then, since $A = (V(G) \setminus T') \subseteq (V(G) \setminus T)$ and $B \supseteq {N^-}[T'] \supseteq {N^-}[T]$, $C = C' + (V(G) \setminus T, {N^-}[T])$ is an $S$--$T$ chain. Since $C'$ is gapless and $(V(G) \setminus T) \setminus A = \{v\}$, $C$ is also gapless. 
Moreover, since the order of $C'$ is at most $k$ and the order of $(V(G) \setminus T, {N^-}[T])$ is at most $|{N^-}(T)| \leq k$, the order of $C$ is at most $k$. The second case is similar and symmetric to the first case. \qed \end{proof} We have two types of recurrences: divide-and-conquer and branching. For the recurrence of the first type, we need the following lemma. \begin{lemma}\label{lem:submodular} Suppose $(X,Y)$ is a minimum $S$--$T$ separation. Then, for each $S$--$T$ separation $(A,B)$, both $(A \cap X,\ B \cup Y)$ and $(A \cup X,\ B \cap Y)$ are $S$--$T$ separations and moreover neither of their orders exceeds that of $(A, B)$. \end{lemma} \begin{proof} Let $A_1 = A \setminus B$, $A_2 = A \cap B$, $A_3 = B \setminus A$, $X_1 = X \setminus Y$, $X_2 = X \cap Y$, and $X_3 = Y \setminus X$. Then, both $(A_1, A_2, A_3)$ and $(X_1, X_2, X_3)$ partition $V(G)$. We have \begin{eqnarray*} (A \cap X) \setminus (B \cup Y) & = & A_1 \cap X_1 \mbox{\ and}\\ (B \cup Y) \setminus (A \cap X) & = & A_3 \cup X_3 \end{eqnarray*} and, since there is no edge from $A_1$ to $A_3$ and no edge from $X_1$ to $X_3$, there is no edge from $(A \cap X) \setminus (B \cup Y)$ to $(B \cup Y) \setminus (A \cap X)$. Therefore, $(A \cap X,\ B \cup Y)$ is a separation and, similarly, $(A \cup X,\ B \cap Y)$ is a separation. Since $S \cap B = \emptyset$ and $S \cap Y = \emptyset$, we have $S \cap (B \cup Y) = \emptyset$ and similarly $(A \cap X) \cap T = \emptyset$. Therefore, $(A \cap X,\ B \cup Y)$ is an $S$--$T$ separation and, similarly, $(A \cup X,\ B \cap Y)$ is an $S$--$T$ separation. To prove the claim on the orders of these separations, we first claim that \begin{eqnarray} \label{eqn:ABXY} |A \cap B| + |X \cap Y| = |(A \cap X) \cap (B \cup Y)|+|(A \cup X) \cap (B \cap Y)|.
\end{eqnarray} To see this, note that $A \cap B = A_2$ is partitioned into $A_2 \cap X_1$, $A_2 \cap X_2$, and $A_2 \cap X_3$; $X \cap Y = X_2$ is partitioned into $A_1 \cap X_2$, $A_2 \cap X_2$, and $A_3 \cap X_2$. On the other hand, $(A \cap X) \cap (B \cup Y)$ is partitioned into $A_1 \cap X_2$, $A_2 \cap X_2$, and $A_2 \cap X_1$; $(A \cup X) \cap (B \cap Y)$ is partitioned into $A_3 \cap X_2$, $A_2 \cap X_2$, and $A_2 \cap X_3$. Comparing these lists, we see that both sides of (\ref{eqn:ABXY}) count the same set of vertices with the same multiplicity. Since $(X, Y)$ is a minimum $S$--$T$ separation, we have $|X \cap Y| \leq |(A \cup X) \cap (B \cap Y)|$ and hence $|(A \cap X) \cap (B \cup Y)| \leq |A \cap B|$ by (\ref{eqn:ABXY}); similarly we have $|(A \cup X) \cap (B \cap Y)| \leq |A \cap B|$. \qed \end{proof} The following lemma, which corresponds to the main lemma in \cite{Naga12} underlying the algorithm for submodular systems, provides the divide-and-conquer type recurrence. \begin{lemma}\label{lem:divide} Suppose $G$ has a gapless $S$--$T$ chain of order $k$ and let $(X,Y)$ be a minimum $S$--$T$ separation of $G$. Then $G$ has a gapless $S$--$T$ chain of order at most $k$ of the form $C_1 + (X, Y) + C_2$, where $C_1$ is a gapless $S$--$(Y \setminus X)$ chain and $C_2$ is a gapless $(X \setminus Y)$--$T$ chain. \end{lemma} \begin{proof} Let $C = ((A_0,B_0), (A_1, B_1),\ldots,(A_r,B_r))$ be an arbitrary gapless $S$--$T$ chain of order at most $k$. Recall that $B_0 = V(G) \setminus S$ and $A_r = V(G) \setminus T$ by the definition of $S$--$T$ chains. Consider the sequence of separations $C_1$ consisting of $(A_i \cap X, B_i \cup Y)$ for $0 \leq i \leq r$. Since we have $A_{i - 1} \cap X \subseteq A_i \cap X$ and $B_i \cup Y \subseteq B_{i - 1} \cup Y$ for $0 < i \leq r$, $C_1$ is a separation chain. Since $X \cap T = \emptyset$ and $A_r = V(G) \setminus T$, we have $A_r \cap X = X$. 
Therefore, $C_1$ is an $S$--$(Y \setminus X)$ chain, since we have $V(G) \setminus (B_0 \cup Y) = S$ and $V(G) \setminus (A_r \cap X) = V(G) \setminus X = Y \setminus X$. Since $C$ is gapless, we have, for each $0 < i \leq r$, either $|A_i \setminus A_{i - 1}| \leq 1$ or $|B_{i - 1} \setminus B_i| \leq 1$. In the former case, we have $|(A_i \cap X) \setminus (A_{i - 1} \cap X)| \leq 1$ and, in the latter case, we have $|(B_{i - 1} \cup Y) \setminus (B_i \cup Y)| \leq 1$. Therefore, the separation chain $C_1$ is gapless. By Lemma~\ref{lem:submodular}, the order of $C_1$ is at most $k$. We similarly construct a gapless $(X \setminus Y)$--$T$ chain $C_2$ of order at most $k$. Since the last separation of $C_1$ is $(A_r \cap X, B_r \cup Y) = (X, B_r \cup Y)$ and the first separation of $C_2$ is $(A_0 \cup X, B_0 \cap Y) = (A_0 \cup X, Y)$, the concatenation $C_1 + (X, Y) + C_2$ is a separation chain and is moreover gapless. Since this separation chain is of order at most $k$ and is an $S$--$T$ chain, the lemma holds. \qed \end{proof} We need some preparations before formulating the branching type recurrence. We say that an $S$--$T$ separation chain $C=((A_0,B_0),(A_1,B_1),\ldots,(A_r,B_r))$ is {\em nice} if, for every $0 \leq i < r$, we have $|A_{i+1} \setminus A_i| \leq 1$ and $|B_i \setminus B_{i+1}|\leq 1$. We say $C$ is {\em tight} if $A_0 = {N^+}[S]$ and $B_r = {N^-}[T]$. \begin{lemma}\label{lem:nice_and_tight} If $G$ has a gapless $S$--$T$ chain of order at most $k$ then it has a tight, nice, and gapless $S$--$T$ chain of order at most $k$. \end{lemma} \begin{proof} To each $S$--$T$ chain $C = ((A_0,B_0),(A_1, B_1),\ldots,(A_r,B_r))$, we assign a non-negative integer $\delta(C)$ by \begin{eqnarray*} \delta(C) & = & |A_0 \setminus {N^+}[S]| + |B_0 \setminus {N^-}[T]| \\ && + \sum_{0 \leq i < r}(\max\{0,|A_{i+1} \setminus A_i|-1 \} + \max\{0,|B_i \setminus B_{i+1}|-1 \}). 
\end{eqnarray*} Choose a gapless $S$--$T$ chain $C = ((A_0,B_0),(A_1, B_1),\ldots,(A_r,B_r))$ to minimize $\delta(C)$ subject to being of order at most $k$. If $\delta(C)=0$ then $C$ is tight and nice and we are done. For contradiction, suppose $\delta(C) > 0$. We first consider the case where there is some vertex $v \in A_0 \setminus {N^+}[S]$. Let $C'$ be obtained from $C$ by adding separation $(A_0 \setminus \{v\},B_0)$ before $C$. Then, $C'$ is a gapless $S$--$T$ chain. The order of separation $(A_0 \setminus \{v\},B_0)$ is smaller than that of $(A_0,B_0)$ and hence the order of $C'$ is at most $k$. This contradicts the choice of $C$ since $\delta(C') = \delta(C)-1$. We similarly have a contradiction if there is some $v \in B_r \setminus {N^-}[T]$. Suppose finally that $|A_{i+1} \setminus A_i| \geq 2$ for some $0 \leq i < r$. Let $v$ and $v'$ be two distinct vertices in $A_{i+1} \setminus A_i$. Now, since $C$ is gapless, this assumption implies that $|B_i \setminus B_{i+1}| \leq 1$. As neither $v$ nor $v'$ is in $A_i$ and hence both are in $B_i$, it follows that $|A_{i + 1} \cap B_{i + 1}| \geq |A_i \cap B_i| + 1$. Since $|(A_i \cup \{v\}) \cap B_i| = |A_i \cap B_i| + 1$, the order of separation $(A_i \cup \{v\}, B_i)$ is no greater than that of $(A_{i+1},B_{i+1})$ and hence is at most $k$. Therefore the $S$--$T$ chain $C'$ that is obtained from $C$ by placing $(A_i \cup \{v \}, B_i)$ between $(A_i, B_i)$ and $(A_{i + 1}, B_{i + 1})$ is gapless and of order at most $k$. We have \begin{eqnarray*} \delta(C') & = & \delta(C) - \max\{0,|A_{i+1} \setminus A_i|-1\} - \max\{0,|B_i \setminus B_{i+1}|-1\}\\ & & + \max\{0,|\{v\}|-1\} + \max\{0,|B_i \setminus B_i|-1\}\\ & & + \max\{0,|A_{i+1} \setminus (A_i \cup \{v \})|-1\} + \max\{0,|B_i \setminus B_{i+1}|-1\}\\ & = & \delta(C) - \max\{0,|A_{i+1} \setminus A_i|-1\} + \max\{0,|A_{i+1} \setminus (A_i \cup \{v \})|-1\}.
\end{eqnarray*} Since $|A_{i+1} \setminus A_i| > |A_{i+1} \setminus (A_i \cup \{v \})|> 0$, it follows that $\delta(C') \leq \delta(C) - 1$, a contradiction. We similarly obtain a contradiction from the case $|B_i \setminus B_{i+1}| \geq 2$ as well. \qed \end{proof} The following lemma provides our branching type recurrence. \begin{lemma} \label{lem:sc-add} Suppose $G$ has a gapless $S$--$T$ chain of order at most $k$ and suppose that $|V(G) \setminus (S \cup T)| \geq k + 2$ holds. Then, there are a gapless $S$--$T$ chain $((A_0, B_0), \ldots, (A_r, B_r))$ of order at most $k$ and a pair of distinct vertices $u \in V(G) \setminus (S \cup {N^-}[T])$ and $v \in V(G) \setminus (T \cup {N^+}[S])$ such that the following holds: \begin{enumerate} \item $((A_1, B_1), \ldots, (A_r, B_r))$ is an $(S \cup \{u\})$--$T$ chain, \item $((A_0, B_0), \ldots, (A_{r-1}, B_{r-1}))$ is an $S$--$(T \cup \{v\})$ chain, and \item $((A_1, B_1), \ldots, (A_{r-1}, B_{r-1}))$ is an $(S \cup \{u\})$--$(T \cup \{v\})$ chain. \end{enumerate} \end{lemma} \begin{proof} Suppose $G$ has a gapless $S$--$T$ chain of order at most $k$. By Lemma~\ref{lem:nice_and_tight}, $G$ has a gapless $S$--$T$ chain $C=((A_0,B_0), (A_1, B_1),\ldots,(A_r,B_r))$ of order at most $k$ that is tight and nice. Since $C$ is tight, we have $B_r = {N^-}[T]$. We also have $B_0 = V(G) \setminus S$ from the definition of an $S$--$T$ chain. Therefore, $B_0 \setminus B_r = V(G) \setminus (S \cup {N^-}[T])$ and this set contains at least two vertices as we are assuming $|V(G) \setminus (S \cup T)| \geq k + 2$. Similarly $A_r \setminus A_0 = V(G) \setminus ({N^+}[S] \cup T)$ has at least two vertices. Let $i_1$ denote the smallest $i$ such that $0 < i \leq r$ and $|B_{i-1} \setminus B_i| = 1$ and $i_2$ the largest $i$ such that $0 \leq i < r$ and $|A_{i + 1} \setminus A_i| = 1$. Since $C$ is nice, the choice of $i_1$ and $i_2$ implies that $B_i = B_0$ for $0 \leq i < i_1$ and $A_i = A_r$ for $i_2 < i \leq r$.
Let $u$ be the unique vertex in $B_{i_1 - 1} \setminus B_{i_1}$ and $v$ the unique vertex in $A_{i_2 + 1} \setminus A_{i_2}$. We must have $i_1 \leq i_2$, since otherwise $A_{i_1} \cap B_{i_1} = A_r \cap (B_0 \setminus \{u\}) = (V(G) \setminus T) \cap (V(G) \setminus (S \cup \{u\})) = V(G) \setminus (S \cup T \cup \{u\})$ and hence $|V(G) \setminus (S \cup T)| \leq |A_{i_1} \cap B_{i_1}| + 1 \leq k + 1$, contradicting our assumption. Since $u \not\in B_{i_1}$ and $v \not\in A_{i_1} \subseteq A_{i_2}$, we must have $u \neq v$. Let $C'$ be the separation chain $((A_{i_1}, B_{i_1}), \ldots, (A_{i_2}, B_{i_2}))$. Then, $(A_0, B_0) + C' + (A_r, B_r)$ is an $S$--$T$ chain since $B_0 = V(G) \setminus S$ and $A_r = V(G) \setminus T$, it is gapless since $|B_0 \setminus B_{i_1}| = 1$ and $|A_r \setminus A_{i_2}| = 1$, and it is clearly of order at most $k$. Since $B_{i_1} = V(G) \setminus (S \cup \{u\})$ and $A_{i_2} = V(G) \setminus (T \cup \{v\})$, $(A_0, B_0) + C'$ is an $S$--$(T \cup \{v\})$ chain and $C' + (A_r, B_r)$ is an $(S \cup \{u\})$--$T$ chain. Therefore, the separation chain $(A_0, B_0) + C' + (A_r, B_r)$ qualifies as the $S$--$T$ chain claimed in the lemma. \qed \end{proof} Given these recurrences and the base case above, our algorithm is straightforward. Suppose we are given a $k$-admissible pair $(S, T)$. If $|V(G) \setminus (S \cup T)| \leq k + 1$ holds then we apply Lemma~\ref{lem:base} and return the gapless $S$--$T$ chain it provides. Suppose otherwise. We test if there is a minimum $S$--$T$ separation that is non-trivial: a minimum $S$--$T$ separation $(X, Y)$ that is not equal to either $({N^+}[S],\ V(G) \setminus S)$ or $(V(G) \setminus T,\ {N^-}[T])$. If we find one, we apply Lemma~\ref{lem:divide} and recurse on subproblems $(S,\ Y \setminus X)$ and $(X \setminus Y,\ T)$. If either of the recursive calls returns a negative answer, we return a negative answer.
Otherwise, we concatenate the solutions from the subproblems as prescribed in Lemma~\ref{lem:divide} and return the result. Finally suppose that there is no minimum $S$--$T$ separation that is non-trivial. If $({N^+}[S], V(G) \setminus S)$ is the only minimum $S$--$T$ separation, then we recurse on $(S \cup \{v\},\ T)$ for every $v \in V(G) \setminus (S \cup T)$ such that $(S \cup \{v\},\ T)$ is $k$-admissible. If $(V(G) \setminus T, {N^-}[T])$ is the only minimum $S$--$T$ separation, then we similarly branch from $T$. If both $({N^+}[S],\ V(G) \setminus S)$ and $(V(G) \setminus T,\ {N^-}[T])$ are the minimum $S$--$T$ separations, then we branch from both sides. In either case, if any of the recursive calls returns a gapless separation chain of order at most $k$, we trivially extend the chain into a gapless $S$--$T$ chain of order at most $k$ and return this chain. Otherwise, that is, if all the recursive calls return negative answers, we return a negative answer. The correctness of this algorithm is proved by a straightforward induction for which the above Lemmas provide the base case and the induction steps. We analyze the running time of the algorithm. The following observation extends the one in \cite{Pilipczuk12} that the number of vertices of out-degree at most $k$ in a semicomplete digraph is at most $2k + 1$. \begin{proposition} \label{prop:kb-bounded} Let $G$ be an $h$-semicomplete digraph and let $U \subseteq V(G)$. Then the number of vertices $v \in V(G) \setminus U$ such that ${d^+}(U \cup \{v\}) \leq k$ is at most $h + 2k + 1$ for every $k > 0$. The similar statement with the out-degree replaced by the in-degree also holds. \end{proposition} \begin{proof} Fix $U$, let $X \subset V(G) \setminus U$ be arbitrary, and set $|X| = b$. By the definition of $h$-semicomplete digraphs, $G[X]$ contains at least $b(b - h - 1)/2$ edges and hence the average out-degree of vertices in $G[X]$ is at least $(b - h - 1) / 2$.
For each $v \in X$, $N^+_G(U \cup \{v\})$ contains $N^+_{G[X]}(v)$ and hence if $b > h + 2k + 1$ then there is at least one $v \in X$ such that $|N^+_G(U \cup \{v\})| > k$. This proves the first statement. The second statement is immediate by symmetry. \qed \end{proof} Thus, the number of vertices to branch on from each side in the above algorithm is bounded by $h + 2k + 1$. To measure the ``size'' of the problem instance $(S, T)$, we introduce the following two functions. Let $\gamma(S, T)$ denote the order of the minimum $S$--$T$ separation. Let $\mu(S, T)$ be defined by \begin{eqnarray*} \mu(S,T) & = & 2|V(G) \setminus ({N^+}[S]\cup {N^-}[T])| + |{N^+}(S) \Delta {N^-}(T)|, \end{eqnarray*} where $X \Delta Y$ is the symmetric difference between $X$ and $Y$. \begin{lemma}\label{lem:rec-bound} Let $(X,Y)$ be a minimum $S$--$T$ separation. Then, we have \[ \mu(S,\ Y \setminus X) + \mu(X \setminus Y,\ T) = \mu(S,T). \] \end{lemma} \begin{proof} Since $(X,Y)$ is a minimum $S$--$T$ separation, we have ${N^+}(X \setminus Y) = {N^-}(Y \setminus X) = X \cap Y$ and hence ${N^+}[X \setminus Y] = X$ and ${N^-}[Y \setminus X] = Y$. We define pairwise disjoint vertex sets $C_0$, $C_1$, and $C_2$ by \begin{eqnarray*} C_0 & = & X \cap Y \setminus ({N^+}(S) \cup {N^-}(T))\\ C_1 & = & X \cap Y \cap ({N^+}(S) \setminus {N^-}(T))\\ C_2 & = & X \cap Y \cap ({N^-}(T) \setminus {N^+}(S)). \end{eqnarray*} Then, noting that $(X \cap Y)\setminus {N^-}(T) = C_0 \cup C_1$ and that ${N^-}(T) \setminus (X \cap Y) = {N^-}(T) \setminus X$ since ${N^-}(T) \cap (X \setminus Y) = \emptyset$, we have \begin{eqnarray*} \mu(X \setminus Y,\ T) & = & 2|V(G) \setminus ({N^+}[X \setminus Y] \cup {N^-}[T])| + | {N^+}(X \setminus Y) \Delta {N^-}(T)|\\ & = & 2|V(G) \setminus (X \cup {N^-}[T])| + |(X \cap Y) \Delta {N^-}(T)| \\ & = & 2|V(G) \setminus (X \cup {N^-}[T])| + |C_0| + |C_1| + |{N^-}(T) \setminus X|.
\end{eqnarray*} Similarly, we have \begin{eqnarray*} \mu(S,\ Y \setminus X) & = & 2|V(G) \setminus ({N^+}[S] \cup Y)| + |{N^+}(S) \Delta (X \cap Y)| \\ & = & 2|V(G) \setminus ({N^+}[S] \cup Y)| + |C_0| + |C_2| + |{N^+}(S) \setminus Y|. \end{eqnarray*} Moreover, we have \begin{eqnarray*} |V(G) \setminus ({N^+}[S] \cup {N^-}[T])| & = & |V(G) \setminus (Y \cup {N^+}[S])| + |V(G) \setminus (X \cup {N^-}[T])|+ |C_0| \end{eqnarray*} and \begin{eqnarray*} |{N^+}(S) \Delta {N^-}(T)| & = & |C_1| + |C_2| + |{N^+}(S) \setminus Y| + |{N^-}(T) \setminus X|. \end{eqnarray*} Therefore, we have \begin{eqnarray*} \mu(S,T) &=& 2|V(G) \setminus ({N^+}[S] \cup {N^-}[T])|+ |{N^+}(S) \Delta {N^-}(T)| \\ & = & 2|V(G) \setminus (Y \cup {N^+}[S])| + 2|V(G) \setminus (X \cup {N^-}[T])|\\ & & + 2|C_0| + |C_1| + |C_2| + |{N^+}(S) \setminus Y| + |{N^-}(T) \setminus X|\\ & = & \mu(S,\ Y \setminus X) + \mu(X \setminus Y,\ T) \end{eqnarray*} as claimed in the lemma. \qed \end{proof} \begin{lemma}\label{lem:mu-split} Let $(X, Y)$ be a non-trivial $S$--$T$ separation: $X \setminus Y \neq S$ and $Y \setminus X \neq T$. Then, we have $\mu(S,\ Y \setminus X) \ge 1$ and $\mu(X \setminus Y,\ T) \ge 1$. \end{lemma} \begin{proof} Due to the symmetry it suffices to prove the first inequality. From the assumption, there is some vertex $v \in (X \setminus Y) \setminus S$. Since ${N^-}[Y \setminus X] \subseteq Y$, we have $v \not\in {N^-}[Y \setminus X]$. If $v \in {N^+}(S)$ then $v \in {N^+}(S) \Delta {N^-}(Y \setminus X)$ and otherwise $v \in V(G) \setminus ({N^+}[S]\cup {N^-}[Y \setminus X])$. Therefore, in either case, we have \begin{eqnarray*} \mu(S,\ Y \setminus X) & = & 2|V(G) \setminus ({N^+}[S]\cup {N^-}[Y \setminus X])| \\ & & +|{N^+}(S) \Delta {N^-}(Y \setminus X)|\\ & \ge & 1.
\end{eqnarray*} \qed \end{proof} Let $R(S,T)$ denote the number of problem instances recursively considered when we solve the instance $(S, T)$, not counting the instances in the base case, but counting the instance $(S, T)$ itself unless it is in the base case. Let $\mu'(S, T) = \max\{0, 2\mu(S, T) - 1\}$. \begin{lemma} \label{lem:num_rec} Let $G$ be an $h$-semicomplete digraph and $k$ a positive integer. Then, for each $k$-admissible pair $(S, T)$, we have \begin{equation} \label{eqn:R} R(S,T) \leq \mu'(S, T) \cdot (h + 2k + 1)^{2(k-\gamma(S,T))} \end{equation} \end{lemma} \begin{proof} The proof is by induction on the structure of recursive calls. If instance $(S, T)$ belongs to the base case $|V(G) \setminus (S \cup T)| \leq k + 1$, then $R(S, T) = 0$ by definition and inequality (\ref{eqn:R}) trivially holds. Note that if $\mu(S, T) = 0$ then $V(G) \setminus (S \cup T) = {N^+}(S) = {N^-}(T)$ and $(S, T)$ belongs to the base case. We next consider the case where, in processing the instance $(S, T)$, the ``divide-and-conquer'' recurrence is applied and instances $(S,T')$ and $(S',T)$ are recursed on. We have a non-trivial minimum separation $(X, Y)$ of $(S, T)$ such that $S' = X \setminus Y$ and $T' = Y \setminus X$. By Lemma~\ref{lem:rec-bound}, we have $\mu(S, T) = \mu(S, T') + \mu(S', T)$. Moreover, by Lemma~\ref{lem:mu-split}, we have $\mu(S, T') \ge 1$ and $\mu(S', T) \ge 1$. Therefore, we have \begin{eqnarray*} \mu'(S, T) & = & 2\mu(S, T) - 1 \\ & = & (2\mu(S, T') - 1) + (2\mu(S', T) - 1) + 1\\ & = & \mu'(S, T') + \mu'(S', T) + 1. \end{eqnarray*} Moreover, we have $\gamma(S, T') \geq \gamma(S, T)$ since every $S$--$T'$ separation is an $S$--$T$ separation and similarly $\gamma(S', T) \geq \gamma(S, T)$.
Applying the induction hypothesis to the instances $(S, T')$ and $(S', T)$, we have \begin{eqnarray*} R(S,T) & = & 1+R(S,T')+R(S',T) \\ & \leq & 1 + (\mu'(S, T') + \mu'(S', T)) \cdot b^{2(k-\gamma(S,T))}\\ & \leq & 1 + (\mu'(S, T) - 1) \cdot b^{2(k-\gamma(S,T))}\\ & \leq & \mu'(S, T) \cdot b^{2(k-\gamma(S,T))},\\ \end{eqnarray*} where $b = h + 2k + 1$, that is, inequality (\ref{eqn:R}). We next consider the case where the branching recurrence is applied. We have three cases to consider: (1) $({N^+}[S], V(G) \setminus S)$ and $(V(G) \setminus T, {N^-}[T])$ are the only minimum $S$--$T$ separations, (2) $({N^+}[S], V(G) \setminus S)$ is the only minimum $S$--$T$ separation, and (3) $(V(G) \setminus T, {N^-}[T])$ is the only minimum $S$--$T$ separation. First consider case (1). In this case, for each pair of vertices $u \in V \setminus (S \cup {N^-}[T])$ and $v \in V \setminus ({N^+}[S] \cup T)$ such that the pair $(S \cup \{u\}, T \cup \{v\})$ is $k$-admissible, the instance $(S \cup \{u\}, T \cup \{v\})$ is recursed on. By Proposition~\ref{prop:kb-bounded}, the number of such pairs is at most $b^2 = (h + 2k + 1)^2$. For each pair of $u$ and $v$, we have by the induction hypothesis \begin{eqnarray*} R(S \cup \{u\}, T \cup \{v\}) \leq \mu'(S \cup \{u\}, T \cup \{v\}) \cdot b^{2(k-\gamma(S \cup \{u\}, T \cup \{v\}))}. \end{eqnarray*} Since no $(S \cup \{u\})$--$(T \cup \{v\})$ separation is a minimum $S$--$T$ separation from the assumption of this case, we have $\gamma(S \cup \{u\}, T \cup \{v\}) > \gamma(S, T)$. Moreover, since $\mu(S \cup \{u\}, T \cup \{v\}) < \mu(S, T)$ and $\mu(S, T) > 0$, we have $\mu'(S \cup \{u\}, T \cup \{v\}) < \mu'(S, T)$. Therefore, we have \begin{eqnarray*} R(S, T) &\leq & 1 + \sum_{u, v}R(S \cup \{u\}, T \cup \{v\})\\ &\leq & 1 + b^2 \cdot (\mu'(S, T) - 1) \cdot b^{2(k-\gamma(S,T) - 1)} \\ &\leq & \mu'(S, T)\cdot b^{2(k-\gamma(S,T))}, \\ \end{eqnarray*} that is, inequality (\ref{eqn:R}). Cases (2) and (3) are similar and somewhat simpler.
\qed \end{proof} The time for processing each pair $(S, T)$ excluding the time consumed by subsequent recursive calls is dominated by the time for finding minimum $S$--$T$ separation and for deciding if there is a minimum $S$--$T$ separation that is not trivial. This can be done in $n^{O(1)}$ time by the repeated use of a standard augmenting path algorithm for a minimum $S$--$T$ cut. Since $\mu'(\emptyset, \emptyset) = O(n)$, we have the running time claimed in Theorem~\ref{thm:alg}. \section{Tame obstacles survive random sampling: proof of Theorem~\ref{thm:comb}} \label{sec:comb} We prove Theorem~\ref{thm:comb} in this section. Let $G$ be a semicomplete digraph with $n$ vertices. For $0 \leq d \leq n$, let $\Vinle{d}(G)$, $\Vinge{d}(G)$, $\Voutle{d}(G)$, and $\Voutge{d}(G)$ denote the set of vertices $v$ with ${d^-}G(v) \leq d$, ${d^-}G(v) \geq d$, ${d^+}G(v) \leq d$, and ${d^+}G(v) \geq d$, respectively. We omit the reference to $G$ and write $\Vinle{d}$ etc. when $G$ is clear from the context. \begin{proposition} For every $0 \leq d < n$, we have $\Voutle{d} \subseteq \Vinge{n - d - 1}$ and $\Vinle{d} \subseteq \Voutge{n - d - 1}$. \end{proposition} \begin{definition} \label{def:tangles} \cite{Pilipczuk12} Let $G$ be a semicomplete digraph and let $d \geq 0$, $l > 0$ and $k > 0$ be integers. A {\em $(d, l, k)$-degree tangle} of $G$ is a vertex set $T \subseteq \Voutge{d} \cap \Voutle{d + k}$ with $|T| = l$. An {\em $(d, l, k)$-matching tangle} of $G$ is a pair of vertex sets $(T_1, T_2)$ with $|T_1| =|T_2| = l$ such that: \begin{enumerate} \item $T_1 \subseteq \Voutle{d}$, $T_2 \subseteq \Voutge{d + k + 1}$, and \item there is some bijection $\phi: T_1 \rightarrow T_2$ such that $(v, \phi(v)) \in E(G)$ for every $v \in T_1$. \end{enumerate} We will often refer to a $(d, l, k)$-degree (-matching) tangle as an $(l, k)$-degree (-matching) tangle without specifying $d$. 
\end{definition} \begin{lemma} \label{lem:degree-interval} Let $G$ be a semicomplete digraph on $n$ vertices. Then, for each pair $d_1$ and $d_2$ of non-negative integers such that $d_1 + d_2 < n$, we have $|\Voutge{d_1} \cap \Vinge{d_2}| \leq n - (d_1 + d_2) + 2{\rm pw}(G)$. \end{lemma} \begin{proof} Fix an optimal nice path-decomposition $X_0, X_1, \ldots, X_{2n}$ of $G$, where $n = |V(G)|$. We say that vertex $v$ is {\em introduced at $i$} if $X_{i} \setminus X_{i - 1} = \{v\}$ and {\em forgotten at $i$} if $X_{i - 1} \setminus X_i = \{v\}$. Let $i_0$ denote the smallest index $i$ such that a vertex in $\Voutge{d_1} \cap \Vinge{d_2}$ is forgotten at $i + 1$; we let $v_0$ denote this forgotten vertex. Similarly, let $i_1$ be the largest index $i$ such that a vertex in $\Voutge{d_1} \cap \Vinge{d_2}$ is introduced at $i$; we let $v_1$ denote this vertex. If $i_0 \geq i_1$ then $\Voutge{d_1} \cap \Vinge{d_2} \subseteq X_{i_0}$ and hence $|\Voutge{d_1} \cap \Vinge{d_2}| \leq {\rm pw}(G) + 1$; we are done. So suppose that $i_0 < i_1$. Let $Y_0 = \bigcup_{j \leq i_0} X_{j}$ and $Y_1 = \bigcup_{j \geq i_1} X_{j}$. Since ${N^+}[v_0] \subseteq Y_0$, by the definition of path-decompositions, and ${d^+}(v_0) \geq d_1$, we have $|Y_0| \geq d_1 + 1$. Similarly, since ${N^-}[v_1] \subseteq Y_1$ and ${d^-}(v_1) \geq d_2$ we have $|Y_1| \geq d_2 + 1$. Let $Z$ be the set of vertices in $\Voutge{d_1} \cap \Vinge{d_2}$ that are introduced at some $i > i_0$ and forgotten at some $i' < i_1$. Then, each vertex in $(\Voutge{d_1} \cap \Vinge{d_2}) \setminus Z$ must be in $X_{i_0}$ if it is introduced at some $i \leq {i_0}$ and in $X_{i_1}$ if it is forgotten at some $i > {i_1}$. As $Y_0 \cup Y_1 \subseteq V(G) \setminus Z$, we have \begin{eqnarray*} |Y_0 \cup Y_1| & \leq & n - |\Voutge{d_1} \cap \Vinge{d_2}| + |(\Voutge{d_1} \cap \Vinge{d_2}) \setminus Z|\\ & \leq & n - |\Voutge{d_1} \cap \Vinge{d_2}| + |X_{i_0} \cup X_{i_1}|. 
\end{eqnarray*} We have $Y_0 \cap Y_1 = X_{i_0} \cap X_{i_1}$ from the definition of a path-decomposition and hence $|Y_0| + |Y_1| \leq n - |\Voutge{d_1} \cap \Vinge{d_2}| + |X_{i_0}| + |X_{i_1}|$. Combining with the bounds on $|Y_0|$ and $|Y_1|$ above, we have \begin{eqnarray*} |\Voutge{d_1} \cap \Vinge{d_2}| & \leq & n - (d_1 + 1) - (d_2 + 1) + |X_{i_0}| + |X_{i_1}|\\ & \leq & n - (d_1 + d_2) + |X_{i_0}| + |X_{i_1}| - 2\\ & \leq & n - (d_1 + d_2) + 2{\rm pw}(G). \end{eqnarray*} \qed \end{proof} \begin{corollary} \label{cor:degree-tangle} If $G$ has an $(l, k)$-degree tangle then ${\rm pw}(G) \geq (l - k - 1)/2$. \end{corollary} \begin{proof} Let $T$ be an $(l, k)$-degree tangle. Then, $T \subseteq \Voutge{d} \cap \Voutle{d + k} \subseteq \Voutge{d} \cap \Vinge{n - (d + k) -1}$ for some $d$ and hence $l \leq n - (n - k - 1) + 2{\rm pw}(G) = k + 1 + 2{\rm pw}(G)$ by Lemma~\ref{lem:degree-interval}. The corollary follows. \qed \end{proof} \begin{remark} The lemma in \cite{Pilipczuk12} states that if $G$ has a $(5k+2, k)$-degree tangle then ${\rm pw}(G) > k$. The above corollary implies a slightly stronger statement that if $G$ has a $(3k+2, k)$-degree tangle then ${\rm pw}(G) > k$. \end{remark} The following lemma generalizes the analysis of matching tangles in \cite{Pilipczuk12}. We need this generalization when we introduce another obstacle for small pathwidth. \begin{lemma} \label{lem:disjoint} Let $G$ be a semicomplete digraph on $n$ vertices and let $l$, $k$, and $d$ be positive integers. Suppose $G$ has a set of $l$ pairwise vertex-disjoint directed paths from $\Voutle{d}$ to $\Voutge{d + k}$. Then, ${\rm pw}(G) \geq \min\{l, k\}$. \end{lemma} \begin{proof} Let $Q$ be a set of $l$ pairwise vertex-disjoint directed paths from $\Voutle{d}$ to $\Voutge{d + k}$. We assume ${\rm pw}(G) \leq k - 1$ and show that ${\rm pw}(G) \geq l$. Let $X_0$, \ldots, $X_{2n}$ be a nice path-decomposition of $G$ of optimal width (which is $k - 1$ or smaller).
Let $A_i = \bigcup_{j \leq i} X_j$ and $B_i = \bigcup_{j > i} X_j$ for $0 \leq i < 2n$. Since $|A_i| + |B_i| = n + |A_i \cap B_i| \leq n + k - 1$ holds for $0 \leq i < 2n$, there is some $i$ such that $|A_i| \leq d + k$ and $|B_i| \leq n - d - 1$. Fix such $i$. For each $v \not\in A_i$, ${N^-}[v] \subseteq B_i$ and hence ${d^-}(v) \leq n - d - 2$. Therefore, we have $\Voutle{d} \subseteq \Vinge{n - d - 1} \subseteq A_i$. Similarly, for each $v \not\in B_i$, ${N^+}[v] \subseteq A_i$ and hence ${d^+}(v) \leq d + k - 1$. Therefore we have $\Voutge{d + k} \subseteq B_i$. Therefore, each path in $Q$ from $\Voutle{d}$ to $\Voutge{d + k}$ is from $A_i$ to $B_i$ and must have at least one vertex in $A_i \cap B_i$ since $(A_i, B_i)$ is a separation and hence there is no edge from $A_i \setminus B_i$ to $B_i \setminus A_i$. As the $l$ paths in $Q$ are pairwise vertex-disjoint, we have $l \leq |A_i \cap B_i| \leq {\rm pw}(G)$. \qed \end{proof} \begin{corollary}\cite{Pilipczuk12} If a semicomplete digraph $G$ has a $(l, k)$-matching tangle, then ${\rm pw}(G) \geq \min\{l, k + 1\}$. \end{corollary} \begin{proof} Let $(T_1, T_2)$ be a $(l, k)$-matching tangle and let $d$ be such that $T_1 \subseteq \Voutle{d}$ and $T_2 \subseteq \Voutge{d + k + 1}$. Apply Lemma~\ref{lem:disjoint} to the set of $l$ vertex-disjoint paths from $\Voutle{d}$ to $\Voutge{d + k + 1}$ provided by the matching edges. \qed \end{proof} We follow the scenario described in the introduction. Given an $h$-semicomplete digraph $G$ of pathwidth at least $f(h, k)$, we complete it into a semicomplete digraph $G'$ on $V(G)$, in which we find a large obstacle, say a degree tangle $T$. Then, we apply Theorem~\ref{thm:sample-indep} to obtain a random independent set $I$ of the complement of the underlying graph of $G$. We hope that $T \cap I$ is a tangle of $G[I]$ that is strong enough to conclude ${\rm pw}(G[I]) \geq k$. 
For this to happen, we need to have the out-degrees $|N^+_{G'}(v) \cap I|$ of $v$, for $v \in T \cap I$, to be close to each other. As observed in \cite{Pilipczuk12}, the optimal vertex separation sequence lists the vertices roughly in the order of increasing out-degrees and therefore each vertex has most vertices of smaller degree as its out-neighbors, except for some exceptions. The following notion of the wildness of vertices measures how exceptional a vertex is. \begin{definition} \label{def:wildness} For each vertex $v \in V(G)$, we define the {\em wildness} ${\rm wld}(v)$ of $v$ by \begin{eqnarray*} {\rm wld}(v) = |\Voutle{{d^+}(v)} \setminus {N^+}(v)|. \end{eqnarray*} \end{definition} \begin{lemma} \label{lem:wild} Let $G$ be semicomplete and $v$ an arbitrary vertex of $G$. Then, for each integer $w \geq 0$, we have \begin{eqnarray*} |\Voutle{{d^+}(v) - w} \cap {N^-}(v)| \geq {\rm wld}(v) - w - 2{\rm pw}(G) - 1 \end{eqnarray*} and \begin{eqnarray*} |\Voutge{{d^+}(v) + w} \cap {N^+}(v)| \geq {\rm wld}(v) - w - 2{\rm pw}(G). \end{eqnarray*} \end{lemma} \begin{proof} For the first inequality, first observe that \begin{eqnarray*} |\Voutle{{d^+}(v)} \cap {N^-}(v)| & \geq & |\Voutle{{d^+}(v)} \setminus {N^+}(v)| - 1\\ & = & {\rm wld}(v) - 1, \end{eqnarray*} since each vertex not in ${N^-}(v)$ must be in ${N^+}(v) \cup \{v\}$. Since \begin{eqnarray*} |\Voutle{{d^+}(v)} \setminus \Voutle{{d^+}(v) - w}| & \leq & |\Vinge{n - {d^+}(v) - 1} \cap \Voutge{{d^+}(v) - w + 1}| \\ & \leq & w + 2{\rm pw}(G) \end{eqnarray*} by Lemma~\ref{lem:degree-interval} (or trivially holding when $w = 0$ and hence Lemma~\ref{lem:degree-interval} is not applicable), we obtain the first inequality.
For the second inequality, we have $|\Voutle{{d^+}(v) + w - 1}| \leq |\Vinge{n - ({d^+}(v) + w)}| \leq {d^+}(v) + w + 2{\rm pw}(G)$ by Lemma~\ref{lem:degree-interval} and hence \begin{eqnarray*} |\Voutle{{d^+}(v) + w - 1} \cap {N^+}(v)| &\leq & {d^+}(v) + w + 2{\rm pw}(G) - |\Voutle{{d^+}(v) + w - 1} \setminus {N^+}(v)| \\ & \leq & {d^+}(v) + w + 2{\rm pw}(G) - |\Voutle{{d^+}(v)} \setminus {N^+}(v)| \\ & = & {d^+}(v) + w + 2{\rm pw}(G) - {\rm wld}(v). \end{eqnarray*} Therefore, of the ${d^+}(v)$ vertices in ${N^+}(v)$, at least ${\rm wld}(v) - w - 2{\rm pw}(G)$ must belong to $\Voutge{{d^+}(v) + w}$. \qed \end{proof} If the vertices of a degree-tangle $T$ have small wildness, then most of their out-neighbors are shared and we may expect that their degrees in the sampled subgraph $G[I]$ will be close to each other. We call such a degree-tangle tame. \begin{definition} \label{def:tame} We say that an $(l, w)$-degree tangle $T$ of $G$ is {\em tame} (relative to the parameters $l$ and $w$), if ${\rm wld}(v) \leq 3l + w + 2{\rm pw}(G)$ for each $v \in T$. \end{definition} A degree-tangle is not necessarily tame, but a large number of wild vertices in a degree-tangle are themselves an evidence of large pathwidth. We capture this fact by another type of obstacles we call spiders. \begin{definition} \label{def:spider} Let $G$ be a semicomplete digraph and let $d \geq 0$, $l > 0$, and $w > 0$ be integers. A {\em $(d, l, w)$-spider} is a triple $(T, L, R)$, where $T$ is a vertex set with $|T| \geq l$, $L$ is a family $\{L_v \mid v \in T\}$ of vertex sets, and $R$ is a family $\{R_v \mid v \in T\}$ of vertex sets, such that the following holds for each $v \in T$: \begin{enumerate} \item $L_v \subseteq {N^-}(v)$, \item $|L_v| \geq 3l$, \item ${d^+}(u) \leq d$ for each $u \in L_v$, \item $R_v \subseteq {N^+}(v)$, \item $|R_v| \geq 3l$, and \item ${d^+}(u) \geq d + w$ for each $u \in R_v$. 
\end{enumerate} We will sometimes refer to a $(d, l, w)$-spider as an $(l, w)$-spider, without specifying $d$. \end{definition} \begin{lemma} \label{lem:spider_lb} If a semicomplete digraph $G$ has an $(l, w)$-spider then ${\rm pw}(G) > \min\{l, w\}$. \end{lemma} \begin{proof} Let $(T, L, R)$ be a $(d, l, w)$-spider of $G$. Let $T'$ be an arbitrary subset of $T$ with $|T'| = l$. For each $v \in T'$, select $l_v \in L_v$ and $r_v \in R_v$ so that, for each distinct pair $u, v \in T'$, we have $\{u, l_u, r_u\} \cap \{v, l_v, r_v\} = \emptyset$. Since $|L_v| \geq 3l$ and $|R_v| \geq 3l$ for each $v \in T$, such a selection can trivially be done in a greedy manner. We have a set of $l$ pairwise vertex-disjoint paths from $\Voutle{d}(G)$ to $\Voutge{d + w + 1}(G)$ and hence by Lemma~\ref{lem:disjoint}, we have ${\rm pw}(G) > \min\{l, w\}$. \qed \end{proof} The following lemma shows that spiders capture what we intend them to capture. \begin{lemma} \label{lem:degree-spider} Suppose $G$ has a $(2l,w)$-degree tangle $T$. Then, $G$ has either a tame $(l, w)$-degree tangle or an $(l, w)$-spider. \end{lemma} \begin{proof} Let $U = \{v \in T \mid {\rm wld}(v) \leq 3l + w + 2{\rm pw}(G)\}$. If $|U| \geq l$ then $U$ contains a tame $(l, w)$-degree tangle and we are done. So, suppose otherwise. Let $d$ be such that $T \subseteq \Voutge{d} \cap \Voutle{d + w}$. For each $v \in T \setminus U$, let $L_v = \Voutle{d} \cap {N^-}(v)$ and $R_v = \Voutge{d + w} \cap {N^+}(v)$. Fix $v \in T \setminus U$. As ${\rm wld}(v) > 3l + w + 2{\rm pw}(G)$, we have, by Lemma~\ref{lem:wild}, \begin{eqnarray*} |L_v| &\geq & {\rm wld}(v) - ({d^+}(v) - d) - 2{\rm pw}(G) - 1\\ & \geq & {\rm wld}(v) - w - 2{\rm pw}(G) - 1\\ & \geq & 3l \end{eqnarray*} and similarly $|R_v| \geq 3l$. Therefore, the triple $(T \setminus U, L, R)$ is a $(d, l, w)$-spider. \qed \end{proof} We similarly define the tameness of matching tangles. 
\begin{definition} \label{def:matching-tame} We say that a $(d, l, w)$-matching tangle $(T_1, T_2)$ of $G$ is tame if \begin{enumerate} \item ${\rm wld}(v) \leq 3l + d + w - {d^+}(v) + 2{\rm pw}(G)$ for each $v \in T_1$ and \item ${\rm wld}(v) \leq 3l + {d^+}(v) - d + 2{\rm pw}(G)$ for each $v \in T_2$. \end{enumerate} \end{definition} \begin{lemma} \label{lem:matching-spider} Suppose $G$ has a $(d, 3l, w)$-matching tangle $(T_1, T_2)$. Then, $G$ has either a tame $(d, l, w)$-matching tangle or a $(d, l, w)$-spider. \end{lemma} \begin{proof} Let $I_1 = \{v \in T_1 \mid {\rm wld}(v) > 3l + d + w - {d^+}(v) + 2{\rm pw}(G)\}$ and $I_2 = \{v \in T_2 \mid {\rm wld}(v) > 3l + {d^+}(v) - d + 2{\rm pw}(G)\}$. If $|I_1| \leq l$ and $|I_2| \leq l$ then there is some $T_1' \subseteq T_1 \setminus I_1$ and $T_2' \subseteq T_2 \setminus I_2$ with $|T_1'| = |T_2'| = l$ such that there is a matching from $T_1'$ to $T_2'$ by edges of $G$: $(T_1', T_2')$ is a tame $(d, l, w)$-matching tangle. Suppose otherwise. We first consider the case where $|I_1| > l$. For each $v \in I_1$, let $L_v = \Voutle{{d^+}(v)} \cap {N^-}(v)$ and $R_v = \Voutge{d + w} \cap {N^+}(v)$. Applying Lemma~\ref{lem:wild} and using the assumption ${\rm wld}(v) > 3l + d + w - {d^+}(v) + 2{\rm pw}(G)$, we have \begin{eqnarray*} |L_v| & \geq & {\rm wld}(v) - 2{\rm pw}(G) - 1 \\ & \geq & 3l + d + w - {d^+}(v)\\ & \geq & 3l \end{eqnarray*} and \begin{eqnarray*} |R_v| & \geq & {\rm wld}(v) - (d + w - {d^+}(v)) - 2{\rm pw}(G) \\ & \geq & 3l.\\ \end{eqnarray*} Therefore, $(I_1, L, R)$ is a $(d, l, w)$-spider. In the case $|I_2| > l$, we have a $(d, l, w)$-spider similarly constructed on $I_2$. \qed \end{proof} We also need to define the tameness of spiders. \begin{definition} \label{def:spider-tame} Let $(T, L, R)$ be a $(d, l, w)$-spider. We say that a vertex $u \in \bigcup_{v \in T} L_v$ is {\em tame} (relative to the parameters $d$, $l$, and $w$) if ${\rm wld}(u) \leq 3l + d + w - {d^+}(u) + 2{\rm pw}(G)$.
Similarly, $u \in \bigcup_{v \in T} R_v$ is {\em tame} if ${\rm wld}(u) \leq 3l + {d^+}(u) - d + 2{\rm pw}(G)$. We let $L_v^{\rm tame}$ and $R_v^{\rm tame}$ denote the set of tame vertices in $L_v$ and $R_v$ respectively. We say that a $(d, l, w)$-spider $(T, L, R)$ is {\em tame} if $|L_v^{\rm tame}| \geq 2l$ and $|R_v^{\rm tame}| \geq 2l$ for every $v \in T$. We say that a $(l, w)$-spider is tame, if it is a tame $(d, l, w)$-spider for some $d$. \end{definition} \begin{lemma} \label{lem:spider} Let $G$ be a semicomplete digraph and suppose that $G$ has an $(l, w)$-spider, where $w > 0$. Then, $G$ has a tame $(l, w')$-spider for some $w' \geq w$. \end{lemma} \begin{proof} Suppose $G$ has a $(d, l, w)$-spider $(T, L, R)$. We may assume that $w$ is the largest possible given $l$: for every $(d', l, w')$-spider of $G$, we have $w' \leq w$. Under this assumption, we show that the spider $(T, L, R)$ is tame. For contradiction, suppose not. We consider the case where there is some $v \in T$ such that $|L_v^{\rm tame}| < 2l$; the case where there is some $v \in T$ such that $|R_v^{\rm tame}| < 2l$ is similar. Fix such $v$ and let $U = L_v \setminus L_v^{\rm tame}$. Since $|L_v| \geq 3l$ by the definition of a spider, we have $|U| \geq l$. Let $u$ be an arbitrary member of $U$ and let $w_u = d + w - {d^+}(u)$. Since ${d^+}(u) \leq d$ by the definition of a spider, we have $w_u \geq w$. Since $u$ is not tame, we have ${\rm wld}(u) > 3l + d + w - {d^+}(u) + 2{\rm pw}(G) = 3l + w_u + 2{\rm pw}(G)$. Let $L'_u = \Voutle{{d^+}(u) - w_u} \cap {N^-}(u)$ and $R'_u = \Voutle{{d^+}(u) + w_u} \cap {N^+}(u)$. We apply Lemma~\ref{lem:wild} and have \begin{eqnarray*} |L'_u| & \geq & {\rm wld}(u) - w_u - 2{\rm pw}(G) - 1\\ & \geq & 3l \end{eqnarray*} and \begin{eqnarray*} |R'_u| & \geq & {\rm wld}(u) - w_u - 2{\rm pw}(G) \\ & \geq & 3l. 
\end{eqnarray*} Since ${d^+}(u) - w_u = d + w - 2w_u \leq d - w$ and ${d^+}(u) + w_u = d + w$ holds for every $u \in U$, we have $L'_u \subseteq \Voutle{d - w}$ and $R'_u \subseteq \Voutge{d + w}$ for every $u \in U$. Therefore, $(U, L', R')$ is a $(d - w, l, 2w)$-spider, contradicting the choice of $w$. \qed \end{proof} To continue our scenario, we invoke the following result due to Pilipczuk. \begin{lemma}(\cite{Pilipczuk12}, Theorem~32) \label{lem:tangle-Pil} There exists an algorithm, which given a semicomplete digraph $G$ and integers $k$ and $l \geq 5k$, in time $O(k|V(G )|^2)$ outputs one of the following: \begin{itemize} \item an $(l + 2, k)$-degree tangle in $G$; \item a $(k + 1, k)$-matching tangle in $G$; \item a path decomposition of $G$ of width at most $(l + 2k)$. \end{itemize} \end{lemma} The following lemma, building on this lemma and previous lemmas, shows that a semicomplete digraph of large pathwidth has a tame tangle or a spider. \begin{lemma} \label{lem:get-tangle} Let $K$ be a positive integer and $G$ a semicomplete digraph with ${\rm pw}(G) \geq 128K$. Then, $G$ has at least one of the following: \begin{enumerate} \item a tame $(46K, 18K)$-degree tangle; \item a $(6K, 18K)$-spider; \item a tame $(6K, 18K)$-matching tangle. \end{enumerate} \end{lemma} \begin{proof} We apply Lemma~\ref{lem:tangle-Pil} to $G$ with $l = 92K - 2$ and $k = 18K$. Since $G$ does not have a path-decomposition of width $l + 2k = 92K - 2 + 36K = 128K - 2$, the algorithm finds either a $(92K, 18K)$-degree tangle of $G$, or an $(18K + 1, 18K)$-matching tangle of $G$. In the first case, by Lemma~\ref{lem:degree-spider}, $G$ has either a tame $(46K, 18K)$-degree tangle or a $(46K, 18K)$-spider, which certainly contains a $(6K, 18K)$-spider. In the second case, $G$ has a $(18K, 18K)$-matching tangle and, hence by Lemma~\ref{lem:matching-spider}, either a tame $(6K, 18K)$-matching tangle or a $(6K, 18K)$-spider. 
\qed \end{proof} \begin{lemma} \label{lem:degree-sample} Let $h$ be a positive integer. Then, there is some positive integer $k_h$ such that the following holds. Let $k \geq k_h$ be an integer and let $K = (h + 1)k$. Let $G$ be an $h$-semicomplete digraph and suppose a semicomplete supergraph $G'$ of $G$ with vertex set $V(G)$ and with ${\rm pw}(G') \leq 140K$ has a tame $(46K, 18K)$-degree tangle. Then $G$ has a semicomplete subgraph with a $(21k, 10k)$-degree tangle. \end{lemma} \begin{proof} Let $T$ be a tame $(46K, 18K)$-degree tangle of $G'$. Let $\hat{G}$ denote the complement of the undirected graph underlying $G$. The maximum degree of $\hat{G}$ is $h$ or smaller. We apply Theorem~\ref{thm:sample-indep} to $\hat{G}$ to obtain a random independent set $I$ of $\hat{G}$. The probability of each vertex being in $I$ is $p = \frac{1}{2(h + 1)}$. For each $S \subseteq V(G)$, the expectation of $|S \cap I|$ is $p|S|$ and the probability of deviations is bounded as in Theorem~\ref{thm:sample-indep}. That $I$ is independent in $\hat{G}$ implies that $G[I]$, which equals $G'[I]$, is semicomplete. We show that $T \cap I$ contains a $(21k, 10k)$-tangle of $H = G[I]$ with high probability. We call the event $|T \cap I| < 21k$ {\em the bad event on $|T \cap I|$}. Since ${\rm\bf E}[|T \cap I|] = 46pK = 23k$, the probability of this bad event is at most \begin{eqnarray*} {\rm\bf Pr}\left(23k - |T \cap I| > 2k\right) & \leq & \exp\left(-\frac{4k^2}{9\cdot 46K}\right)\\ & = & \exp\left(-\frac{2k}{207(h + 1)}\right) \end{eqnarray*} by Theorem~\ref{thm:sample-indep}. Let $d$ be such that $T \subseteq \Voutge{d}(G') \cap \Voutle{d + 18K}(G')$. For each $v \in T \cap I$, we evaluate $d^+_H(v)$ as follows. \begin{eqnarray*} d^+_H(v) & = & |{N^+}_{G'}(v) \cap I|\\ & = & |\Voutle{d}(G') \cap I| - |(\Voutle{d}(G') \setminus {N^+}_{G'}(v)) \cap I| + |(\Voutge{d + 1}(G') \cap {N^+}_{G'}(v)) \cap I|.
\end{eqnarray*} The deviation of the first term is common for all $v$: $\varDelta = |\Voutle{d}(G') \cap I| - {\rm\bf E}[|\Voutle{d}(G') \cap I|] = |\Voutle{d}(G') \cap I| - p|\Voutle{d}(G')|$. Therefore, we are concerned with the deviations of other terms depending on $v$. Let $X_v = \Voutle{d}(G') \setminus {N^+}_{G'}(v)$ and $Y_v = \Voutge{d + 1}(G') \cap {N^+}_{G'}(v)$ for each $v \in T$. As the $(46K, 18K)$-degree tangle $T$ of $G'$ is tame, we have ${\rm wld}(v) \leq 3\cdot 46K + 18K + 2{\rm pw}(G') \leq 436K$ and hence \begin{eqnarray*} |X_v| & \leq & |\Voutle{d^+_{G'}(v)}(G') \setminus {N^+}_{G'}(v)| \\ & = & {\rm wld}(v) \\ & \leq & 436K. \end{eqnarray*} Since \begin{eqnarray} |\Voutle{d}(G')| & = & n - |\Voutge{d + 1}(G')| \nonumber\\ & \geq & n - (n - d - 1 + 2{\rm pw}(G')) \nonumber\\ & \geq & d + 1 - 280K \label{eqn:XvYv1} \end{eqnarray} by Lemma~\ref{lem:degree-interval} and hence \begin{eqnarray} |\Voutle{d}(G')| & \geq & d^+_{G'}(v) - 298K + 1, \label{eqn:XvYv} \end{eqnarray} we have \begin{eqnarray*} |Y_v| & = & d^+_{G'}(v) - (|\Voutle{d}(G')| - |X_v|) \\ & \leq & 298K + 436K\\ & \leq & 734K. \end{eqnarray*} Call the event $||X_v \cap I| - p|X_v|| > \frac{k}{4}$ {\em the bad event on $X_v$} and the event $||Y_v \cap I| - p|Y_v|| > \frac{k}{4}$ {\em the bad event on $Y_v$}. By Theorem~\ref{thm:sample-indep}, the probability of the bad event on $X_v$ is smaller than \begin{eqnarray*} 2{\rm\bf E}p\left(-\frac{k^2}{4^2 \cdot 9|X_v|}\right) & \leq & 2{\rm\bf E}p\left(-\frac{k}{62784(h + 1)}\right) \end{eqnarray*} and, similarly, the probability of the bad event on $Y_v$ is smaller than \begin{eqnarray*} 2{\rm\bf E}p\left(-\frac{k}{105696(h + 1)}\right). \end{eqnarray*} Therefore, setting say, $k_h = 10^7(h + 1)^2$, it follows from our assumption $k \geq k_h$ that, with probability close to 1, none of the bad events listed above occurs. Assume none of those bad events occur. Recall that $\varDelta = |\Voutle{d}(G') \cap I| - p|\Voutle{d}(G')|$. 
Then, for each $v \in T \cap I$, we have \begin{eqnarray*} d^+_{H}(v) & = & |\Voutle{d}(G') \cap I| - |X_v \cap I| + |Y_v \cap I| \\ & \leq & p|\Voutle{d}(G')| + \varDelta - p|X_v| + \frac{k}{4} + p|Y_v| + \frac{k}{4} \\ & \leq & p d^+_{G'}(v) + \varDelta + \frac{k}{2} \end{eqnarray*} and, similarly, \begin{eqnarray*} d^+_H(v) & \geq & p d^+_{G'}(v) + \varDelta - \frac{k}{2}. \end{eqnarray*} Therefore, for each $v \in T \cap I$, we have \begin{eqnarray*} pd + \varDelta - \frac{k}{2} \leq d^+_H(v) & \leq & p(d + 18K) + \varDelta + \frac{k}{2} \\ & = & pd + \varDelta + \frac{19k}{2}. \end{eqnarray*} Therefore, $T \cap I$ contains a $(21k, 10k)$-degree tangle of $H$. \qed \end{proof} \begin{lemma} \label{lem:spider-sample} Let $h$ be a positive integer. Then, there is some positive integer $k_h$ such that the following holds. Let $k \geq k_h$ be an integer and let $K = (h + 1)k$. Let $G$ be an $h$-semicomplete digraph and suppose a semicomplete supergraph $G'$ of $G$ with vertex set $V(G)$ and with ${\rm pw}(G') \leq 140K$ has a $(6K, 18K)$-spider. Then $G$ has a semicomplete subgraph with a $(k, k)$-spider. \end{lemma} \begin{proof} Since $G'$ has a $(6K, 18K)$-spider, by Lemma~\ref{lem:spider}, it has a tame $(6K, w)$-spider for some $w \geq 18K$. The approach is similar to the proof of Lemma~\ref{lem:degree-sample}. The only essential difference is that the wildness of a vertex in the spider may not be $O(K)$ and the deviation of its out-degree in the sampled subgraph may be large. This is not an essential problem, however, since such a vertex with large wildness has, by the definition of tame spiders, the original out-degree far away from the range to be avoided and therefore a large deviation is affordable. Let $(T, L, R)$ be a tame $(d, 6K, w)$-spider of $G'$, where $w \geq 18K$. 
As in the proof of Lemma~\ref{lem:degree-sample}, let $\hat{G}$ be the complement of the undirected graph underlying $G$, $p = \frac{1}{2(h + 1)}$, $I$ the set of independent vertices of $\hat{G}$ sampled with probability $p$ applying Theorem~\ref{thm:sample-indep}, and $H = G'[I] = G[I]$. Let $T' = T \cap I$ and, for each $v \in T'$, let $L'_v = L_v^{\rm tame} \cap I$ and $R'_v = R_v^{\rm tame} \cap I$. Our goal is to show that $(T', L', R')$ is a $(k, k)$-spider of $H = G'[I]$ with high probability. For this to happen, we need to have $|T'| \geq k$ and, for some $d'$ and for each $v \in T'$, \begin{enumerate} \item $L_v' \subseteq N^-_H(v)$, \item $|L_v'| \geq 3k$, \item $d^+_H(u) < d'$ for each $u \in L_v'$, \item $R_v' \subseteq N^+_H(v)$, \item $|R_v'| \geq 3k$, and \item $d^+_H(u) > d' + k$ for each $u \in R_v'$. \end{enumerate} We list ``bad'' events below that could prevent the above conditions from being satisfied. We show that the probability of each of those events is $\exp(-\Omega(\frac{k}{h}))$ and, since the number of those events is obviously $O(kh)$, the probability is close to 1 that none of these events occurs under the assumption $k \geq k_h$ if $k_h$ is large enough. We also confirm that if none of those events occurs then the above conditions for $(T', L', R')$ being a $(k, k)$-spider are all satisfied. Since most of the analysis below is similar to the one we did for Lemma~\ref{lem:degree-sample}, we omit some details, using $\Omega$ notation rather than giving explicit constants in probability bounds, and emphasize what is different. First consider the event that $|T \cap I| < k$. Since $|T| \geq 6K$ and hence ${\rm\bf E}[|T \cap I|] \geq 3k$, the probability of this event is $\exp(-\Omega(\frac{k}{h}))$. Next consider, for each $v \in T$, the event that $|L_v \cap I| < 3k$ or $|R_v \cap I| < 3k$. Since $|L_v| \geq 9K$ and $|R_v| \geq 9K$, the probability of this event is also $\exp(-\Omega(\frac{k}{h}))$.
If none of these events occurs, all conditions enumerated above are satisfied except those on the out-degrees of vertices in $\bigcup_{u \in T'} L_u'$ and in $\bigcup_{u \in T'} R_u'$. We proceed to events that may cause intolerable deviations of the out-degrees of those vertices. For each $v \in \bigcup_{u \in T} (L_u^{\rm tame} \cup R_u^{\rm tame})$, let $X_v = \Voutle{d}(G') \setminus N^+_{G'}(v)$ and $Y_v = \Voutge{d + 1}(G') \cap N^+_{G'}(v)$. As in the proof of Lemma~\ref{lem:degree-sample}, we evaluate $d^+_H(v)$ (assuming $v \in I$), as follows: \begin{eqnarray*} d^+_H(v) & = & |N^+_{G'}(v) \cap I|\\ & = & |\Voutle{d}(G') \cap I| - |X_v \cap I| + |Y_v \cap I|. \end{eqnarray*} The deviation of the first term is common for all $v$: $\varDelta = |\Voutle{d}(G') \cap I| - {\rm\bf E}[|\Voutle{d}(G') \cap I|] = |\Voutle{d}(G') \cap I| - p|\Voutle{d}(G')|$. Therefore, our bad events concern the deviations of $|X_v \cap I|$ and of $|Y_v \cap I|$ from their expectations. First consider $v \in \bigcup_{u \in T}L_u^{\rm tame}$. From the tameness condition and by Lemma~\ref{lem:degree-interval}, we have \begin{eqnarray*} |X_v| & = & |\Voutle{d}(G') \setminus N^+_{G'}(v)| \\ & \leq & |\Voutge{d^+_{G'}(v) + 1}(G') \cap \Voutle{d}(G')| + |\Voutle{d^+_{G'}(v)}(G') \setminus N^+_{G'}(v)| \\ & \leq & d - d^+_{G'}(v) + 2{\rm pw}(G') + {\rm wld}(v) \\ & \leq & 3(6K) + 2(d - d^+_{G'}(v)) + w + 4{\rm pw}(G')\\ & \leq & 578K + w + 2(d - d^+_{G'}(v)), \end{eqnarray*} and using (\ref{eqn:XvYv}), \begin{eqnarray*} |Y_v| & = & d^+_{G'}(v) - (|\Voutle{d}(G')| - |X_v|) \\ & \leq & 298K + |X_v|\\ & \leq & 876K + w + 2(d - d^+_{G'}(v)). \end{eqnarray*} Note that neither $w$ nor $d - d^+_{G'}(v)$ is necessarily $O(K)$. Our bad events on $X_v$ and $Y_v$ here are that $|X_v \cap I| < p|X_v| - \max\{\frac{pw}{6}, \frac{p}{2}(d - d^+_{G'}(v))\}$ and that $|Y_v \cap I| > p|Y_v| + \max\{\frac{pw}{6}, \frac{p}{2}(d - d^+_{G'}(v))\}$ respectively.
If $\frac{w}{6} \geq \frac{1}{2}(d - d^+_{G'}(v))$, then, noting that $w \geq 18K$ and hence $|X_v| = O(w)$ and $|Y_v| = O(w)$, the probability of each of these events is \begin{eqnarray*} \exp\left(-\Omega\left(\frac{p^2 w^2}{|X_v|}\right)\right) & = & \exp\left(-\Omega\left(\frac{p^2 w^2}{w}\right)\right)\\ & = & \exp\left(-\Omega\left(p^2 K\right)\right)\\ & = & \exp\left(-\Omega\left(\frac{k}{h}\right)\right). \end{eqnarray*} The other case is similar and the probability of each of these events is $\exp(-\Omega(\frac{k}{h}))$ in either case. We conclude that, with probability close to 1, none of the above bad events occurs. We analyze the out-degree of each vertex $v \in \bigcup_{u \in T'} L_u'$ assuming that the bad event on neither $X_v$ nor $Y_v$ occurs. We have \begin{eqnarray*} d^+_H(v) & = & |\Voutle{d}(G') \cap I| - |X_v \cap I| + |Y_v \cap I|\\ & = & p(|\Voutle{d}(G')| -|X_v| + |Y_v|) + \varDelta + (p|X_v| - |X_v \cap I|) + (|Y_v \cap I| - p|Y_v|)\\ & = & p d^+_{G'}(v) + \varDelta + (p|X_v| - |X_v \cap I|) + (|Y_v \cap I| - p|Y_v|). \end{eqnarray*} The sum of the last two terms is at most $2\max\{\frac{pw}{6}, \frac{p}{2}(d - d^+_{G'}(v))\} = \max\{\frac{pw}{3}, p(d - d^+_{G'}(v))\} \leq \frac{pw}{3} + p(d - d^+_{G'}(v))$. Therefore, we have \begin{eqnarray} \label{eqn:upper_left} d^+_H(v) & \leq & pd + \varDelta + \frac{pw}{3} \end{eqnarray} for each $v \in \bigcup_{u \in T'} L_u'$. Next consider a vertex $v \in \bigcup_{u \in T}R_u^{\rm tame}$. From the tameness condition, we have \begin{eqnarray*} |X_v| & = & |\Voutle{d}(G') \setminus N^+_{G'}(v)| \\ & \leq & {\rm wld}(v) \\ & \leq & 3\cdot 6K + d^+_{G'}(v) - d + 2{\rm pw}(G')\\ & \leq & 298K + d^+_{G'}(v) - d, \end{eqnarray*} and using (\ref{eqn:XvYv1}), \begin{eqnarray*} |Y_v| & = & d^+_{G'}(v) - (|\Voutle{d}(G')| - |X_v|) \\ & \leq & 280K + d^+_{G'}(v) - d + |X_v|\\ & \leq & 578K + 2(d^+_{G'}(v) - d).
\end{eqnarray*} Our bad events on $X_v$ and $Y_v$ here are that $|X_v \cap I| > p|X_v| + \frac{p}{6}(d^+_{G'}(v) - d)$ and that $|Y_v \cap I| < p|Y_v| - \frac{p}{6}(d^+_{G'}(v) - d)$ respectively. Since $d^+_{G'}(v) - d \geq w \geq 18K$, the probability of each of these bad events is ${\rm\bf E}p(-\Omega(\frac{k}{h}))$ and therefore, with probability close to 1, none of these bad events occurs for any $v \in \bigcup_{u \in T}R_u^{\rm tame}$. We analyze the out-degree of each vertex $v \in \bigcup_{u \in T'} R_u'$ assuming that none of the bad events occurs. We have \begin{eqnarray*} d^+_H(v) & = & pd^+_{G'}(v) + \varDelta - (|X_v \cap I| - p|X_v|) - (p|Y_v| - |Y_v \cap I|) \end{eqnarray*} as before and the sum of the last two terms, neglecting signs, is at most $\frac{2p}{6}(d^+_{G'}(v) - d) = \frac{p}{3}(d^+_{G'}(v) - d)$. Therefore, we have \begin{eqnarray} d^+_H(v) & \geq & pd^+_{G'}(v) + \varDelta - \frac{p}{3}(d^+_{G'}(v) - d)\nonumber\\ & \geq & pd + \varDelta + \frac{2p}{3}(d^+_{G'}(v) - d)\nonumber\\ & \geq & pd + \varDelta + \frac{2p}{3}w \label{eqn:lower_right} \end{eqnarray} for each $v \in \bigcup_{u \in T'} R_u'$. From (\ref{eqn:upper_left}) and (\ref{eqn:lower_right}), we have \begin{eqnarray*} \min\{d^+_H(v) \mid v \in \bigcup_{u \in T'} R_u'\} - \max\{d^+_H(v) \mid v \in \bigcup_{u \in T'} L_u'\} & \geq & \frac{pw}{3}\\ & \geq & 6pK\\ & = & 3k. \end{eqnarray*} Therefore, $(T', L', R')$ is a $(d', k, k)$-spider of $H$ for some $d'$. \qed \end{proof} \begin{lemma} \label{lem:matching-sample} Let $h$ be a positive integer. Then, there is some positive integer $k_h$ such that the following holds. Let $k \geq k_h$ be an integer and let $K = (h + 1)k$. Let $G$ be an $h$-semicomplete digraph and suppose a semicomplete supergraph $G'$ of $G$ with vertex set $V(G)$ and ${\rm pw}(G') \leq 140K$ has a tame $(6K, 18K)$-matching tangle $(T_1, T_2)$. 
Suppose moreover that the matching bijection $\phi$ of this tangle is such that the edge $(v, \phi(v))$ of $G'$ for each $v \in T_1$ is in fact an edge of $G$. Then $G$ has a semicomplete subgraph that has a $(k, k)$-matching tangle. \end{lemma} \begin{proof} Let $\hat{G}$ be the complement of the undirected graph underlying $G$. Let $\tilde{G}$ be obtained from $\hat{G}$ by contracting the doubleton $\{v, \phi(v)\}$ into a vertex, say $t_v$, for each $v \in T_1$. Let $T = \{t_v \mid v \in T_1\}$. Note that the maximum degree of $\tilde{G}$ is $2h$ or smaller. Similarly to Lemma~\ref{lem:spider-sample}, we use Theorem~\ref{thm:sample-indep} to obtain an independent set $I'$ of $\tilde{G}$ where the probability of each $v \in V(\tilde{G})$ belonging to $I'$ is $\frac{1}{2(2h + 1)}$. Let $H = G[I]$ where $I = ((V(\tilde{G}) \setminus T) \cap I') \cup \{v, \phi(v) \mid v \in T_1, t_v \in I'\}$. As $I'$ is independent in $\tilde{G}$, $I$ is independent in $\hat{G}$ and hence $G'[I] = G[I]$ is semicomplete. By an analysis similar to the one in Lemma~\ref{lem:spider-sample}, setting $k_h$ large enough, we have $|T \cap I| \geq k$ and $\min_{v \in T_2 \cap I} d^+_H(v) - \max_{v \in T_1 \cap I} d^+_H(v) \geq k$ with probability close to 1. When this happens, $(T_1 \cap I, T_2 \cap I)$ contains a $(k, k)$-matching tangle of $H$. \qed \end{proof} We are now ready to prove Theorem~\ref{thm:comb}. Fix a positive integer $h$. Let $k_h$ be a constant large enough as required in Lemmas~\ref{lem:degree-sample}, \ref{lem:spider-sample}, and \ref{lem:matching-sample}. We set $f(k, h) = 128(h + 1)k$ for $k \geq k_h$ and $f(k, h) = f(k_h, h)$ for $k < k_h$. Let $G$ be an $h$-semicomplete digraph of pathwidth at least $f(k, h)$. In the following proof that $G$ contains a semicomplete subgraph of pathwidth at least $k$, we assume $k \geq k_h$; otherwise we would prove that $G$ contains a semicomplete subgraph of pathwidth at least $k_h \geq k$. We set $K = (h + 1)k$ for readability.
List the vertices of $G$ as $v_1$, \ldots, $v_n$, in the non-decreasing order of out-degrees. Let $G'$ be the semicomplete digraph obtained from $G$ by adding edge $(v_i, v_j)$ for each pair $i > j$ such that neither $(v_i, v_j)$ nor $(v_j, v_i)$ is an edge of $G$. By our assumption, ${\rm pw}(G') \geq {\rm pw}(G)$ is at least $128K$. We assume below that ${\rm pw}(G') \leq 140K$; if this assumption does not hold, we choose $k' \geq k$ such that $128(h + 1)k' \leq {\rm pw}(G') \leq 140(h + 1)k'$ and prove that $G$ has a semicomplete subgraph of pathwidth $\geq k'$. Applying Lemma~\ref{lem:get-tangle}, we obtain a tame $(46K, 18K)$-degree tangle, a tame $(6K, w)$-spider for some $w \geq 18K$, or a tame $(6K, 18K)$-matching tangle of $G'$. If $G'$ has a tame $(46K, 18K)$-degree tangle, then $G$ has a semicomplete subgraph that contains a $(21k, 10k)$-degree tangle, by Lemma~\ref{lem:degree-sample}. If $G'$ has a tame $(6K, w)$-spider for $w \geq 18K$, then $G$ has a semicomplete subgraph that contains a $(k, k)$-spider, by Lemma~\ref{lem:spider-sample}. Finally, suppose $G'$ has a $(6K, 18K)$-matching tangle $(T_1, T_2)$ with matching bijection $\phi$. We observe that, for each $v \in T_1$, the edge $(v, \phi(v))$ of $G'$ is in fact an edge of $G$, since \begin{eqnarray*} d^+_G(\phi(v)) &\geq & d^+_{G'}(\phi(v)) - h \geq d^+_{G'}(v) + 18K - h > d^+_{G'}(v)\\ &\geq & d^+_G(v) \end{eqnarray*} and the edge addition rule for constructing $G'$ from $G$ dictates that if an edge between $v$ and $\phi(v)$ is added then it must be from $\phi(v)$ to $v$. Therefore, Lemma~\ref{lem:matching-sample} applies and $G$ has a semicomplete subgraph with a $(k, k)$-matching tangle. In either case, we conclude that $G$ contains a semicomplete subgraph of pathwidth at least $k$. This completes the proof of Theorem~\ref{thm:comb}.
\section{Proof of Theorem~\ref{thm:sample-indep}} \label{sec:sample-indep} The goal of this section is to prove Theorem~\ref{thm:sample-indep}, which we restate below. Graphs are undirected in this section and we use the following notation. For each $v \in V(G)$, $N_G(v)$ is the set of neighbors of $v$ and $N_G[v] = N_G(v) \cup \{v\}$; for each $U \subseteq V(G)$, $N_G[U] = \bigcup_{u \in U} N_G[u]$ and $N_G(U) = N_G[U] \setminus U$. \begingroup \def\thetheorem{\ref{thm:sample-indep}} \begin{theorem} Let $G$ be an undirected graph on $n$ vertices with maximum degree $d$ or smaller. Let $p = \frac{1}{2(d + 1)}$. Then, it is possible to sample a set $I$ of independent vertices of $G$ so that ${\rm\bf Pr}(v \in I) = p$ for each $v \in V(G)$ and, for each $S \subseteq V(G)$, we have \begin{eqnarray*} {\rm\bf Pr}(|S \cap I| > p|S|+ t) < \exp\left(-\frac{t^2}{9|S|}\right) \end{eqnarray*} and \begin{eqnarray*} {\rm\bf Pr}(|S \cap I| <p|S| - t) < \exp\left(-\frac{t^2}{9|S|}\right). \end{eqnarray*} \end{theorem} \addtocounter{theorem}{-1} \endgroup A naive sampling method is to keep a set $V$ of candidate vertices and repeatedly pick a random vertex from $V$ to add to $I$, removing the selected vertex and all of its neighbors from $V$. This procedure would produce an independent set of cardinality at least $n / (d + 1)$. The exact probability of each vertex being in $I$, however, would depend on the structure of $G$. To achieve the uniform probability as claimed in the above theorem, we sample, at each step, from a $d$-regular supergraph of $G[V]$ rather than from $G[V]$ itself. We need the following theorem on regular completion of graphs due to Erd\H{o}s and Kelly. \begin{theorem} \label{thm:reg_comp} \cite{EK63} Let $G$ be an undirected graph on $n$ vertices and $d$ an integer such that $d_G(v) \leq d$ for every $v \in V(G)$. Let $t = \sum_{v \in V(G)} (d - d_G(v))$.
Then, there is a $d$-regular graph on $n + m$ vertices that has $G$ as an induced subgraph if and only if $m$ satisfies all of the following four conditions: \newline\noindent (1) $md \geq t$; \newline\noindent (2) $m^2 - m(d + 1) + t \geq 0$; \newline\noindent (3) $m \geq d - d_G(v)$ for every $v \in V(G)$; and \newline\noindent (4) $(n + m)d$ is an even integer. \end{theorem} Akiyama {\it et al.} \cite{AEH83} proved that, for every graph $G$ on $n$ vertices with maximal degree $d$ or smaller, there is a $d$-regular graph on $N \leq n + d + 2$ vertices ($N \leq n + d + 1$ if $nd$ is even) that contains $G$ as a (not necessarily induced) subgraph. The following lemma states that every integer $N \geq n + d + 1$ with $Nd$ even has that property. The proof is, naturally, analogous to the one in \cite{AEH83}. \begin{lemma} \label{lem:paving} Let $G$ be a graph on $n$ vertices with maximum degree $d$ or smaller and $N$ an arbitrary integer such that $N \geq n + d + 1$ and $Nd$ is even. Then, there is a $d$-regular graph on $N$ vertices that contains $G$ as a subgraph. \end{lemma} \begin{proof} Let $H$ be a maximal graph on $V(G)$ with maximum degree $d$ that contains all the edges of $G$. Let $D = \{v \in V(G) \mid d_{H}(v) < d\}$. From the maximality of $H$, $D$ must be a clique of $H$ and hence $|D| \leq d$. It trivially follows that $t = \sum_{v \in V(G)} (d - d_{H}(v)) \leq d^2$. Setting $m = N - n \geq d + 1$, conditions (1), (2) and (3) of Theorem~\ref{thm:reg_comp} are trivially satisfied. Condition (4) is also satisfied as we are assuming $Nd$ is even. Thus, we may apply Theorem~\ref{thm:reg_comp} to $H$ to have a $d$-regular graph that contains $H$ and hence $G$ as a subgraph. \qed \end{proof} We now describe the sampling procedure of Theorem~\ref{thm:sample-indep}. Fix a graph $G$ on $n$ vertices with maximum degree $d$ or smaller. Let $s = \lceil n / (d + 1) \rceil$. 
We construct a sequence of pairs $(I_i, V_i)$ for $0 \leq i \leq s$, where $\emptyset = I_0 \subseteq I_1 \subseteq \ldots \subseteq I_s$ and $V(G) = V_0 \supseteq V_1 \supseteq \ldots \supseteq V_s$. Our independent set $I$ is $I_s$. Fix $i$, $0 \leq i < s$ and suppose we have constructed $I_i$ and $V_i$. We construct $I_{i + 1}$ and $V_{i + 1}$ as follows. Let $n_i = (2s - i)(d + 1)$. Since $i < s$, we have $n_i \geq n + d + 1 \geq |V_i| + d + 1$. Moreover, $n_i d$ is even as $d + 1$ divides $n_i$. Therefore, Lemma~\ref{lem:paving} applies and there is a $d$-regular supergraph $H_i$ of $G[V_i]$ on $n_i$ vertices. We pick a vertex $v$ of $H_i$ uniformly at random. If $v \in V_i$ then we set $I_{i + 1} = I_i \cup \{v\}$; otherwise, we set $I_{i + 1} = I_i$. In either case, we set $V_{i + 1} = V_i \setminus (\{v\} \cup N_{H_i}(v))$. Since $H_i$ is a supergraph of $G[V_i]$, this ensures that $v$ is independent, in $G$, of all vertices in $V_{i + 1}$. By a straightforward induction, $I_i$ is an independent set of $G$, $V_i \subseteq V(G) \setminus I_i$, and there is no edge of $G$ between $I_i$ and $V_i$, for $0 \leq i \leq s$. \begin{remark} To make $I_i$ and $V_i$ well-defined random variables for $0 \leq i \leq s$, we assume that the $d$-regular supergraph $H_i$ of $G[V_i]$ used above is uniquely determined from $V_i$ and $n_i$ by some deterministic procedure relying on some predefined total order on $V(G)$ for tie-breaking. \end{remark} \begin{lemma} For each $v \in V(G)$ and $0 \leq i \leq s$, \begin{eqnarray*} {\rm\bf Pr}(v \in I \mid v \in V_i) = \frac{s - i}{n_i}. \end{eqnarray*} \end{lemma} \begin{proof} The proof is by induction on $s - i$. The base case $i = s$ is trivial. For the induction step, suppose $i < s$. 
Using the induction hypothesis, we have \begin{eqnarray*} \label{eqn:1} {\rm\bf Pr}(v \in I \mid v \in V_i) & = & {\rm\bf Pr}(v \in I_{i + 1} \mid v \in V_i) + {\rm\bf Pr}(v \in V_{i + 1} \mid v \in V_i) {\rm\bf Pr}(v \in I \mid v \in V_{i + 1}) \\ & = & \frac{1}{n_i} + \frac{n_i - (d + 1)}{n_i}\cdot\frac{s - i - 1}{n_{i + 1}}\\ & = & \frac{1}{n_i} + \frac{n_{i + 1}}{n_i}\cdot\frac{s - i - 1}{n_{i + 1}}\\ & = & \frac{s - i}{n_i}. \end{eqnarray*} \qed \end{proof} \begin{corollary} For each $v \in V(G)$, we have \begin{eqnarray*} {\rm\bf Pr}(v \in I) = \frac{1}{2(d + 1)}. \end{eqnarray*} \end{corollary} Therefore, we have, for each vertex set $S \subseteq V(G)$, \begin{eqnarray*} {\rm\bf E}[|S \cap I|] = \frac{|S|}{2(d + 1)}. \end{eqnarray*} We show that the value $|S \cap I|$ is sharply concentrated around its expectation, to establish Theorem~\ref{thm:sample-indep}. We assume $d \geq 1$ in the following analysis: the case $d = 0$ is trivial. Fix $S \subseteq V(G)$. We first consider the case where $|S| \geq \frac{s}{2}$. We define a random variable $Y_i$ for $0 \leq i \leq s$ by \begin{eqnarray*} Y_i = {\rm\bf E}[|S \cap I| \mid (I_0, V_0), (I_1, V_1), \ldots, (I_i, V_i)], \end{eqnarray*} where the expectation is conditioned on the partial outcome of the experiment up to the construction of $I_i$ and $V_i$. We have \begin{eqnarray*} Y_s & = &|S \cap I|,\\ Y_0 & = & {\rm\bf E}[|S \cap I|] = \frac{|S|}{2(d + 1)}, \end{eqnarray*} and, for $0 \leq i < s$, \begin{eqnarray*} Y_i = {\rm\bf E}[Y_{i + 1} \mid (I_0, V_0), (I_1, V_1), \ldots, (I_i, V_i)], \end{eqnarray*} where the expectation is conditioned similarly to the above. Therefore, the sequence $Y_0$, \ldots, $Y_s$ is a martingale. We show that \begin{eqnarray} \label{eqn:bounded_diff1} |Y_i - Y_{i-1}| \leq \frac{3}{2} \end{eqnarray} holds for $0 < i \leq s$. 
We have \begin{eqnarray*} Y_i & = & |S \cap I_i| + \sum_{v \in S \cap V_i} {\rm\bf Pr}(v \in I \mid v \in V_i)\\ & = & |S \cap I_i| + \frac{|S \cap V_i|(s - i)}{n_i}. \end{eqnarray*} Since both $|S \cap V_i|$ and the fraction $(s - i) / n_i$ are monotone non-increasing in $i$ and $|S \cap I_i| - |S \cap I_{i-1}| \leq 1$, we have $Y_i - Y_{i-1} \leq 1$. We also have \begin{eqnarray*} Y_{i-1} - Y_i & \leq &\frac{|S \cap V_{i-1}|(s - (i-1))}{n_{i-1}} - \frac{|S \cap V_i|(s - i)}{n_i} \\ & = & \frac{(|S \cap V_{i-1}|- |S \cap V_i|)(s - (i-1))}{n_{i - 1}}\\ && + |S \cap V_i|\left(\frac{s - (i-1)}{n_{i-1}} - \frac{s - i}{n_i}\right)\\ & \leq & \frac{(d + 1)(s - (i-1))}{n_{i-1}} + \frac{|S \cap V_i|}{n_i}\\ & \leq & \frac{s - (i-1)}{2s - (i-1)} + \frac{|S|}{n}\\ & \leq & \frac{3}{2} \end{eqnarray*} and hence (\ref{eqn:bounded_diff1}). We use the following form of Azuma's inequality \cite{AS92}. Let $X_0$, $X_1$, \ldots, $X_m$ be a martingale with \begin{eqnarray*} |X_{i + 1} - X_i| \leq 1 \end{eqnarray*} for all $0 \leq i < m$. Let $\lambda > 0$ be arbitrary. Then, \begin{eqnarray} {\rm\bf Pr}(X_m > X_0 + \lambda \sqrt{m}) < {\rm\bf E}p(-\lambda^2 / 2) \end{eqnarray} and \begin{eqnarray} {\rm\bf Pr}(X_m < X_0 - \lambda \sqrt{m}) < {\rm\bf E}p(-\lambda^2 / 2) \end{eqnarray} Applying this inequality for martingale $Y'_i = \frac{2}{3} Y_i$, $0 \leq i \leq s = m$, with $\lambda = \frac{2t}{3\sqrt{s}}$, we have \begin{eqnarray*} {\rm\bf Pr}(Y_s > Y_0 + t) & = & {\rm\bf Pr}(Y'_s > Y'_0 + \frac{2t}{3}) \\ & < & {\rm\bf E}p\left(-\frac{4t^2}{9\cdot 2s}\right)\\ & \leq & {\rm\bf E}p\left(-\frac{t^2}{9|S|}\right) \end{eqnarray*} and, similarly, \begin{eqnarray*} {\rm\bf Pr}(Y_s < Y_0 - t) & < & {\rm\bf E}p\left(-\frac{t^2}{9|S|}\right), \end{eqnarray*} finishing the case where $|S| \geq \frac{s}{2}$. We turn to the case where $|S| < \frac{s}{2}$. 
We define a sequence $i_0$, $i_1$, \ldots, $i_m$ of indices, where $m = 3|S|$, that depends on the outcome of the sampling, inductively as follows. \begin{enumerate} \item $i_0 = 0$. \item For $j > 0$, $i_j$ is the smallest $i \geq i_{j - 1}$ that satisfies either of the following conditions: \newline(1) $i = s$; \newline(2) $V_i \cap S \neq V_{i_{j - 1}} \cap S$; \newline(3) $i - i_{j - 1} \geq \frac{s}{2|S|}$. \end{enumerate} Note that if $i_j = s$ for some $j$, then we have $i_{j'} = s$ for $j \leq j' \leq m$. We also note that $i_m = s$, since, in determining $i_j$ for $1 \leq j \leq m$, the second condition may apply at most $|S|$ times and the third condition at most $2|S|$ times, but at most $2|S| - 1$ times if the second condition applies at all. We define a random variable $Z_j$ for $0 \leq j \leq m$ by \begin{eqnarray*} Z_j = {\rm\bf E}[|S \cap I| \mid (I_0, V_0), (I_1, V_1), \ldots, (I_{i_j}, V_{i_j})], \end{eqnarray*} where the expectation is conditioned on the partial outcome of the experiment up to the construction of $I_{i_j}$ and $V_{i_j}$. We have \begin{eqnarray*} Z_m & = &|S \cap I|,\\ Z_0 & = & {\rm\bf E}[|S \cap I|] = \frac{|S|}{2(d + 1)}, \end{eqnarray*} and, for $0 \leq j < m$, \begin{eqnarray*} Z_j = {\rm\bf E}[Z_{j + 1} \mid (I_0, V_0), (I_1, V_1), \ldots, (I_{i_j}, V_{i_j})], \end{eqnarray*} where the expectation is conditioned similarly to the above. Therefore, the sequence $Z_0$, \ldots, $Z_m$ is a martingale. We show that \begin{eqnarray} \label{eqn:bounded_diff} |Z_j - Z_{j-1}| \leq 1 \end{eqnarray} holds for $0 < j \leq m$. We have \begin{eqnarray*} Z_j & = & |S \cap I_{i_j}| + \sum_{v \in S \cap V_{i_j}} {\rm\bf Pr}(v \in I \mid v \in V_{i_j} )\\ & = & |S \cap I_{i_j}| + \frac{|S \cap V_{i_j}|(s - i_j)}{n_{i_j}}.
\end{eqnarray*} Since both $|S \cap V_{i_j}|$ and the fraction $(s - i_j) / n_{i_j}$ are monotone non-increasing in $j$ and $|S \cap I_{i_j}| - |S \cap I_{i_{j-1}}| \leq 1$ by the second condition in the definition of $i_j$, we have $Z_j - Z_{j-1} \leq 1$. We also have \begin{eqnarray*} \frac{s - i_{j-1}}{n_{i_{j - 1}}} - \frac{s - i_j}{n_{i_j}} & \leq & \frac{i_j - i_{j-1}}{n_s} \\ & \leq & \left(\frac{s}{2|S|} + 1\right)\frac{1}{(d + 1)s}\\ & \leq & \frac{1}{2(d + 1)|S|} + \frac{1}{(d + 1)s} \\ & \leq & \frac{1}{2(d + 1)|S|} + \frac{1}{2(d + 1)|S|} \\ & \leq & \frac{1}{2|S|}\\ \end{eqnarray*} by the third condition in the definition of $i_j$ and \begin{eqnarray*} \label{eqn:diffx} |S \cap V_{i_{j-1}}| - |S \cap V_{i_j}| \leq d + 1 \end{eqnarray*} by the second condition. Therefore, we have \begin{eqnarray*} Z_{j-1} - Z_j & \leq &\frac{|S \cap V_{i_{j-1}}|(s - i_{j-1})}{n_{i_{j-1}}} - \frac{|S \cap V_{i_j}|(s - i_j)}{n_{i_j}} \\ & = & \frac{(|S \cap V_{i_{j-1}}|- |S \cap V_{i_j}|)(s - i_{j-1})}{n_{i_{j - 1}}}\\ && + |S \cap V_{i_j}|\left(\frac{s - i_{j-1}}{n_{i_{j -1}}} - \frac{s - i_j}{n_{i_j}}\right)\\ & \leq & \frac{(d + 1)(s - i_{j-1})}{n_{i_{j-1}}} + \frac{|S \cap V_{i_j}|}{2|S|}\\ & \leq & \frac{s - i_{j - 1}}{2s - i_{j - 1}} + \frac{|S \cap V_{i_j}|}{2|S|}\\ & \leq & 1 \end{eqnarray*} and hence (\ref{eqn:bounded_diff}). Applying Azuma's inequality for this martingale with $\lambda = t / \sqrt{m}$, we have \begin{eqnarray*} {\rm\bf Pr}(Z_m > Z_0 + t) & < & {\rm\bf E}p\left(-\frac{t^2}{2m}\right)\\ & \leq & {\rm\bf E}p\left(-\frac{t^2}{6|S|}\right) \end{eqnarray*} and \begin{eqnarray*} {\rm\bf Pr}(Z_m < Z_0 - t) & < & {\rm\bf E}p\left(-\frac{t^2}{6|S|}\right), \end{eqnarray*} finishing the proof of Theorem~\ref{thm:sample-indep}. \end{document}
\begin{document} \title{Deep Kernel Learning for Mortality Prediction in the Face of Temporal Shift} \begin{abstract} Neural models, with their ability to provide novel representations, have shown promising results in prediction tasks in healthcare. However, patient demographics, medical technology, and quality of care change over time. This often leads to a drop in the performance of neural models for prospective patients, especially in terms of their calibration. The deep kernel learning (DKL) framework may be robust to such changes as it combines neural models with Gaussian processes, which are aware of prediction uncertainty. Our hypothesis is that out-of-distribution test points will result in probabilities closer to the global mean and hence prevent overconfident predictions. This in turn, we hypothesise, will result in better calibration on prospective data. This paper investigates DKL's behaviour when facing a temporal shift, which was naturally introduced when an information system that feeds a cohort database was changed. We compare DKL's performance to that of a neural baseline based on recurrent neural networks. We show that DKL indeed produced superior calibrated predictions. We also confirm that the DKL's predictions were indeed less sharp. In addition, DKL's discrimination ability was even improved: its AUC was 0.746 ($\pm$0.014 std), compared to 0.739 ($\pm$ 0.028 std) for the baseline. The paper demonstrated the importance of including uncertainty in neural computing, especially for their prospective use. \end{abstract} \keywords{Deep Kernel Learning, temporal shift, time series, calibration, Gaussian process, mortality prediction.} \section{Introduction} In the ICU, the prediction of in-hospital mortality is the task of providing probabilities for Intensive Care patients to die in the hospital, either in the ICU or after discharge to another ward.
The (early) detection of such patients is relevant for clinical decision making. Mortality prediction models (MPMs) are often trained with large collections of electronic health records (EHR) that contain structured patient information such as demographics and physiological variables. MPMs based on deep learning are becoming prevalent in medical applications \citep{rajkomar2018}. One reason for this is that NNs automatically derive {\it representations} for time series data, which may provide predictive ability superior to that of standard regression models \citep{shickel2018, Harutyunyan_2019}. Specifically, neural models learn features from the input data by the incremental composition of simpler layers, resulting in complex representations for non-linear prediction models \citep{Bengio09ftml}. However, patient characteristics, medical technology, and clinical guidelines change over time, thus forming a challenge for the validity of MPMs for prospective patients, as these models were learned on historical data \citep{Minne}. In particular, due to their flexibility, NNs have the ability to leverage on slight patterns in the data, but such patterns may not be stable over time and hence NN models may be sensitive to such temporal shifts causing a change (usually a drop) in performance \citep{pmlr-v106-nestor19a}. For prediction models of a binary outcome, not only the discriminatory capability of the model may suffer, but especially its (mis)calibration. Calibration refers to the correspondence between the predicted probabilities and the true probabilities. The true probabilities are estimated on the test set by some measure of averaging the number of events for a set of patients. Performance drift has consequences for the task at hand, and the detrimental effects on benchmarking ICUs have been demonstrated \citep{Minne}. 
One way to tackle this problem is to augment NNs with the notion of uncertainty: whenever the data distribution changes due to shift, the predictions should be more uncertain \citep{mackayBNN}. In contrast to NNs, The Gaussian process (GP) is a probabilistic framework for time series modelling that is able to increase model capacity with the amount of available data, and to produce uncertainty estimates. A GP characterises a distribution over possible functions that fit the input data. It is defined by a Gaussian function with a certain mean and, more importantly, a kernel function that captures the correlations between any two observations. The kernel encompasses the notion of uncertainty by performing a pairwise computation among all input data using some notion of similarity between the observations. The kernel can be viewed as providing a probability distribution over all possible models fitting the data. The prediction models based on GPs successfully model time series data, incorporate confidence regions to predictions, and offer interpretability of the variables with the kernel function \citep{Roberts_gaussianprocesses}. Moreover, the GP framework has been used to develop clinical prediction models \citep{durichen2014, cheng2020}. In particular, \citet{marzyeh2015} use a multitask GP to model time series with physiological variables and clinical notes for mortality prediction. Directly relevant to our paper is the proposition in \citep{pmlr-v51-wilson16} to combine both NNs and GPs on a common framework of deep kernel learning (DKL). DKL leverages inductive biases from the NNs and from the non-parametric GPs. In this paper, we investigate the behaviour of mortality prediction models based on DKL. In particular, we are interested in inspecting the robustness of the DKL model to a temporal shift. We also compare it to a strong NN-based baseline. Our hypothesis is that incorporation of uncertainty improves predictions. 
More specifically, we expect the DKL, when faced with uncertainty in the test set, to provide less extreme predictions that are closer to the global mean rather than providing overconfident predictions. In turn, the resultant prediction set would be less sharp than for the baseline model. Sharpness, which is also referred to as refinement in weather forecasting \citep{Murphy1987AGF}, measures the tendency of predictions to be close to 0 and 1. We therefore also compare the sharpness of both models but check that this does not come at the cost of discrimination. Finally, we also performed internal validation of the DKL model with all the population (i.e. no temporal shift) to understand whether the DKL's behaviour is specific to temporal validation. Our main contribution in this paper is the introduction of a DKL model for in-hospital mortality prediction based on the first hours of an ICU stay in the context of temporal validation. The GP component in the DKL is shown to be robust to the shift in population and produces better calibrated predictions, without sacrificing discrimination. Our feature extraction is based on an open source benchmark \citep{Harutyunyan_2019} using the publicly available MIMIC-III \citep{mimiciii} database. This facilitates the reproducibility of our results\footnote{Code is available at: \url{https://github.com/mriosb08/dkl-temporal-shift.git}}. \section{Deep Kernel Learning} The Gaussian Process \citep{rasmussen2005} is a Bayesian non-parametric framework based on kernels for regression and classification. The set of functions that describes a given input data is possibly infinite and the GP assigns a probability to each one.
For a dataset $\mathcal{X}=\left\{\left(\mathbf{x}_{1}, y_{1}\right),\left(\mathbf{x}_{2}, y_{2}\right), \ldots,\left(\mathbf{x}_{n}, y_{n}\right)\right\}$ where $\mathbf{x}$ is an input vector and $y$ a corresponding output, we want to learn a function $f$ that is inferred from a GP prior: \begin{subequations} \begin{align} f(\mathbf{x}) \thicksim \GP(m(\mathbf{x}), k(\mathbf{x}, \mathbf{x}')) \end{align} \end{subequations} where $m(\mathbf{x})$ defines a mean (often set to 0) and $k(\mathbf{x}, \mathbf{x}')$ defines the covariance in the form of a kernel function. The kernel function models the covariance between all possible pairs $(\mathbf{x}, \mathbf{x}')$ and provides a measure of uncertainty. The choice of kernel determines properties of the function that we want to learn, usually this choice is based on background knowledge of the problem. \citet{wilson2016} propose kernels based on deep learning architectures for GP regression. The DKL employs a GP with a base kernel as the last hidden layer of a NN. In other words, the DKL is a pipeline for learning complex NN features, and a distribution over functions that fit our input data. The base kernel $k\left(\mathbf{x}, \mathbf{x}' \mid \theta\right)$ with hyperparameters $\theta$ is parameterized by a non-linear function. \begin{subequations} \begin{align} k\left(\mathbf{x}, \mathbf{x}' \mid \theta\right) \rightarrow k\left(g\left(\mathbf{x}, \omega\right), g\left(\mathbf{x}', \omega\right) \mid \theta, \omega\right), \end{align} \end{subequations} where $g(\mathbf{x}, \omega)$ is a NN architecture with weights $\omega$. In addition, the DKL jointly learns the NN weights and kernel hyperparameters under the GP probabilistic framework. Learning a GP involves computing the kernel function, and finding the best kernel hyperparameters. The DKL optimises both the kernel hyperparameters and the NN weights, by maximising the marginal likelihood. 
In Figure \ref{fig:arch}, we define the architecture for extracting features $g(\mathbf{x}, \omega)$, where $\mathbf{x}_i$ denotes the input vector in the $i$th element of $\mathcal{X}$. \begin{figure*} \caption{NN architecture $g(\mathbf{x}, \omega)$ for feature extraction.} \label{fig:arch} \end{figure*} The input features are first projected with an affine layer ($\linear(.)$), then fed to a bidirectional LSTM ($ \birnn(.)$) \citep{HochSchm97} for encoding time series. Next the result goes through an affine layer with a non-linearity ($\relu(.)$) that combines the hidden states of the bidirectional LSTM. Next the features $\mathbf{f}_i$ are summarised by averaging ($\avg(.)$) and then fed to the GP layer. \section{Experiments} The Medical Information Mart for Intensive Care (MIMIC-III) database includes over 60,000 ICU stays across 40,000 critical care patients \citep{mimiciii}. \citet{Harutyunyan_2019} propose a public benchmark and baselines based on MIMIC-III for modelling mortality, length of stay, physiologic decline, and phenotype classification. We use the benchmark for predicting in-hospital mortality based on the first 48 hours of an ICU stay. The cohort excludes all ICU stays with unknown length-of-stay, patients under 18, multiple ICU stays, stays less than 48 hours, and no observations during the first 48 hours. The in-hospital mortality class is defined by comparing the date of death against hospital admissions and discharge times with a resulting mortality rate of 13.23\%. We use the benchmark to extract $17$ input physiological variables (i.e. features), that are a subset of the Physionet challenge\footnote{\url{https://physionet.org/content/challenge-2012/1.0.0/}}. The benchmark \citep{Harutyunyan_2019} code processes the time series data with imputation of missing values with the previous hour, and normalisation from MIMIC-III. The normalisation of the features is performed by subtracting the mean and dividing by the standard deviation.
The features also provide a binary mask for each variable indicating which time-step is imputed. All categorical variables are encoded using one-hot vectors (e.g. Glasgow coma scales). The final feature vector is formed by the concatenation of the clinical variables and the one-hot vectors with a total of $76$ features. The clinical variables are shown in Table\ref{tab:variables}. \begin{table}[h] \small \centering \begin{tabular}{l} \hline \multicolumn{1}{c}{Variable} \\ \hline Capillary refill rate \\ Diastolic blood pressure \\ Fraction inspired oxygen \\ Glascow coma scale eye opening \\ Glascow coma scale motor response \\ Glascow coma scale total \\ Glascow coma scale verbal response \\ Glucose \\ Heart Rate \\ Height \\ Mean blood pressure \\ Oxygen saturation \\ Respiratory rate \\ Systolic blood pressure \\ Temperature \\ Weight \\ pH \\ \hline \end{tabular} \caption{Clinical variables used in our experiments from MIMIC-III.} \label{tab:variables} \end{table} We use the architecture $g(.)$ as the baseline defined as: \textbf{BiLSTM}, which is based on a bidirectional LSTM for feature representation, and a linear prediction layer. We implement the \textbf{DKL} model with GPyTorch \citep{gardner2018}, with the following components: the RBF kernel as the base kernel, feature extractor $g(.)$, and grid size $100$ which is the number of inducing points used to approximate the GP for faster computations. The computation of the posterior distribution in the GP is expensive and several methods have been proposed to accelerate it by approximating it with a function over a set of inducing points \citep{quionerocandela2007approximation, wilson2015}. In addition, we perform a simple ablation on the architecture by replacing the bidirectional LSTM with a LSTM for both models, baseline and DKL defined as: \textbf{LSTM}, and \textbf{DKL-LSTM}. 
We use the following hyperparameters: optimiser Adam \citep{adamKingma}, learning rate $1\mathrm{e}{-3}$, epochs 30, encoder size 16, hidden size 16, batch size 100, dropout 0.3 applied after the $\linear$ layer. We perform model selection with the validation dataset based on AUC-ROC. \subsection{Temporal shift: strategy and results} The MIMIC-III dataset includes data using the CareVue electronic patient record (EPR) system from 2001 to 2008. From 2008 to 2012 the MetaVision system was used instead. In the first experiment for inspecting temporal shift, we split the dataset into the CareVue period for training with $9,646$ instances and $1,763$ for validation (for tuning the hyper-parameters), and the data in the MetaVision period with $7,689$ as the test set. We excluded patients present in both registries. This constitutes a temporal validation strategy in which the model is tested on data collected in the future relative to the data on which it has learned. This means that the model faces possible temporal shift due to changes that occur in time, and indeed possibly also due to the change of the EPR system that collects the data that could have affected the workflow and/or the way of registration. Performance was measured in terms of: Discrimination, by the AUC-ROC; the balance between the positive predicted value and sensitivity, by the AUC-PR; the accuracy of predictions by the Brier score; and calibration by calibration graphs and the Cox recalibration approach \citep{Cox1958TwoFA} in which the observed outcome in the test set is regressed using logistic regression on the log odds of the predictions. If the predictions were perfectly calibrated then the linear predictor of this model would have an intercept of 0 and a slope of 1. We test deviations from these ideal values of 0 and 1, respectively.
To test our hypothesis whether the DKL approach provides more conservative predictions due to uncertainty for areas in the test set, we measure the (un)sharpness of the predictions. We use the following measure of unsharpness: $\frac{\sum_{i=1}^{N} p_i(1 - p_i)}{N}$ where $p_i$ is the $i$th prediction and $N$ is the number of observations. \begin{table*}[t] \small \centering \begin{tabular}{lrrrr} \toprule & \multicolumn{2}{c}{Validation} & \multicolumn{2}{c}{Test} \\ Model & \multicolumn{1}{c}{AUC-ROC} & \multicolumn{1}{c}{AUC-PR} & \multicolumn{1}{c}{AUC-ROC} & \multicolumn{1}{c}{AUC-PR} \\ \hline LSTM & $ 0.838\pm 0.003$ & $ 0.532 \pm 0.006$ & $0.693 \pm 0.027$ & $0.317\pm 0.037$ \\ BiLSTM & $ 0.857\pm 0.002$ & $ 0.572 \pm 0.007$ & $0.739 \pm 0.028$ & $\textbf{0.386}\pm 0.018$ \\ DKL-LSTM & $0.854 \pm 0.002$ & $0.562 \pm 0.010$ & $0.701\pm 0.033$ & $0.327 \pm 0.026$ \\ DKL & $0.856 \pm 0.002$ & $0.569 \pm 0.004$ & $\textbf{0.746}\pm 0.014$ & $0.373 \pm 0.018$ \\ \bottomrule \end{tabular} \caption{In-hospital mortality results with a temporal population shift over 10 runs $\pm$ one standard deviation. The training and validation datasets are on CareVue (2001-2008), and the test on MetaVision (2008-2012).} \label{tab:tempinhospital} \end{table*} \begin{figure*} \caption{Receiver operating characteristic curve (a) and calibration curve (b) for in-hospital mortality with temporal shift in population.} \label{fig:tempinroc} \end{figure*} Table \ref{tab:tempinhospital} shows the AUC-ROC and AUC-PR results for in-hospital mortality with a temporal shift in population. The baseline outperforms the DKL model on the validation (tuning) dataset for both metrics. On the test dataset, however, the DKL shows competitive performance on the AUC-ROC. We use the best run from the validation based on the AUC-ROC for reporting the ROC and calibration curves.
In addition, we select the best performing models from Table \ref{tab:tempinhospital} based on AUC-ROC, namely BiLSTM and DKL, for comparing the calibration and ROC curves. The LSTM models consistently underperform compared to the bidirectional ones. Figure \ref{fig:tempinroc} shows the ROC and calibration curves for in-hospital mortality with a temporal shift. The Brier score for the DKL is 0.101 which is better than the 0.109 of the BiLSTM. The DKL outperforms the baseline and it shows better calibration. In the Cox re-calibration on both models the BiLSTM had a calibration intercept of 1.965 (1.88, 2.049), and slope of 0.538 (0.5, 0.577), compared to the DKL's intercept of 0.6615 (0.586, 0.734) and slope of 0.712 (0.652, 0.772). Although both models deviated significantly from the ideal values (of 0 and 1), the DKL showed significantly much better calibration. The DKL's predictions were also much less sharp: {\bf un}sharpness of 0.061 for DKL versus 0.025 for BiLSTM. \subsection{Experiment 2: Internal validation} We report the results with all the sources (2001-2012) for in-hospital mortality, with no shift in population. The training, validation and test datasets consisted of respectively $14,681$, $3,222$, and $3,236$ instances. \begin{table}[t] \small \centering \begin{tabular}{lrrrr} \toprule & \multicolumn{2}{c}{Validation} & \multicolumn{2}{c}{Test} \\ Model & \multicolumn{1}{c}{AUC-ROC} & \multicolumn{1}{c}{AUC-PR} & \multicolumn{1}{c}{AUC-ROC} & \multicolumn{1}{c}{AUC-PR} \\ \hline LSTM & $0.843\pm 0.003$ & $0.513 \pm 0.006$ & $0.840 \pm 0.005$ & $0.434\pm 0.008$ \\ BiLSTM & $0.858\pm 0.004$ & $0.549 \pm 0.010$ & $\textbf{0.851} \pm 0.004$ & $\textbf{0.478}\pm 0.016$ \\ DKL-LSTM & $ 0.838\pm 0.002$ & $0.485 \pm 0.014$ & $0.841 \pm 0.003$ & $0.425\pm 0.013$ \\ DKL & $ 0.854\pm 0.004$ & $0.536 \pm 0.010$ & $0.847 \pm 0.005$ & $0.454\pm 0.018$ \\ \bottomrule \end{tabular} \caption{In-hospital mortality results over 10 runs $\pm$ one standard deviation.
Validation and test dataset from all sources (2001-2012).} \label{tab:inhospital} \end{table} \begin{figure*} \caption{Receiver operating characteristic curve (a) and calibration curve (b) for in-hospital mortality with all sources.} \label{fig:inroc} \end{figure*} Table \ref{tab:inhospital} shows the AUC-ROC and AUC-PR results for in-hospital mortality with all sources (2001-2012). The baseline outperforms the DKL model on the test dataset for both metrics, AUC-ROC and AUC-PR. Figure \ref{fig:inroc} shows the ROC and calibration curves for in-hospital mortality with all sources. Both of our models perform similarly on the ROC curve. The Brier score for the DKL is $0.082$ slightly better than the $0.084$ of the BiLSTM. In the Cox re-calibration the BiLSTM's calibration intercept was -0.358 (-0.49, -0.229), and slope 0.802 (0.726, 0.88); compared to the DKL's -0.066 (-0.185, 0.05), and 1.177 (1.062, 1.298). Unlike the BiLSTM, the DKL showed no significant deviations from the ideal values of 0 and 1. The DKL was slightly more unsharp: 0.089 versus 0.081 for the BiLSTM. \section{Related Work} \citet{durichen2014} propose a multi-task GP that jointly models physiological variables for clinical time series. \citet{cheng2020} develop a real-time clinical prediction model based on a GP model. Aside from producing confidence regions in the predictions, the GP also scales to large patient databases, and produces interpretable relations across (clinical) variables. The interpretability is produced by inspecting the correlation across variables in the kernel function. \citet{futoma2017} propose a sepsis prediction model based on a pipeline with a GP that produces inputs for a NN classifier. The model takes into account uncertainty estimates and outperforms strong sepsis prediction baselines. On the other hand, our DKL model uses RNNs to model the time series physiological variables and feed the resulting features into the GP for prediction.
Our work, however, is the first to investigate DKL in the context of temporal shift. \section{Conclusions and Future Work} We investigated the DKL framework for the task of in-hospital mortality prediction under a temporal shift in population. The DKL shows competitive performance compared to a strong NN baseline, as well as a better calibration. However, when the test dataset is in the same distribution as the training both models show similar results. The GP component does not degrade the overall performance, and in addition, it provides extra guarantees such as uncertainty estimates. By contrasting the two experiments and inspecting the sharpness of the predictions we can ascribe the improved performance on the test set to the robustness of the GP when facing uncertainty. For future work, we will analyse different base kernels, evaluate the uncertainty estimate of the DKL, and use the framework described in \citep{Debray2015ANF} for better understanding of discrepancies in performance over time. \end{document}
\begin{document} \title{Five Quantum Algorithms Using Quipper} \begin{small} \begin{abstract} Quipper is a recently released quantum programming language. In this report, we explore Quipper's programming framework by implementing the Deutsch's, Deutsch-Jozsa's, Simon's, Grover's, and Shor's factoring algorithms. It will help new quantum programmers in an instructive manner. We choose Quipper especially for its usability and scalability though it's an ongoing development project. We have also provided introductory concepts of Quipper and prerequisite backgrounds of the algorithms for readers' convenience. We also have written codes for oracles (black boxes or functions) for individual algorithms and tested some of them using the Quipper simulator to prove correctness and introduce the readers with the functionality. As Quipper 0.5 does not include more than \ensuremath{4 \times 4} matrix constructors for Unitary operators, we have also implemented \ensuremath{8 \times 8} and \ensuremath{16 \times 16} matrix constructors. \end{abstract} \section{Introduction} Quantum computing is an interdisciplinary research area for physicists, mathematicians and computer scientists. Keeping pace with the developments in quantum hardware and algorithms, quantum programming languages are also developing. In this report\footnote{This report is based on the undergraduate thesis work of SS.}, we have proposed the implementations of five quantum algorithms, namely, Deutsch's algorithm, Deutsch-Jozsa's algorithm, Simon's periodicity algorithm, Grover's search algorithm and Shor's factoring algorithm using Quipper, a new functional quantum programming language. To our knowledge this report is the first such implementation of the above mentioned algorithms using Quipper. When we will have a physical gate-based quantum computer, these Quipper codes will guide us to get the real results from these algorithms instead of mere simulations. 
We choose these five algorithms because of their theoretical and pedagogical importance. We have also implemented oracles (black boxes or functions) to use them as inputs for those algorithms. As quantum hardware is not available yet, we have used the Quipper simulator to test some of them classically so that readers can test their own oracles. We assume readers have the initial knowledge of quantum data structure like qubit, quantum gates like \emph{Hadamard gate, controlled-not gate} etc. But it's not necessary to have previous experience of functional programming approach. We give some ideas about quantum programming languages in Section 2. In Section 3, we introduce Quipper and try to present a short tutorial. Implementations of the five quantum algorithms using Quipper are given in Section 4. Here readers will get the basic ideas of quantum algorithms and also the basic structures of Quipper codes. Finally we draw our conclusion in Section 5. \section{Quantum programming languages} Quantum programming language is an active area of quantum computational research. According to E. H. Knill's \cite{knill1996conventions} proposed architecture of quantum computers, the quantum machine has to be controlled by classical devices. Existing quantum programming languages are designed with classical controls such as loops, conditions etc and allow both quantum and classical data. Programming languages are mainly divided into two paradigms: \emph{imperative quantum programming languages} and \emph{functional quantum programming languages}. In imperative or procedural programming approach, programmers give instructions to the machines step by step, tell exactly how to do the task, and by functional or declarative programming approach, programmers tell the machines what to do, it is not necessary to tell how to do exactly.
\emph{C, C++, Java, Python} etc are the examples of imperative programming approach and \emph{Scala, Erlang, Haskell} are the examples of functional programming approach. \emph{Quantum pseudocode} \cite{knill1996conventions}, \emph{QCL (Quantum Computing Language)} \cite{omer1998procedural}, \emph{Q language} \cite{bettelli2003toward}, \emph{qGCL (Quantum Guarded Command Language)} \cite{zuliani2001quantum} etc are imperative quantum programming languages and \emph{QFC} \cite{selinger2004towards}, \emph{QPL (Quantum Programming Language)} \cite{selinger2004towards}, \emph{cQPL (communication capable QPL)} \cite{mauerer2005semantics}, \emph{QML} \cite{altenkirch2005functional}, \emph{Quantum lambda calculi} \cite{van2004lambda}, \emph{Quipper} \cite{q1} etc are functional quantum programming languages \cite{w1}. These programming languages mainly use computational models like \emph{Quantum Turing Machine, Quantum Circuits, Quantum Lambda Calculus} etc. More details about quantum programming languages can be found in \cite{simonsurvey}, \cite{model}. \section{What is Quipper?} Quipper is an embedded functional quantum programming language \cite{q3}. This language is based on Haskell, a pure functional classical programming language. As Haskell's type system is one of the most powerful type systems, by using advanced features of it, Quipper provides many higher order and overloaded operators, though Haskell doesn't have linear type and dependent type features. To overcome this lacking, Quipper checks linear and dependent types in run time rather than in compile time. Thus Quipper offers a corrective, scalable and usable programming framework for quantum computation. As of 2014, Quipper is planning to be equipped soon with stand-alone compiler or at least a custom type-checker \cite{q1}. Quipper was developed by Richard Eisenberg, Alexander S. Green, Peter LeFanu Lumsdaine, Keith Kim, Siun-Chuon Mau, Baranidharan Mohan, Won Ng, Joel Ravelomanantsoa-Ratsimihah, Neil J. 
Ross, Artur Scherer, Peter Selinger, Beno\^it Valiron, Alexandr Virodov and Stephan A. Zdancewic, in a research supported by the Intelligence Advanced Research Projects Activity (IARPA) \cite{IARPA}. It was first released in June 19, 2013 as a beta version 0.4 \cite{q3}. \subsection{Quipper execution model} Quipper program executes in three phases: \emph{compile time, circuit generation time} and \emph{circuit execution time}. \emph{Compile time} phase and \emph{circuit generation time} phase take place on classical computer. The last and final phase, \emph{Circuit execution time} occurs on physical quantum computer. In \emph{compile time} phase, Quipper takes source code, compile time parameters and uses Haskell's compiler to generate executable object code as output. \emph{Circuit generation time phase} takes the executable object code, circuit parameters (register size, problem size etc) as input and outputs a representation of quantum circuit. \emph{Circuit execution time} phase takes the quantum circuit, some circuit inputs (qubits fetched from long-term storage to initialize circuit inputs, if supported by quantum device, classical bits for classical circuit inputs).This phase outputs the measurement results of quantum subroutines in classical bits and moves the qubits (those are used as input) to long-term storage (if supported by quantum device). \begin{figure} \caption{Quipper execution model} \end{figure} In the model of Quipper execution, classical controller generates a circuit according to the source codes, sends it to quantum hardware for execution and takes the measurement results. Quipper provides \bera{print\_generic} and \bera{print\_simple} functions to print the circuit in available output format (such as text, PostScript, and PDF). Quipper also provides \bera{run\_generic} function to simulate the circuit on classical machine. More details about Quipper can be found in \cite{q1}. 
\subsection{Quipper examples} Quipper has three basic data types, \bera{Bit, Bool} and \bera{Qubit}. \bera{Bit, Bool} represent classical data and \bera{Qubit} is for quantum data. Quipper distinguishes between \emph{parameter} and \emph{input}. When the value is known at \emph{circuit generation time} phase it is \emph{parameter} and when the value is known only at \emph{circuit execution time} phase it is called \emph{input}. Here \bera{Bool} is a boolean \emph{parameter}, \bera{Bit} and \bera{Qubit} are respectively a classical boolean \emph{input} and a quantum \emph{input} to a circuit. \bera{Bool} can be converted to \bera{Bit}, but vice versa is not possible. Quantum measurements are \bera{Bit}s rather than \bera{Bool}s, as measurements occur at \emph{circuit execution time} phase. Some data has both \emph{parameter} and \emph{input} components, this type of data is called \emph{shape}. Circuit size is a \emph{shape} type data, here qubit \emph{list} (data structure) is \emph{input} type and the length of the \emph{list} is \emph{parameter} type. In this section, we will write our first Quipper code, see how to apply quantum gates, be familiar with the built-in data structures and finally, know the measurement operation that Quipper provides. \paragraph{The \emph{Hello World} program :} To start with Quipper we will write a simple \emph{hello world} function. This function will take a classical data \bera{Bool} and return a corresponding quantum data \bera{Qubit}. 
The code is given below: \\ \begin{footnotesize} \bera{ import Quipper\\ -\-- declare hello\_world function hello\_world :: Bool \ensuremath{\rightarrow} Circ Qubit hello\_world var = do ~~-\-- convert \bera{Bool} into \bera{Qubit} ~~qbit \ensuremath{\leftarrow} qinit var ~~-\-- to label a variable on pdf circuit ~~label (qbit) ("\ensuremath{|1\rangle}") ~~-\-- return the result ~~return qbit \\} \end{footnotesize} Let us focus on the important parts: first we import the \bera{Quipper} library to get all Quipper properties, built-in functions, operators, data types etc. The first line of a function is the type signature of that function. Here the type signature means \bera{hello\_world} is a function that takes a \bera{Bool} type data and returns a \bera{Qubit} type data. Arguments are separated by "\ensuremath{\rightarrow}" notation and \bera{Circ} is a type operator (in Haskell, it is called \bera{Monad}) that represents this function can have a side effect when it is evaluated. Usually functions are written in a \bera{do} block. A \bera{do} block starts with the \bera{do} keyword and then followed by a series of expressions or operations. The variable \bera{var} will store the value that will be passed through this function. In Quipper "-\-- ..." and "\{- ... -\}" are used for commenting codes. The \bera{qinit} operator (from \bera{Quipper} library) takes a \bera{Bool} as input and initializes a \bera{Qubit}, a quantum state corresponding to the classical data \bera{Bool} (If \bera{False} then qubit is \ensuremath{|0\rangle} and if \bera{True} then \ensuremath{|1\rangle}). "\ensuremath{\leftarrow}" notation means that the new quantum state is stored in a variable \bera{qbit} and finally returns the state. The starting point of a Quipper program is the \bera{main} function. 
As we have mentioned before, \emph{circuit generation time} phase sends a circuit of the source code to the physical quantum device to execute and \bera{print\_generic} and \bera{print\_simple} functions are used to print the circuit in an available output format. \bera{print\_simple} is used when circuit \emph{shape} is fixed and \bera{print\_generic} is used for the circuit which \emph{shape} is not fixed. Quipper provides \bera{label, comment, comment\_with\_label} for commenting on a generated circuit in a pdf document. In the \bera{main} function, using \bera{print\_simple} we will call the \bera{hello\_world} function with a \bera{Bool} type data \bera{True} and get a corresponding quantum state. After successful compilation, when we will execute this program, it generates a circuit in a pdf document. Circuits are read from left to right and wires are represented by horizontal lines. Quipper uses "\includegraphics{notation.png}" notation to denote the allocation of a new qubit for corresponding classical data. The \bera{main} function is:\\ \begin{footnotesize} \bera{main = print\_simple Preview (hello\_world True)} \end{footnotesize} \begin{figure} \caption{Circuit for \bera{hello\_world} \end{figure} \paragraph{Apply Quantum Gates :} From \bera{hello\_world} function, we get a quantum state. Now we will apply a quantum gate on that qubit. At this point, we will take the advantage of Quipper's higher order functionality that means a function can be passed through another function. We will name our function \bera{apply\_gate} and pass \bera{hello\_world} function as parameter. \bera{apply\_gate} function will use \bera{hello\_world} to convert a classical state into a quantum state and apply a quantum gate on it. 
The code is given below:\\ \begin{footnotesize} \bera{ -\-- declare apply\_gate function apply\_gate :: (Bool \ensuremath{\rightarrow} Circ Qubit) \ensuremath{\rightarrow} Bool \ensuremath{\rightarrow} Circ Qubit apply\_gate func bool = do ~~-\-- use func function to get qubit ~~qbit \ensuremath{\leftarrow} func bool ~~-\-- to comment on pdf circuit ~~comment "before gate" ~~-\-- apply \emph{Hadamard Transfomation} ~~qbit \ensuremath{\leftarrow} hadamard qbit ~~-\-- both label and comment ~~comment\_with\_label "after gate" (qbit) ("\ensuremath{(|0\rangle - |1\rangle)/2}") ~~-\-- return result ~~return qbit\\ -\-- main function to call the whole program main = print\_simple Preview (apply\_gate hello\_world True) \\} \end{footnotesize} \begin{figure} \caption{Circuit for \bera{apply\_gate} \end{figure} The "type signature" of \bera{apply\_gate} tells that it has two arguments. First one is a function (Bool \ensuremath{\rightarrow} Circ Qubit) that takes a \bera{Bool} and returns a \bera{Qubit}, second one is a \bera{Bool} data type. This function returns a \bera{Qubit}. To apply \emph{Hadamard Transformation} we can use \bera{hadamard} or \bera{gate\_H} operator (box represents quantum gate in pdf of Quipper). We can also use \bera{gate\_H\_at} or \bera{hadamard\_at}, these operators don't return any value, in that case expression should be like \bera{hadamard\_at qbit}. We may use other quantum gates like \bera{qnot, gate\_X, gate\_S, gate\_Z, gate\_T, gate\_Y} etc. In the \bera{main} function, we pass the \bera{hello\_world} function and a \bera{"True" Bool} type data through \bera{apply\_gate} function. \bera{hello\_world} and \bera{True} values are stored respectively in \bera{func} and \bera{bool} variables. Finally this function returns a quantum state after applying \emph{hadamard transformation}.\\ \paragraph{Data Structures:} As Quipper's host language is Haskell, it mainly uses Haskell's data structures. 
Haskell provides many data structures like \emph{Map, Set} etc, but now we would like to focus on the most basic data structures that are \emph{list} and \emph{tuple}. \bera{let} and \bera{where} clauses are used for local bindings (we will use these in the implementations). \emph{list} in Haskell is a linked list, it uses \bera{(:)} operator to bind an element. \bera{(++)} operator is used for concatenation. \bera{head} operator is used to get the first element of a \emph{list} and \bera{tail} is used to get the rest. Operator \bera{(!!)} is used to find the element of an index. \bera{[Bool]} and \bera{[Qubit]} are the example of \emph{list}. \emph{list}s are \emph{homogeneous} that means a single \emph{list} can contain only single type of elements. On the other hand, \emph{tuple} is a fixed number of single or different type components. \emph{tuple} is mainly used for returning multiple values of different data types. \bera{(Qubit, Bool)} and \bera{([Qubit], [Qubit])} are the examples of \emph{tuple}. Previously we performed \emph{hadamard transformation} only on one qubit. To perform \emph{hadamard transformation}s on multiple qubits, we will use \emph{list} of qubits, \bera{[Qubit]}. In that case, we will need to modify the type signature of \bera{hello\_world} and \bera{apply\_gate} functions like \bera{hello\_world :: [Bool] \ensuremath{\rightarrow} Circ [Qubit]} and \bera{apply\_gate :: ([Bool] \ensuremath{\rightarrow} Circ [Qubit]) \ensuremath{\rightarrow} [Bool] \ensuremath{\rightarrow} Circ [Qubit]}. To perform quantum gates on a \emph{list}, we will use \bera{mapUnary} operator. Then expression should be like \bera{mapUnary hadamard qbit} instead of \bera{qbit \ensuremath{\leftarrow} hadamard qbit}, in \bera{apply\_gate} function. In the \bera{main} function we will use \bera{replicate} operator and \bera{where} clause to create \bera{n True Bool} type data. We will also use \bera{print\_generic} as circuit \emph{shape} is not fixed here. 
The \bera{main} function is:\\ \begin{footnotesize} \bera{ -\-- main function to call the whole program main = print\_generic Preview (apply\_gate hello\_world (replicate n True)) ~~where ~~~~n = 5 } \end{footnotesize} \begin{figure} \caption{Circuit of applying gates on multiple qubits} \end{figure} \paragraph{Measurement Operation :} Quipper provides \bera{measure} operator to measure quantum states and \bera{cdiscard} operator to discard classical data. These two are \emph{generic operator}s that means any data structure can be applied here. \bera{measure} operator takes \bera{Qubit} and collapses it to one of the basic states. \bera{cdiscard} operator takes classical \bera{Bit}s and discards. \includegraphics[width=0.05\textwidth]{measure.png} and \includegraphics[width=0.02\textwidth]{discard.png} notations are used respectively to denote \bera{measure} and \bera{cdiscard} operations. We will use these operators in our \bera{controlled\_gate} function that applies a controlled gate operation on two qubits and measures their values. Corresponding code is given below:\\ \begin{footnotesize} \bera{ -\-- declare controlled\_gate function controlled\_gate :: (Bool, Bool) \ensuremath{\rightarrow} Circ Bit controlled\_gate (cntrl\_qbit, trget\_qbit) = do ~~-\-- convert Bool into Qubit ~~cntrl\_qbit \ensuremath{\leftarrow} qinit cntrl\_qbit ~~trget\_qbit \ensuremath{\leftarrow} qinit trget\_qbit ~~-\-- controlled gate operation ~~gate\_X\_at trget\_qbit `controlled` cntrl\_qbit ~~-\-- measure Qubits ~~(cntrl\_qbit, trget\_qbit) \ensuremath{\leftarrow} measure (cntrl\_qbit, trget\_qbit) ~~-\-- discard value ~~cdiscard trget\_qbit ~~-- return result ~~return cntrl\_qbit\\ -\-- main function main = print\_simple Preview (controlled\_gate (False,True)) \\} \end{footnotesize} \begin{figure} \caption{Circuit of measurement example} \end{figure} Here we apply a \bera{X} gate on \bera{trget\_qbit} with the control of \bera{cntrl\_qbit}. 
To do that we use \bera{`controlled`} operator. We can also use this operator for \emph{list} data structure. Then the expression will be like \bera{`controlled` list}. To specify their controlling states we can write the expression like \bera{`controlled` list .==. [0,1,0]} (assume \bera{list} has three elements). Quipper gives the opportunity to apply "Unitary Operations" using matrix. We can do the previous example using \ensuremath{4 \times 4} matrix. We will import some additional libraries and write a function \bera{operator} that have the matrix.\\ \begin{footnotesize} \bera{ -\-- import Matrix constructors import Libraries.Synthesis.Matrix import QuipperLib.Synthesis import Libraries.Synthesis.Ring\\ -\-- initialize unitary matrix operator :: Matrix Four Four DOmega operator = matrix4x4 ( 1, 0, 0, 0 ) ~~~~~~~~~~~~~~~~~~~~~( 0, 1, 0, 0 ) ~~~~~~~~~~~~~~~~~~~~~( 0, 0, 0, 1 ) ~~~~~~~~~~~~~~~~~~~~~( 0, 0, 1, 0 ) \\} \end{footnotesize} In \bera{controlled\_gate} function, if we change \bera{gate\_X\_at trget\_qbit `controlled` cntrl\_qbit} expression into \bera{exact\_synthesis operator [cntrl\_qbit, trget\_qbit]} expression, \bera{main} function will generate the same previous circuit. More tutorials on Quipper can be found in \cite{q2}. \section{Implementation of quantum algorithms} Quantum algorithms are interesting because for some cases it gives exponential computational power like factoring integers, finding orders of functions. Instead of being one state at a time, quantum computer gives the opportunity to the states to be in a superposition. To get the superposition, many quantum algorithms initialize \ensuremath{n} qubits with \ensuremath{|0\rangle} and apply \emph{Hadamard transformation}. It maps \ensuremath{n} qubits to the superpositions of all \ensuremath{2^n} orthogonal states in the \ensuremath{|0\rangle, |1\rangle} basis with equal weight. Here we implement five quantum algorithms in Quipper Programming Language. 
Section 4.1 describes the Deutsch's algorithm which determines the fairness of a boolean function. Section 4.2 is the Deutsch-Jozsa algorithm which is the generalized version of Deutsch's algorithm. Section 4.3 is Simon's periodicity algorithm that finds the hidden pattern of a function. Section 4.4 describes Grover's search algorithm that can search an element from unordered array in \ensuremath{\sqrt{n}} time. Finally section 4.5 describes Shor's factoring algorithm which can factor integers in polynomial time. \subsection{Deutsch algorithm} The Deutsch algorithm, published in 1985 \cite{deutsch1985quantum}, solves a contrived problem to see how quantum computers can be used. This algorithm actually determines if a function is one to one or not. A function is called \emph{balanced} if \ensuremath{f(0)\neq f(1)}, means one to one. Otherwise a function is called \emph{constant} if \ensuremath{f(0)=f(1)}. Deutsch's algorithm solves the following problem (we quote the definition of the problem verbatim from \cite{noson}): \begin{myprob}Given a function \ensuremath{f : \{0,1\}\rightarrow \{0,1\}} as a black box, where one can evaluate an input, but cannot "look inside" and "see" how the function is defined, determine if the function is balanced or constant.\end{myprob} Classical algorithm needs two steps (first step to find \ensuremath{f(0)}'s value and second step to find \ensuremath{f(1)}'s value) to determine and Deutsch algorithm needs one step. This algorithm provides an oracle separation between P and EQP. First, we will write codes for Deutsch algorithm. Then we will code oracles (black boxes) of balanced and constant functions and finally, test those oracles using the simulator provided by Quipper. \subsubsection{Quantum circuit for Deutsch algorithm} As we have to test an oracle whether it is balanced or constant, at first we will write our own data type named \bera{Oracle}. Later we will declare our oracles of functions using this data type. 
Main section of Deutsch algorithm will be in \bera{deutsch\_circuit} function. Finally \bera{main} function will call \bera{deutsch\_circuit} function with an oracle named \bera{empty\_oracle}. here \bera{empty\_oracle} is a dummy oracle, we will use this to generalize the circuit. Later we will use some working oracles. \bera{main} function will get a classical data \bera{Bit} to determine whether given oracle is \emph{balanced} or \emph{constant}. The code for \bera{deutsch\_circuit} is:\\ \begin{footnotesize} \bera{ import Quipper\\ -\-- declare Oracle data type data Oracle = Oracle\{ ~~-\-- oracle of function \ensuremath{f(x)} ~~oracle\_function :: (Qubit,Qubit) \ensuremath{\rightarrow} Circ (Qubit,Qubit) \}\\ -\-- declare deutsch\_circuit function deutsch\_circuit :: Oracle \ensuremath{\rightarrow} Circ Bit deutsch\_circuit oracle = do ~~-\-- create the ancillae ~~top\_qubit \ensuremath{\leftarrow} qinit False ~~bottom\_qubit \ensuremath{\leftarrow} qinit True ~~label (top\_qubit, bottom\_qubit) ("\ensuremath{|0\rangle}","\ensuremath{|1\rangle}") ~~-\-- do the first Hadamards ~~hadamard\_at top\_qubit ~~hadamard\_at bottom\_qubit ~~comment "before oracle" ~~-\-- call the oracle ~~oracle\_function oracle (top\_qubit, bottom\_qubit) ~~comment "after oracle" ~~-\-- do the last Hadamards ~~hadamard\_at top\_qubit ~~-\-- measure qubits ~~(top\_qubit, bottom\_qubit) \ensuremath{\leftarrow} measure (top\_qubit, bottom\_qubit) ~~-\-- discard un-necessary output and return the result ~~cdiscard bottom\_qubit ~~return top\_qubit\\ -\-- main function to call the whole program main = print\_generic Preview (deutsch\_circuit empty\_oracle) ~~~where ~~~~~-\-- declare empty\_oracle's data type ~~~~~empty\_oracle :: Oracle ~~~~~empty\_oracle = Oracle \{ ~~~~~~oracle\_function = empty\_oracle\_function ~~~~~\} ~~~~~-\-- initialize empty\_oracle ~~~~~empty\_oracle\_function:: (Qubit,Qubit) \ensuremath{\rightarrow} Circ (Qubit,Qubit) ~~~~~empty\_oracle\_function (one,two) = 
named\_gate "Oracle" (one,two) }\\ \end{footnotesize} \begin{figure} \caption{Circuit for Deutsch algorithm \cite{noson}} \end{figure} \subsubsection{Oracle examples} \paragraph{Balanced Oracle :} We will write code for balanced oracle in \bera{balanced\_oracle}. Here we will perform a controlled not operation. This will be a balanced oracle because here \ensuremath{f(0) \neq f(1)}. Finally \bera{main} function will pass this oracle to \bera{deutsch\_circuit} function. So the modified section is:\\ \begin{footnotesize} \bera{ main = print\_generic Preview (deutsch\_circuit balanced\_oracle) ~~~where ~~~~~-\-- declare balanced\_oracle's data type ~~~~~balanced\_oracle :: Oracle ~~~~~balanced\_oracle = Oracle \{ ~~~~~~oracle\_function = balanced\_oracle\_function ~~~~~\} ~~~~~-\-- initialize oracle function \ensuremath{f(x)} ~~~~~balanced\_oracle\_function:: (Qubit,Qubit) \ensuremath{\rightarrow} Circ (Qubit,Qubit) ~~~~~balanced\_oracle\_function (x,y) = do ~~~~~~qnot\_at y `controlled` x ~~~~~~return (x,y) \\} \end{footnotesize} \begin{figure} \caption{Circuit for balanced oracle} \end{figure} \paragraph{Constant Oracle:} We will write code for constant oracle in \bera{constant\_oracle}. We will keep the qubit states the same so that this oracle will ensure \ensuremath{f(0)=f(1)}. In \bera{main} function, we will pass this oracle to \bera{deutsch\_circuit} function. 
So the modified section is:\\ \begin{footnotesize} \bera{ main = print\_generic Preview (deutsch\_circuit constant\_oracle) ~~~where ~~~~~-\-- declare constant\_oracle's data type ~~~~~constant\_oracle :: Oracle ~~~~~constant\_oracle = Oracle \{ ~~~~~~oracle\_function = constant\_oracle\_function ~~~~~\} ~~~~~-\-- initialize oracle function \ensuremath{f(x)} ~~~~~constant\_oracle\_function:: (Qubit,Qubit) \ensuremath{\rightarrow} Circ (Qubit,Qubit) ~~~~~constant\_oracle\_function (x,y) = do ~~~~~~-\-- Qubits will remain the same ~~~~~~return (x,y) \\} \end{footnotesize} \begin{figure} \caption{Circuit for constant oracle} \end{figure} \subsubsection{Simulation} We can test previous oracles with the simulator included in Quipper. We will remove \bera{balanced\_oracle}, \bera{constant\_oracle} functions from \bera{where} clause and write those functions independently. We will write two new functions \bera{simulate} and \bera{circuit}. In \bera{simulate} function, we will test the oracle using built-in \bera{run\_generic} function and in \bera{circuit} function, we will show the simulation results. \bera{main} function is to start the whole program.\\ \begin{footnotesize} \bera{ -\-- import modules for simulations import qualified Data.Map as Map import QuipperLib.Simulation import System.Random\\ -\-- declare simulate function simulate :: Circ Bit \ensuremath{\rightarrow} Bool simulate oracle = (run\_generic (mkStdGen 1) (1.0::Double) oracle) \\} \end{footnotesize} Let's highlight Quipper's built-in \bera{run\_generic} function for simulation. It takes three arguments: \begin{enumerate} \item A source of randomness, something of type \bera{StdGen} from Haskell's \bera{System.Random} library. \bera{mkStdGen} is a function creating such an object out of an \bera{Int}. So \bera{(mkStdGen~1)} does the trick. \item An instance of real number so that the function \bera{run\_generic} knows what to use as datatype for reals. Here we take \bera{Double}. 
\item The circuit to run. \end{enumerate} \bera{circuit} function will take \bera{simulate} function and an \bera{Oracle} . \bera{IO()} represents this function has an I/O operation. \bera{simulate} function will return a \bera{Bool} type data \bera{True} when oracle is balanced and \bera{False} when oracle is constant. \bera{circuit} function will take the result and perform an I/O operation. \bera{main} function should be modified. When we will run the program in command prompt, we get the simulation results "Given oracle is Balanced" for balanced oracles and "Given oracle is Constant" for constant oracles.\\ \begin{footnotesize} \bera{ -\-- declare circuit function circuit :: (Circ Bit \ensuremath{\rightarrow} Bool) \ensuremath{\rightarrow} Oracle \ensuremath{\rightarrow} IO () circuit run oracle = ~~-\-- first deutsch\_circuit will apply on oracle ~~-\-- then run function will evaluate the result ~~if run (deutsch\_circuit oracle) ~~then putStrLn "Given oracle is Balanced" ~~else putStrLn "Given oracle is Constant"\\ -\-- main function main = do ~~-\-- test constant\_oracle ~~circuit simulate constant\_oracle ~~-\-- test balanced\_oracle ~~circuit simulate balanced\_oracle \\} \end{footnotesize} \begin{figure} \caption{Simulation of Deutsch Algorithm} \end{figure} \subsection{Deutsch-Jozsa algorithm} The Deutsch-Jozsa algorithm, proposed by David Deutsch and Richard Jozsa in 1992 \cite{deutsch1992rapid}, is the generalized version of Deutsch algorithm, which accepts a string of \ensuremath{n~0}'s and \ensuremath{1}'s and outputs a zero or one. A function is called \emph{balanced} if exactly half of the input's outputs are \ensuremath{0}'s and other half of the input's outputs are \ensuremath{1}'s. And a function is called \emph{constant} if all the input's outputs are either \ensuremath{0}'s or \ensuremath{1}'s. 
Deutsch-Jozsa algorithm solves the following problem (we quote the definition of the problem verbatim from \cite{noson}): \begin{myprob}Given a function \ensuremath{f : \{0,1\}^n\rightarrow \{0,1\}} as a black box, where one can evaluate an input, but cannot "look inside" and "see" how the function is defined, determine if the function is balanced or constant.\end{myprob} For classical algorithm, the best case scenario is when first two inputs have different outputs which ensures that given function is balanced. But to ensure a function is constant, it must evaluate the function on more than half of the possible inputs. So it requires \ensuremath{\frac{2^n}{2}+1 = 2^{(n-1)}+1} evaluations. But Deutsch-Jozsa algorithm solves this problem in one evaluation, provides an oracle separation of P and EQP, which is an exponential speedup. \subsubsection{Circuit of Deutsch-Jozsa algorithm} Unlike Deutsch algorithm, the Deutsch-Jozsa algorithm accepts a string of length \ensuremath{n}. That's why we will need to augment our previous \bera{Oracle} data type so that it can deal with the string of qubits. The core section of Deutsch-Jozsa algorithm will be in \bera{deutsch\_jozsa\_circuit} function (more or less similar to \bera{deutsch\_circuit} function). In the \bera{main} function, we will call \bera{deutsch\_jozsa\_circuit} with a dummy oracle named \bera{empty\_oracle}. 
So the code for Deutsch-Jozsa algorithm is:\\ \begin{footnotesize} \bera{ import Quipper\\ -\-- declare modified Oracle data type data Oracle = Oracle \{ ~~~-\-- declare the length of a string ~~~qubit\_num :: Int, ~~~-\-- declare oracle function \ensuremath{f(x)} ~~~function :: ([Qubit], Qubit) \ensuremath{\rightarrow} Circ ([Qubit], Qubit) \}\\ -\-- declare deutsch\_jozsa\_circuit function deutsch\_jozsa\_circuit :: Oracle \ensuremath{\rightarrow} Circ [Bit] deutsch\_jozsa\_circuit oracle = do ~~-\-- initialize string of qubits ~~top\_qubits \ensuremath{\leftarrow} qinit (replicate (qubit\_num oracle) False) ~~bottom\_qubit \ensuremath{\leftarrow} qinit True ~~label (top\_qubits, bottom\_qubit) ("\ensuremath{|0\rangle}","\ensuremath{|1\rangle}") ~~-\-- do the first hadamard ~~mapUnary hadamard top\_qubits ~~hadamard\_at bottom\_qubit ~~comment "before oracle" ~~-\-- call oracle ~~function oracle (top\_qubits, bottom\_qubit) ~~comment "after oracle" ~~-\-- do the last hadamard ~~mapUnary hadamard top\_qubits ~~-\-- measure qubits ~~(top\_qubits, bottom\_qubit) \ensuremath{\leftarrow} measure (top\_qubits, bottom\_qubit) ~~-\-- discard unnecessary output and return result ~~cdiscard bottom\_qubit ~~return top\_qubits\\ -\-- main function main = print\_generic Preview (deutsch\_jozsa\_circuit empty\_oracle) ~~where ~~~~-\-- declare empty\_oracle's data type ~~~~empty\_oracle :: Oracle ~~~~empty\_oracle = Oracle \{ ~~~~~qubit\_num = 5, ~~~~~function = empty\_oracle\_function ~~~~\} ~~~~-\-- initialize empty\_oracle's function \ensuremath{f(x)} ~~~~empty\_oracle\_function:: ([Qubit],Qubit) \ensuremath{\rightarrow} Circ ([Qubit],Qubit) ~~~~empty\_oracle\_function (ins,out) = named\_gate "Oracle" (ins,out) \\} \end{footnotesize} \begin{figure} \caption{Circuit for Deutsch-Jozsa algorithm \cite{noson}} \end{figure} \subsubsection{Oracle examples} \paragraph{Constant Oracle:} We will write code for constant oracle in \bera{constant\_oracle}. 
For all inputs of this function, outputs will be either \ensuremath{0} or \ensuremath{1} that means \ensuremath{f(x) = 0} for all \ensuremath{x} or \ensuremath{f(x)=1} for all \ensuremath{x}. The code for \bera{constant\_oracle} is given below:\\ \begin{footnotesize} \bera{ main = print\_generic Preview (deutsch\_jozsa\_circuit constant\_oracle) ~~where ~~~~-\-- declare constant\_oracle's data type ~~~~constant\_oracle :: Oracle ~~~~constant\_oracle = Oracle \{ ~~~~~~qubit\_num = 2, ~~~~~~function = constant\_oracle\_function ~~~~\} ~~~~-\-- initialize constant\_oracle function \ensuremath{f(x)} ~~~~constant\_oracle\_function:: ([Qubit],Qubit) \ensuremath{\rightarrow} Circ ([Qubit],Qubit) ~~~~constant\_oracle\_function (ins,out) = do ~~~~~~-\-- Qubits will remain the same ~~~~~~return (ins, out) \\} \end{footnotesize} \begin{figure} \caption{Circuit for constant oracle} \end{figure} \paragraph{Balanced Oracle:} We will write code for balanced oracle in \bera{balanced\_oracle}. Exactly half of the inputs for this function will go for \ensuremath{0}'s and other half will go for \ensuremath{1}'s as output that means \ensuremath{f(x)=0} for half of \ensuremath{x} and \ensuremath{f(x)=1} for other half of \ensuremath{x}.\\ \begin{footnotesize} \bera{ main = print\_generic Preview (deutsch\_jozsa\_circuit balanced\_oracle) ~~where ~~~~-\-- declare balanced\_oracle's data type ~~~~balanced\_oracle :: Oracle ~~~~balanced\_oracle = Oracle \{ ~~~~~qubit\_num = 2, ~~~~~function = balanced\_oracle\_function ~~~~\} ~~~~-\-- initialize balanced\_oracle function \ensuremath{f(x)} ~~~~balanced\_oracle\_function:: ([Qubit],Qubit) \ensuremath{\rightarrow} Circ ([Qubit],Qubit) ~~~~balanced\_oracle\_function ([x,y],out) = do ~~~~~qnot\_at out `controlled` x ~~~~~qnot\_at out `controlled` y ~~~~~return ([x,y],out) ~~~~balanced\_oracle\_function~\_ = error "undefined" -\-- fallback case \\} \end{footnotesize} \begin{figure} \caption{Circuit for balanced oracle} \end{figure} 
\subsubsection{Simulation} Like in the Deutsch algorithm, we will write a new \bera{simulate} function that simulates oracles classically and a new \bera{circuit} function to show the simulation results. \\ \begin{footnotesize} \bera{ import qualified Data.Map as Map import QuipperLib.Simulation import System.Random\\ -\-- simulate function simulate :: Circ [Bit] \ensuremath{\rightarrow} Bool simulate oracle = and (map not (run\_generic (mkStdGen 1) (1.0::Float) oracle)) \\} \end{footnotesize} Here \bera{simulate} function uses \bera{map} operator to apply \bera{not} to each element of the oracle to \emph{negate} its values. Then \bera{and} operator is applied to the results and finally returns a \bera{Bool}. \bera{circuit} function will use the \bera{Bool} to print "constant" for constant oracles (when \bera{Bool} is \bera{True}) and "balanced" for balanced oracles (when \bera{Bool} is \bera{False}). Again, \bera{main} function will be used to start the whole program.\\ \begin{footnotesize} \bera{ -\-- circuit function circuit :: (Circ [Bit] \ensuremath{\rightarrow} Bool) \ensuremath{\rightarrow} Oracle \ensuremath{\rightarrow} IO () circuit run oracle = ~~-\-- first deutsch\_jozsa will apply on oracle ~~-\-- then run function will evaluate the result ~~if run (deutsch\_jozsa\_circuit oracle) ~~then putStrLn "constant" ~~else putStrLn "balanced"\\ -\-- main function main = do ~~-\-- test constant\_oracle ~~circuit simulate constant\_oracle ~~-\-- test balanced\_oracle ~~circuit simulate balanced\_oracle \\} \end{footnotesize} \begin{figure} \caption{Simulation of Deutsch-Jozsa algorithm} \end{figure} \subsection{Simon's periodicity algorithm} Simon's periodicity algorithm is about finding patterns in a particular set of functions, which is why it is a promise problem. It solves Simon's problem which is a computational problem in the model of decision tree complexity or query complexity, conceived by Daniel Simon in 1994 \cite{simon1994}. 
It was also the inspiration for Shor's algorithm (we will discuss it in Section 4.5). Both problems are special cases of the abelian hidden subgroup problem. Simon's algorithm has both quantum procedures and classical procedures. We will mainly discuss the quantum procedures. Simon's algorithm solves the following problem (we quote the definition of the problem verbatim from \cite{noson}): \begin{myprob}Given a function \ensuremath{f : \{0,1\}^n\rightarrow {\{0,1\}}^n} as a black box, promised to have a secret (hidden) binary string \textbf{s}, such that for all strings \textbf{x, y} \ensuremath{\in {\{0, 1\}}^n}, we have \ensuremath{\textbf{f(x)=f(y)}} if and only if \ensuremath{\textbf{x = y }\oplus\textbf{ s}}. The goal is to determine \textbf{s}.\end{myprob} In other words, the values of \ensuremath{f} repeat themselves in some pattern and the pattern is determined by \ensuremath{s}. Function \ensuremath{f} is one to one when \ensuremath{s = 0^n}, otherwise \ensuremath{f} is two to one. If we find two inputs \ensuremath{x_1, x_2} such that \ensuremath{f(x_1) = f(x_2)}, then \ensuremath{x_1 = x_2 \oplus s} and we obtain \ensuremath{s} by \ensuremath{x_1 \oplus x_2 = x_2 \oplus s \oplus x_2 = s}. The worst case scenario for classical algorithm to determine a two to one function is, it needs more than half of the inputs evaluations. So it requires \ensuremath{\frac{2^n}{2}+1 = 2^{n-1} + 1} function evaluations. On the contrary, Simon's algorithm needs \ensuremath{n} function evaluations. Classical computational model needs exponential time complexity to solve this problem while quantum computational model solves it in bounded quantum polynomial time. \subsubsection{Circuit for Simon's algorithm} The core section of Simon's algorithm will be written in \bera{simon\_circuit} function. The quantum part of Simon's algorithm is basically performing this function several times. We will write function \bera{steps} for running \bera{simon\_circuit} \ensuremath{n-1} times. 
This function will return a \bera{Maybe} data type that means it may return \bera{([Bit], [Bit])} or may return \bera{Nothing} (readers may think of it as a null value). We will get only the binary string which satisfies \ensuremath{\langle y, s \rangle = 0} \footnote{We are using the standard Dirac notation. \ensuremath{\langle|} is a row vector and \ensuremath{|\rangle} is a column vector. \ensuremath{\langle y, s \rangle} means inner product of \ensuremath{y,s}}. When \ensuremath{\langle y, s \rangle = 1}, destructive interference occurs which cancels each other and we get nothing. The classical part of Simon's algorithm will take those results and solve "linear equations" to find hidden pattern \ensuremath{s}. This can be done by Gaussian elimination, which takes \ensuremath{\Omega(n^3)} steps.\\ \begin{footnotesize} \bera{ import Quipper\\ -\-- declare oracle data type data Oracle = Oracle \{ ~~qubit\_num :: Int, ~~function :: ([Qubit], [Qubit]) \ensuremath{\rightarrow} Circ ([Qubit], [Qubit]) \}\\ -\-- declare simon\_circuit function simon\_circuit :: Oracle \ensuremath{\rightarrow} Circ ([Bit], [Bit]) simon\_circuit oracle = do ~~-\--create the ancillaes ~~top\_qubits \ensuremath{\leftarrow} qinit (replicate (qubit\_num oracle) False) ~~bottom\_qubits \ensuremath{\leftarrow} qinit (replicate (qubit\_num oracle) True) ~~label (top\_qubits, bottom\_qubits) ("top |0>", "bottom |1>") ~~-\-- apply first hadamard gate ~~mapUnary hadamard top\_qubits ~~mapUnary hadamard bottom\_qubits ~~-\-- call the oracle ~~(function oracle) (top\_qubits, bottom\_qubits) ~~-\-- apply hadamard gate again ~~mapUnary hadamard top\_qubits ~~-\-- measure qubits ~~(top\_qubits, bottom\_qubits) \ensuremath{\leftarrow} measure(top\_qubits, bottom\_qubits) ~~-\-- return the result ~~return (top\_qubits,bottom\_qubits)\\ -\-- declare steps function steps :: (Oracle \ensuremath{\rightarrow} Circ ([Bit], [Bit])) \ensuremath{\rightarrow} Oracle \ensuremath{\rightarrow} Circ (Maybe ([Bit], [Bit])) 
steps simon\_algorithm oracle = do ~~comment " Simon's algorithm" ~~-\-- set value for n ~~let n = toEnum (qubit\_num oracle) :: Int ~~-\-- call simon\_circuit n-1 times ~~for 1 (n-1) 1 \$ \textbackslash i \ensuremath{\rightarrow} do ~~~~comment "start" ~~~~-\-- call simon\_circuit function ~~~~ret \ensuremath{\leftarrow} simon\_algorithm oracle ~~~~-\-- return the result ~~~~return ret ~~~~comment "finish" ~~endfor ~~return Nothing\\ -\-- declare main function main = print\_generic Preview (steps simon\_circuit empty\_oracle) ~~where ~~~-\-- declare empty\_oracle's data type ~~~empty\_oracle :: Oracle ~~~empty\_oracle = Oracle \{ ~~~~~-\-- set the length of qubit string ~~~~~qubit\_num = 4, ~~~~~function = empty\_oracle\_function ~~~\} ~~~-\-- initialize empty\_oracle's function ~~~empty\_oracle\_function:: ([Qubit],[Qubit]) \ensuremath{\rightarrow} Circ ([Qubit],[Qubit]) ~~~empty\_oracle\_function (ins,out) = named\_gate "Oracle" (ins,out) \\} \end{footnotesize} \begin{figure} \caption{Quantum subroutine for Simon's algorithm \cite{s1}} \end{figure} \subsubsection{Oracle example} We will write code for an oracle when \bera{qubit\_num = 2}. So, the inputs are 00, 01, 10 and 11. Let's consider the period, \textbf{s} = 11. So, we can define the function as follows:\\\\ \ensuremath{f(00) = 01, f(01) = 10, f(10) = f(01 \oplus 11) = 10} and \ensuremath{f(11) = f(00 \oplus 11) = 01}\\\\ The \emph{Unitary Transformation} is: \ensuremath{|x\rangle |y\rangle \rightarrow |x\rangle |y \oplus f(x)\rangle}. We will code the \emph{Unitary matrix} in \bera{sample\_oracle} function. 
The truth table and the code for \bera{sample\_oracle} function is given below:\\ \begin{table}[ht] \caption{Truth table of unitary matrix} \centering \begin{tabular}{c c | c c || c c | c c || c c | c c || c c | c c} \hline\hline \ensuremath{x} & \ensuremath{y} &\ensuremath{f(x)} & \ensuremath{y\oplus f(x)} & \ensuremath{x} & \ensuremath{y} &\ensuremath{f(x)} & \ensuremath{y\oplus f(x)} & \ensuremath{x} & \ensuremath{y} &\ensuremath{f(x)} & \ensuremath{y\oplus f(x)} & \ensuremath{x} & \ensuremath{y} &\ensuremath{f(x)} & \ensuremath{y\oplus f(x)}\\ [0.5 ex] \hline 00 & 00 & 01 & 01 & 01 & 00 & 10 & 10 & 10 & 00 & 10 & 10 & 11 & 00 & 01 & 01 \\ 00 & 01 & 01 & 00 & 01 & 01 & 10 & 11 & 10 & 01 & 10 & 11 & 11 & 01 & 01 & 00 \\ 00 & 10 & 01 & 11 & 01 & 10 & 10 & 00 & 10 & 10 & 10 & 00 & 11 & 10 & 01 & 11 \\ 00 & 11 & 01 & 10 & 01 & 11 & 10 & 01 & 10 & 11 & 10 & 01 & 11 & 11 & 01 & 10 \\ \hline \end{tabular} \label{table:nonlin} \end{table} \begin{footnotesize} \bera{ -\-- declare sample\_oracle's data type sample\_oracle :: Oracle sample\_oracle = Oracle\{ ~~qubit\_num = 2, ~~function = sample\_function \}\\ -\-- initialize sample\_oracle's function sample\_function :: ([Qubit],[Qubit]) \ensuremath{\rightarrow} Circ ([Qubit],[Qubit]) sample\_function (controlled\_qubit, target\_qubit) = do ~~let element = controlled\_qubit ++ target\_qubit ~~-\-- call the unitary matrix ~~exact\_synthesis operator element ~~return (controlled\_qubit, target\_qubit)\\ -\-- initialize 16 by 16 unitary matrix operator :: Matrix Sixteen Sixteen DOmega operator = matrix16x16 ( 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 ) ~~~~~~~~~~~~~~~~~~~~~~~( 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 ) ~~~~~~~~~~~~~~~~~~~~~~~( 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 ) ~~~~~~~~~~~~~~~~~~~~~~~( 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 ) ~~~~~~~~~~~~~~~~~~~~~~~( 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0 ) ~~~~~~~~~~~~~~~~~~~~~~~( 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0 ) 
~~~~~~~~~~~~~~~~~~~~~~~( 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 ) ~~~~~~~~~~~~~~~~~~~~~~~( 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 ) ~~~~~~~~~~~~~~~~~~~~~~~( 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0 ) ~~~~~~~~~~~~~~~~~~~~~~~( 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0 ) ~~~~~~~~~~~~~~~~~~~~~~~( 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0 ) ~~~~~~~~~~~~~~~~~~~~~~~( 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0 ) ~~~~~~~~~~~~~~~~~~~~~~~( 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0 ) ~~~~~~~~~~~~~~~~~~~~~~~( 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0 ) ~~~~~~~~~~~~~~~~~~~~~~~( 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1 ) ~~~~~~~~~~~~~~~~~~~~~~~( 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0 ) \\} \end{footnotesize} \begin{figure} \caption{Circuit for \bera{sample\_oracle}} \end{figure} \paragraph{16 by 16 matrix constructor :} In the previous function, we implemented \ensuremath{16 \times 16} unitary matrix. As Quipper 0.5 version doesn't include \ensuremath{16 \times 16} matrix constructor, we coded this in \bera{Libraries.Synthesis.Matrix} module. This module provides fixed but arbitrary sized vectors and matrices. Its dimensions are determined by \emph{type level programming}\footnote{calculations are done during compilation time. so it ensures no run-time dimension errors}. We create a type level representation for number \ensuremath{16}. \bera{Ten} and \bera{Succ} are the type representations of number \ensuremath{10} and successor operation, previously declared in this module. \ensuremath{16 \times 16} matrix constructor is \bera{matrix16x16} which takes rows as arguments. 
Respective codes are given below:\\ \begin{footnotesize} \bera{ -\-- The natural number 16 as a type type Sixteen = Succ (Succ (Succ (Succ (Succ (Succ Ten))))) \\ -\-- A convenience constructor for \ensuremath{16 \times 16} matrices matrix16x16 ::(a, a, a, a, a, a, a, a, a, a, a, a, a, a, a, a)\ensuremath{\rightarrow} ~~~~~~~~~~~~~~(a, a, a, a, a, a, a, a, a, a, a, a, a, a, a, a)\ensuremath{\rightarrow} ~~~~~~~~~~~~~~(a, a, a, a, a, a, a, a, a, a, a, a, a, a, a, a)\ensuremath{\rightarrow} ~~~~~~~~~~~~~~(a, a, a, a, a, a, a, a, a, a, a, a, a, a, a, a)\ensuremath{\rightarrow} ~~~~~~~~~~~~~~(a, a, a, a, a, a, a, a, a, a, a, a, a, a, a, a)\ensuremath{\rightarrow} ~~~~~~~~~~~~~~(a, a, a, a, a, a, a, a, a, a, a, a, a, a, a, a)\ensuremath{\rightarrow} ~~~~~~~~~~~~~~(a, a, a, a, a, a, a, a, a, a, a, a, a, a, a, a)\ensuremath{\rightarrow} ~~~~~~~~~~~~~~(a, a, a, a, a, a, a, a, a, a, a, a, a, a, a, a)\ensuremath{\rightarrow} ~~~~~~~~~~~~~~(a, a, a, a, a, a, a, a, a, a, a, a, a, a, a, a)\ensuremath{\rightarrow} ~~~~~~~~~~~~~~(a, a, a, a, a, a, a, a, a, a, a, a, a, a, a, a)\ensuremath{\rightarrow} ~~~~~~~~~~~~~~(a, a, a, a, a, a, a, a, a, a, a, a, a, a, a, a)\ensuremath{\rightarrow} ~~~~~~~~~~~~~~(a, a, a, a, a, a, a, a, a, a, a, a, a, a, a, a)\ensuremath{\rightarrow} ~~~~~~~~~~~~~~(a, a, a, a, a, a, a, a, a, a, a, a, a, a, a, a)\ensuremath{\rightarrow} ~~~~~~~~~~~~~~(a, a, a, a, a, a, a, a, a, a, a, a, a, a, a, a)\ensuremath{\rightarrow} ~~~~~~~~~~~~~~(a, a, a, a, a, a, a, a, a, a, a, a, a, a, a, a)\ensuremath{\rightarrow} ~~~~~~~~~~~~~~(a, a, a, a, a, a, a, a, a, a, a, a, a, a, a, a)\ensuremath{\rightarrow}Matrix Sixteen Sixteen~a\\ matrix16x16~~~(a0, a1, a2, a3, a4, a5, a6, a7, a8, a9, a10, a11, a12, a13, a14, a15) ~~~~~~~~~~~~~~(b0, b1, b2, b3, b4, b5, b6, b7, b8, b9, b10, b11, b12, b13, b14, b15) ~~~~~~~~~~~~~~(c0, c1, c2, c3, c4, c5, c6, c7, c8, c9, c10, c11, c12, c13, c14, c15) ~~~~~~~~~~~~~~(d0, d1, d2, d3, d4, d5, d6, d7, d8, d9, d10, d11, d12, d13, d14, d15) 
~~~~~~~~~~~~~~(e0, e1, e2, e3, e4, e5, e6, e7, e8, e9, e10, e11, e12, e13, e14, e15) ~~~~~~~~~~~~~~(f0, f1, f2, f3, f4, f5, f6, f7, f8, f9, f10, f11, f12, f13, f14, f15) ~~~~~~~~~~~~~~(g0, g1, g2, g3, g4, g5, g6, g7, g8, g9, g10, g11, g12, g13, g14, g15) ~~~~~~~~~~~~~~(h0, h1, h2, h3, h4, h5, h6, h7, h8, h9, h10, h11, h12, h13, h14, h15) ~~~~~~~~~~~~~~(i0, i1, i2, i3, i4, i5, i6, i7, i8, i9, i10, i11, i12, i13, i14, i15) ~~~~~~~~~~~~~~(j0, j1, j2, j3, j4, j5, j6, j7, j8, j9, j10, j11, j12, j13, j14, j15) ~~~~~~~~~~~~~~(k0, k1, k2, k3, k4, k5, k6, k7, k8, k9, k10, k11, k12, k13, k14, k15) ~~~~~~~~~~~~~~(l0, l1, l2, l3, l4, l5, l6, l7, l8, l9, l10, l11, l12, l13, l14, l15) ~~~~~~~~~~~~~~(m0, m1, m2, m3, m4, m5, m6, m7, m8, m9, m10, m11, m12, m13, m14, m15) ~~~~~~~~~~~~~~(n0, n1, n2, n3, n4, n5, n6, n7, n8, n9, n10, n11, n12, n13, n14, n15) ~~~~~~~~~~~~~~(o0, o1, o2, o3, o4, o5, o6, o7, o8, o9, o10, o11, o12, o13, o14, o15) ~~~~~~~~~~~~~~(p0, p1, p2, p3, p4, p5, p6, p7, p8, p9, p10, p11, p12, p13, p14, p15)~= \\ matrix~~~~~~~~[[a0, b0, c0, d0, e0, f0, g0, h0, i0, j0, k0, l0, m0, n0, o0, p0], ~~~~~~~~~~~~~~[a1, b1, c1, d1, e1, f1, g1, h1, i1, j1, k1, l1, m1, n1, o1, p1], ~~~~~~~~~~~~~~[a2, b2, c2, d2, e2, f2, g2, h2, i2, j2, k2, l2, m2, n2, o2, p2], ~~~~~~~~~~~~~~[a3, b3, c3, d3, e3, f3, g3, h3, i3, j3, k3, l3, m3, n3, o3, p3], ~~~~~~~~~~~~~~[a4, b4, c4, d4, e4, f4, g4, h4, i4, j4, k4, l4, m4, n4, o4, p4], ~~~~~~~~~~~~~~[a5, b5, c5, d5, e5, f5, g5, h5, i5, j5, k5, l5, m5, n5, o5, p5], ~~~~~~~~~~~~~~[a6, b6, c6, d6, e6, f6, g6, h6, i6, j6, k6, l6, m6, n6, o6, p6], ~~~~~~~~~~~~~~[a7, b7, c7, d7, e7, f7, g7, h7, i7, j7, k7, l7, m7, n7, o7, p7], ~~~~~~~~~~~~~~[a8, b8, c8, d8, e8, f8, g8, h8, i8, j8, k8, l8, m8, n8, o8, p8], ~~~~~~~~~~~~~~[a9, b9, c9, d9, e9, f9, g9, h9, i9, j9, k9, l9, m9, n9, o9, p9], ~~~~~~~~~~~~~~[a10,b10,c10,d10,e10,f10,g10,h10,i10,j10,k10,l10,m10,n10,o10,p10], ~~~~~~~~~~~~~~[a11,b11,c11,d11,e11,f11,g11,h11,i11,j11,k11,l11,m11,n11,o11,p11], 
~~~~~~~~~~~~~~[a12,b12,c12,d12,e12,f12,g12,h12,i12,j12,k12,l12,m12,n12,o12,p12], ~~~~~~~~~~~~~~[a13,b13,c13,d13,e13,f13,g13,h13,i13,j13,k13,l13,m13,n13,o13,p13], ~~~~~~~~~~~~~~[a14,b14,c14,d14,e14,f14,g14,h14,i14,j14,k14,l14,m14,n14,o14,p14], ~~~~~~~~~~~~~~[a15,b15,c15,d15,e15,f15,g15,h15,i15,j15,k15,l15,m15,n15,o15,p15]] } \end{footnotesize} \paragraph{For 8 by 8 matrix:} We also write code for \ensuremath{8 \times 8} matrix constructor named \bera{matrix8x8} in \bera{Libraries.Synthesis.Matrix} module. It also takes rows as argument.\\ \begin{footnotesize} \bera{ -\-- define \ensuremath{8 \times 8} matrix constructor matrix8x8::(a, a, a, a, a, a, a, a) \ensuremath{\rightarrow} (a, a, a, a, a, a, a, a) \ensuremath{\rightarrow} ~~~~~~~~~~~(a, a, a, a, a, a, a, a) \ensuremath{\rightarrow} (a, a, a, a, a, a, a, a) \ensuremath{\rightarrow} ~~~~~~~~~~~(a, a, a, a, a, a, a, a) \ensuremath{\rightarrow} (a, a, a, a, a, a, a, a) \ensuremath{\rightarrow} ~~~~~~~~~~~(a, a, a, a, a, a, a, a) \ensuremath{\rightarrow} (a, a, a, a, a, a, a, a) \ensuremath{\rightarrow} Matrix Eight Eight~a\\ matrix8x8~~(a0, a1, a2, a3, a4, a5, a6, a7) (b0, b1, b2, b3, b4, b5, b6, b7) ~~~~~~~~~~~(c0, c1, c2, c3, c4, c5, c6, c7) (d0, d1, d2, d3, d4, d5, d6, d7) ~~~~~~~~~~~(e0, e1, e2, e3, e4, e5, e6, e7) (f0, f1, f2, f3, f4, f5, f6, f7) ~~~~~~~~~~~(g0, g1, g2, g3, g4, g5, g6, g7) (h0, h1, h2, h3, h4, h5, h6, h7)~= \\ matrix~~~~[[a0, b0, c0, d0, e0, f0, g0, h0], [a1, b1, c1, d1, e1, f1, g1, h1], ~~~~~~~~~~~[a2, b2, c2, d2, e2, f2, g2, h2], [a3, b3, c3, d3, e3, f3, g3, h3], ~~~~~~~~~~~[a4, b4, c4, d4, e4, f4, g4, h4], [a5, b5, c5, d5, e5, f5, g5, h5], ~~~~~~~~~~~[a6, b6, c6, d6, e6, f6, g6, h6], [a7, b7, c7, d7, e7, f7, g7, h7]] } \end{footnotesize} \subsection{Grover's search algorithm} Grover's search algorithm is a quantum algorithm for searching an unsorted database with \ensuremath{N} entries in O(\ensuremath{\sqrt{N}}) time and using O(\ensuremath{\log N}) storage space, invented by Lov Grover 
in 1996 \cite{grover1996fast}. Grover's search algorithm solves the following problem (we quote the definition of the problem verbatim from \cite{noson}): \begin{myprob}Given a function \ensuremath{f : \{0,1\}^n\rightarrow \{0,1\}} as a black box, there exists exactly one binary string \ensuremath{x_0} such that \ensuremath{f(x) = 1} if \ensuremath{x = x_0} and \ensuremath{f(x) = 0} if \ensuremath{x \neq x_0}. The goal is to find \ensuremath{x_0}.\end{myprob} Classically, solving this problem needs \ensuremath{\frac{N}{2}} time on average and \ensuremath{N} time in the worst-case scenario. Unlike previous quantum algorithms, Grover's search algorithm provides a quadratic speedup. Some of the classical algorithms have linear time complexity, and the quantum algorithm solves the same problem in the complexity class BQP. It uses two tricks to increase the probability of the desired binary string \ensuremath{x_0}. \begin{enumerate} \item \textbf{Phase inversion}: is used to change the phase of the desired state. \item \textbf{Inversion about the average}: is used to boost the separation of the phases. \end{enumerate} These operations should not be done more than \ensuremath{\sqrt{N}} times. Otherwise, due to over-computation, the probability of the desired binary string may decrease. \subsubsection{Circuit for Grover's search algorithm} The core section of Grover's search algorithm is in the \bera{grover\_search\_circuit} function. This function calls the \bera{phase\_inversion} and \bera{inversion\_about\_mean} functions \ensuremath{\sqrt{2^n}} times. These functions act as their names suggest. The \bera{phase\_inversion} function will take an oracle and a qubit string and apply that oracle function on that qubit string. The \bera{inversion\_about\_mean} function will separate the target qubit and controlled qubits from the qubit string and apply \ensuremath{2 |\psi\rangle \langle\psi| - I}, the conditional phase shift operation \cite{g3}.
Finally \bera{main} function will call \bera{grover\_search\_circuit} with a dummy oracle named \bera{empty\_oracle}. Respective codes are given below:\\ \begin{footnotesize} \bera{ import Quipper\\ -\-- declare Oracle data type data Oracle = Oracle \{ ~~qubit\_num :: Int, ~~function :: ([Qubit], Qubit) \ensuremath{\rightarrow} Circ ([Qubit], Qubit) \}\\ -\-- declare phase\_inversion function phase\_inversion::(([Qubit],Qubit)\ensuremath{\rightarrow}Circ([Qubit],Qubit))\ensuremath{\rightarrow}([Qubit],Qubit)\ensuremath{\rightarrow} Circ([Qubit],Qubit) phase\_inversion oracle (top\_qubits, bottom\_qubit) = do ~~comment "start phase inversion" ~~-\-- call oracle ~~oracle (top\_qubits, bottom\_qubit) ~~comment "end phase inversion" ~~return (top\_qubits, bottom\_qubit)\\ -\-- declare inversion\_about\_mean function inversion\_about\_mean :: ([Qubit], Qubit) \ensuremath{\rightarrow} Circ ([Qubit], Qubit) inversion\_about\_mean (top\_qubits, bottom\_qubit) = do ~~comment "start inversion about mean" ~~-\-- apply X gate at top qubit ~~mapUnary gate\_X top\_qubits ~~-\-- separate target and control qubits ~~let pos = (length top\_qubits) - 1 ~~let target\_qubit = top\_qubits !! 
pos ~~let controlled\_qubit = take pos top\_qubits ~~-\-- apply hadamard at target\_qubit ~~hadamard\_at target\_qubit ~~-\-- apply qnot gate at target qubit ~~qnot\_at target\_qubit `controlled` controlled\_qubit ~~-\-- apply hadamard again at top ~~hadamard\_at target\_qubit ~~-\-- apply X gate at bottom ~~mapUnary gate\_X top\_qubits ~~comment "end inversion about mean" ~~return (top\_qubits, bottom\_qubit)\\ -\-- declare grover\_search\_circuit function grover\_search\_circuit :: Oracle \ensuremath{\rightarrow} Circ ([Bit]) grover\_search\_circuit oracle = do ~~comment "Grover Search algorithm" ~~-\-- set the value of n ~~let n = toEnum (qubit\_num oracle) :: Float ~~-\-- set the index number to iterate \ensuremath{\sqrt{2^n}} times ~~let index = (floor (sqrt (2**n))) :: Int ~~-\-- create the ancillaes ~~top \ensuremath{\leftarrow} qinit (replicate (qubit\_num oracle) False) ~~bottom \ensuremath{\leftarrow} qinit True ~~label (top, bottom) ("|0>","|1>") ~~-\-- apply hadamard gate at string ~~mapUnary hadamard top ~~hadamard\_at bottom ~~-\-- start to iterate ~~for 1 (index) 1 \$ \textbackslash i \ensuremath{\rightarrow} do ~~~~~comment "start grover iteration" ~~~~~-\-- call phase inversion ~~~~~(top, bottom) \ensuremath{\leftarrow} phase\_inversion (function oracle) (top, bottom) ~~~~~-\-- call inversion about mean ~~~~~(top, bottom) \ensuremath{\leftarrow} inversion\_about\_mean (top, bottom) ~~~~~comment "after grover iteration" ~~endfor ~~-\-- measure qubit string and return result ~~hadamard\_at bottom ~~(top, bottom) \ensuremath{\leftarrow} measure (top, bottom) ~~cdiscard bottom ~~return (top)\\ -\-- main function main = print\_generic Preview (grover\_search\_circuit empty\_oracle) ~~where ~~~-\-- declare empty\_oracle's data type ~~~empty\_oracle :: Oracle ~~~empty\_oracle = Oracle \{ ~~~~~-\-- set the length of qubit string ~~~~~qubit\_num = 4, ~~~~~function = empty\_oracle\_function ~~~\} ~~~-\-- initialize empty\_oracle's function \ensuremath{f(x)} 
~~~empty\_oracle\_function:: ([Qubit],Qubit) \ensuremath{\rightarrow} Circ ([Qubit],Qubit) ~~~empty\_oracle\_function (ins,out) = named\_gate "Oracle" (ins,out) \\} \end{footnotesize} \begin{figure} \caption{Circuit for Grover's search algorithm} \end{figure} \subsubsection{Oracle examples} \paragraph{Single Grover iteration :} We will code for single Grover iteration in \bera{oracle\_two}. This oracle will find the search element \ensuremath{x_0=2} in search space size \ensuremath{N=4} \cite{g3}. So oracle's reaction will be \ensuremath{f(x) = 1} when \ensuremath{x=x_0=2} and \ensuremath{f(x)=0} when \ensuremath{x \neq 2}. \bera{oracle\_two} function is:\\ \begin{footnotesize} \bera{ -\-- declare oracle\_two data type oracle\_two :: Oracle oracle\_two = Oracle \{ ~~qubit\_num = 2, ~~function = oracle\_two\_function \} -\-- initialize oracle\_two function \ensuremath{f(x)} oracle\_two\_function :: ([Qubit],Qubit) \ensuremath{\rightarrow} Circ ([Qubit],Qubit) oracle\_two\_function (controlled\_qubit, target\_qubit) = do ~~qnot\_at target\_qubit `controlled` controlled\_qubit .==. [1,0] ~~return (controlled\_qubit, target\_qubit) \\} \end{footnotesize} \begin{figure} \caption{Circuit for Grover's single iteration \ensuremath{(x_0=2)} \end{figure} \paragraph{Multiple Grover iteration :} \bera{oracle\_five} function needs multiple Grover's iteration to fine search element \ensuremath{x_0=5} in search space size \ensuremath{N=8}. This function will behave like \ensuremath{f(x) = 1} when \ensuremath{x = x_0 = 5} and \ensuremath{f(x)=0} when \ensuremath{x \neq 5}. 
\bera{oracle\_five} function is:\\ \begin{footnotesize} \bera{ -\-- declare oracle\_five's data type oracle\_five :: Oracle oracle\_five = Oracle \{ ~~qubit\_num = 3, ~~function = oracle\_five\_function \} -\-- initialize oracle\_five's function oracle\_five\_function :: ([Qubit],Qubit) \ensuremath{\rightarrow} Circ ([Qubit],Qubit) oracle\_five\_function (controlled\_qubit, target\_qubit) = do ~~qnot\_at target\_qubit `controlled` controlled\_qubit .==. [1,0,1] ~~return (controlled\_qubit, target\_qubit) \\} \end{footnotesize} \begin{figure} \caption{Circuit for Grover's multiple iteration \ensuremath{(x_0=5)} \end{figure} \subsection{Shor's factoring algorithm} Shor's algorithm, the most celebrated quantum algorithm is formulated by Peter Shor in 1994 \cite{shor1994algorithms} for integer factorization. Like Simon's periodicity algorithm, Shor' algorithm uses phase estimation procedure to factor integers in polynomial time. This factoring problem turns out to be equivalent to the order finding problem discussed in Simon's algorithm. Shor's algorithm uses two basic steps to reduce factoring procedure to order finding procedure. These two steps are embodied in the following theorems (we quote these theorems verbatim from \cite{g3}): \begin{thm}Suppose N is a n bit composite number, and \textbf{a} is a non-trivial solution to the equation \ensuremath{\textbf{a}^2} = 1(mod N) in the range 1\ensuremath{\le} \textbf{a} \ensuremath{\le} N, that is, neither \textbf{a} = 1(mod~N) nor \textbf{a} = N\ensuremath{- 1} = \ensuremath{- 1}(mod~N). Then at least one of gcd(\ensuremath{\textbf{a}-1}, N) and gcd(\ensuremath{\textbf{a} + 1}, N) is a non-trivial factor of N.\end{thm} \begin{thm}Suppose N = \ensuremath{p_1^{\alpha_1} . . p_m^{\alpha_m}} is the prime factorization of an odd composite positive integer. Let \textbf{a} be an integer chosen uniformly at random, subject to the requirements that \ensuremath{1\le \textbf{a} \le N - 1} and \textbf{a} is co-prime to N. 
Let r be the order of \textbf{a} modulo N. Then \[ p\left(r \text{ is even and } \mathbf{a}^{r/2} \neq -1~(\mathrm{mod}~N)\right) \ge 1 - \frac{1}{2^m}. \] \end{thm} All these theorems can be implemented in the classical part, but the quantum part needs to find the periods of the functions that hold big integers. We can formally state Shor's algorithm as follows \cite{noson}: \paragraph{Step 1:}First check if \ensuremath{N} is a prime or a power of a prime. A polynomial-time algorithm can be used to determine it. If yes then declare it and exit. Otherwise go to the next step. \paragraph{Step 2:}Choose a random integer \ensuremath{\textbf{a}} such that \ensuremath{1< \textbf{a} < N} and then determine \ensuremath{\gcd(\textbf{a}, N)} using Euclid's algorithm. If the GCD is not 1, then return it and exit. \paragraph{Step 3:}Quantum subroutine for finding the period \ensuremath{r} of a function. \paragraph{Step 4:}If \ensuremath{r} is odd or if \ensuremath{\textbf{a}^r \equiv -1~\mathrm{mod}~N}, then the chosen \ensuremath{\textbf{a}} is not appropriate for further calculations. Go back to step 2 and choose another \ensuremath{\textbf{a}}. \paragraph{Step 5:}Calculate \ensuremath{\gcd((\textbf{a}^{\frac{r}{2}}+1),N)} and \ensuremath{\gcd((\textbf{a}^{\frac{r}{2}}-1),N)} using Euclid's algorithm and return at least one of the nontrivial solutions.\\ Shor's algorithm needs \ensuremath{O(n^2 \log n \log\log n)} steps, where \ensuremath{n} is the number of bits needed to represent \ensuremath{N}. In contrast, the best known classical algorithm requires \ensuremath{O(\exp(c\,n^{\frac{1}{3}} \log^{\frac{2}{3}}n))} steps, where \ensuremath{c} is some constant. Classical algorithms work in sub-exponential time and Shor's algorithm solves the problem in the complexity class BQP. Here the quantum algorithm gives an exponential speedup. Here we will mainly discuss the quantum subroutine of Shor's algorithm.
In this function, oracle \ensuremath{f_{(a,N)}(x)} will be applied to find it's period \ensuremath{r}. Then \ensuremath{QFT^\dagger} \footnote{adjoint matrix of \ensuremath{QFT}} will be applied to modify the period \ensuremath{r} into \ensuremath{\frac{2^{top\_num}}{r}} and to eliminate the offset. Quipper provides \bera{QuipperLib.QFT} module to perform \emph{Quantum Fourier Transformation}. We will use \bera{qft\_big\_endian} function of this module to apply \ensuremath{QFT^\dagger} on \bera{top\_qubit}. Finally \bera{main} function will pass a dummy oracle named \bera{empty\_oracle} to \bera{shor\_circuit} function and start the whole program.\\ \begin{footnotesize} \bera{ import Quipper import QuipperLib.QFT\\ -\-- define oracle data type data Oracle = Oracle \{ ~~top\_num :: Int, ~~bottom\_num :: Int, ~~function :: ([Qubit], [Qubit]) \ensuremath{\rightarrow} Circ ([Qubit], [Qubit]) \}\\ -\-- declare shor\_circuit function shor\_circuit :: Oracle \ensuremath{\rightarrow} Circ [Bit] shor\_circuit oracle = do ~~comment "Shor algorithm" ~~-\-- create the ancillaes ~~top\_qubit \ensuremath{\leftarrow} qinit (replicate (top\_num oracle) False) ~~bottom\_qubit \ensuremath{\leftarrow} qinit (replicate (bottom\_num oracle) False) ~~label (top\_qubit, bottom\_qubit) ("top\_qubit", "bottom\_qubit") ~~-\-- apply hadamard at top qubits ~~mapUnary hadamard top\_qubit ~~comment "applying oracle" ~~-\-- call the oracle ~~function oracle (top\_qubit, bottom\_qubit) ~~comment "after oracle" ~~-\-- measure bottom qubits and discard ~~bottom\_qubit \ensuremath{\leftarrow} measure bottom\_qubit ~~cdiscard bottom\_qubit ~~-\-- apply \ensuremath{QFT^{\dagger}} ~~top\_qubit \ensuremath{\leftarrow} qft\_big\_endian top\_qubit ~~-\-- measure top qubits and return results ~~top\_qubit \ensuremath{\leftarrow} measure top\_qubit ~~return top\_qubit\\ -\-- main function main = print\_generic Preview (shor\_circuit empty\_oracle) ~~where ~~~-\-- declare empty\_oracle's data type 
~~~empty\_oracle :: Oracle ~~~empty\_oracle = Oracle \{ ~~~~~top\_num = 6, ~~~~~bottom\_num = 3, ~~~~~function = empty\_function ~~~\} ~~~-\-- initialize empty\_oracle's function \ensuremath{f(x)} ~~~empty\_function :: ([Qubit], [Qubit]) \ensuremath{\rightarrow} Circ ([Qubit], [Qubit]) ~~~empty\_function (top, bottom) = named\_gate "Oracle" (top,bottom) \\} \end{footnotesize} \begin{figure} \caption{Quantum subroutine for Shor's algorithm\cite{shor3} \end{figure} \subsubsection{Oracle examples} We will write an oracle \ensuremath{f_{(a,N)}(x)} where \ensuremath{a = 7} and \ensuremath{N = 15} \cite{shor3}. Corresponding oracle \bera{mod15\_base7\_oracle} is given below:\\ \begin{footnotesize} \bera{ -\-- declare mod15\_base7\_oracle data type mod15\_base7\_oracle :: Oracle mod15\_base7\_oracle = Oracle\{ ~~top\_num = 3, ~~bottom\_num = 4, ~~function = mod15\_base7\_function \}\\ -\-- initialize mod15\_base7\_oracle's function \ensuremath{f(x)} mod15\_base7\_function :: ([Qubit], [Qubit]) \ensuremath{\rightarrow} Circ ([Qubit], [Qubit]) mod15\_base7\_function (top\_qubit, bottom\_qubit) = do ~~let x1 = top\_qubit !! 1 ~~let x2 = top\_qubit !! 2 ~~let y0 = bottom\_qubit !! 0 ~~let y1 = bottom\_qubit !! 1 ~~let y2 = bottom\_qubit !! 2 ~~let y3 = bottom\_qubit !! 3 ~~qnot\_at y1 `controlled` x2 ~~qnot\_at y2 `controlled` x2 ~~qnot\_at y2 `controlled` y0 ~~qnot\_at y0 `controlled` [x1,y2] ~~qnot\_at y2 `controlled` y0 ~~qnot\_at y1 `controlled` y3 ~~qnot\_at y3 `controlled` [x1,y1] ~~qnot\_at y1 `controlled` y3 ~~return (top\_qubit, bottom\_qubit) \\} \end{footnotesize} \begin{figure} \caption{Circuit for an oracle of \ensuremath{f_{(a,N)} \end{figure} We will write another oracle \ensuremath{f_{(a,N)}(x)} where \ensuremath{a = 20} and \ensuremath{N = 21}. We will optimize quantum circuits for modular multiplication \cite{shor2}. 
Code for \bera{mod21\_base20\_oracle} is given below:\\ \begin{footnotesize} \bera{ -\-- declare mod21\_base20\_oracle data type mod21\_base20\_oracle :: Oracle mod21\_base20\_oracle = Oracle\{ ~~top\_num = 3, ~~bottom\_num = 5, ~~function = mod21\_base20\_function \} -\-- initialize mod21\_base20\_oracle function mod21\_base20\_function :: ([Qubit], [Qubit]) \ensuremath{\rightarrow} Circ ([Qubit], [Qubit]) mod21\_base20\_function (top\_qubit, bottom\_qubit) = do ~~-\-- separate control qubit ~~let cntrl\_qbit = head top\_qubit ~~-- separate \ensuremath{y_0, y_2} and \ensuremath{y_4} ~~let y0 = head bottom\_qubit ~~let y2 = head (drop 2 bottom\_qubit) ~~let y4 = head (drop 4 bottom\_qubit) ~~-\-- apply quantum gates ~~qnot\_at y4 `controlled` cntrl\_qbit ~~qnot\_at y2 `controlled` cntrl\_qbit ~~qnot\_at y0 `controlled` cntrl\_qbit .==. 0 ~~return (top\_qubit, bottom\_qubit) \\} \end{footnotesize} \begin{figure} \caption{Circuit for an oracle of \ensuremath{f_{(a,N)} \end{figure} \section{Conclusion} This report mainly targets the readers who are interested to study functional quantum programming language. We briefly discussed about some other classes of quantum programming languages. We have introduced readers to Quipper and implemented five quantum algorithms using it. It demonstrates the capability of the language and the work overhead for quantum programmers. Starting from a "hello world" program we demonstrated up to Shor's factoring algorithm in a pedagogically suitable sequence in order to ensure a smooth learning carve. We have also given examples of high dimensional oracles (black boxes or functions) for specific algorithms and test some of them using Quipper simulator. \end{small} \end{document}
\begin{document} \title[Relative Convexity and Its Applications]{Relative Convexity and Its Applications} \author{Constantin P. Niculescu} \address{Department of Mathematics, University of Craiova, Craiova 200585, Romania} \email{[email protected]} \author{Ionel Roven\c{t}a} \address{Department of Mathematics, University of Craiova, Craiova 200585, Romania} \email{[email protected]} \thanks{Presented to the \emph{Workshop on Convex Functions and Inequalities}, Targoviste, June 14, 2014} \date{October 1, 2014} \subjclass[2000]{26B25, 26D15.} \keywords{convex function, supporting hyperplane, positive measure, doubly stochastic matrix} \begin{abstract} We discuss a rather general condition under which the inequality of Jensen works for certain convex combinations of points not all in the domain of convexity of the function under attention. Based on this fact, an extension of the Hardy-Littlewood-P\'{o}lya theorem of majorization is proved and new insight is given into the problem of risk aversion in mathematical finance. \end{abstract} \maketitle \section{Introduction} The important role played by the classical inequality of Jensen in mathematics, probability theory, economics, statistical physics, information theory etc. is well known. See the books of Niculescu and Persson \cite{NP2006}, Pe\v{c}ari\'{c}, Proschan and Tong \cite{PPT} and Simon \cite{Simon}. The aim of this paper is to discuss a rather general condition under which the inequality of Jensen works in a framework that includes a large variety of nonconvex functions and to provide on this basis applications to majorization theory and mathematical finance. The possibility to extend the inequality of Jensen outside the framework of convex functions was first noticed twenty years ago by Dragomirescu and Ivan \cite{DI1993}. 
Later, Pearce and Pe\v{c}ari\'{c} \cite{PP1997} and Czinder and P\'{a}les \cite{CP} have considered the special case of mixed convexity (assuming the symmetry of the graph with respect to the inflection point). For related results, see the papers of Florea and Niculescu \cite{FN2007}, Niculescu and Spiridon \cite{NSp2013}, and Mihai and Niculescu \cite{MN2015}. The inequality of Jensen characterizes the behavior of a continuous convex function with respect to a mass distribution on its domain. More precisely, if $f$ is a continuous convex function on a compact convex subset $K$ of $\mathbb{R}^{N}$ and $\mu$ is a Borel probability measure on $K$ having the barycenter \[ b_{\mu}=\int_{K}xd\mu(x), \] then the value of $f$ at $b_{\mu}$ does not exceed the mean value of $f$ over $K,$ that is, \[ f(b_{\mu})\leq\int_{K}f(x)d\mu(x). \] A moment's reflection reveals that the essence of this inequality is the fact that $b_{\mu}$ is a point of convexity of $f$ relative to its domain $K$. The precise meaning of the notion of point of convexity is given in Definition \ref{def1} below, which is stated in the framework of real-valued continuous functions $f$ defined on a compact convex subset $K$ of $\mathbb{R}^{N}$. \begin{definition} \label{def1}A point $a\in K$ is a point of convexity of the function $f$ relative to the convex subset $V$ of $K$\emph{ }if $a\in V$ and \begin{equation} f(a)\leq\sum_{k=1}^{n}\lambda_{k}f(x_{k}), \tag{$J$}\label{J} \end{equation} for every family of points $x_{1},...,x_{n}$ in $V$ and every family of positive weights $\lambda_{1},...,\lambda_{n}$ with $\sum_{k=1}^{n}\lambda _{k}=1$ and $\sum_{k=1}^{n}\lambda_{k}x_{k}=a.$\newline The point $a$ is a point of concavity if it is a point of convexity for $-f$ \emph{(}equivalently, if the inequality $(J)$ works in the reversed way\emph{)}. 
\end{definition} In what follows, the set $V$ that appears in Definition 1 will be referred to as a\ \emph{neighborhood of convexity} of $a.$ Here, the term of \emph{neighborhood} has an extended meaning and is not necessarily ascribed to the topology of $\mathbb{R}^{N}.$ In order to avoid the trivial case where $V=\left\{ a\right\} ,$ we will always assume that $V$ is an infinite set; for example, this happens when $a$ belongs to the relative interior of $V$ (the interior within the affine hull of $V)$. For the function $f(x,y)=x^{2}-y^{2}$, $\ $the origin is a point of convexity relative to the $Ox$ axis, and a point of concavity relative to the $Oy$ axis. With respect to the plane topology, both axes have empty interior. If a function\thinspace$f:K\rightarrow\mathbb{R}$ is convex, then every point of $K$ is a point of convexity relative to the whole domain $K$ (and this fact characterizes the property of convexity of $f$). Definition 1 is motivated mainly by the existence of nonconvex functions that admit points of convexity relative to the whole domain (or at least to a neighborhood of convexity where the function is not convex). An illustration is offered by the nonconvex function $g(x)=\left\vert x^{2}-1\right\vert ;$ for it, all points in $(-\infty,-1]\cup\lbrack1,\infty)$ are points of convexity relative to the entire real set $\mathbb{R}$. Every point of local minimum of a continuous function $f:[0,1]\rightarrow \mathbb{R}$ is a point of convexity. Thus, every nowhere differentiable continuous function $f:[0,1]\rightarrow\mathbb{R}$ admits points of convexity despite the fact that it is not convex on any nondegenerate interval. The idea of point of convexity is not entirely new. In an equivalent form, it is present in the paper of Dragomirescu and Ivan \cite{DI1993}. The technique of convex minorants, described by Steele \cite{Steele} at pp. 96 - 99, is also closed to the concept of point of convexity. 
A different concept of \emph{punctual convexity} is discussed in the recent paper of Florea and P\u{a}lt\u{a}nea \cite{FP2014}. \section{The Existence of Points of Convexity} The following lemma indicates a simple geometric condition under which a point is a point of convexity relative to the whole domain. \begin{lemma} \label{lem1}Assume that $f$ is a real-valued continuous function defined on a compact convex subset $K$ of $\mathbb{R}^{N}.$ If $f$ admits a supporting hyperplane at a point $a,$ then $a$ is point of convexity of $f$ relative to $K.$ In other words, every point at which the subdifferential is nonempty is a point of convexity. \end{lemma} \begin{proof} Indeed, the existence of a supporting hyperplane at $a$ is equivalent to the existence of an affine function $h(x)=\langle x,v\rangle+c$ (for suitable $v\in\mathbb{R}^{N}$ and $c\in\mathbb{R}$) such that \[ f(a)=h(a)\text{ and }f(x)\geq h(x)\text{ for all }x\in K. \] If $\mu$ is a Borel probability measure, its barycenter is given by the formula \[ b_{\mu}=\int_{K}xd\mu(x), \] so that if $b_{\mu}=a,$ then \[ f(a)=h(a)=h\left( \int_{K}xd\mu(x)\right) =\int_{K}h(x)d\mu(x)\leq\int _{K}f(x)d\mu(x). \] \end{proof} \begin{remark} Another sufficient condition for the convexity at a point, formulated in terms of secant line slopes, can be found in the papers of Niculescu and Stephan \cite{NSt2012}, \cite{NSt2013}. However, as shows the case of polynomials of fourth degree, that condition does not overcome the result of Lemma 1. \end{remark} As is well known, the usual property of convexity assures the existence of a supporting hyperplane at each interior point. See \cite{NP2006}, Theorem 3.7.1, p. 128. This explains why Jensen's inequality works nicely in the context of continuous convex functions. In the case of differentiable functions, the supporting hyperplane is unique and coincides with the tangent hyperplane. 
For such functions, Lemma 1 asserts that every point where the tangent hyperplane lies above/below the graph is a point of concavity/convexity. \begin{example} In the one real variable case, the existence of points of convexity of a nonconvex differentiable function (such as $xe^{x},$ $x^{2}e^{-x},$ $\log ^{2}x,$ $\frac{\log x}{x}$ etc.) can be easily proved by looking at the position of the tangent line with respect to the graph. For example, the function $xe^{x}$ is concave on $(-\infty,-2]$ and convex on $[-2,\infty)$ (and attains a global minimum at $x=-1).$ See Figure 1. \begin{figure} \caption{A point of convexity of the function $xe^{x} \label{Fig1} \end{figure} Based on Lemma 1, one can easily show that every point $x\geq-1$ is a point of convexity of $f$ relative to the whole real axis. Therefore \[ \sum_{k=1}^{n}\lambda_{k}x_{k}e^{x_{k}}\geq\left( \sum_{k=1}^{n}\lambda _{k}x_{k}\right) e^{\sum_{k=1}^{n}\lambda_{k}x_{k}}, \] whenever $\sum_{k=1}^{n}\lambda_{k}x_{k}\geq-1.$ In the special case where $\sum_{k=1}^{n}\lambda_{k}x_{k}\geq0,$ this inequality can be deduced from Chebyshev's inequality and the convexity of the exponential function. Borwein and Girgensohn \cite{BG} proved that \[ \sum_{k=1}^{n}x_{k}e^{x_{k}}\geq\frac{\max\left\{ 2,e(1-1/n)\right\} } {n}\sum_{k=1}^{n}x_{k}^{2}, \] for every family of real numbers $x_{1},x_{2},...,,x_{n}$ such that $\sum_{k=1}^{n}x_{k}\geq0$. The extension of their result to the weighted case $($subject to the condition $\sum_{k=1}^{n}\lambda_{k}x_{k}\geq0)$ is an open problem. \end{example} \begin{example} The two real variables function \[ f(x,y)=e^{-x^{2}-y^{2}},\quad\left( x,y\right) \in\mathbb{R}^{2}, \] exhibits the phenomenon of relative concavity. Indeed, its graph is the rotation graph of the function $z=e^{-x^{2}}$ around the $Oz$ axis and this makes possible to apply Lemma 1 by means of calculus of one real variable. See Figure 2. 
\begin{figure} \caption{A point of concavity of the function $e^{-x^{2}-y^{2}}$} \label{Fig2} \end{figure} The convexity properties of the function $f$ can be described in a more convenient way by viewing it as a function of a complex variable, via the formula $f(w)=e^{-\left\vert w\right\vert ^{2}}.$ The function $f$ is strictly concave on the compact disc $\overline{D}_{1/\sqrt{2}}\left( 0\right) $ and attains a global maximum at the origin. The tangent plane at the graph of $f,$ at any point $w_{0}=(x_{0},y_{0})$ with $\left\Vert w_{0}\right\Vert \leq1/2,$ is above the graph over a neighborhood of $w_{0}$ including the closed disc $\overline{D}_{r^{\ast}}\left( 0\right) $, where $r^{\ast}=1.\,\allowbreak 183\,802...$ is the solution of the equation $e^{-1/4}(\frac{3}{2} -x)=e^{-x^{2}}$. As a consequence, \[ \sum_{k=1}^{n}\lambda_{k}e^{-\left\vert w_{k}\right\vert ^{2}}\leq e^{-M^{2}} \] for all points $w_{1},...,w_{n}\in\overline{D}_{r^{\ast}}\left( 0\right) $ and all $\lambda_{1},...,\lambda_{n}>0$ such that $\sum_{k=1}^{n}\lambda _{k}=1$ and $\left\vert \sum\limits_{k=1}^{n}\lambda_{k}w_{k}\right\vert =M\leq1/2.$ Notice that Jensen's inequality yields this conclusion only when $w_{1},...,w_{n}\in\overline{D}_{1/\sqrt{2}}\left( 0\right) .$ \end{example} The real variable case also has nontrivial implications in the case of matrix functions. The function $F(X)=\operatorname{trace}(f(X))$ is convex/concave on the linear space $\operatorname*{Sym}(n,\mathbb{R}),$ of all self-adjoint (that is, symmetric) matrices in $\operatorname{M}_{n}(\mathbb{R}),$ whenever $f:\mathbb{R}\rightarrow\mathbb{R}$ is convex/concave. See the paper of Lieb and Pedersen \cite{LP} for details.
Thus, in the case of $f(x)=xe^{x},$ the function $F$ is concave on the convex set $\operatorname*{Sym}_{sp\subset (-\infty,-2]}(n,\mathbb{R}),$ of all symmetric matrices in $\operatorname{M} _{n}(\mathbb{R})$ whose spectrum is included in $(-\infty,-2];$ the function $F$ is convex on the set $\operatorname*{Sym}_{sp\subset\lbrack-2,\infty )}(n,\mathbb{R}),$ of all symmetric matrices in $\operatorname{M} _{n}(\mathbb{R})$ whose spectrum is included in $[-2,\infty).$ The following result is a direct consequence of functional calculus with self-adjoint matrices. If $\lambda_{1},...,\lambda_{n}$ are positive numbers such that $\sum_{k=1}^{n}\lambda_{k}=1$ and $A_{1},...,A_{n}$ are matrices in $\operatorname*{Sym}_{sp\subset(-\infty,-2]}(n,\mathbb{R})\cup \operatorname*{Sym}_{sp\subset\lbrack-2,\infty)}(n,\mathbb{R})$ such that $\sum_{k=1}^{n}\lambda_{k}A_{k}\geq-I_{n},$ then \[ \sum_{k=1}^{n}\lambda_{k}\operatorname{trace}\left( A_{k}e^{A_{k}}\right) \geq\operatorname{trace}\left[ \left( \sum_{k=1}^{n}\lambda_{k}A_{k}\right) e^{\sum_{k=1}^{n}\lambda_{k}A_{k}}\right] . \] \section{The Extension of Hardy-Littlewood-P\'{o}lya theorem of majorization} The notion of point of convexity leads to a very large generalization of the Hardy-Littlewood-P\'{o}lya theorem of majorization. Given a vector $\mathbf{x}=(x_{1},...,x_{N})$ in $\mathbb{R}^{N},$ let $\mathbf{x} ^{\downarrow}$ be the vector with the same entries as $\mathbf{x}$ but rearranged in decreasing order, \[ x_{1}^{\downarrow}\geq\cdots\geq x_{N}^{\downarrow}. \] The vector $\mathbf{x}$ is \emph{majorized} by $\mathbf{y}$ (abbreviated, $\mathbf{x}\prec\mathbf{y})$ if \[ \sum_{i\,=\,1}^{k}\,x_{i}^{\downarrow}\leq\sum_{i\,=\,1}^{k}\,y_{i} ^{\downarrow}\quad\text{for }k=1,...,N-1 \] and \[ \sum_{i\,=\,1}^{N}\,x_{i}^{\downarrow}=\sum_{i\,=\,1}^{N}\,y_{i}^{\downarrow }\,. \] The concept of majorization admits an order-free characterization based on the notion of doubly stochastic matrix.
Recall that a matrix $A\in \,\operatorname{M}_{n}(\mathbb{R})$ is \emph{doubly stochastic} if it has nonnegative entries and each row and each column sums to unity. \begin{theorem} \label{ThmHLP}\emph{(Hardy, Littlewood and P\'{o}lya \cite{HLP}).} Let $\mathbf{x}$ and $\mathbf{y}$ be two vectors in $\mathbb{R}^{N}$, whose entries belong to an interval $I.$ Then the following statements are equivalent: $a)$ $\mathbf{x}\prec\mathbf{y};$ $b)$ there is a doubly stochastic matrix $A=(a_{ij})_{1\leq i,j\leq N}$ such that $\mathbf{x}=A\mathbf{y};$ $c)$ the inequality $\sum_{i=1}^{N}f(x_{i})\leq\sum_{i=1}^{N}f(y_{i})$ holds for every continuous convex function $f:I\rightarrow\mathbb{R}$. \end{theorem} An alternative characterization of the relation of majorization is given by the Schur-Horn theorem: $\mathbf{x}\prec\mathbf{y}$ in $\mathbb{R}^{N}$ if and only if the components of $\mathbf{x}$ and $\mathbf{y}$ are respectively the diagonal elements and the eigenvalues of a self-adjoint matrix. The details can be found in the book of Marshall, Olkin and Arnold \cite{MOA}, pp. 300-302. The notion of majorization is generalized by weighted majorization, that refers to probability measures rather than vectors. 
This is done by identifying any vector $\mathbf{x}=(x_{1},...,x_{N})$ in $\mathbb{R}^{N}$ with the probability measure $\frac{1}{N}\sum_{i=1}^{N}\delta_{x_{i}},$ where $\delta_{x_{i}}$ denotes the Dirac measure concentrated at $x_{i}.$ We define the relation of majorization \begin{equation} \sum_{i=1}^{m}\lambda_{i}\delta_{\mathbf{x}_{i}}\prec\sum_{j=1}^{n}\mu _{j}\delta_{\mathbf{y}_{j}}, \tag{$2$}\label{2} \end{equation} between two positive discrete measures supported at points in $\mathbb{R}^{N} $, by asking the existence of a $m\times n$-dimensional matrix $A=(a_{ij} )_{i,j}$ such that \begin{gather} a_{ij}\geq0,\text{ for all }i,j\tag{$3$}\label{3}\\ \sum_{j=1}^{n}a_{ij}=1,\text{\quad}i=1,...,m\tag{$4$}\label{4}\\ \mu_{j}=\sum_{i=1}^{m}a_{ij}\lambda_{i},\text{\quad}j=1,...,n \tag{$ 5$}\label{5} \end{gather} and \begin{equation} \mathbf{x}_{i}=\sum_{j=1}^{n}a_{ij}\mathbf{y}_{j}\text{,\quad}i=1,...,m. \tag{$6$}\label{6} \end{equation} The matrices verifying the conditions $(3)\&(4)$ are called \emph{stochastic on rows}. When $m=n$ and all weights $\lambda_{i}$ and $\mu_{j}$ are equal to each others, the condition $(5)$ assures the \emph{stochasticity on columns, }so in that case we deal with doubly stochastic matrices. We are now in a position to state the following generalization of the Hardy-Littlewood-P\'{o}lya theorem of majorization: \begin{theorem} \label{ThmGHLP}Suppose that $f$ is a real-valued function defined on a compact convex subset $K$ of $\mathbb{R}^{N}$ and $\sum_{i=1}^{m}\lambda_{i} \delta_{\mathbf{x}_{i}}$ and $\sum_{j=1}^{n}\mu_{j}\delta_{\mathbf{y}_{j}}$ are two positive discrete measures concentrated at points in $K.$ If $\mathbf{x}_{1},...,\mathbf{x}_{m}$ are points of convexity of $f$ relative to $K$ and \[ \sum_{i=1}^{m}\lambda_{i}\delta_{\mathbf{x}_{i}}\prec\sum_{j=1}^{n}\mu _{j}\delta_{\mathbf{y}_{j}}, \] then \begin{equation} \sum_{i=1}^{m}\lambda_{i}f(\mathbf{x}_{i})\leq\sum_{j=1}^{n}\mu_{j} f(\mathbf{y}_{j}). 
\tag{$7$}\label{7} \end{equation} \end{theorem} \begin{proof} By our hypothesis, there exists a $m\times n$-dimensional matrix $A=(a_{ij})_{i,j}$ that is stochastic on rows and verifies the conditions $(5)$ and $(6)$. The last condition shows that each point $\mathbf{x}_{i}$ is the barycenter of the probability measure $\sum_{j=1}^{n}a_{ij}\delta _{\mathbf{y}_{j}}$. By Jensen's inequality, we infer that \[ f(\mathbf{x}_{i})\leq\sum_{j=1}^{n}a_{ij}f(\mathbf{y}_{j}). \] Multiplying each side by $\lambda_{i}$ and then summing up over $i$ from $1$ to $m,$ we conclude that \[ \sum_{i=1}^{m}\lambda_{i}f(\mathbf{x}_{i})\leq\sum_{i=1}^{m}\left( \lambda_{i}\sum_{j=1}^{n}a_{ij}f(\mathbf{y}_{j})\right) =\sum_{j=1} ^{n}\left( \sum_{i=1}^{m}a_{ij}\lambda_{i}\right) f(\mathbf{y}_{j} )=\sum_{j=1}^{n}\mu_{j}f(\mathbf{y}_{j}), \] and the proof of $(7)$ is done. \end{proof} \begin{example} The well known Gauss--Lucas theorem on the distribution of the critical points of a polynomial asserts that the roots $(\mu_{k})_{k=1}^{n-1}$ of the derivative $P^{\prime}$ of any complex polynomial $P\in$ $\mathbb{C}[z]$ of degree $n\geq2$ lie in the smallest convex polygon containing the roots $(\lambda_{j})_{j=1}^{n}$ of the polynomial $P$. This led Malamud \cite{M2005} to the interesting remark that the two families of roots are actually related by the relation of majorization. 
Based on this remark, he was able to prove the following conjecture raised by de Bruijn and Springer in 1947: for any convex function $f:\mathbb{C}\rightarrow\mathbb{R}$ and any polynomial $P$ of degree $n\geq2,$ \[ \frac{1}{n-1}\sum_{k=1}^{n-1}f(\mu_{k})\leq\frac{1}{n}\sum_{j=1}^{n} f(\lambda_{j}), \] where $(\lambda_{j})_{j=1}^{n}$ and $(\mu_{k})_{k=1}^{n-1}$ are respectively the roots of $P$ and $P^{\prime}.$ Theorem \ref{ThmGHLP} allows us to relax the condition of convexity by asking only that all the roots $\mu_{k}$ of $P^{\prime}$ be points of convexity for $f.$ According to a remark above concerning the function $e^{-\left\vert z\right\vert ^{2}},$ this implies that \[ \frac{1}{n-1}\sum_{k=1}^{n-1}e^{-|\mu_{k}|^{2}}\geq\frac{1}{n}\sum_{j=1} ^{n}e^{-\left\vert \lambda_{j}\right\vert ^{2}}, \] whenever the roots $\mu_{1},...,\mu_{n-1}$ belong to $\overline{D} _{1/2}\left( 0\right) $ and $\lambda_{1},...,\lambda_{n}$ belong to $\overline{D}_{1.18}\left( 0\right) .$ An example of polynomial verifying these conditions is $P(z)=4z^{3}-3z.$ \end{example} \begin{example} A second application of Theorem \ref{ThmGHLP} refers to the function $f(x)=\log^{2}x.$ This function is convex on the interval $(0,e]$ and concave on $[e,\infty)$. The Hardy-Littlewood-P\'{o}lya theorem of majorization easily yields the implication \begin{equation} \left( x_{1},...,x_{n}\right) \prec\left( y_{1},...,y_{n}\right) \Rightarrow\sum_{i=1}^{n}\log^{2}x_{i}\leq\sum_{i=1}^{n}\log^{2}y_{i} \tag{$8$}\label{8} \end{equation} whenever $x_{1},...,x_{n}$ and $y_{1},...,y_{n}$ belong to $(0,e].$ According to Lemma 1, all points in $(0,2]$ are points of convexity of $f$ relative to $(0,a^{\ast}],$ where \[ a^{\ast}=5.495\,869\,874... 
\] is the solution of the equation $\log^{2}x-\log^{2}2=\left( \log2\right) (x-2).$ By Theorem \ref{ThmGHLP}, the implication $(8)$ still works when $x_{1},...,x_{n}\in(0,2]$ and $y_{1},...,y_{n}\in(0,a^{\ast}].$ Recently, B\^{\i}rsan, Neff and Lankeit \cite{BNL} noticed still another case where an inequality of the form $(8)$ holds true. Precisely, they proved that for every two triplets $x_{1},x_{2},x_{3}$ and $y_{1},y_{2},y_{3}$ of positive numbers which verify the conditions \[ x_{1}+x_{2}+x_{3}\leq y_{1}+y_{2}+y_{3},\text{\quad}x_{1}x_{2}+x_{2} x_{3}+x_{3}x_{1}\leq y_{1}y_{2}+y_{2}y_{3}+y_{3}y_{1}\text{ } \] and $x_{1}x_{2}x_{3}=y_{1}y_{2}y_{3},$ we have \[ \sum_{i=1}^{3}\log^{2}x_{i}\leq\sum_{i=1}^{3}\log^{2}y_{i}. \] This suggests a new concept of majorization for $n$-tuples of positive elements, based on elementary symmetric functions. Being beyond the purpose of this paper, we will not enter the details. \end{example} Theorem \ref{ThmGHLP} provides the following extension of Popoviciu's inequality: \begin{theorem} \label{TPOP} Suppose that $f$ is a real-valued function defined on an interval $I$. If $a,b,c$ belong to $I$ and $\frac{a+b}{2},\frac{a+c}{2}$ and $\frac{b+c}{2}$ are points of convexity of $f$ relative to the entire interval $I,$ then \begin{multline} \frac{f\left( a\right) +f\left( b\right) +f\left( c\right) }{3}+f\left( \frac{a+b+c}{3}\right) \tag{$9$}\label{9}\\ \geq\frac{2}{3}\left[ f\left( \frac{a+b}{2}\right) +f\left( \frac{a+c} {2}\right) +f\left( \frac{b+c}{2}\right) \right] .\nonumber \end{multline} \end{theorem} \begin{proof} Without loss of generality we may assume that $a\geq b\geq c$. Then \[ \frac{a+b}{2}\geq\frac{a+c}{2}\geq\frac{b+c}{2}\text{ and }a\geq\frac {a+b+c}{3}\geq c. 
We attach to the points $a,b,c$ two families of six points each:
A nice illustration of this inequality in mathematical finance refers to the so-called \emph{risk aversion}: the reluctance of a person to invest his or her life savings into a stock that may have high expected returns (but also involves a chance of losing value), preferring instead to put the money into a bank account with a low but guaranteed interest rate.
183: every Borel probability measure $\nu$\ on a compact convex set $K$\ is the pointwise limit of a net of discrete Borel probability measures $\nu_{\alpha}$\ on $K$, each having the same barycenter as $\nu.$ In the general case, we approximate $X$ by the sequence of bounded random variables $X_{n}=\sup\left\{ \inf\left\{ X,n\right\} ,-n\right\} .$ \end{proof} \section{Concluding Remarks} In this paper we introduced the concept of convexity at a point relative to a convex subset of the domain. This fact made Jensen's inequality available to a large variety of nonconvex functions and shed new light on the Hardy-Littlewood-P\'{o}lya theorem of majorization. In turn, the probabilistic form of Jensen's inequality (as stated in Theorem \ref{thmJintegral}) put in a more general perspective the problem of risk aversion. Most likely the notion of convexity at a point could have a practical purpose in optimization theory, information theory, the design of communication systems etc. \noindent\textbf{Acknowledgement}. The first author was supported by a grant of the Romanian National Authority for Scientific Research, CNCS -- UEFISCDI, project number PN-II-ID-PCE-2011-3-0257. The second author was supported by the strategic grant POSDRU/159/1.5/S/133255, Project ID 133255 (2014), co-financed by the European Social Fund within the Sectorial Operational Program Human Resources Development 2007 - 2013. \end{document}
\begin{document} $\:$ \begin{center} {\large\bf Parametric CR-umbilical Locus of Ellipsoids in $\C^2$ } \label{CR-umbilics-ellipsoids} Wei-Guo {\sc Foo}, Jo\"el {\sc Merker}, The-Anh {\sc Ta} {\large\footnotesize\sf Departement of Mathematics} {\large\footnotesize\sf Orsay University} {\large\footnotesize\sf Paris, France} \end{center} \Section{\bf Introduction} \label{introduction} \HEAD{\ref{introduction}.~{\sf Introduction} }{ Wei-Guo {\sc Foo}, Jo\"el {\sc Merker}, The-Anh {\sc Ta} Orsay University, Paris, France} \CITATION{ Different from the situation in the classical Differential Geometry, except in the trivial spherical case, where $\mathcal{S}$ or $\mathcal{P} \equiv 0$, computing umbilical points seems to be a very difficult problem. This is because the explicit formula for the fundamental Cartan-Chern-Moser curvature tensors is too complicated. Xiaojun {\sc Huang}, Shanyu {\sc Ji}, \cite{Huang-Ji-2007} } In 1932, \'Elie Cartan~\cite{Cartan-1932-I, Cartan-1932-II, Cartan-1932-III} showed that a local real-analytic ($\mathcal{C}^\omega$) hypersurface $M^3 \subset \C^2$ is determined up to local biholomorphic equivalence by a single invariant function: \[ \ICartan^M \colon\ \ \ M \,\longrightarrow\, \C, \] together with its (covariant) derivatives with respect to a certain coframe of differential $1$-forms on an $8$-dimensional principal bundle $P^8 \longrightarrow M$. 
In coordinates $(z,w) = \big( x + \isqrt\, y,\, u + \isqrt\, v\big)$ on $\C^2$, whenever $M$ is: \noindent$\bullet$\, either a {\sl complex graph:} \[ \big\{ (z,w)\in\C^2 \colon\, w=\Theta\big(z,\overline{z},\overline{w}\big) \big\}, \] \noindent$\bullet$\, or a {\sl real graph:} \[ \big\{ (z,w)\in\C^2 \colon\, v=\varphi(x,y,u) \big\}, \] \noindent$\bullet$\, or represented in {\sl implicit form:} \[ \big\{ (z,w)\in\C^2 \colon\, \rho\big(z,w,\overline{z},\overline{w}\big) = 0 \big\}, \] \noindent it is known that $\ICartan^M$ depends on the respective $6$-jets: \[ J_{z,\overline{z},\overline{w}}^6\, \Theta, \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ J_{x,y,u}^6\, \varphi, \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ J_{z,w,\overline{z},\overline{w}}^6\,\rho. \] The {\sl invariancy} of $\ICartan^M$ means that, for any local biholomorphism $h \colon \C^2 \longrightarrow \C^2$, setting $M' := h(M)$, it holds at every point $p \in M$ that: \[ \ICartan^{M'} \big(h(p)\big) \,=\, \nu(p)\, \ICartan^M(p) \eqno {\scriptstyle{(\forall\,p\,\in\,M)}}, \] for some nowhere vanishing (local) function $\nu \colon M \longrightarrow \C \backslash \{0\}$. This guarantees that the locus of {\sl CR-umbilical} points: \[ \UmbCR(M) \,:=\, \big\{ p\in M \colon\, \ICartan^M(p) = 0 \big\} \] is intrinsic. Furthermore, when $M$ is connected, it is well known that $\UmbCR(M)$ contains an open set $\emptyset \neq V \subset M$ if and only if $M$ is {\sl spherical}, in the sense of being locally biholomorphic to the unit sphere $S^3 \subset \C^2$. In 1974, Chern-Moser~{\cite{Chern-Moser-1974}} raised the problem whether $\emptyset \neq \UmbCR(M)$ for compact Levi nondegenerate $\mathcal{C}^\omega$ hypersurfaces $M^{2\NN-1} \subset \C^\NN$ when $\NN \geqslant 2$. This (simple!) 
paper attacks the more specific: \begin{Question} {\sl Can $\UmbCR(M)$ be described explicity?} \end{Question} But because $\ICartan^M$ is `{\sl too complicated}' as confirmed in~{\cite{Merker-Sabzevari-2012, Merker-Sabzevari-2014}}, the question is nontrivial even in simplest nonspherical examples like {\em e.g.} real ellipsoids introduced and studied by Webster in~{\cite{Webster-1977, Webster-2000}}. In $\C^{\NN \geqslant 2} \cong \R^{2\NN \geqslant 4}$ equipped with coordinates $\zaux_i = \xaux_i + \isqrt\, \yaux_i$, an {\sl ellipsoid} is the image of the unit sphere: \[ S^{2\NN-1} \,:=\, \big\{ \zaux\in\C^\NN \colon\, \vert\zaux_1\vert^2 +\cdots+ \vert\zaux_\NN\vert^2 = 1 \big\}, \] through a real affine transformation of $\R^{2\NN}$, hence has equation the form: \tagsleft@true\usetagform{default} \begin{align} \sum_{1\leqslant i\leqslant\NN}\, \big( \alpha_i\,\xaux_i^2 + \beta_i\,\yaux_i^2 \big) \,=\, 1, \tag{${\sf E}_{\alpha,\beta}$} \end{align} with real constants $\alpha_i \geqslant \beta_i > 0$\,\,---\,\,replace $\zaux_i \longmapsto \isqrt\, \zaux_i$ if necessary. The complex geometry of ellipsoids (Segre varieties, dynamics) began in Webster's seminal article~{\cite{Webster-1977}}, in which it was verified that two ellipsoids ${\sf E}_{\alpha,\beta} \cong {\sf E}_{ \alpha', \beta'}$ are biholomorphically equivalent if and only if up to permutation: \[ \frac{\alpha_i-\beta_i}{\alpha_i+\beta_i} \,=\, \frac{\alpha_i'-\beta_i'}{\alpha_i'+\beta_i'} \eqno {\scriptstyle{(1\,\leqslant\,i\,\leqslant\,\NN)}}. \] Replacing $\zaux_i \longmapsto \frac{1}{\sqrt{\beta_i}}\, \zaux_i$ and setting $a_i := \frac{\alpha_i}{\beta_i}$, whence $1 \leqslant a_i$, leads to a convenient representation: \tagsleft@true\usetagform{default} \begin{align} \sum_{1\leqslant i\leqslant\NN}\, \big( a_i\,\xaux_i^2 + \yaux_i^2 \big) \,=\, 1. 
\tag{${\sf E}_{a_1,\dots,a_\NN}$} \end{align} Yet an alternative view, due to Webster in~{\cite{Webster-2000}}, is: \tagsleft@true\usetagform{default} \begin{align} \label{A-Webster-ellipsoid} \sum_{1\leqslant i\leqslant\NN}\, \big( \zaux_i\overline{\zaux}_i + A_i\, (\zaux_i^2+\overline{\zaux}_i^2) \big) \,=\, 1, \tag{${\sf E}_{A_1,\dots,A_\NN}$} \end{align} obtained by setting $A_i := \frac{a_i-1}{2a_i+2}$, whence $0 \leqslant A_i < \frac{1}{2}$, so that $a_i = \frac{1+2A_i}{1-2A_i}$, then by changing coordinates $\zaux_i =: \sqrt{1-2A_i}\, \zaux_i'$, and then by dropping primes. In $\C^\NN$ when $\NN \geqslant 3$, what corresponds to the invariant $\ICartan^M$ is the Hachtroudi-Chern tensor $S_{\rho\sigma}^{\alpha\beta}$ with indices $1 \leqslant \alpha, \beta, \rho, \sigma \leqslant \NN$, and the concerned CR-umbilical locus: \[ \UmbCR(M) \,:=\, \big\{ p\in M \colon\, S_{\rho\sigma}^{\alpha\beta}(p) = 0,\,\, \forall\,\alpha,\beta,\rho,\sigma \big\}, \] is known, through local biholomorphisms $h \colon \C^\NN \longrightarrow \C^\NN$ as above, to enjoy \[ h\big( \UmbCR(M) \big) \,=\, \UmbCR \big(h(M)\big). \] \begin{Theorem} {\rm ({\cite{Webster-2000}})} In $\C^{\NN \geqslant 3}$, if $0 < A_1 < \cdots < A_\NN < \frac{1}{2}$, then: \[ \emptyset \,=\, \UmbCR \big( {\sf E}_{A_1,\dots,A_\NN} \big). \eqno\qed \] \end{Theorem} This motivated Huang-Ji in~{\cite{Huang-Ji-2007}} to study the question for compact $\mathcal{C}^\omega$ hypersurfaces $M \subset \C^2$. If $M = \{ \rho = 0\}$, the expected dimension of: \[ \UmbCR(M) \,=\, \big\{ 0 = \rho = \Re\,\ICartan = \Im\,\ICartan \big\} \] should be $4 - 3 = 1$, although this is not rigorous, for $\R$ is not algebraically closed! \begin{Theorem} {\rm (Implicitly proved in~{\cite{Huang-Ji-2007}})} Every real ellipsoid ${\sf E}_{a,b} \subset \C^2$ of equation: \[ a\,x^2+y^2+b\,u^2+v^2 \,=\, 1 \eqno {\scriptstyle{(a\,\geqslant\,1,\,b\,\geqslant\,1,\, (a,b)\,\neq\,(1,1))}} \] enjoys: \[ \dim_\R\, \UmbCR(M) \,\geqslant\, 1. 
\eqno\qed \] \end{Theorem} In other words, it contains at least some (real algebraic!) curve. What curve? Simple? Complicated? Can what follows be considered as a satisfactory answer? \begin{Theorem} \label{a-b-cos-sin} For every real numbers $a \geqslant 1$, $b \geqslant 1$ with $(a,b) \neq (1,1)$, the curve parametrized by $\theta \in \R$ valued in $\C^2 \cong \R^4${\em :} \[ \gamma \colon \ \ \ \theta \,\,\,\longmapsto\,\,\, \big( x(\theta)+\isqrt\,y(\theta),\,\, u(\theta)+\isqrt\,v(\theta) \big) \] with components: \[ \aligned x(\theta) & \,:=\, {\textstyle{\sqrt{\frac{a-1}{a\,(ab-1)}}}}\, \cos\,\theta, \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ y(\theta) \,:=\, {\textstyle{\sqrt{\frac{b\,(a-1)}{ab-1}}}}\, \sin\,\theta, \\ u(\theta) & \,:=\, {\textstyle{\sqrt{\frac{b-1}{b\,(ab-1)}}}}\, \sin\,\theta, \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ v(\theta) \,:=\, -\, {\textstyle{\sqrt{\frac{a\,(b-1)}{ab-1}}}}\, \cos\,\theta, \endaligned \] has image contained in the CR-umbilical locus: \[ \gamma(\R) \,\subset\, \UmbCR \big({\sf E}_{a,b}\big) \,\subset\, {\sf E}_{a,b} \] of the ellipsoid ${\sf E}_{a,b} \subset \C^2$ of equation $a\,x^2+y^2+b\,u^2+y^2 = 1$. \end{Theorem} In other words: \[ \ICartan^{{\sf E}_{a,b}}\big(\gamma(\theta)\big) \,=\, 0 \eqno {\scriptstyle{(\forall\,\theta\,\in\,\R)}}. \] As is known for ellipsoids, Cartan's invariant $\ICartan^{{\sf E}_{a,b}}$ exhibits a high complexity, {\em e.g.} $\sim\,40\,000$ terms in~{\cite{Merker-Sabzevari-2012}}. So this theorem might be interpreted as a somewhat unexpectedly nice and simple description of $\UmbCR \big({\sf E}_{a,b}\big)$! All computations of this paper were done by hand. 
\Section{\bf Explicit Expression of Cartan's CR-Invariant $\mathfrak{I}$} \label{explicit-Cartan-invariant} \HEAD{\ref{explicit-Cartan-invariant}.~{\sf Explicit Expression of Cartan's CR-Invariant $\mathfrak{I}$} }{ Wei-Guo {\sc Foo}, Jo\"el {\sc Merker}, The-Anh {\sc Ta} Orsay University, Paris, France} In $\C^2$ equipped with coordinates $(z,w) = \big( x + \isqrt\,y,\, u + \isqrt\, v\big)$, consider a connected real-analytic ($\mathcal{C}^\omega$) $3$-dimensional hypersurface: \[ M^3 \,:=\, \big\{ (z,w)\in\C^2 \colon\, \rho(z,w,\overline{z},\overline{w}) = 0 \big\}, \] with $\overline{\rho} = \rho$, and with $d\rho \big\vert_M$ never zero. Local or global $M$, compact or open, bounded or unbounded, can be equally treated. The two vector fields: \[ L \,:=\, -\,\rho_w\, \frac{\partial}{\partial z} + \rho_z\, \frac{\partial}{\partial w} \ \ \ \ \ \ \ \ \ \ \ \ \ \text{\rm and} \ \ \ \ \ \ \ \ \ \ \ \ \ \overline{L} \,:=\, -\,\rho_{\overline{w}}\, \frac{\partial}{\partial\overline{z}} + \rho_{\overline{z}}\, \frac{\partial}{\partial\overline{w}} \] generate $T^{1,0}M$ and $T^{0,1}M$. If $h \colon \C^2 \longrightarrow \C^2$ is a local biholomorphism: \[ (z,w) \,\longmapsto\, \big(f(z,w),g(z,w)\big) \,=:\, (z',w'), \] if $M = \{ \rho = 0\}$ and $M' = \{ \rho' = 0\}$ are two $\mathcal{C}^\omega$ hypersurfaces, if $h(M) \subset M'$, there is a nowhere vanishing function $\mu \colon M \longrightarrow \C \backslash \{0\}$ such that: \[ \mu(z,w,\overline{z},\overline{w})\, \rho(z,w,\overline{z},\overline{w}) \,\,\equiv\,\, \rho' \big( f(z,w),g(z,w), \overline{f}(\overline{z},\overline{w}), \overline{g}(\overline{z},\overline{w}) \big), \] whence in $\C\{z,w,\overline{z},\overline{w}\}$ (exercise): \[ \mu\, \Big( -\,\rho_w\, \frac{\partial}{\partial z} + \rho_z\, \frac{\partial}{\partial w} \Big) \,\,=\,\, \big(f_zg_w-f_wg_z\big)\, \Big( -\,\rho_{w'}'\, \frac{\partial}{\partial z'} + \rho_{z'}'\, \frac{\partial}{\partial w'} \Big). 
\] Furthermore, the {\sl Levi determinant:} \[ \aligned \Levi(\rho) \,:=\, &\, -\, \left\vert \begin{array}{ccc} 0 & \rho_z & \rho_w \\ \rho_{\overline{z}} & \rho_{z\overline{z}} & \rho_{w\overline{z}} \\ \rho_{\overline{w}} & \rho_{z\overline{w}} & \rho_{w\overline{w}} \end{array} \right\vert \\ \,=\, &\, \rho_{\overline{z}}\rho_z\rho_{w\overline{w}} - \rho_{\overline{z}}\rho_w\rho_{z\overline{w}} - \rho_{\overline{w}}\rho_z\rho_{\overline{z}w} + \rho_{\overline{w}}\rho_w\rho_{z\overline{z}}, \endaligned \] enjoys (exercise): \[ \mu^3\, {\sf L}(\rho) \,=\, \big(f_zg_w-f_wg_z\big)\, \big(\overline{f}_{\overline{z}} \overline{g}_{\overline{w}} - \overline{f}_{\overline{w}} \overline{g}_{\overline{z}} \big)\, {\sf L}(\rho') \eqno {\scriptstyle{(\text{\rm on}\,M)}}. \] \begin{Definition} A smooth hypersurface $M^3 \subset \C^2$ is called {\sl Levi nondegenerate} at a point $p \in M$ if: \[ 0 \,\neq\, {\sf L}(p). \] \end{Definition} From now on, all $M$ will be assumed smooth and Levi nondegenerate at every point, without further mention. When $0 \neq \rho_w(p) = \rho_{\overline{w}}(p)$ at a point $p = (z_p, w_p) \in M$, the implicit function theorem represents $M$ as a complex graph: \[ w \,=\, \Theta \big(z,\overline{z},\overline{w}\big) \ \ \ \ \ \ \ \ \ \ \ \ \ \text{\rm or equivalently:} \ \ \ \ \ \ \ \ \ \ \ \ \ \overline{w} \,=\, \overline{\Theta} \big( \overline{z},z,w \big), \] in terms of a $\mathcal{C}^\omega$ defining function $\Theta$. A similar graphed representation exists at points $q = (z_q, w_q) \in M$ at which $0 \neq \rho_z(q) = \rho_{\overline{z}}(q)$. 
Differentiating the identity: \[ 0 \,\equiv\, \rho \big( z, \Theta\big(z,\overline{z},\overline{w}\big), \overline{z},\overline{w} \big) \eqno {\scriptstyle{(\text{\rm in}\,\C\{z,\overline{z},\overline{w}\})}}, \] once with respect to $z$, $\overline{z}$, $\overline{w}$ yields: \[ \aligned 0 & \,\equiv\, \rho_z + \Theta_z\,\rho_w, \notag \\ 0 & \,\equiv\, \rho_{\overline{z}} + \Theta_{\overline{z}}\, \rho_w, \\ 0 & \,\equiv\, \rho_{\overline{w}} + \Theta_{\overline{w}}\, \rho_w, \endaligned \] and next twice with respect to $zz$, $z\overline{z}$, $z\overline{w}$, $\overline{z}\overline{z}$, $\overline{z}\overline{w}$, $\overline{w}\overline{w}$ gives: \tagsleft@true\usetagform{default} \begin{align} \label{2-rho-Theta} 0 & \,\equiv\, \rho_{zz} + 2\,\Theta_z\, \rho_{zw} + \Theta_z\,\Theta_z\, \rho_{ww} + \Theta_{zz}\, \rho_w, \notag \\ 0 & \,\equiv\, \rho_{z\overline{z}} + \Theta_z\, \rho_{\overline{z}w} + \Theta_{\overline{z}}\, \rho_{zw} + \Theta_z\,\Theta_{\overline{z}}\, \rho_{ww} + \Theta_{z\overline{z}}\, \rho_w, \notag \\ 0 & \,\equiv\, \rho_{z\overline{w}} + \Theta_z\, \rho_{w\overline{w}} + \Theta_{\overline{w}}\, \rho_{zw} + \Theta_z\,\Theta_{\overline{w}}\, \rho_{ww} + \Theta_{z\overline{w}}\, \rho_w, \\ 0 & \,\equiv\, \rho_{\overline{z}\overline{z}} + 2\,\Theta_{\overline{z}}\, \rho_{\overline{z}w} + \Theta_{\overline{z}}\,\Theta_{\overline{z}}\, \rho_{ww} + \Theta_{\overline{z}\overline{z}}\, \rho_w, \notag \\ 0 & \,\equiv\, \rho_{\overline{z}\overline{w}} + \Theta_{\overline{z}}\, \rho_{w\overline{w}} + \Theta_{\overline{w}}\, \rho_{\overline{z}w} + \Theta_{\overline{z}}\,\Theta_{\overline{w}}\, \rho_{ww} + \Theta_{\overline{z}\overline{w}}\, \rho_w, \notag \\ 0 & \,\equiv\, \rho_{\overline{w}\overline{w}} + 2\,\Theta_{\overline{w}}\, \rho_{w\overline{w}} + \Theta_{\overline{w}}\,\Theta_{\overline{w}}\, \rho_{ww} + \Theta_{\overline{w}\overline{w}}\, \rho_w. 
\notag \end{align} It holds that: \[ \big\{ \rho_w \neq 0 \big\} \,=\, \big\{ \Theta_{\overline{w}} \neq 0 \big\} \eqno {\scriptstyle{(\text{\rm in}\,M)}}. \] \begin{Definition} Call $M$ {\sl spherical} if it is locally biholomorphic to: \[ S^3 \,:=\, \big\{ (z,w)\in\C^2 \colon\, z\overline{z} + w\overline{w} = 1 \big\}. \] \end{Definition} When $M$ is connected, the principle of analytic continuation guarantees propagation of this property. Next, set: \[ \Delta \,:=\, -\,\Theta_{\overline{w}}\, \Theta_{z\overline{z}} + \Theta_{\overline{z}}\, \Theta_{z\overline{w}}. \] \begin{Lemma} At a point $p \in \{ \Theta_{\overline{w}} \neq 0\}${\em :} \[ M\,\, \text{is Levi nondegenerate at}\,\, p \,\,\,\Longleftrightarrow\,\,\, \Delta(p) \,\neq\, 0.\eqno\qed \] \end{Lemma} Levi nondegeneracy being a biholomorphically invariant feature, spherical $M$ are so since $S^3$ is. Without restricting assumptions like {\em e.g.} {\sl rigidity} or {\sl tubity} (\cite{Isaev-2011}), an explicit, complete characterization of sphericity in terms of some defining function for a hypersurface $M^3 \subset \C^2$ appeared in October 2009 as {\footnotesize\sf arxiv.org/abs/0910.1694/}, {\em cf.} also~\cite{Nurowski-Sparling-2003, Huang-Ji-2007}. To recall it, set: \[ \Box \,:=\, \frac{\Delta}{-\,\Theta_{\overline{w}}}, \] and use instead: \[ \aligned \overline{\mathcal{L}} \,:=\, &\, -\, \frac{1}{\rho_{\overline{w}}}\, \overline{L} \\ \,=\, & \frac{\partial}{\partial\overline{z}} - \frac{\Theta_{\overline{z}}}{\Theta_{\overline{w}}}\, \frac{\partial}{\partial\overline{w}}. \endaligned \] \begin{Theorem} {\rm ({\cite{Merker-2010}})} At a point $p \in \{ \Theta_{\overline{w}} \neq 0\}$, the hypersurface $M$ is spherical if and only if, near $p$: \[ 0 \,\equiv\, \frac{1}{\Box}\, \overline{\mathcal{L}} \bigg( \frac{1}{\Box}\, \overline{\mathcal{L}} \bigg( \frac{1}{\Box}\, \overline{\mathcal{L}} \bigg( \frac{1}{\Box}\, \overline{\mathcal{L}} \Big( \Theta_{zz} \Big) \bigg) \bigg) \bigg). 
\eqno\qed \] \end{Theorem} Exchanging $z \longleftrightarrow w$ yields a similar formula at points $q \in \{ \rho_z \neq 0\}$. \begin{Corollary} \label{7-terms-Theta} In $\{ \rho_w \neq 0\} = \{ \Theta_{\overline{w}} \neq 0\}$, a partly expanded characterization of sphericity is: \tagsleft@false\usetagform{EngelLie} \begin{align} 0 & \,\,\equiv\,\, \frac{\overline{\mathcal{L}}^4(\Theta_{zz})}{\Box^4} \,-\, \notag \\ & \ \ \ \ \ -\, 6\, \frac{ \overline{\mathcal{L}}(\Box)\, \overline{\mathcal{L}}^3(\Theta_{zz})}{ \Box^5} - 4\, \frac{ \overline{\mathcal{L}}^2(\Box)\, \overline{\mathcal{L}}^2(\Theta_{zz})}{ \Box^5} - \frac{ \overline{\mathcal{L}}^3(\Box)\, \overline{\mathcal{L}}(\Theta_{zz})}{ \Box^5} \, + \notag \\ & \ \ \ \ \ + 15\, \frac{ \big[\,\overline{\mathcal{L}}(\Box)\big]^2\, \overline{\mathcal{L}}^2(\Theta_{zz})}{ \Box^6} + 10\, \frac{ \overline{\mathcal{L}}(\Box)\, \overline{\mathcal{L}}^2(\Box)\, \overline{\mathcal{L}}(\Theta_{zz})}{ \Box^6} \,- \notag \\ & \ \ \ \ \ -\, 15\, \frac{ \big[\,\overline{\mathcal{L}}(\Box)\big]^3\, \overline{\mathcal{L}}(\Theta_{zz})}{ \Box^7}. \tag \qed \end{align} \end{Corollary} Without presenting details, it is known that Cartan's treatment of the concerned biholomorphic equivalence problem brings a single invariant function: \[ \ICartan^M \colon \ \ \ M \,\longrightarrow\, \C, \] other invariants being (covariant) derivations of it, and that: \[ M\,\, \text{\rm is spherical}\,\, \,\,\,\Longleftrightarrow\,\,\, 0 \,\equiv\, \ICartan^M. \] \begin{Notation} For two functions $\Iaux_1 \colon M \longrightarrow \C$ and $\Iaux_2 \colon M \longrightarrow \C$, write: \[ \Iaux_2 \,\doteqdot\, \Iaux_1, \] when there is a nowhere vanishing function $\mu \colon M \longrightarrow \C \backslash \{0\}$ such that: \[ \Iaux_2 \,=\, \mu\, \Iaux_1. \] \end{Notation} For instance: \[ \ICartan^M \,\doteqdot\, \bigg( \frac{1}{\Box}\, \overline{\mathcal{L}} \bigg)^{\!4} \Big(\Theta_{zz}\Big). 
\] Now, translate the formula of Corollary~{\ref{7-terms-Theta}} to the case where $M$ is given in implicit representation: \[ 0 \,=\, \rho \big( z,w,\overline{z},\overline{w} \big). \] Set: \[ \Hessian(\rho) \,:=\, \rho_z\rho_z\,\rho_{ww} - 2\,\rho_z\rho_w\,\rho_{zw} + \rho_w\rho_w\,\rho_{zz}, \] with (exercise) on $\{\rho_w\neq 0\}$: \[ \Theta_{zz} \,=\, -\,\frac{\Hessian(\rho)}{\rho_w\rho_w\rho_w}. \] Remind the {\sl Levi determinant:} \[ \Levi(\rho) \,:=\, \rho_{\overline{z}}\rho_z\rho_{w\overline{w}} - \rho_{\overline{z}}\rho_w\rho_{z\overline{w}} - \rho_{\overline{w}}\rho_z\rho_{\overline{z}w} + \rho_{\overline{w}}\rho_w\rho_{z\overline{z}}, \] that satisfies on $\{\rho_w \neq 0\}$: \[ \Levi(\rho) \,\doteqdot\, \Delta, \] {\em i.e.} more precisely (exercise) thanks to~({\ref{2-rho-Theta}}): \[ \Levi(\rho) \,=\, -\, \rho_w\,\rho_w\,\rho_w\,\Delta. \] \begin{Corollary} On $\{\rho_w \neq 0\}$, up to a nowhere vanishing function: \[ \ICartan^M \,\doteqdot\, \Iaux_{[w]}, \] where: \begin{footnotesize} \tagsleft@false\usetagform{EngelLie} \begin{align} \Iaux_{[w]} & \,\,:=\,\, 12\,\big(\rho_w\big)^9\, \bigg\{ \bigg[ \frac{\Levi(\rho)}{\rho_w^2} \bigg]^3 \overline{L}^4 \bigg( \frac{\Hessian(\rho)}{\rho_w^3} \bigg) \,- \notag \\ & \ \ \ \ \ -\, 6\, \bigg[ \frac{\Levi(\rho)}{\rho_w^2} \bigg]^2 \overline{L} \bigg( \frac{\Levi(\rho)}{\rho_w^2} \bigg) \overline{L}^3 \bigg( \frac{\Hessian(\rho)}{\rho_w^3} \bigg) - 4\, \bigg[ \frac{\Levi(\rho)}{\rho_w^2} \bigg]^2 \overline{L}^2 \bigg( \frac{\Levi(\rho)}{\rho_w^2} \bigg) \overline{L}^2 \bigg( \frac{\Hessian(\rho)}{\rho_w^3} \bigg) - \bigg[ \frac{\Levi(\rho)}{\rho_w^2} \bigg]^2 \overline{L}^3 \bigg( \frac{\Levi(\rho)}{\rho_w^2} \bigg) \overline{L} \bigg( \frac{\Hessian(\rho)}{\rho_w^3} \bigg) \,+ \notag \\ & \ \ \ \ \ + 15\, \frac{\Levi(\rho)}{\rho_w^2} \bigg[ \overline{L} \bigg( \frac{\Levi(\rho)}{\rho_w^2} \bigg) \bigg]^2 \overline{L}^2 \bigg( \frac{\Hessian(\rho)}{\rho_w^3} \bigg) + 10\, \frac{\Levi(\rho)}{\rho_w^2} 
\overline{L} \bigg( \frac{\Levi(\rho)}{\rho_w^2} \bigg) \overline{L}^2 \bigg( \frac{\Levi(\rho)}{\rho_w^2} \bigg) \overline{L} \bigg( \frac{\Hessian(\rho)}{\rho_w^3} \bigg) \,- \notag \\ & \ \ \ \ \ -\, 15\, \bigg[ \overline{L} \bigg( \frac{\Levi(\rho)}{\rho_w^2} \bigg) \bigg]^3 \overline{L} \bigg( \frac{\Hessian(\rho)}{\rho_w^3} \bigg) \bigg\}. \tag{\qed} \end{align} \end{footnotesize} \end{Corollary} Furthermore, exchanging $z \longleftrightarrow w$, there is an {\em exact} formal coincidence (exercise!): \[ \Iaux_{[z]} \,=\, \Iaux_{[w]}. \] In~\cite{Merker-survey-2017}, an alternative formula for an equivalent invariant $\Maux \doteqdot \Iaux_{[w]}$ is discussed, but it incorporates $5! = 120$ terms instead of $7$ above, and is less cleaned up or finalized to really compute exciting things (by hand!). \Section{\bf Pullback to an Exceptional Curve on an Ellipsoid} \label{pullback-ellipsoid} \HEAD{\ref{pullback-ellipsoid}.~{\sf Pullback to an Exceptional Curve on an Ellipsoid} }{ Wei-Guo {\sc Foo}, Jo\"el {\sc Merker}, The-Anh {\sc Ta} Orsay University, Paris, France} To prove Theorem~{\ref{a-b-cos-sin}}, it suffices to verify that: \[ 0 \overset{\text{\bf ?}}{\,\,=\,\,} \gamma^\ast \big( \Iaux_{[w]} \big)(\theta) \eqno {\scriptstyle{(\forall\,\theta\,\in\,\R)}}. \] Drop the factor $12\, (\rho_w)^9 \doteqdot 1$, and call $\Taux_1$, $\Taux_2$, $\Taux_3$, $\Taux_4$, $\Taux_5$, $\Taux_6$, $\Taux_7$ the seven concerned terms, so that the goal becomes: \[ 0 \overset{\text{\bf ?}}{\,\,=\,\,} \gamma^\ast \big(\Taux_1\big) + \gamma^\ast \big(\Taux_2\big) + \gamma^\ast \big(\Taux_3\big) + \gamma^\ast \big(\Taux_4\big) + \gamma^\ast \big(\Taux_5\big) + \gamma^\ast \big(\Taux_6\big) + \gamma^\ast \big(\Taux_7\big). 
\] Hand computations provide formulas of the shape: \[ \aligned \Taux_1 & \,=\, {\textstyle{\frac{1}{8}}}\, \isqrt\, (a-1)\, \frac{\Naux_1}\Daux, \\ \Taux_2 & \,=\, {\textstyle{\frac{3}{4}}}\, \isqrt\, (a-1)\, \frac{\Naux_2}\Daux, \\ \Taux_3 & \,=\, {\textstyle{\frac{1}{2}}}\, \isqrt\, (a-1)\, \frac{\Naux_3}\Daux, \\ \Taux_4 & \,=\, {\textstyle{\frac{1}{8}}}\, \isqrt\, (a-1)\, \frac{\Naux_4}\Daux, \\ \Taux_5 & \,=\, {\textstyle{\frac{15}{8}}}\, \isqrt\, (a-1)\, \frac{\Naux_5}\Daux, \\ \Taux_6 & \,=\, {\textstyle{\frac{5}{4}}}\, \isqrt\, (a-1)\, \frac{\Naux_6}\Daux, \\ \Taux_7 & \,=\, {\textstyle{\frac{15}{8}}}\, \isqrt\, (a-1)\, \frac{\Naux_7}\Daux, \endaligned \] with, in denominator place: \[ \footnotesize \aligned \Daux \,:=\, \Big( \sqrt{a}\,\cossmall\,\theta - \isqrt\,\sqrt{b}\, \sinsmall\,\theta \Big)^8\, \big( a\,b-1 \big)\, \bigg( \frac{ b-1}{ a\,b-1} \bigg)^{\!\frac{11}{2}}, \endaligned \] with numerator 1: \[ \!\!\!\!\!\!\!\!\!\!\!\!\!\!\!\!\!\!\!\!\!\!\!\!\!\!\!\!\!\! \!\!\!\!\!\!\!\!\!\! 
\scriptsize \aligned \Naux_1 := \cossmall^7\theta\, & \Big[ 499\,a^{9/2}b^3 + 625\,a^{9/2}b^2 - 233\,a^{7/2}b^3 + 205\,a^{9/2}b - 631\,a^{7/2}b^2 + 15\,a^{9/2} - 415\,a^{7/2}b - 65\,a^{7/2} \Big] \\ +\, \isqrt\, \cossmall^6\theta\,\sinsmall\,\theta\, & \Big[ 2887\,a^4b^{7/2} + 4401\,a^4b^{5/2} - 1297\,a^3b^{7/2} + 1905\,a^4b^{3/2} - 4059\,a^3b^{5/2} + 215\,a^4b^{1/2} - 3327\,a^3b^{3/2} - 725\,a^3b^{1/2} \Big] \\ +\, \cossmall^5\theta\,\sinsmall^2\theta\, & \Big[ -7023\,a^{7/2}b^4 - 13021\,a^{7/2}b^3 + 3013\,a^{5/2}b^4 - 7105\,a^{7/2}b^2 + 11011\,a^{5/2}b^3 - 1075\,a^{7/2}b + 11059\,a^{5/2}b^2 + 3141\,a^{5/2}b \Big] \\ +\, \isqrt\, \cossmall^4\theta\,\sinsmall^3\theta\, & \Big[ -\,9267\,a^3b^{9/2} - 20989\,a^3b^{7/2} + 3757\,a^2b^{9/2} - 14101\,a^3\,b^{5/2} + 16279\,a^2b^{7/2} - 2683\,a^3b^{3/2} + 19891\,a^2b^{5/2} + 7113\,a^2b^{3/2} \Big] \\ +\, \cossmall^3\theta\,\sinsmall^4\theta\, & \Big[ 7113\,a^{5/2}b^5 + 19891\,a^{5/2}b^4 - 2683\,a^{3/2}b^5 + 16279\,a^{5/2}b^3 - 14101\,a^{3/2}b^4 + 3757\,a^{5/2}b^2 - 20989\,a^{3/2}b^3 - 9267\,a^{3/2}b^2 \Big] \\ +\, \isqrt\, \cossmall^2\theta\,\sinsmall^5\theta\, & \Big[ 3141\,a^2b^{11/2} + 11059\,a^2b^{9/2} - 1075\,ab^{11/2} + 11011\,a^2b^{7/2} - 7105\,ab^{9/2} + 3013\,a^2b^{5/2} - 13021\,ab^{7/2} - 7023\,ab^{5/2} \Big] \\ +\, \cossmall^1\theta\,\sinsmall^6\theta\, & \Big[ -\,725\,a^{3/2}b^6 - 3327\,a^{3/2}b^5 + 215\,a^{1/2}b^6 - 4059\,a^{3/2}b^4 + 1905\,a^{1/2}b^5 - 1297\,a^{3/2}b^3 + 4401\,a^{1/2}b^4 + 2887\,a^{1/2}b^3 \Big] \\ +\, \isqrt\, \sinsmall^7\,\theta\, & \Big[ -\,65\,ab^{13/2} - 415\,ab^{11/2} + 15\,b^{13/2} - 631\,ab^{9/2} + 205\,b^{11/2} - 233\,ab^{7/2} + 625\,b^{9/2} + 499\,b^{7/2} \Big], \endaligned \] with numerator 2: \[ \!\!\!\!\!\!\!\!\!\!\!\!\!\!\!\!\!\!\!\!\!\!\!\!\!
\scriptsize \aligned \Naux_2 := \cossmall^7\theta\, & \Big[ -\,165\,a^{9/2}b^3 - 193\,a^{9/2}\,b^2 + 93\,a^{7/2}b^3 - 67\,a^{9/2}b + 205\,a^{7/2}b^2 - 7\,a^{9/2} + 115\,a^{7/2}b + 19\,a^{7/2} \Big] \\ +\, \isqrt\, \cossmall^6\theta\,\sinsmall\,\theta\, & \Big[ -\,925\,a^4b^{7/2} - 1389\,a^4b^{5/2} + 505\,a^3b^{7/2} - 627\,a^4b^{3/2} + 1341\,a^3b^{5/2} - 83\,a^4b^{1/2} + 975\,a^3b^{3/2} + 203\,a^3b^{1/2} \Big] \\ +\, \cossmall^5\theta\,\sinsmall^2\theta\, & \Big[ 2177\,a^{7/2}b^4 + 4141\,a^{7/2}b^3 - 1145\,a^{5/2}b^4 + 2359\,a^{7/2}b^2 - 3673\,a^{5/2}b^3 + 395\,a^{7/2}b - 3367\,a^{5/2}b^2 - 887\,a^{5/2}b \Big] \\ +\, \isqrt\, \cossmall^4\theta\,\sinsmall^3\theta\, & \Big[ 2777\,a^3b^{9/2} + 6649\,a^3b^{7/2} - 1397\,a^2b^{9/2} + 4711\,a^3b^{5/2} - 5449\,a^2b^{7/2} + 983\,a^3b^{3/2} - 6211\,a^2b^{5/2} - 2063\,a^2b^{3/2} \Big] \\ +\, \cossmall^3\theta\,\sinsmall^4\theta\, & \Big[ -\,2063\,a^{5/2}b^5 - 6211\,a^{5/2}b^4 + 983\,a^{3/2}b^5 - 5449\,a^{5/2}b^3 + 4711\,a^{3/2}b^4 - 1397\,a^{5/2}b^2 + 6649\,a^{3/2}b^3 + 2777\,a^{3/2}b^2 \Big] \\ +\, \isqrt\, \cossmall^2\theta\,\sinsmall^5\theta\, & \Big[ -\,887\,a^2b^{11/2} - 3367\,a^2b^{9/2} + 395\,ab^{11/2} - 3673\,a^2b^{7/2} + 2359\,ab^{9/2} - 1145\,a^2b^{5/2} + 4141\,ab^{7/2} + 2177\,ab^{5/2} \Big] \\ +\, \cossmall^1\theta\,\sinsmall^6\theta\, & \Big[ 203\,a^{3/2}b^6 + 975\,a^{3/2}b^5 - 83\,a^{1/2}b^6 + 1341\,a^{3/2}b^4 - 627\,a^{1/2}b^5 + 505\,a^{3/2}b^3 - 1389\,a^{1/2}b^4 - 925\,a^{1/2}b^3 \Big] \\ +\, \isqrt\, \sinsmall^7\,\theta\, & \Big[ 19\,ab^{13/2} + 115\,ab^{11/2} - 7\,b^{13/2} + 205\,ab^{9/2} - 67\,b^{11/2} + 93\,ab^{7/2} - 193\,b^{9/2} - 165\,b^{7/2} \Big], \endaligned \] with numerator 3: \[ \!\!\!\!\!\!\!\!\!\!\!\!\!\!\!\!\!\!\!\! 
\scriptsize \aligned \Naux_3 := \cossmall^7\theta\, & \Big[ -\,91\,a^{9/2}b^3 - 109\,a^{9/2}b^2 + 65\,a^{7/2}b^3 - 37\,a^{9/2}b + 115\,a^{7/2}b^2 - 3\,a^{9/2} + 55\,a^{7/2}b + 5\,a^{7/2} \Big] \\ +\, \isqrt\, \cossmall^6\theta\,\sinsmall\,\theta\, & \Big[ -\,499\,a^4b^{7/2} - 777\,a^4b^{5/2} + 349\,a^3b^{7/2} - 357\,a^4b^{3/2} + 771\,a^3b^{5/2} - 47\,a^4b^{1/2} + 483\,a^3b^{3/2} + 77\,a^3b^{1/2} \Big] \\ +\, \cossmall^5\theta\,\sinsmall^2\theta\, & \Big[ 1143\,a^{7/2}b^4 + 2281\,a^{7/2}b^3 - 781\,a^{5/2}b^4 + 1369\,a^{7/2}b^2 - 2143\,a^{5/2}b^3 + 247\,a^{7/2}b - 1723\,a^{5/2}b^2 - 393\,a^{5/2}b \Big] \\ +\, \isqrt\, \cossmall^4\theta\,\sinsmall^3\theta\, & \Big[ 1407\,a^3b^{9/2} + 3589\,a^3b^{7/2} - 937\,a^2b^{9/2} + 2761\,a^3b^{5/2} - 3199\,a^2b^{7/2} + 643\,a^3b^{3/2} - 3271\,a^2b^{5/2} - 993\,a^2b^{3/2} \Big] \\ +\, \cossmall^3\theta\,\sinsmall^4\theta\, & \Big[ -\,993\,a^{5/2}b^5 - 3271\,a^{5/2}b^4 + 643\,a^{3/2}b^5 - 3199\,a^{5/2}b^3 + 2761\,a^{3/2}b^4 - 937\,a^{5/2}b^2 + 3589\,a^{3/2}b^3 + 1407\,a^{3/2}b^2 \Big] \\ +\, \isqrt\, \cossmall^2\theta\,\sinsmall^5\theta\, & \Big[ -\,393\,a^2b^{11/2} - 1723\,a^2b^{9/2} + 247\,ab^{11/2} - 2143\,a^2b^{7/2} + 1369\,ab^{9/2} - 781\,a^2b^{5/2} + 2281\,ab^{7/2} + 1143\,ab^{5/2} \Big] \\ +\, \cossmall^1\theta\,\sinsmall^6\theta\, & \Big[ 77\,a^{3/2}b^6 + 483\,a^{3/2}b^5 - 47\,a^{1/2}b^6 + 771\,a^{3/2}b^4 - 357\,a^{1/2}b^5 + 349\,a^{3/2}b^3 - 777\,a^{1/2}b^4 - 499\,a^{1/2}b^3 \Big] \\ +\, \isqrt\, \sinsmall^7\,\theta\, & \Big[ 5\,ab^{13/2} + 55\,ab^{11/2} - 3\,b^{13/2} + 115\,ab^{9/2} - 37\,b^{11/2} + 65\,ab^{7/2} - 109\,b^{9/2} - 91\,b^{7/2} \Big], \endaligned \] with numerator 4: \[ \!\!\!\!\!\!\!\!\!\!\!\!\!\!\!\!\!\!\!\! 
\scriptsize \aligned \Naux_4 := \cossmall^7\theta\, & \Big[ -\,75\,a^{9/2}b^3 - 91\,a^{9/2}b^2 + 75\,a^{7/2}b^3 - 25\,a^{9/2}b + 91\,a^{7/2}b^2 - a^{9/2} + 25\,a^{7/2}b + a^{7/2} \Big] \\ +\, \isqrt\, \cossmall^6\theta\,\sinsmall\,\theta\, & \Big[ -\,391\,a^4b^{7/2} - 639\,a^4b^{5/2} + 391\,a^3b^{7/2} - 285\,a^4b^{3/2} + 639\,a^3b^{5/2} - 29\,a^4b^{1/2} + 285\,a^3b^{3/2} + 29\,a^3b^{1/2} \Big] \\ +\, \cossmall^5\theta\,\sinsmall^2\theta\, & \Big[ 839\,a^{7/2}b^4 + 1831\,a^{7/2}b^3 - 839\,a^{5/2}b^4 + 1165\,a^{7/2}b^2 - 1831\,a^{5/2}b^3 + 197\,a^{7/2}b - 1165\,a^{5/2}b^2 - 197\,a^{5/2}b \Big] \\ +\, \isqrt\, \cossmall^4\theta\,\sinsmall^3\theta\, & \Big[ 947\,a^3b^{9/2} + 2779\,a^3b^{7/2} - 947\,a^2b^{9/2} + 2401\,a^3b^{5/2} - 2779\,a^2b^{7/2} + 593\,a^3b^{3/2} - 2401\,a^2b^{5/2} - 593\,a^2b^{3/2} \Big] \\ +\, \cossmall^3\theta\,\sinsmall^4\theta\, & \Big[ -\,593\,a^{5/2}b^5 - 2401\,a^{5/2}b^4 + 593\,a^{3/2}b^5 - 2779\,a^{5/2}b^3 + 2401\,a^{3/2}b^4 - 947\,a^{5/2}b^2 + 2779\,a^{3/2}b^3 + 947\,a^{3/2}b^2 \Big] \\ +\, \isqrt\, \cossmall^2\theta\,\sinsmall^5\theta\, & \Big[ -\,197\,a^2b^{11/2} - 1165\,a^2b^{9/2} + 197\,ab^{11/2} - 1831\,a^2b^{7/2} + 1165\,ab^{9/2} - 839\,a^2b^{5/2} + 1831\,ab^{7/2} + 839\,ab^{5/2} \Big] \\ +\, \cossmall^1\theta\,\sinsmall^6\theta\, & \Big[ 29\,a^{3/2}b^6 + 285\,a^{3/2}b^5 - 29\,a^{1/2}b^6 + 639\,a^{3/2}b^4 - 285\,a^{1/2}b^5 + 391\,a^{3/2}b^3 - 639\,a^{1/2}b^4 - 391\,a^{1/2}b^3 \Big] \\ +\, \isqrt\, \sinsmall^7\,\theta\, & \Big[ ab^{13/2} + 25\,ab^{11/2} - b^{13/2} + 91\,ab^{9/2} - 25\,b^{11/2} + 75\,ab^{7/2} - 91\,b^{9/2} - 75\,b^{7/2} \Big], \endaligned \] with numerator 5: \[ \!\!\!\!\!\!\!\!\!\!\!\!\!\!\!\!\!\!\!\! 
\scriptsize \aligned \Naux_5 := \cossmall^7\theta\, & \Big[ 63\,a^{9/2}b^3 + 69\,a^{9/2}b^2 - 45\,a^{7/2}b^3 + 25\,a^{9/2}b - 75\,a^{7/2}b^2 + 3\,a^{9/2} - 35\,a^{7/2}b - 5\,a^{7/2} \Big] \\ +\, \isqrt\, \cossmall^6\theta\,\sinsmall\,\theta\, & \Big[ 339\,a^4b^{7/2} + 509\,a^4b^{5/2} - 237\,a^3b^{7/2} + 237\,a^4b^{3/2} - 511\,a^3b^{5/2} + 35\,a^4b^{1/2} - 315\,a^3b^{3/2} - 57\,a^3b^{1/2} \Big] \\ +\, \cossmall^5\theta\,\sinsmall^2\theta\, & \Big[ -\,763\,a^{7/2}b^4 - 1521\,a^{7/2}b^3 + 521\,a^{5/2}b^4 - 909\,a^{7/2}b^2 + 1431\,a^{5/2}b^3 - 167\,a^{7/2}b + 1143\,a^{5/2}b^2 + 265\,a^{5/2}b \Big] \\ +\, \isqrt\, \cossmall^4\theta\,\sinsmall^3\theta\, & \Big[ -\,927\,a^3b^{9/2} - 2409\,a^3b^{7/2} + 617\,a^2b^{9/2} - 1841\,a^3b^{5/2} + 2139\,a^2b^{7/2} - 423\,a^3b^{3/2} + 2191\,a^2b^{5/2} + 653\,a^2b^{3/2} \Big] \\ +\, \cossmall^3\theta\,\sinsmall^4\theta\, & \Big[ 653\,a^{5/2}b^5 + 2191\,a^{5/2}b^4 - 423\,a^{3/2}b^5 + 2139\,a^{5/2}b^3 - 1841\,a^{3/2}b^4 + 617\,a^{5/2}b^2 - 2409\,a^{3/2}b^3 - 927\,a^{3/2}b^2 \Big] \\ +\, \isqrt\, \cossmall^2\theta\,\sinsmall^5\theta\, & \Big[ 265\,a^2b^{11/2} + 1143\,a^2b^{9/2} - 167\,ab^{11/2} + 1431\,a^2b^{7/2} - 909\,ab^{9/2} + 521\,a^2b^{5/2} - 1521\,ab^{7/2} - 763\,ab^{5/2} \Big] \\ +\, \cossmall^1\theta\,\sinsmall^6\theta\, & \Big[ -\,57\,a^{3/2}b^6 - 315\,a^{3/2}b^5 + 35\,a^{1/2}b^6 - 511\,a^{3/2}b^4 + 237\,a^{1/2}b^5 - 237\,a^{3/2}b^3 + 509\,a^{1/2}b^4 + 339\,a^{1/2}b^3 \Big] \\ +\, \isqrt\, \sinsmall^7\,\theta\, & \Big[ -\,5\,ab^{13/2} - 35\,ab^{11/2} + 3\,b^{13/2} - 75\,ab^{9/2} + 25\,b^{11/2} - 45\,ab^{7/2} + 69\,b^{9/2} + 63\,b^{7/2} \Big], \endaligned \] with numerator 6: \[ \!\!\!\!\!\!\!\!\!\!\!\!\!\!\!\!\!\!\!\! 
\scriptsize \aligned \Naux_6 := \cossmall^7\theta\, & \Big[ 39\,a^{9/2}b^3 + 43\,a^{9/2}b^2 - 39\,a^{7/2}b^3 + 13\,a^{9/2}b - 43\,a^{7/2}b^2 + a^{9/2} - 13\,a^{7/2}b - a^{7/2} \Big] \\ +\, \isqrt\, \cossmall^6\theta\,\sinsmall\,\theta\, & \Big[ 199\,a^4b^{7/2} + 315\,a^4b^{5/2} - 199\,a^3b^{7/2} + 141\,a^4b^{3/2} - 315\,a^3b^{5/2} + 17\,a^4b^{1/2} - 141\,a^3b^{3/2} - 17\,a^3b^{1/2} \Big] \\ +\, \cossmall^5\theta\,\sinsmall^2\theta\, & \Big[ -\,419\,a^{7/2}b^4 - 919\,a^{7/2}b^3 + 419\,a^{5/2}b^4 - 577\,a^{7/2}b^2 + 919\,a^{5/2}b^3 - 101\,a^{7/2}b + 577\,a^{5/2}b^2 + 101\,a^{5/2}b \Big] \\ +\, \isqrt\, \cossmall^4\theta\,\sinsmall^3\theta\, & \Big[ -\,467\,a^3b^{9/2} - 1399\,a^3b^{7/2} + 467\,a^2b^{9/2} - 1201\,a^3b^{5/2} + 1399\,a^2b^{7/2} - 293\,a^3b^{3/2} + 1201\,a^2b^{5/2} + 293\,a^2b^{3/2} \Big] \\ +\, \cossmall^3\theta\,\sinsmall^4\theta\, & \Big[ 293\,a^{5/2}b^5 + 1201\,a^{5/2}b^4 - 293\,a^{3/2}b^5 + 1399\,a^{5/2}b^3 - 1201\,a^{3/2}b^4 + 467\,a^{5/2}b^2 - 1399\,a^{3/2}b^3 - 467\,a^{3/2}b^2 \Big] \\ +\, \isqrt\, \cossmall^2\theta\,\sinsmall^5\theta\, & \Big[ 101\, a^2b^{11/2} + 577\,a^2b^{9/2} - 101\,ab^{11/2} + 919\,a^2b^{7/2} - 577\,ab^{9/2} + 419\,a^2b^{5/2} - 919\,ab^{7/2} - 419\,ab^{5/2} \Big] \\ +\, \cossmall^1\theta\,\sinsmall^6\theta\, & \Big[ -\,17\,a^{3/2}b^6 - 141\,a^{3/2}b^5 + 17\,a^{1/2}b^6 - 315\,a^{3/2}b^4 + 141\,a^{1/2}b^5 - 199\,a^{3/2}b^3 + 315\,a^{1/2}b^4 + 199\,a^{1/2}b^3 \Big] \\ +\, \isqrt\, \sinsmall^7\,\theta\, & \Big[ -\,ab^{13/2} - 13\,ab^{11/2} + b^{13/2} - 43\,ab^{9/2} + 13\,b^{11/2} - 39\,ab^{7/2} + 43\,b^{9/2} + 39\,b^{7/2} \Big], \endaligned \] with numerator 7: \[ \!\!\!\!\!\!\!\!\!\!\!\!\!\!\!\!\!\!\!\! 
\scriptsize \aligned \Naux_7 := \cossmall^7\theta\, & \Big[ -\,27\,a^{9/2}b^3 - 27\,a^{9/2}b^2 + 27\,a^{7/2}b^3 - 9\,a^{9/2}b + 27\,a^{7/2}b^2 - a^{9/2} + 9\,a^{7/2}b + a^{7/2} \Big] \\ +\, \isqrt\, \cossmall^6\theta\,\sinsmall\,\theta\, & \Big[ -\,135\,a^4b^{7/2} - 207\,a^4b^{5/2} + 135\,a^3b^{7/2} - 93\,a^4b^{3/2} + 207\,a^3b^{5/2} - 13\,a^4b^{1/2} + 93\,a^3b^{3/2} + 13\,a^3b^{1/2} \Big] \\ +\, \cossmall^5\theta\,\sinsmall^2\theta\, & \Big[ 279\,a^{7/2}b^4 + 615\,a^{7/2}b^3 - 279\,a^{5/2}b^4 + 381\,a^{7/2}b^2 - 615\,a^{5/2}b^3 + 69\,a^{7/2}b - 381\,a^{5/2}b^2 - 69\,a^{5/2}b \Big] \\ +\, \isqrt\, \cossmall^4\theta\,\sinsmall^3\theta\, & \Big[ 307\,a^3b^{9/2} + 939\,a^3b^{7/2} - 307\,a^2b^{9/2} + 801\,a^3b^{5/2} - 939\,a^2b^{7/2} + 193\,a^3b^{3/2} - 801\,a^2b^{5/2} - 193\,a^2b^{3/2} \Big] \\ +\, \cossmall^3\theta\,\sinsmall^4\theta\, & \Big[ -\,193\,a^{5/2}b^5 - 801\,a^{5/2}b^4 + 193\,a^{3/2}b^5 - 939\,a^{5/2}b^3 + 801\,a^{3/2}b^4 - 307\,a^{5/2}b^2 + 939\,a^{3/2}b^3 + 307\,a^{3/2}b^2 \Big] \\ +\, \isqrt\, \cossmall^2\theta\,\sinsmall^5\theta\, & \Big[ -\,69\,a^2b^{11/2} - 381\,a^2b^{9/2} + 69\,ab^{11/2} - 615\,a^2b^{7/2} + 381\,ab^{9/2} - 279\,a^2b^{5/2} + 615\,ab^{7/2} + 279\,ab^{5/2} \Big] \\ +\, \cossmall^1\theta\,\sinsmall^6\theta\, & \Big[ 13\,a^{3/2}b^6 + 93\,a^{3/2}b^5 - 13\,a^{1/2}b^6 + 207\,a^{3/2}b^4 - 93\,a^{1/2}b^5 + 135\,a^{3/2}b^3 - 207\,a^{1/2}b^4 - 135\,a^{1/2}b^3 \Big] \\ +\, \isqrt\, \sinsmall^7\,\theta\, & \Big[ ab^{13/2} + 9\,ab^{11/2} - b^{13/2} + 27\,ab^{9/2} - 9\,b^{11/2} + 27\,ab^{7/2} - 27\,b^{9/2} - 27\,b^{7/2} \Big]. 
\endaligned \] \proof[End of proof of Theorem~{\ref{a-b-cos-sin}}] The sum: \[ {\textstyle{\frac{1}{8}}}\, \Naux_1 (\theta) + {\textstyle{\frac{3}{4}}}\, \Naux_2 (\theta) + {\textstyle{\frac{1}{2}}}\, \Naux_3 (\theta) + {\textstyle{\frac{1}{8}}}\, \Naux_4 (\theta) + {\textstyle{\frac{15}{8}}}\, \Naux_5 (\theta) + {\textstyle{\frac{5}{4}}}\, \Naux_6 (\theta) + {\textstyle{\frac{15}{8}}}\, \Naux_7 (\theta) \,=\, 0, \] is indeed (visually!) identically null. \endproof \noindent {\scriptsize\sf [email protected], [email protected], [email protected]} \end{document}
\begin{document} \global\def{\normalsize \it References:}{{\normalsize \it References:}} \baselineskip 12.5pt \title{\LARGE \bf Heat transfer process with solid-solid interface:\\ Analytical and numerical solutions} \date{} \author{\hspace*{-10pt} \begin{minipage}[t]{2.3in} \normalsize \baselineskip 12.5pt \centerline{DIANA RUBIO} \centerline{Univ. Nacional de San Mart\'in} \centerline{Escuela de Ciencia y Tecnolog\'ia} \centerline{Centro de Matem\'atica Aplicada} \centerline{ITECA (CONICET-UNSAM)} \centerline{25 de Mayo y Francia, San Mart\'in} \centerline{ARGENTINA} \end{minipage} \kern 0in \begin{minipage}[t]{2.3in} \normalsize \baselineskip 12.5pt \centerline{DOMINGO A. TARZIA} \centerline{Universidad Austral} \centerline{FCE, Departamento de Matem\'atica} \centerline{Paraguay 1950, Rosario} \centerline{and CONICET} \centerline{ Godoy Cruz 2290, CABA,} \centerline{ARGENTINA} \end{minipage} \begin{minipage}[t]{2.3in} \normalsize \baselineskip 12.5pt \centerline{GUILLERMO F. UMBRICHT} \centerline{Univ.~Nac. de Gral.~Sarmiento} \centerline{Instituto de Ciencias} \centerline{Instituto del Desarrollo Humano} \centerline{J. M. Gutiérrez 1150} \centerline{Los Polvorines} \centerline{ARGENTINA} \end{minipage} \\ \\ \hspace*{-10pt} \begin{minipage}[b]{6.9in} \normalsize \baselineskip 12.5pt {\it Abstract:} This work is aimed at the study and analysis of the heat transport on a metal bar of length $L$ with a solid-solid interface. The process is assumed to be developed along one direction, across two homogeneous and isotropic materials. Analytical and numerical solutions are obtained under continuity conditions at the interface, that is a perfect assembly. The lateral side is assumed to be isolated and a constant thermal source is located at the left-boundary while the right-end stays free allowing the heat to transfer to the surrounding fluid by a convective process. 
The differences between the analytic solution and temperature measurements at any point on the right would indicate the presence of discontinuities. The greater these differences, the greater the discontinuity in the interface due to thermal resistances, providing a measure of its propagation from the interface and they could be modeled as temperature perturbations. The problem of interest may be described by a parabolic equation with initial, interface and boundary conditions, where the thermal properties, the conductivity and diffusivity coefficients, are piecewise constant functions. The analytic solution is derived by using Fourier methods. Special attention is given to the Sturm-Liouville problem that arises when deriving the solution, since a complicated eigenvalue equation must be solved. Numerical simulations are conducted by using finite difference schemes whose convergence and stability properties are discussed along with physical interpretations of the results. \\ [4mm] {\it Key--Words:} Heat equation, solid-solid interface, eigenvalue problems, mathematical modeling. \end{minipage} } \maketitle \thispagestyle{empty} \pagestyle{empty} \section{Introduction} \label{S1} Heat transfer problems in multilayer or solid-solid interface materials have arisen in several applications in science and engineering \cite{Chung2001}. Direct applications can be found in the industry \cite{Holler2020}, including metallurgical \cite{Ma2010}, aerospace \cite{Barturkin2005}, technological and electronic \cite{Cahill2003} and aviation \cite{Ward2006}. A large number of articles are devoted to the study of thermal, electromagnetic and/or optical properties of composed materials, among them \cite{Cahill2003}-\cite{Chung2001}, \cite{Hristov2012}, \cite{Prabhu2002}-\cite{Stevens2007}, \cite{Volz2000}-\cite{Zeng2021}. These types of problems are generally approached experimentally or through numerical simulations.
Few articles are found in the literature that focus on mathematical models and analytical descriptions of the thermal process, as in \cite{Holler2020}, where the model is described. In \cite{Umbricht2020MEP}, \cite{Umbricht2021IJHT} the problem is approached analytically for the steady-state. On the other hand, the evolutionary state of the interface problem is studied in \cite{Hristov2012} for a solid material of infinite length. This work focuses on the analytical solution to a heat transfer problem that is assumed to occur along a bar composed of two different materials with continuity conditions at the solid-solid interface. A thermal source is imposed at the left boundary ($x=0$) while free convection is assumed at the right side ($x=L$). To the best of the authors' knowledge, the analytical solution to this problem is not published. In \cite{Ozisik1993}, the problem is stated with a strategy for solving the equation but it is not explicitly solved. The solution to the perfectly assembled solid-solid interface problem is important since the differences with observed data would provide a measure of the discontinuities due to roughness and tension between the materials. Here, an approach is presented for solving the problem analytically where the solution is obtained as a combination of the steady-state solution and a transient term, where the latter one is calculated using Fourier techniques. This manner of presenting the solution is useful to better understand the physical transient behavior. As in the case of a homogeneous bar, a Sturm-Liouville (S-L) eigenvalue problem arises. Finding its solution is complicated since the coefficients of the equation are not constant but depend on the thermal parameters of the materials involved. The existence of an infinite number of solutions to the S-L equation is demonstrated and an illustrative example is included. This is the most important result of this work.
Numerical simulations of the temperature profile are conducted using a finite difference scheme of second order centered in space and first order forward in time. The convergence and stability properties are discussed along with physical interpretations of the results. Analytical and numerical solutions to this problem are useful to predict temperature profiles under different situations assuming perfect assembly between materials and hence, to detect discontinuities at the interface. In Section \ref{framework}, the equations used to describe the process are presented. Section \ref{steady} is aimed at the steady-state heat transfer problem associated with the one of interest. The corresponding transient problem is addressed in Section \ref{transient}, where the eigenvalue problem and the analytical solution are obtained. In Section \ref{numerical}, some numerical examples of the temperature profile for the discretized equation are included. Finally, conclusions and future works are discussed. \section{Mathematical Framework} \label{framework} Consider a unidimensional heat transfer process on a material, which is modeled as a bar whose lateral surface is totally isolated, and it is made up of two consecutive sections of different, perfectly assembled, isotropic and homogeneous materials. This problem can be described by coupled parabolic equations with interface, initial and boundary conditions. At the left-boundary of the bar, a constant thermal source is assumed while the right-end is free allowing the convection process to occur (see Figure \ref{barscheme}).
\begin{figure} \caption{ Heat conduction problem with interface.} \label{barscheme} \end{figure} The system to be solved is given by the heat equations \begin{eqnarray} \label{ec1} U_t(x,t)&=&\alpha_{1}^{2} U_{xx}(x,t), \,\,\, 0<x<l , \\ \label{ec2} U_t(x,t)&=&\alpha_{2}^{2} U_{xx}(x,t), \,\,\, l<x<L, \end{eqnarray} for $t>0$, with initial temperature \begin{equation} \label{initialc} U(x,0)=T_a, \qquad \quad 0<x<L, \end{equation} and boundary conditions \begin{eqnarray} \label{bc} U(0,t)&=&F, \qquad \qquad \qquad \quad \, t>0,\\ k_2\, U_x(L,t)&=&-h\,(U(L,t)-T_a), \, t>0, \end{eqnarray} where $L$ represents the length of the bar, $T_a$ the temperature of the surrounded fluid, $F$ denotes the temperature at $x=0$, $l$ the interface position ($0<l<L$) and $h$ denotes the heat transfer coefficient due to convection at $x=L$. The coefficients $\alpha_1^2$, $k_1$ and $\alpha_2^2$, $k_2$ represent the diffusivity and the thermal conductivity for the materials at the left and right side of the bar, respectively. For two perfectly assembled homogeneous materials, continuity conditions are given at the interface position $ x = l $, that is, \begin{eqnarray} \label{interface1} \displaystyle \lim_{x \to l^-} U(x,t)&=&\lim_{x \to l^+} U(x,t),\\ \label{interface2} \displaystyle \lim_{x \to l^-} k_1 U_x(x,t)&=&\lim_{x \to l^+} k_2 U_x(x,t), \end{eqnarray} for $t>0$. From now on, for simplicity we assume that \begin{equation} \label{FmayorqueTa} F>T_a. 
\end{equation} \section{The steady-state problem} \label{steady} The steady-state problem corresponding to the initial and boundary problem with interface \eqref{ec1}-\eqref{interface2} is given by the following equations \begin{eqnarray} \label{eqest1} U_{xx}^S (x)&=&0, \qquad 0<x<l,\\ \label{eqest2} U_{xx}^S (x)&=&0, \qquad l<x<L,\\ \label{lbc} U^S (0)&=&F, \hspace*{5cm} \\ \label{rbc} -k_2 U_x^S (L) &=& h (U^S (L)-T_a ), \hspace*{2,5cm} \\ \label{ifbc1} U^S(l^-)&=&U^S(l^+),\hspace*{3.8cm} \\ \label{ifbc2} k_1 U_x^S(l^-) &=& k_2 U_x^S(l^+), \hspace*{3cm} \end{eqnarray} where $U^S(l^-)$ and $U^S(l^+)$ denote $\displaystyle \lim_{x \to l^-} U^S(x)$ and $\displaystyle \lim_{x \to l^+} U^S(x)$, respectively. \begin{lemma} The solution to the steady-state problem \eqref{eqest1}-\eqref{ifbc2} is given by the following expression: \begin{equation} \label{Uestacionario_mu} U^S(x)= \begin{cases} F - Q \mu \frac{1}{k_1}\, x, & 0 \leq x \leq l, \\ F - Q \mu \left(\frac{1}{k_2}(x-l) + \frac{l}{k_1} \right), & l<x \leq L, \end{cases} \end{equation} where $Q=(F-T_a) h $, and $\mu$ is the dimensionless coefficient \begin{equation} \label{mu} \mu =\frac{1}{1+\frac{hL}{k_2} +\left( \frac{1}{k_1} -\frac{1}{k_2} \right) hl }= \frac{k_1 k_2}{D}, \end{equation} being $D=k_1 k_2 +k_1 hL + (k_2 - k_1) hl$, $k_1, k_2, h, l, L, T_a, F$ positive constants, $L>l>0$. \end{lemma} \begin{proof} Equations \eqref{eqest1}-\eqref{eqest2} imply that the solution is a piecewise linear function. Imposing the boundary and interface conditions \eqref{lbc}-\eqref{ifbc2} it follows that, after algebraic computations, the solution can be written as \begin{equation} \label{Uestacionario} U^S(x)= \begin{cases} \displaystyle F - \frac{Q k_2}{D} x,\qquad \qquad \qquad\quad0 \leq x \leq l,\\ \\ \displaystyle F - \frac{Q k_1 k_2}{D} \left(\frac{x-l}{k_2} + \frac{l}{k_1} \right), \,l<x \leq L. 
\end{cases} \end{equation} By using the dimensionless coefficient $\mu$ defined in \eqref{mu}, the expression \eqref{Uestacionario_mu} is obtained. \end{proof} This section is included for the sake of completeness and not much detail or discussion is given here. In \cite{Umbricht2020}, \cite{Umbricht2020MEP}, \cite{Umbricht2021IJHT} an equivalent expression can be found for the solution to \eqref{eqest1}-\eqref{ifbc2} and its consistency with the corresponding one for a homogeneous bar with the same boundary conditions. \begin{example} Consider the problem described by the equations \eqref{eqest1}-\eqref{ifbc2} with $L = 1m$, $T_a=25^\circ C$, $h=10\, W/(m^2 {}^\circ C )$ and $F=100^\circ C$. \end{example} Figure \ref{fig:Us} shows the spatial profile of temperatures for different materials and different interface points. It can be seen that the solution is piecewise linear and, since the thermal source is higher than the room temperature, the temperature decreases as a function of the distance from the source location. Less conductive materials lead to a greater decrease in temperature. \begin{figure} \caption{Steady-state temperature profiles for different materials and different interface locations.} \label{fig:Us} \end{figure} The plots on top show different situations for a Fe-Pb or a Pb-Fe bar. It can be observed that in the case of Fe-Pb, higher temperature values are achieved for $x<l=L/2$. This is consistent with the analytical solution since, in this case, it results \begin{equation} \label{Ul_half} U^s(l) = F - \frac{F-T_a}{ (\frac{k_1}{k_2}+1)+\frac{2k_1}{hL}}. \end{equation} Then, for the same pair of materials, the temperature values at $x=l$ are greater when the more conductive material occupies the left half of the bar (i.e., $k_1>k_2$). On the other hand, if $l=L/2$ from \eqref{Uestacionario_mu} it follows that \begin{equation} U^s(L) = F - \frac{F-T_a}{1+\frac{2k_1 k_2}{ h L(k_1 + k_2)}}.
\end{equation} Then, the temperature value at $x=L$ depends on $k_1+k_2$ and $k_1k_2$, hence the relative location of the two materials to the left or right (i.e. Fe-Pb and Pb-Fe) does not influence the temperature value $U(L)$ at the right edge (see also \cite{Umbricht2020MEP}). At the bottom of Figure \ref{fig:Us}, the temperature profiles for different interface locations and different material pairs are shown. The materials were chosen so that their thermal conductivities satisfy different relationships that are reflected in the slopes of the lines. For Fe-Cu: $k_1<k_2$, Al-Mg: $ k_1 \simeq k_2$ (thermally similar), Ag-Pb: $k_1>k_2$ (see Table \ref{thermal_prop}). \begin{table} \begin{center} \caption{\label{thermal_prop} Thermal properties of different materials.} \begin{tabular}{lccc} \hline Material (Symbol) & $k (W/m ^\circ C)$ & $\alpha^2 \times 10^4 (m^2/s)$ \\ \hline Lead (Pb) & 35 & 0.23673\\ Nickel (Ni) & 70 & 0.22660\\ Iron (Fe) & 73 & 0.20451\\ Magnesium (Mg) & 156 & 0.88300\\ Aluminium (Al) & 204 & 0.84010\\ Cupper (Cu) & 386 & 1.12530\\ Silver (Ag) & 419 & 1.70140\\ \hline \end{tabular} \end{center} \end{table} \section{The transient problem} \label{transient} In order to solve the problem \eqref{ec1}-\eqref{interface2}, we consider \begin{equation} \label{Usuma} U(x,t)=U^s(x)+\varphi(x,t), \, 0\leq x \leq L, \, t\geq 0, \end{equation} where $U^s (x)$ is given by \eqref{Uestacionario} or \eqref{Uestacionario_mu}-\eqref{mu} and $\varphi(x,t)$ satisfies the following initial and boundary problem with interface for $t>0$ \begin{eqnarray} \label{fi:ec1} \varphi_t (x,t)&=&\alpha_1^2\, \varphi_{xx} (x,t), \,\, 0 < x < l, \qquad\\ \label{fi:ec2} \varphi_t (x,t)&=&\alpha_2^2\, \varphi_{xx} (x,t), \,\, l<x<L, \qquad\\ \label{fi:cit0} \varphi (x,0)&=& T_a-U^s (x), \,\, 0 < x < L, \quad\\ \label{fi:cbx0} \varphi (0,t)&=&0, \\ \label{fi:cbxL} - k_2 \varphi_x (L,t)&=&h \varphi(L,t), \\ \label{fi:itf1} \varphi(l^-,t)&=&\varphi(l^+,t), \\ \label{fi:itf2} k_1 
\varphi_x (l^-,t)&=&k_2 \varphi_x (l^+,t). \end{eqnarray} By using this representation, the transient terms can be viewed as ``perturbations'' to the steady-state. The standard procedure of separation of variables is used to find $\varphi (x,t)$. Assuming the existence of $X(x)$ and $T(t)$ that satisfy, for $t>0$, \begin{equation} \label{fiXT} \varphi(x,t)= \begin{cases} X_1 (x).T(t), & 0 \leq x \leq l,\qquad\\ X_2 (x).T(t), & l<x \leq L, \end{cases} \end{equation} and the following equations and conditions are obtained: \begin{eqnarray} X_1''(x)- \xi_1 X_1 (x)&=& 0, \qquad 0 < x < l, \qquad \\ X_2'' (x)-\xi_2 X_2 (x)&=&0, \qquad l<x < L, \\ T' (t)&=&\xi_1 \alpha_1^2 T(t) \nonumber\\ &=&\xi_2 \alpha_2^2 T(t), \, t>0,\\ X_1 (0)&=&0, \\ k_2 X_2' (L)+h X_2 (L)&=&0, \qquad \\ X_1 (l^- ) &=&X_2 (l^+ ), \\ k_1 X_1'(l^- ) &=&k_2 X_2'(l^+ ). \end{eqnarray} A solution to the above eigenvalue problem exists provided that $\xi_i=-\lambda_i^2 < 0$, and it follows that \begin{eqnarray} X_1 (x) &=& A_1 \sin (\lambda_1 x), \\ X_2(x)&=&A_2 \sin (\lambda_2 x) +B_2 \cos(\lambda_2 x), \qquad \\ \label{T1T2} T(t) &=& C_1 e^{-\lambda_1^2 \alpha_1^2 t}=C_2 e^{-\lambda_2 ^2 \alpha_2^2 t}, \end{eqnarray} where \begin{equation} \label{alfa} \lambda_1= \alpha \lambda_2, \qquad \alpha =\sqrt{\displaystyle \frac{\alpha_2^2}{\alpha_1^2}}=\frac{\alpha_2}{\alpha_1}. \end{equation} From now on, we denote $\lambda= \lambda_2$ and, without loss of generality, it is assumed that $A_1=1$. Setting $A=A_2,B=B_2$ and $C=C_2$ we have \begin{eqnarray} \label{X1} &X_1 (x) = \sin (\alpha \lambda x), \hspace{2cm} \\ \label{X2} & X_2 (x)=A \sin (\lambda x) +B \cos(\lambda x), \\ \label{T} &T(t)=Ce^{-\lambda^2 \alpha_2^2 t}, \hspace{2cm} \end{eqnarray} where $\lambda >0$ must satisfy the eigenvalue equation \begin{equation} \tan(xL)=\frac{k_2 Ax+h B}{k_2 Bx-Ah}, \hspace{1.5cm} x>0, \end{equation} or equivalently, \begin{equation} \label{eig1} -\frac{k_2}{h}x=\frac{B+A \tan(xL)}{A-B\tan(xL)}, \hspace{1cm} x>0.
\end{equation} From the two interface conditions, and letting \begin{equation} \label{k} k=\frac{k_1}{k_2}, \end{equation} it follows that \begin{eqnarray} \label{A} &A=k \alpha \cos(\alpha l x) \cos(lx)+\sin(\alpha lx) \sin(lx),& \,\,\,\\ \label{B} &B=\sin(\alpha lx) \cos(lx)-k \alpha \cos(\alpha lx) \sin(lx).&\, \end{eqnarray} Replacing \eqref{A}-\eqref{B} in equation \eqref{eig1}, by algebraic computation the eigenvalue equation \eqref{eig1} can be written as \begin{equation} \label{eig} -\frac{k_2}{h} x=\frac{\tan (\alpha lx) + k \alpha \tan((L-l)x)}{k \alpha -\tan (\alpha lx) \tan((L-l)x)}, \end{equation} for $x>0$. Lemmas \ref{deff}--\ref{tansuma} show that the right hand side of the above equation \eqref{eig} may be written as the tangent of a sum of two functions and, therefore, it has an infinite number of essential discontinuities. These results will be used in Theorem \ref{Teoeig} to prove that there exist infinitely many solutions to \eqref{eig}. \begin{lemma} \label{deff} For $\alpha, l, L, k >0$ with $L>l$, the function $f: D \subset (0,+\infty) \rightarrow {\rm I}\!{\rm R}$ defined by \begin{equation} \label{f} f(x)= {\rm atan} \left( \frac{\tan(\alpha l x)}{k \alpha} \right) +(L-l)x, \end{equation} satisfies ${\rm I}\!{\rm R}^+ \subseteq Im(f)$ where \begin{equation} D = [0, +\infty) - \{x_n, \quad n \in {\rm I}\!{\rm N}\}, \end{equation} being \begin{equation} \label{xn} x_n=-\frac{\pi}{2 \alpha l} + n \,\frac{\pi}{\alpha l}. \end{equation} \end{lemma} \begin{proof} Consider the one-sided limits at the discontinuity points $x_n$ given in \eqref{xn}. Since \begin{equation} {\rm atan} \left(\frac{\tan (\alpha lx)}{k \alpha}\right) \in \left(-\frac{\pi}{2}, \frac{\pi}{2 }\right), \end{equation} it results that \begin{equation} \lim_{x \to x_n^-} f(x) = \frac{\pi}{2}+(L-l) x_n, \end{equation} \begin{equation} \lim_{x \to x_n^+} f(x) = -\frac{\pi}{2}+(L-l) x_n.
\end{equation} Therefore, \begin{equation} \label{step} \lim_{x \to x_n^-} f(x) > \lim_{x \to x_n^+} f(x) \end{equation} and since for $x\in D$ \begin{equation} \label{df} f'(x) = \frac{l}{k\left[1+ \left(\frac{\tan (\alpha lx)}{k \alpha}\right)^2\right] \cos^2 (\alpha lx)} + L-l>0, \end{equation} it follows that $f$ is increasing in the interval $(0, x_1)$ and in each interval $( x_n, x_{n+1})$, $\forall \, n \in {\rm I}\!{\rm N}$. On the other hand, the first term of $f$ is bounded, and $L>l$, then \begin{equation} \label{unbound} \lim_{x \to +\infty} f(x)=+\infty. \end{equation} From \eqref{step}, \eqref{df} and \eqref{unbound} it follows that all positive real values are included in $Im(f)$, and the proof is completed. \end{proof} Different parameter values will produce functions $f(x)$ defined in \eqref{f} having graphs of similar shape. The example bellow illustrates the behaviour for a particular case. \begin{example} \label{ex2} Consider the expression of the function $f$ given in \eqref{f} for the problem described by the equations \eqref{ec1}-\eqref{interface2} for a bar made of iron and lead (Fe-Pb). The particular parameter values for this example are included in Table \ref{tab:ex2}. \end{example} \begin{table}[H] \begin{center} \caption{Parameter values for Example \ref{ex2}.} \begin{tabular}{lc} Parameter & Value\\ \hline $L(m)$ & 5\\ $l(m)$ & 2\\ $k_1 (W/m {}^\circ C)$ & 73\\ $k_2 (W/m {}^\circ C)$ & 35\\ $\alpha_1^2 (m^2⁄s)$ & $0.20451\times10^{-4}$\\ $\alpha_2^2 (m^2⁄s)$ & $0.23673\times10^{-4}$\\ $h (W⁄(m^2 {}^\circ C ))$ & 10\\ \hline \end{tabular} \label{tab:ex2} \end{center} \end{table} Figure \ref{fig:ex2} shows the plots of the piecewise continuous function $f$ given in \eqref{f} for this particular case \begin{equation} f(x)= {\rm atan} \left( \frac{\tan( 1.85892x)}{0.44562} \right) + 3x \end{equation} along with $y=3x$ and \begin{equation} {\rm atan} \left( \frac{\tan( 1.85892x)}{0.44562} \right). 
\end{equation} \begin{figure} \caption{Red line: $f(x)= {\rm atan} \label{fig:ex2} \end{figure} It can be seen that, although $f$ has an infinite number of discontinuities due to the term $\frac{\tan( 1.85892x)}{0.44562}$, the image of the function $f$ (in red) includes all positive values. This will be crucial to prove that the equation \eqref{eig} has infinitely many solutions. \begin{lemma} \label{tansuma} Given $\alpha, L, l, k >0$, it follows that, for $x>0$ \begin{equation} \label{tanf} \frac{\tan (\alpha lx) + k \alpha \tan((L-l)x)}{k \alpha -\tan (\alpha lx) \tan((L-l)x)}=\tan(f(x)), \end{equation} where $f$ is defined in \eqref{f}. \end{lemma} \begin{proof} Consider $f$ defined in \eqref{f}. By using the formula for the tangent of a sum and some algebraic computations, it follows that \begin{align} \label{tanfsuma} \displaystyle \tan(f(x)) =& \tan \left( {\rm atan} \left( \frac{\tan(\alpha l x)}{k \alpha} \right) + (L-l)x \right)\nonumber \\ =& \frac{\tan \left( {\rm atan} \left( \frac{\tan(\alpha l x)}{k \alpha} \right)\right) + \tan((L-l)x)}{1-\tan \left({\rm atan} \left( \frac{\tan(\alpha l x)}{k \alpha} \right)\right) \tan((L-l)x)} \nonumber \\ =& \frac{ \frac{\tan(\alpha l x)}{k \alpha} + \tan((L-l)x)}{1- \frac{\tan(\alpha l x)}{k \alpha} \tan((L-l)x)}. \end{align} The equation \eqref{tanf} is obtained after multiplying the numerator and denominator in \eqref{tanfsuma} by $k\alpha$. \end{proof} \begin{theorem} \label{Teoeig} Let $k_2, h, \alpha, l, L, k >0$, with $L>l$. The equation \begin{equation} \label{eig2} -\frac{k_2}{h} x=\frac{\tan (\alpha lx) + k \alpha \tan((L-l)x)}{k \alpha -\tan (\alpha lx) \tan((L-l)x)}, \qquad x>0, \end{equation} has infinitely many positive solutions $0 <x_1 <x_2 \cdots <x_n < \cdots$. \end{theorem} \begin{proof} Lemma \ref{deff} and Lemma \ref{tansuma} allow us to write \begin{equation} \label{proofteo1} -\frac{k_2}{h} x=\tan (f(x)), \qquad x>0, \end{equation} where $f$ is defined in \eqref{f}.
Lemma \ref{deff} ensures that ${\rm I}\!{\rm R}^+ \subseteq Im(f)$ implying that $\tan(f(x))$ has an infinite number of branches that intersect the line $y=-\frac{k_2}{h} x$ for $x>0$. \end{proof} The following example illustrates solutions to Equation \eqref{eig} for different setups. \begin{example} \label{ex3} As for the previous example, a bar made of iron and lead (Fe-Pb) is considered. All parameter values for this example are included in Table \ref{tab:ex3}. \end{example} \begin{table}[h] \centering \caption{Parameter values for Example \ref{ex3}.} \begin{tabular}{lc} Parameter & Value\\ \hline $L(m)$ & 5\\ $l(m)$ & 2\\ $k_1 (W/m ^\circ C)$ & 73\\ $k_2 (W/m ^\circ C)$ & 35\\ $\alpha_1^2 (m^2/s)$ & $0.20451\times10^{-4}$\\ $\alpha_2^2 (m^2/s)$ & $0.23673\times10^{-4}$\\ $h (W/(m^2 {}^\circ C ))$ & 10\\ $F ({}^\circ C)$ & 150\\ $T_a ({}^\circ C)$ & 20\\ \hline \end{tabular} \label{tab:ex3} \end{table} \begin{figure} \caption{Solutions to Eq. \eqref{eig} \label{fig:ex3} \end{figure} The eigenvalue problem \eqref{eig} in this case becomes \begin{equation} \label{ex3:eig} -7.3x=\frac{\tan (1.8589 x) + 0.44563 \tan(3x)}{0.44563 -\tan( 1.8589x) \tan(3x) }, \end{equation} some of its solutions are shown in Figure \ref{fig:ex3}. Similar results are obtained for different bar compositions. Figure \ref{fig:ex3} shows some of the solutions of the equations \eqref{ex3:eig}. These solutions might not be the first ones, since there could exist discontinuities on the right-hand side of \eqref{eig2} that do not appear in the plot due, for instance, to the discretization step.
\begin{theorem} The initial-boundary value problem with a solid-solid interface, described by equations \eqref{ec1}-\eqref{interface2}, has a unique solution of the form \begin{equation} \label{Usmasfi} U(x,t)=U^s (x)+ \varphi (x,t), \,\,0\leq x \leq L,\, t\geq 0, \end{equation} where $U^s (x)$ is given by the expressions \ref{Uestacionario} (or \ref{Uestacionario_mu}-\ref{mu}) and \begin{equation} \label{fi} \varphi(x,t)= \begin{cases} \varphi_1(x,t), \quad & 0 \leq x\leq l, \\ \\ \varphi_2(x,t), \quad & l<x \leq L, \end{cases} \end{equation} for $t>0$, being \begin{equation} \label{fi1} \hspace{-2cm}\varphi_1(x,t)= \sum_{n=1}^{\infty} C_n \sin(\alpha \lambda_n x) e^{-\lambda_n^2 \alpha_2^2 t}, \end{equation} \begin{equation} \label{fi2} \begin{split} \varphi_2(x,t)= \sum_{n=1}^{\infty} C_n [k \alpha \cos(\alpha \lambda_n l) \sin( \lambda_n (x-l)) \qquad \\ + \sin(\alpha \lambda_n l) \cos( \lambda_n (x-l))] e^{-\lambda_n^2 \alpha_2^2 t}, \quad \qquad \end{split} \end{equation} with \begin{equation} \label{cn} \begin{split} C_n = 2 (T_a-F) \hspace{5.5cm}\\ \frac{\displaystyle \frac{- \sin(\alpha \lambda_n l)}{\alpha \lambda_n } \displaystyle \frac{\mu h}{k_2} + \cos\left(\alpha \lambda_n l\right) \left(-1+ \frac{\mu h}{k_2} l\right)+1} {\alpha \lambda_n l-\sin(\alpha \lambda_n l)\cos(\alpha \lambda_n l)}, \end{split} \end{equation} for $n \in N$ where $\lambda_n$ are the solutions to the equation \eqref{eig} and $\mu$ is defined in \eqref{mu}. \end{theorem} \begin{proof} Equations \eqref{fiXT}, \eqref{alfa}-\eqref{T}, \eqref{k}-\eqref{eig} and the superposition principle lead to Equations \eqref{fi}-\eqref{fi2}. 
From the initial condition \eqref{initialc} it follows that \begin{equation} T_a-U^s (x)=\sum_{n=1} ^{\infty} C_n \sin(\alpha \lambda_n x), \qquad 0 \leq x \leq l, \end{equation} where the Fourier coefficients $C_n$ are given by \begin{equation} \displaystyle C_n= \frac{ \displaystyle \int_0^l (T_a -U^s(x)) \sin(\alpha \lambda_n x) dx}{ \displaystyle \int_0^l \sin^2 (\alpha \lambda_n x) dx}. \end{equation} Using the dimensionless coefficient $\mu$ defined in \eqref{mu} and after some calculations, the equation \eqref{cn} is obtained. \end{proof} \section{Numerical simulations} \label{numerical} The aim of this section is to illustrate the temperature behavior for the heat transfer process given by \eqref{ec1}-\eqref{interface2}. The numerical solutions presented here are obtained by using a second-order finite difference scheme, centered in space and forward in time. This explicit method is stable and convergent for \begin{equation} \max\{\alpha_1^2, \alpha_2^2\} < \frac{(\Delta x)^2}{2 \Delta t}, \end{equation} where $\Delta x$ and $\Delta t$ are the discretization steps for the space and time, respectively \cite{Morton2005}. A computational non-parallel scheme was programmed in Matlab. A regular partition is considered in space and time to discretize the equations, taking $\Delta x=0.01 m.$ and $\Delta t= 0.1 s.$ so that $\frac{(\Delta x)^2}{2 \Delta t}=5\times10^{-4}$ which is greater than all possible thermal diffusivity coefficients $\alpha_1^2$, $\alpha_2^2$ considered for this work (see Table \ref{thermal_prop}). The simulations are obtained in a few seconds when using an Intel(R) Core(TM) i7-6700K 4.0 GHz machine. \begin{example} \label{numerical_ex1} Consider the problem described by the equations \eqref{ec1}-\eqref{FmayorqueTa} with $L = 1 \, m$, where the solid-solid interface is located at $l=0.3\, m$, the heat transfer coefficient is $h=10\, W/(m^2 {}^\circ C )$, $T_a=25^\circ C$, and the thermal source is $F=100^\circ C$.
\end{example} Figures \ref{fig:Tempxl}-\ref{fig:TempxL} show the plots for the temperature profiles at the interface $x=l$ and at the right boundary $x=L$, respectively, for a bar composed of different pairs of materials where the material at the left side of the bar is Pb (top) and Ag (bottom). \begin{figure} \caption{ \label{fig:Tempxl} \end{figure} From these figures, it can be seen that $U(l,t)>U(L,t)$ for all $t>0$, which agrees with the analytical solution given in \eqref{fi2}. It is also observed that in all cases it requires some hours to achieve the steady-state, and it is reached earlier when more diffusive materials are involved. These observations are also consistent with the analytical solution, since the transient terms of the solution, \eqref{fi1}-\eqref{fi2} (and \eqref{T1T2}, \eqref{alfa}, \eqref{T}) decay exponentially with the diffusivity coefficients which are of the order of $ 10^{ -4}$. \begin{figure} \caption{Temperature profiles at the free-end ($x = L$) for the Example \ref{numerical_ex1} \label{fig:TempxL} \end{figure} \begin{figure} \caption{Bar temperature at $t=1h$ (top) and $t=15h$ (bottom) for the Example \ref{numerical_ex1} \label{fig:Temph} \end{figure} \begin{figure} \caption{Temperature as function of space and time for the Example \ref{numerical_ex1} \label{fig:Temptx} \end{figure} In Figure \ref{fig:Temph} temperature profiles on the bar at $t=1h$ and at $t=15h$ are shown. Note that for the latter, the curves resemble piecewise linear functions, which correspond to the steady-state as shown in the analytical formula given in \eqref{Uestacionario} and it is illustrated in Figure \ref{fig:stationary}. The slopes depend on a particular combination of the conductivity values of the materials, the location of the interface and the source and room temperatures. This also agrees with the analytical solution given in \eqref{Uestacionario}, \eqref{Usmasfi}-\eqref{fi2} since the transient terms approach zero with time.
Finally, in Figure \ref{fig:Temptx} the temperatures for Pb-Ag (top) and Ag-Pb (bottom) as functions of space and time are plotted, where the horizontal axis represents the time in hours and the vertical axis represents the distance from the left boundary in meters. That is, for a fixed value of $ t $, the temperature distribution of the bar at that time can be seen vertically, from the left edge $x = 0$ (bottom line of the graph) to the right one, $x = L$ (top line of the graph). On the other hand, taking a fixed value of $ x $, one can see the evolution of the temperature at that point by looking at the corresponding horizontal line. Notice, in both cases, a change in the temperature behavior at the interface point ($x=0.3m$). Moreover, for $x \leq 0.3m$, the plot on bottom (Ag-Pb) shows that the temperature achieves higher values in a shorter period of time than for the corresponding one for Pb-Ag (top) under the same conditions. This observation is physically consistent with the fact that Ag is a more diffusive material than Pb. The materials for this example were chosen so that the differences in the behavior of the temperature function can be easily observed due to the large difference in their respective thermal diffusivities. \section{Conclusion} \label{S3} In this work, the solution to a heat transfer problem along a bar with a solid-solid interface is considered. This study aims to provide a theoretical basis that can help to gain insight into the effect of interfaces on heat transfer processes, from the mathematical point of view. A perfect assembly between the two parts is considered, so that differences between the analytical solution and experimental measurements will provide an estimate of the thermal dissipation between the two materials, which would be useful to model tension and roughness at the interface as well as solid-solid thermal resistance.
The problem is described by an initial value parabolic partial differential equation with interface and Dirichlet and Robin boundary conditions. The analytical expression for the solution is derived where the steady-state form is explicitly included. The transient part of the solution is obtained which depends on the solution of a Sturm-Liouville problem. The existence of an infinite number of solutions to the eigenvalue problem is demonstrated and it is the most important result of this work. Also, an illustrative example is included. Numerical simulations are conducted by using an explicit finite difference scheme where its convergence and stability properties are discussed. Numerical results are consistent with analytical solutions and physical interpretations. Future works might include, among others, the study of mathematical models for the thermal behavior at the interface and how the imperfections or roughness at the solid-solid interface can change the temperature distribution at the bar. Also, extensions to 2D and 3D analysis and/or the problem for two or more interfaces can be conducted. \noindent {\bf Acknowledgements:} The research was supported by the Universidad de San Mart\'in, Universidad Austral and, in the case of the first and third authors, by SOARD/AFOSR (Grant FA9550-18-1-0523). The second author acknowledges support from European Union's Horizon 2020 Research and Innovation Programme under the Marie Sklodowska-Curie Grant Agreement No. 823731 CONMECH and by the Project PIP No. 0275 from CONICET-UA, Rosario, Argentina. \end{document}
\begin{document} \title{Time Series Clustering for Human Behavior Pattern Mining} \author{Rohan~Kabra\textsuperscript{*}, Divya~Saxena\textsuperscript{*},~\IEEEmembership{Member,~IEEE,} Dhaval~Patel,~\IEEEmembership{Senior Member,~IEEE,} and~Jiannong~Cao,~\IEEEmembership{Fellow,~IEEE} \IEEEcompsocitemizethanks{ \IEEEcompsocthanksitem *Authors contributed equally. \IEEEcompsocthanksitem R. Kabra was with the Department of CSE, IIT Roorkee, Roorkee, India.\protect\\ E-mail: [email protected] \IEEEcompsocthanksitem D. Saxena is with Department of Computing), The Hong Kong Polytechnic University, Hong Kong. E-mail: [email protected] \IEEEcompsocthanksitem D. Patel is with IBM T.J. Watson Research Center, NY, USA. E-mail: [email protected] \IEEEcompsocthanksitem J. Cao is with Department of Computing, The Hong Kong Polytechnic University, Hong Kong. E-mail: [email protected]} \thanks{Manuscript received Month, DD Year; revised Month, DD Year.}} \markboth{Journal of \LaTeX\ Class Files,~Vol.~14, No.~8, August~2015} {Shell \MakeLowercase{\textit{et al.}}: Bare Demo of IEEEtran.cls for Computer Society Journals} \IEEEtitleabstractindextext{ \begin{abstract} Human behavior modeling deals with learning and understanding of behavior patterns inherent in humans' daily routine. Existing pattern mining techniques either assume human dynamics is strictly periodic, or require the number of modes as input, or do not consider uncertainty in the sensor data. To handle these issues, in this paper, we propose a novel clustering approach for modeling human behavior (named, MTpattern) from time-series data. 
For mining frequent human behavior patterns effectively, we utilize a three-stage pipeline: (1) time series data is represented as a sequence of regularly sampled, equal-sized unit time intervals for better analysis, (2) a new distance measure scheme is proposed to cluster similar sequences which can handle temporal variation and uncertainty in the data, and (3) an exemplar-based clustering mechanism is exploited and its parameters are fine-tuned to output the minimum number of clusters with given permissible distance constraints and without knowing the number of modes present in the data. Then, the average of all sequences in a cluster is considered as a human behavior pattern. Empirical studies on two real-world datasets and a simulated dataset demonstrate the effectiveness of \textit{MTpattern} w.r.t.\ internal and external measures of clustering. \end{abstract} \begin{IEEEkeywords} Multi-modal behavior; Time Series Clustering, Sequence Pattern Discovery, Constrained Optimization, Uncertainty in Sensor Output, Temporal Variability, Contact Tracing in COVID-19 \end{IEEEkeywords}} \maketitle \IEEEdisplaynontitleabstractindextext \IEEEpeerreviewmaketitle \IEEEraisesectionheading{\section{Introduction}\label{sec:introduction}} \IEEEPARstart{M}odeling human behavior using data originating from social networks, the Internet of Things (IoT), and smart homes is an active area of research. A behavior pattern refers to a recurrent way of acting or conduct by an individual or a group, such as mobile devices, animals, vehicles, etc., in a physical/virtual environment, while learning and understanding human behavior patterns from raw data is known as \textit{human behavior modeling}.
Researchers have discovered and identified various types of behaviors on the basis of the source and domain of raw data, such as social behavior using online social networks \cite{LongOnlineSocial,Xiang:2010:MRS:1772690.1772790}, biological health behavior using smart body sensors \cite{StefanEnvHealth}, online user behavior analysis using clickstream data \cite{unsupervised2016}, customer energy consumption behavior \cite{learning2018}, customer spending behavior using transaction data \cite{purtreeclust2017}, human activity behavior using multivariate temporal data \cite{multivariate2016} and mobility behavior using smart card data, GPS and WiFi traces \cite{clustering2016, RenTSOS15, scalable2018}, etc. Identifying frequent human behaviors is very challenging in any behavior analysis. The complexity of modeling human behaviors comes from two aspects: vast types of humans and irregular behaviors of each human type. Mining patterns in human behavior have wide practical applications in several domains, such as recommendation systems, healthcare, transportation, etc. For instance, identifying underlying patterns in human moving behavior in a location, such as mall, restaurant, during the COVID-19 pandemic (i.e., contact tracing) can support the authorities to understand who and how many people were in close contact with each other. \begin{figure} \caption{Observed visiting sequences of $m$ individuals.} \label{fig:sensor_visiting_sequence} \end{figure} \iffalse Some of the existing research works used surveys and simulations \cite{Kim05asurvey-based, nils-simulation} for analyzing the behavior of humans which could not depict a real and accurate picture of human behavior. But in the age of mobile computing, IoT and ubiquitous computing, every action and behavior of human are being captured by sensors. Ren, et al. analyzed WiFi traces \cite{RenTSOS15} for understanding customer purchasing dynamics in a retail shop whereas Wang, et al. 
\cite{} modeled customer behavior for effective load forecasting.\fi The main aim of the existing human visiting (or, mobility) behavior pattern mining techniques is to model humans/individuals behavior at single or group level in temporal, spatial or spatio-temporal dimension. Some of the popular behavior pattern mining techniques, include frequency spectrum analysis \cite{ldh10} to analyze the periodicity of recurring behavior patterns, partition and model based time-series clustering \cite{ldh10,WarrenLiao20051857,Bicego2003,Zhong,ICML2011Li159,KlForHMM} and PCA \cite{NathanEigStructure} based eigenbehavior technique to extract the structure of behavior patterns. Even though existing pattern mining methods have shown potential for mining visiting behavior patterns, they have the following limitations: (1) assume temporal dynamics is strictly periodic. However, in practice, human behavior patterns are temporally variable inherently; (2) The sensor is not accurate and may produce false negatives (non-deterministic values). This leads to uncertainty in data which has not been addressed yet; (3) Multi-modality is inherent in behavior of individuals which means that multiple patterns may occur during a given time window, but number of those patterns are unknown. The pattern may span an entire day or for a short span of the day. This poses a challenge to identify the time of occurrence and duration of all patterns. Most of the techniques need the number of modes of behavior as input and only identify patterns that span the entire time period. In many real-world problems, objects are described by large number of binary features. 
For instance, documents are characterized by presence or absence of certain keywords \cite{santra2016bayesian}; cancer patients are characterized by presence or absence of certain mutations \cite{BioNumerics}; traffic road accident and crash patterns are identified using the presence or absence of accidents and crashes data \cite{rahimi2019clustering}\cite{chung2013identifying}; trading patterns are identified by analysing the presence or the absence of a trading activity \cite{fokianos2017binary}; To better understand the attributes and characteristics of data used to find the behavior of individuals to a location, we take an example. Suppose the longitudinal sequences in Figure 1 represents the observed day-wise visiting sequences of individuals as detected by (inaccurate) sensor(s). Black shade represents presence of an individual during the respective time interval deterministically. Unshaded region represents a non-deterministic value or uncertainty about the individual's presence or absence. Temporal data collected through sensors can have uncertainty as individual may actually be absent during the time interval or the sensor may have failed to detect the presence of an individual (false negative). This may lead to an ambiguity in the observed data. Another aspect of the behavior data is temporal variability of underlying patterns because human routine behavior is not perfectly periodic. Moreover, human behavior is multi-modal in nature and the number of these modes are unknown beforehand. In the example, individuals exhibit four modes of mobility behavior where cluster A, B, and C represent patterns spanning entire time period whereas cluster D represents a localized pattern. The time of occurrence and duration of \textit{localized pattern} is also unknown which poses another challenge. In addition, to determine number of modes of behavior ($C$) in a person‘s visiting sequence is not an easy task. 
The optimal choice of \textit{C} strikes a balance between maximum compression of the data using a single cluster, and maximum accuracy by assigning each data point to its own cluster. There are many techniques, like \textit{The Elbow Method} or \textit{Silhouette index} to estimate number of clusters. These techniques do not work well in non-euclidean space. One of such technique involves $PCA$ analysis before clustering and setting \textit{C} equal to the number of dominant principal components. This method is not accurate because it often underestimates number of clusters in the dataset. Furthermore, the choice of distance measure is of utmost importance for any clustering. The conventional distance measures, like euclidean, Manhattan, Jaccard, etc., compare corresponding time slots and do not capture temporal dynamics or affinity between neighbouring time instances which is very important in case of uncertainty, noise and variability in data. Moreover, they are not sensitive to local differences between sequences. To handle the aforementioned issues, in this paper, we focus on extracting and identifying visiting (or, mobility) behavior of individuals to a location (named, MTpattern) which requires to group or cluster similar visiting sequences. We propose a novel distance measure scheme to find an appropriate dissimilarity measure between visiting sequences that is invariant to only small temporal variation and uncertainty in the observation. Then, we use segment tree data structure to discover localized frequent patterns in given time window or to find frequent patterns of given length efficiently. Then, we propose an effective clustering mechanism by exploiting affinity propagation to cluster sequences with the given constraints and unknown number of modes (or clusters) of behavior. We consider the average of all visiting sequences in a cluster for every unit time interval to have a \emph{behavior pattern} of individuals. 
We formulate our problem of finding behavior patterns problem as a \emph{constrained optimization problem} where we minimize number of mutually exclusive clusters that cover the entire dataset under the given constraint of maximum permissible local dissimilarity enforced by dissimilarity metric\footnote{This problem is similar to finding minimum \emph{dominating set} of vertices in an undirected graph}. We perform experiments on a simulated and two real-world datasets. Results show that \textit{MTpattern} outperforms three baseline clustering algorithms. The contributions of this paper are as follows: \begin{enumerate} \item We propose a novel approach to discover visiting (or, mobility) behavior patterns of individuals of given length by clustering similar visiting sequences or to find localized frequent patterns in a given time window (named, MTpattern). \item We represent time series data into discretized sequence of regularly sampled equal sized unit time intervals for better analysis and propose a novel dissimilarity measure, called TDist, to cluster similar sequences by putting an upper-bound on the local dissimilarity for handling temporal variations and uncertainty in the data. Then, we fine-tune a non-parametric exemplar based clustering technique to cluster sequences with the given constraints and unknown number of modes of behavior. \item To validate and to show the usability of \textit{MTpattern}, we evaluate our proposed approach on two real-world datasets and a simulated dataset. Results show that \textit{MTpattern} outperforms three baseline clustering algorithms w.r.to both internal and external measures. \end{enumerate} \section{Related Work} \label{sec:related_work} The idea of discovering underlying patterns in individuals' behavior is not new. In this section, we shall discuss important and popular techniques for modeling mobility behavior using time-series data. 
Majority of the works have employed time-series clustering algorithms to infer behavior patterns which can be grouped into two main categories: \textit{partitional and hierarchical clustering}, and \textit{model-based clustering}. We also discuss \textit{sequential pattern mining} and \textit{PCA-based Eigenbehavior techniques} that are commonly used to find the structure of frequent individuals' behavior. We also discuss the limitations and shortcomings of these existing techniques. \subsection{Time-series Clustering for Human Behavior Modeling} \subsubsection{Partitional and Hierarchical Clustering} Previous works have used partitional (like, K-Means or K-Medoids) \cite{WarrenLiao20051857, t2017} or hierarchical \cite{ldh10} clustering techniques for behavior modelling. For these techniques, user has to either pre-specify the number of clusters or set an upper bound on overall representational error to stop clustering. Determining number of clusters in advance or maximum permissible representational error is not trivial and is often subjective. \subsubsection{Model Based Clustering} Under \emph{model based clustering} (HMM, Kalman filter, etc.) probabilistic models are initially built on time-series. The asymmetric dissimilarity between time-series $A$ from $B$ is calculated by using a posteriori probability of time-series $A$ given the probabilistic model of time-series $B$. Probabilistic distance measures, such as KL distance \cite{Bicego2003,Zhong,ICML2011Li159,KlForHMM} are popularly used. The major limitation of this approach is that there is need to pre-determine number of clusters. Matsubara, et al. \cite{matsubara2014autoplait} used an Baum-Welch algorithm based approach for the finding distinct high-level patterns from a large set of co-evolving sequences. The Baum–Welch algorithm is a special case of the EM algorithm used to find the unknown parameters of a HMM. 
However, model based sequence clustering methods, such as HMM are more sensitive to the order of events and invariant to the actual time of occurrence of the events. In model based clustering, it is also important to assume that the observations are deterministic which is not possible in the real-life data as data often contains uncertainty and non-deterministic instances in observation. \subsection{Sequential Pattern Mining} In recent times, priori-based GSP algorithm \cite{Srikant1996}, projection-database based Freespan \cite{1339268} and Prefix Span \cite{Han:2000:FFP:347090.347167}, vertical id-list database and lattice theory based SPADE \cite{Zaki2001} algorithm has been used to find frequent subsequences in a given set of categorical time sequences. These methods only consider the topological order of events for finding frequent patterns instead of the absolute time of occurrence or duration of the events. Moreover, in case of interval events unlike instantaneous events, it is not trivial to establish the topological order. Some recent works \cite{4733940,5360529,BenZakour2012} have proposed mining temporal interval patterns from interval data. For \emph{extracting temporal patterns from interval-based sequences} \cite{Guyet:2011:ETP:2283516.2283614}, Guyet, et al. \cite{Allen:1983:MKT:182.358434} proposed converting temporal sequences into sequence of events linked by Allen's temporal relations. Rawassizadeh, et al. \cite{multivariate2016} proposed a model for identifying frequent behavioral patterns with temporal granularity from the real-time dataset. But they dealt with temporal events that fit into the Allen’s interval algebra, which is not about time-series analysis. Even the dissimilarity measure (City-Block distance) in \emph{extracting temporal patterns from interval-based sequences} between subsequences does not take into account uncertain (or, undeterministic) observations in data. 
\iffalse For every possible sequence of events clustering is performed. The order of the sequence and then using while For finding frequent patterns they cluster subsequences by taking into account the actual time and duration of events as well For finding frequent patterns, dissimilarity between subsequences is calculated by taking into account the actual time and duration of events and then all subsequences with the given symbolic signature are clustered. Through this, they can discriminate input sequences not only on the succession of events but also on their relative position in time and on their respective duration. But, For finding frequent patterns, dissimilarity between subsequences is calculated by taking into account the actual time and duration of events and then all subsequences with the given symbolic signature are clustered. In case of uncertainty in output, the observed signature of the interval sequence may change and the aforementioned technique will not work. Even the dissimilarity measure (City-Block distance) in \emph{extracting temporal patterns from interval-based sequences} between subsequences does not take into account uncertain (or, undeterministic) observations in data.\fi \subsection{PCA Decomposition} Another popular method to model time-series data is `Eigenbehavior' \cite{NathanEigStructure}. These eigenbehaviors are the eigenvectors of the covariance matrix of behavior data obtained after application of PCA or Singular Value Decomposition (SVD). The first few eigenvectors (highest eigenvalues) of the decomposition typically account for a very large percentage of the overall variance in the longitudinal data's eigen decomposition. It is claimed that every non-trivial (high eigenvalue) eigenvector represents a recurrent dominant pattern. 
One advantage of this approach is that we do not need to pre-determine or predict the number of modes in the data and it can be directly derived from the number of eigenvectors having large eigenvalues. But, it still suffers from a number of shortcomings. Like conventional distance measures, it fails to capture the temporal dynamics as it plots the sequence in Euclidean space and treats every dimension independent of each other failing to capture the affinity between nearby time instances. Number of eigenvectors is limited by number of dimensions and if a cluster can be represented by a linear combination of eigenvectors having higher eigenvalue, then a new eigenvector in the direction of the cluster is not needed. Therefore, PCA often underestimates number of clusters. Since, all the eigenvectors need to be orthogonal to each other, many eigenvectors do not point in the direction of actual clusters since not all clusters are orthogonal to the principal eigenvector at mean. So, all cluster centroids may not necessarily lie on some eigenvector. Eigenvectors are also biased towards cluster located at a larger distance from mean as they cause bigger variance in the data. Our proposed approach handles the issue of temporal dynamics, uncertainty in the data, and do not require number of behavior modes as input beforehand at the same time. To the best of our knowledge, no prior work studies human behavior patterns mining by handling the above-mentioned issues simultaneously. \iffalse \begin{figure} \caption{Eigenvectors give inaccurate results} \end{figure} \fi \begin{table}[h!] \begin{tabular}{ |p{1.1cm}||p{7cm}| } \hline Notations & Description \\ \hline $m$ & Number of individuals. \\ \hline $L$ & Total discretized timestamps in a BIS or length of the BIS in a day. \\ \hline $e$ & Number of days. \\ \hline $d(A, B)$ & Distance from one interval sequence $A$ to another interval sequence $B$. 
\\ \hline $D(A, B)$ & Distance from one interval sequence $A$ to another interval sequence $B$ and vice versa. \\ \hline $\delta$ & A threshold to determine two points in a time point sequence will be in the same time interval in time interval sequence or not. \\ \hline $\lambda$ & It is the length of equal sized unit intervals in the discretized time interval sequence (BIS). \\ \hline $\Omega$ & It is a threshold to handle uncertainty in observed data and temporal variability in patterns. \\ \hline C & Number of clusters. \\ \hline cnt & Number of discrete intervals in a BIS. \\ \hline $t\textsubscript{As}$ & Timestamp at the starting of a time interval in a BIS $A$. \\ \hline $t\textsubscript{Ae}$ & Timestamp at the ending of a time interval in a BIS $A$. \\ \hline ITDist & An interval temporal distance between any two time intervals $A$ = [$t\textsubscript{As}$, $t\textsubscript{Ae}$] and $B$ = [$t\textsubscript{Bs}$, $t\textsubscript{Be}$], denoted as $ITDist(A, B)$ which is the absolute difference of the means of two time intervals, given by $|\frac{(t\textsubscript{As} + t\textsubscript{Ae})}{2} - \frac{(t\textsubscript{Bs} + t\textsubscript{Be})}{2}|$. \\ \hline \end{tabular} \caption{Notations and their descriptions} \label{table:1} \end{table} \section{Definitions and Problem Statement} Human behavior at a location can help us to infer interesting information about the location. Simple statistics like number of visitors, average stay time and frequency can reveal semantics of a location. 
There have been a number of applications in a number of domains, such as transportation (analyze the number of visitors to identify time of the day when overcrowding usually takes place \cite{liu2006scalable}), smart environment (in a residential location, an intelligent lighting, heating or cooling system can be developed by modelling the presence of people in the room \cite{pan2013trace}\cite{chakravorty2013privacy}), health (monitoring the habits and mobility of patients can be used as an indicator of overall health \cite{soulas2013monitoring}), education (understanding how the campus is used can provide very important information and insights to college authorities \cite{wang2014studentlife}), etc. We define the problem as follows: \textbf{Problem 1.} Given a raw sensor data tracking the presence of individuals visiting a location over a long period of time and a threshold $\delta$, assuming that presence of individuals in a location is not strictly periodic, sensors are not accurate and may produce false negatives, and multi-modality is inherent in the individuals' visiting patterns and the number of these modes are unknown beforehand, the goal is to transform the raw sensor data into discretized sequence of regularly sampled equal sized unit time intervals for better analysis and then effectively summarize the frequent behavioural patterns of \textit{m} individuals in any given time window during the day. To be able to formulate the problem first, we describe our definitions. Table 1 lists notations that we have used in this paper. Human behavior is recurring and influenced by a range of factors (such as, time). Here, human behavior under the influence of time has been called "frequent behavioral patterns". 
\textbf{Definition 1.} Time Point Sequence (PS) is an ordered sequence of \textit{p} timestamps at a day \textit{d} at which an individual \textit{i} was detected by the sensor(s), $PS^d_i$ = $\langle$\{$t\textsubscript{1}$\}, \{$t\textsubscript{2}$\},...,\{$t\textsubscript{p}$\} $\rangle$ \, \textrm{for} \, 1 $\leq$ i $\leq$ m \, \textrm{where} \, \textit{m} \, \textrm{is the number of individuals}. \textbf{Definition 2.} Time Interval Sequence (IS) is an ordered sequence of \textit{q} time intervals at a day $d$ computed from the \textit{PS}, $IS_i^d$ = $\langle$\{$t\textsubscript{1s}$, $t\textsubscript{1e}$\}, \{$t\textsubscript{2s}$, $t\textsubscript{2e}$\},..., \{$t\textsubscript{qs}$, $t\textsubscript{qe}$\} $\rangle$ where $t\textsubscript{1s}$ and $t\textsubscript{1e}$ represents the start and end of interval, and 1 $\leq$ i $\leq$ m where $m$ is the number of individuals. \textbf{Definition 3.} Discretized Time Interval Sequence (BIS) is a sequence of regularly sampled equal sized unit intervals at a day $d$ computed from the IS, $BIS_i^d$ = $\langle$\{0, $\lambda$\}, \{$\lambda$, 2*$\lambda$\},..., \{($L$-1) * $\lambda$, $L$ * $\lambda$\}$\rangle$ where $\lambda$ represents length of unit interval along with the binary value (0 or 1) representing whether the individual was detected or not by the sensor in the corresponding time interval, \textit{L} is the total length of BIS in a day, and 1 $\leq$ i $\leq$ m where $m$ is the number of individuals. We will talk more on how we set value of $\lambda$ in the Section 6.1.3. In addition, transforming raw sensor data into Discretized Time Interval Sequence (BIS) is discussed in Section 4.1. A Discretized Time Interval Sequence (BIS) is also called visiting sequence of an individual. 
\textbf{Definition 4.} Partial $\Omega$ covering for BIS $A$ is set of all other BISs $B\textsubscript{i}$, 1 $\leq$ i $\leq$ m such that for every unit time interval with value 1 in $A$, there exists some time interval with value 1 in $B\textsubscript{i}$ under the given constraint of maximum permissible local dissimilarity, i.e., ITDist($A$, $B\textsubscript{i}$) < $\Omega$. \textbf{Definition 5.} Complete $\Omega$ covering for BIS $A$ is set of all other BISs $B\textsubscript{i}$, 1 $\leq$ i $\leq$ m, such that $B\textsubscript{i}$ is in \emph{partial $\Omega$ covering} of $A$ and $A$ is in \emph{partial $\Omega$ covering} of $B\textsubscript{i}$ at the same time. \textbf{Definition 6.} Frequent behavioral patterns are the average of all BISs in a cluster, such that every member of the cluster is in complete $\Omega$-covering of an \emph{exemplar} BIS which is also a member of the cluster. The average obtained represents the probability of the presence of individuals in every unit time interval for that particular pattern. \begin{figure} \caption{\textit{MTpattern} \label{fig:framework} \end{figure} \section{Our Proposed Solution} In this section, we discuss about our proposed solution, \textit{MTpattern} in detail. Figure \ref{fig:framework} shows the overview of our proposed approach \textit{MTpattern}. \textit{MTpattern} is composed of four major parts as follows: 1) \textit{Data Preprocessing.} We calculate the \textit{Time Interval Sequence} (IS) for every individual from its corresponding \textit{Time Point Sequence} (PS). Then the \textit{Time Interval Sequences} are discretized into sequence of regularly sampled equal sized unit intervals, named \textit{Discretized Time Interval Sequence} (BIS) for better analysis. 2) \textit{Segmentation.} To facilitate the piece-wise analysis, each and every BIS is hierarchically segmented and stored in a segment tree. 
3) \textit{Dissimilarity Measure.} To calculate symmetric dissimilarity between pair of same length BIS segments that is invariant to uncertainty in observations and small temporal variation in underlying patterns, we propose a novel symmetric dissimilarity metric, called TDist. The dissimilarity matrix is pre-computed for every segment in the segment tree. Dissimilarity matrix (or distance matrix) can then be computed for any time interval from dissimilarity matrix of segments in the segment tree. 4) \textit{Pattern Discovery}. Every row of the dissimilarity matrix represents a cluster which is the complete $\Omega$-Covering of the corresponding BIS. These clusters are overlapping in nature as any BIS may be a member of more than one complete $\Omega$-Covering. Therefore, we further extend our analysis and optimize the discovery of patterns by finding a minimum set of disjoint (or, non-overlapping) clusters such that every BIS is a member of exactly one cluster and every cluster has an exemplar BIS that has all members of the corresponding cluster in it is complete $\Omega$-Covering. \begin{figure*} \caption{\label{fig:fig1} \label{fig:fig1} \caption{\label{fig:fig2} \label{fig:fig2} \caption{\label{fig:fig3} \label{fig:fig3} \caption{Preprocessing of raw sensor data} \label{fig:preProcessing22} \end{figure*} To minimize the number of clusters, the dissimilarity matrix of the time window is fed into affinity propagation module and it's \emph{preference} parameter tuned to minimize number of unique $\Omega$-coverings that cover all the BISs while maximizing the net similarity between member BISs of the cluster and the exemplar BIS of the cluster it is part of. We take the average of all BIS in $\Omega$-covering which is a discrete probability distribution as a representative of the corresponding cluster. \subsection{Data Preprocessing} We analyze and preprocess the collected raw sensors data for better analysis. 
Figure \ref{fig:preProcessing22} (a) shows the Time Point Sequence (PS) for $m$ individuals in which a vertical bar ($|$) in a row shows the instantaneous timestamp when a WiFi packet corresponding to a particular MAC-address is received (i.e., an individual presence is detected). However, storing information of closely spaced presence is redundant and costly. Therefore, we calculate the \emph{IS} for every individual from its corresponding \emph{PS} by inspecting the time delay between consecutive individual's presence detected in $PS$. If the time delay between consecutive bars ($|$) (i.e., consecutive WiFi packets captured of an individual) is below a threshold $\delta$, then they form a time interval in the \emph{IS}. We discuss in detail on how appropriate interval threshold $\delta$ is calculated in Section 6.1.3. Figure \ref{fig:preProcessing22} (b) shows $m$ \emph{IS} where each filled rectangle (\rule{0.3cm}{0.2cm}) shows the time interval when an individual is present. However, performing piece-wise analysis on \emph{IS} is difficult as the intervals in continuous time domain are of varying length (or duration). So, the \emph{IS} are discretized into sequence of regularly sampled equal sized unit intervals of length $\lambda$. We set $\lambda$ equal to $\frac{\delta}{2}$ so that any gap in raw sensor data (consisting of point sequences) which is more than $\delta$ minutes is captured in BIS after discretization. We obtain BIS after representing \emph{IS} in discrete time domain. `1' represents a deterministic value and `0' represents a non-deterministic value. Figure \ref{fig:preProcessing22} (c) shows discretized time interval sequence (BIS) of $m$ individuals. \subsection{Segmentation} \label{sec:segmentation} There can be a possibility that a pattern spans for an entire time period (a \emph{day} in our case) or only during some time window during the day. 
Since we do not know when and for how long this regular behavior occurs during the day, we segment every BIS into hierarchical segments to facilitate piece-wise analysis. These segments are arranged in a binary tree data structure called \textit{segment tree} as shown in Figure \ref{fig:segmentation}. Every segment is divided into two equal parts in the next level in the segment tree hierarchy. The $Left$ child of any segment in the tree is the left half of the segment and the $Right$ child is the right half of the segment. This data structure is particularly helpful when a solution to a problem can be represented as a combination of solutions to its subproblems. This is true in our work as any frequent behavior pattern is also a piece-wise frequent pattern and can be represented as a concatenation of frequent patterns in its smaller pieces or segments. \begin{figure} \caption{Hierarchical segmentation of a BIS} \label{fig:segmentation} \end{figure} The length of the segments at the leaf nodes of the segment tree represents the highest level of granularity $\frac{L}{2^B}$ where $B$ is a positive integer in the $(0, \log_2 L)$ range. $B = 0$ corresponds to the lowest granularity, whereas $B = \log_2 L$ represents the highest granularity possible (as the segments at the leaf nodes will be of unit length). Any time interval $[start, end]$ in the continuous time domain can be approximated in the discretized time domain. Higher granularity will give more accurate results but increase the time and space complexity of the algorithm and make it more computationally intensive. So, there is a need for a trade-off between accuracy and complexity. Therefore, we set $\frac{L}{2^B}$ equal to ${\Omega}$. Every BIS segment is also augmented with extensions at both ends. These extensions are used for computing partial dissimilarity between segments at borders and ensure that no information is lost near the segment boundaries due to segmentation. 
These extensions only contain $\frac{\Omega}{\lambda}$ discretized time internal units because when we compute dissimilarity between two BIS segments for every unit time interval with value `1' in one BIS, we only need to inspect unit time intervals in the other BIS with interval temporal distance less than $\Omega$ (discussed in sub-section \ref{sec:dissimilarity}). The augmented BIS with extensions is represented by \emph{eBIS}. \subsection{Dissimilarity Measure} \label{sec:dissimilarity} In our work, every BIS is a data point in non-Euclidean space. We define dissimilarity metric between pair of BISs by comparing the relative time of occurrence of unit intervals with value `1' in the two BIS. For every unit interval with value `1' in BIS\textsubscript{1}, we calculate $ITDist$ of nearest unit interval with value `1' in BIS\textsubscript{2}. The average of $ITDist$ value for all unit time intervals with value `1' in BIS\textsubscript{1} from the nearest unit time interval in BIS\textsubscript{2} gives the partial dissimilarity of BIS\textsubscript{1} from BIS\textsubscript{2} denoted by d(1, 2). We also record the count of unit time intervals with value `1' in BIS\textsubscript{1} as $cnt_A$. We repeat the process to find partial dissimilarity of BIS\textsubscript{2} from BIS\textsubscript{1} given by d(2, 1) and record the number of unit time intervals with value `1' in BIS\textsubscript{2} as $cnt_B$ (see Algorithm 1 and 2). \begin{figure} \caption{\label{fig:fig1} \label{fig:fig1} \caption{\label{fig:fig2} \label{fig:fig2} \caption{\textbf{Distance between $A$ and $B$ is within bounds} \label{fig:defined_distance} \end{figure} \begin{figure} \caption{\label{fig:fig1} \label{fig:fig1} \caption{\label{fig:fig2} \label{fig:fig2} \caption{\textbf{Distance between $A$ and $B$ is out of bounds} \label{fig:undefined_distance} \end{figure} \begin{algorithm}[h!] 
\caption{Find nearest interval of BIS1 from index i}\label{min_itd} \begin{algorithmic}[1] \Procedure{MIN\_ITDist}{i, BIS1, $\Omega$} \For {\text{\textit{index} in range(0, $\Omega$)}} \If{BIS1(\textit{i} + \textit{index}) = 1 or BIS1(\textit{i} - \textit{index}) = 1} \State \textbf{return} \textit{index} \EndIf \EndFor \EndProcedure \State \textbf{return} $\infty$ \end{algorithmic} \end{algorithm} \begin{algorithm}[h!] \caption{Partial Distance from BIS1 to BIS2}\label{partial_dist} \begin{algorithmic}[1] \INPUT \Statex $BIS1$, \Comment{A Discretized Time Interval Sequence} \Statex $BIS2$, \Comment{A Discretized Time Interval Sequence} \Statex $\Omega$, \Comment {Threshold for local distance} \OUTPUT \Statex $d(BIS1, BIS2)$, \Comment{Partial Distance from BIS1 to BIS2} \Statex $cnt$, \Comment{number of discrete intervals (or '1') in BIS1} \Procedure{$P\_D$}{\textit{BIS1}, \textit{BIS2}, $\Omega$} \Comment{partial distance from BIS1 to BIS2} \State $\textit{Len} \gets \text{length of }\textit{BIS1} = \text{length of }\textit{BIS2}$ \State $d(\textit{BIS1}, \textit{BIS2}) = 0$ \State $cnt \gets 0$ \For{\text{\textit{i} in range(0, \textit{Len})}} \If {\textit{BIS1(i)} = 1} \Statex \parbox[t]{3in}{\raggedleft /*eBIS2.intervals is set of discretized intervals in extended BIS2*/} \State $L\textsubscript{di} \gets MIN\_ITDist(i, eBIS2, \Omega)$ \If {\text{\textit{L}\textsubscript{di} = $\infty$}} \State \textbf{return} $\infty$ \EndIf \State $d(\textit{BIS1}, \textit{BIS2})\gets d(\textit{BIS1},\textit{BIS2}) + L\textsubscript{di}$ \State $cnt \gets cnt + 1$ \EndIf \EndFor \State\textbf{return} $d(\textit{BIS1},\textit{BIS2}), cnt$ \EndProcedure \end{algorithmic} \end{algorithm} \begin{algorithm}[ht!] 
\caption{Proposed Distance Measure, TDist}\label{d_msr} \begin{algorithmic}[1] \INPUT \Statex $BIS1$, \Comment{A Discretized Time Interval Sequence} \Statex $BIS2$, \Comment{A Discretized Time Interval Sequence} \Statex $\Omega$, \Comment {Threshold for local distance} \OUTPUT \Statex $D(A,B)$, \Comment{Distance between BIS1 and BIS2} \Procedure{Complete\_Dist}{\textit{BIS1}, \textit{BIS2}, $\Omega$} \State $d(BIS1, BIS2), cnt1 \gets P\_D(\textit{BIS1}, \textit{BIS2}, \Omega)$ \State $d(BIS2, BIS1), cnt2 \gets P\_D(\textit{BIS2}, \textit{BIS1}, \Omega)$ \State $D(A,B) \gets ( d(BIS1, BIS2) + d(BIS2, BIS1) ) / (cnt1 + cnt2)$ \State \textbf{return} $D(A,B)$ \EndProcedure \end{algorithmic} \end{algorithm} \begin{figure} \caption{Sample Distance Matrix for 7 BISs} \label{fig:dissimilarity_matrix} \end{figure} Figure \ref{fig:defined_distance} and \ref{fig:undefined_distance} illustrate the dissimilarity measure computation with two examples. If $ITDist$ for any unit time interval with value `1' in either of the two BIS from the nearest unit time interval with value `1' in the other BIS is greater than some threshold $\Omega$ then the partial dissimilarity computation algorithm is halted and the overall dissimilarity between the two BIS is set as infinite (or, undefined) which means that the two BISs can never be linked together (or, one of the two BISs can never be exemplar of the other BIS). The overall dissimilarity between BIS\textsubscript{1} and BIS\textsubscript{2} ($D$(1, 2) = $D$(2, 1)) is calculated from the partial dissimilarities given by Algorithm 2. The dissimilarity matrix / distance matrix thus obtained is \textit{symmetric} about main diagonal, so only one half needs to be stored (see Figure \ref{fig:dissimilarity_matrix}). The value of $\Omega$ takes into account uncertainty in observed data and temporal variability of patterns. 
It has been proved that temporal patterns of human behavior tend to be normally distributed \cite{5f5537a223c84269ba2508ba6845cde5}. This property can be used to model starting (arrival) and ending (departure) time of behavior patterns without uncertainty as normal distributions. But the \emph{observed} behavior patterns contain uncertainty (or, false negatives). The distribution with the combined effects of normal temporal behavior of humans and uncertainty in observations manifests itself in the \emph{observed} arrival and departure distribution of every behavior pattern. To get a sample of this distribution, we record the timestamp (in discrete time domain) when a particular individual was first detected during the day. We construct the dissimilarity matrix / distance matrix for any time interval from the dissimilarity matrices of its constituent segments contained in a segment tree. Let the complete dissimilarity between two BISs $A$ and $B$ of the same length be $D(A, B)$ (see Algorithm 3). We divide $A$ and $B$ into $k$ segments. Let segment $i$ of $A$ and $B$ be $A_i$ and $B_i$, respectively, and let the complete dissimilarity between them be $D(A_i, B_i)$. Let $Cnt(A_i)$ and $Cnt(B_i)$ be the number of unit time intervals with value `1' in the BIS $A_i$ and BIS $B_i$, respectively, as described in Section \ref{sec:dissimilarity}. Then, $D(A, B)$ can be written as \begin{equation} D(A, B) = \frac{\sum_{i=1}^{k} D(A_i, B_i)\times \left(Cnt(A_i) + Cnt(B_i)\right)} {\sum_{i=1}^{k} \left(Cnt(A_i) + Cnt(B_i)\right)} \label{eq:segment_dissimilarity} \end{equation} Using the Eq. 
\ref{eq:segment_dissimilarity}, \textit{MTpattern} calculates the dissimilarity between every pair of BIS in any time interval from the pair-wise dissimilarity score of its constituent BIS segments in the segment tree. So, \textit{MTpattern} can reconstruct dissimilarity matrix for any time interval without the need to compute dissimilarity score between every pair of BIS from scratch. Algorithm 4 shows how we can query the segment tree and retrieve the dissimilarity matrix of all the constituent segments of a time interval ([Le, Ri]). If the length of the time interval in a BIS (number of unit time intervals) is $L$ then the segment tree returns ${O}(\log_{2}{L})$ segments in the worst case. \begin{algorithm} \label{algo:combine} \caption{Retrieve list of Distance Matrix of constituent segments of an interval}\label{combine} \begin{algorithmic}[1] \INPUT \Statex $[Le,Ri]$ , \Comment {Query Interval endpoints} \Statex $root$ , \parbox[t]{3in} {\Comment {\raggedleft root of the segment\_tree. Each node in the tree contains end points and distance matrix for the corresponding segment}} \OUTPUT \Statex list(distance\_matrix), \parbox[t]{2.05in}{\Comment{\raggedleft List of distance matrices for segments of interval $[Le, Ri]$}} \Procedure{Get\_D\_Matrix}{$root$, $[Le,Ri]$} \If {$root.left>=Le$ and $root.right<=Ri$} \State \textit{return} $root.dist\_matrix$ \ElsIf{$root.left>=Ri$ or $root.right<=Le$} \State \textit{return null} \Else \State $lft\_list$ $\gets$ Get\_D\_Matrix$(root.left$, $[Le,Ri])$ \State $rght\_list$ $\gets$ Get\_D\_Matrix$(root.right$, $[Le,Ri])$ \State \textit{return} $[lft\_list,rght\_list]$ \EndIf \EndProcedure \end{algorithmic} \end{algorithm} Distance measure between any two BIS ensures that for every interval in a sequence member of $\Omega$-covering, there exists an interval in exemplar BIS such that their ITDist is less than a given threshold $\Omega$. We also call this minimum ITDist between constituent intervals as local distance. 
Transitivity property ensures that maximum local distance between any two BISs in a $\Omega$-covering is less than 2 * $\Omega$. \subsection{Pattern Discovery} \label{sec:pattern_discovery} In this section, we shall discuss discovery of behavior patterns from the database of BISs. The dissimilarity matrix represents overlapping BIS clusters where every row represents a complete $\Omega$-covering of the corresponding BIS (see Figure \ref{fig:dissimilarity_matrix} for example). These $\Omega$-coverings are overlapping as any given BIS may be a member of complete $\Omega$-covering of more than one BIS. If cardinality of a complete $\Omega$-covering of a BIS is small, the corresponding behavior pattern is not frequent. $\Omega$-coverings which are a subset of other $\Omega$-coverings can also be ignored. If two $\Omega$-coverings are equal then \textit{MTpattern} will ignore the $\Omega$-covering for which average distance of all BIS with the exemplar BIS of the corresponding $\Omega$-covering is higher. If the cardinality is more than a threshold $\alpha$ then \textit{MTpattern} takes the average of all BIS in a $\Omega$-covering for every unit time interval to obtain a behavior pattern in the form of discrete time probability distribution. 
For example, in Figure \ref{fig:dissimilarity_matrix}, if the threshold $\alpha$ is 3, then, \begin{itemize} \item $\Omega$-covering of S-1 is [S-1, S-3, S-5, S-6, S-7] and is frequent \item $\Omega$-covering of S-2 is [S-2, S-5] and is not frequent \item $\Omega$-covering of S-3 is [S-1, S-3, S-4, S-5, S-6, S-7] and is frequent \item $\Omega$-covering of S-4 is [S-3, S-4, S-5, S-6] and is frequent \item $\Omega$-covering of S-5 is [S-1, S-2, S-3, S-4, S-5, S-7] and is frequent \item $\Omega$-covering of S-6 is [S-1, S-3, S-4, S-6] and is frequent \item $\Omega$-covering of S-7 is [S-1, S-3, S-5, S-7] and is frequent \end{itemize} Since we are not concerned about the $\Omega$-coverings which are subset of other $\Omega$-coverings, we ignore $\Omega$-coverings of S-2, S-4 and S-7. \subsubsection{Optimization} \textit{MTpattern} optimizes the clustering of BIS by finding minimum number of disjoint (non-overlapping) BIS clusters such that every BIS is a member of some cluster and the exemplar BIS of any cluster has every member BIS of the same cluster in its Complete $\Omega$-Covering. This task can be broken down to a \textit{constrained optimization problem} where we need to minimize number of clusters under the constraint that every BIS should be a member of some cluster and every member of a cluster should be $\Omega$-covered by the exemplar BIS of the corresponding cluster. If there are multiple arrangements of clusters that satisfy the above constraint, then that arrangement should be chosen which minimizes net dissimilarity of all BIS with their corresponding cluster's exemplar BIS. \textit{MTpattern} achieves this optimization by using affinity propagation which is a relatively new clustering technique based on the concept of ``message passing" between data points (or BISs). 
It starts by considering all BISs as candidate exemplars and exchanges messages between every pair of BISs in every iteration till a good set of exemplars are obtained and the algorithm converges. The advantage of this technique is that it does not need number of clusters to be pre-specified and it clusters around ``exemplar" BISs \cite{frey07affinitypropagation} (members of the input set that are good representative of their corresponding cluster). This suits to the problem of behavior modeling as it is not possible to have idea about the number of underlying modes or clusters in the visiting sequences of individuals beforehand. \textit{MTpattern} minimizes the number of clusters by tuning the \emph{preference} parameter of affinity propagation\footnote{``The preference of point $i$, called $p$($i$) or $s$($i$, $i$), is the \emph{a priori} suitability of point $i$ to serve as an exemplar. Preferences can be set to a common global value, or customized for every data point. High values of the preferences will cause affinity propagation to find many exemplars (clusters), while low values will lead to a small number of exemplars (clusters)" \cite{frey07affinitypropagation}}. The \emph{preference} parameter determines the granularity of the clusters. The value of \emph{preference} is usually set to the median of data points which outputs moderate number of clusters. On the other hand, it can be shown mathematically, that by setting \emph{preference} to a very large negative value (negative infinity), affinity propagation converges to a solution which outputs minimum number of clusters such that every BIS in every cluster is in the $\Omega$-Covering of the ``exemplar" BIS of the corresponding cluster. In the following equations, \textit{MTpattern} use \emph{similarity} metric instead of \emph{dissimilarity} metric where a similarity score is negative of dissimilarity score. 
\begin{equation} \begin{split} Preference = s(S_i, S_i) \quad \forall i \in \{1, \dots, m\} \label{eq:pref} \end{split} \end{equation} In Eq. \ref{eq:pref}, $s(S_i, S_i)$ represents the self similarity or \emph{preference} of data point (or, BIS) $i$. \textit{MTpattern} sets the preference value for all BIS to the same global value which ensures that affinity propagation is not biased towards choosing any BIS as an exemplar beforehand. \begin{equation} \begin{split} \begin{gathered} Inter\_Sim = \sum_{i=1}^{m}\sum_{c=1}^{C} \begin{cases} s(i, ex_c),& s(i, ex_c) \neq -\infty \\ 0, & \text{otherwise} \end{cases} \end{gathered} \end{split} \label{eq:inter_similarity} \end{equation} \begin{equation} \begin{split} \begin{gathered} Preference \approx -\infty \end{gathered} \end{split} \label{eq:pref_is_comp} \end{equation} In Eq. \ref{eq:inter_similarity}, $s(i,j)$ is the similarity score between BIS $i$ and BIS $j$ and is equal to the negative of the dissimilarity score between BIS $i$ and BIS $j$. $C$ represents the total number of clusters and $ex_c$ represents the exemplar BIS of the cluster $c$. In Eq. \ref{eq:pref_is_comp}, \textit{MTpattern} sets the value of \textit{preference} to a large negative value, much smaller than $Inter\_Sim$ which is the sum of all pair-wise similarities between similar BISs. Affinity propagation seeks to find the number of clusters that maximizes the total similarity for each cluster which is measured as the sum of the similarities between non-exemplar BISs and their exemplar BIS and the sum of preferences for selected exemplar BISs. A formal statement of the \textit{MTpattern} optimization problem that underlies affinity propagation begins with the definition of two inputs: the similarity matrix, $s(i, j)$; and the \textit{preference}. 
There are two sets of decision variables associated with the optimization problem: $y_c = 1$ if a BIS \textit{c} is selected as an exemplar and 0 otherwise, for $1 \leq c \leq C$; and $x_{ic} = 1$ if BIS \textit{i} is assigned to the cluster for which BIS \textit{c} serves as an exemplar and 0 otherwise, for $1 \leq i \leq m$ and $1 \leq c \leq C$. The integer linear programming formulation of the problem can then be stated as follows: \begin{equation} Net\_Sim = \sum_{i=1}^{m}\sum_{c=1}^{C} s(i, ex_c) \times x_{ic} + \sum_{c=1}^{C} s(ex_c, ex_c) \times y_c \label{eq:net_similarity_AP} \end{equation} subject to \begin{equation} \sum_{c=1}^{C} x_{ic} = 1, \quad \text{for all } 1 \leq i \leq m \text{ and } ITDist(i, ex_c) < \Omega; \label{eq:xic} \end{equation} \begin{equation} x_{ic} \leq y_c, \quad \text{for all } 1 \leq i \leq m \text{ and } 1 \leq c \leq C; \label{eq:xic-yc} \end{equation} \begin{equation} x_{cc} = y_c, \quad \text{for all } 1 \leq c \leq C. \label{eq:xcc-yc} \end{equation} \begin{equation} x_{ic} \in \{0,1\}, \quad \text{for all } 1 \leq i \leq m \text{ and } 1 \leq c \leq C; \label{eq:xic-0} \end{equation} \begin{equation} y_c \in \{0,1\}, \quad \text{for all } 1 \leq c \leq C. \label{eq:yc-0} \end{equation} The objective function (Eq. \ref{eq:net_similarity_AP}) of the optimization problem is $Net\_Sim$, and the first and second terms on the right-hand side of the equals sign are the BISs similarity and the exemplar preference similarity, respectively. 
Constraint set (Eq. \ref{eq:xic}) guarantees that each BIS is assigned to exactly one exemplar and each non-exemplar BIS in a cluster is $\Omega$-covered by the exemplar BIS of the corresponding cluster under the given constraint of local dissimilarity. Constraint set (Eq. \ref{eq:xic-yc}) ensures that a BIS is not assigned to a BIS that is not selected as an exemplar. Constraint set (Eq. \ref{eq:xcc-yc}) is incorporated in the affinity propagation algorithm to ensure that, if a BIS is selected as an exemplar, then that BIS must be assigned to the cluster for which it serves as the exemplar. Finally, constraint sets (Eq. \ref{eq:xic-0}) and (Eq. \ref{eq:yc-0}) enforce the binary restrictions on the $x_{ic}$ and $y_c$ variables, respectively. \begin{equation} Net\_Sim = \sum_{\substack{i=1\\i\neq c}}^{m}\sum_{\substack{c=1\\c\neq i}}^{C} s(i, ex_c)+\sum_{c=1}^{C} s(ex_c, ex_c) \end{equation} \begin{equation} \qquad\qquad = \sum_{\substack{i=1\\i\neq c}}^{m}\sum_{\substack{c=1\\c\neq i}}^{C} s(i, \, ex_c)+C\times preference \end{equation} \begin{equation} \qquad \qquad \approx C\times preference \text{ (from Eq. \ref{eq:inter_similarity} and \ref{eq:pref_is_comp})}\qquad \label{eq:net_similarity} \end{equation} \textit{MTpattern} maximizes $Net\_Sim$ similarly to affinity propagation \cite{frey07affinitypropagation}. For $x_{ic}$ = 1 and $y_c$ = 1, we get Eq. \ref{eq:net_similarity}, where $s(ex_c, ex_c)$ is also equal to the \textit{preference} (from Eq. \ref{eq:pref}). As the preference is set to $-\infty$, increasing the number of clusters ($C$) will decrease the $Net\_Sim$ value, as the preference is negative and the goal is to maximize $Net\_Sim$. Therefore, a globally stable solution will minimize the total number of clusters (i.e., $C$) for convergence. Once clusters are obtained, an average of all the BISs in the cluster is taken for every unit time interval.
This average gives us a probabilistic view of an individual's behavior in every time slot. This average is the manifestation of the cumulative effect of all BISs in the same cluster. Because the dissimilarity matrix is sparse, the number of messages exchanged between data points in every iteration of affinity propagation is significantly smaller, reducing both the time complexity (assuming serial message passing) and the space complexity of clustering. In Figure \ref{fig:dissimilarity_matrix}, after applying affinity propagation, we get two clusters. The exemplar BIS of the first cluster is S-3 and it contains S-1, S-3, S-4, S-6 and S-7. The exemplar BIS of the second cluster is S-5 and it contains S-2 and S-5. The $Net\_Sim$ (see Eq. \ref{eq:net_similarity}) is the minimum in this configuration. \section{Complexity Analysis} In this section, we shall discuss the time and space complexity for the two main components of the proposed solution, i.e., the dissimilarity measure and pattern discovery, in detail. \subsection{Time Complexity Analysis} \subsubsection{Distance Measure Computation} The time complexity of the distance measure (Algorithm 3) between two segments, each of length \textit{N\textsubscript{s}}, can be given by the total number of MIN-ITDist computations. The maximum number of times MIN-ITDist is executed for a pair of segments is equal to the total number of 1’s in both the sequences combined, which can be 2\textit{N\textsubscript{s}} in the worst case. One instance of the MIN-ITDist method runs in O($\Omega$) time. So, the complexity of one distance computation is 2O($\Omega$)\textit{N\textsubscript{s}}, i.e., \textit{O}(\textit{N\textsubscript{s}}$\Omega$). Since there are \textit{e}\textsuperscript{2} pairs of sequences for every segment (where \textit{e} is the total number of days), in the worst case, the time complexity to generate the distance matrix for a time segment of length \textit{N\textsubscript{s}} is \textit{O}(\textit{e}\textsuperscript{2}\textit{N\textsubscript{s}}$\Omega$).
The above distance matrix represents one node in the segment tree. A similar operation needs to be done for every node (every segment at every level). The time complexity for constructing the distance matrices for the segment tree is equal to the sum of the time complexities to generate the distance matrices of all nodes. If the total length of the root node (unsegmented sequence) is \textit{L} \textit{BIS}, the height of the tree will be O(log\textsubscript{2}\textit{L}) since we divide the segment length by 2 at every level. The total number of leaf nodes is (2\textsuperscript{log\textsubscript{2}\textit{L}}) = \textit{L}. The time complexity for computing the distance matrices for all leaf nodes is \textit{O}(\textit{L}\textit{e}\textsuperscript{2}\textit{N\textsubscript{s}}$\Omega$). Once we compute the distance matrices for the leaf nodes in the segment tree, the distance matrix for a non-leaf node in the segmentation can be computed using the distance matrices of its children nodes without any information loss, i.e., by using \textit{eBIS}. The time complexity for generating the distance matrices for all non-leaf nodes at a given depth \textit{d} can be given by the time complexity for computing one node at that level, \textit{e}\textsuperscript{2} (the distance between one pair of segments can be calculated in constant time using the distance matrices of its children nodes; there are in total \textit{e}\textsuperscript{2} pairs), multiplied by the total number of nodes at that level, O({2}\textsuperscript{\textit{d}}), giving O(\textit{e}\textsuperscript{2}{2}\textsuperscript{\textit{d}}).
The total cost will be the sum of the time complexities for leaf nodes and non-leaf nodes, given by \textit{O}(\textit{L}\textit{e}\textsuperscript{2}\textit{N\textsubscript{s}}$\Omega$) + $\sum_{d=0}^{\log_2 L} O(e^{2} \times 2^{d})$ $\Rightarrow$ O(\textit{L}\textit{e}\textsuperscript{2}\textit{N\textsubscript{s}}$\Omega$) + O(\textit{L}\textit{e}\textsuperscript{2}) $\Rightarrow$ O(\textit{L}\textit{e}\textsuperscript{2}\textit{N\textsubscript{s}}$\Omega$). Thus, creating hierarchical segments from leaf nodes does not asymptotically take more time complexity than it would take for computing the distance matrices for the leaf nodes alone. \subsubsection{Pattern Discovery} For clustering, \textit{MTpattern} uses affinity propagation. The underlying concept of this algorithm is belief propagation and the worst-case time complexity is O(\textit{r'}\textit{d\textsubscript{p}}\textsuperscript{2}) where \textit{r'} is the total number of iterations for convergence and \textit{d\textsubscript{p}} is the total number of data points. For a sparse distance matrix, this time complexity is lower, as messages will not be passed between data points for whom the distance is not defined (or infinite). For a given distance matrix with \textit{d\textsubscript{p}} data points, treating the number of iterations \textit{r'} as a constant, the time complexity is O(\textit{r'}\textit{d\textsubscript{p}}\textsuperscript{2}) $\Rightarrow$ O(\textit{d\textsubscript{p}}\textsuperscript{2}). \subsection{Space Complexity Analysis} \subsubsection{Distance Measure Computation} The space complexity to store the distance matrices for the entire segment tree is equal to the size of one node multiplied by the total number of nodes. Every node contains the segment dimensions [start, end], the left and right pointers of the children nodes and the distance matrix for that segment.
Segment dimensions and the left and right child pointers are of constant size, whereas the distance matrix takes O(\textit{e}\textsuperscript{2}) space where \textit{e} is the number of sequences (or days). For all segments at depth \textit{d} in the segmentation tree, the space complexity is O(\textit{e}\textsuperscript{2}{2}\textsuperscript{\textit{d}}). For the entire segment tree, the space complexity is given by $\sum_{d=0}^{\log_2 L} O(e^{2} \times 2^{d})$ $\Rightarrow$ O(L\textit{e}\textsuperscript{2}). \subsubsection{Pattern Discovery} The exhaustive search for all $\Omega$-coverings for a given time interval is in place so it does not require any extra space. For clustering using affinity propagation, there is a need to keep in memory two messages (responsibility and availability) from every other data point; so, the space complexity of a single instance of affinity propagation is O(\textit{e}\textsuperscript{2}). \section{Experiments and Analysis} At a high level, an effective clustering algorithm should be able to cluster similar users together and different ones separately. We evaluate our behavioral clusters' quality by finding how well they capture similar users. To evaluate clustering quality, internal and external evaluation measures are used. Internal criteria are used for finding clustering quality when ground truth in the dataset is not available, while external criteria are used when ground truth is available. We conduct three experiments from different perspectives to evaluate and compare our proposed approach \textit{MTpattern} with baselines. We select three widely used clustering algorithms as baselines: K-means, Hierarchical Clustering (HC), and a variant of HMM, Expectation-Maximization (EM) \cite{dempster1977maximum}. We also compare \textit{MTpattern} with one of the most popular dimensionality reduction techniques, PCA.
\textbf{Experiment 1:} Evaluation and Comparison with Baselines through Internal Criteria. With internal measures, the clustering evaluation is compared only with the result itself, i.e., to evaluate the structure of the found clusters and the relationships among these clusters. Evaluation of clustering quality using internal measures is preferred in several real-world scenarios, as it is not always possible to obtain ground truth with the data, and data labeling is an expensive task. \textbf{Experiment 2:} Evaluation and Comparison with Baselines through External Criteria. In most real applications, complete knowledge of the ground truth is not available. Therefore, external measures are widely used to evaluate clustering on synthetic data, for which the ground truth is known. We create a synthetic dataset for this purpose. \textbf{Experiment 3:} Evaluation of Distance Measure. The clustering quality is highly dependent on the distance measure used between the data objects. We compare our proposed distance measure with the widely used Euclidean distance (ED) and Dynamic Time Warping (DTW). In \textit{MTpattern}, the results are generated for two values of \textit{preference} for affinity propagation: \textit{median of dissimilarity scores} and \textit{$\ll$ median of dissimilarity scores} for different values of $\Omega$. In addition, the length of BIS (L) for any particular day is equal to 192 (1 hour = 8 * (7 mins + 30 secs), 24 hours = 192 * (7 mins + 30 secs)) as $\delta$ = 15 mins. \textit{MTpattern} uses affinity-propagation-based clustering on these timestamps. We perform each experiment with the $preference$ parameter set to the median of data points and to $-\infty$ to demonstrate the efficacy of the $-\infty$ preference. The experiment is also repeated with different values of $\Omega$ ($\delta$, $2\times\delta$, $3\times\delta$ and $4\times\delta$).
\subsection{Datasets} We use two real-world datasets and a planted (synthetic) dataset to validate the practicality of our proposed method, \textit{MTpattern}, for the discovery of behavior patterns. \subsubsection{WiFi dataset} Nowadays, the usage of Smartphones is continuously increasing all over the globe. We use the unmodified Smartphone-based user identification and tracking system (called SmartITS) \cite{Kulshrestha} which continuously tracks MAC ids of user equipments (Smartphones/BLE tags/Bluetooth devices) in indoor-outdoor environments seamlessly and uploads the users' traces into the cloud server for long-term analysis. \begin{figure} \caption{Smartphone working as a Client (left) and Portable Sensing Unit (right).} \label{fig:patterns2} \end{figure} A \textit{portable sensing unit} (PSU) collects the individuals' traces (records) in the following format $<$\textit{device id}, \textit{MAC address}, \textit{Time}, \textit{Client RSSI}, \textit{Location}$>$ where \textit{device id} is a \textit{PSU} name, \textit{MAC address} is the MAC id of the individual detected, \textit{Time} is the physical time at which an individual is detected, \textit{Client RSSI} is the signal strength of an individual to estimate the physical distance (in meters) between the \textit{PSU} and the detected individual. \textit{Location} captures the GPS coordinates of the \textit{PSU}. Location comprises latitude, longitude, and accuracy. \textit{PSUs} keep uploading the collected data to the cloud server periodically where data is stored in a time-stamped manner. Moreover, for security and privacy reasons, all MAC addresses are strictly anonymized using the AES algorithm. Experiments are carried out at the Indian Institute of Technology, Roorkee (IITR) campus.
IITR is an academic and research institute in the state of Uttarakhand, India and has a 1.48 km\textsuperscript{2} campus including many objects, such as academic departments, administrative buildings, hostels, library, banks, post office, hospital, schools, canteen shops, etc. We set our \textit{PSUs} in the UGPC lab in the Department of Computer Science and Engineering, IITR and track all those smart devices for 3 months using \textit{SmartITS} (for which the WiFi is turned ON). In total, there are 11,853 records collected at the UGPC. \iffalse We preprocess the collected data from the sensors and transformed the data into binary time series. In a binary time series, 1 represents that an individual is present in the sensor vicinity, while 0 represents individual is either absent or sensor could not capture the presence. Binary time series data shown in Figure \ref{fig:patterns1} can be represented as time point sequence shown in Figure \ref{fig:preProcessing22}. \begin{figure} \caption{Binary time series data for \textit{N individuals}} \label{fig:patterns1} \end{figure} \fi \begin{figure} \caption{Time delay between consecutive WiFi probe packets} \label{fig:probepackets} \end{figure} \subsubsection{Reality Mining} The reality mining dataset was collected by MIT Media lab where 95 academic mobile phone users are tracked for approximately 9 months. We use this dataset and mine the visiting patterns of \textit{home} (using the cell tower associated with the home-ids) \cite{r2006}. Both real-world datasets have uncertainty in the data: in the WiFi dataset, sensors may fail to capture emitted packets from individuals' smart devices, while the reality mining dataset can lose the cell tower signal or record inconsequential tower transitions due to the dense tower network and overlapping tower range. It can be observed that both real-world datasets share the common uncertainty arising from the inherent temporal variability in any \textit{individual(s)} nature.
\subsubsection{Computation of $\delta$} We set the threshold $\delta$ equal to 15 mins based on an experimental analysis. To find the appropriate value of the threshold $\delta$, we analyze the duration between consecutive WiFi packets emitted by the same device in the direct line of sight which are stationary and within the range of the adapter. We use four android phones, one iPhone and one windows phone for this experiment. We inspect the time delay between all consecutive WiFi packets from the same device and realize that for more than 95$\%$ of packets, the time delay between the current packet and the previous packet is below 15 minutes (see Figure \ref{fig:probepackets}). Therefore, we set the threshold $\delta$ equal to 15 minutes with a false negative rate of less than 5$\%$. If devices are not placed in the direct line of sight and are not static, then the average false negative rate for $\delta$ equal to 15 mins increases to 38$\%$ due to packet loss. In other words, the threshold $\delta$ is the transmitting interval of wireless packets from a client smart device in order to advertise its presence to nearby devices or actively discover the access points in its proximity. The computation of the threshold $\delta$ depends on the data collection strategy. For example, in the reality mining dataset, Bluetooth scans periodically at 5 minute intervals. In contrast, we collect all the incoming wireless packets and then preprocess them into a Time Point Sequence (PS) as shown in Figure 3. Then, we use the threshold $\delta$ to convert point sequence data into interval sequence data. Furthermore, the threshold $\delta$ determines the width of a discretized bin, and the width of a discretized bin decides the granularity of clustering. The size of each cluster is described by the granularity. Therefore, we can say that we use the threshold $\delta$ to decide the size of a cluster instead of pre-specifying the number of clusters.
\subsubsection{Planted (synthetic) Patterns} Clustering efficiency can be accurately measured on synthetic datasets, since the true distribution and its modes are known. We generate different planted patterns with a pre-determined temporal variability and sensor uncertainty to remove any bias that can be present in the above real-world datasets. For simulating temporal variance, we vary the sequences' start-end in a given visiting mode/pattern so that the start/end points follow a normal distribution around a mean with $3\sigma$ (standard deviation) equal to 4 $\lambda$ (1 $\lambda$ = 7.5 minutes, 4 $\lambda$ = 30 minutes). When we perform clustering on planted patterns, we set $\Omega$ between 30 minutes (4 $\lambda$) and 1 hour (8$\lambda$). Note that, by definition, 99.74\% of data points reside between $-3\sigma$ and $3\sigma$, so we have significantly reduced the probability of any MIN\_ITDist being more than $\Omega$ because of temporal variation, which is in line with our assumption of a small inherent temporal variability in human actions. We set the visiting pattern with an error probability of 0.2, i.e., the sensor can fail to sense the signal even though an individual was in the surroundings with a probability of 0.2 for a given \emph{IS}. The probability of erroneously failing to sense a signal over consecutive BISs decreases as a power of 0.2. So, the false negative probability for 30 minutes (4$\lambda$) is $0.2^{4} = 0.0016$, which is again very small. This dataset has only 1000 different and random patterns. \subsection{Experiment 1 : Evaluation of Clustering Quality through Internal Criteria} We evaluate \textit{MTpattern} using two internal measure metrics: number of clusters and accuracy score. Both metrics consider some data points as representatives of each cluster. Below, we define them formally. \begin{enumerate} \item Number of clusters: It is the total number of clusters achieved for the given data.
\item Accuracy Score: It measures how well an output pattern correctly represents all members of a cluster. A small score means high accuracy and a high score means low accuracy. \end{enumerate} We plot the total number of clusters achieved after applying the \textit{MTpattern} and $baseline$ clustering approaches. Figure \ref{fig:preProcessing1} (a), (b) and (c) show that the total number of clusters achieved for all three datasets after clustering is lower for our proposed technique, $MTpattern$, compared to all $baseline$ approaches. We find that for all approaches, the number of clusters decreases as the $\Omega$ increases. $MTpattern$ tunes affinity propagation to give the minimum number of clusters, while satisfying the upper bound on local dissimilarity. There is no objective way in which such minimization can be achieved for $PCA$ or for any other partition-based clustering, i.e., K-means and HC. Since eigenbehavior outputs eigenvectors (unit magnitude), we first convert the frequent behavior patterns obtained from our proposed approach into unit vectors. We then calculate the accuracy score as per Eq. \ref{eq:ICV}. In our case, there are only two classes, \textit{present} and \textit{absent}. For every visiting sequence, we compare it with the unit vector that represents the corresponding cluster. Every correct prediction (in a unit time interval) will be rewarded and every wrong prediction will be penalized. The sum of accuracy scores is taken for all segments at all segmentation levels for all individuals to represent the overall accuracy score for the techniques.
\begin{figure*} \centering \caption{Number of Clusters, (a) WiFi dataset (b) Reality Mining Dataset, and (c) Simulated Dataset} \label{fig:preProcessing1} \end{figure*} \begin{figure*} \centering \caption{Accuracy Score, (a) WiFi dataset (b) Reality Mining Dataset, and (c) Simulated Dataset} \label{fig:preProcessing} \end{figure*} \begin{equation} Accuracy\_Score = \sum_{i=1}^{e} \sum_{j=1}^{N_s} \sum_{c=1}^{N_c} Flag_{ijc} \times P_{ijc} \label{eq:ICV} \end{equation} \begin{equation} Flag_{ijc}= \begin{cases} -1,& S_{ij} \in c\\ 1, & \text{otherwise} \end{cases} \end{equation} \textit{e} is the total number of sequences or number of days. \textit{N\textsubscript{s}} is the length of a segment. \textit{N\textsubscript{c}} is the total number of classes, i.e., present and absent. $S_{ij}$ is the \textit{j}\textsuperscript{th} bit of sequence `\textit{i}'. $P_{ijc}$ is the value of the \textit{j}\textsuperscript{th} bit of \textit{P}\textsubscript{i} (which is the frequent unit vector pattern representing the cluster which sequence `\textit{i}' belongs to) being of class \textit{c}. The above equation rewards correct predictions and penalizes wrong predictions. Figure \ref{fig:preProcessing} (a), (b) and (c) show that the accuracy score for all three datasets is lower for our proposed technique, \textit{MTpattern}, compared to $PCA$ and the $baseline$ approaches.
Small accuracy score shows that \textit{MTpattern} has high accuracy, i.e., \textit{MTpattern} has high cluster accuracy compared to baselines. \textit{MTpattern} takes into account local proximity between discrete time intervals unlike baseline approaches which treat every time interval as an independent dimension. \iffalse \textit{MTpattern} puts an upper bound on local dissimilarity which is allowed for the two sequences to be similar, whereas there is no such provision in $PCA$. \fi Even though eigenbehavior representation has been used to find the human's behavior structure, it has several drawbacks: First, $PCA$ is a dimensionality reduction method which does not exploit domain-specific knowledge. On the other hand, assigning physical meanings to the weight values in eigenvectors is challenging and can be too subjective for humans’ behavior patterns discovery as eigenvectors themselves have no well-defined physical meanings and are only used to project data to low-dimensional space. Second, the eigenbehavior representation does not consider the uncertainty in humans’ behaviors. Third, Eigenvectors are not suitable for clustering as this process lacks theoretical support. Our proposed model, \textit{MTpattern} compensates these drawbacks. After doing multi-modal analysis for each time slot, frequent co-occurring behavior in different time slots in a day is presented. This gives us broader view of individuals' behavior. The correlated behavior may not be serial or contiguous. E.g., a peculiar early morning behavior may lead to some different behavior at night. We owe this advantage to the segmentation of day into time slots. Early techniques, like eigenbehavior fail to capture non-contiguous co-occurring frequent behavior. As we have already mentioned that determining number of clusters in advance or maximum permissible representational error is not trivial and is often subjective. 
Partition-based clustering, like the K-means and HC clustering algorithms, and model-based clustering, like EM, suffer from the above-mentioned issue. Moreover, \textit{MTpattern} puts an upper bound on the local dissimilarity which is allowed for two sequences to be similar, whereas there is no such provision in $PCA$, partition-based clustering and model-based clustering. \begin{figure} \caption{One EM model (with 2 hidden states) fits two different binary sequences.} \label{fig:ModelBasedClustering} \end{figure} On the other hand, we also observe that model-based sequence clustering methods, like HMM and EM, are more sensitive to the order of events and invariant to the actual time of occurrence of the events as shown in Figure \ref{fig:ModelBasedClustering}. We fix the number of hidden states to 2\footnote{There is no golden rule to find the correct number of hidden states in HMM}. As it turns out, EM for both the cases will have the same sequence of hidden states. The EM models for both the cases also have the same transition and emission probability matrices. So, EM-based clustering will render these two sequences indistinguishable. We also assume the observations in Figure \ref{fig:ModelBasedClustering} to be deterministic, which is a prerequisite for model-based clustering to work. But, real-world data often contains uncertainty and non-deterministic instances in observation as shown in Example 1 (see Figure 1). Also, we observe through the results in Figures 10 and 11 that the synthetic dataset has a similar performance pattern for the proposed approach, \textit{MTpattern}, and the baseline clustering approaches. This gives us confidence that the synthetic dataset has a similar data distribution to the real-world datasets. Thus, the synthetic dataset can be used further for evaluating clustering quality.
Furthermore, we evaluate the time efficiency of \textit{MTpattern} for the optimal number of clusters achieved in the WiFi dataset ($\Omega$ = 15 mins) w.r.t. the K-means, EM and Hierarchical clustering algorithms. The execution times of the \textit{MTpattern}, k-means, EM and Hierarchical clustering algorithms are 9 mins, 28 mins, 16 mins, and 43 mins, respectively. \textit{MTpattern} seems to outperform methods that randomly re-sample potential centers. Finding $C$ clusters within $d\textsubscript{p}$ data points involves a search space of size $d\textsubscript{p}$ choose $C$, or [$d\textsubscript{p}$($d\textsubscript{p}$-1)($d\textsubscript{p}$-2)...($d\textsubscript{p}$-$C$+1)]/[$C$($C$-1)($C$-2)...(2)], which is polynomial in $d\textsubscript{p}$ if $C$ is within a fixed distance of 1 or $d\textsubscript{p}$, but exponential in $d\textsubscript{p}$ if $C$ grows linearly with $d\textsubscript{p}$. k-centers clustering algorithms, such as K-means, depend on random initialization and perform better with a smaller search space, as the initialization will be more likely to be sampled from a region near the optimal solution. Therefore, we can say that these methods work well when $C$ is close to 1 or $d\textsubscript{p}$. On the other hand, \textit{MTpattern} exploits affinity propagation, which does not rely on random sampling. Affinity propagation exchanges messages between data points (i.e., $d\textsubscript{p}$) while considering all of them to be potential exemplars. It works better in the large-search-space situation, where $C$ is not close to 1 or $d\textsubscript{p}$. \subsection{Experiment 2: Evaluation of Clustering Quality through External Criteria} We evaluate \textit{MTpattern} using three external measure metrics: \textit{Purity}, \textit{Rand Index} and \textit{F-measure}. Below, we define them formally. \begin{enumerate} \item Purity: It measures the ratio of the items that are in the cluster with the same class as its own.
\item Rand Index: It measures the accuracy of the clustering result in terms of percentage of decision that is correct. \item F-measure: It is a statistical classification measure which considers both the precision and the recall to compute the score. Precision is the number of correct results divided by the number of all returned results and the recall is the number of correct results divided by the number of results that should have been returned. \end{enumerate} We calculate these measures for the simulated dataset as for this dataset, we have knowledge about the clusters and their membership. So, it is possible to compare the actual original clusters with the clusters that \textit{MTpattern} has discovered. \begin{table}[h!] \centering \begin{tabular}{||c c c c c||} \hline Metrics & K-means & HC & EM & MTpattern \\ [0.5ex] \hline\hline Purity & 0.68 & 0.66 & 0.69 & 0.98 \\ \hline Rand Index & 0.64 & 0.63 & 0.65 & 0.93 \\ \hline F-Measure & 0.55 & 0.53 & 0.59 & 0.92 \\ [1ex] \hline \end{tabular} \caption{Results of External Measures on different clustering algorithms for 30 mins of $\Omega$} \label{table:1} \end{table} For calculating purity, every cluster is assigned to most frequent class, then accuracy is calculated by counting the number of correctly assigned data points divided by total number of data points. High purity percentage shows that patterns are classified correctly, while low purity percentage shows wrong classification of the patterns. The purity percentage for K-means, HC and EM approach is 68\%, 66\%, 69\% for all 30 mins of $\Omega$ since these approaches are not temporally sensitive. The purity percentage of \textit{MTpattern} for preference $<<$ Median ($\approx-\infty$) is 98\% for 30 mins of $\Omega$. This result shows that \textit{MTPattern} is able to correctly cluster sequences with higher accuracy as compared to baselines. 
As $\Omega$ increases, purity and the number of clusters (note that the number of clusters is the inverse of data compression) naturally decrease, as sequences farther apart, which are members of different classes, may be grouped in the same cluster. We assign two items to the same cluster iff they are similar. A true positive (TP) decision assigns two similar items to the same cluster, and a true negative (TN) decision assigns two dissimilar items to different clusters. But, a False Positive (FP) decision assigns two dissimilar items to the same cluster, while a False Negative (FN) decision assigns two similar items to different clusters. The Rand Index (RI) measures the percentage of decisions that are correct, i.e., accuracy. But, RI gives equal weight to FPs and FNs. Separating similar items is sometimes worse than putting pairs of dissimilar items in the same cluster. Therefore, we also use the F-measure (F-Score) to evaluate the clustering quality by penalizing FNs more strongly than FPs by selecting a value $\beta > 1$. Table \ref{table:1} shows a comparison between the K-means, HC, EM and MTpattern algorithms from the point of view of purity, Rand Index (RI), and F-measure. The purity, RI and F-measure of \textit{MTpattern} are significantly higher than those of the baseline approaches, which confirms a better clustering quality. \subsection{Experiment 3: Evaluation of Distance Measure } There are many methods to calculate the distance information; the choice of distance measure is a critical step in clustering. A distance measure calculates the similarity between two elements and also influences the shape of the clusters. We compare our proposed distance measure with the well-known Euclidean distance (ED) and Dynamic Time Warping (DTW). DTW is a generalization of the ED. We define a novel distance measure between discrete time series which takes into account the temporal proximity of sequences and guarantees an upper limit on the maximum variance within the cluster.
We feed this sparse distance matrix into affinity propagation, which finds naturally dense clusters. Table \ref{table:2} shows the F-measure for the different distance measures ED, DTW and our proposed distance measure, TDist, for the Affinity Propagation clustering algorithm. Results show that our proposed distance measure, TDist, outperforms ED and DTW. The reason is that the conventional distance measures used in clustering, like the Euclidean distance, Manhattan distance, Jaccard distance, Kullback Leibler distance, etc., consider every time slot as an independent dimension and hence, fail to capture the temporal dynamics between neighbouring time instances. \begin{table}[h!] \centering \begin{tabular}{||c c c c||} \hline Distance measure & 30 mins & 45 mins & 60 mins \\ [0.5ex] \hline\hline ED & 0.56 & 0.62 & 0.69 \\ \hline DTW & 0.67 & 0.73 & 0.78 \\ \hline TDist & 0.92 & 0.95 & 0.97 \\ [1ex] \hline \end{tabular} \caption{F-measure for different distance measures} \label{table:2} \end{table} In addition, \emph{Dynamic Time Warping} (DTW) is widely used to find the dissimilarity between temporal sequences independent of some non-linear variations between them. Unlike conventional distance measures, DTW takes into account the affinity between neighbouring time instances. But, the DTW metric only takes the difference of magnitude of the two sequences after aligning them and is not suitable for binary or categorical time series with uncertainty, as it does not take into account the extent of warping needed to perfectly align two sequences. Therefore, the DTW metric is not the right choice for binary or categorical time series, as the DTW distance for categorical time series will always be 0 after alignment, which makes it impossible to distinguish between sequences based on the extent of non-linear variation between them.
Moreover, in DTW metric, it is required to map every observation to some observation of the other signal which may lead to unexpected results in case of uncertain or non-deterministic observations. \section{Conclusion and Future Works} In this paper, we mine temporal variable patterns from uncertain temporal data. We propose a novel approach to effectively cluster behavior patterns of individuals (named, \textit{MTpattern}) from the temporal data. We propose a dissimilarity measure, called TDist, between visiting sequences that considers only temporal distance between deterministic values. Our dissimilarity measure is sensitive to local temporal differences between sequences and restricts local temporal difference between similar sequences below a threshold. Since it is not possible to know the number of patterns exhibited by an individual in a given segment, we employ affinity propagation, a non-parametric exemplar based clustering technique. We optimize the clustering by tuning the \emph{preference} value to output minimum number of clusters such that every member sequence of a cluster is similar to the exemplar sequence of the cluster. We also use segment tree data structure to pre-compute distance matrix for segments which can be in turn used to compute distance matrix for any interval. Our extensive experiments show that \textit{MTpattern} outperforms several baseline clustering approaches. In future, we can correlate behavior patterns from multi-source data with various external factors to help us to understand human behavior more accurately.\\ \iffalse \section{Time Complexity Analysis} \label{app:time_complexity} \textbf{Segmentation} - Time complexity for constructing segment tree is equal to time complexity to generate content of a single node multiplied by the total number of nodes in the tree. Assume the total length of unsegmented sequence to be $M$. 
The time complexity for the highest granularity level in the segment tree hierarchy is ${O}((M/len)\times len \times N^2)$ = ${O}(M\times N^2)$ in the worst case. Once we compute the distance matrix for highest granularity level in the segment tree, distance matrix for higher levels in the segmentation can be computed using the distance matrix at lower granularity (by recording the number of 1's in the segment) without the need for finding the distance exclusively between the two segments. Thus, for higher levels in the segmentation distance matrix computation's time complexity for all segments is given by ${O}((M/len)\times N^2)$. Notice the disappearance of $len$ factor in the numerator. Now, since the length of segment at a segmentation level is half the length of segment at a level just below it, the total cost for distance matrix computation is given by, $\sum_{len=2^0}^{2^{\log_{2}{M}}} {O}((M/len)\times N^2) = {O}(M\times N^2)$ Thus, creating hierarchical segmentation does not affect distance measure computation complexity. \\ \textbf{Distance Measure Computation} - The time complexity of the distance measure Algorithm 1 (see page \pageref{algo:d_msr}) between two segments of length $len$ can be given by the total number of MIN\_ITDist's calculated. The complexity of MIN\_ITDist method is equal to $\Omega$. Maximum number of MIN\_ITDist calculated is equal to the total number of 1's in both the sequences combined which can be $2\times len$ in the worst case. So, the complexity of distance computation is $2\times \Omega \times len$. Since $\Omega$ and $2$ are constants, we can say that complexity of distance computation is ${O}(len)$. Since there are $N^2$ pairs of sequences for every segment (where $N$ is the total number of days), the worst case time complexity to generate distance matrix for a segment of length $len$ is ${O}(N^2\times len)$. 
\\ \textbf{Pattern Discovery} - Constructing distance matrix for an interval of length $len$ will take in the worst case ${O}(N^2\times \log_{2}{len})$. If the segment tree was not constructed it would take ${O}(N^2\times len)$. For exhaustively searching for all frequent $\Omega$-coverings it will take ${O}(N)$ time in the worst case, as we only need to check the cardinality of the $\Omega$-covering of every sequence. If we wish to ignore $\Omega$-coverings which are subsets of other $\Omega$-coverings then it would take ${O}(N^3)$ in the worst case as we need to compare every possible $\Omega$-covering with every other $\Omega$-covering for containment. One comparison takes ${O}(N)$ in the worst case and there are ${O}(N^2)$ such pairs. For clustering, we use affinity propagation. The underlying concept of this algorithm is belief propagation and the worst time or message complexity is ${O}(r\times N^2)$ where $r$ is the total number of iterations for convergence and $N$ is the total number of data points (or days in our case). For sparse distance matrix this time complexity is less as messages will not be passed between data points for which the distance is not defined (or infinite). For a given distance matrix with $N$ data points, time complexity is ${O}(r\times N^2) = {O}(N^2)$. \section{Space Complexity} \label{app:space_complexity} \textbf{Segmentation} - Space complexity to store entire segment tree is equal to the size of one node multiplied by total number of nodes. Every node contains the segment dimensions, the left and right child pointer and the distance matrix. Segment dimensions, left and right child pointers are constant size whereas distance matrix takes ${O}(N^2)$ space. For every segment in the given segmentation level, space complexity is ${O}(N^2\times (M/len))$. 
For the entire segment tree, space complexity is given by $\sum_{len=2^0}^{2^{\log_{2}{M}}} {O}((N^2 \times M)/len) = {O}(N^2\times M)$ \\ \textbf{Distance Matrix Computation} - The worst case space complexity of distance measure computation for any segment is given by ${O}(N^2)$\footnote{All the terms, variables and constants have the same meaning as in time complexity analysis \ref{app:time_complexity}.}. \\ \textbf{Pattern Discovery} - Space needed to store distance matrix for given time interval is ${O}(N^2)$. The exhaustive search for all $\Omega$-coverings for a given time interval is in place so it does not require any extra space. For clustering using affinity propagation we need to keep in memory two messages (responsibility and availability) from every other data point, so, space complexity of single instance of affinity propagation is ${O}(N^2)$. Since we are constructing a sparse matrix, information between every pair of data points need not be stored which will save a lot of space both in distance matrix computation and in message complexity in clustering. \fi \ifCLASSOPTIONcaptionsoff \fi \begin{IEEEbiography}[ { \includegraphics[width=1in,height=1.25in,clip,keepaspectratio]{fig/rohan-kabra.jpg} } ] {Rohan Kabra} received B.Tech and M.Tech degrees in CSE from Indian Institute of Technology (IIT) Roorkee in 2016, India. Currently, he is working as Senior Software Developer in Alexa Team, Amazon, USA. His research interests include Machine Learning, Data Mining and Big Data technologies. \end{IEEEbiography} \begin{IEEEbiography}[ { \includegraphics[width=1in,height=1.25in,clip,keepaspectratio]{fig/divya-saxena.jpg} } ] {Divya Saxena} received the M.Tech. and PhD degrees in CSE from the IIITM, Gwalior, India and Indian Institute of Technology (IIT) Roorkee, India in 2012 and 2017, respectively. Currently, she is working as Research Assistant Professor in the Department of Computing, The Hong Kong Polytechnic University, Hong Kong. 
She has also worked as a Postdoc and Research Fellow in the Department of Computing and University Research Facility in Big Data Analytics (UBDA), The Hong Kong Polytechnic University, Hong Kong. Her research interests include Generative Adversarial Networks (GANs), Spatio-temporal data mining, Image-to-Image Translation and Big data analytics. She is a member of ACM and IEEE. \end{IEEEbiography} \begin{IEEEbiography}[ { \includegraphics[width=1in,height=1.25in,clip,keepaspectratio]{fig/Dhaval.jpg} } ] {Dhaval Patel} received the M.Tech. and PhD degrees in Computer Science from the IIT, Kharagpur, India and NUS Singapore, in 2006 and 2011, respectively. Currently, he is working as a Research Staff Member, IBM Thomas J. Watson Research Center, Yorktown Heights, NY USA. His research interests include Data Mining, Text Mining, Natural Language Processing. He is a Senior member of IEEE. \end{IEEEbiography} \begin{IEEEbiography}[ { \includegraphics[width=1in,height=1.25in,clip,keepaspectratio]{fig/Prof-Cao1.png} } ] {Jiannong Cao} received the MSc and PhD degrees in Computer Science from Washington State University, Pullman, Washington, in 1986 and 1990, respectively. He is currently the Chair Professor in the Department of Computing and Associate Director of the University Research Facility in Big Data Analytics (UBDA), The Hong Kong Polytechnic University, Hong Kong. His research interests include parallel and distributed computing, wireless sensing and networks, pervasive and mobile computing, and big data and cloud computing. He is a fellow of the IEEE and ACM Distinguished member. \end{IEEEbiography} \iffalse We perform some basic analytics on the data, like the number of days humans visit that location (number of days that the individual was detected at that location) and the average amount of time a person spends in that vicinity daily or the primary periodicity of a person’s visits (See Figure 8 and 9). 
\begin{figure} \caption{Fraction of number of individuals on (Y axis) that visited UGPC lab for equal to or less than x (X axis) number of days. 90\% of individuals visited the lab for less than 8 days which is consistent with the dynamics of most places as the percentage of non-regulars or passersby is usually higher} \label{fig:probepackets1} \end{figure} \begin{figure} \caption{Fraction of number of individuals (Y axis) that spend less than or equal to x (X axis) amount of time on an average daily at UGPC lab} \label{fig:probepackets2} \end{figure} \fi \iffalse \subsection{Feature Engineering for Data Analysis} As the data is discretized into time slots, we perform time segmentation of the WiFi dataset. Before we create features, we also perform dynamic type segmentation of individuals as the features will largely depend on the type of subgroup that it belongs to. This segmentation is \textit{dynamic} because the above groups keep getting updated every day. We perform type segmentation where we divide the set of individuals into three different sub-groups. \begin{enumerate} \item Frequent Repeat Visitor, if the person has been detected in the last week. These are mostly routine visitors. \item Non-Frequent Repeat Visitor, if the person has not been detected in the last week but has been detected before that. These are mostly occasional visitors. \item New Visitor, if the person has been detected for first time on the particular day. These are mostly outsiders who usually visit during some special events. \end{enumerate} \begin{figure} \caption{Average Day for a frequent visitor in the month of Oct. for WiFi dataset} \label{fig:oct} \end{figure} \fi \end{document}
\begin{document} \title{\LARGE \bf Line Search for Averaged Operator Iteration } \thispagestyle{empty} \pagestyle{empty} \begin{abstract} Many popular first order algorithms for convex optimization, such as forward-backward splitting, Douglas-Rachford splitting, and the alternating direction method of multipliers (ADMM), can be formulated as averaged iteration of a nonexpansive mapping. In this paper we propose a line search for averaged iteration that preserves the theoretical convergence guarantee, while often accelerating practical convergence. We discuss several general cases in which the additional computational cost of the line search is modest compared to the savings obtained. \end{abstract} \section{Introduction} First-order algorithms such as forward-backward splitting, Douglas-Rachford splitting, and the alternating direction methods of multipliers (ADMM) are often used for large-scale convex optimization. While the theory tells us that these methods converge, practical convergence can be very slow for some problem instances. One effective method to reduce the number of iterations is to precondition the problem data. This approach has been extensively studied in the literature and has proven very successful in practice; see, e.g., \cite{Benzi_precond,Bramble_Uzawa,Hu_nonlin_Uzawa,GhadimiADMM,gisBoydAut2014metric_select,gisBoydTAC2014metric_select} for a limited selection of such approaches. Another general approach to improving practical efficiency is to carry out a line search, i.e., to first compute a tentative next iterate and then to select the next iterate on the ray from the current iterate passing through the tentative iterate. Typical line searches are based on some readily computed quantity such as the function value or norm of the gradient or residual. A well designed line search preserves the theoretical convergence of the base method, while accelerating the practical convergence. 
Line search is widely used in gradient descent or Newton methods; see \cite{Boyd2004,Nocedal}. These line search methods cannot be applied to all first-order methods mentioned above, however, since in general there is no readily computed quantity that is decreasing. (The convergence proofs for these methods typically rely on quantities related to the distance to an optimal point, which cannot be evaluated while the algorithm is running.) In this paper we propose a general line search scheme that is applicable to most first-order convex optimization methods, including those mentioned above whose convergence proofs are not based on the decrease of an observable quantity. We exploit the fact that many first-order optimization algorithms can be viewed as averaged iterations of some nonexpansive operator, i.e., they can be written in the form \begin{align} x^{k+1}=(1-\bar{\alpha})x^k+\bar{\alpha} Sx^k = x^k + \bar{\alpha} (Sx^k-x^k), \label{eq:averaged_iter} \end{align} where $\bar{\alpha}\in(0,1)$ and $S~:~\mathbb{R}^n\to\mathbb{R}^n$ is nonexpansive, i.e., it satisfies $\|Su-Sv\|_2 \leq \|u-v\|_2$ for all $u,v$. The superscript $k$ denotes iteration number. The middle expression shows that the next point is a weighted average of the current point $x^k$ and $Sx^k$. The expression on the right-hand side of \eqref{eq:averaged_iter} shows that the iteration can be interpreted as taking a step of length $\bar{\alpha}$ in the direction of the fixed-point residual $r^k=Sx^k-x^k$. Assuming a fixed-point exists, the iteration \eqref{eq:averaged_iter} converges to the set of fixed-points. In this paper we will show how steps sometimes much larger than $\bar{\alpha}$ can be taken, which typically accelerates practical convergence. This iteration has the form \begin{align} x^{k+1}= x^k + \alpha_k (Sx^k-x^k), \label{eq:averaged_iter_ls} \end{align} where $\alpha_k>0$ is chosen according to line search rules described below. 
We refer to $\alpha_k$ as the \emph{step length} in the $k$th iteration, and $\bar{\alpha}$ as the \emph{nominal step length}. The choice $\alpha_k = \bar{\alpha}$ recovers the basic averaged iteration \eqref{eq:averaged_iter}. We refer to the selection of $\alpha_k$ as a line search, since we are selecting the next iterate as a point on the line or ray passing through $x^k$ in the direction of the residual. The merit function used to accept a step length $\alpha_k$ in the line search is the norm of the fixed-point residual $\|r\|_2=\|Sx-x\|_2$. To evaluate this merit function for a candidate point, we must compute $Sx$, which corresponds to the dominant cost of taking a full iteration of the nominal algorithm. In the general case, then, the line search is computationally expensive, and there is a trade-off between the cost of the line search (which depends on the number of candidate points examined), and the savings in iterations due to the line search. But we have identified many common and interesting problem and algorithm combinations for which the fixed-point residual can be computed at low additional cost along the candidate ray. In these situations, performing one iteration with line search is roughly as expensive as performing one standard iteration of the nominal algorithm, so the additional cost of the line search is minimal. This happens when the nonexpansive operator $S$ can be written as $S=S_2S_1$ where $S_1~:~\mathbb{R}^n\to\mathbb{R}^n$ is affine and $S_2~:~\mathbb{R}^n\to\mathbb{R}^n$ is relatively cheap to evaluate. The paper is organized as follows. In Section~\ref{sec:main}, we state the line search method and prove its convergence. In Section~\ref{sec:comp_cost}, we show that the line search can be carried out efficiently when $S=S_2S_1$ and $S_2$ is cheap to evaluate and $S_1$ is affine. In Section~\ref{sec:opt_algs}, we show how to implement the line search for some popular algorithms. 
Finally, in Section~\ref{sec:num_examples} we provide numerical examples that show the efficiency of the proposed line search. \section{The line search method} \label{sec:main} \subsection{Line search test} The line search method first computes the nominal next iterate $\bar x^k$ according to the basic averaged iteration \eqref{eq:averaged_iter}, and then (possibly) selects a different value of $\alpha_k$. The algorithm has the following form. \begin{align} \label{eq:rk}r^k&:=Sx^k-x^k\\ \label{eq:xknom}\bar x^k&:=x^k+\bar{\alpha} r^k\\ \label{eq:rknom}\bar r^k&:=S \bar x^k - \bar x^k\\ \label{eq:xk+1}x^{k+1} &:=x^k+\alpha_k r^k \end{align} In the first step we compute the current residual, in the second step we compute the nominal next iterate, and in the third step we compute the nominal next residual. In the last step, we form the actual next iterate. In \eqref{eq:xk+1} the step length $\alpha_k$ must satisfy the following. Either $\alpha_k = \bar{\alpha}$, i.e., we take the nominal step, or $\alpha_k\in (\bar{\alpha}, \alpha^\mathrm{max}]$ is such that \begin{align} \|r^{k+1}\|_2 = \|Sx^{k+1}-x^{k+1}\|_2 \leq(1-\epsilon)\|\bar r^k\|_2, \label{eq:ls_test} \end{align} where $\epsilon\in(0,1)$ and $\alpha^\mathrm{max} \geq \bar{\alpha}$ are fixed algorithm parameters. Thus we either take the nominal step, or one that reduces the norm of the fixed point residual compared to the nominal step. We will discuss the details of the computation and give some specific methods to choose $\alpha_k$ later; but for now we observe that to verify the line search test \eqref{eq:ls_test}, we must evaluate $r^{k+1}$, which is the first step \eqref{eq:rk} of the next iteration. In a similar way, if we take the nominal step, i.e., choose $\alpha_k=\bar{\alpha}$, then step \eqref{eq:rknom} is the first step of the next iteration. In either case, there is no additional computational cost. 
\subsection{Convergence analysis} \label{sec:conv_analysis} We analyze the proposed line search method and provide residual and iterate convergence results. All results are proven in Appendix~\ref{app:proofs}. \begin{thm} Suppose that $S~:~\mathbb{R}^n\to\mathbb{R}^n$ is nonexpansive and let $\bar{\alpha}\in(0,1)$. Then the iteration \eqref{eq:rk}-\eqref{eq:xk+1} satisfies $\|r^k\|_2\to c$ as $k\to\infty$. \label{thm:norm_conv} \end{thm} So, the norm of the residual converges. Next, we show that the residual converges to zero if a fixed-point to $S$ exists, i.e., if ${\rm{fix}}S=\{x\in\mathbb{R}^n~|~x=Sx\}\neq\emptyset$. \begin{thm} Suppose that $S~:~\mathbb{R}^n\to\mathbb{R}^n$ is nonexpansive, that ${\rm{fix}}S\neq\emptyset$, and that $\bar{\alpha}\in(0,1)$. Then the iteration \eqref{eq:rk}-\eqref{eq:xk+1} satisfies $r^k\to 0$ and $x^{k+1}\to x^k$ as $k\to\infty$. \label{thm:res_conv_fix_nonempty} \end{thm} If a fixed-point to $S$ exists, the fixed-point residual will converge to zero. Next, we establish what happens when no fixed-point to $S$ exists. \begin{thm} Suppose that $S~:~\mathbb{R}^n\to\mathbb{R}^n$ is nonexpansive, that ${\rm{fix}}S=\emptyset$, that $\inf\|Sx-x\|=c>0$, and that $\bar{\alpha}\in(0,1)$. Then the iteration \eqref{eq:rk}-\eqref{eq:xk+1} satisfies $r^k\to d$ and $x^{k+1}-x^k\to\bar{\alpha} d$ with $\|d\|=c$ as $k\to\infty$. \label{thm:res_conv_fix_empty} \end{thm} This result relies heavily on \cite[Proposition~4.5]{bauschkeAffineSS} (which is a specification of more general results in \cite[Corollary~1.5]{BruckHouston1977} and \cite[Corollary 2.3]{BaillonHouston1978}). It says that, in the limit, the residual converges to a vector with smallest fixed-point residual. So the iterates converge to a line. This can, e.g., be used to devise infeasibility detection methods for these methods. Next, we establish a rate bound for a difference of residuals. 
\begin{thm} Suppose that $S~:~\mathbb{R}^n\to\mathbb{R}^n$ is nonexpansive and $\bar{\alpha}\in(0,1)$. Then the iteration \eqref{eq:rk}-\eqref{eq:xk+1} satisfies \begin{align} \sum_{k=0}^n\|\bar{r}^k-r^k\|_2^2\leq \frac{\bar{\alpha}}{1-\bar{\alpha}}\|r^0\|_2^2. \label{eq:res_diff_sum} \end{align} Let $k_{\rm{best}}^n\in\{0,\ldots,n\}$ be the iterate $k$ (up to $n$) for which $\|\bar{r}^k-r^k\|_2$ is smallest. Then \begin{align} \|\bar{r}^{k_{\rm{best}}^n}-&r^{k_{\rm{best}}^n}\|_2^2 \leq \frac{\bar{\alpha}}{(n+1)(1-\bar{\alpha})}\|r^0\|_2^2. \label{eq:res_diff_best} \end{align} \label{thm:sublin_conv_rate} \end{thm} If $S$ is a $\delta$-contraction with $\delta\in[0,1)$, i.e., $\|Sx-Sy\|\leq\delta \|x-y\|$ for all $x,y\in\mathbb{R}^n$, stronger convergence results can be obtained. \begin{thm} Assume that $S~:~\mathbb{R}^n\to\mathbb{R}^n$ is $\delta$-contractive with $\delta\in[0,1)$ and $\bar{\alpha}\in(0,1)$. Then the iteration \eqref{eq:rk}-\eqref{eq:xk+1} satisfies \begin{align*} \|r^{k+1}\|_2\leq (1-\bar{\alpha}+\bar{\alpha}\delta)\|r^{k}\|_2 \end{align*} for all iterations $k$. \label{thm:lin_conv_rate} \end{thm} So, the fixed-point residual converges linearly to zero (which it can since contractive operators always have a unique fixed-point). \begin{rem} All results in this section are stated in the Euclidean setting with the standard 2-norm. But they also hold in general finite-dimensional real Hilbert space settings. \end{rem} \section{Computational cost} \label{sec:comp_cost} The fixed-point residual must be evaluated to carry out the line search test \eqref{eq:ls_test}. In the general case this requires us to evaluate the operator $S$, which has the same cost as a full iteration of the algorithm. Therefore, in the general case it may be too expensive to evaluate many (or even just more than one) candidate step lengths $\alpha_k$ compared to the savings in iterations due to the line search. 
In this section we consider a special case in which the line search can be carried out more efficiently, i.e., many candidate points along the ray can be evaluated with low additional cost. Suppose that $S=S_2S_1$, where $S_2~:~\mathbb{R}^n\to\mathbb{R}^n$ is cheap to evaluate compared to $S_1$, and $S_1~:~\mathbb{R}^n\to\mathbb{R}^n$ is affine. The algorithm \eqref{eq:rk}-\eqref{eq:xk+1} in this case becomes: \begin{align} \label{eq:rk_comp}r^k&:=S_2S_1x^k-x^k\\ \label{eq:xknom_comp}\bar x^k&:=x^k+\bar{\alpha} r^k\\ \label{eq:rknom_comp}\bar r^k&:=S_2S_1 \bar x^k - \bar x^k\\ \label{eq:xk+1_comp}x^{k+1} &:=x^k+\alpha_k r^k \end{align} In between \eqref{eq:rknom_comp} and \eqref{eq:xk+1_comp}, we perform the line search test \eqref{eq:ls_test}, \begin{align} \|r^{k+1}\|_2 = \|S_2S_1x^{k+1}-x^{k+1}\|_2 \leq(1-\epsilon)\|\bar r^k\|_2, \label{eq:ls_test_comp} \end{align} for multiple candidate values of $\alpha_k$. We now analyze the complexity, assuming that the cost of evaluating $S_2$, and vector-vector operations, are negligible (or at least, dominated by the cost of evaluating $S_1$). In one iteration with line search we need to compute $S_1x^k$ in \eqref{eq:rk_comp}, $S_1\bar x^k$ in \eqref{eq:rknom_comp}, and $S_1(x^k+\alpha_k r^k)$ for each candidate $\alpha_k$ in \eqref{eq:ls_test_comp}. Since $S_1$ is affine, i.e., of the form \begin{align} S_1(x)=Fx+h \label{eq:S2_affine} \end{align} with $F\in\mathbb{R}^{n\times n}$ and $h\in\mathbb{R}^n$, we have for any $\alpha$, \begin{align*} S_1(x^k+\alpha r^k) = F x^k + h+ \alpha Fr^k. \end{align*} So once we evaluate $Fx^k$ and $Fr^k$, we can evaluate $S_1(x^k + \alpha r^k)$ for any number of values of $\alpha$, at the cost of only vector operations. In particular, we can evaluate $S_1 \bar x^k$ in step \eqref{eq:rknom_comp}, and $S_1 x^{k+1}$ for multiple values of $\alpha_k$ in the line search test \eqref{eq:ls_test_comp}, with no further evaluations of $S_1$. 
We can express the first three steps of the algorithm as \begin{align} \label{eq:rk_comp2}r^k&:=S_2(Fx^k+h)-x^k\\ \label{eq:xknom_comp2}\bar x^k&:=x^k+\bar{\alpha} r^k\\ \label{eq:rknom_comp2}\bar r^k&:=S_2\left(Fx^k +h +\bar{\alpha} Fr^k\right) - \bar x^k \end{align} which involves two evaluations of $F$ (and two evaluations of $S_2$), and some vector operations. The next step is the line search, in which we evaluate the residual $r$ using \begin{align} r^{k+1} = S_2 \left(Fx^k+h+\alpha_k Fr^k\right)-(x^k+\alpha_k r^k) \label{eq:res_k+1_S1_S2} \end{align} for $p$ candidate values of $\alpha_k$. Each of these involves a few vector operations, and one evaluation of $S_2$, since we use the cached values of $Fr^k$ and $Fx^k$. One iteration costs $2+p$ evaluations of $S_2$, $2$ evaluations of $F$, and order $p$ vector operations. Finally, as observed above, we will have already evaluated the step \eqref{eq:rk_comp} for the next iteration, so one evaluation of $F$ (and $S_2$) does not count (or rather, counts towards the next iteration). Thus the computational cost of one iteration with $p$ candidate values of $\alpha_k$ is one evaluation of $S_1$ (hence $F$) and $p+1$ evaluations of $S_2$. If the cost of evaluating $S_1$ dominates the cost of evaluating $S_2$ (and vector operations), the computational cost of the iteration with line search is the same as the basic iteration without line search. \paragraph{A variation.} For some algorithms such as forward-backward splitting the averaged iteration \eqref{eq:averaged_iter} is more conveniently written as \begin{align} x^{k+1} := T_2T_1x^k \label{eq:avg_iter_variation} \end{align} where $T_2~:~\mathbb{R}^n\to\mathbb{R}^n$ and $T_1~:~\mathbb{R}^n\to\mathbb{R}^n$. So, in this case $(1-\bar{\alpha})x^k+\bar{\alpha} S_2S_1x^k=T_2T_1x^k$. (The nominal $\bar{\alpha}$ is hidden in the composition between $T_2$ and $T_1$.) 
Instead of using $S_2S_1x-x$ as residuals in \eqref{eq:rk_comp}-\eqref{eq:xk+1_comp}, we can use $\bar{\alpha} (S_2S_1x-x)=T_2T_1x-x$. An equivalent algorithm then becomes \begin{align} \label{eq:rk_variation}r^k&:=T_2T_1x^k-x^k\\ \label{eq:xknom_variation}\bar x^k&:=x^k+r^k\\ \label{eq:rknom_variation}\bar r^k&:=T_2T_1 \bar{x}^k- \bar{x}^k\\ \label{eq:xk+1_variation}x^{k+1}&:=x^k+\alpha_k r^k \end{align} where $\alpha_k\in[1,\alpha_{\max}]$. Now, let $T_1$ be affine, i.e., of the form \begin{align} T_1x=Fx+h. \label{eq:T2_affine} \end{align} Then the steps \eqref{eq:rk_comp2}-\eqref{eq:rknom_comp2} (with the $x^{k+1}$ update) becomes \begin{align} \label{eq:rk_variation2}r^k&:=T_2(Fx^k+h)-x^k\\ \label{eq:xknom_variation2}\bar x^k&:=x^k+r^k\\ \label{eq:rknom_variation2}\bar r^k&:=T_2\left(F x^k + h +Fr^k\right)- \bar{x}^k\\ \label{eq:xk+1_variation2}x^{k+1}&:=x^k+\alpha_k r^k \end{align} The residual for the line search that is evaluated between \eqref{eq:rknom_variation2} and \eqref{eq:xk+1_variation2} is computed as \begin{align} r^{k+1} = T_2\left(Fx^{k}+h+\alpha_k Fr^k\right)-(x^k+\alpha_k r^k) \label{eq:res_k+1_T1_T2} \end{align} for multiple candidate values of $\alpha_k$. \paragraph{Evaluating affine operators.} To evaluate the affine operator $S_1~:~\mathbb{R}^n\to\mathbb{R}^n$ typically involves a matrix multiplication or a matrix inversion, where the matrix is the same in all iterations. There are two main methods for repeated matrix inversion. The first is to factorize the matrix to be inverted once before the algorithm starts. Then forward and backward solves are used in every iteration. The cost of the forward and backward solves depends on the sparsity of the factors, but is typically more than $O(n)$ up to $O(n^2)$. The second option is to use an iterative method (with warm start). This requires a number of multiplications with the matrix to invert and is hence more expensive than $O(n)$. 
Assuming that the cost of evaluating $S_2~:~\mathbb{R}^n\to\mathbb{R}^n$ is $O(n)$, the cost of evaluating $S_1$ dominates the one of evaluating $S_2$ in this setting. \section{Optimization algorithms} \label{sec:opt_algs} Many popular optimization algorithms can be implemented with the proposed line search method. In this section, we show how $S$, $S_2$ and $S_1$ (or $T_2$ and $T_1$) look for some of these. Before this, we introduce some operators. The proximal operator associated with a proper closed and convex $f~:~\mathbb{R}^n\to\mathbb{R}\cup\{\infty\}$ is defined as \begin{align} {\rm{prox}}_{\gamma f}(z):=\mathop{\rm argmin}_{x}\{f(x)+\tfrac{1}{2\gamma}\|x-z\|_2^2\} \label{eq:prox} \end{align} where $\gamma>0$. The reflected proximal operator is defined as \begin{align} R_{\gamma f}:=2{\rm{prox}}_{\gamma f}-I. \label{eq:rprox} \end{align} If $f$ is the indicator function of a nonempty closed and convex set $C$, i.e., \begin{align} f(x)=\iota_{C}(x):=\begin{cases}0&{\hbox{if }} x\in C\\ \infty & {\hbox{else}} \end{cases} \label{eq:indicator_fcn} \end{align} then the proximal operator in \eqref{eq:prox} is a projection: \begin{align} {\rm{prox}}_{\gamma f}(z)=\Pi_C(z):=\mathop{\rm argmin}_{x\in C}\|x-z\|_2 \label{eq:proj} \end{align} and the reflected proximal operator in \eqref{eq:rprox} is $R_{\gamma\iota_C}=R_{\iota_C}=2\Pi_C-I$. \subsection{Forward-backward splitting} The forward-backward splitting method (see, e.g., \cite{CombettesFBS}) solves composite optimization problems of the form \begin{align} {\hbox{minimize }} f(x)+g(x), \label{eq:fb_prob} \end{align} where $f~:~\mathbb{R}^n\to\mathbb{R}$ is convex and differentiable with an $L$-Lipschitz continuous gradient $\nabla f$ and $g~:~\mathbb{R}^n\to\mathbb{R}\cup\{\infty\}$ is proper closed and convex. 
The forward-backward algorithm for this problem is \begin{align} x^{k+1}:={\rm{prox}}_{\gamma g}(x^k-\gamma \nabla f(x^k)), \label{eq:fb_alg} \end{align} where $\gamma \in(0,\tfrac{2}{L})$ is the step size and ${\rm{prox}}_{\gamma g}$ is defined in \eqref{eq:prox}. If $\gamma\in(0,\tfrac{2}{L})$, it can be shown (by combining \cite[Proposition~4.33]{bauschkeCVXanal}, \cite[Proposition~23.7, Remark~4.24(iii)]{bauschkeCVXanal}, and \cite[Proposition~2.4]{Combettes201555} or \cite[Proposition~3]{gisSIAM2015}) that \begin{align*} {\rm{prox}}_{\gamma g}(I-\gamma \nabla f)=(1-\bar{\alpha})I+\bar{\alpha} S \end{align*} with $\bar{\alpha}=\tfrac{2}{4-\gamma L}$, where \[ S=(1-\tfrac{1}{\bar{\alpha}})I+\tfrac{1}{\bar{\alpha}}{\rm{prox}}_{\gamma g} (I-\gamma\nabla f) \] is nonexpansive. So, the forward-backward splitting algorithm \eqref{eq:fb_alg} is an averaged iteration of a nonexpansive mapping with $\bar{\alpha}=\tfrac{2}{4-\gamma L}$. So, if $\gamma\in(0,\tfrac{2}{L})$, we can do line search in forward-backward splitting. We identify $T_2={\rm{prox}}_{\gamma g}$ and $T_1=(I-\gamma\nabla f)$ in \eqref{eq:avg_iter_variation}. With these definitions, forward-backward splitting with line search is implemented as \eqref{eq:rk_variation}-\eqref{eq:xk+1_variation}. \paragraph{$T_1$ affine.} The operator $T_1=(I-\gamma\nabla f)$ is affine if $f~:~\mathbb{R}^n\to\mathbb{R}$ is convex quadratic, i.e., if \begin{align*} f(x)=\tfrac{1}{2}x^TPx+q^Tx \end{align*} with $P\in\mathbb{R}^{n\times n}$ positive semi-definite and $q\in\mathbb{R}^n$. The operator $T_1$ becomes \begin{align*} T_1x=(I-\gamma P)x-\gamma q. \end{align*} Comparing to \eqref{eq:T2_affine}, we identify $F=I-\gamma P$ and $h=-\gamma q$. With these $F$ and $h$, forward-backward splitting with line search can be implemented as in \eqref{eq:rk_variation2}-\eqref{eq:xk+1_variation2}. So a full iteration with line search needs only one multiplication with $F=(I-\gamma P)$. 
If in addition $T_2={\rm{prox}}_{\gamma g}$ is cheap to evaluate, one full line search iteration can be evaluated roughly at the same cost as a basic iteration of the algorithm. \subsection{Douglas-Rachford splitting} \label{sec:DR} The Douglas-Rachford splitting method \cite{LionsMercier1979} solves problems of the form \begin{align*} {\hbox{minimize }} f(x)+g(x), \end{align*} where $f~:~\mathbb{R}^n\to\mathbb{R}\cup\{\infty\}$ and $g~:~\mathbb{R}^n\to\mathbb{R}\cup\{\infty\}$ are proper closed and convex. The algorithm is given by the following iteration \begin{align} \label{eq:DR1}x^{k}&:={\rm{prox}}_{\gamma f}(z^k)\\ \label{eq:DR2}y^{k}&:={\rm{prox}}_{\gamma g}(2x^k-z^k)\\ \label{eq:DR3}z^{k+1}&:=z^k+2\alpha(y^k-x^k) \end{align} where $\gamma$ is a positive scalar and $\alpha\in(0,1)$. Using the reflected proximal operator defined in \eqref{eq:rprox} the Douglas-Rachford algorithm can be written as \begin{align} z^{k+1}:=((1-\alpha)I+\alpha R_{\gamma g}R_{\gamma f})z^k. \label{eq:DR} \end{align} The reflected proximal operators $R_{\gamma g}$ and $R_{\gamma f}$ are nonexpansive \cite[Corollary~23.10]{bauschkeCVXanal}, and so is their composition $R_{\gamma g}R_{\gamma f}$. The algorithm \eqref{eq:DR} is exactly on the form used in Section~\ref{sec:comp_cost} where $S_2=R_{\gamma g}$, $S_1=R_{\gamma f}$, $S=R_{\gamma g}R_{\gamma f}$, and $\bar{\alpha}=\alpha$. With these definitions, Douglas-Rachford with line search can be implemented as \eqref{eq:rk_comp}-\eqref{eq:xk+1_comp}. Note that $R_{\gamma f}z^k=2x^k-z^k$ in \eqref{eq:DR1}-\eqref{eq:DR3}, $R_{\gamma g}R_{\gamma f}z^k=2y^k-2x^k+z^k$, and the residual $r^k=R_{\gamma g}R_{\gamma f}z^k-z^k=2(y^k-x^k)$. \paragraph{$S_1$ affine.} If $S_1=R_{\gamma f}$ is affine and $S_2=R_{\gamma g}$ is cheap to evaluate, the line search can be done almost for free, see Section~\ref{sec:comp_cost}. 
The operator $S_1=R_{\gamma f}=2{\rm{prox}}_{\gamma f}-I$ is affine if ${\rm{prox}}_{\gamma f}$ is affine, which it is if $f$ is of the form \begin{align*} f(x)=\begin{cases} \tfrac{1}{2}x^TPx+q^Tx & {\hbox{if }} Ax=b\\ \infty & {\hbox{else}} \end{cases} \end{align*} with $P\in\mathbb{R}^{n\times n}$ positive semi-definite, $q\in\mathbb{R}^n$, $A\in\mathbb{R}^{m\times n}$, and $b\in\mathbb{R}^m$. (Any of the quadratic or linear functions, or the affine constraint can be removed, and the operator $S_1$ is still affine.) The proximal and reflected proximal operators of $f$ become \begin{align*} {\rm{prox}}_{\gamma f}(z)&=\begin{bmatrix}I & 0\end{bmatrix}\begin{bmatrix} P+\gamma^{-1}I & A^T\\ A & 0 \end{bmatrix}^{-1}\begin{bmatrix} \gamma^{-1}z-q\\ b \end{bmatrix}\\ R_{\gamma f}(z)&=2{\rm{prox}}_{\gamma f}(z)-z=2\begin{bmatrix}I & 0\end{bmatrix}\begin{bmatrix} P+\gamma^{-1}I & A^T\\ A & 0 \end{bmatrix}^{-1}\begin{bmatrix} \gamma^{-1}z-q\\ b \end{bmatrix}-z\\ &=:Fz+h \end{align*} where $F\in\mathbb{R}^{n\times n}$ and $h\in\mathbb{R}^n$. In this situation, the first three steps of the line search algorithm are \eqref{eq:rk_comp2}-\eqref{eq:rknom_comp2} with $S_2=R_{\gamma g}$ and the residual is \eqref{eq:res_k+1_S1_S2}. As shown in Section~\ref{sec:comp_cost}, we only need one evaluation of $F$ per full iteration. Note that in practice, the matrix $F$ is typically not stored explicitly. One alternative is to factorize $\begin{bmatrix}\begin{smallmatrix}P+\gamma^{-1} I&A^T\\A&0\end{smallmatrix}\end{bmatrix}$ before the algorithm starts. This factorization is cached and used in all consecutive iterations to compute $Fr^k$ (and $Fz^0$). Another option is to use an iterative method (with warm-start) to solve the corresponding linear system of equations. 
\subsection{ADMM} The alternating direction method of multipliers \cite{Glowinski1975,Gabay1976,BoydDistributed} solves problems of the form \begin{align} \begin{tabular}{ll} minimize & $f(x)+g(z)$\\ subject to & $Ax+Bz=c$, \end{tabular} \label{eq:ADMM_prob} \end{align} where $f~:~\mathbb{R}^n\to\mathbb{R}\cup\{\infty\}$ and $g~:~\mathbb{R}^m\to\mathbb{R}\cup\{\infty\}$ are proper closed convex, and $A\in\mathbb{R}^{p\times n}$, $B\in\mathbb{R}^{p\times m}$, and $c\in\mathbb{R}^p$. A standard form of ADMM (with scaled dual variable $u$ and relaxation $\alpha\in(0,1)$) is: \begin{align} \label{eq:ADMM1}x^{k+1} &= \mathop{\rm argmin}_{x}\{f(x)+\tfrac{\rho}{2}\|Ax+Bz^{k}-c+u^{k}\|_2^2\}\\ \label{eq:ADMM2}x_A^{k+1}&=2\alpha Ax^{k+1}-(1-2\alpha)(Bz^{k}-c)\\ \label{eq:ADMM3}z^{k+1} &= \mathop{\rm argmin}_{z}\{g(z)+\tfrac{\rho}{2}\|x_A^{k+1}+Bz-c+u^{k}\|_2^2\}\\ \label{eq:ADMM4}u^{k+1} &= u^{k}+ (x_A^{k+1}+Bz^{k+1}-c) \end{align} where $\alpha=\tfrac{1}{2}$ gives standard ADMM without relaxation. This form of ADMM does not have a variable for which the algorithm is an averaged iteration of a nonexpansive mapping. In Appendix~\ref{app:ADMM} it is shown that ADMM is Douglas-Rachford splitting applied to a specific problem formulation. (This is a well known fact, see, e.g., \cite{Gabay83,EcksteinPhD}.) Therefore, ADMM is $\alpha$-averaged and can be written on the form \begin{align} v^{k+1}=(1-\alpha)v^k+\alpha R_1R_2v^k \label{eq:ADMM_on_DR_form} \end{align} where $R_1~:~\mathbb{R}^p\to\mathbb{R}^p$ and $R_2~:~\mathbb{R}^p\to\mathbb{R}^p$ are reflected proximal operators. These reflected proximal operators are given by (see \eqref{eq:rprox_p1} and \eqref{eq:rprox_p2} in Appendix~\ref{app:ADMM} where $\rho=\tfrac{1}{\gamma}$): \begin{align} \label{eq:R1} R_{1}(v)&=2A\mathop{\rm argmin}_{x}\{f(x)+\tfrac{\rho}{2}\|Ax-v-c\|_2^2\}-2c-v,\\ \label{eq:R2} R_{2}(v)&=-2B\mathop{\rm argmin}_{z}\{g(z)+\tfrac{\rho}{2}\|Bz+v\|_2^2\}-v. 
\end{align} The algorithm \eqref{eq:ADMM_on_DR_form} (and therefore ADMM in \eqref{eq:ADMM1}-\eqref{eq:ADMM4}) can then be implemented as (see Appendix~\ref{app:ADMM}): \begin{align} \label{eq:ADMM1_ls}z^k&:=\mathop{\rm argmin}_{z}\{g(z)+\tfrac{\rho}{2}\|Bz+v^k\|_2^2\}\\ \label{eq:ADMM2_ls}x^k&:=\mathop{\rm argmin}_{x}\{f(x)+\tfrac{\rho}{2}\|Ax+2Bz^k+v^k-c\|_2^2\}\\ \label{eq:ADMM3_ls}v^{k+1}&:=v^k+2\alpha(Ax^k+Bz^k-c) \end{align} The iteration \eqref{eq:ADMM_on_DR_form} is on the form discussed in Section~\ref{sec:comp_cost} with $S_2=R_1$, $S_1=R_2$, $S=R_1R_2$, and $\bar{\alpha}=\alpha$. With these definitions, ADMM with line search can be implemented as \eqref{eq:rk_comp}-\eqref{eq:xk+1_comp}. Note that $R_2v^k=-2Bz^k-v^k$ in \eqref{eq:ADMM1_ls}-\eqref{eq:ADMM3_ls}, $R_1R_2v^k=2Ax^k-2c+2Bz^k+v^k$, and the residual $r^k=2(Ax^k+Bz^k-c)$ in \eqref{eq:ADMM3_ls}. \paragraph{$R_2$ affine.} If $R_2$ is affine and $R_1$ is cheap to evaluate, then line search can be performed efficiently, see Section~\ref{sec:comp_cost}. The operator $R_2$ is affine if $g$ is of the form \begin{align*} g(z)=\begin{cases} \tfrac{1}{2}z^TPz+q^Tz & {\hbox{if }} Lz=b\\ \infty & {\hbox{else}} \end{cases} \end{align*} with $P\in\mathbb{R}^{m\times m}$ positive semi-definite, $q\in\mathbb{R}^m$, $L\in\mathbb{R}^{s\times m}$, and $b\in\mathbb{R}^s$. The operator $R_2$ in \eqref{eq:R2} becomes \begin{align*} R_{2}(v)&=\begin{bmatrix}-2B & 0\end{bmatrix}\begin{bmatrix} P+\rho B^TB & L^T\\ L & 0 \end{bmatrix}^{-1}\begin{bmatrix} -(q+\rho B^Tv)\\ b \end{bmatrix}-v\\ &=:Fv+h \end{align*} where $F\in\mathbb{R}^{p\times p}$ and $h\in\mathbb{R}^p$. With these definitions of $F$ and $h$, the first three steps of ADMM with line search are \eqref{eq:rk_comp2}-\eqref{eq:rknom_comp2} with $S_2=R_1$ and the residual is \eqref{eq:res_k+1_S1_S2}. Therefore, only one application of $R_2$ (and $F$) is needed per full line search iteration, see Section~\ref{sec:comp_cost}. 
Also here, the matrix $F$ is typically not stored explicitly. Instead, either a cached factorization of $\begin{bmatrix}\begin{smallmatrix}P+\rho B^TB&L^T\\L&0\end{smallmatrix}\end{bmatrix}$ or an iterative method (with warm-start) is used to compute $Fr^k$ (and $Fv^0$). \subsection{Consensus} The consensus algorithm \cite[Section~7]{BoydDistributed} solves problems of the form \begin{align} {\hbox{minimize }} f(x)=\sum_{i=1}^Nf_i(x) \label{eq:consensus_prob} \end{align} where $f~:~\mathbb{R}^n\to\mathbb{R}\cup\{\infty\}$ and all $f_i~:~\mathbb{R}^n\to\mathbb{R}\cup\{\infty\}$ are proper closed and convex. An equivalent formulation is \begin{align} {\hbox{minimize }} \sum_{i=1}^Nf_i(x_i)+\iota_C(x_1,\ldots,x_N) \label{eq:consensus_prob2} \end{align} where the consensus constraint set $C$ is \[ C=\{(x_1,\ldots,x_N)\in\mathbb{R}^n\times\cdots\times\mathbb{R}^n~|~x_1=\cdots=x_N\} \] and $\iota_C$ is an indicator function defined in \eqref{eq:indicator_fcn}. That is, every $x_i\in\mathbb{R}^n$ in \eqref{eq:consensus_prob2} is a local version of the global $x\in\mathbb{R}^n$ in \eqref{eq:consensus_prob}. We use the following formulation of the consensus algorithm: \begin{align} \label{eq:consensus1}x_i^{k}&:={\rm{prox}}_{\gamma f_i}(2z_{\rm{av}}^k-z_i^k)\\ \label{eq:consensus2}z_i^{k+1}&:=z_i^k+(x_i^k-z_{\rm{av}}^k) \end{align} where $z_{\rm{av}}=\tfrac{1}{N}\sum_{i=1}^Nz_i$ is the average of the $z_i$'s. This consensus algorithm is obtained by applying Douglas-Rachford splitting with $\alpha=\tfrac{1}{2}$ to \eqref{eq:consensus_prob2}. (To use ADMM as in \cite{BoydDistributed} would give an equivalent algorithm, see \cite{EcksteinPhD}, but without a variable for which the algorithm is an averaged iteration.) 
Therefore, it is $\tfrac{1}{2}$-averaged and can be written on the form \begin{align*} {\bf{z}}^{k+1} := \tfrac{1}{2}({\bf{z}}^k+R_{\gamma f}R_{ \iota_C}{\bf{z}}^k)=\tfrac{1}{2}\left({\bf{z}}^k+R_{\gamma f}(2z_{\rm{av}}^k-{\bf{z}}^k)\right) \end{align*} where ${\bf{z}}=(z_1,\ldots,z_N)$. Using local variables, it can instead be written as \begin{align*} z_i^{k+1} := \tfrac{1}{2}\left(z_i^k+R_{\gamma f_i}(2z_{\rm{av}}^k-z_i^k)\right) \end{align*} for all $i\in\{1,\ldots,N\}$. The local updates of the algorithm with line search become: \begin{align} \label{eq:rk_consensus}r_i^k&:= R_{\gamma f_i}(2z_{\rm{av}}^k-z_i^k)-z_i^k\\ \label{eq:xknom_consensus}\bar z_i^k&:=z_i^k+ \tfrac{1}{2} r_i^k\\ \label{eq:rknom_consensus}\bar r_i^k&:=R_{\gamma f_i}(2\bar z_{\rm{av}}^k-\bar{z}_i^k)-\bar z_i^k\\ \label{eq:xk+1_consensus}z_i^{k+1} &:=z_i^k+\alpha_k r_i^k \end{align} where either $\alpha_k=\tfrac{1}{2}$, or $\alpha_k\in(\tfrac{1}{2},\alpha_{\max}]$ is chosen in accordance with \eqref{eq:ls_test}, i.e., such that \begin{align*} \|{\bf{r}}^{k+1}\|_2\leq(1-\epsilon)\|\bar {\bf{r}}^k\|_2, \end{align*} where ${\bf{r}}^k=(r_1^k,\ldots,r_N^k)$. Note that the local residual $r_i^k$ in \eqref{eq:rk_consensus} is given by $2(x_i^k-z_{\rm{av}}^k)$ in \eqref{eq:consensus2} (and similarly for $\bar{r}_i^k$ in \eqref{eq:rknom_consensus}). The operator $R_{\iota_C}$ is always affine. Therefore, a full iteration with line search can be performed with only one evaluation of $R_{\iota_C}$, see Section~\ref{sec:comp_cost}. However, $R_{\iota_C}$ is often cheaper to evaluate than $R_{\gamma f}$. So, evaluating a candidate point in the line search involves the costly operator $R_{\gamma f}$ and may be almost as costly as a full iteration of the algorithm. \subsection{Alternating projection methods} We consider the problem of finding a point in the intersection of two nonempty closed and convex sets $C$ and $D$. That is, we want to find any $x\in C\cap D$. 
This can equivalently be written as solving the optimization problem \begin{align} {\hbox{minimize }} \iota_C(x)+\iota_D(x) \label{eq:indicator_prob} \end{align} where $\iota_C~:~\mathbb{R}^n\to\mathbb{R}\cup\{\infty\}$ and $\iota_D~:~\mathbb{R}^n\to\mathbb{R}\cup\{\infty\}$ are indicator functions (defined in \eqref{eq:indicator_fcn}) for $C$ and $D$ respectively . There are numerous algorithms for finding such $x$. We focus on alternating projections and Douglas-Rachford splitting. \paragraph{Alternating projections.} The alternating projections \cite{vonNeumann} is given by \begin{align} x^{k+1}=\Pi_{C}\Pi_{D}x^k. \label{eq:ap} \end{align} Since $\Pi_C$ and $\Pi_D$ are $\tfrac{1}{2}$-averaged \cite[Proposition~23.7]{bauschkeCVXanal}, the composition is $\tfrac{2}{3}$-averaged \cite[Proposition~2.4]{Combettes201555} or \cite[Proposition~3]{gisSIAM2015}. Therefore, alternating projections is an averaged iteration with $\bar{\alpha}=\tfrac{2}{3}$ and of the form $x^{k+1}=T_2T_1x^k$ where $T_2=\Pi_C$ and $T_1=\Pi_D$. Since alternating projections is an instance of \eqref{eq:avg_iter_variation}, we can implement alternating projections with line search as \eqref{eq:rk_variation}-\eqref{eq:xk+1_variation} (with $T_2=\Pi_C$ and $T_1=\Pi_D$). \paragraph{Douglas-Rachford.} The problem \eqref{eq:indicator_prob} can also be solved using Douglas-Rachford splitting. The algorithm becomes \begin{align*} z^{k+1}=(1-\alpha)z^k+\alpha R_{\iota_C}R_{\iota_D}z^k \end{align*} where $\alpha\in(0,1)$. That is, we have a composition of two reflections. This algorithm is treated in Section~\ref{sec:DR} where we identified $R_{\iota_C}=S_2$ and $R_{\iota_D}=S_1$. \begin{rem} Note that the $\gamma$ parameter used in standard Douglas-Rachford is not present here (since the projection is independent of this). Therefore, the only parameter to be tuned is $\alpha$, i.e., the one we perform line search over. 
\end{rem} \paragraph{$D$ affine.} When $D$ is affine, i.e., $D=\{x~|~Ax=b\}$, then \begin{align*} \Pi_{D}(x)&=\begin{bmatrix}I&0\end{bmatrix}\begin{bmatrix} I & A^T\\ A & 0\end{bmatrix}^{-1}\begin{bmatrix}x\\b\end{bmatrix},\\ R_{\iota_D}(x)&=2\Pi_D(x)-x=\begin{bmatrix}2I&0\end{bmatrix}\begin{bmatrix} I & A^T\\ A & 0\end{bmatrix}^{-1}\begin{bmatrix}x\\b\end{bmatrix}-x. \end{align*} Both these operators are affine. Assume that $\Pi_C$ (and hence $R_{\iota_C}=2\Pi_C-I$) is cheap to evaluate. Then the line search can be implemented in alternating projections and in Douglas-Rachford splitting with almost no additional cost compared to their basic iterations (see Section~\ref{sec:comp_cost}). Alternating projections with line search is implemented as \eqref{eq:rk_variation2}-\eqref{eq:xk+1_variation2} with $T_2=\Pi_C$ and $Fx+h=\Pi_D(x)$. The residual used for the line search is \eqref{eq:res_k+1_T1_T2}. The first three steps of Douglas-Rachford with line search are \eqref{eq:rk_comp2}-\eqref{eq:rknom_comp2} with $S_2=R_{\iota_C}$ and $Fx+h=R_{\iota_D}(x)$. The residual used for the line search is \eqref{eq:res_k+1_S1_S2}. \subsection{Other algorithms} There are numerous other optimization algorithms that are averaged iterations of some nonexpansive mapping. For instance, forward-backward splitting for solving monotone inclusion problems and for solving Fenchel dual problems, as well as projected and standard gradient methods fit the framework. The line search can also be used in Douglas-Rachford splitting for solving monotone inclusion problems. Also, preconditioned ADMM methods \cite{ChambollePock} can be interpreted as an averaged iteration of some nonexpansive mapping \cite{HeYuan_PDconv}. The recently proposed three operator splitting method in \cite{Davis_three_splitting} is another example. Finally, the proximal point algorithm \cite{Rockafellar_PPA} for finding the zero of one maximally monotone operator is an averaged iteration. 
Actually, an algorithm is an averaged iteration of a nonexpansive mapping if and only if it is an instance of the proximal point method. Many of the methods mentioned above are discussed in \cite{primer_Ryu_Boyd}. \section{Line search variations} There are numerous ways to create variations of the line search method. In this section, we list some that can improve practical convergence. \paragraph{Line search activation.} We do not need to perform line search in every iteration. Line search can be used in a subset of the iterations only. If a cheap test can indicate if a line search is beneficial, this can be used as activation rule for the line search. Let $v^k=x^{k}-x^{k-1}$ be the difference between consecutive iterates. We have observed that if $v^{k+1}$ and $v^k$ are almost aligned, large step lengths $\alpha_k$ are typically accepted. If they are not aligned, we are typically restricted to smaller $\alpha_k$. So, an activation rule could be that the cosine between the vectors $v^{k+1}$ and $v^k$ is large, i.e., that \begin{align} \frac{(v^{k+1})^Tv^k}{\|v^{k+1}\|_2\|v^k\|_2}>1-\hat{\epsilon} \end{align} for some small $\hat{\epsilon}>0$. This is particularly useful for methods where the affine operator $S_1$ is not dominating (as in consensus). Even for methods where $S_1$ is dominating, this can be useful. In some cases we get fewer iterations when this activation rule is used, than if not. \paragraph{Other candidate points.} We are not restricted to perform the line search along the residual direction $r^k$. We can accept any candidate point $\hat{x}^{k+1}$ as the next iterate if its fixed-point residual is smaller than for the nominal point. We introduce the residual function \begin{align} r(x) = Sx-x. \label{eq:resid_fcn} \end{align} Then we can replace the test in \eqref{eq:ls_test} with \begin{align} \|r(\hat{x}^{k+1})\|_2\leq(1-\epsilon)\|r(\bar{x}^k)\|_2. 
\label{eq:ls_test_new_point} \end{align} The full algorithm becomes \begin{align*} r^k&:=Sx^k-x^k\\ \bar x^k&:=x^k+\bar{\alpha} r^k\\ \bar r^k&:=S \bar x^k - \bar x^k\\ x^{k+1} &:=\begin{cases} \hat{x}^{k+1}& {\hbox{if }} \eqref{eq:ls_test_new_point} {\hbox{ holds}}\\ x^k+\bar{\alpha} r^k & {\hbox{else}} \end{cases} \end{align*} It is straightforward to verify that all convergence results for the residuals $r^k$ in Section~\ref{sec:conv_analysis} still hold in this more general setting. One special case is to perform line search along another direction $d^k$. Then the candidate point is $\hat{x}^{k+1}=x^k+\alpha_k d^k$. To evaluate the test in \eqref{eq:ls_test_new_point}, we need to compute $S_2S_1(x^{k}+\alpha_k d^k)$. One evaluation is in the general case as expensive as one iteration of the method. However, if $d^k=r^k$ and $S_1$ is affine, we saw in Section~\ref{sec:comp_cost} that no additional $S_1$ applications are needed to perform the line search. If the direction $d^k$ instead is a linear combination of previous residuals, i.e., $d^k=\sum_{i=0}^k\theta_ir^i$ where $\theta_i\in\mathbb{R}$, also no additional applications of $S_1$ are needed due to it being affine. \paragraph{Another line search condition.} Here, we present another line search test that does not compare progress with a nominal step, but with the last iterate that was decided by a line search. The progress is not measured with the residual function $r$ in \eqref{eq:resid_fcn}, but with a different function $s$. To state the line search test, we let $i_k$ be the index of the last iterate (up to the current iterate $k$) that was decided by a line search, i.e., that was not the result of a nominal step. 
Then any candidate point $\hat{x}^{k+1}$ can be accepted as the next iterate if the following conditions hold \begin{align*} \|s(\hat{x}^{k+1})\|_2\leq(1-\epsilon)\|s(x^{i_k})\|_2 {\hbox{\phantom{aaa}and\phantom{aaa}}} \|r(\hat{x}^{k+1})\|_2\leq C\|s(\hat{x}^{k+1})\|_2, \end{align*} where $C$ is a positive scalar, $\epsilon$ is a small positive scalar, and $r$ is the residual function in \eqref{eq:resid_fcn}. If these conditions are not satisfied, the algorithm instead takes a nominal step $x^{k+1} = x^k+\bar{\alpha} r^k$. The convergence results in this setting become weaker. The rate results in Theorems~\ref{thm:sublin_conv_rate} and \ref{thm:lin_conv_rate} cannot be guaranteed. The results concerning the residual sequence $r^{k}$ in Theorem~\ref{thm:norm_conv}, Theorem~\ref{thm:res_conv_fix_nonempty}, and Theorem~\ref{thm:res_conv_fix_empty} can, however, be shown to hold. Let $k_0, k_1, k_2,\ldots$ be the iteration indices whose iterates have been decided by accepting a candidate line search point. Then \begin{align*} \|s(x^{k_p})\|_2\leq (1-\epsilon)\|s(x^{k_{p-1}})\|_2\leq(1-\epsilon)^p\|s(x^{k_0})\|_2, \end{align*} which implies for iteration indices $k\in[k_p,k_{p+1}]$ that \begin{align*} \|r(x^{k})\|_2\leq \|r(x^{k_p})\|_2\leq C\|s(x^{k_p})\|_2\leq C(1-\epsilon)^{p}\|s(x^{k_0})\|_2, \end{align*} since $\{\|r(x^k)\|_2\}$ is a nonincreasing sequence in the basic method. If the tests are satisfied an infinite number of times, then $p\to\infty$ and $\|r(x^{k})\|_2\to 0$ as $k\to\infty$. If the tests are satisfied a finite number of times (which they are if, e.g., $\inf_{x}\|Sx-x\|_2>0$), the algorithm reduces to the basic iteration after a finite number of steps. Using these insights, the proofs to the results concerning the residual $r^k$ in Theorem~\ref{thm:norm_conv}, Theorem~\ref{thm:res_conv_fix_nonempty}, and Theorem~\ref{thm:res_conv_fix_empty} can easily be modified to show that the results hold also in this setting. 
To improve performance, one might want to add a condition that accepts a candidate point if there is an improvement compared to the previous iterate, i.e., if the following condition is satisfied \begin{align*} \|s(\hat{x}^{k+1})\|_2\leq(1-\epsilon)\|s(x^k)\|_2. \end{align*} This condition is, however, not needed to guarantee convergence of the method. \section{Numerical examples} \label{sec:num_examples} \begin{figure} \caption{Fixed-point residual vs iteration for Douglas-Rachford with and without line search.} \label{fig:NNLS_rate} \end{figure} \begin{figure} \caption{Step length $\alpha_k$ vs iteration in the line search method.} \label{fig:NNLS_alpha} \end{figure} \subsection{Nonnegative least squares} To evaluate the efficiency of the line search, we solve a nonnegative least squares problem using the Douglas-Rachford algorithm. The problem is of the form \begin{align*} \begin{tabular}{ll} minimize & $\|Ax-b\|_2^2$\\ subject to & $x\geq 0$ \end{tabular} \end{align*} where $A\in\mathbb{R}^{1000\times 1000}$ is dense and $b\in\mathbb{R}^{1000}$. The entries in the data matrix $A$ are drawn from a normal distribution with zero mean and unit variance. Then, each row of $A$ is scaled with a uniformly distributed random number between 0.1 and 1.1 to worsen the conditioning of the problem. The entries in $b$ are drawn from a normal distribution with zero mean and unit variance. To fit the Douglas-Rachford framework, we let $f(x)=\|Ax-b\|_2^2$ and $g(x)=\iota(x\geq 0)$. The operator ${\rm{prox}}_{\gamma f}$ is affine and the operator ${\rm{prox}}_{\gamma g}$ is (very) cheap to evaluate compared to ${\rm{prox}}_{\gamma f}$. Therefore, this problem is on the form discussed in Section~\ref{sec:comp_cost}. So an iteration with line search is just slightly more expensive than performing a basic iteration of the algorithm. 
In the line search test \eqref{eq:ls_test_comp}, we let $\epsilon=0.03$ (which may or may not be a good choice in other examples) and $\alpha_k$ is decided using back-tracking from $\alpha_{\max}=50$ with a factor 1/1.4 for each candidate $\alpha$. The back-tracking is stopped either when the test is satisfied, or when the candidate $\alpha\leq \bar{\alpha}$, in which case $\alpha_k=\bar{\alpha}$. This gives a worst case of $14$ line search test points. The computational cost for ${\rm{prox}}_{\gamma f}$ is roughly $2n^2$ after an initial matrix factorization. The cost for ${\rm{prox}}_{\gamma g}$ is, on the other hand, roughly $n$. To evaluate the line search test, no additional ${\rm{prox}}_{\gamma f}$ computations are needed. But about 10 vector additions or multiplications with scalars and one ${\rm{prox}}_{\gamma g}$ is needed for every candidate point (the same as in the standard algorithm). So, evaluating one candidate point costs approximately $10n$. A worst case of 14 candidate points costs $140n$ for a full line search. Comparing this to the cost for one basic iteration, $2n^2+10n$, gives, when $n=1000$, that one iteration with line search costs, in the worst case, 1.07 times a basic iteration. Figure~\ref{fig:NNLS_rate} shows the fixed-point residual vs iteration number for Douglas-Rachford with and without line search (the Douglas-Rachford parameters are chosen to be $\bar{\alpha}=\tfrac{1}{2}$ and $\gamma= 3$). For this example, the number of iterations is reduced by roughly a factor four. The improvement in execution time is roughly the same because of the modest 7$\%$ increase in computational cost due to the line search. Figure~\ref{fig:NNLS_alpha} shows what values $\alpha_k$ that are chosen in the line search. An $\alpha_k=\bar{\alpha}$ corresponds to a standard Douglas-Rachford iteration. In 175 out of the 2800 iterations, an $\alpha_k>\bar{\alpha}$ was selected. Among these 158 had $\alpha_k>5$. 
\def350{350} \begin{figure} \caption{The left figure shows one iteration of alternating projections. The residual in this figure is $r^1=x^2-x^1$. In the right figure, an alternating projections step with line search is performed. The residual direction is shown in red. We evaluate six candidate points $x_i$.} \label{fig:ap_w_wo_ls} \end{figure} \begin{figure} \caption{This figure shows 50 iterations of alternating projections. Compare to Figure~\ref{fig:ap_w_wo_ls}.} \label{fig:ap_many_steps} \end{figure} \subsection{An alternating projections example} To visualize the line search, we solve a two-dimensional feasibility problem using alternating projections. We want to find a point in the intersection between two sets $C=\{x\in\mathbb{R}^2~|~\|x\|\leq 1\}$ and $D=\{x\in\mathbb{R}^2~|~x=(x_1,x_2),x_1=1\}$. So $C$ is the unit disk, and $D$ is a vertical line that touches the boundary of $C$ at $(1,0)$. The unique intersection point is $x^\star=(1,0)$. In Figure~\ref{fig:ap_w_wo_ls} we show one iteration of the standard alternating projections algorithm and one iteration with line search. In Figure~\ref{fig:ap_many_steps} we show 50 steps of alternating projections. We see that the progression in 50 steps of alternating projections is roughly the same as the progression of one step with line search (when the farthest acceptable candidate point is chosen). The line search scheme does, on the other hand, compute six candidate points to advance this far. (Or really five, since the first is the basic next step.) So, we gain roughly a factor 10 in this step. This is just a simple example where both projections are very cheap. If the cost of projecting onto the subspace dominates the cost of the other projection, then the cost of performing one iteration with line search is roughly the same as the cost of one basic iteration. In such cases, we can gain a lot by performing line search. 
\section{Acknowledgments} The first author is financially supported by the Swedish Foundation for Strategic Research. The first two authors are members of the LCCC Linnaeus Center at Lund University. \appendix \section{Proofs to results in Section~\ref{sec:main}} \label{app:proofs} \subsection{Proof to Theorem~\ref{thm:norm_conv}} First, we show that $\|r^k\|_2=\|x^k-Sx^k\|_2\to c$ as $k\to\infty$. We show this by considering the cases $\alpha_k=\bar{\alpha}$ and $\alpha_k>\bar{\alpha}$ separately. First, we consider the case $\alpha_k=\bar{\alpha}$. For convenience, we introduce the operator $T=(1-\bar{\alpha})I+\bar{\alpha} S$. Then the update for $\bar{x}^k$ in \eqref{eq:xknom} can be written as \begin{align*} \bar{x}^k = x^k+\bar{\alpha}(Sx^k-x^k)=(1-\bar{\alpha})x^k+\bar{\alpha} Sx^k=Tx^k. \end{align*} Noting that $\|x-Tx\|_2=\|x-(1-\bar{\alpha})x-\bar{\alpha} Sx\|_2=\bar{\alpha}\|x-Sx\|_2$ implies \begin{align*} \|r^{k+1}\|_2=\|\bar{r}^k\|_2=\|\bar{x}^{k}-S\bar{x}^{k}\|_2=\tfrac{1}{\bar{\alpha}}\|\bar{x}^{k}-T\bar{x}^{k}\|_2=\tfrac{1}{\bar{\alpha}}\|Tx^k-TTx^k\|_2. \end{align*} Therefore, since $T$ is nonexpansive: \begin{align} \label{eq:no_ls_decrease}\|r^{k+1}\|_2&\leq\tfrac{1}{\bar{\alpha}}\|x^k-Tx^k\|_2=\|x^k-Sx^k\|_2=\|r^k\|_2. \end{align} Next, we consider the case where $\alpha_k>\bar{\alpha}$. Since $\|\bar{r}^{k}\|_2\leq \|r^k\|_2$, we get from the line search test \eqref{eq:ls_test} that \begin{align} \label{eq:ls_decrease}\|r^{k+1}\|_2\leq (1-\epsilon)\|\bar{r}^{k}\|_2\leq (1-\epsilon)\|r^k\|_2. \end{align} So $\{\|r^{k}\|_2\}_{k=1}^{\infty}$ is a nonincreasing sequence which is bounded below (by 0). Hence it converges. This completes the proof. \subsection{Proof to Theorem~\ref{thm:res_conv_fix_nonempty}} Combining \eqref{eq:no_ls_decrease} and \eqref{eq:ls_decrease}, we get \begin{align} \label{eq:res_decrease}\|r^{k+1}\|_2\leq (1-\epsilon)^{k_0}\|r^0\|_2 \end{align} where $k_0$ is the number of times that $\alpha_k$ satisfies $\alpha_k>\bar{\alpha}$. 
If $k_0\to\infty$ as $k\to\infty$, then $\|r^{k+1}\|_2\to 0$ as $k\to\infty$. On the other hand, if $k_0$ stays finite as $k\to\infty$, there exists a finite $k_{\max}$ after which the line search is not activated again. Then for $k\geq k_{\max}$, the algorithm reduces to $x^{k+1}=Tx^k$, which satisfies $\|r^k\|_2=\|x^{k}-Sx^{k}\|_2=\tfrac{1}{\bar{\alpha}}\|x^{k}-Tx^{k}\|_2\to 0$ as $k\to\infty$, see \cite[Theorem~5.14]{bauschkeCVXanal}. This concludes the proof. \subsection{Proof to Theorem~\ref{thm:res_conv_fix_empty}} Combining \eqref{eq:no_ls_decrease} and \eqref{eq:ls_decrease}, we get \begin{align} \label{eq:res_decrease_empty}\|r^{k+1}\|_2\leq (1-\epsilon)^{k_0}\|r^0\|_2 \end{align} where $k_0$ is the number of times that $\alpha_k$ satisfies $\alpha_k>\bar{\alpha}$. If $k_0\to\infty$ as $k\to\infty$, then $\|r^{k+1}\|_2\to 0$ as $k\to\infty$. This contradicts the assumption that $\inf_x\|Sx-x\|_2>0$. Hence $k_0$ must be finite and there exists a $k_{\max}$ after which the algorithm reduces to the basic averaged iteration. Let $T=(1-\bar{\alpha})I+\bar{\alpha} S$, $x^{k_{\max}}=\tilde{x}_0$ and $\Delta k = k-k_{\max}$. Then a straightforward generalization of \cite[Proposition~4.5]{bauschkeAffineSS} to allow for averaged operators (instead of only firmly nonexpansive or $\tfrac{1}{2}$-averaged) gives that \begin{align*} \|\bar{\alpha} r^{k}-v\|=\|x^k-x^{k+1}-v\|=\|T^{\Delta k}\tilde{x}_0-T^{\Delta k+1}\tilde{x}_0-v\|\to 0 \end{align*} for a specific $v$. Therefore $r^k\to \tfrac{1}{\bar{\alpha}}v=:d$ as $k\to\infty$. Further, $x^{k+1}-x^k=\bar{\alpha} r^k\to\bar{\alpha} d$ as $k\to\infty$. The $v$ is the {\emph{infimal displacement vector}} (see \cite[Fact~2.2]{bauschkeAffineSS}) that satisfies $v\in\overline{{\rm{ran}}}(I-T)$ (i.e., $v$ is in the closure of the range of $I-T$) and $\|v\|_2=\inf_x\|x-Tx\|_2$. Therefore $\|d\|_2=\tfrac{1}{\bar{\alpha}}\|v\|_2=\tfrac{1}{\bar{\alpha}}\inf_x\|x-Tx\|_2=\inf_x\|x-Sx\|_2$. This concludes the proof. 
\subsection{Proof to Theorem~\ref{thm:sublin_conv_rate}} We need the following lemma for this proof. \begin{lem} Suppose that $S~:~\mathbb{R}^n\to\mathbb{R}^n$ is nonexpansive and that $\bar{\alpha}\in(0,1)$. Then every iteration of \eqref{eq:rk}-\eqref{eq:xk+1} satisfies \begin{align} \label{eq:telescope_eq}\bar{\alpha}(1-\bar{\alpha})\|\bar{r}^k-&r^k\|_2^2 \leq \|x^k-\bar{x}^{k}\|_2^2-\|x^{k+1}-\bar{x}^{k+1}\|_2^2. \end{align} \label{lem:telescope_eq} \end{lem} \begin{pf} Let $T=(1-\bar{\alpha})I+\bar{\alpha} S$. Then $T$ is $\bar{\alpha}$-averaged, and it satisfies \cite[Proposition~4.25(iii)]{bauschkeCVXanal} \begin{align*} \tfrac{1-\bar{\alpha}}{\bar{\alpha}}\|(I-T)\bar{x}^k-(I-T)x^k\|_2^2 \leq \|x^k-\bar{x}^k\|_2^2-\|Tx^k-T\bar{x}^k\|_2^2. \end{align*} Now, since $(I-T)x=(I-(1-\bar{\alpha})I-\bar{\alpha} S)x=\bar{\alpha} (I-S)x$, we have $(I-T)x^k=-\bar{\alpha} r^k$ and $(I-T)\bar{x}^k=-\bar{\alpha} \bar{r}^k$. Therefore \begin{align*} \bar{\alpha}(1-\bar{\alpha})\|\bar{r}^k-r^k\|_2^2 \leq \|x^k-\bar{x}^k\|_2^2-\|Tx^k-T\bar{x}^k\|_2^2. \end{align*} The algorithm chooses either $\alpha_k=\bar{\alpha}$ or $\alpha_k>\bar{\alpha}$. If $\alpha_k=\bar{\alpha}$, we have $Tx^k=\bar{x}^k=x^{k+1}$ and $T\bar{x}^k=Tx^{k+1}=\bar{x}^{k+1}$. Therefore \begin{align*} \bar{\alpha}(1-\bar{\alpha})\|\bar{r}^k-r^k\|_2^2 &\leq \|x^k-\bar{x}^{k}\|_2^2-\|Tx^k-T\bar{x}^{k}\|_2^2\\ &= \|x^k-\bar{x}^{k}\|_2^2-\|x^{k+1}-\bar{x}^{k+1}\|_2^2. 
\end{align*} If instead $\alpha_k>\bar{\alpha}$, we get \begin{align*} \bar{\alpha}(1-\bar{\alpha})\|\bar{r}^k-r^k\|_2^2 &\leq \|x^k-\bar{x}^k\|_2^2-\|Tx^k-T\bar{x}^k\|_2^2\\ &= \|x^k-\bar{x}^k\|_2^2-\|\bar{x}^k-T\bar{x}^k\|_2^2\\ &\leq \|x^k-\bar{x}^k\|_2^2-\tfrac{1}{(1-\epsilon)^2}\|x^{k+1}-Tx^{k+1}\|_2^2\\ &\leq \|x^k-\bar{x}^k\|_2^2-\|x^{k+1}-Tx^{k+1}\|_2^2\\ &= \|x^k-\bar{x}^k\|_2^2-\|x^{k+1}-\bar{x}^{k+1}\|_2^2 \end{align*} where the second inequality holds due to the line search test in \eqref{eq:ls_test} and the third inequality holds since $\epsilon\in(0,1)$. Therefore \eqref{eq:telescope_eq} holds for all $k$ and the proof is complete. \end{pf} Now we are ready to prove the result. A telescope summation of \eqref{eq:telescope_eq} gives \begin{align*} \bar{\alpha}(1-\bar{\alpha})\sum_{k=0}^n\|\bar{r}^k-r^k\|_2^2\leq \|x^0-\bar{x}^0\|_2^2=\bar{\alpha}^2\|r^0\|_2^2. \end{align*} This proves \eqref{eq:res_diff_sum}. To prove \eqref{eq:res_diff_best}, we note that $k_{\rm{best}}^n\in\{0,\ldots,n\}$ is the iteration $k$ (up till $n$) with smallest $\|\bar{r}^k-r^k\|_2$. Therefore \begin{align*} (n+1)\|\bar{r}^{k_{\rm{best}}^n}-r^{k_{\rm{best}}^n}\|_2^2 \leq \sum_{k=0}^n\|\bar{r}^k-r^k\|_2^2 \leq \frac{\bar{\alpha}}{1-\bar{\alpha}}\|r^0\|_2^2. \end{align*} This concludes the proof. \subsection{Proof to Theorem~\ref{thm:lin_conv_rate}} First, we introduce $T=(1-\bar{\alpha})I+\bar{\alpha} S$ which is $\bar{\alpha}$-averaged, and satisfies $\|x-Sx\|_2=\tfrac{1}{\bar{\alpha}}\|x-Tx\|_2$. Let's consider the case when $\alpha_k=\bar{\alpha}$. Then $\bar{x}^k=Tx^k$ and \begin{align*} \|r^{k+1}\|_2&=\|\bar{r}^k\|_2=\|\bar{x}^{k}-S\bar{x}^{k}\|_2=\tfrac{1}{\bar{\alpha}}\|\bar{x}^{k}-T\bar{x}^{k}\|_2 =\tfrac{1}{\bar{\alpha}}\|Tx^k-TTx^k\|_2\\ &=\tfrac{1}{\bar{\alpha}}\|(1-\bar{\alpha})(x^k-Tx^k)+\bar{\alpha} (Sx^k-STx^k)\|_2. 
\end{align*} The triangle inequality gives that \begin{align*} \|r^{k+1}\|_2&\leq\tfrac{1}{\bar{\alpha}}((1-\bar{\alpha})\|x^k-Tx^k\|_2+\bar{\alpha}\|Sx^k-STx^k\|_2)\\ &\leq\tfrac{1}{\bar{\alpha}}((1-\bar{\alpha})\|x^k-Tx^k\|_2+\bar{\alpha}\delta\|x^k-Tx^k\|_2)\\ &=\tfrac{1}{\bar{\alpha}}(1-\bar{\alpha}+\bar{\alpha}\delta)\|x^k-Tx^k\|_2\\ &=(1-\bar{\alpha}+\bar{\alpha}\delta)\|x^k-Sx^k\|_2\\ &=(1-\bar{\alpha}+\bar{\alpha}\delta)\|r^k\|_2. \end{align*} Next, we consider the case when $\alpha_k>\bar{\alpha}$. Since $\|\bar{r}^k\|_2\leq(1-\bar{\alpha}+\bar{\alpha}\delta)\|r^k\|_2$, the line search test \eqref{eq:ls_test} implies that \begin{align*} \|r^{k+1}\|_2\leq (1-\epsilon)\|\bar{r}^k\|_2\leq (1-\epsilon)(1-\bar{\alpha}+\bar{\alpha}\delta)\|r^k\|_2\leq (1-\bar{\alpha}+\bar{\alpha}\delta)\|r^k\|_2. \end{align*} That is, the algorithm is linearly convergent with factor (at most) $(1-\bar{\alpha}+\bar{\alpha}\delta)$ in both situations. This concludes the proof. \section{ADMM derivation} \label{app:ADMM} In this section, we show the equivalence between the standard ADMM formulation \eqref{eq:ADMM1}-\eqref{eq:ADMM4} and the ADMM version used for line search \eqref{eq:ADMM1_ls}-\eqref{eq:ADMM3_ls}. We also show that the version used for line search, \eqref{eq:ADMM1_ls}-\eqref{eq:ADMM3_ls}, is an $\alpha$-averaged iteration of a nonexpansive mapping. We do this by showing that the ADMM iterations can be derived by applying Douglas-Rachford splitting to a specific problem formulation. This derivation is not new \cite{Gabay83,EcksteinPhD}, but we include it here for completeness and to explicitly arrive at the ADMM variation \eqref{eq:ADMM1_ls}-\eqref{eq:ADMM3_ls} that we need for the line search.
ADMM solves problems of the form \begin{align} \begin{tabular}{ll} minimize & $f(x)+g(z)$\\ subject to & $Ax+Bz=c$ \end{tabular} \label{eq:ADMM_prob_app} \end{align} where $f~:~\mathbb{R}^n\to\mathbb{R}\cup\{\infty\}$ and $g~:~\mathbb{R}^m\to\mathbb{R}\cup\{\infty\}$ are proper closed convex, $A\in\mathbb{R}^{p\times n}$, $B\in\mathbb{R}^{p\times m}$, and $c\in\mathbb{R}^{p}$. Using image functions (that are also called infimal postcompositions) defined as \begin{align*} (L\triangleright \psi)(y)=\inf\{\psi(x)~|~Lx=y\} \end{align*} where $L\in\mathbb{R}^{m\times n}$ is a linear operator and $\psi~:~\mathbb{R}^n\to\mathbb{R}\cup\{\infty\}$ is a proper function, it is straightforward to verify that \eqref{eq:ADMM_prob_app} is equivalent to \begin{align*} {\hbox{minimize }} (-A\triangleright f)(-u-c)+(-B\triangleright g)(u). \end{align*} Let $p_1(u)=(-A\triangleright f)(-u-c)$ and $p_2(u)=(-B\triangleright g)(u)$ to get the equivalent problem \begin{align} {\hbox{minimize }} p_1(u)+p_2(u). \label{eq:DR_imgfcn_prob} \end{align} To arrive at the standard ADMM iterations, we apply Douglas-Rachford splitting to \eqref{eq:DR_imgfcn_prob}. The algorithm becomes \begin{align} v^{k+1}=(1-\alpha)v^k+\alpha R_{\gamma p_1}R_{\gamma p_2}v^k \label{eq:ADMM_avg_iter} \end{align} where the reflected proximal operators $R_{\gamma p_1}$ and $R_{\gamma p_2}$ are given by $R_{\gamma p_1}=2{\rm{prox}}_{\gamma p_1}-I$ and $R_{\gamma p_2}=2{\rm{prox}}_{\gamma p_2}-I$. Under the assumption that the infimum over $x$ is attained in the following prox evaluation, we have \begin{align} \nonumber{\rm{prox}}_{\gamma p_1}(v)&=\mathop{\rm argmin}_{u}\{\inf_{x}\{f(x)~|~-Ax=-u-c\}+\tfrac{1}{2\gamma}\|u-v\|_2^2\}\\ \label{eq:prox_p1}&=A\mathop{\rm argmin}_{x}\{f(x)+\tfrac{1}{2\gamma}\|Ax-v-c\|_2^2\}-c. \end{align} The reflected proximal operator becomes \begin{align} R_{\gamma p_1}(v)=2A\mathop{\rm argmin}_{x}\{f(x)+\tfrac{1}{2\gamma}\|Ax-v-c\|_2^2\}-2c-v.
\label{eq:rprox_p1} \end{align} Again, assuming that the following infimum is attained, we get \begin{align} \nonumber{\rm{prox}}_{\gamma p_2}(v)&=\mathop{\rm argmin}_{u}\{\inf_{z}\{g(z)~|~-Bz=u\}+\tfrac{1}{2\gamma}\|u-v\|_2^2\}\\ \label{eq:prox_p2}&=-B\mathop{\rm argmin}_{z}\{g(z)+\tfrac{1}{2\gamma}\|Bz+v\|_2^2\} \end{align} and reflected proximal operator \begin{align} R_{\gamma p_2}(v)=-2B\mathop{\rm argmin}_{z}\{g(z)+\tfrac{1}{2\gamma}\|Bz+v\|_2^2\}-v. \label{eq:rprox_p2} \end{align} Using the prox expressions \eqref{eq:prox_p1} and \eqref{eq:prox_p2}, and defining $\rho=\tfrac{1}{\gamma}$, we find that the Douglas-Rachford algorithm \eqref{eq:DR1}-\eqref{eq:DR3} applied to \eqref{eq:DR_imgfcn_prob} becomes \begin{align} \label{eq:ADMM1_ls_app}z^k&=\mathop{\rm argmin}_{z}\{g(z)+\tfrac{\rho}{2}\|Bz+v^k\|_2^2\}\\ \label{eq:ADMM2_ls_app}x^k&=\mathop{\rm argmin}_{x}\{f(x)+\tfrac{\rho}{2}\|Ax+2Bz^k+v^k-c\|_2^2\}\\ \label{eq:ADMM3_ls_app}v^{k+1}&=v^k+2\alpha (Ax^k+Bz^k-c) \end{align} This is exactly the iteration \eqref{eq:ADMM1_ls}-\eqref{eq:ADMM3_ls} which is used in the line search. This algorithm is equivalent to ADMM, but keeps the $v^k$ variables in which the algorithm can be interpreted as an averaged iteration of a nonexpansive mapping, see \eqref{eq:ADMM_avg_iter}. To derive the ADMM iterations \eqref{eq:ADMM1}-\eqref{eq:ADMM4}, we next substitute $v^{k+1}=u^{k}+2\alpha(Ax^k-c)-(1-2\alpha)Bz^k$. 
Let $x_A^{k} = 2\alpha Ax^k-(1-2\alpha)(Bz^k-c)$ to get $v^{k+1}=u^{k}+x_A^k-c$ and \begin{align*} z^{k} &= \mathop{\rm argmin}_{z}\{g(z)+\tfrac{\rho}{2}\|x_A^{k-1}+Bz-c+u^{k-1}\|_2^2\}\\ x^k &= \mathop{\rm argmin}_{x}\{f(x)+\tfrac{\rho}{2}\|Ax+2 Bz^k+u^{k-1}+x_A^{k-1}- 2c\|_2^2\}\\ u^{k} &= u^{k-1}+(x_A^{k-1}+Bz^k-c) \end{align*} since $v^{k+1}=u^{k}+x_A^k-c$ inserted in \eqref{eq:ADMM3_ls_app} implies \begin{align*} u^{k}&=u^{k-1}+x_A^{k-1}-x_A^k+2\alpha(Ax^{k}+Bz^k-c)\\ &=u^{k-1}+x_A^{k-1}-(2\alpha Ax^{k}-(1-2\alpha)(Bz^{k}-c))+2\alpha(Ax^{k}+Bz^k-c)\\ &=u^{k-1}+(x_A^{k-1}+Bz^k-c) \end{align*} (This implies that $v^{k}=u^{k}-Bz^{k}$.) Next, insert the third equation into the second to get \begin{align*} z^{k} &= \mathop{\rm argmin}_{z}\{g(z)+\tfrac{\rho}{2}\|x_A^{k-1}+Bz-c+u^{k-1}\|_2^2\}\\ x^k &= \mathop{\rm argmin}_{x}\{f(x)+\tfrac{\rho}{2}\|Ax+Bz^k- c+u^{k}\|_2^2\}\\ u^{k} &= u^{k-1}+ (x_A^{k-1}+Bz^k-c) \end{align*} Now, change order of the $x^k$ update and the $u^{k}$ update and move the $x^{k}$ update to the first line and insert $x_A^{k-1}$ to get \begin{align*} x^{k-1} &= \mathop{\rm argmin}_{x}\{f(x)+\tfrac{\rho}{2}\|Ax+Bz^{k-1}-c+u^{k-1}\|_2^2\}\\ x_A^{k-1}&=2\alpha Ax^{k-1}-(1-2\alpha)(Bz^{k-1}-c)\\ z^{k} &= \mathop{\rm argmin}_{z}\{g(z)+\tfrac{\rho}{2}\|x_A^{k-1}+Bz-c+u^{k-1}\|_2^2\}\\ u^{k} &= u^{k-1}+ (x_A^{k-1}+Bz^k-c) \end{align*} Now, let $x^k\to x^{k+1}$ and $x_A^k\to x_A^{k+1}$ to get \begin{align*} x^{k} &= \mathop{\rm argmin}_{x}\{f(x)+\tfrac{\rho}{2}\|Ax+Bz^{k-1}-c+u^{k-1}\|_2^2\}\\ x_A^{k}&=2\alpha Ax^{k}-(1-2\alpha)(Bz^{k-1}-c)\\ z^{k} &= \mathop{\rm argmin}_{z}\{g(z)+\tfrac{\rho}{2}\|x_A^{k}+Bz-c+u^{k-1}\|_2^2\}\\ u^{k} &= u^{k-1}+ (x_A^{k}+Bz^{k}-c) \end{align*} Letting $k\to k+1$ gives ADMM on the standard form \eqref{eq:ADMM1}-\eqref{eq:ADMM4}. \begin{rem} ADMM can also be derived by applying Douglas-Rachford to the Fenchel dual of \eqref{eq:ADMM_prob_app}, see \cite{Gabay83}. 
The Fenchel dual is \begin{align*} {\hbox{minimize }} f^*(-A^T\mu)+c^T\mu+g^*(-B^T\mu). \end{align*} Letting $d_1(\mu):=f^*(-A^T\mu)+c^T\mu$ and $d_2(\mu) := g^*(-B^T\mu)$, this is equivalent to \begin{align*} {\hbox{minimize }} d_1(\mu)+d_2(\mu). \end{align*} It holds that $p_1^*=d_1$ and $p_2^*=d_2$, see \cite[Corollary~15.28]{bauschkeCVXanal}. It is also known that Douglas-Rachford when applied to minimize $p_1+p_2$ is equivalent to applying Douglas-Rachford to minimize $p_1^*+p_2^*$ (which is $d_1+d_2$), see \cite{EcksteinPhD}. Therefore we can also apply Douglas-Rachford to this dual formulation to get ADMM. This derivation is longer and therefore not used here. \end{rem} \end{document}
\begin{document} \title{Chromatic structures in stable homotopy theory} \begin{abstract} In this survey, we review how the global structure of the stable homotopy category gives rise to the chromatic filtration. We then discuss computational tools used in the study of local chromatic homotopy theory, leading up to recent developments in the field. Along the way, we illustrate the key methods and results with explicit examples. \end{abstract} \tableofcontents \section{Introduction} At its core, chromatic homotopy theory provides a natural approach to the computation of the stable homotopy groups of spheres $\pi_*S^0$. Historically, the first few of these groups were computed geometrically through the classification of stably framed manifolds, using the Pontryagin--Thom isomorphism $\pi_*S^0 \cong \Omega_*^{\mathrm{fr}}$. However, beginning with the work of Serre, it soon turned out that algebraic tools were more effective, both for the computation of specific low-degree values as well as for establishing structural results. In particular, Serre proved that $\pi_*S^0$ is a degreewise finitely generated abelian group with $\pi_0S^0 \cong \mathbb{Z}$ and that all higher groups are torsion. Serre's method was essentially inductive: starting with the knowledge of the first $n$ groups $\pi_0S^0,\ldots,\pi_{n-1}S^0$, one can in principle compute $\pi_nS^0$. Said differently, Serre worked with the Postnikov filtration of $\pi_*S^0$, in which the $(n+1)$st filtration quotient is given by $\pi_nS^0$. The key insight of chromatic homotopy theory is that $\pi_*S^0$ comes naturally equipped with a completely different filtration---\emph{the chromatic filtration}---which systematically exhibits the large scale symmetries hidden in the stable homotopy category. Chromatic homotopy theory is the study of the chromatic filtration and the structures that arise from it, both on $\pi_*S^0$ but also on the category of spectra itself. 
As with many young and active fields, points of view are evolving rapidly and there are few surveys that keep up with the developments. Our goal for this chapter is to present our perspective on the subject and, in the process, to draw one of the possible maps of the field in its current state. We would like to emphasize that our exposition is in many ways revisionistic and certainly far from comprehensive, but rather reflects our own understanding of and point of view on the subject. We apologize to those who would have preferred us to present the material from a different point of view, or for us to include important topics we have left untouched. Hopefully, they will take this as a cue to write an expository piece of their own as we feel there is a great need for more background literature in this vibrant field. In the rest of this short introduction, we give a brief overview of the content of the chapter. The goal of Section \ref{sec:landscape} is to introduce and study the chromatic filtration and its consequences from an abstract point of view. More precisely, we will: \begin{enumerate}[(1)] \item Explain that the chromatic filtration arises \emph{canonically} from the global structure of the stable homotopy category. See Section \ref{ssec:toy}. \item Describe the geometric origins of the chromatic filtration through the relation with the stack of formal groups. See Section \ref{sec:geomodel}. \item Demonstrate that many geometric structures have homotopical manifestations in the chromatic picture that motivate and guide the past and recent developments in the subject. See Section \ref{ssec:chromfiltration1} and Section \ref{ssec:chromfiltration2}. \end{enumerate} While Section~\ref{sec:landscape} focuses mostly on the global picture, in Section~\ref{ssec:lcht} we zoom in on $K(n)$-local homotopy theory.
In Section~\ref{ssec:lcht}, we introduce Morava $E$-theory $E_n$ and the Morava stabilizer group $\mathbb{G}_n$, which play a central role in this story because of their relationship to the $K(n)$-local sphere via the equivalence $L_{K(n)}S^0 \simeq E_n^{h\mathbb{G}_n}$. The resulting descent spectral sequence, whose $E_2$-term is expressed in terms of group cohomology, is one of the most important computational tools in the subject. For this reason, Section~\ref{ssec:gn} is devoted to the study of $\mathbb{G}_n$ and its homological algebraic properties. At this point, we go on a hiatus and give an overview of the chromatic story at height $n=1$. This is the content of Section~\ref{sec:cscheight1}, whose goal is to provide the reader with a concrete example to keep in mind for the rest of the chapter. The most technical part of this overview of chromatic homotopy theory is Section~\ref{sec:fin}, which presents the theory of finite resolutions. These are finite sequences of spectra that approximate the $K(n)$-local sphere by spectra of the form $E_n^{hF}$ for finite subgroups $F$ of $\mathbb{G}_n$. The advantage of this approach is that the spectra $E_n^{hF}$ are computationally tractable. Finite resolutions have been one of the most important tools in computations at height $n=2$ and we give detailed examples in this case in Section~\ref{sec:resheight2}. In the last part, Section~\ref{sec:thms}, we provide an overview of three topics in chromatic homotopy theory that have seen recent breakthroughs: \begin{enumerate}[(1)] \item In Section~\ref{sec:cscmore}, we discuss chromatic reassembly, which describes the passage from the $K(n)$-local to the $p$-local picture. The main open problem is the \emph{chromatic splitting conjecture} and we give an overview of the current state of affairs on this question. \item In Section~\ref{sec:invdual}, we turn to the problem of computing the group of invertible objects in the symmetric monoidal category of $K(n)$-local spectra.
We also touch upon the closely related topic of $K(n)$-local dualities. \item In Section~\ref{sec:assymptotics} we talk about the asymptotic behavior of local chromatic homotopy theory when $p\to \infty$. \end{enumerate} These developments demonstrate how chromatic homotopy theory uncovers structures in the stable homotopy category that reveal the many interactions between homotopy theory and other areas of mathematics. \subsection*{Conventions and prerequisites} We will assume that the reader is familiar with basic stable homotopy theory and category theory, as for example contained in the appendices to Ravenel \cite{RavNil}. Throughout this chapter, $\mathrm{Sp}$ will denote a good symmetric monoidal model for the category of spectra, as for example $S$-modules \cite{ekmm}, symmetric spectra \cite{hss_symm}, orthogonal spectra \cite{mmss_orth}, or the $\infty$-category of spectra \cite{ha}. Note that all of these categories model the stable homotopy category, i.e., their associated homotopy categories are equivalent to the stable homotopy category, so the homotopical constructions in this chapter will be model-independent. In fact, Schwede's rigidity theorem \cite{schwede_rigid} justifies that we may work in a model-independent fashion. In particular, we freely use the theory of ring spectra in $\mathrm{Sp}$ and module spectra over them, formal groups, and spectral sequences. A full triangulated subcategory of a triangulated category $\mathcal{T}$ is called thick if it is closed under suspensions and desuspensions, fiber sequences, and retracts. If $\mathcal{T}$ is cocomplete, then a thick subcategory is called localizing if it is closed under all set-indexed direct sums, and we write $\Loc(S)$ for the smallest localizing subcategory of $\mathcal{T}$ containing a given set $S$ of objects in $\mathcal{T}$.
Further, recall that an object $C \in \mathcal{T}$ is said to be compact (or small) if $\mathrm{Hom}_{\mathcal{T}}(C,-)$ commutes with arbitrary direct sums in $\mathcal{T}$; we will write $\mathcal{T}^{\omega}$ for the full subcategory spanned by the compact objects in $\mathcal{T}$. If $\mathcal{C}$ denotes a model (i.e., a stable model category or stable $\infty$-category) for $\mathcal{T}$, then the corresponding notions for $\mathcal{C}$ are defined analogously. \section{A panoramic view of the chromatic landscape}\label{sec:landscape} The goal of this section is to give an overview of the global structure of the stable homotopy category from the chromatic perspective. Motivated by the analogy with abelian groups and the geometry of the moduli stack of formal groups, we will explain how the solution of the Ravenel Conjectures by Devinatz, Hopkins, Ravenel, and Smith leads to a canonical filtration in stable homotopy theory. The construction as well as the coarse properties of the resulting chromatic filtration are then summarized in the remainder of this section, which prepares for the in-depth study of the local filtration quotients in Section~\ref{ssec:lcht}. \begin{remark} The global point of view taken in this section goes back to Hopkins' original account~\cite{hopkins_global} of his work with Devinatz and Smith on the nilpotence conjectures. It has subsequently led to the study of the global structure of more general tensor-triangulated categories and the systematic development of tt-geometry by Balmer and his coauthors. We refer to Balmer's chapter in this handbook for background and a plethora of further examples. \end{remark} \subsection{From abelian groups to spectra}\label{ssec:toy} As expressed in Waldhausen's vision of \emph{brave new algebra}, the category $\mathrm{Sp}$ of spectra should be thought of as a homotopical enrichment of the derived category $\mathcal{D}_{\mathbb{Z}}$ of abelian groups. 
Consequently, before beginning with our analysis of the global structure of the stable homotopy category, we may consider the case of abelian groups as a toy example. The starting point is the \emph{Hasse square}\index{Hasse square} for the integers, displayed as the pullback square on the left: \begin{equation}\label{eq:hassesquare} \xymatrix{\mathbb{Z} \ar[r] \ar[d] & \prod_{p}\mathbb{Z}_p \ar[d] & & M \ar[r] \ar[d] & \prod_pM_p^{\wedge} \ar[d] \\ \mathbb{Q} \ar[r] & \mathbb{Q} \otimes \prod_{p}\mathbb{Z}_p & & \mathbb{Q} \otimes M \ar[r] & \mathbb{Q} \otimes \prod_pM_p^{\wedge}.} \end{equation} This is a special case of a local-to-global principle for any chain complex $M \in \mathcal{D}_{\mathbb{Z}}$, expressed by the homotopy pullback square on the right, in which $M_p^{\wedge}$ denotes the derived $p$-completion of $M$. While the remaining terms in this square seem to be more complicated than $M$ itself, they are often easier from a structural point of view. This is the reason that problems in arithmetic geometry---for example finding integer valued solutions to a set of polynomial equations---can often be divided into two steps: First solve the usually simpler question at individual primes $p$, and then attempt to globalize the solutions. This approach is tied closely to the global structure of the category $\mathcal{D}_{\mathbb{Z}}$. Let $\mathcal{D}_{\mathbb{Q}}$ be the derived category of $\mathbb{Q}$-vector spaces and write $(\mathcal{D}_{\mathbb{Z}})_p^{\wedge}$ for the category of derived $p$-complete complexes of abelian groups. (Recall that a complex $C$ of abelian groups is derived $p$-complete if it is $p$-local and $\operatorname{Ext}^i(\mathbb{Q},C) = 0$ for $i=0,1$ or, equivalently, if $C$ is in the image of the zeroth left derived functor of $p$-completion on $\mathcal{D}_{\mathbb{Z}}$.) 
We highlight three fundamental properties of these subcategories of $\mathcal{D}_{\mathbb{Z}}$: \begin{enumerate}[(1)] \item The category $(\mathcal{D}_{\mathbb{Z}})_p^{\wedge}$ is compactly generated by $\mathbb{Z}/p$. In particular, an object $X \in (\mathcal{D}_{\mathbb{Z}})_p^{\wedge}$ is trivial if and only if $X \otimes \mathbb{Z}/p$ is trivial. \item The only proper localizing subcategory of $(\mathcal{D}_{\mathbb{Z}})_p^{\wedge}$ is $(0)$, i.e., if $X$ is any non-trivial object in $(\mathcal{D}_{\mathbb{Z}})_p^{\wedge}$, then $\Loc(X) = (\mathcal{D}_{\mathbb{Z}})_p^{\wedge}$, i.e., the smallest full triangulated subcategory of $(\mathcal{D}_{\mathbb{Z}})_p^{\wedge}$ closed under shifts and colimits which contains $X$ is $(\mathcal{D}_{\mathbb{Z}})_p^{\wedge}$ itself. \item Any object $M \in \mathcal{D}_{\mathbb{Z}}$ can be reassembled from its derived $p$-completions $M_p^{\wedge} \in (\mathcal{D}_{\mathbb{Z}})_p^{\wedge}$, its rationalization $\mathbb{Q} \otimes M \in \mathcal{D}_{\mathbb{Q}}$, together with the gluing information specified in the pullback square displayed on the right of \eqref{eq:hassesquare}. \end{enumerate} Therefore, we may think of $(\mathcal{D}_{\mathbb{Z}})_p^{\wedge}$ as an irreducible building block of $\mathcal{D}_{\mathbb{Z}}$. 
In fact, we can promote these observations to a natural bijection between the residue fields of $\mathbb{Z}$, which are parametrized by the points of $\mathrm{Spec}(\mathbb{Z})$, and the irreducible subcategories of $\mathcal{D}_{\mathbb{Z}}$ they detect: \begin{equation}\label{eq:zbijection} \begin{Bmatrix} \text{Prime fields} \\ \mathbb{Q} \text{ and } \mathbb{F}_p \text{ for } p \text{ prime} \end{Bmatrix} \xymatrix@C=1.7pc{ \ar@{<->}[r]^-{\sim} &} \begin{Bmatrix} \text{Minimal localizing subcategories} \\ \mathcal{D}_{\mathbb{Q}} \text{ and } (\mathcal{D}_{\mathbb{Z}})_p^{\wedge} \text{ for } p \text{ prime} \end{Bmatrix} \end{equation} A convenient language and framework for describing the global structure of categories like $\mathcal{D}_{\mathbb{Z}}$ and $\mathrm{Sp}$ is provided by Balmer's \emph{tensor triangular geometry}. Roughly speaking, the Balmer spectrum $\mathrm{Spc}(\mathcal{T})$ of a tensor triangulated category $\mathcal{T}$ has as points the thick $\otimes$-ideals of $\mathcal{T}^{\omega}$ (where $\mathcal{T}^{\omega}$ denotes the subcategory of compact objects), equipped with a topology that encodes the inclusions among these subcategories. Whenever $\mathcal{T}$ is compactly generated by its $\otimes$-unit, as is the case for example for $\mathrm{Sp}$, thick $\otimes$-ideals coincide with thick subcategories of $\mathcal{T}^{\omega}$. We refer to Balmer's chapter~\cite{balmer_chapter} for precise definitions and many examples. With this terminology at hand, we are now ready to make the slogan at the beginning of this section more precise. First note that we can truncate the homotopy groups of $S^0$ above degree $0$ to obtain a ring map $\phi\colon S^0 \to \tau_{\le 0}S^0 \simeq H\mathbb{Z}$, which is the Hurewicz map for integral homology.
Base-change along $\phi$ then provides a functor \[ \xymatrix{\mathrm{Sp} \simeq \mathrm{Mod}_{S^0}(\mathrm{Sp}) \ar[r]^-{\phi^*} & \mathrm{Mod}_{H\mathbb{Z}}(\mathrm{Sp}) \simeq \mathcal{D}_{\mathbb{Z}}} \] which represents the passage from higher algebra to classical algebra; here, the second equivalence was established by Shipley in \cite{shipley_hz}. Moreover, identifying $\mathbb{Z} \cong [S^0,S^0]$, Balmer constructs a canonical comparison map $\rho$ from the Balmer spectrum of $\mathrm{Sp}$ to the Zariski spectrum of $\mathbb{Z}$. The bijection \eqref{eq:zbijection} implies that the composite \[ \xymatrix{\mathrm{Spc}(\mathcal{D}_{\mathbb{Z}}) \ar[rr]^-{\mathrm{Spc}(\phi^*)} & & \mathrm{Spc}(\mathrm{Sp}) \ar[rr]^{\rho} & & \mathrm{Spec}(\mathbb{Z})} \] is an isomorphism, so $\mathrm{Spc}(\mathrm{Sp})$ contains $\mathrm{Spec}(\mathbb{Z})$ as a retract. This leads to the following natural question: For $p \in \mathrm{Spec}(\mathbb{Z})$, what is the fiber $\rho^{-1}(p)$ in $\mathrm{Spc}(\mathrm{Sp})$? We will see in Theorem~\ref{thm:hopsmith} below that, for each prime ideal $(p) \in \mathrm{Spec}(\mathbb{Z})$, there is an infinite family of points in $\mathrm{Spc}(\mathrm{Sp})$ that interpolates between $(p)$ and $(0) \in \mathrm{Spec}(\mathbb{Z})$, the so-called \emph{chromatic primes}. In other words, the global structure of the stable homotopy category refines the global structure of $\mathcal{D}_{\mathbb{Z}}$; see \cite[Theorem 1.3.3]{balmer_chapter} for a picture. Let $\mathrm{Sp}_{(p)}$ be the category of $p$-local spectra, i.e., those spectra whose homotopy groups are $p$-local abelian groups. It turns out that $\rho^{-1}(p)$ is determined by $\mathrm{Spc}(\mathrm{Sp}_{(p)})$. We will address the following two problems: \begin{enumerate}[(1)] \item Classify the thick subcategories of $\mathrm{Sp}_{(p)}^{\omega}$. \item\label{it2} Find the analogues of prime fields of $\mathrm{Sp}_{(p)}$.
\end{enumerate} As we will see, the classification of thick subcategories is a consequence of the answer to Problem \ref{it2}, but before we can get there, we will exhibit a geometric model that serves as a good approximation to stable homotopy theory. \begin{convention}\label{conv:plocal} From here onwards, we fix a prime $p$ and only consider the category of $p$-local spectra. We write $\mathrm{Sp} = \mathrm{Sp}_{(p)}$ and assume without further mention that our spectra have been localized at $p$. \end{convention} \subsection{A geometric model for stable homotopy theory}\label{sec:geomodel} In order to prepare for the resolution of the questions above, we first exhibit a geometric model for the stable homotopy category whose main structural features will turn out to reflect that of $\mathrm{Sp}$ rather closely. Recall that the mod $p$ singular cohomology $H^*(X,\mathbb{F}_p)$ of any space or spectrum $X$ is endowed with an action of cohomology operations $\pi_*\mathrm{Hom}(H\mathbb{F}_p,H\mathbb{F}_p)$, which form the mod $p$ Steenrod algebra $\mathcal{A}_p$. 
In other words, singular cohomology naturally factors through the functor that forgets the $\mathcal{A}_p$-module structure and only remembers the underlying $\mathbb{F}_p$-vector space of $H^*(X,\mathbb{F}_p)$: \[ \xymatrixcolsep{3pc} \xymatrix{& & \mathrm{Mod}_{\mathcal{A}_p}^{\mathrm{graded}} \ar[d]^{\text{forget}} \\ \mathrm{Sp}^{\mathrm{op}} \ar[rr]_{H^*(-,\mathbb{F}_p)} \ar@{-->}[rru]^{H^*(-,\mathbb{F}_p)} & & \mathrm{Mod}_{\mathbb{F}_p}^{\mathrm{graded}}} \] The Adams spectral sequence, first introduced in \cite{adams_ss}, can then be interpreted as a device that attempts to go back, or at least recover partial information about $X$: There is a spectral sequence \[ E_2^{s,t} \cong \operatorname{Ext}_{\mathcal{A}_p}^{s}(H^*(Y),H^*(X))_t \Longrightarrow [X,Y_{p}^{\wedge}]_{t-s}, \] which converges whenever $X$ and $Y$ are spectra of finite type with $X$ finite; see~\cite{bousfield_locspectra} for a general study of the convergence properties of (generalized) Adams spectral sequences. Here, finite type means that the mod $p$ cohomology is finitely generated in each degree, and $Y_{p}^{\wedge}$ denotes the $p$-completion of $Y$. The subscript $t$ on the $\operatorname{Ext}$-group indicates the internal grading, arising from the grading of the cohomology groups involved. Informally speaking, this spectral sequence measures to what extent $\mathrm{Mod}_{\mathcal{A}_p}$ deviates from being a perfect model for $\mathrm{Sp}$. \begin{remark} Paraphrasing, the \emph{Mahowald uncertainty principle}\index{Mahowald uncertainty principle} asserts that any spectral sequence that computes the stable homotopy groups of a finite spectrum with a machine computable $E_2$-term will be infinitely far from the actual answer. In practical terms this means that the Adams spectral sequence for $X=S^0$ and $Y$ a finite spectrum contains many differentials that require additional input to be determined.
\end{remark} Building on the work of Novikov~\cite{novikov_mu} and Quillen~\cite{quillen_mu}, Morava~\cite{morava} realized that replacing $H\mathbb{F}_p$ by the Brown--Peterson spectrum $BP$ gives rise to a geometric model for $\mathrm{Sp}$ that resembles its global structure more closely. To describe it, recall that $BP$ is an irreducible additive summand in the $p$-localized complex cobordism spectrum $MU$ with coefficients $BP_* = \mathbb{Z}_{(p)}[v_1,v_2,\ldots]$ and $\deg(v_n) = 2p^n-2$. The generator $v_{i+1}$ is uniquely determined only modulo the ideal $(p,v_1,\ldots,v_{i})$ and there are different choices available, for example the Araki or Hazewinkel generators. See, for example, \cite[A2.2]{ravgreen}. The corresponding Hopf algebroid $(BP_*,BP_*BP)$ is a presentation of the moduli stack\index{moduli stack of formal groups} of ($p$-typical) formal groups $\mathcal{M}_{fg}$ and the category of evenly graded comodules over $(BP_*,BP_*BP)$ is equivalent to the category of quasi-coherent sheaves over $\mathcal{M}_{fg}$, see for example~\cite{naumann_stacks} for a general treatment. Miller~\cite{miller_left} explains how this equivalence can be extended to all graded comodules by replacing $\mathcal{M}_{fg}$ by a moduli stack of \emph{spin formal groups}, see also \cite{goerss_mfg}. Taking $BP$-homology induces a functor \[ \xymatrix{\mathrm{Sp} \ar[r] &\mathrm{Comod}_{BP_*BP}^{\mathrm{even}} \simeq \mathrm{QCoh}(\mathcal{M}_{fg}) , & X \mapsto BP_*(X),} \] where $\mathrm{Comod}_{BP_*BP}^{\mathrm{even}}$ denotes the abelian category of evenly graded $BP_*BP$-comodules. The associated Adams--Novikov spectral sequence\index{Adams--Novikov spectral sequence} has signature \[ E_2^{s,t} \cong H^s(\mathcal{M}_{fg};BP_*(X))_t \cong \operatorname{Ext}_{BP_*BP}(BP_*,BP_*(X)) \Longrightarrow \pi_{t-s}X.
\] The structure of this spectral sequence, whose computational exploitation was a major impetus in the development of chromatic homotopy theory (see \cite{MRW}), is governed by the particularly simple geometric structure of $\mathcal{M}_{fg}$, which we describe next: As explained in great detail in \cite{goerss_mfg}, the height filtration of formal groups manifests itself in a descending filtration by closed substacks \begin{equation}\label{eq:heightfiltration} \mathcal{M}_{fg} \supset \mathcal{M}(1) \supset \mathcal{M}(2) \supset \cdots \end{equation} where $\mathcal{M}(n)$ is cut out locally by the ideal defined by the regular sequence $(p,v_1,v_2,\ldots,v_{n-1})$. Note that this filtration is not separated, as the additive formal group has height $\infty$. Write \begin{itemize} \item $\mathcal{M}_{fg}^{\le n}$ for the open complement of $\mathcal{M}(n+1)$ representing formal groups of height at most $n$ with $i_n\colon \mathcal{M}_{fg}^{\le n} \to \mathcal{M}_{fg}$ the inclusion, \item $\mathcal{H}(n) = \mathcal{M}(n) \cap \mathcal{M}_{fg}^{\le n}$ for the locally closed substack of formal groups of height exactly $n$, and \item $\widehat{\mathcal{H}}(n)$ for its formal completion. \end{itemize} If $\Gamma$ is any formal group of height $n$ over $\mathbb{F}_p$, then $\mathcal{H}(n)$ is equivalent as a stack to $B\mathrm{Aut}_{\overline{\mathbb{F}}_p}(\Gamma)$, so the filtration quotients of the height filtration \eqref{eq:heightfiltration} contain a single geometric point. Furthermore, there is a (pro-)Galois extension \begin{equation}\label{eq:galgeometric} \mathrm{Def}(\overline{\mathbb{F}}_p,\Gamma) \longrightarrow \widehat{\mathcal{H}}(n) \end{equation} with Galois group $\mathrm{Gal}(\overline{\mathbb{F}}_p/\mathbb{F}_p) \ltimes \mathrm{Aut}_{\overline{\mathbb{F}}_p}(\Gamma)$, with $\mathrm{Def}(\overline{\mathbb{F}}_p,\Gamma)$ being the Lubin--Tate deformation space. See Remark~\ref{rem:connextensions} below.
In light of \eqref{eq:heightfiltration}, any quasi-coherent sheaf $\mathcal{F} \in \mathrm{QCoh}(\mathcal{M}_{fg})$ can be approximated by its restrictions to the open substacks $\mathcal{M}_{fg}^{\le n}$, so the geometric filtration on $\mathcal{M}_{fg}$ gives rise to a filtration of $\mathrm{QCoh}(\mathcal{M}_{fg})$. It follows that the computation of the cohomology of a quasi-coherent sheaf $\mathcal{F}$ on $\mathcal{M}_{fg}$ can be restricted to the computation of the cohomology of $\mathcal{F}$ reduced to the strata $\mathcal{H}(n)$ together with the gluing data between different strata. The insight of Bousfield, Morava, and Ravenel was that the resulting structure on the $E_2$-term of the Adams--Novikov spectral sequence is in fact manifest in $\pi_*S^0$ and $\mathrm{Sp}$ as well, as we shall see in the next sections. \begin{remark}\label{rem:landweber} An early hint that there is such a close relation between $\mathrm{Sp}$ and $\mathrm{QCoh}(\mathcal{M}_{fg})$ is the Landweber exact functor theorem\index{Landweber exact functor theorem}, which shows that any flat map $f\colon \mathrm{Spec}(R) \to \mathcal{M}_{fg}$ can be lifted to a complex orientable ring spectrum with formal group classified by $f$. We refer to \cite{behrens_chapter} for more details. \end{remark} \subsection{The chromatic filtration: construction}\label{ssec:chromfiltration1} The goal of this section is to answer the questions raised at the end of Section~\ref{ssec:toy} and to construct the chromatic filtration. We continue to work in the category $\mathrm{Sp}$ of $p$-local spectra for a fixed prime $p$ as in Convention~\ref{conv:plocal}. In loose analogy with algebra, a ring spectrum $K \in \mathrm{Sp}$ is said to be a field\index{field object} if every $K$-module splits into a wedge of shifted copies of $K$. In particular, for any spectra $X$ and $Y$, there is a K\"unneth isomorphism \[ K_*(X \wedge Y) \cong K_*(X) \otimes_{K_*}K_*(Y).
\] There exists a family of distinct field spectra $K(n)$ for $0 \le n \le \infty$ called the \emph{Morava $K$-theories}\index{Morava $K$-theory}, whose construction will be reviewed in Section~\ref{sec:moravaEthy}. As a result of the seminal nilpotence theorem\index{nilpotence theorem} proven by Devinatz, Hopkins, and Smith~\cite{nilpotence1,nilpotence2}, we obtain a classification of fields in $\mathrm{Sp}$. \begin{theorem}[Hopkins--Smith]\label{thm:nilpotence} Any field object in $\mathrm{Sp}$ splits (additively) into a wedge of shifted copies of Morava $K$-theories. Moreover, if $R$ is a ring spectrum such that $K(n)_*(R) = 0$ for all $0 \le n \le \infty$, then $R \simeq 0$. \end{theorem} \noindent For example, $K(0) =H\mathbb{Q}$, $K(\infty)=H\mathbb{F}_p$, and $K(1)$ is an Adams summand of mod $p$ $K$-theory. Informally speaking, the spectra $K(n)$ may be thought of as the homotopical residue fields of the sphere spectrum. \begin{remark} As remarked in \cite{nilpotence2}, this theorem can be interpreted as providing a classification of prime fields of $\mathrm{Sp}$. However, there is the subtlety that the ring structure on $K(n)$ is not unique at $p=2$, even in the homotopy category, see \cite[Theorem B.7.4]{RavNil} for a summary and further references. The existence and uniqueness of $\mathbb{A}_{\infty}$-structures on $K(n)$ is studied in Angeltveit's paper~\cite{angeltveit_moravak}. Hopkins and Mahowald have proved that none of these multiplicative structures on $K(n)$ refine to an $\mathbb{E}_2$-ring structure (e.g., \cite[Corollary 5.4]{acb_thom}). \end{remark} In light of this theorem, there is a natural notion of support for a spectrum $X \in \mathrm{Sp}$, namely \[ \operatorname{supp}(X) = \{n\mid K(n)_*(X) \neq 0\} \subseteq \mathbb{Z}_{\geq 0} \cup \{\infty\}. \] This notion of support turns out to be particularly well-behaved for the category of finite spectra $\mathrm{Sp}^{\omega}$. 
Since $K(\infty)_*F \cong H_*(F,\mathbb{F}_p)= 0$ implies $F \simeq 0$ for finite $F$, for any non-trivial $F$ there exists an $n \in \mathbb{N}$ such that $n \in \operatorname{supp}(F)$. Ravenel \cite{Rav84} further proved that $n \in \operatorname{supp}(F)$ implies $(n+1) \in \operatorname{supp}(F)$, so the only subsets of $\mathbb{Z}_{\geq 0} \cup \{\infty\}$ that can be realized as the support of a finite spectrum are the sets $\{n,n+1,n+2,\ldots,\infty\}$ with $n \in \mathbb{N}$. A result of Mitchell's \cite{mitchell_an} implies that all of these subsets can be realized by a finite spectrum. Write $\mathcal{C}_0 = \mathrm{Sp}^{\omega}$ and, for $n\ge 1$, let $\mathcal{C}_n \subseteq \mathrm{Sp}^{\omega}$ be the thick subcategory of finite spectra $F$ with $\operatorname{supp}(F) \subseteq \{n,n+1,n+2,\ldots,\infty\}$ for $n \in \mathbb{N}$. The following consequence of Theorem~\ref{thm:nilpotence} is often called the \emph{thick subcategory theorem}, proven in \cite{nilpotence2}. It says in particular that the support function defined above detects the thick subcategories of $\mathrm{Sp}^{\omega}$: \begin{theorem}[Hopkins--Smith]\label{thm:hopsmith}\index{thick subcategory theorem} If $\mathcal{C} \subseteq \mathrm{Sp}^{\omega}$ is a nonzero thick subcategory, then there exists an $n \ge 0$ such that $\mathcal{C} = \mathcal{C}_n$. Moreover, there is a sequence of proper inclusions \[ \mathrm{Sp}^{\omega} = \mathcal{C}_0 \supset \mathcal{C}_1 \supset \mathcal{C}_2 \supset \cdots \supset (0), \] which completely describes $\mathrm{Spc}(\mathrm{Sp})$. \end{theorem} This categorical filtration gives rise to a sequence of functorial approximations of any finite spectrum $F$ by spectra that are supported on $\{0,1,\ldots,n\}$ for varying $n$, where the zeroth approximation is given by the rationalization $F\wedge H\mathbb{Q}$. 
This filtration should be understood as a homotopical incarnation of the geometric filtration of $\mathcal{M}_{fg}$, so that the approximations of $F$ correspond to the restriction of the associated sheaf $BP_*(F) \in \mathbb{Q}Coh(\mathcal{M}_{fg})$ to $\mathcal{M}_{fg}^{\le n}$. The tool required to formulate this notion of approximation rigorously is provided by Bousfield localization\index{Bousfield localization}, which we briefly review here for the convenience of the reader. Let $E$ be a spectrum and consider the full subcategory $\langle E \rangle \subseteq \mathrm{Sp}$ of $E$-acyclic spectra, i.e., those spectra $A$ with $E \wedge A \simeq 0$. Bousfield \cite{bousfield_locspectra} proved that there exists a fiber sequence \[ \xymatrix{C_E \ar[r] & \mathrm{id} \ar[r] & L_E} \] of functors on $\mathrm{Sp}$ satisfying the following properties: \begin{enumerate}[(1)] \item For any $X \in \mathrm{Sp}$, $C_EX$ is in $\langle E \rangle$. \item For any $X \in \mathrm{Sp}$, $L_EX$ is $E$-local, i.e., it does not admit any nonzero maps from an $E$-acyclic spectrum. \end{enumerate} It follows formally that $L_EX$ is the initial $E$-local spectrum equipped with a map from $X$, and it is called the $E$-localization of $X$. The full subcategory of $\mathrm{Sp}$ on the $E$-local spectra will be denoted by $\mathrm{Sp}_E$; by construction, it is the quotient of $\mathrm{Sp}$ by $\langle E \rangle$. 
In order to extract the part of a spectrum $X$ that is supported on $\{0,1,\ldots,n\}$, i.e., the information of $X$ that is seen by the residue fields $K(0),K(1), \ldots, K(n)$, it is natural to consider the following Bousfield localization \[ \xymatrix{X \ar[r] & L_nX := L_{K(0) \vee K(1) \vee \ldots \vee K(n)}X.} \] In fact, for every $n$ there exists a spectrum $E(n)$ with coefficients $E(n)_* = \mathbb{Z}_{(p)}[v_1,\ldots,v_n][v_n^{-1}]$ called the \emph{Johnson--Wilson spectrum}\index{Johnson--Wilson spectrum} (of height $n$) which has the property that $\langle E(n) \rangle = \langle K(0) \vee K(1) \vee \ldots \vee K(n) \rangle$, hence $L_n = L_{E(n)}$. We let $\mathrm{Sp}_n = \mathrm{Sp}_{E(n)}$ denote the category of $E(n)$-local spectra. By construction, these localization functors fit into a \emph{chromatic tower}\index{chromatic tower} under $X$ as follows \begin{equation}\label{eq:chromatictower} \xymatrixcolsep{1.7pc} \xymatrix{ & M_nX \ar[d] & & M_2X \ar[d] & M_1X \ar[d] & M_0X \simeq H\mathbb{Q} \wedge X \ar@{=}@<-3.5ex>[d] \\ \cdots \ar[r] & L_nX \ar[r] & \cdots \ar[r] & L_2X \ar[r] & L_1X \ar[r] & L_0X \simeq H\mathbb{Q} \wedge X,} \end{equation} where the \emph{monochromatic layers}\index{monochromatic layer} $M_nX$ are defined by the fiber sequence \[ M_nX \longrightarrow L_nX \longrightarrow L_{n-1}X. \] Specializing to the sphere spectrum and applying homotopy groups, we arrive at the definition of the chromatic filtration. \begin{definition}\index{chromatic filtration} The chromatic filtration on $\pi_*S^0$ is given by the descending filtration \begin{equation}\label{eq:chromaticfiltration} \pi_*S^0 \supseteq \mathbb{C}_0\pi_*S^0 \supseteq \mathbb{C}_1\pi_*S^0 \supseteq \cdots \end{equation} defined as $\mathbb{C}_n\pi_*S^0 = \ker(\pi_*S^0 \to \pi_*L_nS^0)$. 
\end{definition} There is an important subtlety in the definition of the chromatic filtration, as there is an a priori different way of constructing a filtration of $\mathrm{Sp}$ from the thick subcategory theorem (Theorem~\ref{thm:hopsmith}). Indeed, without relying on the Morava $K$-theories $K(n)$, one may instead take the quotient of $\mathrm{Sp}$ by the localizing subcategories $\Loc(\mathcal{C}_n) \subseteq \mathrm{Sp}$ for each $n$. The resulting localization functors $L_n^f$ can then be used as above to construct a descending filtration \[ \pi_*S^0 \supseteq \mathbb{C}_0^f\pi_*S^0 \supseteq \mathbb{C}_1^f\pi_*S^0 \supseteq \cdots \] with $\mathbb{C}_n^f\pi_*S^0 = \ker(\pi_*S^0 \to \pi_*L_n^fS^0)$, known as the \emph{geometric filtration}\index{geometric filtration}, see Ravenel \cite[Section 7.5]{RavNil}. If $X$ is a spectrum such that $L_n^fX \simeq 0$, then also $L_nX \simeq 0$, so there are natural comparison transformations $L_n^f \to L_n$, leading to the following optimistic conjecture about the comparison between the two filtrations: \begin{conjecture}[Telescope conjecture]\index{telescope conjecture} The natural transformation $L_n^f \to L_n$ is an equivalence. \end{conjecture} A number of equivalent formulations of this conjecture and the current state of knowledge about it can be found in Mahowald--Ravenel--Schick \cite{mrstriple} and \cite{chromconj_survey}. The smash product theorem\index{smash product theorem} of Hopkins and Ravenel \cite[Section 8]{RavNil} states that $L_n$ is smashing, i.e., $L_n$ as an endofunctor on $\mathrm{Sp}$ commutes with colimits, while the analogous fact for $L_n^f$ was proven by Miller \cite{miller_finite}. It therefore suffices to show the telescope conjecture for $S^0$. This has been verified by explicit computations for $n=0$ and $n=1$ by work of Mahowald \cite{mahowald_bo} for $p=2$ and Miller \cite{miller_ass} for odd $p$, but the telescope conjecture is open in all other cases. 
It is known however that $L_n^fM \to L_nM$ is an equivalence for many spectra $M$, including $BP$-modules~\cite[Corollary 1.10]{cschov} and $E(m)$-local spectra~\cite[Corollary 6.10]{HovStrick} for any $m \ge 0$. \subsection{The chromatic filtration: disassembly and reassembly}\label{ssec:chromfiltration2} The goal of this subsection is to first demonstrate how the chromatic filtration decomposes the stable homotopy groups of spheres into periodic families and then to explain how these irreducible pieces reassemble into $\pi_*S^0$. The starting point is the chromatic convergence theorem due to Hopkins and Ravenel, proven in \cite{RavNil}, whose content is that the chromatic tower \eqref{eq:chromatictower} does not lose any information about $S^0$. In particular, the chromatic filtration \eqref{eq:chromaticfiltration} on $\pi_*S^0$ is exhaustive. We continue to follow Convention~\ref{conv:plocal}. \begin{theorem}[Hopkins--Ravenel]\label{thm:chromaticconvergence}\index{chromatic convergence theorem} The canonical map $X \to \lim_nL_nX$ is an equivalence for all finite spectra $X$. \end{theorem} \begin{remark} For general $X$, this map can be far from being an equivalence. For example, the chromatic tower of $H\mathbb{F}_p$ or the Brown--Comenetz dual $IS^0$ of the sphere is identically zero. However, chromatic convergence is known to hold for a class of spectra larger than just finite ones, including $\mathbb{CP}^{\infty}$. See \cite{chromaticcompletion}. \end{remark} We now turn to the filtration quotients of the chromatic filtration, which correspond homotopically to the monochromatic layers $M_nX$. Much of the material in this section can be found in \cite{HovStrick}. The layers $M_nX$ decompose into spectra which are periodic of periods a multiple of $2(p^n-1)$, thereby resembling the decomposition of light into waves of different frequencies. (This is the origin of the term \emph{chromatic} homotopy theory, coined by Ravenel.) 
More precisely, if $X$ is any spectrum, then its $n$th monochromatic layer is equivalent to a filtered colimit of spectra $F_{\alpha}$, \[ \xymatrix{\operatorname{colim}_{\alpha}F_{\alpha} \ar[r]^-{\sim} & M_nX,} \] such that each $F_{\alpha}$ is periodic. That is, for each $\alpha$ there exists a natural number $\lambda(\alpha)$ and a homotopy equivalence $F_{\alpha} \simeq \Sigma^{2(p^n-1)p^{\lambda(\alpha)}}F_{\alpha}$. This follows from the fact that $M_n$ is equivalent to the colocalization of the $E(n)$-local category with respect to the $E(n)$-localization of a finite type $n$ spectrum, see for example \cite[Proposition~7.10]{HovStrick}, together with the periodicity theorem of Hopkins and Smith~\cite[Theorem 9]{nilpotence2}. Having resolved $S^0$ into its irreducible chromatic pieces $M_nS^0$, it is now time to consider the question of how to reassemble the pieces. For this, it is more convenient to consider the $K(n)$-localizations instead of the monochromatic layers, as we shall explain next. Write $\mathfrak{M}_n \subset \mathrm{Sp}$ for the essential image of the functor $M_n$ and let $\mathrm{Sp}_{K(n)}$ be the category of $K(n)$-local spectra. For any $n$, the functors $L_{K(n)}$ and $M_n$ restrict to an adjunction on the category $\mathrm{Sp}_n$ (with $M_n$ as the left adjoint) and then further to a symmetric monoidal equivalence~\cite[Theorem~6.19]{HovStrick} \[ \xymatrix{M_n \colon \mathrm{Sp}_{K(n)} \ar@<0.5ex>[r] & \ar@<0.5ex>[l] \mathfrak{M}_n \noloc L_{K(n)}. } \] So we may equivalently work with $L_{K(n)}S^0$ in place of $M_nS^0$. 
\begin{remark} The more categorically minded reader may think of the situation as follows: The descending filtration of $\mathrm{Sp}^{\omega}$ of Theorem \ref{thm:hopsmith} extends to two descending filtrations of $\mathrm{Sp}$: \[ \mathrm{Sp} = \ker(0) \supset \ker(L_0) \supset \ker(L_1) \supset \cdots \supset \ker(\operatorname{id}) = (0) \] and \[ \mathrm{Sp} = \Loc(\mathcal{C}_0) \supset \Loc(\mathcal{C}_1) \supset \Loc(\mathcal{C}_2) \supset \cdots \supset (0), \] which are equivalent if the telescope conjecture holds for all $n$. Focusing on the first filtration for concreteness and writing $\mathrm{Sp}_n$ for the essential image of $L_n$ as before, we could equivalently pass to the associated ascending filtration \[ (0) = \operatorname{im}(0) \subset \mathrm{Sp}_0 \subset \mathrm{Sp}_1 \subset \mathrm{Sp}_2 \subset \cdots \subset \operatorname{im}(\operatorname{id}) = \mathrm{Sp}. \] The consecutive subquotients $\mathrm{Sp}_{n}/\mathrm{Sp}_{n-1}$ can then be realized in two different ways as subcategories of $\mathrm{Sp}$, namely either as a localizing subcategory $\mathfrak{M}_n$ or as a colocalizing subcategory $\mathrm{Sp}_{K(n)}$. The resulting equivalence between $\mathfrak{M}_n$ and $\mathrm{Sp}_{K(n)}$ is an instance of a phenomenon called \emph{local duality}\index{local duality}, see \cite{bhv1}. \end{remark} Suppose $X$ is a spectrum for which we have determined $L_{n-1}X$ and $L_{K(n)}X$, and we are interested in reassembling them to obtain $L_nX$. Motivated by the geometric model of Section~\ref{sec:geomodel}, we expect this process to be analogous to the way a sheaf on the open subset $\mathcal{M}_{fg}^{\le n-1}$ and another sheaf on the stratum $\mathcal{H}_n$ are glued together along the formal neighborhood $\widehat{\mathcal{H}}_n$ of $\mathcal{H}_n$ inside $\mathcal{M}_{fg}^{\le n-1}$ to produce a sheaf on $\mathcal{M}_{fg}^{\le n}$. 
This picture turns out to be faithfully reflected in stable homotopy theory: The chromatic reassembly process for $X \in \mathrm{Sp}$ is governed by the homotopy pullback square displayed on the left, usually called the \emph{chromatic fracture square}\index{chromatic fracture square} (see for example \cite{greenlees_axiomatictate}): \begin{equation}\label{eq:chromaticfracture} \xymatrix{L_{n}X \ar[r] \ar[d] & L_{K(n)} X \ar[d] & \mathrm{Sp}_n \ar[r]^-{L_{K(n)}} \ar[d]_{X \mapsto \iota_n(X)} & \mathrm{Sp}_{K(n)} \ar[d]^{L_{n-1}} \\ L_{n-1}X \ar[r]_-{\iota_n(X)} & L_{n-1}L_{K(n)}X & \mathrm{Fun}(\Delta^1,\mathrm{Sp}_{n-1}) \ar[r]_-{\mathrm{target}} & \mathrm{Sp}_{n-1}} \end{equation} In fact, by~\cite{acb_cubes} the category $\mathrm{Sp}_n$ itself admits a decomposition into chromatically simpler pieces, see the pullback square on the right of \eqref{eq:chromaticfracture}. Here, $\mathrm{Fun}(\Delta^1,\mathrm{Sp}_{n-1})$ is the arrow category of $\mathrm{Sp}_{n-1}$ and the pullback is taken in a suitably derived sense. The labels of the arrows in this diagram indicate how to translate from the chromatic fracture square of a spectrum $X$ to the categorical decomposition on the right of \eqref{eq:chromaticfracture}. Based on computations of Shimomura--Yabe \cite{shimyabe}, Hopkins~\cite{cschov} conjectured that the chromatic reassembly process which recovers $L_{n}X$ from $L_{K(n)}X$ and $L_{n-1}X$ takes a particularly simple form:\index{chromatic splitting conjecture} \begin{conjecture}[Weak Chromatic Splitting]\label{conj:wcsc} The map \[ \xymatrix{\iota_n(S_p^0)\colon L_{n-1}S_p^0 \ar[r] & L_{n-1}L_{K(n)}S_p^0} \] in \eqref{eq:chromaticfracture} is split, i.e., it admits a section. Here, $S_p^0$ is the $p$-complete sphere spectrum. \end{conjecture} This conjecture, its variations, and its consequences are discussed in more detail in Section~\ref{sec:cscmore}. 
For now we note that Conjecture~\ref{conj:wcsc} is known to hold for $n\le 2$ and all primes $p$, and is wide open otherwise.\\ We can now summarize the chromatic approach\index{chromatic approach} as follows: \begin{chromaticapproach*} The chromatic approach to $\pi_*S^0_{(p)}$ consists of three steps: \begin{enumerate}[(1)] \item Compute $\pi_*L_{K(n)}S^0$ for each $n$. \item Understand the gluing in the chromatic fracture square \eqref{eq:chromaticfracture}. \item Use chromatic convergence (Theorem~\ref{thm:chromaticconvergence}) to recover $S^0_{(p)}$. \end{enumerate} \end{chromaticapproach*} Finally, the $p$-local sphere spectrum $S^0_{(p)}$ determines $S_p^0$ by $p$-completion. Together with $H\mathbb{Q} \wedge S^0 \simeq H\mathbb{Q}$, we can thus reassemble the sphere spectrum $S^0$ itself via the following homotopical analogue of the Hasse square \eqref{eq:hassesquare}: \[ \xymatrix{S^0 \ar[r] \ar[d] & \prod_pS_p^0 \ar[d] \\ H\mathbb{Q} \wedge S^0 \ar[r] & H\mathbb{Q} \wedge \prod_pS_p^0.} \] In the next section, we discuss the first two steps of the chromatic approach. \begin{remark} As mentioned earlier, the deconstructive analysis of the stable homotopy category based on its spectrum $\mathrm{Spc}(\mathrm{Sp})$ can be carried out in any tensor triangulated category; many examples can be found in~\cite{balmer_chapter}. This is the subject of \emph{prismatic algebra}. An especially interesting example is the stable module category $\mathrm{StMod}_{kG}$ of a finite $p$-group $G$ and a field $k$ of characteristic $p$, whose spectrum $\mathrm{Spc}(\mathrm{StMod}_{kG})$ is homeomorphic to $\mathrm{Proj}(H^*(G;k))$, the Proj construction of the graded ring $H^*(G;k)$. This category is a good test case for chromatic questions: for instance, the analogues of both the telescope conjecture and the weak chromatic splitting conjecture are known to hold in $\mathrm{StMod}_{kG}$, see \cite{bik_fingps} and \cite{bhv3}. 
\end{remark} \section{Local chromatic homotopy theory}\label{ssec:lcht} We begin this section by introducing the main players of local chromatic homotopy theory: Morava $E$-theory $E_n$, the Morava stabilizer group $\mathbb{G}_n$ and its action on $E_n$, and the resulting descent spectral sequence computing $\pi_*L_{K(n)}S^0$. We then summarize the key algebraic features of the Morava stabilizer group, its continuous cohomology, and its action on the coefficients of Morava $E$-theory. In order to have a toy case in mind for the general constructions to follow, we study in detail the case of height $1$. \subsection{Morava $E$-theory and the descent spectral sequence}\label{sec:moravaEthy} The chromatic program has led us naturally and inevitably to the study of the $K(n)$-local categories, which should be thought of as an analog of $(\mathcal{D}_{\mathbb{Z}})_p^{\wedge}$ for abelian groups. Formally, we note that $\mathrm{Sp}_{K(n)}$ is a closed symmetric monoidal stable category. Moreover, in close analogy with Section~\ref{ssec:toy}, the $K(n)$-local categories have the following properties: \begin{enumerate}[(1)] \item The category $\mathrm{Sp}_{K(n)}$ is compactly generated by $L_nF(n)$ for any $F(n) \in \mathcal{C}_n \setminus \mathcal{C}_{n+1}$ for $\mathcal{C}_n$ as in Theorem~\ref{thm:hopsmith}, and an object $X \in \mathrm{Sp}_{K(n)}$ is trivial if and only if $X \wedge K(n)$ is trivial. \item The only proper localizing subcategory of $\mathrm{Sp}_{K(n)}$ is $(0)$. \item A spectrum $X \in \mathrm{Sp}_n$ can be reassembled from $L_{K(n)}X$, $L_{n-1}X$, together with the gluing information specified in the pullback square displayed on the right of \eqref{eq:chromaticfracture}. \end{enumerate} This confirms the idea that the $K(n)$-local categories play the role of the irreducible pieces of $\mathrm{Sp}$. 
With the techniques developed so far, both the finer structural properties of $\mathrm{Sp}_{K(n)}$ as well as any concrete calculations would be essentially inapproachable: Incipit \emph{Morava $E$-theory}. We let $\Gamma_n$ denote the Honda formal group law\index{Honda formal group law} of height $n$. It is the formal group law classified by the map \[ BP_* \cong \mathbb{Z}_{(p)}[v_1, v_2, v_3, \ldots ] \longrightarrow \mathbb{F}_p \] which sends $v_n$ to $1$ and $(p, v_1, \ldots, v_{n-1}, v_{n+1}, v_{n+2}, \ldots )$ to zero. In fact, it is the unique $p$-typical formal group law over $\mathbb{F}_{p^n}$ whose $p$-series satisfies $[p]_{\Gamma_n}(x) =x^{p^n}$. A good reference on formal group laws for homotopy theorists is \cite[Appendix A2]{ravgreen}. Let $\mathbb{W}_n= W(\mathbb{F}_{p^n}) $ be the ring of Witt vectors of $\mathbb{F}_{p^n}$\index{Witt vectors}, which is isomorphic to the ring of integers in an unramified extension of $\mathbb{Q}_p$ of degree $n$. Lubin and Tate~\cite{lubintate} showed that there exists a $p$-typical universal deformation $F_{n}$ of $\Gamma_n$ to complete local rings with residue field $\mathbb{F}_{p^n}$, whose formal group law $F_{n}(x,y)$ is defined over the ring \begin{equation}\label{eq:Estariso} (E_n)_0 = \mathbb{W}_n[\![u_1, \ldots, u_{n-1}]\!] \ \ \ u_i \in (E_n)_0. \end{equation} Introducing a formal variable $u$ in degree $-2$ then allows to extend $F_{n}$ to a graded formal group law $F_{n, \mathrm{gr}}(x,y) = uF_{n}(u^{-1}x,u^{-1}y)$ defined over $(E_n)_* = (E_n)_0[u^{\pm1}]$, classified by the ring homomorphism \[ BP_* \cong \mathbb{Z}_{(p)}[v_1, v_2, v_3, \ldots ] \longrightarrow (E_n)_* \] which sends $(v_{n+1}, v_{n+2}, \ldots)$ to zero, $v_n$ to $u^{1-p^n}$, and $v_k$ to $u_k u^{1-p^k}$ for $k<n$. Here, we are using the Araki generators for $BP_*$. See \cite[A2.2]{ravgreen} for more details. 
In order to lift this construction to stable homotopy theory, one first shows that the functor \[ X \mapsto (E_n)_* \otimes_{BP_*} BP_*(X) \] is a homology theory, represented by a complex orientable ring spectrum $E_n = E(\mathbb{F}_{p^n}, \Gamma_n)$, called Morava $E$-theory\index{Morava $E$-theory} or the \emph{Lubin--Tate spectrum}\index{Lubin--Tate spectrum} because of its connection to Lubin--Tate deformation theory, see Rezk \cite{rezk_hm}. This is an instance of the Landweber exact functor theorem\index{Landweber exact functor theorem} mentioned in Remark~\ref{rem:landweber}. The spectrum $E_n$ is a completed and 2-periodized version of the Johnson--Wilson spectrum $E(n)$ from Section \ref{ssec:chromfiltration1} and it turns out that $L_{E(n)} = L_{E_n}$ for all $n$; in particular, the terms $E(n)$-local and $E_n$-local are synonymous. Since $(E_n)_*$ is a regular graded commutative ring which is concentrated in even degrees, reduction modulo the maximal ideal $\mathfrak{m} = (p,u_1,\ldots,u_{n-1})$ can be realized by a (homotopy) ring map \[ E_n \longrightarrow E_n/\mathfrak{m} =: K_n \] with $\pi_*K_n \cong \mathbb{F}_{p^n}[u^{\pm 1}]$, see for example Chapter V of \cite{ekmm}. The spectrum $K_n$ splits as a wedge of equivalent spectra, which are shifts of the Morava $K$-theory $K(n)$ of Theorem~\ref{thm:nilpotence}, with homotopy groups $K(n)_* \cong \mathbb{F}_p [v_n^{\pm 1}]$ for $v_n = u^{1-p^n}$. \begin{definition}\label{defn:stabilizergrop} The \emph{small Morava stabilizer group} $\mathbb{S}_n:=\mathrm{Aut}_{\mathbb{F}_{p^n}} (\Gamma_n)$ is the group of automorphisms of $\Gamma_n$ with coefficients in $\mathbb{F}_{p^n}$ \[ \mathbb{S}_n = \{f(x) \in \mathbb{F}_{p^n}[\![x]\!] : f(\Gamma_n(x,y)) = \Gamma_n(f(x),f(y)), \ f'(0) \neq 0\}. \] Since $\Gamma_n$ is defined over $\mathbb{F}_p$, the Galois group $ \mathrm{Gal}=\mathrm{Gal}(\mathbb{F}_{p^n}/\mathbb{F}_p)$ acts on $\mathbb{S}_n$ by acting on the coefficients of an automorphism. 
The \emph{big Morava Stabilizer group} $\mathbb{G}_n $ is the extension $\mathbb{S}_n \rtimes \mathrm{Gal} $. Equivalently, $\mathbb{G}_n$ is the group of automorphisms of the pair $(\mathbb{F}_{p^n}, \Gamma_n)$.\index{Morava stabilizer group} \end{definition} The construction $E(\mathbb{F}_{p^n}, \Gamma_n)$ is natural in the formal group law $\Gamma_n$, so there is an up to homotopy action of $\mathrm{Aut}_{\mathbb{F}_{p^n}} (\Gamma_n)$ on $E(\mathbb{F}_{p^n}, \Gamma_n)$. This action can be promoted to an action through $\mathbb{E}_{\infty}$-ring maps in a unique way: By Goerss--Hopkins--Miller obstruction theory~\cite{hm_elliptic, gh_modulien}, $E_n$ admits an essentially unique structure of an $\mathbb{E}_{\infty}$-ring spectrum and $\mathbb{G}_n$ acts on it through $\mathbb{E}_{\infty}$-ring maps. In fact, $\mathbb{G}_n$ gives essentially all such automorphisms of $E_n$. A new proof of these results from the perspective of derived algebraic geometry has recently appeared in Lurie~\cite{ell2}. The connection between $K(n)$-local homotopy theory and Morava $E$-theory is then illustrated in the diagram \begin{align}\label{eq:galoisspectra} \xymatrix{L_{K(n)}S^0 \ar[r] & E_n \ar[r] & {K_n}.} \end{align} The first map is a pro-Galois extension of ring spectra\index{pro-Galois extension of ring spectra} with Galois group $\mathbb{G}_n$ in the sense of Rognes. In particular, $L_{K(n)}S^0 \simeq E_n^{h\mathbb{G}_n}$ and the extension $L_{K(n)}S^0 \to E_n$ behaves like an unramified field extension. The second map in \eqref{eq:galoisspectra} corresponds to the passage to the residue field. See \cite{rognes,br_absgal} for precise definitions on pro-Galois extensions and \cite{DH} for a definition of homotopy fixed points for profinite groups. 
Further results and alternative approaches to the construction of (continuous) homotopy fixed points in the generality needed for chromatic homotopy theory can be found in \cite{davis_hfp, quick_chfp, davisquick} and the references therein. \begin{remark} Note also that the extension $L_{K(n)}S^0 \to E_n $ can be broken into two pro-Galois extensions \[ \xymatrix{L_{K(n)}S^0 \ar[rr]^-{\mathrm{Gal}} & & E_n^{h\mathbb{S}_n} \simeq L_{K(n)}S^0(\omega) \ar[rr]^-{\mathbb{S}_n} & & E_n,} \] where the arrows are labelled by the structure group of the extension. Here, $L_{K(n)}S^0(\omega) $ is an $\mathbb{E}_{\infty}$-ring obtained by adjoining a primitive $(p^n-1)$th root of unity $\omega$ to the $K(n)$-local sphere. See \cite[5.4.6]{rognes} and \cite[Section 1.6]{BobkovaGoerss} for details on this. \end{remark} From the fact that the first map of \eqref{eq:galoisspectra} is a Galois extension, it follows that \begin{align}\label{eq:EnEn} \pi_*L_{K(n)}(E_n \wedge E_n) &\cong \mathrm{Map}^c(\mathbb{G}_n, (E_n)_*) , \end{align} where $\mathrm{Map}^c$ denotes the continuous functions as profinite sets. See for example \cite[Theorem 4.11]{hovey_ops}. In fact, the functor $(E_n)_*^{\vee}(-):=\pi_*L_{K(n)}(E_n \wedge - )$ takes values in a category of \emph{Morava modules}, which are $(E_n)_*$-modules equipped with a continuous action by $\mathbb{G}_n$ (see Definition~\ref{defn:moravamodules}). Furthermore, a map $f$ is a $K(n)$-local equivalence (i.e., $K(n)_*(f)$ is an isomorphism) if and only if $(E_n)_*^{\vee}(f)$ is an isomorphism. The resulting relationship between the topological category $\mathrm{Sp}_{K(n)}$ and the algebraic category of Morava modules provides very powerful tools for computations in the $K(n)$-local category. In particular, it gives rise to a \emph{homotopy fixed point spectral sequence}\index{homotopy fixed point spectral sequence}, also called the \emph{descent spectral sequence}\index{descent spectral sequence}. 
\begin{theorem}[Hopkins--Ravenel \cite{RavNil}, Devinatz--Hopkins \cite{DH}, Rognes \cite{rognes}]\label{thm:descentss} The unit map $L_{K(n)}S^0 \to E_n$ is a pro-Galois extension with Galois group $\mathbb{G}_n$. There is a convergent descent spectral sequence \begin{equation}\label{eq:hfpss} E_2^{s,t} \cong H^s_c(\mathbb{G}_n, (E_n)_t) \Longrightarrow \pi_{t-s} L_{K(n)}S^0, \end{equation} which collapses with a horizontal vanishing on a finite page. \end{theorem} The spectral sequence \eqref{eq:hfpss} is the \emph{$K(n)$-local $E_n$-based Adams--Novikov spectral sequence}\index{$K(n)$-local $E_n$-based Adams--Novikov spectral sequence}, which for a general $X$ has the form \begin{align}\label{rem:KnlocalANSS} E_1^{s,t} = \pi_tL_{K(n)}(E_n \wedge E_n^{s} \wedge X) \Longrightarrow \pi_{t-s} L_{K(n)}X. \end{align} It is constructed in \cite[Appendix A]{DH}. The description of the $E_2$-page in terms of continuous group cohomology $H^*_c$ for $X=S^0$ uses \eqref{eq:EnEn} to identify the $E_1$-term with the cobar complex. More generally, if the $(E_n)_*$-module $(E_n)_*^{\vee}(X)$ is flat, or finitely generated, or if there exists $k\ge 1$ such that $\mathfrak{m}^k[(E_n)_*^{\vee}(X)] = 0$, then there is an isomorphism~\cite{bh_e2adams} \[ E_2^{s,t} \cong H^s_c(\mathbb{G}_n, (E_n)_t^{\vee}(X)). \] Section~\ref{sssec:moravahomalg} below further discusses homological algebra over profinite groups and properties of this spectral sequence. In fact, as discussed in \cite{mathew_galois}, the $\mathbb{G}_n$-action on $E_n$ lifts to an action on the $\infty$-category $\mathrm{Mod}_{E_n}$, which yields a categorical reformulation of the theorem as a canonical equivalence \[ \xymatrix{\mathrm{Sp}_{K(n)} \ar[r]^-{\sim} & \mathrm{Mod}_{E_n}^{h\mathbb{G}_n}, } \] where the right hand side denotes the homotopy fixed points taken in the $\infty$-category of $\infty$-categories. 
These observations demonstrate the fundamental role of $E_n$-theory and the Morava stabilizer group $\mathbb{G}_n$ together with its cohomology in chromatic homotopy theory. \begin{remark}\label{rem:Ethygeneral}\label{rem:connextensions} Other choices for Morava $E$-theory are possible. For any perfect field $k$ of characteristic $p$ and formal group law $\Gamma$ of height $n$ defined over $k$, there is an associated spectrum $E(k, \Gamma)$ whose formal group law is a universal deformation of $\Gamma$ to complete local rings with residue field isomorphic to $k$. There is an associated Morava $K$-theory $K(k, \Gamma)$, stabilizer group $\mathbb{G}(k,\Gamma)$, and $\mathbb{G}(k,\Gamma)$-Galois extension $L_{K(k, \Gamma)}S^0 \to E(k, \Gamma)$. The localization functor $L_{(k,\Gamma)}$ is independent of the choice of formal group law $\Gamma$ and extension $k$ of $\mathbb{F}_p$, so one can make any convenient choice to study $L_{K(k, \Gamma)}S^0$. Recall the Galois extension $\mathbb{D}ef(\overline{\mathbb{F}}_p,\Gamma) \longrightarrow \widehat{\mathcal{H}}(n)$ from \eqref{eq:galgeometric}. By definition, $\mathbb{G}(\overline{\mathbb{F}}_p, \Gamma)$ is the group $ \mathrm{Aut}_{\overline{\mathbb{F}}_p}(\Gamma) \rtimes \mathrm{Gal}(\overline{\mathbb{F}}_p/\mathbb{F}_p) $, and the Galois extension $ E(\overline{\mathbb{F}}_p, \Gamma) \leftarrow L_{K(\overline{\mathbb{F}}_p, \Gamma)}S^0 $ is a homotopical lift of the pro-Galois extension \eqref{eq:galgeometric}. The coefficients of the Lubin--Tate spectrum $E(\overline{\mathbb{F}}_p, \Gamma)$ correspond to the global sections of $\mathbb{D}ef(\overline{\mathbb{F}}_p,\Gamma)$ (hence the reversal of the arrow direction). A more thorough discussion of Morava $E$-theory\index{Morava $E$-theory} is given in \cite{stapleton_chapter}. See also Remark~\ref{rem:choiceGamma} for more on this point. 
\end{remark} The first step in the Chromatic Approach described in Section~\ref{ssec:chromfiltration2} is to compute the homotopy groups of $L_{K(n)}S^0 \simeq E_n^{h\mathbb{G}_n}$. As for any Galois extension, it makes sense to first study intermediate extensions. In general, if $H$ and $K$ are closed subgroups of $\mathbb{G}_n$ and $H$ is normal in $K$, then $E_n^{hK} \to E_n^{hH}$ is a $K/H$-Galois extension and there is a descent spectral sequence \begin{equation}\label{eq:hfpssgen} E_2^{s,t} \cong H^s_c(K/H, (E_n^{hH})_t) \Longrightarrow \pi_{t-s} E_n^{hK}. \end{equation} See for example Devinatz \cite{devinatz_lhs}. It seems natural to consider the following kinds of intermediate extensions: \begin{enumerate}[(a)] \item The $\mathbb{G}_n/K$ Galois extensions $L_{K(n)}S^0 \to E_n^{hK}$ for $K \subseteq \mathbb{G}_n$ normal closed subgroups. \item The $F$-Galois extensions $E_n^{hF} \to E_n$ for finite subgroups $F$ of $\mathbb{G}_n$. \end{enumerate} An important example of an intermediate extension of the form (a) is given in Remark~\ref{rem:3termfiber} below. These kinds of extensions are conceptually important, but the homotopy groups of spectra of the form $E_n^{hK}$ are generally out of reach at heights $n\geq 3$. An exception is when $K=F \subseteq \mathbb{G}_n$ is finite, which brings us to extensions of the form (b), in which case the intermediate extensions $E_n^{hF}\to E_n$ and computations of the homotopy groups of $E_n^{hF}$ are more accessible. In fact, there are many computations of the homotopy groups of $E_n^{hF}$ at various heights and the recent developments in equivariant homotopy theory by Hill, Hopkins and Ravenel \cite{HHR}, followed by the work on real orientations for $E$-theory of Hahn and Shi \cite{hahnshi} make these computations even more accessible. 
A non-exhaustive list of references related to these types of computations is given by
We give a brief description of the endomorphism ring here, originally due to Dieudonn{\'e}~\cite{dieudonne_lie7} and Lubin~\cite{lubin_oneparam}. A good reference for this material from the perspective of homotopy theory is \cite[Appendix A2.2]{ravgreen}. Recall that $\mathbb{W}_n$ denotes the ring of Witt vectors on $\mathbb{F}_{p^n}$. It is isomorphic to the ring of integers $ \mathbb{Z}_p(\omega)$ of the unramified extension of $\mathbb{Q}_p(\omega)$ obtained from $\mathbb{Q}_p$ by adjoining a primitive $(p^n-1)$th root of unity $\omega$. The residue field is $\mathbb{F}_{p^n}$ and we also let $\omega$ denote its reduction in $\mathbb{F}_{p^n}$. The series $\omega(x) = \omega x$ and $\xi(x) = x^p$ are elements of $\mathrm{End}_{\mathbb{F}_{p^n}}(\Gamma_n)$ and, in fact, the endomorphism ring is a $\mathbb{Z}_p$-module generated by these elements: \begin{equation}\label{eq:stabpres} \mathrm{End}_{\mathbb{F}_{p^n}}(\Gamma_n) \cong \mathbb{W}_n\langle \xi \rangle/ ( \xi \omega - \omega^{p}\xi, \xi^n- p) \end{equation} The identification of \eqref{eq:stabpres} is given explicitly as follows. An element of the right hand side can be written uniquely as \begin{equation}\label{eq:alphaexpressions} f= \sum_{j=0}^{n-1}f_j \xi^j \end{equation} for $f_j \in \mathbb{W}_n$. Further, $f_j = \sum_{i=0}^{\infty}a_{j+in}p^i$ for unique elements $a_i \in \mathbb{W}_n$ such that $a_i^{p^n}-a_i=0$. Using the fact that $\xi^n=p$, the element $f$ can also be written uniquely as \[ f = \sum_{i =0}^{\infty} a_i \xi^i. \] The series \[ [f](x) = \sum\nolimits_{i\geq 0}^{\Gamma_n} a_ix^{p^i} = a_0 x +_{\Gamma_n} a_1 x^p +_{\Gamma_n} \cdots \] is the endomorphism of $\Gamma_n$ corresponding to $f$. Let $\mathbb{D}_n = \mathbb{Q} \otimes \mathrm{End}_{\mathbb{F}_{p^n}}(\Gamma_n) $. There is a valuation $v \colon \mathbb{D}_n^{\times} \to \frac{1}{n}\mathbb{Z}$ normalized so that $v(\xi)=1/n$. 
The ring $\mathbb{D}_n$ is a central division algebra over $\mathbb{Q}_p$ of Hasse invariant $1/n$. The ring of integers $\mathcal{O}_{\mathbb{D}_n}$ of $\mathbb{D}_n$ is defined to be the set of those $x\in \mathbb{D}_n$ such that $v(x)\geq 0$, so that $ \mathcal{O}_{\mathbb{D}_n} \cong \mathrm{End}_{\mathbb{F}_{p^n}}(\Gamma_n)$.
\end{definition} The question of when a finite subgroup $F_0$ of $\mathbb{S}_n$ extends to a finite subgroup of $\mathbb{G}_n$ is subtle and largely addressed by Bujard in \cite{bujard}. We do not give it much attention here. \begin{table} \centering \caption{The table below lists isomorphism types of maximal finite subgroups of $\mathbb{S}_n$ at various heights and primes. Each isomorphism type listed below belongs to a unique conjugacy class. Here, $C_q$ denotes a cyclic group of order $q$ and $T_{24} \cong Q_8 \rtimes C_3$ is the binary tetrahedral group (the action of $C_3$ on $Q_8$ permutes a choice of generators $i$, $j$ and $k$). See Hewett \cite{hewett_groups, hewett_normalizers} and Bujard \cite{bujard} for more details. In particular, see \cite{hewett_groups} for the isomorphism type of the semi-direct product on the list below. } \ \\ \begin{tabular}{|l|l|l|l|} \hline Prime $p$ & Height $n$: $k\geq 1$, $p\hspace{-4pt}\not|\hspace{2pt} m$ &\makecell[l]{Isomorphism Types of Maximal \\ Finite Subgroups in $\mathbb{S}_n$} \\ \hline \hline $p\neq 2$ & $n$ not divisible by $p-1$ &$C_{p^n-1}$ \\ \hline $p\neq 2$ & $n=(p-1)p^{k-1}m$ &\makecell[l]{ $C_{p^n-1}$, and \\ $C_{p^\ell} \rtimes C_{(p^{p^{k-\ell}m} -1 )(p-1)}$, $1\leq \ell \leq k$} \\ \hline $p=2$ & $n$ odd & $C_{2(2^n-1)}$ \\ \hline $p=2$ & $n=2^{k-1}m$ and $k\neq 2$ &$C_{2^\ell(2^{2^{k-\ell}m} -1 )}$, $1\leq \ell \leq k $ \\ \hline $p=2$ & $n=2 m$ and $m\neq 1$ &\makecell[l]{$C_{2(2^m-1)}$, and \\ $T_{24} \times C_{2^m-1}$} \\ \hline $p=2$ & $n=2$ &$T_{24}$ \\ \hline \end{tabular} \label{tab:finitesub} \end{table} \begin{remark}\label{rem:choiceGamma} For any formal group law $\Gamma$ of height $n$ defined over a perfect field extension $k$ of $\mathbb{F}_p$, one can define the group \[ \mathbb{G}(k,\Gamma) = \{(f,i) : \sigma \in \mathrm{Gal}(k/\mathbb{F}_p), \ f \in k[\![x]\!] \colon \sigma^*\Gamma \xrightarrow{\cong} \Gamma\}. 
\] With this definition, $\mathbb{G}_n = \mathbb{G}(\mathbb{F}_{p^n}, \Gamma_n)$. This group was mentioned in Remark~\ref{rem:Ethygeneral}. The group $\mathbb{S}(k, \Gamma) = \mathrm{End}_k(\Gamma)^{\times}$ is the subgroup of $\mathbb{G}(k,\Gamma)$ consisting of pairs for which $\sigma=\operatorname{id}$. In general, both $\mathbb{S}(k, \Gamma)$ and $\mathbb{G}(k, \Gamma)$ depend on the pair $(k, \Gamma)$. However, since any two formal group laws of height $n$ are isomorphic over $\overline{\mathbb{F}}_p$, $\mathrm{End}_{\overline{\mathbb{F}}_p}(\Gamma)$ is independent of $\Gamma$, and hence so are $\mathbb{G}(\overline{\mathbb{F}}_p, \Gamma)$ and $\mathbb{S}(\overline{\mathbb{F}}_p, \Gamma)$. Since \[ \mathbb{S}_n = \mathbb{S}(\mathbb{F}_{p^n}, \Gamma_n) \cong \mathbb{S}(\overline{\mathbb{F}}_p, \Gamma_n), \] it follows that for any formal group law $\Gamma$ as above, there is an isomorphism $\mathbb{S}_n \cong \mathbb{S}(\overline{\mathbb{F}}_p, \Gamma)$. So, Table~\ref{tab:finitesub} is canonical in the sense that it classifies conjugacy classes of finite subgroups of $ \mathbb{S}(\overline{\mathbb{F}}_p, \Gamma)$ for any formal group law $\Gamma$ of height $n$ defined over $\overline{\mathbb{F}}_p$. However, even if all of the automorphisms of $\Gamma$ are defined over $\mathbb{F}_{p^n}$, so that \[ \mathbb{S}(\mathbb{F}_{p^n}, \Gamma) \cong \mathbb{S}(\overline{\mathbb{F}}_{p}, \Gamma) \cong \mathbb{S}_n, \] it can still be the case that $\mathbb{G}(\mathbb{F}_{p^n}, \Gamma)$ and $\mathbb{G}_n$ are not isomorphic. If this is the case, extensions of a finite subgroup of $\mathbb{S}_n \cong \mathbb{S}(\mathbb{F}_{p^n}, \Gamma)$ to $\mathbb{G}(\mathbb{F}_{p^n}, \Gamma)$ and $\mathbb{G}_n$ can have different isomorphism types. \end{remark} We now turn to the definition of a few group homomorphisms that play a role in the rest of this paper. 
\begin{definition}\label{defn:determinant} The \emph{determinants}\index{determinant} \begin{align*} \det &\colon \mathbb{G}_n \longrightarrow \mathbb{Z}_p^{\times} & \det &\colon \mathbb{S}_n \longrightarrow \mathbb{Z}_p^{\times} \end{align*} are the homomorphisms defined as follows. The group $ \mathbb{S}_n$ acts on $\mathcal{O}_{\mathbb{D}_n}$ by right multiplication. This action gives a representation $\rho\colon \mathbb{S}_n \to GL_n(\mathbb{W}_n)$. The composite $\det \circ \rho$ has image in the Galois invariants of $\mathbb{W}_n^{\times}$ (see \cite[Section 5.4]{henn_minicourse}), so it induces a homomorphism $\mathbb{S}_n \to \mathbb{Z}_p^{\times}$, which we also denote by $\det$. We extend this homomorphism to $\mathbb{G}_n$ via the composite \[ \det \colon \mathbb{G}_n \cong \mathbb{S}_n\rtimes \mathrm{Gal} \xrightarrow{ \det \times \operatorname{id} }\mathbb{Z}_p^{\times} \times \mathrm{Gal} \to \mathbb{Z}_p^{\times}, \] where the second map is the projection. \end{definition} Composing $\det \colon \mathbb{G}_n \to \mathbb{Z}_p^{\times}$ with the quotient map to $\mathbb{Z}_p^{\times}/\mu \cong \mathbb{Z}_p$ gives a homomorphism \begin{equation}\label{eq:zetan} \zeta_n \colon \mathbb{G}_n \longrightarrow \mathbb{Z}_p \end{equation} where $\mu=C_2$ if $p=2$ and $\mu = C_{p-1}$ if $p$ is odd. \index{$\zeta_n$} This corresponds to a class \[\zeta_n \in H_c^1(\mathbb{G}_n, \mathbb{Z}_p) \cong \mathrm{Hom}^c(\mathbb{G}_n, \mathbb{Z}_p),\] where $\mathrm{Hom}^c$ denotes continuous group homomorphisms and $H^1_c$ the continuous cohomology (see Section~\ref{sssec:moravahomalg}). If $p=2$, the determinant also induces a map \begin{equation}\label{eq:chin} \chi_n \colon \mathbb{G}_n \to (\mathbb{Z}_2/4)^{\times} \cong \mathbb{Z}/2. \end{equation} which then represents a class $\chi_n \in H_c^1(\mathbb{G}_n, \mathbb{Z}/2)$. Let $\widetilde{\chi}_n \in H_c^2(\mathbb{G}_n, \mathbb{Z}_2)$ be the Bockstein of $\chi_n$, and note that $2\widetilde{\chi}_n =0$. 
Denote by $\mathbb{G}_n^1$ the kernel of $\zeta_n$ and let $\mathbb{S}_n^1 = \mathbb{S}_n \cap \mathbb{G}_n^1$. The homomorphism $\zeta_n$ is surjective, and necessarily split since $\mathbb{Z}_p$ is topologically free. Therefore, \begin{align}\label{eq:G1ext} \mathbb{G}_n &\cong \mathbb{G}_n^1 \rtimes \mathbb{Z}_p, & \mathbb{S}_n &\cong \mathbb{S}_n^1 \rtimes \mathbb{Z}_p. \end{align} If $n$ is coprime to $p$, then the splitting is trivial and this is a product. \begin{remark}\label{rem:3termfiber} As a consequence of the fact that $\mathbb{G}_n/\mathbb{G}_n^1\cong \mathbb{Z}_p$, there is an equivalence $L_{K(n)}S^0 \simeq (E_n^{h\mathbb{G}_n^1})^{h\mathbb{Z}_p}$. If $\psi \in \mathbb{G}_n$ is such that $\zeta_n(\psi)$ is a topological generator of $\mathbb{Z}_p$, then we get an exact triangle \[ \xymatrix{ L_{K(n)}S^0 \ar[r] & E_n^{h\mathbb{G}_n^1} \ar[r]^-{\psi-1} & E_n^{h\mathbb{G}_n^1} \ar[r]^-{\delta} & \Sigma L_{K(n)}S^0.} \] We also denote by $\zeta_n$ its image $H_c^*(\mathbb{G}_n, (E_n)_0)$. It is known that $\zeta_n$ is a permanent cycle in the homotopy fixed point spectral sequence, see \cite[Section 8]{DH}. It detects the composite $S^0 \to E_n^{h\mathbb{G}_n^1} \xrightarrow{\delta} \Sigma L_{K(n)}S^0 $ (where the first map is the unit), which is also denoted by $\zeta_n \in \pi_{-1}L_{K(n)}S^0$. \end{remark} \subsubsection{The action of the Morava stabilizer group} \index{Action of the Morava stabilizer group} We now discuss the action of $\mathbb{G}_n$ on $(E_n)_*$. Most notably, this problem was first attacked in depth by Devinatz and Hopkins in \cite{DH_action} using the Gross--Hopkins period map (Remark~\ref{rem:grosshop}). A very nice summary of this approach is given by Kohlhaase \cite{Kohlhaase} and we discuss some of the consequences here. Let $F_n$ be the formal group law over $(E_n)_{0}$ which is a universal deformation of $\Gamma_n$ and was defined in Section~\ref{sec:moravaEthy}. 
For $\alpha \in \mathbb{G}_n$ given by a pair $(f,\sigma)$ where $\sigma \in \mathrm{Gal}(\mathbb{F}_{p^n}/\mathbb{F}_p)$ and $f \in \mathbb{S}_n$, the universal property of the deformation $F_n$ implies that there exists a unique pair $(f_{\alpha}, \alpha_*)$ consisting of a continuous ring isomorphism $\alpha_* \colon (E_n)_0 \to (E_n)_0$ and an isomorphism of formal group laws $f_\alpha \colon \alpha_*F_n \to F_n$ such that \begin{align}\label{eq:cong} ({f}_{\alpha},\alpha_*) \equiv (f, \sigma) \mod (p, u_1, \ldots, u_{n-1}).\end{align} The isomorphism $\alpha_*$ is extended to $(E_n)_*$ by defining $\alpha_*(u) = f'(0)u$. The assignment $\alpha \mapsto \alpha_*$ gives a left action of $\mathbb{G}_n$ on $(E_n)_*$. The action of an element $(\operatorname{id}, \sigma)$ corresponds to the natural action of the Galois group on the coefficients $\mathbb{W}_n$ in $(E_n)_* \cong \mathbb{W}_n[\![u_1, \ldots, u_{n-1}]\!][u^{\pm 1}] $, and we denote it by $\sigma_*$. Similarly, if $\alpha = (f, \operatorname{id})$, we let $f_*$ denote the isomorphism $\alpha_*$. Computing the action explicitly is difficult and there exists no general formula. However, three cases are fairly simple to deduce from the general description above: \begin{enumerate}[(a)] \item If $\alpha$ for $\sigma \in \mathrm{Gal}(\mathbb{F}_{p^n}/\mathbb{F}_p)$, then $\sigma_*$ is the action of the Galois group on the coefficients $\mathbb{W}_n$. For $x \in \mathbb{W}_n$, we write $x^{\sigma} = \sigma_*(x)$. \item If $ \omega \in \mathbb{S}_n$ is a primitive $(p^n-1)$th root of unity, then $\omega_*(u_i)=\omega^{p^i-1}u_i$ and $\omega_*(u) = \omega u$. \item If $\psi\in \mathbb{Z}_p^{\times} \subseteq \mathbb{S}_n$ is in the center, then $\psi_*(u_i)=u_i$ and $\psi_*(u)= \psi u$. \end{enumerate} Understanding the action more generally is difficult, but we say a few words on this here. 
For $f \in \mathbb{S}_n$, write $f= \sum\nolimits_{j= 0}^{n-1} f_j \xi^{j}$ for $f_j \in \mathbb{W}_n$ with $f_0 \in \mathbb{W}_n^{\times}$ as in \eqref{eq:alphaexpressions}. The following results due to Devinatz and Hopkins \cite{DH_action} are also given in Theorem 1.3 and Theorem 1.19 of \cite{Kohlhaase}. \begin{theorem}[Devinatz--Hopkins]\label{thm:DHact} Let $1\leq i \leq n-1$ and $f_j$ be as above. Then, modulo $(p,u_1, \ldots, u_{n-1})^2$, \begin{align*} f_*(u) &\equiv f_0 u+\sum_{j=1}^{n-1}f_{n-j}^{\sigma^{j}}uu_j , & f_*(uu_i) &\equiv \sum_{j=1}^{i} f_{i-j}^{\sigma^{j}}uu_j + \sum_{j=i+1}^{n} p f_{n+i-j}^{\sigma^{j}}uu_j \ . \end{align*} Further, if $f \in \mathbb{W}_n^{\times} \subseteq \mathbb{S}_n$, so that $f=f_0$ then $f_*(u_i) \equiv f_0^{\sigma^i}f_0^{-1}u_i$ modulo $(u_1, \ldots, u_{n-1})^2$. \end{theorem} An example of an immediate consequence of Theorem~\ref{thm:DHact} is the following result. See \cite[Lemma 1.33]{BobkovaGoerss} for a surprisingly simple proof. \begin{corollary}\label{cor:easyfixpoints} For all primes $p$ and all heights $n$, the unit $\mathbb{Z}_p \to (E_n)_*$ induces an isomorphism on $\mathbb{G}_n$ fixed points $\mathbb{Z}_p \cong (E_n)_*^{\mathbb{G}_n}$. \end{corollary} \begin{remark}[Gross--Hopkins period map]\label{rem:grosshop} The proof of Theorem~\ref{thm:DHact} relies on one of the deepest results in chromatic homotopy theory, due to Gross and Hopkins \cite{HopkinsGross}\index{Gross--Hopkins period map}, which points towards the mysterious interplay between this subject and arithmetic geometry. Let $K$ be the quotient field of $\mathbb{W}_n$ and $\mathrm{Sp}f((E_n)_0)_K$ be the generic fiber of the formal scheme associated to $(E_n)_0$. Since the division algebra $\mathbb{D}_n$ splits over $K$, i.e., $\mathbb{D}_n \otimes_{\mathbb{Q}_p}K$ is isomorphic to a matrix algebra $M_n(K)$, there is a natural $n$-dimensional $\mathbb{G}_n$-representation $V_K$. 
It follows that $\mathbb{G}_n$ acts on the corresponding projective space $\mathbb{P}(V_K)$ through projective linear transformations. In \cite{HopkinsGross,GrossHopkins}, Gross and Hopkins construct a \emph{period mapping} that linearizes the action of $\mathbb{G}_n$ on $\mathrm{Sp}f((E_n)_0)_K$: They prove that there is an {\'e}tale and $\mathbb{G}_n$-equivariant map of rigid analytic varieties \begin{equation}\label{eq:permap} \xymatrix{\Phi\colon \mathrm{Sp}f((E_n)_0)_K \ar[r] & \mathbb{P}(V_K).} \end{equation} Devinatz and Hopkins use this map to prove Theorem~\ref{thm:DHact} and it also features in the computations of Kohlhaase \cite{Kohlhaase}. \end{remark} One often needs more precision than that provided by Theorem~\ref{thm:DHact}. Since $f_{\alpha}$ is a morphism of formal group laws, it follows that \[ f_\alpha( [p]_{\alpha_*F_n}(x)) = [p]_{F_n}(f_\alpha(x)). \] This relation contains a lot of information. In practice, it gives a recursive formula to compute the morphism $\alpha_*$ as a function of the $\alpha_j$s. This method is applied explicitly in Section 4 of the paper~\cite{HKM} by Henn--Karamanov--Mahowald. However, even with these methods, it is difficult to get good approximations for the action of $\mathbb{G}_n$. If one restricts attention to finite subgroups $F \subseteq \mathbb{G}_n$, it is sometimes possible to do much better than these kinds of approximations. Recent developments suggest that working with a formal group law other than the Honda formal group law $\Gamma_n$ may be better suited to this task. For example, when $n=2$, one can choose to work with the formal group law of a super-singular elliptic curve. The automorphisms of the curve embed in the associated Morava Stabilizer group and one can use geometric information to write explicit formulas for their action on the associated $E$-theory. See Strickland \cite{strickland_level} and \cite[Section 2]{beaudry_towards}. 
In fact, the spectra $E_2^{hF}$ at height $2$ are the $K(2)$-localizations of various spectra of topological modular forms with level structures. See, for example, \cite{behrens_modular} and Remark~\ref{rem:tmf}. Elliptic curves are not available at higher heights, but there is a hope that the theory of automorphic forms will provide a replacement. This is the subject of \cite{bl_taf}, see also \cite{behrens_chapter}. Finally, equivariant homotopy theory also seems to provide better choices of formal group laws for studying the action of the finite subgroups. See, for example, \cite{HHR, HHRC4} together with \cite{hahnshi}, \cite{c4e4}, and \cite{beaudrybobkovahillstojanoska}. \subsubsection{Morava stabilizer group: homological algebra}\label{sssec:moravahomalg} Recall that the $E_2$-term of the descent spectral sequence in Theorem~\ref{thm:descentss} is given by the continuous cohomology of the Morava stabilizer group with coefficients in $(E_n)_*$. The goal of this section is to summarize the homological algebra required to construct these cohomology groups and to then discuss some features specific to $\mathbb{G}_n$. An important subtlety arising from the homotopical applications we have in mind is that we have to study the continuous cohomology of $\mathbb{G}_n$ with profinite coefficients, and not merely discrete ones. This theory has been systematically developed by Lazard~\cite{lazard_analyticgroups}; our exposition follows the more modern treatment of Symonds and Weigel~\cite{sw_analyticgroups}. Let $G = \lim_i G_i$ be a profinite group, given as an inverse limit of a system of finite groups $(G_i)$ and write $\mathbb{C}C_p(G)$ for the category of profinite modules over \[\mathbb{Z}_p\llbracket G \rrbracket = \lim_{i,j}\mathbb{Z}/p^j[G_i]\] and continuous homomorphisms. The category $\mathbb{C}C_p(G)$ is abelian and has enough projective objects. 
Moreover, the completed tensor product equips $\mathbb{C}C_p(G)$ with the structure of a symmetric monoidal category with unit $\mathbb{Z}_p$. In order to define a well-behaved notion of continuous cohomology\index{continuous cohomology} for $G$, assume that $G$ is a compact $p$-analytic Lie group in the sense of~\cite{lazard_analyticgroups}. A good reference for properties of $p$-adic analytic groups is \cite{dixon}. Lazard then shows that: \begin{itemize} \item $G$ is of type $p\text{-}\mathrm{FP}_{\infty}$, i.e., $\mathbb{Z}_p$ admits a resolution by finitely generated projective $\mathbb{Z}_p\llbracket G \rrbracket$-modules. It follows that the continuous cohomology of $G$ with coefficients in $M \in \mathbb{C}C_p(G)$, defined as \[ H_c^*(G,M) = \operatorname{Ext}_{\mathbb{Z}_p\llbracket G \rrbracket}^*(\mathbb{Z}_p,M), \] is a well-behaved cohomological functor, where the (continuous) $\operatorname{Ext}$-group is computed in $\mathbb{C}C_p(G)$. In particular, there is a Lyndon--Hochschild--Serre spectral sequence and an Eckmann--Shapiro type lemma for open normal subgroups \cite[Theorem 4.2.6 and Lemma 4.2.8]{sw_analyticgroups}. Similarly, continuous homology\index{continuous homology} is defined as \[H^c_*(G,M) = \operatorname{Tor}^{\mathbb{Z}_p\llbracket G \rrbracket}_*(\mathbb{Z}_p,M)\] where the (continuous) $\operatorname{Tor}$-group is computed in $\mathbb{C}C_p(G)$. \item $G$ is a virtual Poincar\'e duality group\index{virtual Poincar\'e duality group} in dimension $d = \dim(G)$ \cite[Theorem 5.1.9]{sw_analyticgroups}, i.e., there exists an open subgroup $H$ in $G$ such that \[ H_c^*(H,\mathbb{Z}_p\llbracket H \rrbracket) \cong \begin{cases} \mathbb{Z}_p & \text{if } * = d, \\ 0 & \text{otherwise}, \end{cases} \] and the length of a projective resolution of $\mathbb{Z}_p \in \mathbb{C}C_p(H)$ can be taken to be $d$. 
The second property is referred to by saying that the cohomological dimension of $H$ is $d$ and that the virtual cohomological dimension\index{virtual cohomological dimension} of $G$ is $d$; in symbols, $\cdim_p(H) = d$ and $\vcdim_p(G) = d$. The Poincar\'e duality property gives rise to a non-degenerate pairing \[ H_c^*(H,\mathbb{F}_p) \otimes H_c^{d-*}(H,\mathbb{F}_p) \longrightarrow H_c^{d}(H,\mathbb{F}_p)\cong \mathbb{F}_p, \] thereby justifying the terminology. \end{itemize} The key theorem, proved by Morava~\cite[\S 2.2]{morava} and relying on work by Lazard~\cite{lazard_analyticgroups}, allows us to apply this theory to the Morava stabilizer group: \begin{theorem}[Lazard, Morava]\label{thm:vcdim} The Morava stabilizer group $\mathbb{S}_n$ is a compact $p$-analytic virtual Poincar\'e duality group of dimension $n^2$. Further, the group $\mathbb{S}_n$ is $p$-torsion-free if and only if $p-1$ does not divide $n$, and in this case $\vcdim_p(G) = \cdim_p(G) =n^2$. \end{theorem} We note an important immediate consequence of this theorem, which is the underlying reason for the small prime vs.~large prime dichotomy in chromatic homotopy theory. See also Figure~\ref{fig:sse2}: \begin{corollary}\label{cor:hfpsslarge} If $p>2$ is such that $2(p-1) > n^2$, then the descent spectral sequence \eqref{eq:hfpss}\index{homotopy fixed point spectral sequence}\index{descent spectral sequence} for $S^0$ collapses at the $E_2$-page with a horizontal vanishing line\index{horizontal vanishing line} of intercept $s=n^2$ (meaning that $E_{2}^{s,t}=0$ for $s>n^2$) and there are no non-trivial extensions. \end{corollary} \begin{remark} The condition $2(p-1) > n^2$ can be improved to $2(p-1) \geq n^2$ using Corollary~\ref{cor:easyfixpoints}. 
\end{remark} \begin{remark}\label{rem:strongvanishing} An extremely powerful result of Devinatz--Hopkins is that, for any prime $p$ and any height $n$, there exists an integer $N$ such that, for all spectra $X$, the $K(n)$-local $E_n$-based Adams--Novikov spectral sequence\index{$K(n)$-local $E_n$-based Adams--Novikov spectral sequence} for $X$ (see \eqref{rem:KnlocalANSS}) has a horizontal vanishing line on the $E_{\infty}$-term at $s=N$, although the minimal such $N$ may be greater than $n^2$. For example, when $n=1$ and $p=2$, the homotopy fixed point spectral sequence \eqref{eq:hfpss} has non-trivial elements on the $s=2>1^2$ line at $E_{\infty}$. See \cite[Section 2.3]{BGH} for a proof of the existence of the vanishing line. Note further that it follows from Corollary~\ref{cor:easyfixpoints} and the existence of the vanishing line that the natural map $\mathbb{Z}_p \to \pi_0L_{K(n)}S^0$ is a nilpotent extension of rings. \end{remark} \begin{center} \begin{figure} \caption{{The $E_2$-term of $E_2^{s,t} \label{fig:sse2} \end{figure} \end{center} In order to run the descent spectral sequence computing $\pi_*L_{K(n)}S^0$, we have to come to grips with $H_c^*(\mathbb{G}_n, (E_n)_*)$, an extremely difficult problem. However, if one restricts attention to $H_c^*(\mathbb{G}_n, (E_n)_0)$, the computation appears to radically simplify in a completely unexpected way. Let $\iota \colon \mathbb{W}_n \to (E_n)_0$ be the natural inclusion. The following has been shown to be true at all primes when $n\leq 2$, see \cite{shimyabe, behse2, Kohlhaase, GoerssSplit, BGH, Beaudry2018}: \begin{conjecture}[Vanishing conjecture]\label{conj:vanishing}\index{Vanishing conjecture} Let $p$ be any prime and $n$ be any height. 
The map $\iota$ induces an isomorphism \[ \xymatrix{\iota_* \colon H_c^*(\mathbb{G}_n, \mathbb{W}_n) \ar[r]^-{\cong} & H_c^*(\mathbb{G}_n, (E_n)_0).} \] \end{conjecture} \begin{remark} The conjecture is so named because it implies that the cohomology of the $\mathbb{G}_n$-module $(E_n)_0/\mathbb{W}_n$ vanishes in all degrees. Note further that if one proves that $\mathbb{W}_n/p \to (E_n)_0/p$ induces an isomorphism on cohomology, then Conjecture~\ref{conj:vanishing} follows formally. \end{remark} As we will see in Section~\ref{sec:cscmore}, this conjecture and the accompanying computations informs our understanding of $L_{K(n)}S^0$, the essence of which is distilled in the formulation of the chromatic splitting conjecture. In fact, what makes Conjecture~\ref{conj:vanishing} particularly appealing is the fact that $H_c^*(\mathbb{G}_n,\mathbb{W}_n)$ appears to be rather simple when $p$ is large with respect to $n$. Rationally, we have some partial understanding due to work of Lazard \cite[Remark 2.2.5]{morava} and \cite[Rem.~2.2.5]{morava}, who established an isomorphism for all heights and primes \begin{equation}\label{eq:rationaliso} H_c^*(\mathbb{G}_n,\mathbb{W}_n) \otimes \mathbb{Q} \cong \Lambda_{\mathbb{Z}_p}(x_1,\ldots,x_n)\otimes \mathbb{Q}, \end{equation} where $\Lambda_{\mathbb{Z}_p}(x_1,\ldots,x_n)$ is the exterior algebra over $\mathbb{Z}_p$ on $n$ generators in degrees $\deg(x_i) = 2i-1$. Here, the class $x_1$ is $\zeta_n$ as defined in \eqref{eq:zetan}. Furthermore, when $p$ is large with respect to $n$, it is believed that there is an isomorphism \eqref{eq:rationaliso} before rationalization. \begin{conjecture} If $p\gg 0$, then $H_c^*(\mathbb{G}_n,\mathbb{W}_n) \cong \Lambda_{\mathbb{Z}_p}(x_1,\ldots,x_n)$. \end{conjecture} \begin{remark}\label{rem:WGmodules} For our chromatic applications, we need a mild extension of the setup presented above. 
Here and below, $\mathbb{W}_n = W(\mathbb{F}_{p^n})$ denotes the $\mathbb{G}_n$-module whose action is the restriction along $\mathbb{G}_n \to \mathrm{Gal}$ of the natural action of $\mathrm{Gal}$ on $W(\mathbb{F}_{p^n})$. We write $w^g=g(w)$ for this action. For $ G\subseteq \mathbb{G}_n$, define the \emph{twisted group ring}\index{twisted group ring} to be \[ \twistgr{\mathbb{W}_n}{G} := \lim_{i,j} \mathbb{W}_n/\mathfrak{m}^j\langle G_i\rangle \] with $G$-twisted multiplication determined by the relations $g\cdot r = g(r)g$ for $r\in \mathbb{W}_n$ and $g\in G$. We let $\GMod{\mathbb{W}_n}{G}$ be the category of profinite left $\twistgr{\mathbb{W}_n}{G}$-modules. These are profinite abelian groups $M = \lim_kM_k$ with a continuous action $\twistgr{\mathbb{W}_n}{G} \times M \to M$. If $H \subseteq G$ is a closed subgroup and $M$ is a left $\twistgr{\mathbb{W}_n}{H}$-module, then \begin{align}\label{eq:uparrow} M\upar{H}{G} := \twistgr{\mathbb{W}_n}{G} \otimes_{\twistgr{\mathbb{W}_n}{H}}M =\lim_{i,j,k} \left(\mathbb{W}_n/\mathfrak{m}^j[G_i] \otimes_{\twistgr{\mathbb{W}_n}{H}} M_k\right) \end{align} is a left $\twistgr{\mathbb{W}_n}{G}$-module. One can show that the homological algebra summarized above also works in the context of profinite modules over twisted group rings. Note that there is a functor from $\mathrm{Mod}_{\mathbb{Z}_p\llbracket G \rrbracket}$ to $\GMod{\mathbb{W}_n}{G}$ which sends a $\mathbb{Z}_p\llbracket G \rrbracket$-module $M$ to the $\mathbb{W}G{G}$-module $\mathbb{W}_n\otimes_{\mathbb{Z}_p}M$ with action given by $g( w \otimes m) = w^{g}\otimes g(m)$. This allows us to transport constructions in $\GMod{\mathbb{Z}_p}{G}$ to constructions in $\GMod{\mathbb{W}_n}{G}$. \end{remark} We now come to another important construction in chromatic homotopy theory, namely the $(E_n)_*$-module \[ (E_n)_*^{\vee}X= \pi_*L_{K(n)}(E_n\wedge X) \] associated to a spectrum $X$. 
The action of $\mathbb{G}_n$ on $E_n$ induces an action on $(E_n)_*^{\vee}X$ compatible with the $(E_n)_*$-action. Moreover, let $\mathfrak{m} = (p,u_1,\ldots,u_{n-1})$ be the maximal ideal of $(E_n)_0$ and, for $s\ge 0$, let $\mathbb{L}_s$ be the $s$th left derived functor of $\mathfrak{m}$-adic completion on $\mathrm{Mod}_{(E_n)_*}$. There is a strongly convergent spectral sequence \[ \mathbb{L}_s(\pi_*(E_n \wedge X))_t \Longrightarrow (E_n)_{s+t}^{\vee}X \] which in particular implies that the canonical map $(E_n)_*^{\vee}X \to \mathbb{L}_0((E_n)_*^{\vee}X)$ is an isomorphism. Such $(E_n)_*$-modules are called $\mathbb{L}$-complete and we refer the interested reader to \cite[Appendix A]{HovStrick} for a more thorough treatment. Taken together, this structure is called the \emph{Morava module of $X$}: \begin{definition}[Morava modules]\label{defn:moravamodules}\index{Morava module} A \emph{Morava module} $M$ is an $\mathbb{L}$-complete $(E_n)_*$-module equipped with an action by $\mathbb{G}_n$ in $\mathbb{L}$-complete modules that is compatible with the action on $(E_n)_*$. That is, for every $g\in \mathbb{G}_n$, $e\in (E_n)_*$ and $m \in M$, $g(em) = g(e)g(m)$. A morphism of Morava modules is a continuous map of $(E_n)_*$-modules that preserves the action. We denote the category of Morava modules by $\moravamod{n}$. \end{definition} By the discussion above, $(E_n)_*^{\vee}X$ is a Morava module for any spectrum $X$ and we obtain a functor \begin{align}\label{eq:moravamodulefunctor} (E_n)_*^{\vee} (-) := \pi_*L_{K(n)}(E_n\wedge -) \colon \mathrm{Sp} \longrightarrow \moravamod{n}. \end{align} This functor detects and reflects $K(n)$-local equivalences, but has the advantage that $(E_n)_*^{\vee} (-) $ comes equipped with an action of $\mathbb{G}_n$. This extra structure proves to be extremely powerful for computations, and is one of the reasons why Morava modules play a central role in the field. 
For more information on Morava modules, we refer the reader to \cite[Section 1.3]{BobkovaGoerss} and \cite[Section 2]{ghmr}, noting that authors often simply write $(E_n)_*X= \pi_*L_{K(n)}(E_n\wedge X)$ as opposed to the non-completed homology $(E_n)_*X= \pi_*(E_n \wedge X)$, but we will not do so here. Note also that, if $X$ is finite, then $(E_n)_*X \cong (E_n)_*^{\vee}X$. \begin{remark}\label{rem:periodicity}\index{periodicity} For $F$ a finite subgroup of $\mathbb{G}_n$, the action of $\mathbb{G}_n$ on $(E_n)_*$ restricts to an action of $F$. We can also consider the category $\mathrm{Mod}_{(E_n)_*}^F$ of $\mathbb{L}$-complete $(E_n)_*$-modules equipped with an action of $F$. Then $(E_n)_*$ is periodic as an object in $\mathrm{Mod}_{(E_n)_*}^F$ since the element $N=\prod_{g\in F} g(u)$ for $u \in (E_{n})_{-2}$ as in \eqref{eq:Estariso} is an invariant unit. Let $d^{\operatorname{alg}}_F$ be the smallest integer such that $(E_n)_* \cong (E_n)_{*+d^{\operatorname{alg}}_F}$ in $\mathrm{Mod}_{(E_n)_*}^F$. This leads to an isomorphism of Morava modules \begin{align*} (E_n)_*^{\vee}E_n^{hF} & \cong \mathrm{Map}^c(\mathbb{G}_n/F, (E_n)_*) \cong \mathrm{Hom}^c_{\mathbb{W}_n}(\mathbb{W}_n\upar{F}{\mathbb{G}_n}, (E_n)_*) \end{align*} closely related to \eqref{eq:EnEn} and it implies that \[ (E_n)_*^{\vee}E_n^{hF} \cong (E_n)_*^{\vee}\Sigma^{d^{\operatorname{alg}}_F}E_n^{hF}. \] However, $E_n^{hF} $ need not be equivalent to $\Sigma^{d^{\operatorname{alg}}_F}E_n^{hF}$. Nonetheless, because of the strong vanishing line discussed in Remark~\ref{rem:strongvanishing}, some power of $N$ is a permanent cycle and gives rise to a periodicity generator for $E_n^{hF}$, so for some multiple $d_{F}^{\mathrm{top}}$ of $d^{\operatorname{alg}}_F$, there is an equivalence $E_n^{hF}\simeq \Sigma^{d^{\mathrm{top}}_F}E_n^{hF}$. 
For example, at $p=2$, $E_1$ is $2$-complete complex $K$-theory\index{complex $K$-theory} and $E_1^{hC_2}$ is the $2$-complete real $K$-theory spectrum $KO$.\index{real $K$-theory} We have: \begin{align*} K_*^{\vee}KO &\cong K_*^{\vee}\Sigma^4KO, & KO &\not\simeq \Sigma^4KO, & KO &\simeq \Sigma^8KO. \end{align*} \end{remark} \section{$K(1)$-local homotopy theory}\label{sec:K1}\label{sec:cscheight1} In this section, we tell a part of the chromatic story at height $n=1$ as a warm up for the more complicated ideas needed to study higher heights. The contents of this section are classical and can be found in various forms throughout the literature, for example, Adams and Baird \cite{adamsbaird}, Bousfield \cite{bousfield_locspectra, bousfield_odd}, Ravenel \cite[Theorem 8.10, 8.15]{Rav84}. See \cite[Section 6]{henn_minicourse} for a more recent treatment, and \cite[Section 4]{BGH} for more details on the case $p=2$. \subsection{Morava $E$-theory and the stabilizer group at $n=1$} At height $n=1$, Morava $E$-theory is the $p$-completed complex $K$-theory\index{complex $K$-theory} spectrum, which we simply denote by $K$. There is an isomorphism $K_* \cong \mathbb{Z}_p[u^{\pm 1}]$ for a unit $u \in K_{-2}$ which can be chosen so that $u^{-1}\in K_2$ is the Bott element.\index{Bott element} In this case, $\mathbb{G}_1 = \mathbb{S}_1 \cong \mathbb{Z}_p^{\times}$ corresponds to the $p$-completed Adams operations.\index{Adams operations} The action of $\mathbb{S}_1$ on $K_*$ is the $\mathbb{Z}_p$-algebra isomorphism determined by \begin{align}\label{eq:actadams} \alpha_*(u) = \alpha u \end{align} for $\alpha \in \mathbb{Z}_p^{\times}$. The keen reader will notice that this is the inverse of the action of the Adams operations, which is given by $\alpha_*(u) = \alpha^{-1} u$. This comes from switching a right action to a left action. The map $L_{K(1)}S^0 \to K^{h\mathbb{Z}_p^{\times}}$ of \eqref{eq:galoisspectra} is a $\mathbb{Z}_p^{\times}$ pro-Galois extension. 
We use this extension to compute the homotopy groups $\pi_*L_{K(1)}S^0$.\index{$K(1)$-local sphere} One can take the direct approach of computing the spectral sequence of \eqref{eq:hfpss} \begin{equation}\label{eq:hfpssk1} E_2^{s,t} = H_c^s(\mathbb{Z}_p^{\times}, K_t ) \Longrightarrow \pi_{t-s}L_{K(1)}S^0. \end{equation} In fact, this spectral sequence collapses at the $E_2$-page at odd primes and at the $E_4$-page at the prime $2$. This is not a hard computation, but we take a different path in order to illustrate the finite resolution philosophy. \subsection{Finite resolution at height $n=1$}\label{sssec:resn1} Here, we describe our first example of a finite resolution.\index{finite resolution} Let $C_m$ denote a cyclic group of order $m$, $\mu =C_2$ if $p=2$, and $\mu =C_{p-1}$ if $p$ is odd. Then, $\mathbb{Z}_p^{\times} \cong \mu \times \mathbb{Z}_p$, where the $\mathbb{Z}_p$ corresponds to the subgroup of units congruent to $1$ modulo $p$ if $p$ is odd, and to those congruent to $1$ modulo $4$ if $p=2$. We let $\psi$ be a topological generator for this factor of $\mathbb{Z}_p$. The notation is meant to be reminiscent of the Adams operations. We will make a choice for $\psi$ below in \eqref{eq:psichoice}. \begin{remark} The spectrum $K^{hC_{p-1}} $ is the unit component in the splitting of the $p$-completed complex $K$-theory spectrum $K$ into Adams summands if $p$ is odd, and $K^{hC_2}$ is the $2$-completed real $K$-theory spectrum if $p=2$. \end{remark} The $K(1)$-local sphere can be obtained by an iterated fixed points construction: \[ L_{K(1)} S^0 \simeq K^{h\mathbb{Z}_p^{\times}} \simeq (K^{h\mu})^{h\mathbb{Z}_p}. \] Since $\psi \in \mathbb{Z}_p$ is a topological generator, taking homotopy fixed points with respect to $\mathbb{Z}_p$ is equivalent to taking the homotopy fiber of the map $\psi-1$. 
Therefore, there is a fiber sequence \begin{align}\label{eq:K1localfiber} \xymatrix{L_{K(1)}S^0 \ar[r] & K^{h\mu} \ar[rr]^-{ \psi-1} & & K^{h\mu} \ar[r] & \Sigma L_{K(1)}S^0 }. \end{align} This is a \emph{finite resolution} of $L_{K(1)}S^0$ as will be defined in Definition~\ref{defn:finresolution} below. To construct finite resolutions at higher heights where the structure of the Morava stabilizer group is more intricate, we start by attacking the problem in algebra and then we transfer algebraic constructions to topology. We give a quick overview of how this would happen at height $1$ to give the reader something to think of while reading Section~\ref{sec:fin}. \ \\ \noindent \emph{Step 1: Algebraic resolution.}\label{algebraic resolution} The group $\mathbb{Z}_p$ is topologically free of rank one and there is an exact sequence of left $\mathbb{Z}_p^{\times}$-modules \begin{align}\label{eq:algres1} \xymatrix{ 0\ar[r] & \mathbb{Z}_p\upar{\mu}{\mathbb{Z}_p^{\times}} \ar[r]^-{\psi-1} &\mathbb{Z}_p\upar{\mu}{\mathbb{Z}_p^{\times}} \ar[r] & \mathbb{Z}_p \cong \mathbb{Z}_p\upar{\mathbb{Z}_p^{\times}}{\mathbb{Z}_p^{\times}} \ar[r] & 0.} \end{align} Here, $\mathbb{Z}_p[\![\mathbb{Z}_p^{\times}]\!] = \lim_{i,j} \mathbb{Z}/p^i[(\mathbb{Z}/p^j)^{\times}]$ is the completed group ring, which was discussed in Section~\ref{sssec:moravahomalg}, and $\mathbb{Z}_p\upar{\mu}{\mathbb{Z}_p^{\times}} \cong \mathbb{Z}_p[\![\mathbb{Z}_p^{\times}]\!] \otimes_{\mathbb{Z}_p[\mu]} \mathbb{Z}_p$. This is a projective resolution of $\mathbb{Z}_p$ as a $\mathbb{Z}_p^{\times}$-module if and only if $p>2$. See Remark~\ref{rem:notproj} below on this point. 
Applying $\mathrm{Hom}^c_{\mathbb{Z}_p}( -, K_* )$ to \eqref{eq:algres1} gives a short exact sequence of Morava modules \begin{align}\label{eq:morava} \xymatrix@C=1.5pc{ K_* \ar[r] &\mathrm{Hom}^c_{\mathbb{Z}_p} (\mathbb{Z}_p\upar{\mu}{\mathbb{Z}_p^{\times}} ,K_* ) \ar[rrr]^-{\mathrm{Hom}_{\mathbb{Z}_p}^c( \psi-1, K_* ) } & & & \mathrm{Hom}^c_{\mathbb{Z}_p} (\mathbb{Z}_p\upar{\mu}{\mathbb{Z}_p^{\times}} , K_* ) } \end{align} \ \\ \noindent \emph{Step 2: Topological Resolution.} The second step is to prove that the algebraic resolution has a topological realization. More precisely, \eqref{eq:morava} is an exact sequence in the category of Morava modules $\moravamod{1}$. As was described in \eqref{eq:moravamodulefunctor}, there is a functor \[ K_*^{\vee}(-)=(E_1)_*^{\vee}(-) \colon \mathrm{Sp} \longrightarrow \moravamod{1}. \] When we have an algebraic resolution of length $1$, a \emph{topological realization of \eqref{eq:algres1}} is a choice of fiber sequence in the category of $K(1)$-local spectra \begin{align}\label{eq:diag} \xymatrix{ \mathcal{E}_{-1} \ar[r]^{\delta_{-1}} & \mathcal{E}_{0} \ar[r]^{\delta_0} & \mathcal{E}_{1}& } \end{align} where $\mathcal{E}_{0}$ and $\mathcal{E}_{1}$ are finite wedges of suspensions of spectra of the form $K^{hF}$ for $F\subseteq \mathbb{G}_1$ a finite subgroup, such that, up to isomorphism of complexes, \eqref{eq:morava} is the complex of Morava modules obtained from \eqref{eq:diag} by applying $K_*^{\vee}(-)$. If $\mathcal{E}_{-1} = L_{K(1)}S^0$, then this is an example of a finite resolution of the $K(1)$-local sphere. \begin{remark}The case when the algebraic resolution has length $d\geq 1$ is discussed in the next section. We will see in Definition~\ref{defn:finresolution} that the definition of a finite topological resolution of length greater than $1$ is more subtle. See also Example~\ref{ex:finres1}. \end{remark} There is no algorithm for finding a topological realization. 
A priori, one may not exist, and if it does, it may not be unique. Without a priori knowledge of the existence of \eqref{eq:K1localfiber}, the key observations for finding a topological realization of \eqref{eq:algres1} are \begin{itemize} \item the isomorphism of Morava modules \[ K_*^{\vee}K^{h\mu} \cong \mathrm{Map}^c (\mathbb{Z}_{p}^{\times}/\mu,K_* ) \cong \mathrm{Hom}^c_{\mathbb{Z}_p} (\mathbb{Z}_p\upar{\mu}{\mathbb{Z}_p^{\times}} ,K_* ) ,\] and \item the fact that $\mathrm{Hom}^c_{\mathbb{Z}_p}( \psi-1, K_* ) = K_*^{\vee}(\psi-1)$. \end{itemize} Knowing these facts, \eqref{eq:morava} can be identified with the short exact sequence of Morava modules \begin{align}\label{eq:resheight1algK} \xymatrix{ K_* \ar[r] & K_*^{\vee}K^{h\mu} \ar[rrr]^-{K_*^{\vee}(\psi-1)} & & & K_*^{\vee}K^{h\mu}}. \end{align} Given this, we let $\mathcal{E}_{-1} = L_{K(1)}S^0 $, $\mathcal{E}_0 = \mathcal{E}_1 = K^{h\mu}$. We let $\delta_{-1}$ be the unit and $\delta_{0}$ be $\psi-1$. It follows that the fiber sequence \begin{align*} \xymatrix{L_{K(1)}S^0 \ar[r] & K^{h\mu} \ar[rrr]^-{\psi-1} & & & K^{h\mu}}. \end{align*} is a topological realization as it gives rise to \eqref{eq:morava} under the functor $K_*^{\vee}(-)$. This is our first example of a finite resolution of $L_{K(1)}S^0$. \begin{remark} We \emph{did} make choices here and different choices could have given a different topological realization. For example, for $p=2$, $K^{hC_2} \simeq KO$ and $K_*^{\vee} KO \cong K_*^{\vee}\Sigma^4 KO$, yet $KO \not\simeq \Sigma^4 KO $. In fact, we could have constructed a topological realization using $ \Sigma^4 KO$ instead of $KO$. Such a resolution is described below in \eqref{eq:realizationP1}. The resolution described there is a topological realization of the algebraic resolution \eqref{eq:algres1}, but it is not a finite resolution of the sphere as $\mathcal{E}_{-1} = P_1 \not\simeq L_{K(1)}S^0$. 
\end{remark} \subsection{Homotopy groups and chromatic reassembly}\label{sec:reassemlyn1} The long exact sequence on homotopy groups associated to \eqref{eq:K1localfiber} allows one to compute $\pi_*L_{K(1)}S^0$ from $\pi_*K^{h\mu}$ and knowledge of the action of $\psi$. The homotopy groups of $K^{h{\mu}}$ are computed using the homotopy fixed point spectral sequence \[ E_2^{s,t} \cong H^s({\mu}, \pi_tK) \Longrightarrow \pi_{t-s}K^{h{\mu}}. \] Recall that $\mu = C_{p-1}$ if $p$ is odd and $C_2$ if $p=2$. So computing group cohomology with coefficients in $K_* = \mathbb{Z}_p[u^{\pm1}]$ is not so bad given the explicit formula \eqref{eq:actadams}. We get \[ H^*({\mu}, \pi_*K) \cong \begin{cases} \mathbb{Z}_p[u^{\pm (p-1)}] & p \neq 2 \\ \mathbb{Z}_2[\eta, u^{\pm 2}]/(2\eta) & p=2, \end{cases} \] where the $(s,t)$ bidegree of $\eta$ is $(1,2)$. The element $\eta$ detects the Hopf map in $\pi_1S^0$. For $p$ odd, the spectral sequence collapses for degree reasons. For $p=2$, the fact that $\eta^4=0$ in $\pi_*S^0$ implies a differential $d_3( u^{-2}) =\eta^3$, and the spectral sequence collapses at $E_4$ for degree reasons. So, we have \[ \pi_{*}K^{h{\mu}} \cong \begin{cases} \mathbb{Z}_p[\beta^{\pm 1}] & p \neq 2, \ |\beta|=2(p-1) \\ \mathbb{Z}_p[\eta, \alpha, \beta^{\pm 1}]/(2\eta, \eta^3, \alpha^2-4\beta) & p=2, \ |\eta|=1, \ |\alpha|=4, \ |\beta| = 8. \end{cases} \] If $p$ is odd, $\beta \in \pi_{2(p-1)}$ is detected by $u^{1-p}$. If $p=2$, $\eta \in \pi_1$ is detected by the same-named class on the $E_2$-page, $\alpha\in \pi_4$ is detected by $2u^{-2}$ and $\beta \in \pi_8$ is detected by $u^{-4}$. \begin{remark} The differential $d_3( u^{-2}) =\eta^3$ can be obtained as a consequence of the \emph{slice differentials theorem} \cite[Theorem 9.9]{HHR}. This is an overkill for this particular example which follows from classical considerations. 
However, we mention this here since the slice differentials theorem also implies differentials at higher heights in spectral sequences computing $\pi_*E_n^{hF}$ for finite subgroups $F \subseteq \mathbb{G}_n$. \end{remark} Now, we turn to computing the long exact sequence on homotopy groups associated to \eqref{eq:K1localfiber}. Choose an element $\psi$ of $\mathbb{Z}_p^{\times}$ which satisfies \begin{align}\label{eq:psichoice} \psi^{-1}= \begin{cases}(1+p)& p\neq 2 \\ 5 & p= 2. \end{cases} \end{align} There are other possible choices: One could choose any element in $\mathbb{Z}_p^{\times}$ such that the image of $\psi^{-1}$ in $\mathbb{Z}_p^{\times}/\mu$ is a topological generator. The outcome of these calculations are independent of the choice. From \eqref{eq:actadams}, we deduce that the action of $\psi$ is then given by \begin{align*}\psi_*(\beta) &= \begin{cases} (1+p)^{p-1}\beta & p\neq 2\\ 5^{4} \beta & p=2 \end{cases}, & \psi_*(\alpha) &= 5^{2} \alpha, & \psi_*(\eta ) &=\eta. \end{align*} Let $v_p(k)$ denote the $p$-adic valuation of $k \in \mathbb{Z}$. For $p$ odd, the long exact sequence on homotopy groups gives \[ \pi_*L_{K(1)}S^0 = \begin{cases} \mathbb{Z}_p & *=0,-1 \\ \mathbb{Z}/p^{v_p(k)+1} & *=2k(p-1)-1 \\ 0 & \text{otherwise.} \end{cases} \] This is depicted in Figure~\ref{fig:K1p3} for $p=3$. For $p=2$, we have \[ \pi_*L_{K(1)}S^0 = \begin{cases} \mathbb{Z}_2 & *=-1 \\ \mathbb{Z}_2 \oplus \mathbb{Z}/2 & *=0 \\ \mathbb{Z}/2 & *=0, 2\mod 8, *\neq 0 \\ \mathbb{Z}/2 \oplus \mathbb{Z}/2 & *=1\mod 8 \\ \mathbb{Z}/8 & *=3\mod 8 \\ \mathbb{Z}/2^{v_2(k)+4} & *=-1+8k, k\neq 0 \\ 0 & *=4,5,6 \mod 8 . \end{cases} \] This is depicted in Figure~\ref{fig:K1p2}. One has to argue that there is no additive extension in degrees $1 \mod 8$ but we do not do this here. 
\begin{figure} \caption{The long exact sequence on homotopy groups associated to \eqref{eq:K1localfiber} for $p=3$.} \label{fig:K1p3} \end{figure} \begin{figure} \caption{The long exact sequence on homotopy groups associated to \eqref{eq:K1localfiber} for $p=2$.} \label{fig:K1p2} \end{figure} \begin{remark}\label{rem:notproj} The dichotomy between $p=2$ and odd primes in the computations is an instance of a general phenomenon which was discussed in Section~\ref{sssec:moravahomalg} and is revisited in Section~\ref{sec:assymptotics} below. That is, when $p$ is large with respect to $n$, chromatic homotopy theory becomes algebraic (see for example Corollary~\ref{cor:hfpsslarge}). On the other hand, when $p$ is small the stabilizer group $\mathbb{S}_n$ might contain $p$-torsion and this appears to reflect interesting topological phenomena. Here, $\mathbb{S}_1 \cong\mathbb{Z}_2^{\times}$ contains $2$-torsion at $p=2$ and there are differentials in the spectral sequence computing the homotopy groups of $KO \simeq K^{hC_2}$, a much more intricate spectrum than the Adams summand $K^{hC_{p-1}}$ at odd primes whose homotopy fixed point spectral sequence collapses at the $E_2$-page. \end{remark} \begin{remark} The $\mathbb{Z}_p$ summand in $\pi_{-1}L_{K(1)}S^0$ is generated by the image of the composite $S^0 \to K^{h\mu} \to \Sigma^{-1}L_{K(1)}S^0$ where the first map is the unit and the second is the connecting homomorphism of \eqref{eq:K1localfiber}. We call this map and the homotopy class it represents $\zeta_1 \in \pi_{-1}L_{K(1)}S^0$. It is detected in \eqref{eq:hfpssk1} by the same-named class \[\zeta_1 \in H_c^1(\mathbb{Z}_p^{\times}, K_0) \cong \mathrm{Hom}^c(\mathbb{Z}_p^{\times}, \mathbb{Z}_p) \] corresponding to the projection $ \mathbb{Z}_p^{\times} \to \mathbb{Z}_p^{\times}/\mu \cong \mathbb{Z}_p$. See \eqref{eq:zetan} and Remark~\ref{rem:3termfiber} for analogues at higher heights. 
\end{remark} \begin{remark}\label{rem:L1V0} An easy computation that will be relevant later is that of $\pi_*(L_{K(1)}S^0/p)$ for $p$ odd. The descent spectral sequence \[ H_c^s(\mathbb{Z}_p^{\times}, K_t/p) \Longrightarrow \pi_{t-s}(L_{K(1)}S^0/p). \] collapses with no extensions and \[ \pi_*(L_{K(1)}S^0/p) \cong \mathbb{F}_p[v_1^{\pm 1}]\otimes \Lambda_{\mathbb{F}_p}(\zeta_1), \] where $v_1 = u^{1-p}$. We abuse notation by denoting the composite $\mathbb{Z}_p^{\times} \xrightarrow{\zeta_1} \mathbb{Z}_p^{\times} /\mu \cong \mathbb{Z}_p \to \mathbb{Z}/p$ also by $\zeta_1 \in H^1_c(\mathbb{Z}_p^{\times}, K_0/p)$. \end{remark} Finally, we turn to the problem of chromatic reassembly at height $n=1$. The chromatic fracture square\index{chromatic fracture square} \eqref{eq:chromaticfracture} in this case gives \[\xymatrix@=1.5pc{F_1 \ar[r] \ar[d]_-{\simeq} & L_1S^0 \ar[r] \ar[d] & L_{K(1)}S^0 \ar[d] \\ F_1 \ar[r] & L_0S^0_p \ar[r] & L_0L_{K(1)}S^0 } \] where $F_1$ is the fiber of the horizontal maps. In particular, it is the fiber of the map $L_0S^0_p \to L_0L_{K(1)}S^0$ induced by the unit. Since $L_0$ is rationalization, there is an isomorphism $\pi_*L_0L_{K(1)}S^0 \cong p^{-1}\pi_*L_{K(1)}S^0$. From the above calculations, we see that the map $1 \vee \zeta_1 \colon S^0 \vee S^{-1} \to L_{K(1)}S^0$ induces an equivalence \begin{equation}\label{eq:cscn1first} L_0L_{K(1)}S^0 \simeq L_0S^0_p \vee L_0S^{-1}_p. \end{equation} In particular, $L_0S^0_p \to L_0L_{K(1)}S^0$ is split and $\Sigma F_1 \simeq L_0 S_p^{-1}$. This proves the \emph{strong chromatic splitting conjecture}\index{chromatic splitting conjecture} for $n=1$, which will be stated in general in Section~\ref{sec:cscmore}. 
We get the following diagram from the long exact sequence on homotopy groups associated to the fiber sequence $L_1S^0_p \to L_{K(1)}S^0 \to \Sigma F_1\simeq L_0 S_p^{-1}$: \begin{align*} \xymatrix{\pi_{-1}L_1S^0_p \ar[r] \ar@{=}[d] & \pi_{-1} L_{K(1)}S^0 \ar@{=}[d] \ar[r] & \pi_{-2} F_1 \ar@{=}[d] \ar[r] & \pi_{-2} L_1S^0_p \ar[r] \ar@{==}[d] & \pi_{-2} L_{K(1)}S^0 \ar@{=}[d] \\ 0\ar[r] & \mathbb{Z}_p \ar[r] &\mathbb{Q}_p \ar[r] & \mathbb{Q}_p/\mathbb{Z}_p \ar[r] & 0. } \end{align*} Piecing the rest of the long exact sequence on homotopy groups together gives \[ \pi_*L_1S^0_p \cong \mathbb{Z}_p \oplus \Sigma^{-2}\mathbb{Q}_p/\mathbb{Z}_p \oplus \mathrm{Tor}(\pi_*L_{K(1)}S^0 ), \] where the $\mathbb{Z}_p$ is in degree $0$ and comes from the summand $\mathbb{Z}_p \subseteq \pi_0L_{K(1)}S^0$, this inclusion being an isomorphism when $p$ is odd. The summand $\mathbb{Q}_p/\mathbb{Z}_p$ is in degree $-2$ and $ \mathrm{Tor}$ denotes the torsion subgroup. In the next sections, we review these topics at higher heights. While we are not able to do such an explicit analysis for $n\geq 2$, the tools and ideas described above do generalize and we give an overview of some of the techniques available to study the $K(n)$-local category and the $K(n)$-local sphere. \section{Finite resolutions and their spectral sequences}\label{sec:fin} \index{finite resolution} We now describe a \emph{recipe} for the construction of finite resolutions of the $K(n)$-local sphere. We note that every step of this procedure requires hard work specific to the height and the prime. We then illustrate the general formalism with many examples at height $n=2$ in Section~\ref{sec:resheight2}. Some applications of these finite resolutions will then be discussed in the next section on the chromatic splitting conjecture and local Picard groups. References for this material are \cite{ghmr, henn_res, henn_minicourse}. 
\subsection{What is a finite resolution} \begin{definition}\label{defn:finresolution} A \emph{finite resolution of $L_{K(n)}S^0$} of length $d$ is a diagram \begin{align}\label{eq:finres} \xymatrix{ L_{K(n)}S^0 =X_d \ar[r]^-{i_d} & X_{d-1} \ar[r]^-{i_{d-1}} & \cdots \ar[r]^-{i_2} & X_1 \ar[r]^-{i_1} & X_0 \\ \Sigma^{-d} \mathcal{E}_d \ar[u]^-{j_d} & \Sigma^{-(d-1)} \mathcal{E}_{d-1} \ar[u]^-{j_{d-1}} & & \Sigma^{-1} \mathcal{E}_1 \ar[u]^-{j_1} & \mathcal{E}_0 \ar[u]^{j_0}_-{\simeq} }\end{align} in the $K(n)$-local category such that \begin{enumerate}[(a)] \item the sequences \begin{equation}\label{eq:exactfirst} \xymatrix@C=1pc{ \Sigma^{-k} \mathcal{E}_k \ar[rr]^-{ j_k} & &X_k \ar[rr]^-{ i_k} & & X_{k-1} \ar[rr]^-{\ell_k} & & \Sigma^{-k+1} \mathcal{E}_k }\end{equation} are exact triangles, and \item the $\mathcal{E}_k$s are finite wedges of suspensions of spectra of the form $E_n^{hF}$ for finite subgroups $F$ of $\mathbb{G}_n$. \end{enumerate} \end{definition} In other words, a finite resolution is a tower of fibrations resolving $L_{K(n)}S^0$ by spectra of the form $E_n^{hF}$ in a finite number of steps using a finite number of pieces. Typically, $d=n^2$. Note that the tower \eqref{eq:finres} gives a diagram \begin{align}\label{eq:resunrav} \xymatrix{ L_{K(n)}S^0 \ar[r]^-{\delta_0} & \mathcal{E}_{0} \ar[d]_-{\simeq}^-{j_0} \ar[r]^-{\delta_{1}} & \mathcal{E}_{1} \ar[r]^-{\delta_{2}} \ar[d]^-{\Sigma j_1} & \mathcal{E}_{2} \ar[r] & \cdots \ar[r]^-{\delta_{d}} & \mathcal{E}_d \ar[d]^-{\Sigma^d j_d} & \\ & X_0 \ar[ur]_{\ell_1} & \Sigma X_1 \ar[ur]_-{\Sigma \ell_2} & & & \Sigma^d L_{K(n)}S^0 & } \end{align} where $\delta_0$ is defined so that $j_0 \delta_0 = i_1 \ldots i_d$. We often denote the finite resolution by the top line of this diagram. We can also smash \eqref{eq:finres} (in the $K(n)$-local category) with a spectrum $Y$ to obtain a tower of fibrations resolving $L_{K(n)} Y $. 
For any $X \in \mathrm{Sp}$, a resolution of the form \eqref{eq:finres} gives rise to a strongly convergent spectral sequence \begin{align*} E_1^{s,t} &= [X, \mathcal{E}_s \wedge Y ]_t \Longrightarrow [X, L_{K(n)} Y ]_{t-s}, \end{align*} with differentials $d_r\colon E_r^{s,t} \to E_r^{s+r, t+r-1}$ that collapses at the $E_{d+1}$-page. There is also a similar spectral sequence computing $ [L_{K(n)}Y,X ]$. \begin{example}\label{ex:finres1} The proto-example of such a resolution is the resolution \eqref{eq:K1localfiber}. Recall that $E_1$ is $p$-completed $K$-theory and let $\mu$ be as in Section~\ref{sec:K1}. The fiber sequence \eqref{eq:K1localfiber} can be rearranged into a (very short) tower of fibrations \[ \xymatrix{ L_{K(1)}S^0 \ar[r]^-{i_1} & E_1^{h\mu}=X_0 \\ \Sigma^{-1}E_1^{h\mu} = \Sigma^{-1}\mathcal{E}_1. \ar[u]^-{j_1} & E_1^{h\mu} = \mathcal{E}_0\ar[u]^-{j_0}_{\simeq} } \] In this case, the associated Bousfield--Kan spectral sequence degenerates to the long exact sequence on homotopy groups. \end{example} For the rest of this section, we give an overview of how such resolutions are constructed. Note that the art of building finite resolutions has evolved in the last fifteen years. For a long time, the role of the Galois group was not as clear as it has become recently in the work of Henn in \cite{henn_centr}, so we give a revised recipe here. \subsection{Algebraic resolutions}\index{algebraic resolution} In practice, the first step to constructing a finite topological resolution is to construct its algebraic ``reflection''. These are the finite algebraic resolution. In practice, experts do not work from a definition, but rather know a finite algebraic resolution when they see one. Because of this, we give the following loose \emph{description} as opposed to \emph{definition}. 
\begin{descript}\label{def:finalgres} A \emph{finite algebraic resolution} of length $d$ is an exact sequence \begin{align}\label{eq:algres} \xymatrix{0 \ar[r] & \mathcal{C}_d \ar[r]^-{\partial_{d}} & \mathcal{C}_{d-1} \ar[r]^{\partial_{d-1}} & \cdots \ar[r]^-{\partial_1} & \mathcal{C}_0 \ar[r]^-{\partial_{0}} & \mathcal{C}_{-1}= \mathbb{W}_n \ar[r] & 0, } \end{align} where the $\mathcal{C}_k$s are $\mathbb{W}G{\mathbb{G}_n}$-modules that have the property that, for some $\mathcal{E}_k$ as in Definition~\ref{defn:finresolution}~(b), there is an isomorphism \begin{equation}\label{eq:fortheCk} (E_n)_*^{\vee}\mathcal{E}_k \cong \mathrm{Hom}_{\mathbb{W}_n}^c(\mathcal{C}_k, (E_n)_*).\end{equation} \end{descript} Roughly, a topological resolution \emph{realizes} an algebraic resolution if there is an isomorphism of exact sequences \[\mathrm{Hom}_{\mathbb{W}_n}^c( \mathcal{C}_\bullet , (E_n)_*) \cong (E_n)_*^{\vee}(\mathcal{E}_\bullet).\] Here $\mathcal{C}_{\bullet}$ is as in \eqref{eq:algres} and $\mathcal{E}_\bullet$ is the top row of \eqref{eq:resunrav}. In this sense, the algebraic resolution is a ``reflection'' of the topological resolution. \begin{remark} Recall from \eqref{eq:uparrow} that $M \upar{F}{\mathbb{G}_n} = \mathbb{W}G{\mathbb{G}_n} \otimes_{\mathbb{W}_n\langle F \rangle} M $. Typical examples for the modules $\mathcal{C}_k$ are among the following: \begin{itemize} \item If $\mathcal{C}_k$ is a direct sum of modules of the form $\mathbb{W}_n\upar{F}{\mathbb{G}_n}$ for $F$ a finite subgroup of $\mathbb{G}_n$, then $\mathcal{C}_k$ satisfies \eqref{eq:fortheCk}. Indeed, it was mentioned in Remark~\ref{rem:periodicity} that for any $m\in \mathbb{Z}$ and $F$ a finite subgroup of $\mathbb{G}_n$, there are isomorphisms \begin{align*} \mathrm{Hom}_{\mathbb{W}_n}^c(\mathbb{W}_n\upar{F}{\mathbb{G}_n}, (E_n)_*) & \cong \mathrm{Map}^c(\mathbb{G}_n/F, (E_n)_*) \cong (E_n)_*^{\vee}\Sigma^{md_F^{\operatorname{alg}}}E_n^{hF}. 
\end{align*} \item By a \emph{character} $\chi$ of $\mathbb{W}_n \langle F \rangle$, we will mean a $\mathbb{W}_n \langle F \rangle$-module which has rank one over $\mathbb{W}_n$. Suppose that $\chi$ is a summand (as a $\mathbb{W}_n\langle F \rangle$-module) in $\mathbb{W}_n\langle F \rangle$ and that $e_{\chi}$ is an idempotent of $\mathbb{W}_n\langle F \rangle$ that picks up $\chi$. Let $E_n^{\chi}$ be the wedge summand of $E_n$ associated to this idempotent, obtained as the telescope on $e_\chi \colon E_n \to E_n$. Then, \[(E_n)_*^{\vee}E_n^{\chi} \cong \mathrm{Hom}_{\mathbb{W}_n}^c(\chi\upar{F}{\mathbb{G}_n}, (E_n)_*).\] In existing examples, some of the summands of the terms $\mathcal{C}_k$s are built out of projective characters $\chi$ of $\mathbb{W}_n \langle F \rangle$, such that $E_n^\chi$ is a suspension of $E_n^{hF}$. See, for example, \cite[Section 5]{ghmr} and Section~\ref{sec:resheight2} below. \end{itemize} \end{remark} \begin{remark} One reason for using $\mathbb{W}_n$-coefficients (which don't seem to play a role in the topological story) rather than $\mathbb{Z}_p$-coefficients in these constructions is that, if $p$ divides $n$, $\mathbb{G}_n$ is ``cohomologically larger'' than $\mathbb{S}_n$ over $\mathbb{Z}_p$, but not over $\mathbb{W}_n$ since the latter is free over $\mathrm{Gal}$. So, if one wants to construct a resolution of length $n^2$ for $L_{K(n)}S^0 \simeq E_n^{h\mathbb{G}_n}$ in cases when $p$ divides $n$, the right approach appears to be to work over $\mathbb{W}_n$, and not over $\mathbb{Z}_p$. See also Remark~\ref{rem:HWinsight} below. \end{remark} We now give an outline of the steps one follows to construct a finite algebraic resolution. In practice, to construct such a resolution, it is essential to have some control over the homology $H^c_*(U, \mathbb{W}_n)$ for an open subgroup $U$ of $\mathbb{G}_n$ of finite cohomological dimension. 
In fact, all the examples of finite algebraic resolutions which we describe below restrict to a projective resolution of $\mathbb{W}_n$ as a $\mathbb{W}G{U}$-module for some choice of $U$. This motivates the name of \emph{resolutions} for these exact sequences. In practice, if $p$ is large with respect to $n$ so that $\mathbb{S}_n$ has finite cohomological dimension, the finite algebraic resolutions are projective resolutions of $\mathbb{W}_n$ as a $\mathbb{W}G{\mathbb{G}_n}$-modules. The process is inductive and goes as follows. Suppose that the $\mathbb{W}G{\mathbb{G}_n}$-modules $\mathcal{C}_{i}$ for $i\leq k-1$ together with maps $\partial_{k-1} \colon \mathcal{C}_{k-1} \to \mathcal{C}_{k-2}$ of $\mathbb{W}G{\mathbb{G}_n}$-modules have been defined so that \begin{align*} \xymatrix{ \mathcal{C}_{k-1} \ar[r]^{\partial_{k-1}} & \mathcal{C}_{k-2} \ar[r]^-{\partial_{k-2}} & \cdots \ar[r]^-{\partial_0} & \mathcal{C}_{-1}= \mathbb{W}_n \ar[r] & 0 } \end{align*} is an exact sequence. Suppose further that each term restricts to a projective $\mathbb{W}G{U}$-module. Let $N_{k-1} = \ker(\partial_{k-1})$. The projectivity assumption implies that \[ \operatorname{Tor}^{\mathbb{W}G{U}}_0(\mathbb{W}_n, N_{k-1}) = H_{0}^c(U, N_{k-1}) \cong H_{k}^c(U, \mathbb{W}_n). \] This isomorphism, the knowledge of $H_{k}^c(U, \mathbb{W}_n)$ and a generalized form of Nakayama's Lemma \cite[Lemma 4.3]{ghmr} allows us to identify a set of $\mathbb{W}G{\mathbb{G}_n}$-generators for $N_{k-1} \subseteq \mathcal{C}_{k-1}$. The trick then is to choose a $\mathbb{W}G{\mathbb{G}_n}$-module $\mathcal{C}_{k}$ of the desired form (preferably as ``small'' as possible) and to construct a map $f \colon \mathcal{C}_k \to N_{k-1}$ which surjects onto this set of generators. The map $ f$ is surjective by construction since it is chosen to make $\operatorname{Tor}_{\mathbb{W}G{U}}^{0}(f, \mathbb{F}_{p^n})$ surjective. 
The map $\partial_{k} \colon \mathcal{C}_{k} \to \mathcal{C}_{k-1}$ is then defined to be the composite $ \mathcal{C}_{k} \to N_{k-1} \to \mathcal{C}_{k-1}$, completing the inductive step. The process stops once $\partial_{d-1}$ has been defined. At this point, we define $\mathcal{C}_d= N_{d-1}=\ker(\partial_{d-1})$ and prove that $\mathcal{C}_d$ is a $\mathbb{W}G{\mathbb{G}_n}$-module of the required type. Of course, this need not be the case and proving that this happens for some series of choices of modules $\mathcal{C}_k$ and maps $\partial_k$ is usually difficult. \begin{remark}[Algebraic resolution spectral sequence]\label{rem:ARSS} If one resolves \eqref{eq:algres} into a double complex $P_{\bullet,\bullet}$ where $P_{\bullet,k} \to \mathcal{C}_k$ for $0\leq k \leq d$ is a projective resolution as $\mathbb{W}G{\mathbb{G}_n}$-modules, then the totalization of the double complex $P_{\bullet,\bullet}$ is a projective resolution of $\mathbb{W}_n$. For a (graded) profinite $\mathbb{W}G{\mathbb{G}_n}$-module $M = \{M_t\}_{t\in \mathbb{Z}}$, let $E_0^{s,k,t} = \mathrm{Hom}_{\mathbb{W}G{\mathbb{G}_n}}^c( P_{k,s}, M_t)$ and take the vertical cohomology (i.e., with $k$ fixed). The result is the $E_1$-term of a spectral sequence \[E_1^{s,k,t} =\operatorname{Ext}_{\mathbb{W}G{\mathbb{G}_n}}^s(\mathcal{C}_k, M_t) \Longrightarrow H^{s+k}_c(\mathbb{G}_n, M_t)\] with differentials $d_r \colon E_1^{s,k,t} \to E_1^{s+r, k+r-1,t}$. If the $\mathcal{C}_k$s are direct sums of modules of the form $\chi \upar{F}{\mathbb{G}_n}$ for characters $\chi$, then the $E_1$-term is easy to compute since by a version of Shapiro's lemma, we have \begin{align*}\operatorname{Ext}_{\mathbb{W}G{\mathbb{G}_n}}^s(\chi \upar{F}{\mathbb{G}_n} , M_t) \cong \operatorname{Ext}_{\mathbb{W}_n\langle F\rangle}^s(\chi , M_t) . 
\end{align*} We call this an \emph{algebraic resolution spectral sequence}.\index{algebraic resolution spectral sequence} \end{remark} Finally, applying the functor $\mathrm{Hom}_{\mathbb{W}_n}^c(-, (E_n)_*)$ to \eqref{eq:algres} gives an exact sequence in the category of Morava modules $\moravamod{n}$: \begin{align}\label{eq:moravan} \xymatrix@C10pt{0 \ar[r] & (E_n)_* \ar[r]^-{\partial^{0}} & \mathrm{Hom}_{\mathbb{W}_n}^c(\mathcal{C}_0, (E_n)_*) \ar[r]^-{\partial^{1}} & \cdots \ar[r]^-{\partial^{d}} & \mathrm{Hom}_{\mathbb{W}_n}^c(\mathcal{C}_d, (E_n)_*) \ar[r] & 0,} \end{align} where the maps $\partial^{k}$ are induced by $\partial_k$. \subsection{Topological resolutions}\index{topological resolution} With an algebraic resolution \eqref{eq:algres} in hand, the next step is to prove that it has a topological realization which is a finite resolution of $L_{K(n)}S^0$. That is, one wants to construct a finite resolution \begin{equation}\label{eq:seqEE} \xymatrix{\mathcal{E}_{-1}=L_{K(n)}S^0 \ar[r]^-{\delta_{0}} & \mathcal{E}_0 \ar[r]^-{\delta_1} & \cdots \ar[r]^-{\delta_{d}} & \mathcal{E}_d} \end{equation} in the sense of Definition~\ref{defn:finresolution} with the property that applying the functor \[ (E_n)_*^{\vee}(-) \colon \mathrm{Sp} \longrightarrow \moravamod{n} \] to this sequence gives rise to a complex of Morava modules isomorphic to \eqref{eq:moravan}. By our choice of $\mathcal{C}_k$s (see Description~\ref{def:finalgres}), there are isomorphisms of Morava modules $ (E_n)_*^{\vee}\mathcal{E}_k \cong \mathrm{Hom}_{\mathbb{W}_n}^c(\mathcal{C}_k, (E_n)_*)$ for non-uniquely determined spectra $\mathcal{E}_k$ of the form specified in part (b) of Definition~\ref{defn:finresolution}. The non-uniqueness of the $\mathcal{E}_k$s comes from the freedom in choosing the values of $m$ above. 
(Note that the spectrum $E_n^{hF}$ itself is periodic with periodicity some multiple $d_{F}^{\mathrm{top}}$ of $d_{F}^{\operatorname{alg}}$ so there is a limited number of choices.) Fixing some choice of $\mathcal{E}_k$s, we can identify \eqref{eq:moravan} with \[ \xymatrix@C15pt{ 0\ar[r] & (E_n)_*^{\vee} \ar[r]^-{\partial^{0}} & (E_n)_*^{\vee}\mathcal{E}_0 \ar[r]^-{\partial^{1}} & (E_n)_*^{\vee}\mathcal{E}_1 \ar[r] & \cdots \ar[r]^-{\partial^{d}} & (E_n)_*^{\vee}\mathcal{E}_d \ar[r] & 0 .} \] To obtain a topological realization, one must also show that the maps $\partial^k$ are of the form $(E_n)_*^{\vee}(\delta_k)$ for maps of spectra $\delta_k \colon \mathcal{E}_{k-1} \to \mathcal{E}_k$. Note that this being the case can depend on the choices of $\mathcal{E}_k$s. The existence of $\delta_k$ is established using a Hurewicz homomorphism \[ \xymatrix{[\mathcal{E}_{k-1}, \mathcal{E}_{k}] \ar[r] & \mathrm{Hom}_{(E_n)_*^{\vee}E_n}( (E_n)_*^{\vee}\mathcal{E}_{k-1},(E_n)_*^{\vee}\mathcal{E}_{k}).} \] See Proposition 2.7 \cite{ghmr} for more details. Even if the $\delta_k$s exist, it still does not imply that any choice of $\mathcal{E}_k$s and $\delta_k$s give a finite resolution in the sense of Definition~\ref{defn:finresolution}. For this to be the case, one must have first that the compositions $\delta_k \circ \delta_{k-1}$ are null-homotopic. If such choices exists, then we inductively define spectra $X_k$ and maps $\ell_k$ so that \[ \xymatrix{\Sigma^{k-1} X_{k-1} \ar@{.>}[rr]^-{\Sigma^{k-1} \ell_{k}} & & \mathcal{E}_k \ar[rr]^-{\Sigma^k j_k} & & \Sigma^k X_k \ar[rr]^-{\Sigma^k i_k} & & \Sigma^{k} X_{k-1}} \] are exact triangles (see \eqref{eq:exactfirst}). 
That is, if the map $\ell_k$ can be chosen so that $\delta_{k+1} \circ \Sigma^{k-1} \ell_{k}$ is null-homotopic, then $X_{k+1}$ is defined as the cofiber of $ \Sigma^{-1} \ell_k \colon \Sigma^{-1} X_{k-1} \to \Sigma^{-k}\mathcal{E}_k$ and there exists a map $\Sigma^{k}X_{k} \xrightarrow{\Sigma^{k}\ell_{k+1}} \mathcal{E}_{k+1}$ which factorizes $\delta_{k+1}$. To prove that $X_d \simeq L_{K(n)}S^0$, one needs to check that the map $\delta_0$ lifts along the tower \[ \xymatrix{L_{K(n)}S^0 \ar[d]_-{\delta_0} \ar@{.>}[dr] \ar@{.>}[drrr] & & & \\ X_0 & X_1 \ar[l]^-{i_1} & \ldots \ar[l] & X_d \ar[l]^-{i_d} }\] to a map $L_{K(n)}S^0 \to X_d$. If the lift exists, it will induce an isomorphism $(E_n)_*^{\vee}(L_{K(n)}S^0) \xrightarrow{\cong} (E_n)_*^{\vee}(X_d)$ so will be a $K(n)$-local equivalence. \begin{remark}[Doubling up]\label{rem:doublingup} In \eqref{eq:G1ext} above, we defined a normal subgroup $\mathbb{G}_n^1 \subseteq \mathbb{G}_n$ with the property that $\mathbb{G}_n \cong \mathbb{G}_n^1 \rtimes \mathbb{Z}_p$ where the extension splits whenever $n$ is coprime to $p$. In practice, one first constructs a finite resolution of $\mathbb{W}_n$ as a $\mathbb{W}G{\mathbb{G}_n^1}$-module and then upgrades it to a resolution of $\mathbb{W}_n$ as a $\mathbb{W}G{\mathbb{G}_n}$-module. See Corollary 4.2 of \cite{ghmr} for an example. 
\end{remark} \subsection{Diagram of resolution spectral sequences} The resolutions whose construction is described above give rise to spectral sequences which fit in a diagram: \begin{equation*} \xymatrix@C=1.5pc{E_1^{s,k,t}=\operatorname{Ext}_{\mathbb{W}G{\mathbb{G}_n}}^s(\mathcal{C}_k, (E_n)_t) \ar@{=>}[rr]^-{\mathrm{ARSS}} \ar@{=>}[d]_-{\mathrm{LHFPSS}} & & H_{c}^{s+k}(\mathbb{G}_n,(E_n)_t) \cong E_2^{s+k,t} \ar@{=>}[d]^-{\mathrm{HFPSS}} \\ E_1^{k,t-s}=\pi_{t-s}\mathcal{E}_k \ar@{=>}[rr]_-{\mathrm{TRSS}} & & \pi_{t-(s+k)}L_{K(n)}S^0.} \end{equation*} Here ARSS stands for \emph{algebraic resolution spectral sequence}\index{algebraic resolution spectral sequence}, TRSS for \emph{topological resolution spectral sequence}\index{topological resolution spectral sequence}, HFPSS for \emph{homotopy fixed point spectral sequence}\index{homotopy fixed point spectral sequence} and in LHFPSS, the L is for \emph{level-wise}. The horizontal spectral sequence have the advantage of being first quadrant spectral sequences which are zero in degrees $k>d$ and so collapse at the $E_{d+1}$-page. By Remark~\ref{rem:strongvanishing}, the vertical spectral sequences also collapse at some finite stage with a horizontal vanishing line.\index{horizontal vanishing line} \subsection{Finite resolutions at height $n=2$}\label{sec:resheight2} Now we give examples of some of the finite resolutions at height $n=2$ that exist in the literature. In the references cited, the algebraic resolutions are usually constructed in the category $\mathbb{Z}_p[\![G]\!]$ for $G=\mathbb{G}_2$ or $G=\mathbb{G}_2^1$. As is explained Remark~\ref{rem:WGmodules}, we can transport the constructions to the category of $\GMod{\mathbb{W}_2}{G}$ and this is what we do here. The reason for this change is explained in Remark~\ref{rem:HWinsight}. \begin{notation}[Finite subgroups and their modules] The maximal finite subgroups of $\mathbb{S}_n$ were given in Table~\ref{tab:finitesub}. 
Here, we discuss them more specifically in the case $n=2$. Note that in the cases $p=2,3$, $\mathbb{S}_2$ contains $p$-torsion and so has more interesting finite subgroups (see (2) and (3) below). \begin{enumerate}[(1)] \item Let $p$ be odd. Let $\sigma \in \mathrm{Gal} = \mathrm{Gal}(\mathbb{F}_{p^2}/\mathbb{F}_p)$ be the Frobenius and $\omega \in \mathbb{F}_{p^2}^{\times}$ be a primitive $(q=p^2-1)$th root of unity. The group $\mathrm{Gal}$ acts on $\mathbb{F}_{p^2}^{\times}$ by $\sigma(\omega) = \omega^\sigma =\omega^p$. We define \[ F=F_{2q}:= \mathbb{F}_{p^2}^{\times} \rtimes \mathrm{Gal}. \] For example, if $p=3$, then $F \cong SD_{16}$, the semi-dihedral group of order 16. We let $\omega \in \mathbb{W}_2\cong \mathbb{Z}_p(\omega)$ denote the Teichm\"uller lift of the same named class in $\mathbb{F}_{p^2}$. The Teichm\"uller lifts then specify an embedding of $ \mathbb{F}_{p^2}^{\times}$ in $\mathbb{S}_n$, and so of $F$ in $\mathbb{G}_n$. We define $\mathbb{W}FT{F}$-modules $\chi_i^{+}$ and $\chi_i^{-}$ for $0\leq i\leq q-1$ as follows. The underlying $\mathbb{W}_2$-module of $\chi_i^{\pm}$ is $\mathbb{W}_2$. For $x\in \chi_i^{\pm}$, define $\omega_*(x)=\omega^i x$ and \begin{align*} \sigma_*(x)&= \begin{cases} x^{\sigma} & x\in \chi_i^+ \\ -x^{\sigma} & x\in \chi_i^-. \end{cases} \end{align*} Let $\chi_i = \chi_i^{+} \oplus \chi_i^{-} $. The twisted group ring completely decomposes as a $\mathbb{W}FT{F}$-modules as \[ \mathbb{W}FT{F} \cong \bigoplus_{i=0}^{q-1} \chi_i = \bigoplus_{i=0}^{q-1} \chi_i^{+} \oplus \chi_i^{-}. \] To see this isomorphism, let $x_i \in \mathbb{W}FT{F}$ be given by \[ x_i = [e] +\omega^{-i}[\omega]+\omega^{-2i}[\omega^2] + \cdots + \omega^{-(q-2)i} [\omega^{q-2}] \] for $0\leq i \leq q-2$. The elements $x_i$ together with the elements $x_i[\sigma]$ generated $\mathbb{W}FT{F}$ as a $\mathbb{W}_2$-module. Furthermore, $\omega_*(x_i) = \omega^i x_i$ and $\sigma_*(x_i) =x_i[\sigma]$. 
So, there are isomorphisms $\chi_i^{+} \cong \mathbb{W}_2\{x_i + x_i [\sigma]\}$ and $\chi_i^{-} \cong \mathbb{W}_2\{x_i - x_i[\sigma]\}$. (Note that the $\mathbb{Z}_p$-module $\lambda_i$ of \cite{henn_res} has the property that \[\mathbb{W}_2 \otimes_{\mathbb{Z}_p} \lambda_i \cong \chi_{-i}^+ \oplus \chi_{-pi}^{+}\] when viewed as a $\mathbb{W}FT{F}$-module as described in Remark~\ref{rem:WGmodules}.) \item For $p=3$, let $G_{24}$ be an extension of $C_3 \rtimes C_4 \subseteq \mathbb{S}_2$ to $\mathbb{G}_2$ in the sense of Definition~\ref{defn:extendsubgroups}. We can give an explicit choice as follows. The subgroup of $\mathbb{S}_2$ generated by $ s= \frac{1}{2}(1+\omega \xi)$ and $t=\omega^2$ is isomorphic to $C_3 \rtimes C_4$. Here, $s$ is of order $3$, $t$ is of order $4$, and $t s t^{-1}=s^2$. We let $G_{24}$ be the group generated by $s$, $t$, $\psi=\omega \xi$ in $\mathbb{D}_2^{\times}/\xi^2 \cong \mathbb{G}_2$. Note that $\psi s =s\psi$ and $t \psi = \psi t^3$. The group $G_{24}$ is an extension of $C_3 \rtimes C_4$ in $\mathbb{G}_2$. See Section 1.1 of \cite{ghmr}. Note that $C_3$ is normal in $G_{24}$. Therefore, the $\chi_{i}^{\pm 1}$ inherit a $G_{24}$-module structure via the map \[ G_{24} \to G_{24}/C_3 \cong (\mathbb{F}_9^{\times})^2 \times \mathrm{Gal} \xrightarrow{\subseteq} SD_{16}, \] where $(\mathbb{F}_9^{\times})^2 \cong C_4$ denotes the subgroup of squares in $\mathbb{F}_9^{\times}$. \item For $p=2$, the group $\mathbb{S}_2$ contains a unique conjugacy class of maximal finite subgroups isomorphic to the binary tetrahedral group $T_{24} \cong Q_8 \rtimes \mathbb{F}_4^{\times}$. There is a choice of $T_{24}$ generated by $\omega \in \mathbb{F}_4^{\times}$ and an element of order four which we denote by $i \in Q_8$ with the property that $i^2=-1 \in \mathbb{S}_2$. For $j = \omega i \omega^{-1}$, the elements $i$ and $j$ satisfy the usual quaternion relations. 
We let $G_{48}$ be the extension of $T_{24} \subseteq \mathbb{S}_2$ to $\mathbb{G}_2$ given by $G_{48} = \langle \omega, 1+i \rangle \subseteq \mathbb{D}_2^{\times}/\xi^2 \cong \mathbb{G}_2$. The group $G_{48}$ is isomorphic to the binary octahedral group. See \cite[Lemma 2.1, 2.2]{henn_centr}. We also let $C_2 = (\pm 1)\subseteq \mathbb{S}_2$. Define $V_4=C_2 \times \mathrm{Gal}$ and $G_{12}= (C_2 \times \mathbb{F}_4^{\times}) \rtimes \mathrm{Gal} \cong C_2 \times \Sigma_3$. The group $C_4 = \langle i\rangle \subseteq \mathbb{S}_2$ also extends to a finite subgroup of $\mathbb{G}_2$ which is a cyclic group of order eight given by $C_8 =\langle 1+i\rangle \subseteq \mathbb{D}_2^{\times}/\xi^2$. Finally, we let $\pi=1+2\omega$, which has the property that $\det(\pi)=\pi \pi^{\sigma}=3 \in \mathbb{Z}_2^{\times}/(\pm 1)$ is a topological generator. We let $G_{48}' = \pi G_{48} \pi^{-1}$. This group is conjugate to $G_{48}$ in $\mathbb{G}_2$, but not in $\mathbb{G}_2^1$. \end{enumerate} \end{notation} \begin{remark}\label{rem:tmf} The spectra $E_2^{hF}$ for $F$ finite are often equivalent to the $K(2)$-localization of topological modular forms with level structures (see \cite{behrens_chapter}). For example, \begin{align*} L_{K(2)} {TMF} &\simeq E_2^{hG_{24}} , & L_{K(2)} {TMF}_0(2) &\simeq E_2^{hQ_8} \simeq E_2^{hSD_{16}} \vee \Sigma^8 E_2^{hSD_{16}} \end{align*} at $p=3$, and at $p=2$, \begin{align*} L_{K(2)} {TMF} &\simeq E_2^{hG_{48}}, & L_{K(2)} {TMF}_0(3) &\simeq E_2^{hG_{12}}, & L_{K(2)} {TMF}_0(5)&\simeq E_2^{hC_8}. \end{align*} Note that, in \cite{behrens_modular}, Behrens writes $ L_{K(2)} {TMF}_0(2) \simeq E_2^{hD_8} $ (for $p=3$). The difference comes from the fact that he is using the formal group law $\Gamma_C$ of a super-singular curve while we are using the Honda formal group law $\Gamma_2$. 
The groups $Q_8$ and $D_8$ are extensions to $\mathbb{G}(\mathbb{F}_8, \Gamma_2)$ and $\mathbb{G}(\mathbb{F}_8, \Gamma_C)$ respectively of the subgroup $C_4 \subseteq \mathrm{Aut}_{\overline{\mathbb{F}}_3}(\Gamma_2) \cong \mathrm{Aut}_{\overline{\mathbb{F}}_3}(\Gamma_C) $. See Remark~\ref{rem:choiceGamma}. \end{remark} Recall from Remark~\ref{rem:doublingup} that in practice, one begins by constructing a finite resolution of the group $\mathbb{G}_2^1$. (The groups $\mathbb{G}_2^1$ and $\mathbb{S}_2^1$ were defined right before \eqref{eq:G1ext}.) \begin{example}[The Duality Resolutions]\label{ex:duality}\index{Duality resolution} The following examples of resolutions of $\mathbb{W}_2$ as a $\mathbb{W}GT{\mathbb{G}_2^1}$-module have been coined the \emph{duality resolutions}. They are self-dual in a suitable sense (see \cite[Section 3.4]{HKM} or \cite[Section 3.3]{beaudry_res}). In fact, this duality is related to the virtual Poincar\'e duality of the group $\mathbb{G}_2^1$. They are given by exact sequences \begin{equation}\label{eq:dualityalg} \xymatrix{0 \ar[r] & \mathcal{D}_3 \ar[r] & \mathcal{D}_2 \ar[r] & \mathcal{D}_1 \ar[r] & \mathcal{D}_0 \ar[r] & \mathbb{W}_2 \ar[r] & 0} \end{equation} such that each $\mathcal{D}_i$ is isomorphic to a direct sum of modules of the form \begin{align}\label{eq:DCtype} \chi\upar{H}{\mathbb{G}_2^1} := \mathbb{W}GT{\mathbb{G}_2^1}\otimes_{\mathbb{W}FT{H}} \chi \end{align} for $H$ an extension to $\mathbb{G}_2^1$ of a finite subgroup of $\mathbb{S}_2^1$ and $\chi$ is $\mathbb{W}FT{H}$-module which restricts to a free module of rank one over $\mathbb{W}_2$. They are minimal in the sense that their associated algebraic resolution spectral sequence \begin{equation}\label{eq:dualADSS} E_1^{r,q} = \operatorname{Ext}_{\mathbb{W}GT{\mathbb{G}_2^1}}^q(\mathcal{D}_r, \mathbb{F}_{p^2}) \Longrightarrow H_c^{p+q}(\mathbb{G}_2^1, \mathbb{F}_{p^2}) \end{equation} collapses at the $E_1$-term. 
They can be realized as finite resolutions of $E_2^{h\mathbb{G}_2^1}$ \begin{equation}\label{eq:dualitytop} \xymatrix{E_2^{h\mathbb{G}_2^1} \ar[r] & \mathcal{E}D_0 \ar[r] & \mathcal{E}D_1 \ar[r] & \mathcal{E}D_2 \ar[r] & \mathcal{E}D_3.} \end{equation} \begin{enumerate}[(a)] \item Let $p\geq 5$. There is an exact sequence \eqref{eq:dualityalg} with \begin{align*} \mathcal{D}_0 &\cong \mathcal{D}_3 \cong \mathbb{W}_2\upar{F}{\mathbb{G}_2^1}, & \mathcal{D}_1 &\cong \mathcal{D}_2 \cong (\chi_{p-1}^+\oplus \chi_{1-p}^{+}) \upar{F}{\mathbb{G}_2^1} \ . \end{align*} This can be realized as a finite resolution \eqref{eq:dualitytop} with \begin{align*} \mathcal{E}D_0& \simeq \mathcal{E}D_3 \simeq E_2^{hF}, & \mathcal{E}D_1 &\simeq \mathcal{E}D_2 \simeq \Sigma^{2(p-1)} E_2^{hF} \vee \Sigma^{2(1-p)} E_2^{hF}.\end{align*} These were constructed by Henn \cite[Theorem 5]{henn_res}. See also Lader \cite{lader}. \item Let $p=3$. There is an exact sequence \eqref{eq:dualityalg} with \begin{align*} \mathcal{D}_0 &\cong \mathcal{D}_3 \cong \mathbb{W}_2\upar{G_{24}}{\mathbb{G}_2^1}, & \mathcal{D}_1 &\cong \mathcal{D}_2 \cong \chi_4^{-} \upar{SD_{16}}{\mathbb{G}_2^1} \ . \end{align*} This can be realized as a finite resolution \eqref{eq:dualitytop} with \begin{align*} \mathcal{E}D_0 &\simeq E_2^{hG_{24}}, & \mathcal{E}D_1 &\simeq \Sigma^8 E_2^{hSD_{16}}, & \mathcal{E}D_2 &\simeq \Sigma^{40}E_2^{hSD_{16}}, & \mathcal{E}D_3 &\simeq \Sigma^{48}E_2^{hG_{24}}. \end{align*} These were constructed by Goerss, Henn, Mahowald and Rezk in \cite{ghmr}. \item Let $p=2$. There is an exact sequence \eqref{eq:dualityalg} with \begin{align*} \mathcal{D}_0 &\cong \mathbb{W}_2\upar{G_{48}}{\mathbb{G}_2^1}, & \mathcal{D}_1 &\cong \mathcal{D}_2 \cong \mathbb{W}_2\upar{G_{12}}{\mathbb{G}_2^1}, & \mathcal{D}_3 \cong \mathbb{W}_2\upar{G_{48}'}{\mathbb{G}_2^1} \ . 
\end{align*} This can be realized as a finite resolution \eqref{eq:dualitytop} with \begin{align*} \mathcal{E}D_0 &\simeq E_2^{hG_{48}}, & \mathcal{E}D_1 &\simeq E_2^{hG_{12}}, & \mathcal{E}D_2 &\simeq \Sigma^{48}E_2^{hG_{12}}, & \mathcal{E}D_3 &\simeq \Sigma^{48}E_2^{hG_{48}}. \end{align*} These were constructed by Beaudry, Bobkova, Goerss, Henn, Mahowald, and Rezk in \cite{henn_res, beaudry_res, BobkovaGoerss}. In these references, the resolution is constructed for $E_2^{h\mathbb{S}_2}$. However, using the ideas of \cite{henn_centr} it is now straightforward to construct it for $E_2^{h\mathbb{G}_2}$.\end{enumerate} \end{example} \begin{remark}\label{rem:dualityfullresp3} If $p\neq 2$, the algebraic resolution can be doubled up in the sense of Remark~\ref{rem:doublingup} and the result can be realized topologically. For $p\geq 5$, this gives a resolution \begin{equation*} \xymatrix@=1.5pc{ L_{K(2)}S^0 \ar[r] & E_2^{hF} \ar[r]^-{\delta_0} & X \vee E_2^{hF} \ar[r]^-{\delta_1} & X \vee X \ar[r]^-{\delta_2} &E_2^{hF} \vee X \ar[r]^-{\delta_3} & E_2^{hF} } \end{equation*} where $X = \Sigma^{2(p-1)} E_2^{hF} \vee \Sigma^{2(1-p)} E_2^{hF} $, and for $p=3$, we get \begin{equation*} \xymatrix@=0.6pc{ L_{K(2)}S^0 \ar[r] & E_2^{hG_{24}} \ar[r]^-{\delta_0} & E_2^{hSD_{16}} \vee E_2^{hG_{24}} \ar[r]^-{\delta_1} & \Sigma^{48}E_2^{hSD_{16}}\vee E_2^{hSD_{16}} \\ & \ar[r]^-{\delta_2} & \Sigma^{48}(E_2^{hG_{24}} \vee E_2^{hSD_{16}}) \ar[r]^-{\delta_3} & \Sigma^{48}E_2^{hG_{24}} } \end{equation*} However, the duality resolution at $p=2$ cannot be doubled up. \end{remark} \begin{example}[The Centralizer Resolutions]\index{centralizer resolution} The following two resolutions of the trivial $\mathbb{W}GT{\mathbb{G}_2^1}$-modules are called \emph{centralizer resolutions} because their construction has as a key input Henn's Centralizer Approximation Theorem \cite[Theorem 1.4]{henn_duke}. 
They are given by exact sequences \begin{equation}\label{eq:centalg} \xymatrix{0 \ar[r] & \mathcal{C}_3 \ar[r] & \mathcal{C}_2 \ar[r] & \mathcal{C}_1 \ar[r] & \mathcal{C}_0 \ar[r] & \mathbb{W}_2 \ar[r] & 0,} \end{equation} where the $\mathcal{C}_i$s are of the form described in \eqref{eq:DCtype}. They can be realized as finite resolutions \begin{equation}\label{eq:centtop} \xymatrix{E_2^{h\mathbb{G}_2^1} \ar[r] & \mathcal{E}C_0 \ar[r] & \mathcal{E}C_1 \ar[r] & \mathcal{E}C_2 \ar[r] & \mathcal{E}C_3.} \end{equation} The algebraic centralizer exact sequences \eqref{eq:centalg} described below are \emph{$\mathcal{F}$-resolutions} in the sense of \cite[\S 3.5.1]{henn_res} and \cite[\S 1.2]{henn_centr}. We will not explain what this means, but a consequence of this fact is that the algebraic centralizer resolutions can be doubled up in the sense of Remark~\ref{rem:doublingup}. The downside of the centralizer resolutions is that they are ``larger'' than the duality resolutions. For example, the analogues of \eqref{eq:dualADSS} for the centralizer resolutions do not collapse. As a consequence, the associated algebraic and topological spectral sequences are less efficient for computations. Nonetheless, having different resolutions offers different perspectives and the centralizer resolutions have been crucial in recent computations. See for example \cite{BobkovaGoerss, gh_bcdual}. \begin{enumerate}[(a)] \item Let $p=3$. There is an exact sequence \eqref{eq:centalg} with \begin{align*} \mathcal{C}_0 &\cong \mathbb{W}_2\upar{G_{24}}{\mathbb{G}_2^1}, & \mathcal{C}_1 &\cong \chi_4^{-} \upar{SD_{16}}{\mathbb{G}_2^1} \oplus \chi_2^{+} \upar{G_{24}}{\mathbb{G}_2^1},\\ \mathcal{C}_2 &\cong (\chi_{2}^+ \oplus \chi_{-2}^+) \upar{SD_{16}}{\mathbb{G}_2^1}, & \mathcal{C}_3 &\cong \mathbb{W}_2\upar{SD_{16}}{\mathbb{G}_2^1} \ . 
\end{align*} This can be realized as a finite resolution \eqref{eq:centtop} with \begin{align*} \mathcal{E}C_0 &\simeq E_2^{hG_{24}}, & \mathcal{E}C_1 &\simeq \Sigma^8 E_2^{hSD_{16}} \vee \Sigma^{36} E_2^{hG_{24}}, \\ \mathcal{E}C_2 &\simeq \Sigma^{36}E_2^{hSD_{16}} \vee \Sigma^{44} E_2^{hSD_{16}}, & \mathcal{E}C_3 &\simeq \Sigma^{48}E_2^{hSD_{16}}. \end{align*} This is constructed by Henn in \cite{henn_res}. See also \cite[Section 4]{gh_bcdual}. Since $\mathbb{G}_2 \cong \mathbb{G}_2^1 \times \mathbb{Z}_3$, Remark~\ref{rem:doublingup} applies and we get a resolution of $L_{K(2)}S^0$. \item Let $p=2$. There is an exact sequence \eqref{eq:centalg} with \begin{align*} \mathcal{C}_0 &\cong \mathbb{W}_2\upar{G_{48}}{\mathbb{G}_2^1} \oplus \mathbb{W}_2\upar{G_{48}'}{\mathbb{G}_2^1} , & \mathcal{C}_1 &\cong \mathbb{W}_2\upar{C_8}{\mathbb{G}_2^1} \oplus \mathbb{W}_2\upar{G_{12}}{\mathbb{G}_2^1},\\ \mathcal{C}_2 &\cong \mathbb{W}_2\upar{V_4}{\mathbb{G}_2^1}, & \mathcal{C}_3 &\cong \mathbb{W}_2\upar{G_{12}}{\mathbb{G}_2^1} \ . \end{align*} This can be realized as a finite resolution \eqref{eq:centtop} with \begin{align*} \mathcal{E}C_0 &\simeq E_2^{hG_{48}} \vee E_2^{hG_{48}'}, & \mathcal{E}C_1 &\simeq E_2^{hC_8} \vee E_2^{hG_{12}}, & \mathcal{E}C_2 &\simeq E_2^{hV_4}, & \mathcal{E}C_3 &\simeq E_2^{hG_{12}}. \end{align*} \end{enumerate} This is constructed by Henn \cite{henn_centr}. Note again that, as opposed to the duality resolution at $p=2$, the algebraic centralizer resolution \emph{can} be doubled up and the resulting sequence can be realized topologically to give a finite resolution of $L_{K(2)}S^0$. See \cite[Theorem 1.1, 1.5]{henn_centr}. \end{example} \begin{remark} The doubled up centralizer resolution at $n=p=2$ is very large compared to the duality resolution available at odd primes. 
However, there is a hand-crafted way to glue a duality resolution with a centralizer resolution to obtain a much smaller resolution called the \emph{hybrid resolution}.\index{hybrid resolution}
\subsection{Chromatic splitting and reassembly}\label{sec:cscmore} In this section, we discuss the chromatic splitting conjecture\index{chromatic splitting conjecture} in more detail, putting an emphasis on new developments and points that were not discussed in \cite{cschov}. The chromatic splitting conjecture (CSC) gives a fairly simple prediction of $L_{n-1}L_{K(n)}S^0$ in the chromatic fracture square. Although the original conjecture does not hold for the prime $p=2$ and $n=2$ as it was stated in \cite{cschov}, the fundamental idea behind the conjecture remains intact. The philosophy behind the CSC is that chromatic reassembly is governed by the structure of $H^*_c(\mathbb{G}_n, \mathbb{W})$, via the map $H^*_c(\mathbb{G}_n, \mathbb{W}) \to H^*_c(\mathbb{G}_n, (E_n)_*)$ to the $E_2$-term of the homotopy fixed point spectral sequence \eqref{eq:hfpss}. As discussed in Conjecture~\ref{conj:vanishing}, this map is expected to be an isomorphism onto $H^*_c(\mathbb{G}_n, (E_n)_0)$. The isomorphism \eqref{eq:rationaliso} implies at the very least that there is an inclusion \[ \Lambda_{\mathbb{Z}_p}(x_1, x_2, \ldots, x_{n}) \subseteq H_c^*(\mathbb{G}_n, \mathbb{W}_n) \] for generators $x_i$ of cohomological degree $2i-1$ at all primes and heights. Here, we always choose $x_1=\zeta_n$ for $\zeta_n$ as in \eqref{eq:zetan} and choose the other $x_i$s so that they do not map to zero in $H_c^*(\mathbb{G}_n, \mathbb{W}_n)/p$. Conjecture~\ref{conj:vanishing} then implies the existence of non-zero classes $x_{i}\in E_2^{2i-1,0}$ in \[ E_2^{s,t} \cong H_c^{s}(\mathbb{G}_n, (E_n)_t) \Longrightarrow \pi_{t-s}L_{K(n)}S^0. \] Further, at heights $n\leq 2$ the following phenomena have been observed. \begin{conjecture} If $p$ is odd, or if $p=2$ and $n$ is odd, then \[ E_{\infty}^{*,0} \cong \Lambda_{\mathbb{Z}_p}(e_1, e_2, \ldots, e_{n})\subseteq \pi_*L_{K(n)}S^0 \] for some choice of classes $e_i$ detected by a multiple of $x_i$. 
If $p=2$ and $n$ is even, then \[ E_{\infty}^{*,0} \cong \Lambda_{\mathbb{Z}_2}(f, e_1, e_2, \ldots, e_{n})/(2f, e_{n} f) \subseteq \pi_*L_{K(n)}S^0. \] for $e_i$ as above and some choice of class $f$ detected by $\widetilde{\chi}_n$ (see \eqref{eq:chin}). \end{conjecture} \begin{remark} If $p$ is large with respect to $n$, then there is no ambiguity about the choice of classes $e_i$ because of the sparsity of the spectral sequence. At $n=2$ and $p=3$, once can choose $e_2$ to be detected by $3x_2$ and at $n=p=2$, by $4x_2$. \end{remark} The dichotomy between odd and even heights for the prime $2$ comes from the following observations. If $n$ is odd, then the inclusion of $C_2 \cong (\pm 1)$ in $\mathbb{G}_n$ splits the map $\chi_n$ and \[ H^*(C_2, \mathbb{Z}_2) \cong \mathbb{Z}_2[\widetilde{\chi}_n]/(2\widetilde{\chi}_n) \to H^*(C_2, \mathbb{W}_n) \xrightarrow{\chi_n^*} H_c^*(\mathbb{G}_n, \mathbb{W}_n) \] is an inclusion. However, if the image of $\widetilde{\chi}_n$ in $H_c^2(\mathbb{G}_n, (E_n)_0)$ is non-trivial, then it must support a non-trivial $d_3$ differential since its image in the HFPSS computing $E_n^{hC_2}$ has this property by \cite[Theorem 1.3]{hahnshi}. (The image of $\widetilde{\chi}_n$ would be the class $u_{2\sigma}^{-1}a_{\sigma}^2$ which supports a non-zero $d_3$ differential.) At $n=2$, what we observe is that $(\widetilde{\chi}_2^2)$ is the kernel of $\chi_2^*$ so that the latter induces the inclusion of $\mathbb{Z}_2[\widetilde{\chi}_2]/(2\widetilde{\chi}_2,\widetilde{\chi}_2^2)$ into $ H_c^*(\mathbb{G}_2, \mathbb{W}_2)$. We do not know how this generalizes at even heights $n>2$. As is discussed in \cite{cschov}, the induced maps $e_i \colon S^{1-2i} \to L_{n-1}S^0$ (for some choice of $e_i$) are conjectured to factor through $L_{n-i}S^0$. Since $f$ has order $2$, it induces a map $f \colon S^{-2}/2 \to L_{K(n)}S^0$, so after localizing at $E_{n-1}$, we get a map $f\colon L_{n-1} S^{-2}/2 \to L_{n-1}L_{K(n)}S^0$. 
The CSC as stated in \cite{cschov} did not take into account this class $f$. Based on what we see in the case $n=p=2$, Conjecture~\ref{conj:csc2} below is a suggestion for a revised version of the CSC in its strongest form which includes $f$. Below, for spectra $X_i$, we let $\Lambda_{L_{n-1}S^0_p}(X_1, \ldots, X_n)$ be the wedge of $L_{n-1}S^0_p$ and of $X_{i_1}\wedge \ldots \wedge X_{i_j}$ for $1\leq i_1<\ldots <i_j \leq n$. Let \[\iota \colon L_{n-1}S^0_p \to \Lambda_{L_{n-1}S^0_p}(X_1, \ldots, X_n)\] be the inclusion of the $L_{n-1}S^0_p$ summand. \begin{conjecture}[Strong CSC]\label{conj:csc2} There is an equivalence in the category of $E_{n-1}$-local spectra \[ L_{n-1}L_{K(n)}S^0 \simeq \Lambda_{L_{n-1}S^0_p} \left( L_{n-i}S^{1-2i} : 1\leq i \leq n \right) \] if $p\neq 2$, or $p=2$ and $n$ odd. The map $\iota$ corresponds to the unit $L_{n-1}S^0_p \to L_{n-1}L_{K(n)}S^0_p$. If $p=2$ and $n$ is even, there is an $E_{n-1}$-local equivalence \[ L_{n-1}L_{K(n)}S^0 \simeq \Lambda_{L_{n-1}S^0_2} \left( L_{n-i}S^{1-2i} : 1\leq i \leq n \right)\wedge \Lambda_{L_{n-1}S^0_2} \left(L_{n-1}S^{-2}/2 \right) . \] In this case, the map $\iota \wedge \iota$ corresponds to the unit $L_{n-1}S^0_p \to L_{n-1}L_{K(n)}S^0_p$. \end{conjecture} \begin{remark} A criterion for this revision is for the conjecture at odd primes to remain as stated in \cite{cschov}. We have made what we think is a minimal modification to reflect what we see at $n=p=2$. However, other reformulations are possible and we concede that this is a somewhat arbitrary choice. \end{remark} Note that Conjecture~\ref{conj:csc2} implies the weak CSC (Conjecture~\ref{conj:wcsc}), saying that $L_{n-1}S^0 \to L_{n-1}L_{K(n)}S^0$ splits. Further, both Conjecture~\ref{conj:csc2} and Conjecture~\ref{conj:wcsc} hold if one replaces the sphere by any finite complex $X$. However, even Conjecture~\ref{conj:wcsc} does not hold for arbitrary spectra. 
In \cite{devcounterBP}, Devinatz proves that it fails for the $p$-completion of $BP$. Before giving examples we would like to point out that, among its many consequences, the strong form of the chromatic splitting conjecture would also imply \begin{conjecture}\label{conj:fingen} For any $n \ge 0$ and any prime $p$, the homotopy groups $\pi_*L_{K(n)}S^0$ are degreewise finitely generated $\mathbb{Z}_p$-modules. \end{conjecture} \begin{example}\label{ex:csc} At height $n=1$, the equivalence $L_0L_{K(1)}S^0 \simeq L_0(S^0_p \vee S^{-1}_p)$ holds for all primes and was discussed in Section~\ref{sec:reassemlyn1}, see \eqref{eq:cscn1first}. \end{example} \begin{theorem} At height $n=2$, if $p$ is odd, then \begin{equation*} L_1L_{K(2)}S^0 \simeq L_1(S^0_p \vee S^{-1}_p) \vee L_0(S_p^{-3} \vee S_p^{-4}) . \end{equation*} See \cite{behse2, GoerssSplit}. If $p=2$, there is an equivalence \begin{equation*} L_1L_{K(2)}S^0 \simeq L_1(S^0_2\vee S^{-1}_2 \vee S^{-2}/2 \vee S^{-3}/2) \vee L_0(S_2^{-3} \vee S_2^{-4} ). \end{equation*} See \cite{BGH}. \end{theorem} \begin{remark} The fact that the CSC in its original form would most likely fail at $n=p=2$ was first noticed by Mark Mahowald. His intuition was based on the computations of Shimomura and Wang \cite{shimwang}, who identify $v_1$-torsion-free summands in the $E_2$-term of the Adams-Novikov Spectral Sequence for $\pi_\ast L_{K(2)}V(0)$ that are not predicted by the CSC. Their work, however, did not preclude the possibility of differentials that could have eliminated the summands not accounted for in the original statement of the CSC. \end{remark} The CSC is one of the key inputs for chromatic reassembly, which recovers $L_nS^0$ from $L_{K(n)}S^0$ and $L_{n-1}S^0$ via the chromatic fracture square \eqref{eq:chromaticfracture}. We discuss this a little more here. Let $F_n$ be the fiber of the map $L_{n-1}S^0 \to L_{n-1}L_{K(n)}S^0$, whose homotopy type is predicted by the CSC. 
The chromatic fracture square implies that $F_n$ is also the fiber of $L_nS^0 \to L_{K(n)}S^0$. The homotopy groups of $L_nS^0$ can be reassembled from the long exact sequence on homotopy groups \[ \xymatrix{\pi_{k}\Sigma^{-1} L_{K(n)} S^0 \ar[r] & \pi_k F_n \ar[r] & \pi_{k}L_nS^0 \ar[r] & \pi_k L_{K(n)}S^0 \ar[r] & \pi_{k}\Sigma F_n.} \] We explained chromatic reassembly at height $n=1$ in Section~\ref{sec:reassemlyn1}. At this point, we would like to at least give the reader an idea of chromatic reassembly at chromatic level $n=2$. A description of reassembly for $L_2S^0_p$ itself would be very technical, so instead, we describe the reassembly process for $L_2S^0/p$ for primes $p\geq 5$, which is significantly simpler. To do this, we first give a qualitative description of the homotopy groups of $\pi_*(L_{K(2)}S^0/p)$. For primes $p \ge 5$, the homotopy fixed point spectral sequence is too sparse for differentials or extensions, so collapses to give \[ \pi_m(L_{K(2)}S^0/p) \cong \bigoplus_{m=t-s} H_c^s(\mathbb{G}_2, (E_2)_t/p). \] The groups on the right side of this isomorphism can be deduced from the computation by Shimomura and Yabe in \cite{shimyabe} and were discussed in Sadofsky \cite{sadofsky_picture}. They are computed directly using the finite resolution (1) of Example~\ref{ex:duality} by Lader in \cite{lader}. See \cite[Corollaire 4.4]{lader} and the discussion before it for an explicit description. We make a few observations about the answer: \begin{enumerate}[(a)] \item The homotopy groups $\pi_{*}(L_{K(2)}S^0/p)$ form a module over $\mathbb{F}_p[v_1]\otimes \Lambda_{\mathbb{F}_p} ( \zeta_2)$ for a class $v_1 = u^{1-p} \in \pi_{2(p-1)}(L_{K(2)}S^0/p)$ and $\zeta_2 \in \pi_{-1}(L_{K(2)}S^0/p)$ as in Remark~\ref{rem:3termfiber}. The group $\pi_{m}(L_{K(2)}S^0/p)$ is zero if \[ 2k(p-1) <m < 2(k+1)(p-1) -4. 
\] \item Since $L_0 S^0/p \simeq \ast$, the chromatic fracture square gives an equivalence $ L_{1}S^0/p \simeq L_{K(1)}S^0/p$ and, similarly, $L_1L_{K(2)}S^0/p\simeq L_{K(1)}L_{K(2)}S^0/p$. Furthermore, on homotopy groups, the effect of $E_1$-localization on $L_{K(2)}S^0/p$ is to invert $v_1$. \item There is unbounded $v_1$-torsion in $\pi_{*}(L_{K(2)}S^0/p)$. However, the homotopy groups are finite in each degree $m$. In fact, for any class $x$ detected in $H_c^s(\mathbb{G}_2, (E_2)_t/p)$ for $t<0$, if $t+ 2k(p-1)\geq 0$, then $v_1^{k}x=0$. That is, multiplication by $v_1$ never ``crosses'' the $s=t$ line in $H_c^s(\mathbb{G}_2, (E_2)_t/p)$. \item The only homotopy classes in $\pi_*L_{K(2)}S^0$ that are not $v_1$-torsion are given by $\mathbb{F}_p[v_1]\otimes \Lambda_{\mathbb{F}_p}(\zeta_2, h_0)$ for a class $h_0 \in \pi_{ 2(p-1)-1}(L_{K(2)}S^0/p)$ that is the image of the homotopy class $\alpha_1 \in \pi_{2(p-1)-1}S^0_{(p)}$. Furthermore, \[ \pi_*(L_1L_{K(2)}S^0/p) \cong v_1^{-1}\pi_{*}(L_{K(2)}S^0/p) \cong \mathbb{F}_p[v_1^{\pm 1}] \otimes \Lambda_{\mathbb{F}_p}(\zeta_2, h_0). \] Under the canonical map $\pi_{*}(L_{1}S^0/p) \to \pi_{*}(L_{1}L_{K(2)}S^0/p)$, the class $\zeta_1 \in \pi_{-1}(L_{1}S^0/p)$ maps to $v_1^{-1}h_0$. 
\end{enumerate} We use the long exact sequence on homotopy groups associated to the fiber sequence \[ F_2/p \to L_{2}S^0/p \to L_{K(2)}S^0/p \] to deduce that \begin{align*} \pi_*(L_2S^0/p) &\cong \mathrm{Tor}_{v_1}(\pi_{*}(L_{K(2)}S^0/p))\oplus \mathbb{F}_p[v_1] \{1,h_0\} \oplus \Sigma^{-1}\mathbb{F}_p[v_1]/(v_1^{\infty}) \{\zeta_2,h_0\zeta_2\}, \end{align*} where $\mathrm{Tor}_{v_1}(\pi_{*}(L_{K(2)}S^0/p))$ is the $v_1$-power torsion subgroup of $\pi_{*}(L_{K(2)}S^0/p)$ and $\mathbb{F}_p[v_1]/(v_1^{\infty})$ is the cokernel of the canonical map in the following short exact sequence \[ \xymatrix{0 \ar[r] & \mathbb{F}_p[v_1] \ar[r] & \mathbb{F}_p[v_1^{\pm 1}] \ar[r] & \mathbb{F}_p[v_1]/(v_1^{\infty}) \ar[r] & 0.} \] All available proofs of the CSC, even in its weakest form, have been brutally computational. Short of simply computing $\pi_*L_{K(2)}S^0$ explicitly, the steps have been: \begin{enumerate}[(a)] \item Prove that there are non-zero homotopy classes $e_1$ and $e_2$, and if $p=2$ an additional class $f$ detecting a non-trivial class of order $2$. This gives the map \[ \xymatrix{S^0 \vee S^{-1} \vee S^{-3} \vee S^{-4} \ar[r]^-{\varphi} & L_{K(2)}S^0,} \] where $\varphi = 1 \vee e_1 \vee e_2 \vee e_1e_2$. If $p=2$, there is an additional factor of $S^{-2}/2 \vee S^{-3}/2 \xrightarrow{f \vee e_1f} L_{K(2)}S^0$. \item Compute $v_1^{-1}\pi_*L_{K(2)}(\varphi \wedge X)$ for a finite type $1$ complex, usually $X=S^0/p$. \item Compute $p^{-1}\pi_*L_{K(2)}S^0$. \item Reassemble the fracture square. \end{enumerate} \begin{remark} Let $p$ be an odd prime. In general, the CSC predicts that $1 \vee \zeta_n$ induces an equivalence $L_{K(n-1)}S^0 \vee L_{K(n-1)}S^{-1} \xrightarrow{\simeq} L_{K(n-1)}L_{K(n)}S^0$. In particular, it implies that $L_{K(n-1)}S^0 \simeq L_{K(n-1)}E_n^{h\mathbb{G}_{n}^1}$. There is another conjecture of Hopkins related to chromatic splitting called the \emph{algebraic chromatic splitting conjecture} \cite[Section 14]{petersoneric}. 
It states: \begin{conjecture}[Algebraic CSC]\label{conj:ACSC} Let $p$ be an odd prime, possibly large with respect to $n$. Then \[ \underset{i}{\lim}{}_{(E_n)_*E_n}^s (E_n)_t/(p, v_1, \ldots, v_{n-2}, v_{n-1}^i ) \cong \begin{cases} (E_n)_*/(p, v_1, \ldots, v_{n-2}) & s=0 \\ v_{n-1}^{-1}(E_n)_*/(p, v_1, \ldots, v_{n-2}) & s=1 \\ 0 & s>1. \end{cases} \] \end{conjecture} \noindent Here, the limit (and its derived functors) is taken in the category of $(E_n)_*E_n$-comodules, where $(E_n)_*E_n = \pi_*(E_n \wedge E_n)$ is the group of non-completed $E_n$-cooperations. Provided that $(E_n)_*\zeta_n$ generates the ${\lim}^1$-term, Conjecture~\ref{conj:ACSC} implies that $1 \vee \zeta_n$ is an $E_n$-local equivalence, thereby verifying the chromatic splitting conjecture at height $n$ for finite type $n-1$ complexes. \end{remark} \begin{remark} As explained above, the chromatic splitting conjecture is a fundamentally transchromatic statement. In a series of papers, Torii uses generalized character theory to study the relation between adjacent strata in the chromatic filtration. In particular, he shows in \cite{torii_zeta} that under the canonical map \[ \xymatrix{\pi_{-1}L_{K(n-1)}S^0 \ar[r] & \pi_{-1}L_{K(n-1)}L_{K(n)}S^0} \] the class $\zeta_{n-1}$ maps non-trivially. \end{remark} \subsection{Invertibility and duality}\label{sec:invdual} In analogy with the problem of computing the group of units of a classical ring, an important aspect of understanding a symmetric monoidal category $(\mathcal{C}, \otimes, I )$ with unit $I$ is to classify its invertible objects. An object $X \in \mathcal{C}$ is invertible if there exists another object $Y \in \mathcal{C}$ such that $X \otimes Y \cong I$ where $I$ is the unit of the symmetric monoidal structure. If the collection of invertible objects forms a set, then it is an abelian group under $\otimes$ and this group is called the \emph{Picard group} of $\mathcal{C}$, denoted $\mathrm{Pic}(\mathcal{C})$. 
The Picard group\index{Picard group} of a symmetric monoidal $\infty$-category $\mathcal{C}$ is defined to be the Picard group of the homotopy category of $\mathcal{C}$, i.e., we set $\mathrm{Pic}(\mathcal{C}) = \mathrm{Pic}(\mathrm{Ho}(\mathcal{C}))$. If $\mathcal{C}$ is a triangulated category, the Picard group always contains a cyclic subgroup generated by the shift of the unit $I[1]$. For example, the Picard group of the stable homotopy category contains a copy of $\mathbb{Z}$ generated by $S^1$. In fact, in this case, there is nothing else and $\mathrm{Pic}(\mathrm{Sp})\cong \mathbb{Z} \langle S^1\rangle$, see \cite{hms_pic}. We can view $\mathrm{Sp}_E$ as a symmetric monoidal category with product $L_E(-\wedge -)$ and unit $L_ES^0$. The objects $L_ES^n$ for $n\in \mathbb{Z}$ are invertible in this category. One of the fascinating aspects of $E$-local homotopy theory is that, for some choices of $E$, there are invertible objects in $\mathrm{Sp}_E$ which are not of the form $L_ES^n$ for some $n\in \mathbb{Z}$. When $E=K(n)$ for $0< n <\infty$, the Picard group is in fact much larger; we remark that for $E=K(n)$ and $E=E(n)$ and arbitrary $n$, the collection of isomorphism classes of invertible objects in $\mathrm{Sp}_E$ indeed forms a set, see \cite[Proposition 7.6]{hms_pic} and \cite[Proposition 1.4]{hs_pic}. The Picard group of the $K(n)$-local category (with $p$ fixed and suppressed from the notation) is usually denoted by $\mathrm{Pic}_n$. Note that, if $X \in \mathrm{Pic}_n$, its inverse is the Spanier--Whitehead dual of $X$ in the $K(n)$-local category: $D_nX = F(X, L_{K(n)}S^0)$. By Galois descent~\cite[Proposition 10.10]{mathew_galois}, there is an isomorphism \[ \mathrm{Pic}_n \cong \mathrm{Pic}_{K(n)}(\mathrm{Mod}_{E_n}^{\mathbb{G}_n}), \] where $\mathrm{Mod}_{E_n}^{\mathbb{G}_n}$ is the $K(n)$-local category of ${\mathbb{G}_n}$-twisted $E$-module spectra. 
The right hand side has a natural algebraic analogue given by \[ \mathrm{Pic}_n^{\operatorname{alg}} := \mathrm{Pic}(\moravamod{n}) \] where $\moravamod{n}$ is the category of Morava modules (see Definition~\ref{defn:moravamodules}). A Morava module $M$ is in $\mathrm{Pic}_n^{\operatorname{alg}}$ if and only if it is free of rank one over $(E_n)_*$. Since $(E_n)_*$ is two periodic, $\mathrm{Pic}_n^{\operatorname{alg}}$ is naturally $\mathbb{Z}/2$-graded. Let $\mathrm{Pic}_n^{\operatorname{alg},0}$ be the subgroup of elements such that $M \cong (E_n)_*$ as $(E_n)_*$-modules. The latter can then be described (but not easily computed) as \[ \mathrm{Pic}_n^{\operatorname{alg},0} \cong H_c^1({\mathbb{G}_n}, (E_n)_0^{\times}). \] The functor which sends $X \in \mathrm{Sp}_{K(n)}$ to $(E_n)_*^{\vee}X$ induces a map, $\mathrm{Pic}_n \to \mathrm{Pic}_n^{\operatorname{alg}}$, and we define $\kappa_n$ to be the kernel: \begin{equation}\label{eq:picseq} \xymatrix{0 \ar[r] & \kappa_n \ar[r] & \mathrm{Pic}_n \ar[r] & \mathrm{Pic}_n^{\operatorname{alg}}} \end{equation} It is called the \emph{exotic} Picard group\index{exotic Picard group $\kappa_n$} of $\mathrm{Sp}_{K(n)}$. Elements of $\mathrm{Pic}_n$ which are in $\kappa_n$ are called exotic. For $2(p-1) \geq n^2$, an argument that uses the sparseness of \eqref{eq:hfpss} shows that $\kappa_n = 0$ so that the map \eqref{eq:picseq} $\mathrm{Pic}_n \to \mathrm{Pic}_n^{\operatorname{alg}}$ is an injection \cite[Proposition 7.5]{hms_pic}. However, it has been shown in many cases that $\kappa_n$ is non-trivial. The following is a conjecture of Hopkins. \begin{conjecture}\label{conj:fin} The group $\kappa_n$ is a finite $p$-group. \end{conjecture} In \cite[Theorem 4.4.1]{Heard}, Heard proves that for $p$ odd, $\kappa_n$ is a direct product of cyclic $p$-groups. Note also that a positive answer to Conjecture~\ref{conj:fingen} would imply Conjecture~\ref{conj:fin}. 
In \cite{pstragowski_pic}, Pstr{\c a}gowski proves that for $2(p-1)>n^2+n$, $ \mathrm{Pic}_n \cong \mathrm{Pic}_n^{\operatorname{alg}}$. The question of whether or not $ \mathrm{Pic}_n \to \mathrm{Pic}_n^{\operatorname{alg}}$ is surjective in general is open. Furthermore, the algebraic Picard group $\mathrm{Pic}_n^{\operatorname{alg}}$ is not known for any prime when $n > 2$. It is believed that $\mathrm{Pic}_n^{\operatorname{alg}}$ is finitely generated over $\mathbb{Z}_p$. In fact, the (folklore) expectation is that $\mathrm{Pic}_n^{\operatorname{alg}}$ is of rank two over $\mathbb{Z}_p$, with one summand generated by $L_{K(n)}S^1$ and the other by the spectrum $S\langle{\det}\rangle$ discussed below in Example~\ref{example:elementsinpic} (b). The table of Figure~\ref{fig:pictable} summarizes the current state of the literature on these questions. The second author, Bobkova, Goerss and Henn have been working towards identifying $\mathrm{Pic}_2$ when $p=2$. \begin{table} \captionsetup{width=\textwidth} \caption{The table below contains some known values of $\mathrm{Pic}_n$. 
Here, H--M--S stands for Hopkins--Mahowald--Sadofsky, G--M--H--R stands for Goerss--Henn--Mahowald--Rezk and K--S for Kamiya--Shimomura.} \label{fig:pictable} \centering \small{ \begin{tabular}{|c | c | c | c | c | c | } \hline $n$ & $p$ & $\mathrm{Pic}_n$ & $\mathrm{Pic}_{n}^{\mathrm{alg}} $ & $\kappa_n$ & Reference \\ \hline \hline $1$ & $\geq 3$ & $\mathbb{Z}_p \times \mathbb{Z}/2(p-1)$ & $\mathbb{Z}_p \times \mathbb{Z}/2(p-1)$ & $0$ & H--M--S \cite{hms_pic} \\ \hline $1$ & $ 2$ & $\mathbb{Z}_2 \times \mathbb{Z}/2 \times \mathbb{Z}/4$ & $\mathbb{Z}_2 \times (\mathbb{Z}/2)^2$ & $\mathbb{Z}/2$ & H--M--S \cite{hms_pic} \\ \hline $2$ & $\geq 5$ & $\mathbb{Z}_p^2 \times \mathbb{Z}/2(p^2-1)$ & $\mathbb{Z}_p^2 \times \mathbb{Z}/2(p^2-1)$ & $0$ & \makecell{Due to Hopkins \\ See Lader \cite{lader}}\\ \hline $2$ & $3$ & $\mathbb{Z}_3^2 \times \mathbb{Z}/16 \times (\mathbb{Z}/3)^2$ & $\mathbb{Z}_3^2 \times \mathbb{Z}/16 $ & $(\mathbb{Z}/3)^2$ & \makecell{Karamanov \cite{karamanov} \\ G--H--M--R \cite{ghmr_pic} \\ K--S \cite{kam_shim}} \\ \hline \end{tabular}} \end{table} \begin{remark}Analogously, there is a map from the Picard group $\mathrm{Pic}(\mathrm{Sp}_n)$ of the $E_n$-local category $\mathrm{Sp}_n$ to the category of $(E_n)_0E_n$-comodules. In \cite{hs_pic}, Hovey and Sadofsky use a variant of Theorem~\ref{thm:vcdim} to determine these Picard groups completely for large primes: They show that for $2(p-1) > n^2+n$, there is an isomorphism $\mathrm{Pic}(\mathrm{Sp}_n) \cong \mathbb{Z}$, generated by $L_nS^1$. \end{remark} \begin{example}\label{example:elementsinpic} We describe a few important elements in $\mathrm{Pic}_n$. \begin{enumerate}[(a)] \item The spheres $L_{K(n)}S^m$ for $m \in \mathbb{Z}$ are all invertible. \item The determinant sphere $S\langle {\det} \rangle \in \mathrm{Pic}_n$. See \cite{detsphere} for a construction. 
It has the property that $(E_n)_*^{\vee}S\langle {\det} \rangle \cong (E_n)_*$ as $(E_n)_*$-modules, but with action of $\mathbb{G}_n$ twisted by the determinant. Its image in $\mathrm{Pic}_n^{\operatorname{alg},0} \cong H_c^1(\mathbb{G}_n, (E_n)_0^{\times})$ is the homomorphism $\det \colon \mathbb{G}_n \to \mathbb{Z}_p^{\times} \subseteq (E_n)_0^{\times}$ of Definition~\ref{defn:determinant}. \item\label{item:fiber} Given $\lambda \in (\pi_0E_n^{h\mathbb{G}_n^1})^{\times}$, one can define an element $S^{\lambda}$ via the fiber sequence \[\xymatrix{S^{\lambda} \ar[r] & E_n^{h\mathbb{G}_n^1} \ar[r]^-{\psi -\lambda } & E_n^{h\mathbb{G}_n^1} .}\] Some variation of this construction is discussed in Section 3.6 of \cite{westerland}. If the Adams--Novikov filtration of $\lambda$ is positive, one can show that $S^{\lambda}$ is exotic. At $p=3$, the subgroup of $(\pi_0E_2^{h\mathbb{G}_2^1})^{\times}$ of positive Adams--Novikov filtration is isomorphic to $\mathbb{Z}/3$. The elements in one of the factors of $\mathbb{Z}/3$ in $\kappa_2 \cong \mathbb{Z}/3 \times \mathbb{Z}/3$ are of the form $S^{\lambda}$. \item Some exotic elements cannot be constructed using (\ref{item:fiber}). One can instead use finite resolutions to construct them. The first example is at $p=2$ and $n=1$. Recall that $K=E_1$ and $KO\simeq K^{hC_2}$. Since $K_*^{\vee}KO \cong K_*^{\vee}\Sigma^4KO$, rather than choosing $\mathcal{E}_0 = \mathcal{E}_1 = KO$ to topologically realize \eqref{eq:resheight1algK}, one can let $\mathcal{E}_0 = \mathcal{E}_1 = \Sigma^4KO$ to get a fiber sequence \begin{align}\label{eq:realizationP1} \xymatrix{P_1 \ar[r] & \Sigma^4KO \ar[rr]^{\Sigma^{4}5^{2}\psi -1} & & \Sigma^4KO} \end{align} where $\psi \in \mathbb{G}_1 \cong \mathbb{Z}_2^{\times}$ is as in \eqref{eq:psichoice}. The fiber $P_1$ is a generator of $\kappa_1$. By construction, $P_1\wedge KO \simeq \Sigma^4 KO$. See \cite[Example 5.1]{ghmr_pic} for more details. 
Similarly, at $p=3$, there is an element $P_2 \in \kappa_2$ with the property that $P_2 \wedge E_2^{hG_{24}} \simeq \Sigma^{48} E_2^{hG_{24}}$; in fact, $P_2$ is a non-trivial exotic element which generates the other summand of $\mathbb{Z}/3 \subseteq \kappa_2$. See \cite[Theorem 5.5]{ghmr_pic}. It is constructed by modifying the realization of the duality resolution of Remark~\ref{rem:dualityfullresp3}. \end{enumerate} \end{example} We now discuss another element of $\mathrm{Pic}_n$ which plays an important role in $K(n)$-local homotopy theory and brings us to the topic of Gross--Hopkins duality. As an application of the period map mentioned in Remark~\ref{rem:grosshop}, Gross and Hopkins determine the dualizing complex of $\mathrm{Sp}_{K(n)}$, defined via a lift of Pontryagin duality for abelian groups. More precisely, the functor \[ \xymatrix{I_n^*(-) = \mathrm{Hom}(\pi_{-*}M_n(-), \mathbb{Q}_p/\mathbb{Z}_p)\colon \mathrm{Sp}_{K(n)}^{\mathrm{op}} \ar[r] & \mathrm{Mod}_{\mathbb{Z}_p}^{\mathrm{graded}}} \] is cohomological and thus representable by an object $I_n \in \mathrm{Sp}_{K(n)}$, the \emph{Gross--Hopkins dual of the sphere}. From an abstract point of view, it endows $\mathrm{Sp}_{K(n)}$ with a Serre duality functor, see~\cite{ars}. By Theorem \ref{thm:descentss} and Corollary \ref{cor:hfpsslarge}, $I_n$ is determined by its Morava module $(E_n)_*^{\vee}(I_n) = \pi_*L_{K(n)}(E_n \wedge I_n)$ when $p$ is large with respect to the height $n$; otherwise, one might have to twist by an exotic element of the $K(n)$-local Picard group. The spectrum $I_n$ turns out to be invertible in $\mathrm{Sp}_{K(n)}$, and Gross and Hopkins use the period map \eqref{eq:permap} to show that there is an equivalence \[ I_n \simeq L_{K(n)}(S^{n^2-n} \wedge S\langle {\det} \rangle \wedge P_n), \] where $P_n \in \kappa_n$ and $S\langle {\det} \rangle $ is as in Example~\ref{example:elementsinpic} (b). 
This identification is also known as \emph{Gross--Hopkins duality}\index{Gross--Hopkins duality}. It turns out that $P_1$ for $p=2$ and $P_2$ for $p=3$ are the elements discussed in Example~\ref{example:elementsinpic} (d). It has now been shown in many cases where $\mathbb{G}_n$ has $p$-torsion that $P_n \not\simeq L_{K(n)}S^0$ by showing that $P_n \wedge E_n^{hF} \not\simeq E_n^{hF}$ for a suitable choice of subgroup $F \subseteq \mathbb{G}_n$. See \cite{BBS} and \cite{heard_li_shi}. These arguments rely on the intimate relationship between Gross--Hopkins duality and $K(n)$-local Spanier--Whitehead duality\index{Spanier--Whitehead duality}: For $X \in \mathrm{Sp}_{K(n)}$, let $I_nX =F(X,I_n)$. The invertibility of $I_n$ implies that $I_nX \simeq D_nX \wedge I_n$ so studying $I_nX$ amounts to understanding $P_n$ and $D_nX$. We end this section with a few remarks on Spanier--Whitehead duality and, more specifically, on the problem of identifying $D_nE_n$. A first answer to this question due to Gross and Hopkins (see \cite[Proposition 16]{StrickGrossHop}) states that there is a weak equivalence $\Sigma^{-n^2}E_n \to D_nE_n $ which induces an isomorphism of $\mathbb{G}_n$-modules on homotopy groups. This does not imply that $D_nE_n$ is equivalent to $\Sigma^{-n^2}E_n$ as $\mathbb{G}_n$-equivariant spectra. But it does suggest that there is a \emph{dualizing module}, that is, that $D_nE_n$ is self-dual up to a twist. In fact, the twist can be described as the $K(n)$-localization of a $p$-adic sphere. A first description of this sphere is given as the Spanier--Whitehead dual of \[S^{\mathbb{G}_n} :=\left( \operatorname{colim}_{m, \mathrm{tr}} \Sigma_{+}^{\infty}B\mathbb{S}_n^{m}\right)^{\wedge}_p,\] where $\mathbb{S}_n^{m} \subseteq \mathbb{S}_n$ is the subgroup of elements congruent to $1$ modulo $\xi^{nm}$ (as in \eqref{eq:stabpres}). 
Here, the colimit is taken over transfers and one can show that $S^{\mathbb{G}_n}$ has the homotopy type of a $p$-adic sphere of dimension $n^2$. This description is not practical for computations as the action of $\mathbb{G}_n$ on $S^{\mathbb{G}_n}$ induced by the conjugation actions on $B\mathbb{S}_n^{m}$ is mysterious. However, there is a conjectural description of the twist analogous to the identification of the dualizing object in the Wirthm{\"u}ller isomorphism for compact Lie groups~\cite[Chapter III]{lmsm_esht}. Let $\mathfrak{g}$ be the abelian group underlying $\mathcal{O}_{\mathbb{D}_n}$ (as defined in Section~\ref{sec:GGn}) endowed with the conjugation (or adjoint) action of $\mathbb{G}_n$. Let \[S^{\mathfrak{g}} = \left(\operatorname{colim}_{m, \mathrm{tr}} \Sigma_{+}^{\infty} Bp^m \mathfrak{g} \right)^{\wedge}_p.\] Again, $S^{\mathfrak{g}}$ has the homotopy type of a $p$-adic sphere of dimension $n^2$ and the action of $\mathbb{G}_n$ on $Bp^m \mathfrak{g}$ induces an action on $S^{\mathfrak{g}}$. \index{Linearization hypothesis} \begin{linhypo}\label{conj:lin} There is a $\mathbb{G}_n$-equivariant equivalence \[S^{ \mathfrak{g}} \simeq S^{ \mathbb{G}_n} .\] \end{linhypo} \noindent Inspired by Serre's definition of the dualizing module \cite[Chapter I, \S 3.5]{serre}, such a statement was first suggested to the experts by the strong connections in the $K(n)$-local category between Spanier--Whitehead duality, Brown--Comenetz duality, and Poincar\'e duality for the group $\mathbb{G}_n$ (see Gross--Hopkins \cite{HopkinsGross} and Devinatz--Hopkins \cite[Section 5]{DH_action}). The hypothesis is stated in work of Clausen \cite[Section 6.4]{clausen_padicj}, not only for $\mathbb{G}_n$, but for any $p$-adic analytic group. Clausen has recently announced a proof of the Linearization Hypothesis in this general form. 
The linearization hypothesis leads to a $\mathbb{G}_n$-equivariant equivalence \[D_nE_n \simeq L_{K(n)}(E_n \wedge S^{- \mathfrak{g}}),\] where $S^{-\mathfrak{g}}= F(S^{\mathfrak{g}}, S_p^0 )$, a description that lends itself well to applications. \subsection{Compactifications and asymptotic algebraicity}\label{sec:assymptotics} \index{asymptotic algebraicity} We conclude this survey with a short overview of another recent direction in chromatic homotopy theory. As explained in Section~\ref{ssec:gn} and demonstrated in the examples above, chromatic homotopy theory at a fixed height $n$ simplifies when $p$ grows large, the essential transition occurring when $p-1>n$. This leads to the question of how to isolate those phenomena that hold generically, i.e., for all primes $p$ which are large with respect to the given height. The goal of this section is to outline a result of \cite{ultra1} that describes the \emph{compactification} of chromatic homotopy theory, which is based on the notion of ultraproducts\index{ultraproduct}. In particular, this provides a model of the limit of the $K(n)$-local categories when $p \to \infty$ that captures the generic behavior of these categories. A different approach using Goerss--Hopkins obstruction theory that gives an algebraic triangulated model for suitably large primes has recently appeared in the work of Pstr{\c a}gowski~\cite{pstragowski_alg}. The Stone--\v{C}ech compactification of a topological space $X$ is the initial compact Hausdorff space $\beta X$ equipped with a continuous map $\iota$ from $X$. If $X$ is discrete, $\beta X$ can be modeled by the set of ultrafilters\index{ultrafilter} on $X$ endowed with the Stone topology; recall that an \emph{ultrafilter} on $X$ is a set $\mathcal{U}$ of subsets of $X$ such that whenever $X$ is written as a disjoint union of finitely many subsets, then exactly one of them belongs to $\mathcal{U}$. 
The structure map $\iota \colon X \to \beta X$ sends a point $x \in X$ to the principal ultrafilter at $x$, i.e., the set of subsets of $X$ that contain $x$. We denote the set of non-principal ultrafilters suggestively by $\partial \beta X = \beta X \setminus \iota(X)$. Assuming the axiom of choice, $X$ is infinite if and only if $\partial\beta X$ is non-empty. (In fact, the existence of non-principal ultrafilters is weaker than the axiom of choice, but the key point here is that there is no constructive way to find non-principal ultrafilters.) Moreover, one can show that an open subset of $\beta X$ containing $\partial \beta X$ misses only finitely many points of $\iota X$. This may be thought of as a topological manifestation of the fundamental theorem of ultraproducts due to {\L}o{\'s}, which says that for a collection of models $(M_i)_{i\in I}$ of a first order theory, there is an equivalence for any formula $\phi$: \begin{equation}\label{eq:los} \begin{Bmatrix} \phi \text{ hold in } M_i\\ \text{ for almost all } i \in I \end{Bmatrix} \xymatrix@C=1.7pc{ \ar@{<=>}[r]^-{\sim} &} \begin{Bmatrix} \phi \text{ hold in } \prod_{\mathcal{U}}M_i \\ \text{ for all } \mathcal{U} \in \partial\beta I \end{Bmatrix}, \end{equation} where $\prod_{\mathcal{U}}M_i$ denotes the \emph{ultraproduct} of the $M_i$ at $\mathcal{U}$. While ultraproducts at non-principal ultrafilters thus capture generic information about the collection $(M_i)_{i\in I}$, they tend to also exhibit simplifying features. For example, for $\mathcal{U} \in \partial\beta\mathbb{P}$ a non-principal ultrafilter on the set of prime numbers $\mathbb{P}$, the ultraproduct of $(\mathbb{F}_p)_{p \in \mathbb{P}}$ turns out to be a rational field. 
If $\mathcal{F}$ is a presheaf on a topological space $X$ with values in a coefficient category $\mathcal{C}$ that is closed under filtered colimits and products, then one may construct a naive completion of $\mathcal{F}$ to be the presheaf on $\beta X$ given as the composite \[ \xymatrix{\widehat{\mathcal{F}}\colon \mathrm{Open}(\beta X)^{\mathrm{op}} \ar[r]^-{\iota^*} & \mathrm{Open}(X)^{\mathrm{op}} \ar[r]^-{\mathcal{F}} & \mathcal{C},} \] where $\iota^*$ denotes the inverse image functor. Assuming that $X$ is discrete so that $\mathcal{F}$ is a just a collection of stalks $(\mathcal{F}_x)_{x\in X}$, the stalk of $\widehat{\mathcal{F}}$ at an ultrafilter $\mathcal{U} \in \beta X$ is given by \begin{equation}\label{eq:ultraproduct} \xymatrix{\widehat{\mathcal{F}}_{\mathcal{U}} \simeq \operatorname{colim}_{A \in \mathcal{U}}\prod_{x \in A}\mathcal{F}_x,} \end{equation} where the filtered colimit is taken over the projection maps induced by inclusions $A \subseteq A'$ in $\mathcal{U}$. In particular, if $\mathcal{U} = \mathcal{U}_{x_0} \in \beta X$ is principal at a point $x_0 \in X$, then $\widehat{\mathcal{F}}_{\mathcal{U}_{x_0}} \simeq \widehat{\mathcal{F}}_{x_0}$. The formula \eqref{eq:ultraproduct} exhibits the stalk $\widehat{\mathcal{F}}_{\mathcal{U}}$ as a categorical generalization of ultraproducts: indeed, if all the $\mathcal{F}_x$ are non-empty and the coefficients are $\mathcal{C} = \mathrm{Set}$, then this recovers the usual notion of ultraproducts mentioned above. In Section \ref{sec:landscape}, we saw that the points of the spectrum $\mathrm{Sp}c(\mathrm{Sp})$ are in bijective correspondence with pairs $(p,n) \in \mathbb{P} \times (\mathbb{N} \cup \{\infty\})$ with $(p,0) \sim (q,0)$ for all $p,q \in \mathbb{P}$. From the point of view of tensor triangular geometry, one may think of the category of spectra as behaving like a bundle of categories over the space $\mathrm{Sp}c(\mathrm{Sp})$. 
Restricting to the discrete subspace $\mathbb{P} \times \{n\} \subset \mathrm{Sp}c(\mathrm{Sp})$, this bundle should then be a disjoint union of the local categories $\mathrm{Sp}_{K(n)}$ for varying $p \in \mathbb{P}$. Therefore, the above formalism yields a diagram \[ \xymatrix{\coprod_{p \in \mathbb{P}}\mathrm{Sp}_{K(n)} \ar[d] & \widehat{\coprod_{p \in \mathbb{P}}\mathrm{Sp}_{K(n)}} \ar[d] & \prod_{\mathcal{U}}^{\flat}\mathrm{Sp}_{K(n)} \ar[d] \\ \mathbb{P} \times \{n\} \ar[r]^-{\iota} & \widehat{\mathbb{P}\times \{n\}} \cong \beta\mathbb{P} & \{\mathcal{U}\}, \ar[l]_-{\supset}} \] in which the right vertical arrow exhibits $\prod_{\mathcal{U}}^{\flat}\mathrm{Sp}_{K(n)}$ as the stalk of the compactification over $\mathcal{U} \in \beta\mathbb{P}$. (Here, the superscript $\flat$ indicates that the coefficient category is $\mathcal{C} = \mathbb{C}at_{\infty}^{\flat}$ for a suitable decoration $\flat$. For the details, we refer the interested reader to \cite{ultra1,ultra1.5}.) The final ingredient in the formulation of the asymptotic algebraicity of chromatic homotopy theory is the algebraic model itself. Informally speaking, this is given by the stable $\infty$-category of periodized ind-coherent sheaves on the formal stack $\hat{\mathcal{H}}(n)$ from Section \ref{sec:geomodel}. An algebraic avatar of this category has previously been studied by Franke \cite{franke}. Based on \cite{ultra1}, the main theorem of \cite{ultra1.5} can now be stated as follows: \begin{theorem}\label{thm:ultra} For any non-principal ultrafilter $\mathcal{U}$ on $\mathbb{P}$ there is a symmetric monoidal equivalence \[ \xymatrix{\prod_{\mathcal{U}}^{\flat}\mathrm{Sp}_{K(n)} \ar[r]^-{\sim} & \prod_{\mathcal{U}}^{\flat}\mathrm{IndCoh}(\hat{\mathcal{H}}(n))^{\mathrm{per}}} \] of $\mathbb{Q}$-linear stable $\infty$-categories. \end{theorem} The algebraic categories $\mathrm{IndCoh}(\hat{\mathcal{H}}(n))^{\mathrm{per}}$ admit explicit algebraic models in terms of certain comodule categories. 
Therefore, the above result gives a precise formulation of the empirical observation that height $n$ chromatic homotopy theory becomes asymptotically algebraic when $p \to \infty$. \end{document}
\begin{document} \title{On the Fekete-Szeg\"o problem for concave univalent functions} \author{B. Bhowmik} \address{B. Bhowmik, Department of Mathematics, Indian Institute of Science, Bangalore-560012, India.} \email{[email protected]} \author{S. Ponnusamy} \address{S. Ponnusamy, Department of Mathematics, Indian Institute of Technology Madras, Chennai-600 036, India.} \email{[email protected]} \author{K.-J. Wirths} \address{K.-J. Wirths, Institut f\"ur Analysis, TU Braunschweig, 38106 Braunschweig, Germany} \email{[email protected]} \subjclass[2000]{30C45} \keywords{Concave, univalent and starlike functions} \date{ version: {\tt Nov 17, 2010}; File: $\tt FEK-Ar.tex$} \begin{abstract} We consider the Fekete-Szeg\"o problem with real parameter $\lambda$ for the class $Co(\alpha)$ of concave univalent functions. \end{abstract} \maketitle \pagestyle{myheadings} \markboth{B. Bhowmik, S. Ponnusamy and K.-J. Wirths }{Fekete-Szeg$\ddot{\rm{o}}$ problem} \section{Introduction} Let ${\mathcal S}$ denote the class of all univalent (analytic) functions \begin{equation}\label{p9eq1} f(z)=z+\sum_{n=2}^{\infty}a_n z^n \end{equation} defined on the unit disk ${\mathbb D} =\{z\in{\mathbb C}:\,|z|<1\}$. Then the classical Fekete-Szeg\"{o} inequality, presented by means of Loewner's method, for the coefficients of $f\in \mathcal{S}$ is that $$|a_3-\lambda a_2^2|\leq 1+2\exp (-2\lambda / (1-\lambda )) \quad \mbox{ for $\lambda\in [0,1)$}. $$ As $\lambda \rightarrow 1-$, we have the elementary inequality $|a_3-a_2^2|\leq 1$. Moreover, the coefficient functional $$ \Lambda _\lambda (f)=a_3-\lambda a_2^2 $$ on the normalized analytic functions $f$ in the unit disk ${\mathbb D}$ plays an important role in function theory. 
For example, the quantity $a_3- a_2^2$ represents $S_f(0)/6$, where $S_f$ denotes the Schwarzian derivative $(f''/f')'-(f''/f')^2/2$ of locally univalent functions $f$ in ${\mathbb D}$. In the literature, there exists a large number of results about inequalities for $\Lambda _\lambda (f)$ corresponding to various subclasses of $\mathcal{S}$. The problem of maximizing the absolute value of the functional $\Lambda _\lambda (f)$ is called the Fekete-Szeg\"{o} problem. In \cite{Koepf-87}, Koepf solved the Fekete-Szeg$\ddot{\rm{o}}$ problem for close-to-convex functions and the largest real number $\lambda$ for which $\Lambda _{\lambda} (f)$ is maximized by the Koebe function $z/(1 - z)^2$ is $\lambda= 1/3$, and later in \cite{Koepf-87a} (see also \cite{London-93}), this result was generalized for functions that are close-to-convex of order $\beta$. In \cite{Pfluger-85}, Pfluger employed the variational method to give another treatment of the Fekete-Szeg\"{o} inequality which includes a description of the image domains under extremal functions. Later, Pfluger \cite{Pfluger-86} used Jenkin's method to show that $$|\Lambda _\lambda (f)| \leq 1+2|\exp (-2\lambda / (1-\lambda ))|, ~~ f\in {\mathcal S}, $$ holds for complex $\lambda$ such that ${\rm Re\,}(1/(1-\lambda))\geq 1$. The inequality is sharp if and only if $\lambda$ is in a certain pear shaped subregion of the disk given by $$\lambda = 1-(u+itv)/(u^2 +v^2), ~~ -1\leq t\leq 1, $$ where $u = 1- \log (\cos \phi)$ and $v = \tan \phi -\phi$ , $0< \phi< \pi/2$. In this paper, we solve the Fekete-Szeg$\ddot{\rm{o}}$ problem for functions in the class $Co(\alpha)$ of concave univalent functions, with real parameter $\lambda$. 
\section{Preliminaries} A function $f:{\mathbb D}\to {\mathbb C}$ is said to belong to the family $Co(\alpha)$ if $f$ satisfies the following conditions: \begin{enumerate} \item[(i)] $f$ is analytic in ${\mathbb D}$ with the standard normalization $f(0)=f'(0)-1=0$. In addition it satisfies $f(1)=\infty$. \item[(ii)] $f$ maps ${\mathbb D}$ conformally onto a set whose complement with respect to ${\mathbb C}$ is convex. \item[(iii)]the opening angle of $f({\mathbb D})$ at $\infty$ is less than or equal to $\pi\alpha$, $\alpha\in (1,2]$. \end{enumerate} This class has been extensively studied in the recent years and for a detailed discussion about concave functions, we refer to \cite{Avk-Wir-06, Avk-Wir-05,Pom-Cruz} and the references therein. We note that for $f\in Co(\alpha)$, $\alpha\in (1,2]$, the closed set ${\mathbb C}\backslash f({\mathbb D})$ is convex and unbounded. Also, we observe that $Co(2)$ contains the classes $Co(\alpha)$, $\alpha\in (1,2]$. We recall the analytic characterization for functions in $Co(\alpha), \alpha \in (1,2]$: $f\in Co(\alpha)$ if and only if ${\rm\, Re}\, P_f(z)> 0$ in ${\mathbb D}$, where $$P_f(z)=\frac{2}{\alpha-1}\, \left [\frac{(\alpha+1)}{2}\frac{1+z}{1-z}-1-z \frac{f''(z)}{f'(z)}\right ]. $$ In \cite{BPW-09}, we have used this characterization and proved the following theorem which will be used to prove our result. \noindent {\bf Theorem A.} {\em Let $\alpha\in (1, 2]$. A function $f\in Co(\alpha)$ if and only if there exists a starlike function $\phi\in \mathcal{S}^{*}$ such that $f(z)=\Lambda _\phi (z)$, where $$\Lambda _\phi (z) =\int_{0}^{z}\frac{1}{(1-t)^{\alpha+1}}\left(\frac{t}{\phi(t)}\right)^{(\alpha-1)/2}\, dt. $$ } We also recall a lemma due to Koepf \cite[Lemma 3]{Koepf-87}. \noindent {\bf Lemma A.} {\em Let $g(z)=z+b_2z^2+b_3z^3+\cdots \in \mathcal{S}^{*}$. 
Then $|b_3-\lambda b_2^2|\leq \max~\{1, |3-4\lambda|\}$ which is sharp for the Koebe function $k$ if $|\lambda-3/4|\geq 1/4$ and for $(k(z^2))^{1/2}=\frac{z}{1-z^2}$ if $|\lambda-\frac{3}{4}|\leq 1/4$. } Here $ \mathcal{S}^{*}$ denotes the family of functions $g\in \mathcal{S} $ that map ${\mathbb D}$ into domains that are starlike with respect to the origin. Each $ g\in\mathcal{S}^{*}$ is characterized by the condition ${\rm Re\,} (zg'(z)/g(z))>0$ in ${\mathbb D}$. Ma and Minda \cite{Ma-Minda-92} presented the Fekete-Szeg\"o problem for more general classes through subordination, which includes the classes of starlike and convex functions, respectively. In a recent paper, the authors in \cite{CKS-07} obtained a new method of solving the Fekete-Szeg\"o problem for classes of close-to-convex functions defined in terms of subordination. \section{Main Result and its Proof} We recall from Theorem~A that $f\in Co(\alpha)$ if and only if there exists a function $\phi (z)= z+ \sum_{n=2}^{\infty}\phi_n z^n\in \mathcal{S}^{*}$ such that \begin{equation}\label{p9eq2a} f'(z)=\frac{1}{(1-z)^{\alpha+1}}\left(\frac{z}{\phi(z)}\right)^{\frac{\alpha-1}{2}}, \end{equation} where $f$ has the form given by (\ref{p9eq1}). Comparing the coefficients of $z$ and $z^2$ on both sides of the series expansion of (\ref{p9eq2a}), we obtain that \begin{eqnarray*} a_2&=& \frac{\alpha+1}{2}-\frac{\alpha-1}{4}\phi_2,\quad \mbox{and}\\ a_3 &=& \frac{(\alpha+1)(\alpha+2)}{6}-\frac{\alpha^2-1}{6}\phi_2-\frac{\alpha-1}{6}\phi_3+\frac{\alpha^2-1}{24}\phi_2^2, \end{eqnarray*} respectively. 
A computation yields, \begin{eqnarray}\nonumber\label{p9eq5} a_3-\lambda a_2^2 &= & \frac{(\alpha+1)^2}{4}\left ( \frac{2(\alpha+2)}{3(\alpha+1)}-\lambda \right ) +\frac{\alpha^2-1}{4}\left (\lambda-\frac{2}{3}\right )\phi_2\\ && ~~~-\frac{\alpha-1}{6}\left [\phi_3-\left(\frac{2(\alpha+1)-3\lambda(\alpha-1)}{8}\right)\phi_2^2\right]. \end{eqnarray} \noindent {\bf \underline{Case (1)}:} Let $\displaystyle \lambda \in \left(-\infty, \frac{2(\alpha-3)}{3(\alpha-1)}\right]$. We observe that the assumption on $\lambda$ is seen to be equivalent to $$\frac{2(\alpha+1)-3\lambda(\alpha-1)}{8}\geq 1 $$ and the first term in the last expression is nonnegative. Hence, using Lemma~A for the last term in (\ref{p9eq5}), and noting that $|\phi_2|\leq 2$, we have from the equality (\ref{p9eq5}), \begin{eqnarray*} |a_3-\lambda a_2^2|&\leq& \frac{(\alpha+1)^2}{4}\left ( \frac{2(\alpha+2)}{3(\alpha+1)}-\lambda \right ) +\frac{\alpha^2-1}{4}\left (\frac{2}{3}-\lambda \right ) \left|\phi_2\right|\\ && ~~~ +\frac{\alpha-1}{6}\left| \phi_3-\left(\frac{2(\alpha+1)-3\lambda(\alpha-1)}{8}\right)\phi_2^2 \right|\\ &\leq & \frac{(\alpha+1)^2}{4}\left ( \frac{2(\alpha+2)}{3(\alpha+1)}-\lambda \right ) +\frac{\alpha^2-1}{2}\left (\frac{2}{3}-\lambda \right ) \\ && ~~~ +\frac{\alpha-1}{6}\left(\frac{2(\alpha+1)-3\lambda(\alpha-1)}{2}-3\right). \end{eqnarray*} Thus, simplifying the right hand expression gives \begin{equation}\label{neweq1} |a_3-\lambda a_2^2| \leq \frac{2\alpha^2+1}{3}- \lambda \alpha^2, ~\mbox{ if }~ \lambda \in \left(-\infty, \frac{2(\alpha-3)}{3(\alpha-1)}\right]. \end{equation} \noindent {\bf \underline{Case (2)}:} Let $\displaystyle\lambda\geq \frac{2(\alpha+2)}{3(\alpha+1)}$ so that the first term in (\ref{p9eq5}) is nonpositive. 
The condition on $\lambda$ in particular gives $\lambda >2/3$ and therefore, our assumption on $\lambda$ implies that $$ \frac{2(\alpha+1)-3\lambda(\alpha-1)}{8} < \frac{1}{2}. $$ Again, it follows from Lemma~A that $$\left|\phi_3-\left(\frac{2(\alpha+1)-3\lambda(\alpha-1)}{8}\right)\phi_2^2\right|\leq 3-\frac{2(\alpha+1)-3\lambda(\alpha-1)}{2}. $$ In view of these observations and a use of the inequality $|\phi_2|\leq 2$, the equality (\ref{p9eq5}) gives \begin{eqnarray*} |a_3-\lambda a_2^2| &\leq& -\frac{(\alpha+1)^2}{4}\left ( \frac{2(\alpha+2)}{3(\alpha+1)}-\lambda \right ) -\frac{\alpha^2-1}{2}\left (\frac{2}{3}-\lambda \right )\\ && ~~~~ +\frac{\alpha-1}{6}\left(3-\frac{2(\alpha+1)-3\lambda(\alpha-1)}{2}\right). \end{eqnarray*} Thus, simplifying the right hand expression gives \begin{equation}\label{neweq2} |a_3-\lambda a_2^2| \leq \lambda\alpha^2-\frac{2\alpha^2+1}{3}, ~\mbox{ if }~ \lambda\geq \frac{2(\alpha+2)}{3(\alpha+1)}. \end{equation} The inequalities in both cases are sharp for the functions $$f(z)= \frac{1}{2\alpha} \left [\left(\frac{1+z}{1-z}\right)^{\alpha}-1\right ]. $$ \noindent {\bf \underline{Case (3)}:} To get the complete solution of the Fekete-Szeg\"o problem, we need to consider the case \begin{equation}\label{neweq3} \lambda\in \left(\frac{2(\alpha -3)}{3(\alpha -1)},\,\frac{2(\alpha +2)}{3(\alpha +1)}\right). \end{equation} Now, we deal with this case by using the formulas (\ref{p9eq2a}) and (\ref{p9eq5}) together with the representation formula for $\phi\in \mathcal{S}^{*}$: $$\frac{z\phi'(z)}{\phi(z)}=\frac{1+z\omega(z)}{1-z\omega(z)}, $$ where $\omega :{\mathbb D} \rightarrow \overline{{\mathbb D}}$ is a function analytic in ${\mathbb D}$ with the Taylor series $$\omega (z)= \sum_{n=0}^{\infty}c_n z^n. 
$$ Inserting the resulting formulas $$\phi_2=2c_0~\mbox{ and }~\phi_3=c_1+3{c_0}^2 $$ into (\ref{p9eq5}) yields \begin{eqnarray*} a_3-\lambda a_2^2&=& \frac{(\alpha+1)^2}{4}\left ( \frac{2(\alpha+2)}{3(\alpha+1)}-\lambda \right ) +\frac{\alpha^2-1}{2}\left (\lambda-\frac{2}{3}\right )c_0\\ && ~~~ -\frac{\alpha-1}{6}\left[c_1+\frac{4-2\alpha+3\lambda(\alpha-1)}{2}{c_0}^2\right]\\ & =:& A + Bc_0+Cc_0^2+Dc_1, \end{eqnarray*} where $$\left\{ \begin{array}{lll} A &=&\displaystyle \frac{(\alpha+1)(\alpha+2)}{6}-\lambda\frac{(\alpha+1)^2}{4},\\ B &=&\displaystyle (\alpha^2-1)\left(\frac{\lambda}{2}-\frac{1}{3}\right),\\ C &=& \displaystyle -\frac{(\alpha-1)\left(4-2\alpha+3\lambda(\alpha-1)\right)}{12},\\ D &=& \displaystyle -\frac{\alpha-1}{6}. \end{array} \right. $$ It is well-known that $|c_0|\leq 1$ and $|c_1|\leq 1-|c_0|^2$. Using this we obtain, \begin{eqnarray}\label{p9eq1a} |a_3-\lambda a_2^2|&=&| A + Bc_0+Cc_0^2+Dc_1|\\\nonumber &\leq& |A + Bc_0+Cc_0^2|+ |D||c_1|\\\nonumber &\leq& |A + Bc_0+Cc_0^2|+ |D|(1-|c_0|^2). \end{eqnarray} Let $c_0=re^{i\theta}$. First we search for the maximum of $|A + Bc_0+C{c_0}^2|$ where we fix $r$ and vary $\theta$. To this end, we consider the expression $|A + Bc_0+C{c_0}^2|^2$ \begin{eqnarray*} &=& |A+Bre^{i\theta}+Cr^2e^{2i\theta}|^2\\ &=&(A-Cr^2)^2+B^2r^2+(2ABr+2BCr^3)\cos\theta+4ACr^2(\cos\theta)^2\\ &=:& f(r,\theta). \end{eqnarray*} Afterwards, we have to find the biggest value of the maximum function, if $r$ varies in the interval (0,1]. We need to deal with several subcases of (\ref{neweq3}). \noindent {\bf \underline{Case A}:} Let $\lambda\in \left(\frac{2(\alpha -3)}{3(\alpha -1)},\frac{2(\alpha -2)}{3(\alpha -1)} \right)$. We observe that $C>0$, $B<0,$ and $A+Cr^2>0$ for $r\in [0,1]$. 
Hence the corresponding quadratic function $$ h(x)=(A-Cr^2)^2+B^2r^2+2Br(A+Cr^2)x+4ACr^2x^2, ~x\in [-1,1], $$ attains its maximum value for any $r\in (0,1]$ at $x=-1$. Therefore, our task is to find the maximum value of $$ g(r)=A-Br+Cr^2+\frac{\alpha -1}{6}(1-r^2). $$ The inequalities $g'(0)=-B$ and $$ g'(1)=-B+2C-\frac{\alpha -1}{3}=\frac{\alpha -1}{6}(-6\lambda +4(\alpha -1))>0 $$ for $\lambda < \frac{2(\alpha -1)}{3\alpha}$ imply $$ g(r)\leq g(1)=A-B+C=\frac{2\alpha^2 +1}{3}-\lambda \alpha^2. $$ \noindent {\bf \underline{Case B}:} If $\lambda =\frac{2(\alpha -2)}{3(\alpha -1)}$, then $C=0$ and $h$ is a linear function that has its maximum value at $x=-1$. The considerations of Case A apply and again we get the maximum value $g(1)$ as above. \noindent {\bf \underline{Case C}:} Let $\lambda \in \left(\frac{2(\alpha -2)}{3(\alpha -1)},\frac{2(\alpha -1)}{3\alpha} \right)$. Firstly, we prove that in this interval the quadratic function $h$ is monotonic decreasing for $x\in [-1,1]$. Since the function $h:{\mathbb R} \rightarrow {\mathbb R}$ has its maximum at $$ x(r)=\frac{-B(A+Cr^2)}{4ACr}=\frac{-B}{4}\left(\frac{1}{Cr}+\frac{r}{A}\right), $$ it is sufficient to show that $x(r)$ is monotonic increasing and $x(1)<-1$. The first assertion is trivial and the second one is equivalent to $$j(\lambda)=\alpha^2(3\lambda-2)^2-4+3\lambda > 0 $$ for the parameters $\lambda$ in question. This inequality is easily proved. Hence, we get the same upper bound as in Cases A and B. In conclusion, Cases A, B and C give, \begin{equation}\label{neweq4} |a_3-\lambda a_2^2| \leq \frac{2\alpha^2+1}{3}-\lambda\alpha^2, ~\mbox{ if }~ \lambda\in \left(\frac{2(\alpha -3)}{3(\alpha -1)}, \frac{2(\alpha-1)}{3\alpha} \right ). 
\end{equation} \noindent {\bf \underline{Case D}:} Let $\lambda \in \left[\frac{2(\alpha -1)}{3\alpha},\frac{2}{3} \right)$ and we may factorize $j(\lambda)$ as $$j(\lambda)=9\alpha^2(\lambda-\lambda _1)(\lambda-\lambda _2), $$ where \begin{equation}\label{neweq8} \lambda_1=\frac{4\alpha^2-1-\sqrt{8\alpha^2+1}}{6\alpha^2}~\mbox{ and }~\lambda_2=\frac{4\alpha^2-1+\sqrt{8\alpha^2+1}}{6\alpha^2}. \end{equation} We observe that $\lambda_2>\lambda_1$. For $\lambda \in \left[\frac{2(\alpha -1)}{3\alpha},\lambda_1\right)$, the function $h$ has its maximum value at $x=-1$ and the function $g$ has its maximum value at $$ r_m=\frac{-B}{-2C+\frac{\alpha -1}{3}}\in (0,1]. $$ Hence, the maximum of the Fekete-Szeg\"o functional is $$ g(r_m)=\frac{\alpha(10-9\lambda)-(3\lambda-2)}{9(2-\lambda )+3\alpha(3\lambda-2)}. $$ For $\lambda\in \left[\lambda_1,\frac{2}{3}\right)$, the number $$ r_0=\frac{B}{2C\left(1+\sqrt{1-\frac{B^2}{4AC}}\right)}\in (0,1] $$ is the unique solution of $x(r)=-1$ in the interval $(0,1]$. It is easily seen that $r_m<r_0$ for $\lambda < \frac{2}{3}$. Further, $$ k(r)=\sqrt{h(x(r))}+\frac{\alpha -1}{6}(1-r^2)=(A-Cr^2)\sqrt{1-\frac{B^2}{4AC}}+\frac{\alpha -1}{6}(1-r^2) $$ is monotonic decreasing for $r\geq r_0$. Hence, the maximum value of $|a_3-\lambda a_2^2|$ is $g(r_m)$ in this part of the interval in question, too. The extremal function maps ${\mathbb D}$ onto a wedge shaped region with an opening angle at infinity less than $\pi\alpha$ and one finite vertex as in Example 3.12 in \cite{BPW-08}. \noindent {\bf \underline{Case E}:} For $\lambda=\frac{2}{3}$, we have $B=0$ and $C=-\frac{\alpha -1}{6}$. Hence the maximum is attained for $\cos\theta =0$ and any $r\in(0,1]$. In all these cases, we get $$ |a_3-\lambda a_2^2|=\frac{\alpha}{3} $$ as the sharp upper bound. 
The extremal functions map ${\mathbb D}$ onto a region with an opening angle at infinity equal to $\pi\alpha$ and two finite vertices as in Example 3.13 in \cite{BPW-08}. In conclusion, Cases D and E give, \begin{equation}\label{neweq5} |a_3-\lambda a_2^2| \leq \frac{\alpha(10-9\lambda)-(3\lambda-2)}{9(2-\lambda )+3\alpha(3\lambda-2)}, ~\mbox{ if }~ \lambda\in \left(\frac{2(\alpha-1)}{3\alpha}, \frac{2}{3} \right ]. \end{equation} \noindent {\bf \underline{Case F}:} Let $\lambda\in (\frac{2}{3},\lambda_2]$. Since $B>0$, the function $x(r)$ is monotonic decreasing now. The number $$ r_1=\frac{B}{-2C\left(1+\sqrt{1-\frac{B^2}{4AC}}\right)}\in (0,1] $$ is the unique solution of the equation $x(r)=1$ lying in (0,1]. For $r< r_1$, we have $h(x)\leq h(1)$. We consider the function $$ l(r)=A+Br+Cr^2+\frac{\alpha-1}{6}(1-r^2) $$ and determine the maximum of this function to be attained at $$ r_n=\frac{B}{-2C+\frac{\alpha-1}{3}}. $$ It is easily proved that $r_n>r_1$. Since $k(r)$ is monotonic increasing, we get the maximum value of the Fekete-Szeg\"o functional in this case as $$k(1)=(A-C)\sqrt{1-\frac{B^2}{4AC}}=\alpha(1-\lambda)\sqrt{\frac{12(1-\lambda)}{(4-3\lambda)^2-\alpha^2(3\lambda-2)^2}}, $$ which is attained for $c_0=e^{i\theta_0}$, where $$ \cos\theta_0 = \frac{-B(A+C)}{4AC}. $$ In this case, the extremal function $f$ is defined by the solution of the following complex differential equation $$ f'(z)=\frac{(1-ze^{i\theta_0})^{\alpha-1}}{(1-z)^{\alpha+1}}. $$ In conclusion, in this case, we have, \begin{equation}\label{neweq6} |a_3-\lambda a_2^2| \leq\alpha(1-\lambda)\sqrt{\frac{12(1-\lambda)}{(4-3\lambda)^2-\alpha^2(3\lambda-2)^2}}, ~\mbox{ if }~ \lambda\in (2/3,\lambda_2]. 
\end{equation} \noindent {\bf \underline{Case G}:} Let $\lambda \in \left(\lambda_2, \frac{2(\alpha +2)}{3(\alpha +1)}\right)$. Since $x(1)<-1$ for these $\lambda$, the number $$ r_2= \frac{B}{-2C\left(1-\sqrt{1-\frac{B^2}{4AC}}\right)} $$ satisfies $x(r_2)=-1$ and $r_2\in (0,1)$. For $r\leq r_2$, we can make similar considerations as in the preceding case, i.e. for $r\leq r_1$ the function $l(r)$ takes the maximum value, and for $r\in (r_1,r_2]$, the function $k(r)$ plays this role. For $r > r_2$, the point $x(r)$ does not lie in the interval $[-1,1]$. Hence, the maximum in question is attained for $x=-1$ or $x=1$. We see that $A+C <0$ and $-A-Cr^2>0$ for the values of $\lambda$ that we are considering now, the maximum of (\ref{p9eq1a}) is attained for $x=-1$, i.e. for $c_0=-r$. Hence, for $r \in (r_2,1]$ the maximum function is $$ n(r)=-A+Br-Cr^2+\frac{\alpha -1}{6}(1-r^2). $$ Since $$ -C>\frac{\alpha -1}{6}~ \mbox{ and }~ B>0, $$ we get $n(r)\leq n(1)$ in the interval in question and hence $$|a_3-\lambda a_2^2|\leq n(1)=-A+B-C=\lambda \alpha^2-\frac{2\alpha^2 +1}{3} $$ whenever $\lambda \in \left(\lambda_2, \frac{2(\alpha +2)}{3(\alpha +1)}\right)$. Equations (\ref{neweq1}), (\ref{neweq2}), (\ref{neweq4}), (\ref{neweq5}), (\ref{neweq6}) and Case G give \noindent {\bf Theorem. } {\it For $\alpha\in (1,2]$, let $f\in Co(\alpha)$ have the expansion $(\ref{p9eq1})$. 
Then, we have $$\left|a_3-\lambda {a_2}^2\right|\leq \left \{ \begin{array}{rl} \displaystyle \frac{2\alpha^2+1}{3}-\lambda\alpha^2 & \mbox{ for }\displaystyle \lambda \in \left(-\infty,\frac{2(\alpha-1)}{3\alpha} \right ] \\ \displaystyle \frac{\alpha(10-9\lambda)-(3\lambda-2)}{9(2-\lambda)+3\alpha(3\lambda-2)} & \mbox{ for }\displaystyle \frac{2(\alpha-1)}{3\alpha}\leq \lambda \leq \frac{2}{3}\\ \displaystyle \alpha(1-\lambda)\sqrt{\frac{12(1-\lambda)}{(4-3\lambda)^2-\alpha^2(3\lambda-2)^2}} & \mbox{ for }\displaystyle \frac{2}{3}\leq \lambda \leq \lambda_2\\ \displaystyle \lambda\alpha^2-\frac{2\alpha^2+1}{3} & \mbox{ for }\displaystyle \lambda\in \left [\lambda_2 ,\infty\right) , \end{array} \right . $$ where $\lambda_2$ is given by {\rm (\ref{neweq8})}. To emphasize the fact that the bound is a continuous function of $\lambda$ for any $\alpha$ we mention two different expressions for the same bound for some values of $\lambda$. The inequalities are sharp. } The Fekete-Szeg\"o inequalities for functions in the class $Co(\alpha)$ for complex values of $\lambda$ remain an open problem. {\bf Acknowledgement:} The authors thank the referee for careful reading of the paper. \begin{thebibliography}{99} \bibitem{Avk-Wir-06} {\sc F.G. Avkhadiev, Ch. Pommerenke and K.-J. Wirths}: Sharp inequalities for the coefficient of concave schlicht functions, \textit{Comment. Math. Helv.} {\bf 81} (2006), 801--807. \bibitem{Avk-Wir-05} {\sc F.G. Avkhadiev and K.-J. Wirths}: Concave schlicht functions with bounded opening angle at infinity, \textit{ Lobachevskii J. Math.} {\bf 17}(2005), 3--10. \bibitem{BPW-08} {\sc B. Bhowmik, S. Ponnusamy and K.-J. Wirths}: Unbounded convex polygons, Blaschke products and concave schlicht functions, \textit{Indian J. Math.} {\bf 50} (2008), 339--349. 
\bibitem{BPW-09} {\sc B. Bhowmik, S. Ponnusamy and K.-J. Wirths}: Characterization and the pre-Schwarzian norm estimate for concave univalent functions, \textit{Monatshefte f\"ur Mathematik}, online first DOI 10.1007/s00605-009-0146-7 (2009). \bibitem{CKS-07} {\sc J. H. Choi, Y. C. Kim and T. Sugawa}: A general approach to the Fekete-Szeg\"o problem, \textit{J. Math. Soc. Japan} {\bf 59}(2007), no. 3, 707--727. \bibitem{Pom-Cruz} {\sc L. Cruz and Ch. Pommerenke}: On concave univalent functions, \textit{Complex Var. Elliptic Equ.} {\bf 52}(2007), 153--159. \bibitem{FeSz-33} {\sc M. Fekete and G. Szeg\"o}: Eine Bemerkung \"uber ungerade schlichte Funktionen, \textit{J. London Math. Soc.} {\bf 8}(1933), 85--89. \bibitem{Koepf-87} {\sc W. Koepf}: On the Fekete-Szeg\"o problem for close-to-convex functions, \textit{Proc. Amer. Math. Soc.} {\bf 101}(1987), 89--95. \bibitem{Koepf-87a} {\sc W. Koepf}: On the Fekete-Szeg\"o problem for close-to-convex functions II, \textit{Arch. Math. } {\bf 49}(1987), 420--433. \bibitem{London-93} {\sc R.R. London}: Fekete-Szeg\"o inequalities for close-to convex functions, \textit{Proc. Amer. Math. Soc.} {\bf 117}(1993), 947--950. \bibitem{Ma-Minda-92} {\sc W. Ma and D. Minda}: A unified treatment of some special classes of univalent functions, \textit{Proceedings of the Conference on Complex Analysis} (Z. Li, F. Ren, L. Lang, and S. Zhang, eds.), 1992, 157--169; International Press Inc., Cambridge, MA, 1994. \bibitem{Pfluger-85} {\sc A. Pfluger}: The Fekete-Szeg\"o inequality by a variational method, \textit{ Ann. Acad. Sci. Fenn. Ser. A I Math.} {\bf 10}(1985), 447--454. \bibitem{Pfluger-86} {\sc A. Pfluger}: The Fekete-Szeg\"o inequality for complex parameters, \textit{Complex Variables Theory Appl.} {\bf 7}(1986), no. 1-3, 149--160. \end{thebibliography} \end{document}
\begin{document} \title{On the moments of roots of Laguerre-polynomials and the Marchenko-Pastur law} \author{\large{M. Kornyik} \\ E\"otv\"os Lor\'and University \\ Department of Probability Theory and Statistics \\ P\'azm\'any P\'eter s\'et\'any 1/C., H-1117, Budapest, Hungary \\ \textit{email}:[email protected] \and \large{Gy. Michaletzky} \\ E\"otv\"os Lor\'and University \\ Department of Probability Theory and Statistics \\ P\'azm\'any P\'eter s\'et\'any 1/C., H-1117, Budapest, Hungary\\ \textit{email}:[email protected]} \maketitle \begin{abstract} In this paper we compute the leading terms in the sum of the $k^{th}$ power of the roots of $L_{p}^{(\alpha)}$, the Laguerre-polynomial of degree $p$ with parameter $\alpha$. The connection between the Laguerre-polynomials and the Marchenko-Pastur distribution is expressed by the fact, among others, that the limiting distribution of the empirical distribution of the normalized roots of the Laguerre-polynomials is given by the Marchenko-Pastur distribution. We give a direct proof of this statement based on the recursion satisfied by the Laguerre-polynomials. At the same time, our main result gives that the leading term in $p$ and $(\alpha+p)$ of the sum of the $k^{th}$ power of the roots of $L_{p}^{(\alpha)}$ coincides with the $k^{th}$ moment of the Marchenko-Pastur law. We also mention the fact that the expectation of the characteristic polynomial of a $XX^T$ type random covariance matrix, where $X$ is a $p\times n$ random matrix with iid elements, is $\ell^{(n-p)}_p$, i.e. the monic version of the $p^{th}$ Laguerre polynomial with parameter $n-p$. \end{abstract} \section{Introduction} In theory of orthogonal polynomials the limit of the empirical distribution of their roots is a much studied matter. 
In this paper we are going to study the limit distribution of the roots of Laguerre polynomials $L_p^{(\alpha_p)}$, where \begin{align}\label{Lag_coeff} L_p^{(\alpha)}(x)=\sum_{j=0}^p{(-1)^j\binom{\alpha+p}{p-j}\frac{x^j}{j!}} \ \ \ \ \ \alpha\in\mathbb{R} \end{align} assuming that $\alpha_p/p\rightarrow c>-1$. For $\alpha>-1$ these polynomials are known to be orthogonal with respect to the measure $x^{\alpha}e^{-x}\textbf{1}_{ [0,\infty]} dx$, from which one can conclude that all the roots are distinct and lie in $\mathbb{R}_+$. For $\alpha\in [-p+1,-1]\cap\mathbb{Z}$ one has that $$ L_p^{(\alpha)}(x)=x^{-\alpha}L_{p+\alpha}^{(-\alpha)}(x) $$ and hence one can make the conclusion that for such $\alpha$ values the polynomial $L_p^{(\alpha)}$ has $p+\alpha$ distinct positive roots and $0$ is also a root with multiplicity $-\alpha$. In section 1 we show that the normalized generating function of the moments of the normalized roots of $L_p^{(\alpha_p)}$ satisfies the same quadratic fixed point equation in the limit as the generating function of the moments of the Marchenko-Pastur distribution. In section 2 we will explicitly show that the coefficient of the highest order term (viewed as a polynomial in $p$) of the $k^{th}$ power of the roots of $L_p^{(\alpha)}$ coincides with the $k^{th}$ moment of the corresponding Marchenko-Pastur distribution. \section{Convergence of the empirical distribution} Let us consider the roots of the Laguerre-polynomial $L_p^{(\alpha)}$ denoted by $\xi_{p,1}^{(\alpha)}, \dots , \xi_{p,p}^{(\alpha)}$. Let $M^{(\alpha)}_p(k)$ denote the sum of their $k$-th power. That is $M^{(\alpha)}_p(k) = \sum_{i=1}^p (\xi_{p,i}^{( \alpha)})^k$. Finally, $\mathcal M_p^{(\alpha)}$ denotes the power series determined by these coefficients, i.e. \begin{equation} \label{mgf} \mathcal{M}^{(\alpha)}_p(z) = p+\sum_{k=1}^\infty M^{(\alpha)}_p(k) z^k\,. 
\end{equation} Note that in case $\alpha$ is a negative integer in the interval $[-p+1,-1]$ the zero is also a root of $L_p^{(\alpha)}$, which explains why the case $k=0$, i.e. the zeroth moment, had to be dealt with separately in (\ref{mgf}). It is known that \begin{align*} \mathcal{M}^{(\alpha)}_p(z)=\frac1z \frac{(\ell_{p}^{(\alpha)})'(1/z)}{\ell_p^{(\alpha)}(1/z)}=-z\frac{(\widehat{\ell}_{p}^{(\alpha)})'(z)}{\widehat{\ell}_{p}^{(\alpha)}(z)}+p \end{align*} where $ \ell^{(\alpha)}_p(x)=(-1)^p p! L_p^{(\alpha)}(x)$ is the monic version of $L_p^{(\alpha)}$, and for any polynomial of degree $p$ we denote by $\widehat{\ell}(z)=z^p \ell(1/z)$ the so-called conjugate polynomial. \begin{theorem}\label{MP_weaklimit} Let us assume that $\alpha = \alpha_p $ and $\frac{\alpha_p}{p}\rightarrow c\in (-1,\infty)$, as $p\rightarrow \infty$. Then the empirical distribution determined by the normalized roots (where \ $p^{-1}\ $ is the normalization factor) of the Laguerre-polynomial $L_p^{(\alpha_p)}$ converges weakly to the Marchenko-Pastur distribution, given as \begin{align}\label{MP_distr}\mu_c(A)=\begin{cases} -c\delta_0(A)+ \nu_c(A)\,, & \mbox{ if } -1< c<0 \mbox{ and } \alpha_p\in\{-p+1,\ldots,-1\} \ \hbox{ for all } p\,,\\ \nu_c(A)\,, & \mbox{ if } c\geq 0, \end{cases} \end{align} for $ A \in \mathcal{B}(\mathbb{R})$, where $\delta_0$ denotes the Dirac-delta measure at $0$, while the measure $\nu_c$ is absolutely \\ continuous with density $$ d\nu_c(x)= \frac{\sqrt{(x_+-x)(x-x_-)}}{2\pi x}\mathbf{1}_{[x_-,x_+]}(x)dx $$ where $x_\pm=[ \sqrt{c+1}\pm1]^2.$ \if 0\textbf{Megj.: Szerintem a fenti helyett az k\'ene, hogy} $$ d\nu_c(x)=\frac{\sqrt{(x-x_-')(x_+'-x)}}{2\pi x}\textbf{1}_{[x_-',x_+']}(x)dx $$ \textbf{ahol} $x_\pm'=(\sqrt{c+1}\pm1)^2$. \fi \end{theorem} \begin{remark} A more general version of this theorem -- allowing for $c < -1$ -- was proved by Mart\'inez-Gonz\'alez et al. 
in \cite{Mar01} using complex analysis and differential equations, but the proof presented here is based on elementary calculations using only the recursion equations satisfied by the Laguerre-polynomials. \end{remark} \begin{remark} Laguerre polynomials show a deep connection with random matrix theory in the following ways: \begin{itemize} \item[1.] Forrester and Gamburd proved in \cite{for06} that the expectation of the characteristic polynomial of the random matrix $XX^T$ is given by $\ell_p^{(n-p)}(z)$, i.e. $E \det (x\cdot I -XX^T)=\ell_p^{(n-p)}(x)$, where $X$ is a $p\times n$ random matrix with independent, identically distributed entries with zero expectation and variance $1$. \item[2.] If $X$ is a $p\times n$ random matrix in the same sense as above, then the weak limit of the empirical measure of the eigenvalues is a much studied question of random matrix theory, although it is usually normalized by $n$, which in our case means a normalization by $\alpha+p$. A well-known theorem -- proved by Marchenko and Pastur in \cite{mar67} -- states that the weak limit of the empirical measure of the eigenvalues of $\frac1n XX^T$ is given by $\tilde{\mu}_a$ as $\frac p n \rightarrow a > 0$, where $\tilde{\mu}_a$ is defined below. In the case of the present paper $\mu_c$ is the weak limit of the empirical measure of the eigenvalues of $\frac1p XX^T$. \item[3.] The matrix theoretical Marchenko-Pastur distribution with parameter $a>0$ is given by $$ \tilde{\mu}_a(A)=\begin{cases} \left(1-\frac1a\right) \delta_0 + \tilde{\nu}_a(A) , & \mbox{ if } a\in(0,1) \\ \tilde{\nu}_a(A) & \mbox{ if } a\geq 1\end{cases} \ \ \ A\in\mathcal{B}(\mathbb{R}). $$ with $\tilde{\nu}_a$ being absolutely continuous with density $$ d\tilde{\nu}_a(x)= \frac{\sqrt{(x-\tilde{x}_-)(\tilde{x}_+-x)}}{2\pi ax}\mathbf{1}_{[\tilde{x}_-,\tilde{x}_+]}(x)dx $$ where $\tilde{x}_\pm=(1\pm\sqrt{a})^2$. 
As mentioned before this version of the Marchenko Pastur arises when the zeros of $\ell_p^{(\alpha_p)}(z)$ are normalized by a factor of $(p+\alpha_p)^{-1}$. The connection between $d\mu_c$ and $d\tilde{\mu}_a$ is the following: \begin{eqnarray} a&=&\frac1{c+1}\,, \\ \mu_c&=& \tilde{\mu}_a\circ g^{-1} \,. \end{eqnarray} where $g(x)=(c+1)x$ for $x\in\mathbb{R}$. On the other hand it is known that the moments of $\tilde{\mu}_a$ are given by \begin{equation} \int x^k d\tilde{\mu}_a(x)=\sum_{j=1}^k \frac 1 k\binom{k}{j}\binom{k}{j-1} a^{j-1}, \end{equation} hence the moments of $\mu_c$ can be calculated as $$ \int x^{k}d\mu_c=\sum_{j=1}^{k}\frac1k\binom{k}{j}\binom{k}{j-1}(c+1)^{k-j+1} .$$ \end{itemize} \end{remark} \begin{proof}[of the Theorem] Let $\ell^{(\alpha)}_p(z):=(-1)^pp!L_p^{(\alpha)}(z)$ denote the monic version of $L_p^{(\alpha)}(z)$ then \begin{align}\label{monic_lag_coeff} \ell_p^{(\alpha)}(z)=\sum_{j=0}^{p}{(-1)^j \frac{(p)_j(\alpha+p)_j}{j!}z^{p-j}} \end{align} with $(\beta)_k=\beta(\beta-1)\cdots(\beta-k+1)$ for $k>0$. Note that if $\beta$ is a positive integer and $k>\beta$ then $(\beta)_k=0$. Thus it follows from (\ref{monic_lag_coeff}) that for $\alpha \in \{ -p+1, -p+2, \dots , -2, -1\}$ \begin{equation}\label{Lag_neg_par} \ell_p^{(\alpha)}(z)=z^{-\alpha} \ell_{p+\alpha}^{(-\alpha)}(z)\,. \end{equation} This means that in this case zero is a root of $\ell_p^{(\alpha)}(z)$ with multiplicity $-\alpha$ and the other $p+\alpha$ roots are given by the Laguerre-polynomial $L_{p+\alpha}^{(-\alpha)}(z)$ of degree $p+\alpha$. \begin{itemize} \item[1.] Let us first consider the case when $\alpha_p \geq 0$ for all $p$, which also implies $\lim \alpha_p / p = c \geq 0$. 
The recursion of the Laguerre-polynomials for arbitrary parameter $\alpha>-1$ is \begin{equation}\label{Lag_recursion} a_pL_{p+1}^{(\alpha)}(z)=(b_p-z)L_p^{(\alpha)}(z)-c_pL_{p-1}^{(\alpha)}(z), \end{equation} where $a_p=p+1$, $b_p=2p+\alpha+1$ and $c_p=p+\alpha$ and also \begin{eqnarray}\label{Lag_recursion_2} pL_p^{(\alpha)}(z)& = & (p+\alpha)L_{p-1}^{(\alpha)}-z L_{p-1}^{(\alpha+1)}(z) \, \end{eqnarray} \if 0\begin{equation} L_p^{(\alpha)}(x)=\sum_{j=0}^p{(-1)^j\binom{p+\alpha}{p-j}\frac{1}{j!}x^j}. \end{equation} \fi These polynomials are known to be orthogonal with respect to the measure $z^\alpha \mathrm{e}^{-z}\mathbf{1}_{[0,\infty)}(z)dz$, which implies that all the roots of $L_p^{(\alpha)}(x)$ lie in the interval $[0,\infty)$ and hence the sum of the $k^{th}$ power of its roots is positive. Furthermore \begin{align} \label{Lag_diff} \frac{d}{dz}L_p^{(\alpha_p)}(z)= -L_{p-1}^{(\alpha_p+1)}(z) \,. \end{align} implying, after proper algebraic transformations, that \begin{equation} \frac{d}{dz}\widehat{\ell}^{(\alpha)}_p(z) = -(\alpha+p)p \widehat{\ell}^{(\alpha)}_{p-1}(z) \end{equation} where $\widehat{\ell}_p^{(\alpha)}(z)=z^p\ell_p^{(\alpha)}(z^{-1})$. Applying this for $\alpha = \alpha_p$ we obtain that \begin{equation}\label{f_M_kapcs} \frac1p\mathcal{M}^{(\alpha_p)}_p\left(\frac{z}{p}\right) = \frac {\alpha_p+p} p \frac{z\widehat{\ell}_{p-1}^{(\alpha_p)}(z/p)}{\widehat{\ell}_p^{(\alpha_p)}(z/p)}+1 \end{equation} Also from recursion (\ref{Lag_recursion}) we get \begin{align} \label{conjlag-rec} \widehat{\ell}_{p+1}^{(\alpha)}(z)=[1-(\alpha+2p+1)z]\widehat{\ell}_{p}^{(\alpha)}(z)-z^2(p+\alpha)p\widehat{\ell}_{p-1}^{(\alpha)}(z). \end{align} Since the largest zero of $L_p^{(\alpha)}$ is no greater than $4p + 2\alpha +3$ (see \cite{sze39}) we obtain that $\widehat{\ell}_p^{(\alpha)}(z)> 0$, if $0\leq z < \frac{1}{4p+2\alpha + 3}$. 
In this case one has that $$ \frac{\widehat{\ell}_{p-1}^{(\alpha)}(z)}{\widehat{\ell}_{p}^{(\alpha)}(z)}\leq \frac{1-(\alpha+2p+1)z}{z^2p(\alpha+p)}\leq \frac {1}{(p+\alpha)pz^2}. $$ Using the computations above we get that \begin{align} \label{diff_bd} \frac d {dz} \frac{\widehat{\ell}_{p-1}^{(\alpha)}(z)}{\widehat{\ell}_p^{(\alpha)}(z)}&= \frac{(\widehat{\ell}_{p-1}^{(\alpha)}(z))'\widehat{\ell}_p^{(\alpha)}(z)-(\widehat{\ell}_p^{(\alpha)}(z))'\widehat{\ell}_{p-1}^{(\alpha)}(z)}{(\widehat{\ell}_p^{(\alpha)}(z))^2}= \nonumber\\ &=(\alpha+p)p\left[ \left(\frac{\widehat{\ell}_{p-1}^{(\alpha)}(z)}{\widehat{\ell}_p^{(\alpha)}(z)}\right)^2-\frac{\widehat{\ell}_{p-2}^{(\alpha)}(z)}{\widehat{\ell}_p^{(\alpha)}(z)} \right] +(\alpha+2p-1)\frac{\widehat{\ell}_{p-2}^{(\alpha)}(z)}{\widehat{\ell}_p^{(\alpha)}(z)}. \end{align} Since in the present case $\mathcal{M}^{(\alpha)}_p(z)$ is a convex, monotonically increasing function for $z\geq 0$, and furthermore $\mathcal{M}^{(\alpha)}_p(0)=1$, one has \begin{align}\label{int-bd} z\frac{d}{dz}\frac{\widehat{\ell}_{p-1}^{(\alpha)}(z)}{\widehat{\ell}_p^{(\alpha)}(z)}\leq \int_0^{2z}{\frac{d}{dt}\frac{\widehat{\ell}_{p-1}^{(\alpha)}(t)}{\widehat{\ell}_p^{(\alpha)}(t)}dt}=\frac{\widehat{\ell}_{p-1}^{(\alpha)}(2z)}{\widehat{\ell}_p^{(\alpha)}(2z)}-1\leq \frac{1}{4(\alpha+p)pz^2} \end{align} and so according to (\ref{diff_bd}) and to (\ref{int-bd}) we have \begin{equation}\label{bdd} \left|\left( \frac{\widehat{\ell}_{p-1}^{(\alpha)}(z/p)}{\widehat{\ell}_p^{(\alpha)}(z/p)}\right)^2-\frac{\widehat{\ell}_{p-2}^{(\alpha)}(z/p)}{\widehat{\ell}_p^{(\alpha)}(z/p)}\right|\leq \frac{p^3}{4(\alpha+p)^2p^2z^3}+\frac{\alpha+2p-1}{(\alpha+p)^2p^2}\frac{p^4}{(p-1)(\alpha+p-1)z^4}. \end{equation} Let $f^{(\alpha)}_p(z):=\frac{\widehat{\ell}_{p-1}^{(\alpha)}(z/p)}{\widehat{\ell}_p^{(\alpha)}(z/p)}$, for $p\geq 1$. 
According to (\ref{bdd}) we have \[ f^{(\alpha)}_p(z)f^{(\alpha)}_{p-1}\left(z\frac{p-1}p\right)-(f^{(\alpha)}_p)^2(z)\rightarrow 0 \] if $p\rightarrow \infty$ and $\alpha=\alpha_p \geq 0$, especially $f^{(\alpha_p)}_p(z)f^{(\alpha_p)}_{p-1}\left((p-1)z/p\right)-(f^{(\alpha_p)}_p)^2(z)\rightarrow 0$ if $\frac {\alpha_p} {p} \rightarrow c $ as $p\rightarrow \infty$. Applying (\ref{conjlag-rec}) to $\widehat{\ell}_{p}^{(\alpha)}$ one has \begin{eqnarray}\label{f_rec} 1& = & \left(1-z\frac{\alpha+2p-1}{p}\right)f^{(\alpha)}_p(z) - \nonumber \\ & & -z^2\frac{(p-1)(\alpha+p-1)}{p^2}f^{(\alpha)}_p(z)f^{(\alpha)}_{p-1}\left(z\frac{p-1}{p}\right)\,, \end{eqnarray} hence we get that the accumulation points of $(f^{(\alpha_p)}_p(z))_{p\in\mathbb{N}}$ as $\alpha_p/p \rightarrow c$ satisfy the following equation in $\xi$ \begin{equation}\label{fix_eq} 1= \left[1-(c+2)z\right]\xi-(c+1)z^2 \xi^2. \end{equation} The solutions of this equation are $$ \xi_{\pm}=\frac{1-(c+2)z\pm \sqrt{[1-(c+2)z]^2-4(c+1)z^2}}{2(c+1)z^2}\,. $$ Let us introduce the notation $$ f_{c-}(z):=\frac{1-(c+2)z- \sqrt{[1-(c+2)z]^2-4(c+1)z^2}}{2(c+1)z^2}. $$ In order to find the appropriate root let us look at the map $\xi\rightarrow \eta_c(\xi, z)$ for a fixed $z$ defined by $$ 1= \left[1-(c+2)z\right]\eta_c(\xi, z)-(c+1)z^2 \xi\eta_c(\xi, z) $$ and hence $$ \eta_c(\xi, z)= \frac{1}{1-(c+2)z-(c+1)z^2\xi}. $$ Note that the fixed points of this mapping are the solutions of (\ref{fix_eq}). In parallel with this for any fixed $\alpha \geq 0$ and $p\geq 1$ consider the following equation in $\xi$: \begin{equation}\label{alpha_p_eq} 1 = \left[1-z\frac{\alpha+2p-1}{p}\right] \xi - z^2 \frac{(p-1)(\alpha+p-1)}{p^2}\xi^2\,. \end{equation} Denote by $\zeta_p^{(\alpha)}$ the largest nonnegative $z$ value, for which both roots of this second-order equation are non-negative, i.e. 
\[ \zeta_p^{(\alpha)} = \sup \left\{ z \, \mid \, z\frac{\alpha + 2p -1}{p} \leq 1,\quad 4z^2\frac{(p-1)(\alpha+p-1)}{p^2} \leq \left(1-z\frac{\alpha+2p-1}{p}\right)^2 \right\} \] Short calculation shows that $\zeta_p^{(\alpha)}= \left(a_p^{(\alpha)}+2\sqrt{b_p^{(\alpha)}}\right)^{-1}$, where $a_p^{(\alpha)}=\frac{\alpha+2p-1}{p}$ and $b_p^{(\alpha)}=\frac{(p-1)(\alpha+p-1)}{p^2}$. Now for $0 \leq z < \zeta_p^{(\alpha)}$ define the map $\xi \rightarrow \eta_p^{(\alpha)}(\xi, z)$ as the solution to $$ 1=\left[1-z\frac{\alpha+2p-1}{p}\right]\eta_p^{(\alpha)}(\xi, z)-z^2\frac{(p-1)(\alpha+p-1)}{p^2}\eta^{(\alpha)}_p(\xi, z)\xi. $$ Thus $$ \eta_p^{(\alpha)}(\xi, z)=\frac{1}{1-z\frac{\alpha+2p-1}{p}-z^2\frac{(p-1)(\alpha+p-1)}{p^2} \xi} $$ Observe that $\eta_p^{(\alpha_p)}(\xi,z)\xrightarrow[p\rightarrow\infty]{} \eta_c(\xi,z) \ \ \ \forall (\xi,z)\in\mathbb{R}^2$. \\ For the small positive values of $\xi$ the functions $\eta_c(\xi, z)$ and $\eta_p^{(\alpha)}(\xi, z)$ are increasing. Let us denote by $g_{p-}^{(\alpha)}(z)$ the smaller fixed point of the mapping $\eta_p^{(\alpha)}(\xi,z)$ and observe that for $0\leq z < \zeta_p^{(\alpha)}$ the inequality $\eta_{p}^{(\alpha)}(0, z) > 0$ holds true, thus for $0 \leq \xi < g_{p-}^{(\alpha)}(z)$ we have that \[ \xi < \eta_p^{(\alpha)}(\xi, z) \leq g_{p-}^{(\alpha)}(z)\,. \] We are going to prove by induction on $p$ that for any fixed $\alpha \geq 0$ and $0\leq z < \zeta_p^{(\alpha)}$ the inequality \begin{equation}\label{ind_stat} f_p^{(\alpha)}(z) \leq g_{p-}^{(\alpha)}(z) \end{equation} holds true. It is easy to check that for $p=1$ we have that $\zeta_{1}^{(\alpha)} = \frac{1}{\alpha+1}$ and \[ g_{1-}^{(\alpha)}(z) = f_1^{(\alpha)} (z) = \frac{1}{1-z(\alpha+1)}\,. 
\] On the other hand a straightforward calculation gives that if $0\leq z < \zeta_p^{(\alpha)}$ then $z\frac{p-1}{p} < \zeta_{p-1}^{(\alpha)}$, thus using the induction hypothesis for $p-1$ we obtain that \begin{equation}\label{ind_hyp} f_{p-1}^{(\alpha)}\left(z\frac{p-1}{p}\right) \leq g_{(p-1)-}^{(\alpha)}\left(z\frac{p-1}{p}\right)\,. \end{equation} The latter one is the smaller fixed point of the mapping \[ \eta_{p-1}^{(\alpha)}\left(\ \cdot\ , z\frac{p-1}{p}\right):\xi \rightarrow \frac{1}{1-z\frac{p-1}{p}\frac{\alpha+2p-3}{p-1}-z^2\frac{(p-1)^2}{p^2}\frac{(p-2)(\alpha+p-2)}{(p-1)^2} \xi}. \] On the other hand \begin{align} \eta_{p-1}^{(\alpha)}\left(\xi,z\frac{p-1}{p}\right) &= \frac{1}{1-z\frac{p-1}{p}\frac{\alpha+2p-3}{p-1}-z^2\frac{(p-1)^2}{p^2}\frac{(p-2)(\alpha+p-2)}{(p-1)^2} \xi} \nonumber\\ & = \frac{1}{1-z\frac{\alpha+2p-3}{p}-z^2\frac{(p-2)(\alpha+p-2)}{p^2} \xi} \nonumber\\ &\leq \frac{1}{1-z\frac{\alpha+2p-1}{p}-z^2\frac{(p-1)(\alpha+p-1)}{p^2} \xi} = \eta_p^{(\alpha)}(\xi, z)\,, \end{align} proving that \begin{equation}\label{major} g_{(p-1)-}^{(\alpha)} \left(z\frac{p-1}{p}\right) \leq g_{p-}^{(\alpha)}(z)\,. \end{equation} But equation (\ref{f_rec}) implies that for $\xi = f_{p-1}^{(\alpha)}\left(z\frac{p-1}{p}\right)$ \[ \eta_p^{(\alpha)} (\xi, z) = f_p^{(\alpha)}(z)\,. \] Comparing (\ref{ind_hyp}) and (\ref{major}) we obtain that for $0 \leq z < \zeta_p^{(\alpha)}$ \[ f_p^{(\alpha)}(z) \leq g_{p-}^{(\alpha)}(z), \] proving the induction step.
\if 0 Note that $$ z< \min \left\{\left(a_p^{(\alpha)}\right)^{-1},\frac1{a_p^{(\alpha)}+2\sqrt{b_p^{(\alpha)}}} \right\}=\frac1{a_p^{(\alpha)}+2\sqrt{b_p^{(\alpha)}}} \Rightarrow z<\zeta_p^{(\alpha)} $$ where $a_p^{(\alpha)}=\frac{\alpha+2p-1}{p}$ and $b_p^{(\alpha)}=\frac{(p-1)(\alpha+p-1)}{p^2}$.\fi Since $a_p^{(\alpha_p)}\rightarrow c+2$, $b_p^{(\alpha_p)}\rightarrow c+1$ and so $\zeta_p^{(\alpha_p)} \rightarrow \frac{1}{(\sqrt{c+1}+1)^2}$ if $\alpha_p/p\rightarrow c$ as $p \rightarrow \infty$, the following inclusion holds for large enough $p$: $$ \left[0, \frac{1}{2(\sqrt{c+1}+1)^2}\right) \subset \left[0, \zeta_p^{(\alpha_p)}\right). $$ Hence for $0\leq z<\frac{1}{2(\sqrt{c+1}+1)^2}$ we have that $ g_{p-}^{(\alpha_p)}(z)\xrightarrow[p\rightarrow \infty]{} f_{c-}(z) $, thus inequality (\ref{ind_stat}) implies that \[ \lim_p f_p^{(\alpha_p)}(z)=f_{c-}(z). \] Now let $\mathfrak{M}_c(z)=\lim_p\frac1p \mathcal{M}_p^{(\alpha_p)}\left(\frac z p \right)$. According to (\ref{f_M_kapcs}) we have that \begin{eqnarray} \mathfrak{M}_c(z)=(c+1)zf_{c-}(z)+1 \nonumber \end{eqnarray} from which one has \begin{align} \mathfrak{M}_c(z)= \frac{1-cz-\sqrt{[1-(c+2)z]^2-4(c+1)z^2}}{2z} = \frac{1-cz-\sqrt{(1-cz)^2-4z}}{2z}. \end{align} \item[2.] Consider now the case when $\alpha_p\in\{-p+1,\ldots,-1\}$ for all $p$ in such a way that $\lim \alpha_p / p = c$ exists and $c > -1$. Obviously this implies $c \leq 0$. In this case the recursion (\ref{Lag_recursion}) is still valid, but orthogonality (with respect to $z^{\alpha_p}e^{-z}\mathbf{1}_{[0,\infty)}(z)\,dz$) cannot be assured.
According to (\ref{monic_lag_coeff}) one has that $$ \ell_p^{(\alpha_p)}(z)=z^{-\alpha_p}\ell_{p+\alpha_p}^{(-\alpha_p)}(z) $$ and so $$\widehat{\ell}_p^{(\alpha_p)}(z)=z^p \ell_p^{(\alpha_p)}(1/z)= z^p z^{\alpha_p} \ell_{p+\alpha_p}^{(-\alpha_p)}(1/z)=z^{p+\alpha_p}\ell_{p+\alpha_p}^{(-\alpha_p)}(1/z)=\widehat{\ell}_{p+\alpha_p}^{(-\alpha_p)}(z), $$ hence $$ \mathcal{M}_p^{(\alpha_p)}(z)=-z\frac{\frac{d}{dz}\widehat{\ell}_{p+\alpha_p}^{(-\alpha_p)}(z)} {\widehat{\ell}_{p+ \alpha_p}^{(-\alpha_p)}(z)}+p=\mathcal{M}_{p+\alpha_p}^{(-\alpha_p)}(z)-\alpha_p, $$ therefore we immediately get that $\mathcal{M}_p^{(\alpha_p)}(z)$ is a monotonically increasing convex function if $z\geq 0$ and $$ \frac 1p \mathcal{M}_p^{(\alpha_p)}\left( \frac z p \right)=\frac {p+\alpha_p} {p}\frac 1 {p+\alpha_p} \mathcal{M}_{p+\alpha_p}^{(-\alpha_p)}\left(\frac {p+\alpha_p}{p} \frac z {p+\alpha_p} \right)-\frac{\alpha_p} p. $$ Due to $\alpha_p/p\rightarrow c $ we have $-\frac{\alpha_p}{p+\alpha_p} \rightarrow -\frac{c}{c+1}$ and $$ \frac 1 {p+\alpha_p} \mathcal{M}_{p+\alpha_p}^{(-\alpha_p)}\left(\frac z {p+\alpha_p} \right) \rightarrow \mathfrak{M}_{-\frac{c}{c+1}}(z). $$ Since $f_{p+\alpha_p}^{(-\alpha_p)} $ is a sequence with uniformly bounded derivatives (according to (\ref{int-bd})) one has that for $0\leq z < \left[2(\sqrt{c+1}+1)^2\right]^{-1}$ \begin{eqnarray} \mathfrak{M}_c(z)=(c+1)\mathfrak{M}_{-\frac{c}{c+1}}((c+1)z)-c \nonumber \end{eqnarray} implying that $$ \mathfrak{M}_{c}(z)=\frac{1-cz-\sqrt{(1-cz)^2-4z}}{2z}.$$ \end{itemize} \if 0 According to \cite{Mar01} we have that the Stieltjes transform of $\mu_c$ is given by $$ S_{c}(z)=\int \frac1{z-x}\, d\mu_c(x)=\frac{z-c-\sqrt{(z-c)^2-4z}}{2z} $$ and due to the fact that $$ \mathfrak{M}_c(z)=\frac1z S_c\left( \frac 1 z \right) $$ we get that $\mathfrak{M}_c(z)$ is the moment generating function of $\mu_c$.
\fi \if 0 It is easy to show that this is the generating function of the Marchenko--Pastur distribution, since the Stieltjes transform $\mathfrak{S}_c$ of it is given by $$ \mathfrak{S}_c(z)=\frac{-z+c+\sqrt{z^2-2z(c+2)-c^2}}{2z} $$ and according to the connection between the Stieltjes transform and the moment generating function we have $$ -\frac1z \mathfrak{S}_c \left( \frac1z\right)=\mathfrak{M}_c(z). $$ \fi \noindent Since $\mathfrak{M}_c(z)$ coincides with the generating function of the moments of $\mu_c$, i.e. $$ \mathfrak{M}_c(z)=\sum_{k\geq 0 } \int x^k \,d\mu_c(x) \cdot z^k \quad \text{for } z\in\left[0,\tfrac{1}{2}(\sqrt{c+1}+1)^{-2}\right), $$ and $\mu_c$ is fully determined by its moments, we have that the weak limit of the empirical measure of the normalized zeros of $\ell_p^{(\alpha_p)}(z)$ is $\mu_c$. Theorem \ref{MP_weaklimit} is hereby proved. \end{proof} \begin{corollary} Theorem \ref{MP_weaklimit} also implies the convergence of the moments of the empirical distribution of the normalized roots of $\ell_p^{(\alpha_p)}$. In other words, if $m_p^{(\alpha_p)}$ denotes the empirical distribution of the normalized roots of $\ell_p^{(\alpha_p)}$, then $$ \int x^k \,dm_p^{(\alpha_p)}(x) \xrightarrow[p\rightarrow\infty]{} \int x^k \,d\mu_c(x)=\sum_{j=1}^k \frac1k \binom{k}{j}\binom{k}{j-1} (c+1)^{k-j+1} \quad \forall k\geq0 $$ when $\frac{\alpha_p}{p}\rightarrow c$. \end{corollary} \section{The sum of the $k$-th powers of the roots of $L_{p}^{(\alpha)}$} The following theorem shows that the connection between the root distribution of the Laguerre polynomials and the Marchenko--Pastur distribution is not only an asymptotic connection, but in a ``dominating way'' it holds for large enough $p$ values as well. \begin{theorem}\label{highest} Let $p\in\mathbb{N}$, $M^{(\alpha)}_p(k):=\sum_{j=1}^p{(\xi^{(\alpha)}_{p,j})^k}$, where $0\leq \xi_{p,1}^{(\alpha)}<\xi_{p,2}^{(\alpha)}<\ldots<\xi_{p,p}^{(\alpha)}<\infty$ denote the roots of $L_p^{(\alpha)}$.
Then for $\alpha\in\mathbb{R}$, $\alpha+p>k-1$ one has $$ M^{(\alpha)}_p(k)= \sum_{j=1}^{ k} \frac1k\binom k j \binom {k}{j-1} p^{j} (\alpha+p)^{k-j+1}+f(\alpha+p,p) $$ where $f$ is a polynomial in two variables with $\deg f\leq k$.\\ In case $\alpha+p\leq k-1$ one has that the coefficient of the dominating term in $M^{(\alpha)}_p(k)$ is less than or equal to the quantity above. \mathrm{e}nd{theorem} \begin{proof} Let us consider the Newton identities $\sum_{j=0}^{k-1} M^{(\alpha)}_n(k-j)a_{p-j}=-ka_{n-k},$ where $a_{p-j}$ denote the $j^{th}$ coefficient of $\mathrm{e}ll_p^{(\alpha)}(x)$. It is known that $a_j=(-1)^{p+j} p!\binom{\alpha+p}{p-j} \frac{1}{j!}$ (see e.g. \cite{sze39}), hence $$ a_{p-j}=(-1)^{j}\frac{(\alpha+p)_j(p)_j}{j!}. $$ Writing the Newton identities in matrix form we obtain that \begin{align}\label{Newtonmatrix} \begin{bmatrix} 1 & 0 & 0 & \ldots & 0 \\ a_{p-1} & 1 & 0 & \ldots &0 \\ a_{p-2} & a_{p-1} & 1 &\ldots &0 \\ & & & \ddots & \\ a_{p-(k-1)} & a_{p-(k-2)} & a_{p-(k-3)} & \ldots & 1 \mathrm{e}nd{bmatrix}\begin{bmatrix} M^{(\alpha)}_p(1) \\ M^{(\alpha)}_p(2) \\ M^{(\alpha)}_p(3) \\ \vdots \\ M^{(\alpha)}_p(k) \mathrm{e}nd{bmatrix}= \begin{bmatrix} -a_{p-1} \\ -2a_{p-2} \\ -3a_{p-3} \\ \vdots \\ -ka_{p-k} \mathrm{e}nd{bmatrix}\,. \mathrm{e}nd{align} Thus \begin{align*} M^{(\alpha)}_p(k)=\det \begin{bmatrix} 1 & 0 & 0 &\ldots & -a_{p-1} \\ a_{p-1} & 1 & 0 & \ldots & -2a_{p-2} \\ a_{p-2} & a_{p-1} & 1 & \ldots & -3a_{p-3} \\ & & & \ddots & \\ a_{p-(k-1)} & a_{p-(k-2)} & a_{p-(k-3)} & \ldots & -ka_{p-k} \mathrm{e}nd{bmatrix} \mathrm{e}nd{align*} according to Cramer's rule and the fact that the determinant of the matrix in (\ref{Newtonmatrix}) is $1$. 
In general, let us introduce the following notation: $$ A(k,l):=\det \begin{bmatrix} 1 & 0 & \ldots & (\alpha+p)_lp \\ -(\alpha+p)p & 1 & \ldots & -2\frac{(\alpha+p)_{l+1}(p)_{2}}{2} \\ & & \ddots & \\ \frac{(-1)^{k-1}(\alpha+p)_{(k-1)}(p)_{k-1}}{(k-1)!} &\frac{ (-1)^{k-2}(\alpha+p)_{k-2}(p)_{k-2}}{(k-2 )!} & \ldots & -k \frac{(-1)^k(\alpha+p)_{l+k-1} (p)_{k}}{k!} \mathrm{e}nd{bmatrix}, $$ for $k\geq 2, l \geq 1$ and $A(1,l)=(p+\alpha)_lp$ for $l\geq 1$. With this notation $A(k,1)=M^{(\alpha)}_p(k)$ and it can be proved by induction that for $k \geq 2$ \begin{align} \label{A_recursion}A(k,l)=\sum_{r=1}^{l}{p (\alpha+p-r)_{l-r} A(k-1,r)} +A(k-1,l+1), \mathrm{e}nd{align} In fact, for $k\geq 3$ let us subtract $p(\alpha+p)_l$ times the first column of the matrix in the definition of $A(k,l)$ from the last of the same. The $j^{th}$ element of the last column obtained this way can be written as \begin{align*} &-(-1)^{j}\frac{(\alpha+p)_{l+j-1}(p)_{j}}{(j-1)!}-(-1)^{j-1}\frac{(\alpha+p)_l p(\alpha+p)_{j-1}(p)_{j-1}}{(j-1)!}=\\ &\hspace{5mm}=-(-1)^{j-1}\frac{(\alpha+p)_{j-1}(p)_{j-1}}{(j-1)!}[(\alpha+p-j+1)_l(p-j+1)-(\alpha+p)_lp]=\\ &\hspace{5mm}=(-1)^{j-1}\frac{(\alpha+p)_{j-1}(p)_{j-1}}{(j-1)!}(j-1)\left(\sum_{r=1}^l {(\alpha+p-j+1)_r(\alpha+p-r)_{l-r}}p+1 \right) \mathrm{e}nd{align*} due to $$ \prod_{i=1}^m{c_i}-\prod_{i=1}^m{d_i}=\sum_{h=1}^{m}{\prod_{1\leq e<h}c_e(c_{h}-d_{h})\prod_{m\geq e>h}d_e}, $$ with $m=l+1$, $c_i=(\alpha+p-j-i+2)$, $d_i=(\alpha+p-i+1) $ for $1\leq i \leq l$ and $c_{l+1}=(p-j+1)$, $d_{l+1}=p$. This proves the recursion in (\ref{A_recursion}) for $k\geq 3$. On the other hand \begin{align} & A(2,l)=\det \begin{bmatrix} 1 & (\alpha+p)_lp \\ -(\alpha+p)p & -(\alpha+p)_{l+1}(p)_2 \mathrm{e}nd{bmatrix}= (\alpha+p)_lp(\alpha+p-l+lp) \nonumber \\ & = \sum_{r=1}^l p (\alpha+p -r)_{l-r} (\alpha +p)_r p + (\alpha + p)_{l+1} p \nonumber \mathrm{e}nd{align} proving (\ref{A_recursion}) for $k=2$. 
Note that in case $k\leq p+\alpha$ we have $ (\alpha+p)_l>0 $ for $0\leq l \leq k$, and $(p+\alpha)_l=(p+\alpha)^l+O((p+\alpha)^{l-1})$ hence the multiplier in the sum in (\ref{A_recursion}) does not change the (positive) coefficient - nor its sign - of the highest order terms of $A(k-1,r)$. We are going to prove that viewing $A(k,l)$ as a polynomial of the variables $p$ and $p+\alpha$ one has $\deg A(k,l)=k+l$. The proof goes by induction on $k$. For $k=1$ and $l$ arbitrary this is an immediate consequence of its definition. In fact -- assuming the induction hypotesis for $k-1$ and $l$ arbitrary -- we have that \begin{align*}\deg p(p+\alpha)_{l-r} A(k-1,r)&= k-1+r+l-r+1=k+l \quad \hbox{for} \ 1\leq r\leq l \leq k \leq p+\alpha \\ \deg A(k-1,l+1)=k+l, \mathrm{e}nd{align*} and using that there is no cancellation in the highest degree terms we obtain that $\deg A(k,l)=k+l$, hence we immediately get that $\deg_pM_p^{(\alpha)}(k)=k+1$. \\ Computing the leading coefficient in $p$ and $\alpha+p$ of $A(k,1)$ leads to the following graph theoretical question: Let $G=((\mathbb{Z}_{\geq0})^2,\overrightarrow{E})$ be the following graph: there is a directed arrow from $(a_1,b_1)$ pointing to $(a_2,b_2)$ if and only if $a_2=a_1+1$ and $b_2\geq b_1-1$. We shall also use the word edge instead of arrow in case we are not interested in its direction. We will call an edge $(a,b_1)\rightarrow (a+1,b_2)$ an upward edge if $ b_2\geq b_1$, if $b_2=b_1-1$ we will refer to it as a downward edge. The height of an edge $((a,b_1),(a+1,b_2))$ is going to be defined as $b_2-b_1$, total height of a set of edges is the sum of their heights. Let us call a path ending in $(k,l)$ for $k\geq 1,l\geq 1$ legal if it starts in the origin and after that it stays strictly above the line $y=0$. 
Since $\deg_p A(k,l)=k+l$, it can be written as $A(k,l)=\sum_{j=0}^{k+l}{a^{(k,l)}_j p^j(\alpha+p)^{k+l-j}}+\text{L.O.T.}$, for some $a_j^{(k, l)}$, $j=0, \dots , k+l$, where $\text{L.O.T.}$ means lower order terms. But the recursion (\ref{A_recursion}) implies that the degree of $p$ in $A(k, l)$ cannot be larger then $k$ and it is at least $1$, for any $l\geq 1$, thus $A(k,l)=\sum_{j=1}^{k}{a^{(k,l)}_j p^j(\alpha+p)^{k+l-j}}+\text{L.O.T.}$ Using the recursion (\ref{A_recursion}) again we obtain that \[ a_j^{(k, l)} = \sum_{h=1}^l a_{j-1}^{(k-1, h)} + a_j^{(k-1, l+1)}\,. \] Our claim is that $a_j^{(k,l)}$ is equal to the number of legal paths $b_j^{(k,l)}$ ending in $(k,l)$ with exactly $j$ upward edges. For $k=1$, $ l \geq 1$ we have that $A(1,l)=p(\alpha+p)_l$ thus the highest order term is $p(\alpha+p)^l$ and so $a_1 ^{(1,l)}= 1$, while $a_j^{(1, l)} = 0$ for $j \neq 1$ obviously coinciding with the values $b_j^{(1,l)}$, $j\geq 0, l\geq 1$ since in this case the path consists of one single upward edge. For the induction step $k-1\mapsto k$ consider the following: Each of the legal paths ending in $(k,l)$ has to go through exactly one of the points $(k-1,r)$ $1\leq r \leq l+1$. A path with exactly $j$ upward edges going through the points $(k-1,r)$ for $1\leq r \leq l$ should have $j-1$ upward edges before these points, while a path going through $(k-1,l+1)$ has $j$ upward edges before this point. Therefore the number of legal paths ending in $(k,l)$ and having $j$ upward edges is the sum of the number of legal paths ending in $(k-1,r)$ with $1\leq r\leq l$ with $j-1$ upward edges plus the number of legal paths ending in $(k-1,l+1)$ with $j$ upward edges. In other words: $$ b^{(k,l)}_j=\sum_{r=1}^l{b_{j-1}^{(k-1,r)}} + b_{j}^{(k-1,l+1)}. $$ Thus the number of legal paths satisfies the same recursion as the coefficients in the sequence $A(k, l)$. 
Since for $k=1$ they are equal the induction argument gives that $a_j^{(k, l)} = b_j^{(k, l)}$ for $j=1, \dots k$, $k\geq 1$, $l\geq 1$. Now let us turn our attention to computing the coefficients of the highest order term of $M_p^{(\alpha)}(k)=A(k,1)=\sum_{j=1}^{k}a_j^{(k,1)}p^j(\alpha+p)^{k-j+1}+\text{L.O.T} $. As we proved before the coefficient $a_j^{(k,1)}$ is given by the number of legal paths ending in $(k,1)$ with $j$ upward edges. In this case there are $k-j$ downward edges with total height $-(k-j)$ hence the total height of the upward edges is $k-j+1$. Since the length of the legal path from the origin to $(k,1)$ is $k$ there are $\binom{k}{j}$ possibilities to choose the positions of the $j$ upward edges. On the other hand the total height of the upward edges is $k-j+1$, and there are $\binom{k}{j-1}$ ways writing it as a sum of $j$ non-negative numbers when the sequence of the summands matters. Choosing these numbers as the heights of the upward edges we obtain a path from the origin to $(k, 1)$ which is not necessarily legal, since they can cross the line $y=0$. For such a given path let $(x,y)$ denote the node of the path with the largest first coordinate such that its second coordinate is not greater than the second coordinate of any other node of the path (i.e. the latest "global minimum" of the path). By placing this node with the tail of the path in the origin this new path is a legal path ending in $(k-x,1+y)$. Taking the first part of the original path (connecting the origin with $(x,y)$ ) and gluing it to $(k-x,1+y)$ we will get a legal path ending in $(k,1)$. We will say that two paths are equivalent if the cut-and-glue process described above results in the same legal path. The equivalence class of a path consists of its periodic horizontal translations, so in each equivalence class there are $k$ paths. 
Since the cut-and-glue process gives the same legal path for each equivalence class, the number of legal paths ending in $(k,1)$ having $j$ upward edges is given by $ \frac 1 k \binom{k}{j}\binom{k}{j-1} $, hence $$M_p^{(\alpha)}(k)=\sum_{j=1}^{k} \frac{1}{k} \binom{k}{j}\binom{k}{j-1}p^{j}(\alpha+p)^{k-j+1}+\text{L.O.T.}, $$ and so Theorem \ref{highest} is proved. \end{proof} \begin{remark}If $\alpha/p=c$ with $c\in(-1,\infty)$ and $k < \alpha +p + 1$, then \begin{equation} \label{MP_mom} \sum_{l=1}^p{(\xi_{p,l}^{(\alpha)})^k}=\sum_{j=1}^{k} \frac1k\binom{k}{j}\binom{k}{j-1}(c+1)^{k-j+1} p^{k+1} +f(\alpha+p,p), \end{equation} hence we immediately get that $$ \int x^k \,dm_p^{(\alpha_p)}(x)\xrightarrow[p\rightarrow\infty]{}\int x^k \,d\mu_c(x) $$ if $\frac{\alpha_p}{p} \rightarrow c$, for all $k\geq 0$. We also emphasize that even in the case when $\alpha<0$ is not an integer, and thus $L_p^{(\alpha)}(z)$ has complex roots with nonzero imaginary part, the limit relation above holds true. But since now the measure is not concentrated on the real line, this property is not enough for the identification of the limit measure. \end{remark} \end{document}
\begin{document} \begin{abstract} We show that a pseudoeffective ${\mathbb R}$-divisor has numerical dimension 0 if it is numerically trivial on a subvariety with ample normal bundle. This implies that the cycle class of a curve with ample normal bundle is big, which gives an affirmative answer to a conjecture of Peternell. We also give other positivity properties of such subvarieties. \end{abstract} \title{On subvarieties with ample normal bundle} \thispagestyle{empty} A well-established principle in algebraic geometry is that geometric properties of an algebraic variety are reflected in the subvarieties which are in various senses `positively embedded' in it. The primary example is the hyperplane section in a projective embedding of the variety, which gives rise to the notion of an ample divisor. However, in higher codimension it is less clear what it should mean in general for a subvariety to be `ample'. In his book \cite{Har70}, Hartshorne considers several approaches, including the condition that the normal bundle of the subvariety should be an ample vector bundle. Even for divisors this condition is weaker than ampleness, in the sense that it is a condition that concerns a vector bundle on the subvariety itself, rather than a global condition on the ambient variety. Still the condition guarantees certain good properties of the subvariety, e.g., that its cycle class is nef. For a more global definition of ampleness, see \cite{Ott12}. Our first result is the following theorem, which gives a positive answer to a question of Peternell \cite{Pet11}. The theorem essentially says that a pseudoeffective divisor which is numerically trivial on a subvariety with ample normal bundle is very far from being big. \begin{theorem}\label{interiortheorem} Let $X$ be a smooth projective variety over an algebraically closed field of characteristic 0 and let $Y$ be a smooth subvariety of dimension $>0$ with ample normal bundle.
If $D$ is a pseudoeffective ${\mathbb R}$-divisor such that $D|_Y{\mathbf e}quiv 0$, then its numerical dimension $\nu(D)$ is 0. In particular, if $D$ is nef, then $D{\mathbf e}quiv 0$. {\mathbf e}nd{theorem} See Definition |ef{numericaldimension} for the precise definition of numerical dimension of a divisor. In particular, this implies that the Iitaka dimension $\kappa(D)$ is non-positive. Combining this result with the duality theorem of Boucksom--Demailly--Paun--Peternell \cite{BDPP}, we prove the following result about curves with ample normal bundle. The first part of the theorem was also conjectured by Peternell \cite{OP04,Pet08,Pet11}. \begin{theorem}\label{interiortheorem2} Let $X$ be a smooth projective variety over ${\mathbb C}$, let $C$ be a smooth curve with ample normal bundle. Then the cycle class of $C$ is big, i.e., it lies in the interior of the cone of curves, $\overline{{ N}E}(X)$. If in addition $C$ is strictly nef (i.e., $C\operatorname{cd}ot D>0$ for any irreducible divisor $D$), then the cycle class of $C$ lies in the interior of the cone of movable curves, $\overline{{\mathscr M}E}(X)$. In particular, $C$ is numerically equivalent to a ${\mathbb Q}$-linear combination of strongly movable curves. {\mathbf e}nd{theorem} Interestingly, Voisin \cite{Voi08} showed that the corresponding result is false for subvarieties of higher dimensions. More precisely, she gives an examples of smooth projective varieties in any dimension $\ge 4$, containing a codimension 2 subvariety with ample normal bundle, but whose class is in the boundary of the pseudoeffective cone. In these examples, the subvariety deforms in a family covering the ambient variety and the normal bundle is even globally generated. The strictly nef assumption in the second part of the theorem is necessary. Indeed, take any smooth projective variety with a curve with ample normal bundle and blow up a point outside it. 
Then on the blow-up, the preimage of the curve, $C$, has ample normal bundle, but the exceptional divisor satisfies $E\cdot C=0$, so $C$ lies in the boundary of the cone of movable curves. On the other hand, the following theorem says that there can be at most finitely many prime divisors disjoint from $C$. \begin{theorem}\label{finitelymany}Let $X$ be a smooth projective variety over ${\mathbb C}$ and let $Y\subset X$ be a smooth subvariety of dimension at least one with ample normal bundle. Then $Y$ intersects all but finitely many prime divisors on $X$. In fact, the number of such divisors is less than the Picard number of $X$. \end{theorem} All of the above results remain valid if $Y$ is assumed to be a locally complete intersection instead of smooth. Thanks to Fr\'ed\'eric Campana and Burt Totaro for comments and useful discussions. \section{Curves with positivity properties} Let $X$ be a smooth projective variety. An ${\mathbb R}$-divisor is a finite sum $D=\sum \lambda_i D_i$ where $\lambda_i\in {\mathbb R}$ and each $D_i$ is an irreducible divisor in $X$. Write $N^1(X)=\operatorname{Pic}(X)\otimes {\mathbb R}/{\equiv}$ for the N\'eron--Severi group of $X$, i.e., the ${\mathbb R}$-vector space of divisors modulo numerical equivalence. In $N^1(X)$ we define the effective cone ${\mathbf e}ff(X)$ as the cone spanned by effective divisors and the nef cone $\operatorname{Nef}(X)$ the cone of nef divisors, i.e., ${\mathbb R}$-divisors such that $D\cdot C\ge 0$ for every curve $C$ on $X$. An ${\mathbb R}$-divisor is \emph{pseudoeffective} if its class lies in the closure $\overline{{\mathbf e}ff}(X)$ of the effective cone. We let $N_1(X)$ denote the vector space of 1-cycles modulo numerical equivalence, and ${ N}E(X)$ the cone spanned by curves on $X$. We call a cycle $\alpha\in N_1(X)$ \emph{big} if it lies in the interior of ${ N}E(X)$.
Inside ${ N}E(X)$, there is the subcone ${\mathscr M}E(X)$ spanned by curves that are {\mathbf e}mph{movable}. Here a curve $C$ is called movable if it is a member of a family of curves that dominates $X$. By definition, the cones $\operatorname{Nef}(X)$ and $\overline{{ N}E}(X)$ are dual with respect to the intersection pairing. A fundamental result of Boucksom--Demailly--Paun--Peternell\cite{BDPP}, states that for a smooth variety over ${\mathbb C}$, also the cones $\overline{{\mathscr M}E}(X)$ and $\overline{{\mathbf e}ff}(X)$ are dual, i.e., a divisor $D$ is pseudoeffective if and only if $D\operatorname{cd}ot C\ge 0$ for all movable curves $C$. Moreover, they show that $\overline{{\mathscr M}E}(X)$ coincides with the closure of the cone spanned by curves which are strongly movable, that is, 1-cycles of the form $f_*(H_1\cap \operatorname{cd}ots \cap H_{n-1})$, where the $H_i$ are very ample divisors on $X'$ and $f:X'\to X$ is birational. \subseteqsection{Subvarieties with ample normal bundle} Recall that a vector bundle ${\mathscr E}$ on a variety is {\mathbf e}mph{ample} if the line bundle ${\mathscr O}(1)$ is ample on ${\mathbb P}({\mathscr E})$. Here and throughout the paper we use the Grothendieck notation for projectivized bundles, i.e., ${\mathbb P}({\mathscr E})$ is the variety of hyperplanes in ${\mathscr E}$. If ${\mathscr E}$ is a vector bundle on a curve, ${\mathscr E}$ is ample if and only if every quotient line bundle of ${\mathscr E}$ has positive degree \cite{Harcurves}. We will mainly consider the case when ${\mathscr E}$ is the normal bundle $N_Y=(\mathcal I/\mathcal I^2)^*$, of a subvariety $Y\subseteqset X$, which is a vector bundle of rank equal to the codimension when $Y$ is smooth (or more generally locally complete intersection.) Subvarieties with ample normal bundle share many interesting geometric properties with ample divisors (see e.g., \cite{Har70} or \cite{Laz04}). 
For example, for every coherent sheaf ${\mathscr F}$, the cohomology groups $H^i(X-Y,{\mathscr F})$ are finite-dimensional vector spaces for $i<\dim Y$ \cite{Har70}. Also, if $\dim Y\ge 1$, a result of Napier and Ramachandran \cite{NR98} says $^{-1}m(\pi_1(Y)\to \pi_1(X))$ has finite index in $\pi_1(X)$. A property which will be important for our purposes is the following: \begin{proposition}\cite[Corollary 8.4.3]{Laz04} Let $Y\subseteqset X$ be a subvariety with ample normal bundle, then $Y$ is nef, i.e., $Y\operatorname{cd}ot Z\ge 0$ for any subvariety with $\dim Y+\dim Z=\dim X$. {\mathbf e}nd{proposition} In his book \cite{Har70}, Hartshorne presented two of his influential conjectures about such subvarieties: \begin{conja}[Hartshorne]\label{Hartshorne2} Let $Y\subseteqset X$ be a smooth subvariety of $X$ such that the normal bundle $N_{Y}$ is an ample vector bundle. Is it true that some multiple of $Y$ deforms (as a cycle) in a family covering $X$? {\mathbf e}nd{conja} \begin{conjb}[Hartshorne]\label{Hartshorne1} Let $Y,Z$ be smooth subvarieties of $X$ with such that the normal bundles $N_Y,N_Z$ are ample vector bundles. If $\dim Y+\dim Z\ge \dim X$, then $Y\cap Z\neq {\mathbf e}mptyset$. {\mathbf e}nd{conjb} It is known by results of \cite{FL81} that Conjecture A implies Conjecture B. Unfortunately, Fulton and Lazarsfeld also showed that Conjecture A is false in general when $\dim Y\ge 2$. Their counterexample is based on constructing a certain ample rank two vector bundle on $Y={\mathbb P}^2$, so that no multiple of the zero-section moves in the total space of the bundle. Given this, one asks whether it is still true that a {\mathbf e}mph{curve} $C$ with ample normal bundle has a multiple that moves in $X$. This question is open in general, but is known when $X$ is a surface or when $g(C)\le 1$. Further evidence for this is given by the result of Campana and Flenner \cite{CF90}, which states that some multiple of the zero-section moves in the normal bundle. 
In particular, this implies that the example of Fulton and Lazarsfeld cannot be modified to the dimension 1 case. Theorem |ef{interiortheorem2} can be viewed as a weak form of Hartshorne's Conjecture A. Indeed, since the cycle class of $C\subseteqset X$ is big, a multiple of it can be written as $h+e$ where $h$ is the class of a complete intersection of $n-1$ sufficiently ample divisors and $e$ is an effective 1-cycle. In particular, for any finite set of points in $X$, there is an effective cycle numerically equivalent to $mC$ which passes through them. Note also that if $C$ is in addition assumed to be strictly nef, Theorem |ef{interiortheorem2} implies that some integral multiple of $C$ is even equivalent to a sum of strongly movable curves in $X$. \subseteqsection{Examples} (i) If $D$ is a divisor with ample normal bundle, then $D$ is nef and big \cite{Voi08}. In particular, when $X$ is a surface, Theorems |ef{interiortheorem} and |ef{finitelymany} follow directly from the Hodge index theorem. (ii) If $Y$ is a complete intersection, or more generally, a transverse intersection of subvarieties with ample normal bundle, then $N_Y$ is ample. (iii) Any smooth subvariety of projective space has ample normal bundle, since it is the quotient of the tangent bundle $T_{{\mathbb P}^n}$ which is ample \cite{Laz04}. (iv) If the normal bundle $N_C$ is sufficiently ample in the sense that $h^0(N_C)\ge \dim X-1$ and $h^1(N_C)=0$, then the curve $C$ itself moves in a family covering $X$. In this case it is known that $C$ is big by \cite[Theorem 4.11]{Pet11}. In particular, this holds when $C$ is rational or elliptic. In fact, a variety is rationally connected if and only if it contains a rational curve with ample normal bundle. In \cite{OP04}, Oguiso and Peternell give an analogous geometric characterization when $C$ is an elliptic curve in a threefold. (v) If $C$ has genus $g\ge 2$, we can consider the embedding of $C$ in its Jacobian $\operatorname{Jac}(C)$. 
Here the normal bundle of $C$ is ample \cite{Laz04}. In this example, it is classically known that the cycle-class of $C$ is in the interior of the cone of curves of $\operatorname{Jac}(C)$. In fact, Poincare's formula gives that $C{\mathbf e}quiv \Gammarac1{(g-1)!}{\mathscr T}heta^{g-1},$ where ${\mathscr T}heta$ is the theta divisor of $\operatorname{Jac}(C)$, which is ample. (vi) If $X$ is a homogenous manifold, then the ampleness of the normal bundle of a subvariety can often be interpreted geometrically. For example, $Y$ is non-degenerate. If $X$ is an abelian variety and $C$ is a curve, then $N_C$ is ample if and only if a translate of $C$ generates $X$ as a group \cite{Laz04}. If $X$ is a quadric, then by \cite[Theorem 1]{Ballico}, the normal bundle $N_{C}$ is ample if and only if $C$ is not a line. In general, a line in a homogeneous manifold has ample bundle if and only if $X={\mathbb P}^n$. (vii) Bigness of the cycle class of $C$ has however no implications for the positivity of the normal bundle. Indeed, take any 3-fold with Picard number one containing a $(-1,-1)$ curve: then $N_C={\mathscr O}(-1)\oplus {\mathscr O}(-1)$, and $C$ is big because $\overline{{ N}E}(X)$ is 1-dimensional. \section{Proof of Theorem |ef{interiortheorem} and |ef{interiortheorem2}} \subseteqsection{Divisorial Zariski decomposition} We briefly recall the divisorial Zariski decomposition introduced by Boucksom \cite{Bou04} and Nakayama \cite{Nak04}. Let $X$ be a smooth projective variety and let $D$ be a pseudoeffective ${\mathbb R}$-divisor. We define the {\mathbf e}mph{diminished base locus} of $D$ by $$\mathbf B_-(D)=\bigcup_{A} \mathbf B_{\mathbb R}(D+A),$$ where $A$ runs over all ample divisors and $\mathbf B_{\mathbb R}(D)=\bigcap\{\operatorname{Supp}(D') | D'\ge 0\mbox{ and } D'\sim_{\mathbb R} D\}$. By \cite[Theorem V.1.3]{Nak04}, $\mathbf B_-(D)$ is a countable union of closed subsets. Let $H$ be an ample line bundle on $X$. 
For each prime divisor ${\mathscr G}amma$ on $X$ define the coefficient $$\sigma_{\mathscr G}amma(D)=\lim_{{\mathbf e}psilon\to 0^+}^{-1}nf\{\mbox{mult}_{\mathscr G}amma(D') | D'\sim_{\mathbb R} D+{\mathbf e}psilon H \mbox{ and }D'\ge 0\} $$It was shown by Nakayama \cite[III.1.5]{Nak04} that these numbers do not depend on the choice of $H$ and that there are only finitely many prime divisors ${\mathscr G}amma$ such that $\sigma_{{\mathscr G}amma}(D)>0$. Following \cite{Nak04} we then define $N_\sigma(D)=\sum_{\mathscr G}amma \sigma_{\mathscr G}amma(D){\mathscr G}amma$ and $P_\sigma(D)=D-N_\sigma(D)$, and call $D=N_\sigma(D)+P_\sigma(D)$ the {\mathbf e}mph{divisorial Zariski decomposition} of $D$. The main properties of this decomposition is captured by the following \begin{proposition}\cite[III.1.4, III.1.9, V.1.3]{Nak04} Let $D$ be a pseudoeffective ${\mathbb R}$-divisor. \begin{enumerate}[(i)] ^{-1}tem $N_\sigma(D)$ is effective and $\operatorname{Supp}(N_ \sigma(D))$ coincides with the divisorial part of $\mathbf B_-(D)$. ^{-1}tem $N_\sigma(D)=0$ when $D$ is nef. ^{-1}tem For all $m\ge 0$, $H^0(X,{\mathscr O}_X(\Gammaloor{mP_\sigma(D)}))\simeq H^0(X,{\mathscr O}_X(\Gammaloor{mD}))$ {\mathbf e}nd{enumerate} {\mathbf e}nd{proposition} \begin{definition}\label{numericaldimension} Let $D$ be a pseudoeffective ${\mathbb R}$-divisor. For an ample divisor $H$ define $\nu(D,H)$ as the maximal non-negative integer $k$ such that $$ \limsup_{m\to^{-1}nfty}\Gammarac{h^0(X,{\mathscr O}_X(\Gammaloor{mD}+H)}{m^k}>0 $$We define the {\mathbf e}mph{numerical dimension} $\nu(D)$ has the maximal value of $\nu(D,H)$ when $H$ varies over all ample divisors on $X$. (Although the paper \cite{BDPP} uses a different definition of $\nu(D)$, it is equivalent to ours by the main theorem in \cite{Leh11}.) {\mathbf e}nd{definition} \begin{lemma}\label{slowgrowth}\cite[Proposition V.2.7]{Nak04} Let $X$ be a smooth projective variety and let $D$ be a pseudoeffective ${\mathbb R}$-divisor. 
Then $\nu(D)=0$ if and only if $D\equiv N_\sigma(D)$. \end{lemma} Since this result is vital in the proof of Theorem 1, we give a proof in the case $D$ is a nef divisor. In fact, this special case is enough to prove the first part of Theorem 2. We will prove the following statement: If $H$ is a smooth very ample divisor, then $D\equiv 0$ if and only if for all sufficiently large $k$, $\nu(D,kH)=0$. We'll use the observation that $D\equiv 0$ if and only if $D|_H\equiv 0$ (which comes from the fact that $D\equiv 0$ if and only if $D\cdot H^{n-1}=0$). By Fujita's vanishing theorem, there is a $k_0$ such that $H^1(X,{\mathscr O}_X(mD+(k-1)H))=0$ for all $m\ge0$ and $k\ge k_0$. Consider now the restriction map $$H^0(X,{\mathscr O}_X(mD+kH))\to H^0(H,{\mathscr O}_H(mD+kH)).$$By construction, this map is surjective for every $m\ge 0,k\ge k_0$, so in particular also $\nu(D|_H,kH|_H)=0$ for all $k\ge k_0$. By induction on the dimension, $D|_H\equiv 0$ and hence also $D\equiv 0$. When $D$ is only pseudoeffective, essentially the same idea can be used, but a different vanishing theorem is required (cf. \cite{Nak04}). \begin{lemma}\label{h0vanishing} Let ${\mathscr E}$ be an ample vector bundle on a curve $C$ and let $d$ be an integer. Then there is an integer $m_0=m_0(d)>0$ so that $$ H^0(C,\operatorname{Sym}^m {\mathscr E}^* \otimes {\mathscr O}_C(L))=0 $$for all $m\ge m_0$, and all line bundles $L$ of degree $d$. \end{lemma} \begin{proof} Let ${\mathbb P}({\mathscr E})$ denote the variety of hyperplanes in ${\mathscr E}$ with projection $\pi:Y\to C$. By the ampleness of ${\mathscr E}$, the line bundle ${\mathscr O}_{{\mathbb P}({\mathscr E})}(1)$ is ample on ${\mathbb P}({\mathscr E})$. 
Hence by Serre duality and the Leray spectral sequence, \begin{eqnarray*} H^0(C,\operatorname{Sym}^m {\mathscr E}^* \otimes {\mathscr O}_C(L))&=&H^1(C,{\mathscr O}_C(K_C-L)\otimes \operatorname{Sym}^m({\mathscr E}))\\ &=&H^1({{\mathbb P}({\mathscr E})},\pi^*(K_C-L)\otimes {\mathscr O}(m))=0. \end{eqnarray*} The last cohomology group vanishes for all $m\ge m_0$, where $m_0$ depends only on $d$ (e.g., by Fujita's vanishing theorem \cite{Laz04}). \end{proof} Note that the proof uses the characteristic $0$ assumption in the isomorphism $(\operatorname{Sym}^m {\mathscr E}^*)^*=\operatorname{Sym}^m {\mathscr E}$. \begin{lemma}\label{h0bounded} Let $C\subset X$ be a smooth curve with ample normal bundle and let $D$ be a pseudoeffective ${\mathbb R}$-divisor on $X$ such that $D\cdot C=0$. Then for any ample divisor $H$, the function $h(t)= h^0(X,{\mathscr O}_X(\lfloor tD\rfloor+H))$ is bounded. \end{lemma} \begin{proof}Let $I$ be the ideal sheaf of $C$ in $X$. Since $C$ is a locally complete intersection, we have $I^{k}/I^{k+1}=\operatorname{Sym}^k N_C^*$. By taking global sections of the exact sequences $$ 0\to I^{k+1} (\lfloor tD\rfloor+H)\to I^{k}(\lfloor tD\rfloor+H) \to \operatorname{Sym}^k N_C^*\otimes {\mathscr O}_C(\lfloor tD\rfloor+H)\to 0 $$for $k=0,1,\ldots$, we deduce that \begin{equation}\label{h0sum} h^0(X,{\mathscr O}_X(\lfloor tD\rfloor+H))\le \sum_{k=0}^{\infty} h^0(C, \operatorname{Sym}^k N_C^* \otimes {\mathscr O}_C(\lfloor tD\rfloor+H)). \end{equation}Note that we have $\lfloor tD\rfloor\cdot C\le tD\cdot C=0$. So in particular, $\deg {\mathscr O}_C(\lfloor tD\rfloor+H)$ is bounded above by some constant $K>0$ depending only on $D$ and $H$. By Lemma \ref{h0vanishing}, there is a $k_0\ge 1$ so that the cohomology groups on the right-hand side of \eqref{h0sum} vanish for $k\ge k_0$ and all $t$. 
In particular, \begin{equation*}\label{h0sum2} h^0(X,{\mathscr O}_X(\lfloor tD\rfloor+H))\le \sum_{k=0}^{k_0} h^0(C, \operatorname{Sym}^k N_C^* \otimes {\mathscr O}_C(\lfloor tD\rfloor+H)). \end{equation*}Moreover, as each of the terms on the right-hand side is bounded above by a constant independent of $t$, we see that the same holds for $h^0(X,{\mathscr O}_X(\lfloor tD\rfloor+H))$. \end{proof} With these results, we are now in a position to prove Theorems 1 and 2. \begin{proof}[Proof of Theorem \ref{interiortheorem}]It suffices to prove the theorem when $Y$ is a curve. Indeed, if $\dim Y\ge 2$ and $A_1,\dots,A_{\dim Y-1}$ are sufficiently general, smooth, ample divisors, then $C=Y\cap A_1\cap \cdots \cap A_{\dim Y-1}$ will be a smooth curve and $D|_Y\equiv 0$ if and only if $D\cdot C=0$. Moreover, the normal bundle of $C$ is ample, because it is an extension of the ample vector bundles $N_{Y|X}|_C$ and $N_{C|Y}$ (see e.g., \cite[III.\S 1]{Har70}). So suppose that $Y=C$ is a curve with ample normal bundle and let $D$ be a pseudoeffective ${\mathbb R}$-divisor such that $D\cdot C=0$ and let $H$ be any ample divisor. By Lemma \ref{h0bounded}, we have that the dimensions of the cohomology groups $H^0(X,{\mathscr O}_X(\lfloor tD\rfloor+H))$ are bounded above, so in particular $\nu(D)=0$. Moreover, if $D$ is nef, from the definition, $N_\sigma(D)=0$, so in particular $D\equiv 0$.\end{proof} \begin{proof}[Proof of Theorem \ref{interiortheorem2}] Let $C$ be a curve with ample normal bundle. By definition, the cone of curves $\overline{NE}(X)\subset N_1(X)$ is dual to $\operatorname{Nef}(X)\subset N^1(X)$. Hence, to show that the class of $C$ is in the interior of the cone of curves it suffices to show that if $D$ is a nef ${\mathbb R}$-divisor such that $D\cdot C=0$, then $D\equiv 0$. But this is exactly the first part of Theorem \ref{interiortheorem}. 
Suppose now that $C$ is strictly nef (i.e., $C\cdot D>0$ for all effective divisors $D$); we need to show that the class of $C$ is in the interior of the cone of \emph{movable curves}, $\overline{ME}(X)\subset N_1(X)$. By \cite{BDPP}, the movable cone is dual to the pseudoeffective cone, so we need only check that $C\cdot D>0$ for every pseudoeffective ${\mathbb R}$-divisor which is not numerically trivial. Let $D$ be a pseudoeffective ${\mathbb R}$-divisor such that $C\cdot D=0$. By Lemma \ref{slowgrowth} and Lemma \ref{h0bounded}, we have that $D\equiv N_\sigma(D)=\sum \sigma_\Gamma \Gamma$, so in particular also $N_\sigma(D)\cdot C=0$, contradicting the strict nefness of $C$.\end{proof} \def\mob{\operatorname{mob}} \begin{remark} The paper \cite{Ott12} presents a definition of ampleness for subschemes of arbitrary codimension, generalizing the usual notion for divisors. In short, a subscheme is defined to be \emph{ample} if the exceptional divisor on the blow-up along the subscheme satisfies a certain partial positivity condition, namely that its asymptotic cohomology groups vanish in certain degrees (it is `$q$-ample' in the sense of \cite{Tot10}, with $q=\operatorname{codim} Y-1$). When $Y$ is smooth, or locally complete intersection, it is known that this condition implies that the normal bundle of $Y$ is ample and $Y$ is strictly nef \cite[Corollary 5.6]{Ott12}. \end{remark} \section{Proof of Theorem \ref{finitelymany}}Let $X$ be a smooth complex variety over ${\mathbb C}$ and let $Y$ be a smooth subvariety with ample normal bundle and let $D\subset X$ be any effective divisor (reducible or non-reduced) such that $Y\cap D=\emptyset$. By Theorem \ref{interiortheorem}, $D$ must have numerical dimension 0, so in particular its Iitaka dimension $\kappa(D)$ is also 0. 
From this we have \begin{lemma}\label{uniquelin} Let $D$ be an effective divisor disjoint from $Y$. Then $H^0(X,{\mathscr O}_X(D))={\mathbb C}$, i.e., $D$ is the unique effective divisor in its linear equivalence class. \end{lemma} The following lemma is the essential ingredient in the proof of Theorem \ref{finitelymany}. The idea of using the Albanese variety was inspired by an argument used by Totaro \cite{Tot00}. \begin{lemma} Let $Y\subset X$ be a smooth subvariety with ample normal bundle. Then the restriction map \begin{equation}\label{restrH1} H^1(X,{\mathbb Q})\to H^1(Y,{\mathbb Q}) \end{equation} is injective.\end{lemma} \begin{proof}This essentially follows since a subvariety with ample normal bundle cannot be contracted to a point by a non-constant morphism. Fix a base-point on $Y$ and consider the map of Albanese varieties \begin{equation*} \alpha: \operatorname{Alb}(Y)\to \operatorname{Alb}(X). \end{equation*}If \eqref{restrH1} is not injective, then $\alpha$ is not surjective, i.e., the quotient abelian variety $B=\operatorname{Alb}(X)/\alpha(\operatorname{Alb}(Y))$ has positive dimension. Note that the composition $$Y\to X\to \operatorname{Alb}(X)\to B$$sends $Y$ to a point $b\in B$. Let $f$ be a non-constant holomorphic function in a neighbourhood of $b$, which vanishes at $b$. Note that the above composition pulls the function $f$ back to a global section of $H^0(Y,I_Y^m/I_Y^{m+1})$ for some $m>0$. But $I_Y^m/I_Y^{m+1}=\operatorname{Sym}^m N_Y^*$ cannot have global sections if the normal bundle of $Y$ is ample. \end{proof} In particular, this implies that the map of abelian varieties \begin{equation}\label{finiteker} \operatorname{Pic}^0(X)\to \operatorname{Pic}^0(Y) \end{equation}has finite kernel. \begin{lemma}\label{uniquenum} Suppose $D_1,D_2$ are numerically equivalent effective divisors whose supports are disjoint from $Y$. 
Then $D_1$ and $D_2$ are equal as divisors.\end{lemma} \begin{proof}Suppose first that $D_1$ and $D_2$ are algebraically equivalent. By definition, the element $D_1-D_2$ defines an element of $\operatorname{Pic}^0(X)$. Note that $D_1-D_2$ restricts to $0$ in $\operatorname{Pic}^0(Y)$ (since both $D_1$ and $D_2$ are disjoint from $Y$). Since the kernel of \eqref{finiteker} is finite, this means that there is a positive integer $m>0$ such that $m(D_1-D_2)=0$ in $\operatorname{Pic}^0(X)$ and hence $mD_1$ and $mD_2$ are linearly equivalent. By Lemma \ref{uniquelin}, we have $mD_1=mD_2$, and also $D_1=D_2$. If $D_1$ and $D_2$ are numerically equivalent, then by Matsusaka's theorem, there is an integer $m>0$ such that $mD_1$ and $mD_2$ are algebraically equivalent. Using the same argument again, we find that $D_1=D_2$. \end{proof} With this we can complete the proof of Theorem \ref{finitelymany}: \begin{proof}[Proof of Theorem \ref{finitelymany}]After replacing $Y$ with an appropriate linear section in some projective embedding, we may assume that $Y$ is a smooth curve. We may also suppose that the Picard number $\rho$ is greater than 1, otherwise there is nothing to prove. Now take any distinct $\rho$ prime divisors $D_1,\ldots,D_\rho$ disjoint from $Y$. Since $D_i\cdot Y=0$ for $i=1,\ldots, \rho$, we see that the $D_i$ lie in a rational hyperplane in $N^1(X)$. Hence after re-ordering the $D_i$, there is a relation of the form $$ m_1D_1+\cdots+m_s D_s \equiv m_{s+1} D_{s+1}+\cdots+m_\rho D_\rho, $$where the $m_i$ are non-negative integers. Now let $E$ (resp. $F$) denote the divisor on the left hand side (resp. right hand side) of this equation. Note that the supports of $E$ and $F$ are disjoint from $Y$, so by Lemma \ref{uniquenum} the divisors $E,F$ are equal. This contradicts the assumption that the components $D_1,\ldots,D_\rho$ are different. 
\end{proof} \begin{thebibliography}{99} \bibitem{Ballico} E.~Ballico. \newblock Normal bundle to curves in quadrics. \newblock {\em Bull. Soc. Math. France}, 109, (1981), 227--235. \bibitem{Bou04} S. Boucksom, Divisorial Zariski Decompositions on Compact Complex Manifolds. Ann. Scient. \'Ec. Norm. Sup., $4^e$ s\'erie, 37 (2004) 45--76. \bibitem{BDPP}S. Boucksom, J.-P. Demailly, M. Paun, T. Peternell, The pseudo-effective cone of a compact K\"ahler manifold and varieties of negative Kodaira dimension. {\em J. Algebraic Geom. 22} (2013), 201--248. \bibitem{CF90} F. Campana and H. Flenner, A characterization of ample vector bundles on a curve, {\em Math. Ann.} 287 (1990), 571--575. \bibitem{FL81} W. Fulton, R. Lazarsfeld. \newblock Positivity and excess intersection, in {\em Enumerative and classical algebraic geometry, Nice, 1981, \newblock Progress in Math. 24, Birkh\"auser} (1982), 97--105. \bibitem{Har70} R.~Hartshorne. \newblock {\em Ample subvarieties of algebraic varieties}. \newblock Lecture Notes in Mathematics, Vol. 156. Springer-Verlag, Berlin, (1970). \bibitem{Harcurves} R.~Hartshorne. \newblock {Ample vector bundles on curves}. \newblock {\em Nagoya Math. J.} 43 (1971), 73--89. \bibitem{Laz04} R.~Lazarsfeld. \newblock {\em Positivity in algebraic geometry {I and II}}. \newblock Springer-Verlag, Berlin, 2004. \bibitem{Leh11} B. Lehmann. Comparing numerical dimensions. To appear in J. of Alg. and Num. Theory. \bibitem{Nak04} N. Nakayama, Zariski-decomposition and abundance, MSJ Memoirs, vol. 14, Mathematical Society of Japan, Tokyo, 2004. \bibitem{NR98} T.~Napier and M.~Ramachandran. \newblock The {$L^2\ \overline \partial$}-method, weak {L}efschetz theorems, and the topology of {K}\"ahler manifolds. \newblock {\em J. Amer. Math. Soc.}, 11 (1998) 375--396. \bibitem{OP04}K. Oguiso, T. Peternell. The dual K\"ahler cone of compact K\"ahler threefolds. Comm. Anal. Geom. 
12 (2004) 1131--1154. \bibitem{Ott12}J. C. Ottem. {Ample subvarieties and $q$-ample divisors}. {\em Advances in Mathematics} {229} (2012) 2868--2887. \bibitem{Pet08}T. Peternell, Submanifolds with ample normal bundles and a conjecture of Hartshorne. \emph{Interactions of classical and numerical algebraic geometry}, 317--330, Contemp. Math., 496, Amer. Math. Soc., Providence, RI, 2009. \bibitem{Pet11} T. Peternell, Compact subvarieties with ample normal bundles, algebraicity, and cones of cycles. {\em Michigan Math. J.} 61 (2012), no. 4, 875--889. \bibitem{Tot10} B.~Totaro. \newblock Line bundles with partially vanishing cohomology. \newblock {\em J. Eur. Math. Soc. 15} (2013), 731--754. \bibitem{Tot00} B.~Totaro. The topology of smooth divisors and the arithmetic of abelian varieties. {\em Michigan Math. J.} Volume 48, Issue 1 (2000), 611--624. \bibitem{Voi08}C. Voisin. Coniveau 2 complete intersections and effective cones. {\em Geom. Funct. Anal.} Vol. 19 (2010) 1494--1513. \end{thebibliography} \end{document}
\begin{document} \title{All Multiparty Quantum States Can Be Made Monogamous} \author{Salini K.\(^{1}\), R. Prabhu\(^{2}\), Aditi Sen(De)\(^{2}\), and Ujjwal Sen\(^{2}\)} \affiliation{\(^{1}\)School of Physics, IISER TVM, Thiruvananthapuram, Kerala, India\\ \(^{2}\)Harish-Chandra Research Institute, Chhatnag Road, Jhunsi, Allahabad 211 019, India} \begin{abstract} Monogamy of quantum correlation measures puts restrictions on the sharability of quantum correlations in multiparty quantum states. Multiparty quantum states can satisfy or violate monogamy relations with respect to given quantum correlations. We show that all multiparty quantum states can be made monogamous with respect to all measures. More precisely, given any quantum correlation measure that is non-monogamic for a multiparty quantum state, it is always possible to find a monotonically increasing function of the measure that is monogamous for the same state. The statement holds for all quantum states, whether pure or mixed, in all finite dimensions and for an arbitrary number of parties. The monotonically increasing function of the quantum correlation measure satisfies all the properties that is expected for quantum correlations to follow. We illustrate the concepts by considering a thermodynamic measure of quantum correlation, called the quantum work deficit. \end{abstract} \maketitle \section{Introduction} \label{sec:introduction} Sharing of quantum correlations among many parties is known to play an important role in quantum phenomena, ranging from quantum communication protocols \cite{BW, teleportation, exp, comm-review} to cooperative events in quantum many-body systems \cite{amader-AdP, Andreas-Fazio-Vlatko-RMP}. It is therefore important to conceptualize and quantify quantum correlations, for which investigations are usually pursued in two directions, viz. the entanglement-separability \cite{HHHH-RMP} and the information-theoretic \cite{Modi} ones. 
Any such measure of quantum correlation is expected to satisfy a monotonicity (precisely, non-increasing) under an intuitively satisfactory set of local quantum operations. For a quantum state which is shared between more than two parties, one may expect that all the measures of quantum correlation would additionally follow a monogamy property \cite{Wootters, Bennetteof, KW, monogamyN}, which restricts the sharability of quantum correlations among many parties. In the case of three parties, say, Alice, Bob and Charu, monogamy of a measure, ${\cal Q}$, says that the sum, ${\cal Q}_{AB}+{\cal Q}_{AC}$, of quantum correlations of the two-party local states between the Alice-Bob and the Alice-Charu pairs, should not exceed the quantum correlation, ${\cal Q}_{A:BC}$, of Alice with Bob and Charu taken together. Alice is therefore alloted a special status, and is called the ``nodal observer''. If the tripartite state, shared between the three parties, Alice, Bob, and Charu, is symmetric under exchange of particles, then any of the three parties in the monogamy relation can act as the nodal observer. However, if the state under consideration is non-symmetric under interchange of particles, then we allot the status of the nodal observer to the party $i$ that minimises the monogamy expression ${\cal Q}_{i:jk}-{\cal Q}_{ij}-{\cal Q}_{ik}$, with $i,\, j,\, k$ being chosen from Alice, Bob, and Charu, and with no two of $i,\, j,\, k$ being equal. Let us mention however that our results hold with other choices of the nodal observer also. The concept of monogamy has also been carried over to more than two extra-nodal observers. Classical correlations certainly do not satisfy a monogamy constraint \cite{monogamyN}. The monogamous nature of quantum correlations plays a key role in the security of quantum cryptography \cite{cryptoRMP}. Moreover, monogamy of quantum correlations has recently been used to study frustrated spin systems \cite{koteswar}. 
Surprisingly however, there are important and useful entanglement measures that do not satisfy monogamy for certain multiparty quantum states, an example being the entanglement of formation \cite{Bennetteof}, which quantifies the amount of entanglement required for preparation of a given bipartite quantum state. Nevertheless, it was found that for multiqubit systems, the concurrence squared \cite{concurrence}, a monotonically increasing function of the entanglement of formation is monogamous \cite{Wootters, Bennetteof, KW, monogamyN}. Similarly, the square of concurrence and entanglement of formation are monogamous for arbitrary multiqubit systems \cite{OliveiraBai}, although concurrence and entanglement of formation themselves are not so. Recently, it was shown that the information-theoretic quantum correlation measure, quantum discord \cite{discord1, discord2}, can violate monogamy \cite{amaderPrabhu, Giorgi, lightcone, RenFan} (cf. \cite{LOCC-monogamy, Dagmar}), and again a monotonically increasing function of the quantum discord satisfies monogamy for three-qubit pure states \cite{monogamyDnew}. In this paper, we show that if any bipartite quantum correlation measure, of an arbitrary number of parties in arbitrary finite dimensions, is non-increasing under loss of a part of a local subsystem, any multiparty quantum state is either already monogamous with respect to that measure or an increasing function of the bipartite measure can make it so. Note that the result holds for both pure and mixed states. It is interesting to note that the increasing function also satisfies all the properties for being a measure of quantum correlation, which include monotonicity under local operations and vanishing for ``classically correlated'' states (which is the set of separable states for measures of entanglement). 
Moreover we show that the function can always be chosen to be reversible, so that there is no loss of information in applying the function on the parent quantum correlation \cite{HHHH-RMP, Vidal}. To illustrate the result, we show that although the quantum work-deficit \cite{workdeficit}, an information-theoretic quantum correlation measure, violates monogamy even for three-qubit pure states, the states become monogamous when one considers integer powers of the measure. In stark contrast to what happens for concurrence and quantum discord, we show that for the three-qubit generalized W states \cite{Wstate, dur-vidal-cirac}, the fourth power of quantum work-deficit is required to obtain monogamy for these states. In case of arbitrary three-qubit W-class states \cite{Wstate, dur-vidal-cirac} and the GHZ-class states \cite{GHZ, dur-vidal-cirac}, to obtain monogamy of quantum work-deficit, one requires higher polynomials. We also find that three-qubit pure states that are monogamous with respect to quantum discord are also so with respect to quantum work-deficit. \section{Turning non-monogamous multisite quantum states into monogamous ones} Let ${\cal Q}$ be a quantum correlation measure that is defined for arbitrary bipartite states (pure or mixed) in arbitrary finite dimensions. Consider a three-party quantum state (pure or mixed), \(\varrho_{ABC}\), in arbitrary finite dimensions, shared between three observers, Alice \((A)\), Bob $(B)$, and Charu $(C)$. Let \(\mathcal{Q}_{AB}\) denote the quantum correlation \(\mathcal{Q}\) for the two-party reduced state \(\varrho_{AB} = \mbox{tr}_{C}\varrho_{ABC}\). \(\mathcal{Q}_{AC}\) is similarly defined. Let \(\mathcal{Q}_{A:BC}\) denote the quantum correlation for the state \(\varrho_{ABC}\) in the \(A:BC\) partition. To prove our results, we consider quantum states of three finite dimensional systems. However, the results can be generalized to an arbitrary number of finite dimensional systems. 
The measure \(\mathcal{Q}\) is said to satisfy monogamy for the state \(\varrho_{ABC}\) if \({\cal Q}_{A:BC} \geq {\cal Q}_{AB}+{\cal Q}_{AC}\). The idea is that a measure will be called monogamous for a certain shared quantum state if the amount of quantum correlations that Alice has with Bob and Charu separately would be smaller than what she has with her partners taken together. The measure will be called strictly monogamous for \(\varrho_{ABC}\) if \({\cal Q}_{A:BC} > {\cal Q}_{AB}+{\cal Q}_{AC}\). On the other hand, \({\cal Q}_{A:BC} < {\cal Q}_{AB}+{\cal Q}_{AC}\), will imply that the measure is non-monogamous for the corresponding state. It is interesting to note that the ``monogamy score" ${\cal Q}_{A:BC}-{\cal Q}_{A:B}-{\cal Q}_{A:C}$ \cite{Wootters,VanishingDisc} can be used to quantify sharability of quantum correlations in tripartite quantum systems. Such quantities has been employed to detect regime changes in frustrated quantum many-body systems in experimental nuclear magnetic resonance substances \cite{koteswar}. The following theorem demonstrates that the non-monogamous nature of any measure for any state can be transformed to a monogamous one (in fact, strictly so), by considering an increasing function of the measure. Let \(\mathcal{R}\) be the set of all real numbers.\\ \noindent \textbf{Theorem 1:} If \({\cal Q}\) violates monogamy for an arbitrary three-party quantum state \(\varrho_{ABC}\) in arbitrary finite dimensions, there always exists an increasing function \(f:\mathcal{R}\to \mathcal{R}\) such that \begin{equation} f({\cal Q}_{A:BC}) > f({\cal Q}_{AB}) + f({\cal Q}_{AC}), \end{equation} provided that \({\cal Q}\) is monotonically decreasing under discarding systems and invariance under discarding systems occurs only for monogamy-satisfying states. \\ \noindent \texttt{Proof:} Let us first rename \[ {\cal Q}_{A:BC} = x, \quad {\cal Q}_{AB} = y, \quad {\cal Q}_{AC} = z, \] for notational simplicity. 
Then the constraints in the premise of the theorem (non-monogamy and monotonicity of \({\cal Q}\)) can be rewritten as \[ x < y+z, \quad x > y > 0, \quad x > z>0.\] Hence it follows that $ 0 < \frac{y}{x} < 1$ and $ 0 < \frac{z}{x} < 1 $. This implies that \begin{equation} \lim_{n \to \infty}\left( \frac{y}{x}\right)^n = 0, \quad \lim_{n \to \infty}\left( \frac{z}{x}\right)^n = 0. \end{equation} Hence, for all \(\epsilon > 0\), there exist positive integers $n_1( \epsilon), n_2( \epsilon)$ such that \begin{eqnarray} \left(\frac{y}{x}\right)^m < \epsilon \quad \forall \mbox{ positive integers } m \ge n_1(\epsilon), \nonumber \\ \left(\frac{z}{x}\right)^m < \epsilon \quad \forall \mbox{ positive integers } m \ge n_2(\epsilon). \end{eqnarray} Let us now choose $\epsilon = \epsilon_1 < \frac{1}{2}$. Therefore, $\left(\frac{y}{x}\right)^m < \epsilon_1 $ and $\left(\frac{z}{x}\right)^m <\epsilon_1$, $\forall $ positive integers $m \ge n(\epsilon_1)$, where $n(\epsilon_1) = \max \{ n_1(\epsilon_1),n_2(\epsilon_1)\}$. Adding the inequalities, we have $\left(\frac{y}{x}\right)^m + \left(\frac{z}{x}\right)^m <2\epsilon_1 <1$, $\forall $ positive integers $m \ge n(\epsilon_1)$. Hence the proof. $\blacksquare$ The above theorem can be generalized to an arbitrary number of parties (say, $N$) by choosing $\epsilon = \epsilon_1 < \frac{1}{(N-1)}$, whereby an inequality $x \leq \sum^{N-1}_{i=1}y_i$ (with $x>y_i>0,\, i=1,2,\ldots, N-1$) will lead us to $\sum^{N-1}_{i=1}\left(\frac{y_i}{x}\right)^m<1$ for a suitably chosen $m$. Note here that invariance under discarding part of a subsystem implying monogamy, holds for many quantum correlation measures, including entanglement of formation and concurrence for three-qubit systems and quantum discord in arbitrary finite dimensional three-party states. 
Note also that any positive power of a measure vanishes for the same class of states for which the original measure vanishes, so that the set of states that is indicated to be ``classical'' by the original measure, is invariant after the transformation of the original measure into the new one. Let us also mention here that if a measure is monotonically non-increasing for a certain class of local operations (possibly assisted by classical communication between the parties), a positive integer power of the measure also has the same property. Specifically, for a measure \(\mathcal{Q}\) and a multiparty state \(\rho\), \(\mathcal{Q}(\rho) \geq \mathcal{Q}(\Lambda(\rho)) (\geq 0)\) implies that \((\mathcal{Q}(\rho))^\alpha \geq (\mathcal{Q}(\Lambda(\rho)))^\alpha\) for any positive \(\alpha\), where \(\Lambda\) represents a map that can be implemented by local quantum operations and classical communication. Note that while the cases of vanishing \(x,y,z\) have been ignored in the proof, they can be handled easily. There is no guarantee that a given power that is instrumental in rendering a quantum correlation monogamous for Alice as the nodal observer, will also work for Bob or Charu as the nodal observer. However, the lowest common multiple of the these powers, corresponding to the three nodal observers, does the job. There do exist examples of situations where a non-strict monotonically increasing function turns a non-monogamous quantum correlation into a monogamous one. However, they do not preserve all information about the original quantum correlation. In other words, for such functions, knowing $f({\cal Q}(\rho_{AB}))$ will not necessarily imply a knowledge of ${\cal Q}(\rho_{AB})$. This can make the $f({\cal Q}(\rho))$ to be drastically less useful in comparison to ${\cal Q}(\rho) $. We therefore want to restrict ourselves to strictly monotonically increasing functions. 
More specifically, we consider only ``reversible functions", i.e., function $f$ such that $f(x)$ can be used to find $x$ for all arguments $x$. We now show that the class of monogamous states is closed under the operation of taking positive integral powers of the corresponding measure.\\ \textbf{Theorem 2:} If a quantum correlation measure is monogamous for a three-party quantum state, any positive integer power of the measure is also monogamous for the same state.\\ \noindent \texttt{Proof:} The premise implies that \(x\geq y+z\). Then for any positive integer \(m\), we have \begin{equation} x^m \geq \left(y+z\right)^m = \sum_{k=0}^m{m \choose k} y^k z^{m-k}, \end{equation} which in turn is \(\geq y^m + z^m\), as \(y\), \(z\) are non-negative. Hence the proof. \(\blacksquare\) \section{On monogamy of quantum work-deficit} We will now consider the monogamy properties of the information-theoretic quantum correlation measure, called quantum work-deficit (WD) \cite{workdeficit}. In particular, this will help to illustrate that positive powers of a measure can lead to monogamous nature for a state, when the measure itself is not so. We begin by relating the monogamy properties of quantum discord, quantum work-deficit, and entanglement of formation. Consider an arbitrary pure three-party state \(|\psi\rangle_{ABC}\). Let us denote the quantum discord for the state \(\sigma_{AB} = \mbox{tr}_C |\psi\rangle\langle\psi|\) by \(D_{AB}\), where the measurement is performed by the observer \(B\). \(D_{AC}\) is similarly defined, with the measurement being performed by the observer \(C\). The entanglements of formation of \(\sigma_{AB}\) and \(\sigma_{AC}\) are denoted by \(E^f_{AB}\) and \(E^f_{AC}\) respectively. Similar notations are used for the different varieties of the quantum work-deficits, \(\Delta\), \(\Delta^\leftarrow\), and \(\Delta^\rightarrow\). See the Appendix for the definitions of these measures. 
\\ \noindent \textbf{Proposition 1:} For an arbitrary three-party pure state, \(D_{AB} + D_{AC} + H(\{p^B_i\}) + H(\{p^C_j\})= E^f_{AB} + E^f_{AC} + H(\{p^B_i\}) + H(\{p^C_j\}) \geq \Delta_{AB}^\leftarrow + \Delta_{AC}^\leftarrow \geq \Delta_{AB} + \Delta_{AC}\), where \(H(\{p^B_i\})\) is the entropy produced by the measurement in \(B\), and similarly for \(H(\{p^C_j\})\). \\ \noindent \texttt{Proof:} It can be obtained from Ref. \cite{KW} that for an arbitrary pure state \(|\psi\rangle_{ABC}\), \begin{equation}\label{baddi-jaRd} E^f_{AB} - \sum_i p_i^C S(I \otimes M_i \varrho_{AC}I \otimes M_i^\dagger /p_i^C) = 0, \end{equation} where \(\{M_i\}\) forms the optimal measurement by the observer \(C\) and \(p_i^C\) are the corresponding probabilities. Here \(S(\cdot)\) denotes the von Neumann entropy of its argument. Therefore, \[E^f_{AB} + H(\{p_i^C\}) - S\left(\sum_i I \otimes M_i \varrho_{AC} I \otimes M_i^\dagger\right) = 0,\] where \(H(\cdot)\) denotes the Shannon entropy of the probability distribution in its argument. Here we assume that projective measurements attain optimality, which is conjectured to be the case for rank-2 states in Ref. \cite{seikhane-hobe-dekha}. Consequently, \(E^f_{AB} + H(\{p_i^C\}) \geq \Delta_{AB}^\leftarrow + S(\varrho_{AB}) \geq \Delta_{AB}^\leftarrow \geq \Delta_{AB}\). Hence the result. \(\blacksquare\) Performing measurements on the first parties will lead to \(2E^f_{BC} + H(\{p^A_i\}) + H(\{q^A_j\}) \geq \Delta_{AB}^\rightarrow + \Delta_{AC}^\rightarrow \geq \Delta_{AB} + \Delta_{AC}\), where \(H(\{p^A_i\})\) (\(H(\{q^A_j\})\)) is the entropy produced in the measurement at \(A\) on \(\sigma_{AB}\) (\(\sigma_{AC}\)). \\ \noindent \textbf{Theorem 3:} For an arbitrary pure three-party quantum state \(|\psi\rangle_{ABC}\), quantum discord is monogamous whenever the quantum work-deficit, \(\Delta^\leftarrow\), is so. 
\\ \noindent \texttt{Proof:} From the definitions of quantum discord and WD, we obtain \begin{equation} D_{AB} = S_B + \Delta_{AB} - H(\{p_i^B\}), \label{eq:discord11111} \end{equation} where \(S_B\) is the von Neumann entropy of \(\sigma_{B} = \mbox{tr}_{AC}|\psi\rangle\langle\psi|\). Since \(S_B - H(\{p_i^B\}) \leq 0\), \( D_{AB} \leq \Delta_{AB}^\leftarrow \). For states for which WD is monogamous, we have \begin{equation} D_{AB} + D_{AC} \leq \Delta_{AB}^\leftarrow + \Delta_{AC}^\leftarrow \leq \Delta_{A:BC}^\leftarrow = S_A = D_{A:BC}. \label{eq:discord_monogamy} \end{equation} Here we assume that the minimum of work-deficit and quantum discord are attained by the same measurement. It is easy to see that the theorem holds even if the first parties perform the measurements. \(\blacksquare\) The converse of the theorem does not hold, and by numerically searching over $10^5$ randomly-chosen pure three-qubit states, uniformly with respect to the Haar measure, we find that there are 35.788\% of three-qubit pure states for which WD is non-monogamous while quantum discord is monogamous, 6.975\% of them for which WD and quantum discord are both non-monogamous, and 57.237\% of them for which WD and quantum discord are both monogamous. \subsection{Monogamy of quantum work-deficit for three qubit states} We now consider the monogamy properties of quantum work-deficit for three-qubit pure states, and will begin by investigating the same for an important class of three-qubit pure states, viz. the generalized W states \cite{Wstate, dur-vidal-cirac}, given by \begin{equation} |\phi_{GW}\rangle=\sin \theta \cos \phi |011\rangle+\sin \theta \sin \phi |101\rangle+\cos \theta |110\rangle, \label{eq:Wgen} \end{equation} where $\theta \in (0,\frac{\pi}{4}]$ and $\phi \in (0,2\pi]$. Numerical evidence indicate that quantum work-deficit is non-monogamous for all or almost all members of this class (see Fig. \ref{WDgenW} (left)). 
In other words, setting \begin{equation} \delta_\mathcal{Q} (\varrho_{ABC}) \equiv \mathcal{Q}_{A:BC} - \mathcal{Q}_{AB} - \mathcal{Q}_{AC} \end{equation} for an arbitrary bipartite quantum correlation measure \(\mathcal{Q}\) and an arbitrary three-party state \(\varrho_{ABC}\), we find that \begin{equation} \delta_{\Delta^\leftarrow}(|\phi_{GW}\rangle) < 0 \end{equation} for all the $10^4$ generalized W states that we randomly chose from the class of $|\phi_{GW}\rangle$. Note here that another information-theoretic quantum correlation measure, the quantum discord, can also be non-monogamous for these states \cite{amaderPrabhu, Giorgi, RenFan, lightcone}. However, recently it has been shown that the square of (one variety of) quantum discord is a monogamous quantity for all three-qubit pure states \cite{monogamyDnew}. This however is no longer valid for WD. As stated in Theorem 1, suitably chosen integral powers of WD will be monogamous for any given state. And we find that for WD, monogamy for generalized W states is obtained (numerically) for the fifth power (see Fig. \ref{WDgenW} (right)), i.e. \begin{equation} \delta_{\left(\Delta^\leftarrow\right)^5}(|\phi_{GW}\rangle) > 0 \end{equation} for all the $10^4$ randomly chosen generalized W states. This feature remains unchanged when the measurement is performed by the observer $A$. If one considers the W-class states, the percentage of non-monogamous states decreases slowly, as compared to the case of generalized W states with the increase of powers of work-deficit. In particular, we found by numerical simulation that the percentage of non-monogamous states with respect to \({\left(\Delta^\leftarrow\right)^8}\) is 10.76, decreasing from 100\% for \(\Delta^\leftarrow\). The percentages are determined by Haar uniform generation of 10$^4$ randomly chosen states in the space of W-class states. \begin{figure} \caption{Monogamy of quantum work-deficit. 
Left: The ``monogamy score'', \(\delta_{\Delta^\leftarrow} \label{WDgenW} \end{figure} We have also considered the monogamy properties of general three-qubit pure states with respect to quantum work-deficit, \(\Delta^\leftarrow\). A histogram showing the relative frequencies of non-monogamous states among randomly chosen pure three-qubit states, for different powers of quantum work-deficit, is given in Fig. 2. Admixture of noise, if sufficiently small in amount, will still satisfy monogamy for the same power of $\Delta^\leftarrow$. Theorem 1 is however true for all mixed states, but larger levels of noise may require higher powers of $\Delta^\leftarrow$ to attain monogamy. In a given experimental setup, the experimenter can in principle find out her shared quantum state, and then Theorem 1 guarantees a finite positive power, $n$, for every bipartite quantum correlation measure, ${\cal Q}$, so that the corresponding ${\cal Q}^n$ will satisfy the monogamy relation. \begin{figure} \caption{Relative frequencies of non-monogamous three-qubit pure states. We provide estimates of the percentages of the complete space of three-qubit pure states which violates monogamy with respect to quantum work-deficit and its integral powers. The histogram in the figure shows the percentages on the vertical axis, while the different integral powers are on the horizontal axis. So, for example, the left-most (red) column indicates the estimated relative frequency of non-monogamous states with respect to the first power of WD, \(\Delta^\leftarrow\). Both axes represent dimensionless parameters. The feature remains similar for the other variety of WD, viz., \(\Delta^\rightarrow\), although in this case, the decrease of percentages is slower, with increasing powers of \(\Delta^\rightarrow\). 
The percentages are numerically determined by choosing $10^5$ three-qubit pure states Haar uniformly over the state space.} \label{WDgenW1} \end{figure} \section{Conclusion} It is well-known that quantum correlation measures can be monogamous or non-monogamous for multisite quantum states. This can occur for quantum correlation measures of the entanglement-separability paradigm, as well as those of the information-theoretic one. We demonstrated that any quantum correlation measure that is non-monogamous for a multiparty quantum state can be made monogamous for the same by considering an increasing function of the measure. The transformed measure retains the important properties, like monotonicity under local operations and vanishing for ``classical'' states, of the original measure. We illustrate the results by using quantum work-deficit, an information-theoretic quantum correlation measure. We show that while the generalized W states are non-monogamous with respect to quantum work-deficit, the fifth power of the measure makes the states monogamous. We also discuss the monogamy properties of quantum work-deficit, and its powers, for arbitrary three-qubit pure states. Let us mention here that in the literature, monotonically increasing functions of a quantum correlation measure are regarded with the same level of importance as the original measure. So, for example, the nearest-neighbor entanglement of quantum spin-1/2 systems \cite{amader-AdP, Andreas-Fazio-Vlatko-RMP} is usually investigated by employing the measure, concurrence, although a more physically meaningful measure is the entanglement of formation, with concurrence being an increasing function of the latter. \section*{Acknowledgment} RP acknowledges an INSPIRE-faculty position at the Harish-Chandra Research Institute (HRI) from the Department of Science and Technology, Government of India, and SK thanks HRI for hospitality and support. 
\appendix \section{Definitions of quantum correlation measures} This appendix provides a brief definition to the various quantum correlation measures used in this paper. \section*{Entanglement of formation} The entanglement of formation of a pure bipartite state, \(|\psi\rangle_{AB}\), shared between two parties \(A\) and \(B\), can be shown to be equal to the von Neumann entropy of the local density matrix of the shared state \cite{Bennetteof}: \begin{equation} E(|\psi\rangle_{AB})= S(\varrho_A) = S(\varrho_B), \end{equation} where $\varrho_{A}=\mbox{tr}_{B}|\psi\rangle\langle\psi|$ and similarly for \(\varrho_B\). Entanglement of formation of a mixed bipartite state \(\rho_{AB}\) is then defined by the convex roof approach \cite{EoF1}: \begin{equation} E(\rho)=\mbox{min}\sum_i p_iE(|\psi_i\rangle), \end{equation} where the minimization is over all pure state decompositions of $\rho = \sum_i p_i (|\psi_i\rangle \langle \psi_i|)_{AB}$. \section*{Quantum discord} Quantum discord is defined as the difference between two quantum information-theoretic quantities, whose classical counterparts are equivalent expressions for the classical mutual information \cite{discord2,discord1}: \begin{equation} Q(\rho_{AB})= {\cal I}(\rho_{AB}) - {\cal J}(\rho_{AB}). \end{equation} The ``total correlation'', \({\cal I}(\rho_{AB})\), of a bipartite state \(\rho_{AB}\) is given by \cite{qmi1} (see also \cite{Cerf1, GROIS1}) \begin{equation} \mathcal{I}(\rho_{AB})= S(\rho_A)+ S(\rho_B)- S(\rho_{AB}), \end{equation} where $S(\varrho)= - \mbox{tr} (\varrho \log_2 \varrho)$ is the von Neumann entropy of the quantum state \(\varrho\), and \(\rho_A\) and \(\rho_B\) are the reduced density matrices of \(\rho_{AB}\). On the other hand, \({\cal J}(\rho_{AB})\) can be interpreted as the amount of classical correlation in \(\rho_{AB}\), and is defined as \begin{equation} {\cal J}(\rho_{AB}) = S(\rho_A) - S(\rho_{A|B}). 
\end{equation} Here \begin{equation} S(\rho_{A|B}) = \min_{\{B_i\}} \sum_i p_i S(\rho_{A|i}), \end{equation} is the conditional entropy of \(\rho_{AB}\), conditioned on a measurement performed by \(B\) with a rank-one projection-valued measurement \(\{B_i\}\), producing the states \(\rho_{A|i} = \frac{1}{p_i} \mbox{tr}_B[(\mathbb{I}_A \otimes B_i) \rho (\mathbb{I}_A \otimes B_i)]\), with probability \(p_i = \mbox{tr}_{AB}[(\mathbb{I}_A \otimes B_i) \rho (\mathbb{I}_A \otimes B_i)]\). \(\mathbb{I}\) is the identity operator on the Hilbert space of \(A\). \\ \section*{Quantum work-deficit} We now briefly introduce the information-theoretic measure of quantum correlation, known as quantum work-deficit \cite{workdeficit} for an arbitrary bipartite quantum state $\rho_{AB}$. Let us begin by considering the number, \(I_{G}\), of pure qubits that can be extracted from $\rho_{AB}$ by ``closed global operations'', with the latter consisting of any sequence of unitary operations and dephasing. It can be shown that \begin{equation} I_G (\rho_{AB})= N - S(\rho_{AB}), \end{equation} where $N $ is the \(\log\) of the dimension of the Hilbert space ${\cal H}$ on which $\rho_{AB}$ is defined. This thermodynamic ``work'' that can be extracted from the quantum state \(\rho_{AB}\) may require to employ global operations, which are not accessible to observers who are situated in separated laboratories. To obtain a quantification of the amount of work that can be extracted from \(\rho_{AB}\) by local actions, we restrict to ``closed local quantum operations and classical communication (CLOCC)'', which consists of local unitaries, local dephasings, and sending dephased states from one party to another. 
Under these local actions, the number of pure qubits that can be extracted is given by \begin{equation} I_L(\rho_{AB}) = N - \inf_{\Lambda \in CLOCC} [S(\rho{'}_A) + S(\rho{'}_B)], \end{equation} where $S(\rho{'}_A) = \mbox{tr}_B (\Lambda (\rho_{AB}))$ and $S(\rho{'}_B) = \mbox{tr}_A (\Lambda (\rho_{AB}))$. For an arbitrary bipartite state $ \rho_{AB}$, the quantum work-deficit is then defined as \begin{equation} \Delta(\rho_{AB}) = I_G(\rho_{AB}) - I_L(\rho_{AB}), \end{equation} and is interpreted as an information-theoretic quantum correlation measure of \(\rho_{AB}\). The quantity is not efficiently computable for arbitrary bipartite states. General CLOCC actions are also difficult to implement in an experiment. Therefore we will also consider the quantity \(\Delta_{AB}^\rightarrow\), in which we restrict our attention to CLOCC consisting of projection measurements at the single party (\(A\)) only for extracting work with local actions. If the measurement is performed by \(B\), we denote it as \(\Delta_{AB}^\leftarrow\). \end{document}
\begin{document} \title[Thermal layer for inviscid compressible flows] {Well-posedness of thermal layer equations for inviscid compressible flows} \author{C.-J. Liu} \address{Chengjie Liu \newline\indent School of Mathematical Sciences, Shanghai Jiao Tong University, Shanghai, P. R. China \newline\indent and Department of Mathematics, City University of Hong Kong, Hong Kong, P. R. China} \email{[email protected]} \author{Y.-G. Wang} \address{Ya-Guang Wang \newline\indent School of Mathematical Sciences, MOE-LSC and SHL-MAC, Shanghai Jiao Tong University, Shanghai, 200240, P. R. China} \email{[email protected]} \author{T. Yang} \address{Tong Yang \newline\indent School of Mathematical Sciences, Shanghai Jiao Tong University, Shanghai, P. R. China \newline\indent and Department of mathematics, City University of Hong Kong, Hong Kong, P. R. China} \email{[email protected]} \subjclass[2000]{35M13, 35Q35, 76D03, 76D10, 76N20} \date{} \keywords{thermal layer, inviscid compressible flow, well-posedness, non-monotonic velocity fields, stability of shear flows.} \begin{abstract} A semi-explicit formula of solution to the boundary layer system for thermal layer derived from the compressible Navier-Stokes equations with the non-slip boundary condition when the viscosity coefficients vanish is given, in particular in three space dimension. In contrast to the inviscid Prandtl system studied by \cite{H-H} in two space dimension, the main difficulty comes from the coupling of the velocity field and the temperature field through a degenerate parabolic equation. The convergence of these boundary layer equations to the inviscid Prandtl system is justified when the initial temperature goes to a constant. 
Moreover, the time asymptotic stability of the linearized system around a shear flow is given, and in particular, it shows that in three space dimension, the asymptotic stability depends on whether the direction of tangential velocity field of the shear flow is invariant in the normal direction respective to the boundary. \end{abstract} \mathcal{m}aketitle \tableofcontents \section{Introduction} There has been extensive study on the Prandtl equations since Prandtl introduced in \cite{prandtl} to describe the behavior of flows near physical boundaries in viscous flows in 1904. The well-posedness theory and ill-posedness results obtained by Oleinik and her collaborators (\cite{O, Ole}), and Gerard-Varet, Dormy \cite{GD}, Grenier \cite{GR}, and Guo, Nguyen \cite{GN1} respectively, show that the monotonicity of the tangential velocity in the normal direction to the boundary plays an essential role in the well-posedness of the Prandtl equations even locally in time. On the other hand, as observed by van Dommnelen and Shen \cite{van} and studied mathematically by Hong and Hunter \cite{H-H}, the monotonicity condition is not needed for the well-posedness of the inviscid Prandtl equations at least locally in time. This paper aims to study the corresponding boundary layer problem derived from compressible Navier-Stokes equations when the viscosity coefficients vanish or are of higher order with respect to the heat conductivity coefficient, i.e. the Prandtl number ${\rm Pr}$ is strictly smaller than one. The results obtained in this paper not only reveal the role of the temperature played in this boundary layer system, but also reveal the phenomena in three space dimensions that are different from those obtained by Hong and Hunter \cite{H-H} for two dimensional inviscid Prandtl equations. 
Precisely, we consider the following initial-boundary value problem in $\{(t,x',y):~t>0,x'\in{\mathcal{m}athbb{R}}^{d-1},y\in{\mathcal{m}athbb{R}}_+\}$ with $d=2,3$: \begin{equation}\label{pr_invis}\begin{cases} \mathcal{m}athcal{P}d_t \bu_h+(\bu_h\cdot\nabla_h+u_d\mathcal{m}athcal{P}d_y)\bu_h=0,\\%~&in ~\Omega_T,\\ \mathcal{m}athcal{P}d_t \ta+(\bu_h\cdot\nabla_h+u_d\mathcal{m}athcal{P}d_y)\ta =\frac{\ka}{P}\ta \mathcal{m}athcal{P}d_y^2\ta+\frac{\ka P_t}{P}\ta,\\%~&in ~\Omega_T,\\ \nabla_h\cdot\bu_h+\mathcal{m}athcal{P}d_y u_d=\frac{\ka}{P} \mathcal{m}athcal{P}d_y^2\ta-\frac{(1-\ka)P_t}{P},\\%\big(\bu_\tau\cdot \nabla_hP+ P_t\big),\\%~&in ~\Omega_T,\\ (u_d,\ta)|_{y=0}=\big(0,\ta^0(t,x')\big), \quad\lim\limits_{\yinf}\ta(t,x,y)=\Ta(t,x'),\\ (\bu_h,\ta)|_{t=0}=(\bu_{h0},\ta_0)(x',y), \end{cases}\end{equation} where $x'=(x_1,\cdots,x_{d-1}),~\nabla_h=(\mathcal{m}athcal{P}d_{x_1},\cdots,\mathcal{m}athcal{P}d_{x_{d-1}})^T;$ $\bu_h=(u_1,\cdots,u_{d-1})^T\in{\mathcal{m}athbb{R}}^{d-1}$ is unknown vector function, $u_d$ and $\ta$ are unknown scalar functions; $P=P(t)$ and $\Ta(t,x')$ are positive known functions, and $\kappa>0$ is a constant. The above problem \eqref{pr_invis} discribes the behavior of boundary layer for inviscid compressible non-isentropic flow, as the heat conductivity tends to zero, and the behavior of thermal layer for the compressible Navier-Stokes equations with nonslip boundary condition on velocity when the viscosity tends to zero faster than the heat conductivity. The formal derivation of \eqref{pr_invis} will be given in the Appendix. 
When the first equation of \eqref{pr_invis} has an additional diffusion term $\mathcal{m}athcal{P}artial_y^2\bu_h$ on the right hand side, which describes the boundary layer behavior of the compressible Navier-Stokes equations, we have studied the well-posedness of this boundary layer problem in \cite{LWY4}, in two space dimensions, under the usual monotonic condition on the tangential velocity with respect to the normal direction to the boundary, as for the classical incompressible Prandtl equations (\cite{O, Ole, AWXY, WXY, Mas-Wong, GR, GD}). In this paper, motivated by the work of Hong and Hunter \cite{H-H}, we are going to study the problem \eqref{pr_invis} without the monotonicity of the tangential velocity. When the pressure of the outer flow is a function of time $t$ only, we will first give a semi-explicit formula for the solution to problem \eqref{pr_invis} in the next section. In Subsection 2.2, we obtain that the velocity field of \eqref{pr_invis} converges to that of the inviscid Prandtl system when the temperature tends to a constant state. And then in Section 3, we will study the linearized system of \eqref{pr_invis} around a shear flow. In three space dimensions, it will be shown that the solution to the linearized system is bounded for all positive $t$ when the tangential velocity direction of the background shear flow is independent of the normal direction to the boundary and one component of the tangential velocity is strictly monotonic with respect to the normal variable, and it grows like $\sqrt{t}$ when the two tangential components of the shear flow is not linearly dependent, or they are linearly dependent and one component has a non-degenerate critical point. \section{Study of the nonlinear thermal layer problem} \subsection{Local existence of classical solutions} Before stating the local existence result, we first give some notations. 
Denote by $I_{k}$ the $k\times k$ identity matrix for some integer $k$, $det(A)$ the determinant of a matrix $A$, $\nabla_h u(x',\cdot)$ the gradient of a function $u$ with respect to the variables $x'\in {\mathcal{m}athbb{R}}^{d-1}$. By using the intial data of the problem \eqref{pr_invis}, we introduce the vector function $\xi(t,x',z)\in{\mathcal{m}athbb{R}}^{d-1}$, defined by the following equation \begin{equation}\label{2.1} x'=\xi+t\bu_{h0}(\xi,z), \end{equation} and then, the functions $a(t,x',z)$ and $\bb(t,x',z)$ are defined as: \begin{equation}\label{notation} a(t,x',z):=\frac{P(t)}{P(0)}\ta_0\big(\xi(t,x',z),z\big)\cdot det(I_{d-1}+t\nabla_h\bu_{h0})\big(\xi(t,x',z),z\big),\quad \bb(t,x',z):=\bu_{h0}\big(\xi(t,x',z),z\big). \end{equation} Then, we have the following local existence of a classical solution to the problem \eqref{pr_invis}, in which no monotonicity condition is required on the initial data. \begin{theorem}\label{thm-2-1} Suppose that the data given in \eqref{pr_invis}, $\bu_{h0}\in C^2,\ta_0\in C^2,\ta^0\in C^1,P\in C^1$ and $\Ta\in C^1$ satisfy the compatibility conditions of \eqref{pr_invis} up to order one, and \begin{equation}\label{ass_ini} t^*:= \sup \Big\{t: \inf\limits_{(x',y)\in{\mathcal{m}athbb{R}}^{d}_+}det\big(I_{d-1}+s\nabla_h\bu_{h0}(x',y)\big)>0,~\forall s\in[0,t]\Big\}>0. \end{equation} Also, there exists a positive constant $C_0$ such that for $t\in[0,t^*)$ and $(x',y)\in{\mathcal{m}athbb{R}}_+^{d},$ \begin{equation}\label{ass_ta} \begin{cases} C_0^{-1}\leq \ta_0(x',y),~\ta^0(t,x'),~\Ta(t,x'),~P(t)\leq C_0,\\[3mm] \|\bu_{h0}\|_{C^2}\le C_0,\quad \|\ta_0\|_{C^1}\leq C_0. 
\end{cases} \end{equation} Then, there exist a $t_0\in (0, t^*]$ and a unique classical solution to \eqref{pr_invis} in $[0,t_0)\times{\mathcal{m}athbb{R}}^d_+$ given by \begin{equation}\label{sol_invis}\begin{split} &\bu_h(t,x',y)~=~\bu_{h0}\Big(\xi\big(t,x',\eta(t,x',y)\big),\eta(t,x',y)\Big),\\ & u_d(t,x',y)~=~\int_0^{\eta(t,x',y)}\mathcal{m}athcal{P}d_t(\frac{\tta}{a})(t,x',z)dz+\int_0^{\eta(t,x',y)}\Big[\bb\big(t,x',\eta(t,x',y)\big)\cdot\nabla_h(\frac{\tta}{a})(t,x',z)\Big]dz,\\ &\ta(t,x',y)~=~ \tta\big(t,x',\eta(t,x',y)\big). \end{split}\end{equation} Here, $a(t,x',z)$ and $\bb(t,x',z)$ are given by \eqref{notation}, $\tta(t,x',z)$ is a positive smooth solution to the following problem in $[0,t_0)\times {\mathcal{m}athbb{R}}_+^d$: \begin{equation}\label{pr_tta}\begin{cases} \mathcal{m}athcal{P}d_t\tta+\bb\cdot\nabla_h\tta-\frac{\ka P_t}{P}\tta-\frac{\ka a}{P}\mathcal{m}athcal{P}d_z\big(\frac{a}{\tta}\mathcal{m}athcal{P}d_z\tta\big)=0,\\%\quad in~\big\{0\leq t<t^*,z>0,x'\in{\mathcal{m}athbb{R}}^{d-1}\big\},\\ \tta|_{z=0}=\ta^0(t,x'), \quad \lim\limits_{\zinf}\tta=\Ta(t,x'),\\ \tta|_{t=0}=\ta_0(x',z), \end{cases}\end{equation} and the function $\eta(t,x',y)$ is defined implicitly by the relation \begin{equation}\label{tran_y} y~=~\int_0^\eta\frac{\tta(t,x',z)}{a(t,x',z)}dz. \end{equation} \end{theorem} \begin{proof}[\bf{Proof.}] We shall use the method of characteristics, introduced in \cite{H-H} for the inviscid Prandtl equations, to get the solution formula \eqref{sol_invis} for the problem \eqref{pr_invis}. 
(1) Suppose that $(\bu_h,u_d,\ta)(t,x',y)$ is a smooth solution to the problem \eqref{pr_invis}, we introduce characteristic coordinates: \begin{equation}\label{tran} t=\tau,~x'=x'(\tau,\xi,\eta),~y=y(\tau,\xi,\eta) \end{equation} being determined by solving the problems, \begin{equation}\label{pr_char}\begin{cases} \frac{\mathcal{m}athcal{P}artial}{\mathcal{m}athcal{P}artial\tau}x'(\tau,\xi,\eta)~=~\bu_h \big(\tau,x'(\tau,\xi,\eta),y(\tau,\xi,\eta)\big),\\ \frac{\mathcal{m}athcal{P}artial }{\mathcal{m}athcal{P}artial\tau}y(\tau,\xi,\eta)~=~u_d\big(\tau,x'(\tau,\xi,\eta),y(\tau,\xi,\eta)\big),\\ x'(0,\xi,\eta)~=\xi, ~y(0,\xi,\eta)~=~\eta, \end{cases}\end{equation} with $\xi=(\xi_1,\cdots,\xi_{d-1})^T\in{\mathcal{m}athbb{R}}^{d-1}$. We denote by \begin{equation}\label{new_fun} (\bar \bu_h,\bar u_d,\bar\ta)(\tau,\xi,\eta)~:=~(\bu_h,u_d,\ta)\big(\tau,x'(\tau,\xi,\eta),y(\tau,\xi,\eta)\big), \end{equation} then, it is easy to deduce from \eqref{pr_invis} and the relation \eqref{pr_char} that $ (\bar \bu_h,\bar\ta)(\tau,\xi,\eta)$ satisfy the following problem: \begin{equation}\label{eq_new}\begin{cases} \mathcal{m}athcal{P}d_\tau\bar \bu_h~=~0,\\ \mathcal{m}athcal{P}d_\tau\bar\ta~=~\frac{\ka}{P(\tau)}\bar\ta ~\overline{\mathcal{m}athcal{P}d_y^2\ta}+\frac{\ka P_\tau(\tau) }{P(\tau)}\bar\ta,\\ (\bar \bu_h,\bar \ta)|_{\tau=0}~=~(\bu_{h0},\ta_0)(\xi,\eta) \end{cases}\end{equation} with the notation $\overline{\mathcal{m}athcal{P}d_y^2\ta}(\tau,\xi,\eta)=(\mathcal{m}athcal{P}d_y^2\ta)(\tau,x'(\tau,\xi,\eta),y(\tau,\xi,\eta))$. We immediately obtain that from \eqref{eq_new}, \begin{equation}\label{formu_u} \bar \bu_h(\tau,\xi,\eta)~\equiv~\bu_{h0}(\xi,\eta), \end{equation} which implies that by plugging \eqref{formu_u} into \eqref{pr_char}, \begin{equation}\label{formu_x} x'~=~\xi+\tau \bu_{h0}(\xi,\eta). 
\end{equation} It is easy to see that the relation \eqref{formu_x} determines uniquely $\xi=\xi(\tau,x',\eta)$ when $0\leq\tau\le t^*$, with $t^*>0$ being given in \eqref{ass_ini}. (2) Next, we are going to verify that the relation $\eta=\eta(t,x',y)$ implicitly defined by \eqref{tran}-\eqref{pr_char} obeys the equation \eqref{tran_y}. Denote by $J(\tau,\xi,\eta)$ the Jacobian of the transformation between $(x',y)$ and $(\xi,\eta)$: \begin{equation}\label{j1} J(\tau,\xi,\eta)~:=~\frac{\mathcal{m}athcal{P}d(x',y)}{\mathcal{m}athcal{P}d(\xi,\eta)}=\det(\nabla_\xi x')\cdot\mathcal{m}athcal{P}d_\eta y-\sum\limits_{i=1}^{d-1}\big[det(\nabla_i x')\cdot\mathcal{m}athcal{P}d_{\xi_i}y\big],\end{equation} with the notation \[ \nabla_i=( \cdots,\mathcal{m}athcal{P}d_{\xi_{i-1}},\mathcal{m}athcal{P}d_\eta,\mathcal{m}athcal{P}d_{\xi_{i+1}},\cdots)^T,\qquad 1\leq i\leq d-1.\] By a direct computation and using \eqref{pr_char}, we get \[ \mathcal{m}athcal{P}d_\tau J(\tau,\xi,\eta)= J(\tau,\xi,\eta)\cdot(\nabla_h\cdot\bu_h+\mathcal{m}athcal{P}d_yu_d) \big(\tau,x'(\tau,\xi',\eta),y(\tau,\xi',\eta)\big) \] which gives rises to \begin{equation}\label{j} \mathcal{m}athcal{P}d_\tau J(\tau,\xi,\eta)= J(\tau,\xi,\eta) \cdot\Big[\frac{\ka}{P(\tau)}\overline{\mathcal{m}athcal{P}d_y^2\ta}(\tau,\xi,\eta)- (1-\ka)\frac{P_\tau(\tau)}{P(\tau)}\Big] \end{equation} by using the third equation given in \eqref{pr_invis}. Note that $J(0,\xi,\eta)=1$, thus combining \eqref{j} with the second equation given in \eqref{eq_new} we deduce \begin{equation}\label{Jaco} J(\tau,\xi,\eta)~=~\frac{P(0)}{P(\tau)\ta_0(\xi,\eta)}\bar\ta(\tau,\xi,\eta). 
\end{equation} Noting that \[\det(\nabla_\xi x')(\tau,\xi,\eta)=det\Big(I_{d-1}+\tau\nabla_\xi \bu_{h0}(\xi,\eta)\Big)>0, \quad{\rm for}~\tau\leq t^*,\] plugging \eqref{Jaco} into \eqref{j1} yields that \begin{equation}\label{Jaco1} \mathcal{m}athcal{P}d_\eta y-\sum\limits_{i=1}^{d-1}\Big[\frac{det(\nabla_i x')}{det(\nabla_\xi x')}\cdot\mathcal{m}athcal{P}d_{\xi_i}y\Big]=\frac{P(0)}{P(\tau)\ta_0(\xi,\eta)\cdot det(\nabla_\xi x')(\tau,\xi,\eta)}\bar\ta(\tau,\xi,\eta). \end{equation} By a direct calculation, it deduces that the characteristics of the equation \eqref{Jaco1} is $x'=constant$ or $\xi=\xi(\tau,x',\eta)$ given in \eqref{formu_x}. Denote by \begin{equation}\label{define_ta} \tta(\tau,x',\eta)~:=~\bar\ta\big(\tau,\xi(\tau,x',\eta),\eta\big). \end{equation} From \eqref{Jaco1}, it follows \begin{equation}\label{Jaco2} \frac{\mathcal{m}athcal{P}d}{\mathcal{m}athcal{P}d \eta}y\big(\tau,\xi(\tau,x',\eta),\eta\big)~=~\frac{\tta(\tau,x',\eta)}{a(\tau,x',\eta)}, \end{equation} where $a(\tau,x',\eta)$ is defined in \eqref{notation}. Moreover, as $u_d|_{y=0}=0$, from \eqref{pr_char} we have $y=0$ when $\eta=0.$ Therefore, integrating \eqref{Jaco2} along characteristics, we obtain \begin{equation}\label{formu_y} y~=~y\big(\tau,\xi(\tau,x',\eta),\eta\big)~=~\int_0^\eta\frac{\tta(\tau,x',z)}{a(\tau,x',z)}dz. \end{equation} Consequently, when $0\leq\tau \le t^*$ and $\tta>0$, we have that $a>0$ from the definition \eqref{notation}, thus by using \begin{equation*} \mathcal{m}athcal{P}d_\eta y=\frac{\tta(\tau,x',\eta)}{a(\tau,x',\eta)}>0, \end{equation*} the equation \eqref{formu_y} is invertible and gives $\eta=\eta(\tau,x',y)$ with \begin{equation}\label{deri_eta} \eta_y=\frac{a(\tau,x',\eta)}{\tta(\tau,x',\eta)}>0. \end{equation} Also, the domain $\{y>0\}$ is changed as $\{\eta>0\}$ with the boundary $\{y=0\}$, $y\rightarrow+\infty$ respectively, being changed as $\{\eta=0\},$ $\eta\rightarrow+\infty$ respectively. 
(3) Now, we will derive the formula \eqref{sol_invis} and the problem \eqref{pr_tta} for $\tta(\tau,x',\eta)$. Note that the inverse function of $x'=x'(\tau,\xi,\eta), ~y=y(\tau,\xi,\eta)$ given by \eqref{formu_x} and \eqref{formu_y}, is \[\Big(\xi\big(\tau,x',\eta(\tau,x',y)\big),\eta(\tau,x',y) \Big).\] Thus, combining \eqref{new_fun}, \eqref{formu_u} and \eqref{define_ta} yields that \[ \bu_h(\tau,x',y)=\bu_{h0}\Big(\xi\big(\tau,x',\eta(\tau,x',y)\big),\eta(\tau,x',y)\Big),\quad \ta(\tau,x',y)=\tta\big(\tau,x',\eta(\tau,x',y)\big), \] which implies the formulas of $\bu_h(t,x',y)$ and $\ta(t,x',y)$ given in \eqref{sol_invis}. Denote by \[\tilde y(\tau,x',\eta)~:=~\int_0^\eta\frac{\tta(\tau,x',z)}{a(\tau,x',z)}dz, \] then from \eqref{formu_y} and \eqref{formu_x} we have $y(\tau,\xi,\eta)=\tilde y\big(\tau,\xi+\tau \bu_{h0}(\xi,\eta),\eta\big)$, which yields that \begin{equation}\label{tv} y_\tau(\tau,\xi,\eta)=\mathcal{m}athcal{P}d_\tau\tilde y\big(\tau,\xi+\tau \bu_{h0}(\xi,\eta),\eta\big)+\bu_{h0}(\xi,\eta)\cdot\nabla_h\tilde y\big(\tau,\xi+\tau \bu_{h0}(\xi,\eta),\eta\big). \end{equation} Combining \eqref{pr_char} with \eqref{tv}, we get that \[\begin{split} &u_d\big(\tau,x'(\tau,\xi,\eta),y(\tau,\xi,\eta)\big)=y_\tau(\tau,\xi,\eta)\\ &=\int_0^\eta\mathcal{m}athcal{P}d_\tau(\frac{\tta}{a})\big(\tau,\xi+\tau \bu_{h0}(\xi,\eta),z\big)dz+\int_0^\eta\bu_{h0}(\xi,\eta)\cdot\nabla_h(\frac{\tta}{a})\big(\tau,\xi+\tau \bu_{h0}(\xi,\eta),z\big)dz, \end{split}\] which implies the formula of $u_d(t,x,y)$ given in \eqref{sol_invis} by using that \eqref{formu_x} and \eqref{formu_y}. 
Next, from \eqref{define_ta} and the relation \eqref{formu_x} we have \[\bar\ta(\tau,\xi,\eta)~=~\tta\Big(\tau,\xi+\tau\bu_{h0}(\xi,\eta),\eta\Big),\] which implies that, \[\begin{split} &\mathcal{m}athcal{P}d_\tau\bar\ta=\mathcal{m}athcal{P}d_\tau\tta+\bu_{h0}\cdot\nabla_{h}\tta, \qquad\nabla_\xi\bar\ta=\big(I_{d-1}+\tau\nabla_\xi\bu_{h0}\big)\cdot\nabla_h\tta,\\ &\mathcal{m}athcal{P}d_\eta\bar\ta=\mathcal{m}athcal{P}d_\eta\tta+\tau\mathcal{m}athcal{P}d_\eta\bu_{h0}\cdot\nabla_h\tta. \end{split}\] Moreover, from \eqref{formu_x} it follows that \[\big(I_{d-1}+\tau\nabla_\xi\bu_{h0}\big)\cdot\xi_\eta+\tau\mathcal{m}athcal{P}d_\eta\bu_{h0}=0,\] thus we obtain that by virtue of \eqref{deri_eta}, \[ \begin{split} \mathcal{m}athcal{P}d_y\bar\ta\big(\tau,\xi(\tau,x',\eta),\eta\big) & = \Big[(\xi_\eta\cdot\nabla_\xi+\mathcal{m}athcal{P}d_\eta)\bar\ta\Big] \big(\tau,\xi(\tau,x',\eta),\eta\big)\cdot\eta_y\\ &=(\mathcal{m}athcal{P}d_\eta\bar\ta-\tau\mathcal{m}athcal{P}d_\eta \bu_{h0}\cdot \nabla_h\tta)\frac{a(\tau,x',\eta)}{\tta(\tau,x',\eta)}\\ &=\mathcal{m}athcal{P}d_\eta\tta(\tau,x',\eta)\cdot \frac{a(\tau,x',\eta)}{\tta(\tau,x',\eta)}. \end{split}\] Therefore, the problem for $\bar\ta$ given in \eqref{eq_new} can be reduced as follows, \[\begin{cases} \mathcal{m}athcal{P}d_\tau\tta+\bu_{h0}\big(\xi(\tau,x',\eta),\eta\big)\cdot\nabla_h\tta=\frac{\ka a(\tau,x',\eta)}{P(\tau)}\mathcal{m}athcal{P}d_\eta\Big(\frac{a(\tau,x',\eta)}{\tta}\mathcal{m}athcal{P}d_\eta\tta\Big)+\frac{\ka P_\tau(\tau)}{P(\tau)}\tta,\quad {\rm in}~[0,t^*)\times{\mathcal{m}athbb{R}}_+^d,\\ \tta|_{\tau=0}=\ta_0(x',\eta). 
\end{cases}\] Furthermore, from the boundary conditions of $\ta$ given in \eqref{pr_invis}, we get \[\tta|_{\eta=0}=\ta^0(\tau,x'), \quad \lim\limits_{\eta\rightarrow+\infty}\tta(\tau,x',\eta)=\Ta(\tau,x'),\] so we obtain the problem \eqref{pr_tta} for $\tta(\tau,x',\eta).$ Then, by the following Proposition \ref{lemma_ta}, we know that the problem \eqref{pr_tta} admits a unique classical solution in $[0,t_0)\times{\mathcal{m}athbb{R}}^d_+$ for some $0<t_0\leq t^*$. Finally, One can check directly that \eqref{sol_invis}-\eqref{tran_y} defines a smooth solution to the problem \eqref{pr_invis}. \end{proof} \begin{remark} From \eqref{sol_invis} and \eqref{tran_y} with the definition of the function $a(t,x',z) $ given in \eqref{notation}, one can see that there may be a loss of derivatives in the tangential variables $x'$ for the solution of \eqref{pr_invis}, with respect to the regularity of the initial data. \end{remark} \begin{prop}\label{lemma_ta} Under the assumption of Theorem \ref{thm-2-1}, there is a time $0<t_0\leq t^*$ such that the problem \eqref{pr_tta} has a unique classical solution $\tta(t,x',z)$ with bounded derivatives in $[0,t_0)\times{\mathcal{m}athbb{R}}^d_+,$ satisfying that \begin{equation*} C_0^{-1-2\ka}\leq \tta(t,x',z) \leq C_0^{1+2\ka}, \end{equation*} for the constant $C_0$ given in \eqref{ass_ta}. 
\end{prop} \begin{proof}[\bf{Proof.}] Set $$\mathcal{m}athcal{A}at{\ta}(t,x',z)=\frac{\tta(t,x',z)}{[P(t)]^\kappa}.$$ From the problem \eqref{pr_tta}, we know that $\mathcal{m}athcal{A}at{\ta}(t,x',z)$ satisfies the following initial-boundary value problem, \begin{equation}\label{pr_hta}\begin{cases} \mathcal{m}athcal{P}d_t\mathcal{m}athcal{A}at{\ta}+\bb\cdot\nabla_h\mathcal{m}athcal{A}at{\ta}-\frac{\ka a}{P^{1+\ka}}\mathcal{m}athcal{P}d_z\big(\frac{a}{\mathcal{m}athcal{A}at{\ta}}\mathcal{m}athcal{P}d_z\mathcal{m}athcal{A}at{\ta}\big)=0,\\ \mathcal{m}athcal{A}at{\ta}|_{z=0}=\ta^0(t,x')/[P(t)]^\ka, \quad \lim\limits_{\zinf}\mathcal{m}athcal{A}at{\ta}=\Ta(t,x')/[P(t)]^\ka,\\ \mathcal{m}athcal{A}at{\ta}|_{t=0}=\ta_0(x',z)/[P(0)]^\ka. \end{cases}\end{equation} Introduce an auxiliary function $\vskip 1emarphi(s)$ defined for all $s\in{\mathcal{m}athbb{R}}$, satisfies that $\vskip 1emarphi$ is smooth, $\frac{1}{2}C_0^{-1-\ka}\leq \vskip 1emarphi\leq 2C_0^{1+\ka}$ with the positive constant $C_0$ given in \eqref{ass_ta}, and $\vskip 1emarphi(s)=\frac{1}{s}$ when $s\in\Big[C_0^{-1-\ka},C_0^{1+\ka}\Big]$. Then, corresponding to the problem \eqref{pr_hta}, we consider the following initial-boundary value problem, \begin{equation}\label{pr_aux}\begin{cases} \mathcal{m}athcal{P}d_t\ta+\bb\cdot\nabla_h\ta-\frac{\ka a}{P^{1+\ka}}\mathcal{m}athcal{P}d_z\big(a\vskip 1emarphi(\ta)\mathcal{m}athcal{P}d_z\ta\big)=0,\\ \ta|_{z=0}=\ta^0(t,x')/[P(t)]^\ka, \quad \lim\limits_{\zinf}\ta=\Ta(t,x')/[P(t)]^\ka,\\ \ta|_{t=0}=\ta_0(x',z)/[P(0)]^\ka. \end{cases}\end{equation} Noting that the equation in \eqref{pr_aux} is degenerate parabolic with smooth coefficients, by employing the classical theory of degenerate parabolic equations (cf. 
\cite{solo}), we conclude there is a classical solution to the problem \eqref{pr_aux} in $[0,t_0)\times{\mathbb{R}}^d_+$ for some $0<t_0\leq t^*.$ Obviously, when the solution $\ta$ to \eqref{pr_aux} satisfies \begin{equation}\label{bound_ta} C_0^{-1-\ka}\leq \ta(t,x',y)\leq C_0^{1+\ka}, \end{equation} the problem \eqref{pr_aux} coincides with the one given in \eqref{pr_hta}. Thus, it suffices to verify \eqref{bound_ta} being true in the following. To prove the lower bound of the solution $\ta(t,x',y)$ given in \eqref{bound_ta}, letting $r_+\triangleq\max\{r,0\}$, multiplying the equation of \eqref{pr_aux} by $\big(C_0^{-1-\ka}-\ta\big)_+$ and integrating over ${\mathbb{R}}_+^d$, it gives that by integration by parts, \begin{equation}\label{low-bound} \begin{split} &-\frac{d}{2dt}\int_{{\mathbb{R}}_+^d}\big(C_0^{-1-\ka}-\ta\big)_+^2dx'dz+\int_{{\mathbb{R}}^d_+}\big[(\nabla\cdot \bb)\big(C_0^{-1-\ka}-\ta\big)_+^2\big]dx'dz\\ &=\int_{{\mathbb{R}}^d_+}\frac{\ka}{P^{1+\ka}} a\varphi(\ta)\partial_z\big(C_0^{-1-\ka}-\ta\big)_+\big[a\partial_z\big(C_0^{-1-\ka}-\ta\big)_++a_z\big(C_0^{-1-\ka}-\ta\big)_+\big]dx'dz. 
\end{split}\end{equation} It is obvious that the right hand side of the above equality satisfies \[\begin{split} &\int_{{\mathcal{m}athbb{R}}^d_+}\frac{\ka}{P^{1+\ka}} a\vskip 1emarphi(\ta)\mathcal{m}athcal{P}d_z\big(C_0^{-1-\ka}-\ta\big)_+\big[a\mathcal{m}athcal{P}d_z\big(C_0^{-1-\ka}-\ta\big)_++a_z\big(C_0^{-1-\ka}-\ta\big)_+\big]dx'dz\\ &\geq-\int_{{\mathcal{m}athbb{R}}^d_+}\frac{\ka\vskip 1emarphi(\ta)}{4P^{1+\ka}} \big[a_z\big(C_0^{-1-\ka}-\ta\big)_+\big]^2dx'dz, \end{split}\] thus, from \eqref{low-bound} we have \[\begin{split} \frac{d}{dt}\int_{{\mathcal{m}athbb{R}}_+^d}\big(C_0^{-1-\ka}-\ta\big)_+^2dx'dz&\leq\int_{{\mathcal{m}athbb{R}}^d_+}\big[2|\nabla\cdot \bb|+\frac{\ka\vskip 1emarphi(\ta)a_z^2}{2P^{1+\ka}}\big]\big(C_0^{-1-\ka}-\ta\big)_+^2dx'dz\\ &\leq C\int_{{\mathcal{m}athbb{R}}^d_+}\big(C_0^{-1-\ka}-\ta\big)_+^2dx'dz, \end{split}\] \iffalse &\geq { \red -\int_{{\mathcal{m}athbb{R}}^d_+}\frac{\ka}{4P^{1+\ka}}\mathcal{m}athcal{P}artial_z(\vskip 1emarphi(\theta)(a^2)_z) \big(C_0^{-1-\ka}-\ta\big)_+^2dx'dz, } \end{split}\] thus, from \eqref{low-bound} we have \[\begin{split} \frac{d}{dt}\int_{{\mathcal{m}athbb{R}}_+^d}\big(C_0^{-1-\ka}-\ta\big)_+^2dx'dz&\leq\int_{{\mathcal{m}athbb{R}}^d_+}\big[2|\nabla\cdot b| { \red +\frac{\ka}{4P^{1+\ka}}\mathcal{m}athcal{P}artial_z(\vskip 1emarphi(\theta)(a^2)_z) } \big]\big(C_0^{-1-\ka}-\ta\big)_+^2dx'dz\\ &\leq C\int_{{\mathcal{m}athbb{R}}^d_+}\big(C_0^{-1-\ka}-\ta\big)_+^2dx'dz, \end{split}\] \fi for a positive constant $C>0$. Applying the Gronwall inequality to the above expression and using that $(C_0^{-1-\ka}-\ta\big)_+|_{t=0}=0$, it yield that \[ \int_{{\mathcal{m}athbb{R}}_+^d}\big(C_0^{-1-\ka}-\ta\big)_+^2(t,x',z)dx'dz=0, \] which implies that \[ \ta(t,x',z)~\geq~C_0^{-1-\ka}, \] and we obtain the lower bound of $\ta$ given in \eqref{bound_ta}. The upper bound of the solution $\ta$ given in \eqref{bound_ta} can be obtained similarly, this gives rise to a classical solution to the problem \eqref{pr_hta}. 
Thus, the problem \eqref{pr_tta} admits a classical solution $\tta(t,x',z)=[P(t)]^\kappa\mathcal{m}athcal{A}at{\ta}(t,x',z)$, and the estimates \eqref{bd_ta} follows immediately. The uniqueness of the solution to \eqref{pr_tta} can be obtained by a standard comparison argument. \end{proof} \iffalse \subsection{Formation of singularities} In this subsection, we construct a singular solution of the problem \eqref{pr_invis} based on the formation mechanism of singularities for the inviscid Prandtl equations, studied in \cite{H-H} in the two space variable case. For this, we consider a simple case of the problem \eqref{pr_invis} with a uniform outflow, i.e., the functions $P(t)$ and $\Ta(t,x')$ are constants. Precisely, consider the following problem \begin{equation}\label{pr_shear}\begin{cases} \mathcal{m}athcal{P}d_t \bu_h+(\bu_h\cdot\nabla_h+u_d\mathcal{m}athcal{P}d_y)\bu_h=0,\\%~&in ~\Omega_T,\\ \mathcal{m}athcal{P}d_t \ta+(\bu_h\cdot\nabla_h+u_d\mathcal{m}athcal{P}d_y)\ta =\ta \mathcal{m}athcal{P}d_y^2\ta,\\%~&in ~\Omega_T,\\ \nabla_h\cdot\bu_h+\mathcal{m}athcal{P}d_y u_d= \mathcal{m}athcal{P}d_y^2\ta,\\%\big(\bu_\tau\cdot \nabla_hP+ P_t\big),\\%~&in ~\Omega_T,\\ (u_d,\ta)|_{y=0}=\big(0,\ta^0(t,x')\big), \quad\lim\limits_{\yinf}\ta(t,x,y)=1,\\ (\bu_h,\ta)|_{t=0}=(\bu_{h0},\ta_0)(x',y). \end{cases}\end{equation} Then, we have: \begin{prop}\label{prop_singular} Assume that the initial-boundary data of the problem \eqref{pr_shear} is given by \[ (\bu_{h0},\ta_0)(x',y)~=~\Big(\bU\big(y+f_0(x')\big),1\Big),\quad \ta^0(t,x')=1, \] where $\bU(y)=\big(U_1(y),\cdots,U_{d-1}(y)\big)^T$ and $f_0(x')$ are smooth functions. Then, there exists a solution to \eqref{pr_shear} with $\ta(t,x',y)\equiv1$ and \[ \bu_h(t,x',y)=\bU\big(y+f(t,x')\big),\quad u_d(t,x',y)=-f_t(t,x')-\bU\big(y+f(t,x')\big)\cdot\nabla_hf(t,x'), \] where the function $f(t,x')$ is a solution of \[\begin{cases} f_t+\bU(f)\cdot\nabla_hf=0,\\ f(0,x')=f_0(x'). 
\end{cases}\] Moreover, if \[ t^*~:=~-\Big[\inf\limits_{x'\in{\mathcal{m}athbb{R}}^{d-1}}\bU'\big(f_0(x')\big)\cdot\nabla_hf_0(x')\Big]^{-1}>0, \] then, both of $\nabla_h\bu_h$ and $u_d$ blow up as $t\uparrow t^*$. \end{prop} The proof of the above proposition is similar to the one of Proposition 3.1 in \cite{H-H} for the two-dimensional problem, i.e. $d=2$, so we omit it here. \fi \subsection{Convergence to the inviscid Prandtl equations} In this subsection, we investigate the asymptotic behavior of the classical solution of \eqref{pr_invis} obtained in Theorem \ref{thm-2-1}, as $\ta$ tends to a positive constant. For this, we consider a simple case of the problem \eqref{pr_invis} with a uniform outflow, i.e., the functions $P(t)$ and $\Ta(t,x')$ are constants, and the general case can be studied similarly. Consider the following problem \begin{equation}\label{pr_shear}\begin{cases} \mathcal{m}athcal{P}d_t \bu_h+(\bu_h\cdot\nabla_h+u_d\mathcal{m}athcal{P}d_y)\bu_h=0,\\%~&in ~\Omega_T,\\ \mathcal{m}athcal{P}d_t \ta+(\bu_h\cdot\nabla_h+u_d\mathcal{m}athcal{P}d_y)\ta =\ta \mathcal{m}athcal{P}d_y^2\ta,\\%~&in ~\Omega_T,\\ \nabla_h\cdot\bu_h+\mathcal{m}athcal{P}d_y u_d= \mathcal{m}athcal{P}d_y^2\ta,\\%\big(\bu_\tau\cdot \nabla_hP+ P_t\big),\\%~&in ~\Omega_T,\\ (u_d,\ta)|_{y=0}=\big(0,\ta^0(t,x')\big), \quad\lim\limits_{\yinf}\ta(t,x,y)=1,\\ (\bu_h,\ta)|_{t=0}=(\bu_{h0},\ta_0)(x',y), \end{cases}\end{equation} and assume that \begin{equation}\label{ass_con} \ta^0(t,x')~=~1+\ep\tilde\ta^0(t,x'),\quad \ta_0(x',y)~=~1+\ep\tilde\ta_0(x',y) \end{equation} with $\ep\ll1.$ Then, formally \eqref{pr_shear} tends to the following inviscid Prandtl system as $\ep\to 0$, \begin{equation}\label{pr_con}\begin{cases} \mathcal{m}athcal{P}d_t \bu_h+(\bu_h\cdot\nabla_h+u_d\mathcal{m}athcal{P}d_y)\bu_h=0,\\%~&in ~\Omega_T,\\ \nabla_h\cdot\bu_h+\mathcal{m}athcal{P}d_y u_d= 0,\\ u_d|_{y=0}=0,\quad \bu_h|_{t=0}=\bu_{h0}(x',y). 
\end{cases}\end{equation} For the above problem \eqref{pr_con}, through analogous arguments as given in Theorem \ref{thm-2-1}, it's not difficult to obtain the following local existence of a classical solution. \begin{prop}\label{prop_inP} Let $\bu_{h0}(x',y)$ be smooth and satisfy the compatibility conditions of the problem \eqref{pr_con} up to order one, and $t^*>0$ be given as in \eqref{ass_ini}. Then, the problem \eqref{pr_con} has a unique classical solution in $[0,t^*)$ given by \begin{equation}\label{sol_invis1}\begin{split} \bu_h(t,x',y)=&\bu_{h0}\Big(\xi_1\big(t,x',\eta_1(t,x',y)\big), \eta_1(t,x',y)\Big),\\ u_d(t,x',y)= &\int_0^{\eta_1(t,x',y)}\mathcal{m}athcal{P}d_t(\frac{1}{a_1})(t,x',z)dz+ \int_0^{\eta_1(t,x',y)}\Big[\bb_1\big(t,x',\eta_1(t,x',y)\big) \cdot\nabla_h(\frac{1}{a_1})(t,x',z)\Big]dz, \end{split}\end{equation} where, the vector function $\xi_1(t,x',z)\in{\mathcal{m}athbb{R}}^{d-1}$ is determined by the equation \begin{equation}\label{tran_x1}\begin{split} &x'~=~\xi_1+t\bu_{h0}(\xi_1,z);\\ \end{split}\end{equation} the functions $a_1(t,x',z)$ and $\bb_1(t,x',z)$ are given as \begin{equation}\label{notation1}\begin{cases} a_1(t,x',z)~:=~ det(I_{d-1}+t\nabla_h\bu_{h0})\big(\xi_1(t,x',z),z\big),\\ \bb_1(t,x',z)~:=~\bu_{h0}\big(\xi_1(t,x',z),z\big); \end{cases}\end{equation} and $\eta_1(t,x',y)$ is determined by the relation \begin{equation}\label{tran_y1} y~=~\int_0^{\eta_1}\frac{1}{a_1(t,x',z)}dz. \end{equation} \end{prop} Now, we show that the solution of \eqref{pr_shear} given in Theorem \ref{thm-2-1} converges to $(\bu_h,u_d,1)$ when $\epsilon\to 0$, where $(\bu_h,u_d)$ is the solution of \eqref{pr_con} given in Proposition \ref{prop_inP}. \begin{theorem}\label{prop_con} Assume that the initial data of the problem \eqref{pr_shear} are smooth, and satisfy \eqref{ass_ini} and the compatibility conditions of \eqref{pr_shear} up to order one. 
Moreover, they have the special form \eqref{ass_con}, with $\tilde{\ta}_0(x',y)$ satisfying \begin{equation}\label{ass_tta} (1+y)^k\tilde{\ta}_0(x',y)~\in~H^2_{x'}\big({\mathbb{R}}^{d-1},H^1_y({\mathbb{R}}_+)\big) \end{equation} for some constant $k>\frac{1}{2}$. Let $(\bu_h,u_d,\ta)(t,x',y)$ ($0\le t< t_0\le t^*$) and $(\bu_{h1},u_{d1})(t,x',y)$ ($0\le t< t^*$) be the solutions of the problems \eqref{pr_shear} and \eqref{pr_con} given in Theorem \ref{thm-2-1} and Proposition \ref{prop_inP} respectively. Then, for sufficiently small $\ep$ there is a constant $C>0$ independent of $\ep,$ such that for all $(t,x',y)\in[0,t_0)\times{\mathbb{R}}^d_+$, \begin{equation}\label{est_con} \big|(\bu_h,u_d,\ta)(t,x',y)-(\bu_{h1},u_{d1},1)(t,x',y)\big|~\leq~C\ep. \end{equation} \end{theorem} \begin{proof}[\textbf{Proof.}] (1) Setting the solution $\ta$ of the problem \eqref{pr_shear} having the form \begin{equation}\label{ta} \ta(t,x',y)~=~1+\ep\tilde{\ta}\big(t,x',\eta(t,x',y)\big), \end{equation} then, from \eqref{pr_tta} we know that $\tilde{\ta}(t,x',z)$ satisfies the following problem in $\{0\leq t<t_0\le t^*,x'\in{\mathbb{R}}^{d-1},z>0\}$, \begin{equation}\label{tta}\begin{cases} \partial_t\tta+\bb\cdot\nabla_h\tta- a\partial_z\big(\frac{a}{1+\ep\tta}\partial_z\tta\big)=0,\\ \tta|_{z=0}=\tilde\ta^0(t,x'), \quad \lim\limits_{\zinf}\tta=0,\\ \tta|_{t=0}=\tilde\ta_0(x',z). \end{cases}\end{equation} Through similar arguments as given in the proof of Proposition \ref{lemma_ta}, we have the local existence of a classical solution to \eqref{tta}. 
Moreover, under the assumption \eqref{ass_tta}, by the standard energy method it's not difficult to obtain that there is a constant $C_1>0$ independent of $\ep,$ such that \begin{equation}\label{bound_tta} \|(1+z)^k\tta\|_{L^\infty(0, t_0;H^2_{x'}({\mathcal{m}athbb{R}}^{d-1}, H^1_z({\mathcal{m}athbb{R}}_+)))}~\leq ~C_1, \end{equation} which implies the assertion \eqref{est_con} for the $\ta$ component by using the Sobolev embedding inequality. (2) Comparing Theorem \ref{thm-2-1} with Proposition \ref{prop_inP}, we know that the auxiliary function $\xi(t,x',z)$ given by \eqref{2.1} coincides with $\xi_1(t,x',z)$ given by \eqref{tran_x1}, which implies that by combining \eqref{notation} with \eqref{notation1}, \begin{equation}\label{b} a(t,x',z)~=~a_1(t,x',z)\big[1+\ep\tilde{\ta}_0\big(\xi(t,x',z),z\big)\big],\quad \bb(t,x',z)~=~\bb_1(t,x',z). \end{equation} Also, from \eqref{tran_y} and \eqref{tran_y1} we have \begin{equation*} \int_{0}^{\eta(t,x',z)}\frac{1+\ep\tilde{\ta}(t,x',z)}{a(t,x',z)}dz~=~\int_{0}^{\eta_1(t,x',z)}\frac{1}{a_1(t,x',z)}dz, \end{equation*} which implies that by \eqref{b}, \begin{equation}\label{est_eta} \ep\int_{0}^{\eta(t,x',z)}\frac{\tilde{\ta}(t,x',z)-\tilde{\ta}_0\big(\xi(t,x',z),z\big)}{a(t,x',z)}dz~=~\int_{\eta(t,x',z)}^{\eta_1(t,x',z)}\frac{1}{a_1(t,x',z)}dz. \end{equation} Note that both $a$ and $a_1$ are bounded and have positive lower bounds, that is, there is a constant $C_2$ independent of $\ep$ such that \[ C_2^{-1}~\leq~a(t,x',z),~a_1(t,x',z)~\leq ~C_2,\quad (t,x',z)\in [0,t_0)\times{\mathcal{m}athbb{R}}^d_+,\] then the right-hand side of \eqref{est_eta} gives that \begin{equation}\label{est_rh} \Big|\int_{\eta(t,x',z)}^{\eta_1(t,x',z)}\frac{1}{a_1(t,x',z)}dz\Big| ~\geq~\frac{|\eta(t,x',z)-\eta_1(t,x',z)|}{C_2}. 
\end{equation} On the other hand, we have that for the left-hand side term of \eqref{est_eta}, \begin{equation*}\begin{split} &\Big|\int_{0}^{\eta(t,x',z)}\frac{\tilde{\ta}(t,x',z)-\tilde{\ta}_0\big(\xi(t,x',z),z\big)}{a(t,x',z)}dz\Big|\\ &\leq C_2\big(\|\tilde{\ta}(t,x',z)\|_{L^1_z}+\|\tilde{\ta}_0\big(\xi(t,x',z),z\big)\|_{L^1_z}\big)\\ &\leq C_3\Big(\|(1+z)^k \tilde{\ta}(t,x',z)\|_{L^2_z}+\|(1+z)^k\tilde{\ta}_0\big(\xi(t,x',z),z\big)\|_{L^2_z}\Big). \end{split}\end{equation*} Note that for $t\in[0,t_0)$ and $x'\in{\mathcal{m}athbb{R}}^{d-1}$, \[|\tilde{\ta}(t,x',z)|\leq\|\tilde{\ta}(t,x',z)\|_{H^2_{x'}},\] and \[|\tilde{\ta}_0\big(\xi(t,x',z),z\big)|\leq\|\tilde{\ta}_0\big(\xi(t,x',z),z\big)\|_{H^2_{x'}}\leq C_4\|\tilde{\ta}_0(x',z)\|_{H^2_{x'}},\] where we use that $\xi(t,x',z)$ has bounded derivatives up to order two. From the above three inequalities we obtain that \begin{equation}\label{est_lh} \Big|\int_{0}^{\eta(t,x',z)}\frac{\tilde{\ta}(t,x',z)-\tilde{\ta}_0\big(\xi(t,x',z),z\big)}{a(t,x',z)}dz\Big|\leq C_5\|(1+z)^k\tilde{\ta}\|_{L^\infty_t(H^2_{x'}L^2_z)} \end{equation} for some constant $C_5>0$ independent of $\ep.$ Plugging \eqref{est_rh} and \eqref{est_lh} into \eqref{est_eta}, it follows that \begin{equation}\label{con_eta} |\eta(t,x',z)-\eta_1(t,x',z)|~\leq~C_2C_5\ep~ \|(1+z)^k\tilde{\ta}\| _{L^\infty(0, t_0; H^2_{x'}({\mathcal{m}athbb{R}}^{d-1}, L^2_z({\mathcal{m}athbb{R}}_+)))} \end{equation} for all $(t,x',z)\in[0,t_0)\times{\mathcal{m}athbb{R}}^d_+$. (4) Now we prove the estimate \eqref{est_con} for the components $\bu_h$ and $u_d$. 
Since $\xi(t,x',z)=\xi_1(t,x',z)$, it follows that from the formulas of $\bu_h$ and $\bu_{h1}$ given by \eqref{sol_invis} and \eqref{sol_invis1} respectively, \begin{equation}\label{con_u}\begin{split} &|\bu_h(t,x',y)-\bu_{h1}(t,x',y)|\\ &\leq\|\xi_z(t,x',z)\cdot\nabla_h\bu_{h0}\big(\xi(t,x',z),z\big) +\partial_y\bu_{h0}\big(\xi(t,x',z),z\big)\|_ {L^\infty}\cdot|\eta-\eta_1|(t,x',y), \end{split} \end{equation} which implies that by using \eqref{con_eta}, \begin{equation} |\bu_h(t,x',y)-\bu_{h1}(t,x',y)|~\leq~C_6~\ep, \end{equation} for some constant $C_6>0$ independent of $\ep$. Similarly, we can show \eqref{est_con} for the component $u_d$. \end{proof} \section{Linearized problems of thermal layer equations at a shear flow} In this section, we study the well-posedness and long-time asymptotic behavior of the linearized problem of \eqref{pr_shear} at a shear flow. It is easy to know that under proper initial and boundary data, \eqref{pr_shear} has a shear flow solution: \begin{equation}\label{shear} (\bu_h,u_d,\ta)(t,x',y)~=~\Big(\bU_h(y),0,1\Big) \end{equation} with $\bU_h(y)=\big(U_1,\cdots,U_{d-1}\big)^T(y)$. Then, the linearized problem of \eqref{pr_shear} at the shear flow \eqref{shear} is given as \begin{equation}\label{pr_linear} \begin{cases} &\partial_t\bu_h+\bU_h(y)\cdot\nabla_h\bu_h+\bU_h'(y)u_d=0,\\ &\partial_t\ta+\bU_h(y)\cdot\nabla_h\ta=\partial_y^2\ta,\\ &\nabla_h\cdot\bu_h+\partial_yu_d=\partial_y^2\ta, \\ &(u_d,\ta)|_{y=0}=0,\\%\quad\liy\ta(t,x',y)=0,\\
& (\bu_h,\ta)|_{t=0}=(\bu_{h0},\ta_0)(x',y). \end{cases} \end{equation} We observe that the problem \eqref{pr_linear} shall be solved by the following two steps. 
Firstly, we determine $\ta(t,x',y)$ by solving the linear initial-boundary value problem in $\{t>0, x'\in {\mathbb{R}}^{d-1}, y>0\}$: \begin{equation}\label{pr_ta} \begin{cases} &\partial_t\ta+\bU_h(y)\cdot\nabla_h\ta=\partial_y^2\ta,\\ &\ta|_{y=0}=0,\quad\ta|_{t=0}=\ta_0(x',y). \end{cases} \end{equation} Then, $(\bu_h(t,x',y), u_d(t,x',y))$ are obtained by studying the following problem for the linearized inviscid Prandtl type equations: \begin{equation}\label{pr_u} \begin{cases} &\partial_t\bu_h+\bU_h(y)\cdot\nabla_h\bu_h+\bU_h'(y)u_d=0,\\ &\nabla_h\cdot\bu_h+\partial_yu_d=\partial_y^2\ta, \\ &u_d|_{y=0}=0,\quad \bu_h|_{t=0}=\bu_{h0}(x',y). \end{cases} \end{equation} Moreover, it's easy to know that the problem \eqref{pr_ta} with smooth and compatible initial data has a global classical solution and the solution is unique. \subsection{Explicit representations of solutions} Based on the above discussion, we have the following result for the problem \eqref{pr_linear}. \begin{prop}\label{prop_linear} Assume that $\bU_h(y),\bu_{h0}(x',y)$ and $\ta_0(x',y)$ are smooth, and satisfy the compatibility conditions of \eqref{pr_linear} up to order one. 
Then, there exists a classical solution $(\bu_h,u_d,\ta)(t,x',y)$ to the problem \eqref{pr_linear}, where $\ta(t,x',y)$ is solved from the problem \eqref{pr_ta}, and $\bu_h(t,x',y), ~u_d(t,x',y)$ are given explicitly as \begin{equation}\label{formu-u} \begin{split} \bu_h(t,x',y)= & \bu_{h0}\big(x'-t\bU_h(y),y\big)+t\bU_h'(y)\int_{0}^{y}(\nabla_h\cdot \bu_{h0})\big(x'-t\bU_h(z),z\big)dz \\ & +\bU_h'(y)\int_{0}^{y}\ta_0\big(x'-t\bU_h(z),z\big)dz-\bU_h'(y)\int_{0}^{y}\ta\big(t,x',z\big)dz,\\ u_d(t,x',y) = &\ta_y(t,x',y)-\ta_y(t,x',0) -\int_{0}^{y}\Big\{(\nabla_h\cdot\bu_{h0})\big(x'-t\bU_h(z),z\big)dz\\ &-t\int_{0}^{y}\big[\bU_h(y)-\bU_h(z)\big]\cdot\nabla_h(\nabla_h\cdot\bu_{h0})\big(x'-t\bU_h(z),z\big)\Big\}dz\\ & -\int_{0}^{y}\Big\{\big[\bU_h(y)-\bU_h(z)\big]\cdot\nabla_h\ta_0\big(x'-t\bU_h(z),z\big)\Big\}dz\\%+\ta_y(t,x',y)\\ &+\int_{0}^{y}\Big\{\big[\bU_h(y)-\bU_h(z)\big]\cdot\nabla_h\ta\big(t,x',z\big)\Big\}dz. \end{split} \end{equation} \end{prop} \begin{proof}[\bf{Proof.}] According to the arguments given before this proposition, we only need to derive the representations \eqref{formu-u} of $(\bu_h,u_d)(t,x',y)$. Denote by $\tilde f(s,\xi,y)$ the Fourier-Laplace transform of a function $f(t,x',y)$ for $t>0$ and $x'\in {\mathcal{m}athbb{R}}^{d-1}$, \begin{equation}\label{Four_Lap} \tilde f(s,\xi,y)~:=~\int_{0}^{+\infty}\int_{{\mathcal{m}athbb{R}}^{d-1}}f(t,x',y)e^{-st-i\xi\cdot x'}dx'dt \end{equation} with ${\rm Re}~s>0$ and $\xi\in{\mathcal{m}athbb{R}}^{d-1}$. 
Applying the Fourier-Laplace transform to the problem \eqref{pr_linear} yields that \begin{equation}\label{pr_FL} \begin{cases} \big[s+i\xi\cdot\bU_h(y)\big]~\widetilde{\bu_h}-\widehat{\bu_{h0}}+\widetilde{u_d}~\bU_h'(y)=0,\\ \big[s+i\xi\cdot\bU_h(y)\big]~\tilde{\ta}-\widehat{\ta_{0}}-\partial_y^2\tilde{\ta}=0,\\ i\xi\cdot\widetilde{\bu_h}+\partial_y\widetilde{u_d}-\partial_y^2\tilde{\ta}=0,\\ \widetilde{u_d}(s,\xi,0)=\tilde{\ta}(s,\xi,0)=0, \end{cases} \end{equation} where $\widehat{\bu_{h0}}=\widehat{\bu_{h0}}(\xi,y)$ and $\widehat{\ta_0}=\widehat{\ta_0}(\xi,y)$ are the Fourier transforms of the initial data $\bu_{h0}(x',y)$ and $\ta_0(x',y)$ with respect to $x',$ respectively. From the first and third equations of \eqref{pr_FL} we have \[ \big[s+i\xi\cdot\bU_h(y)\big]~\partial_y\widetilde{u_d}-\big[i\xi\cdot\bU'_h(y)\big]~\widetilde{u_{d}}= \big[s+i\xi\cdot\bU_h(y)\big]\partial_y^2\tilde{\ta}-i\xi\cdot\widehat{\bu_{h0}}. 
\] Solving this equation with the boundary condition $\widetilde{u_d}(s,\xi,0)=0,$ it follows that \begin{equation}\label{FL_v}\begin{split} \widetilde{u_d}(s,\xi,y)~&=~\int_0^y\frac{s+i\xi\cdot \bU_h(y)}{s+i\xi\cdot\bU_h(z)}\mathcal{m}athcal{P}d_y^2\tilde{\ta}(s,\xi,z)dz -\int_{0}^{y}\frac{s+i\xi\cdot\bU_h(y)}{\big[s+i\xi\cdot \bU_h(z)\big]^2}~\big[i\xi\cdot\widehat{\bu_{h0}}(\xi,z) \big]dz, \end{split}\end{equation} which implies \begin{equation}\label{FL_v1}\begin{split} \widetilde{u_d}(s,\xi,y) ~&=~\mathcal{m}athcal{P}d_y\tilde{\ta}(s,\xi,y)-\mathcal{m}athcal{P}d_y\tilde{\ta}(s,\xi,0)+\int_{0}^{y}\frac{i\xi\cdot\big[\bU_h(y)-\bU_h(z)\big]}{s+i\xi\cdot\bU_h(z)}\mathcal{m}athcal{P}d_y^2\tilde{\ta}(s,\xi,z)dz\\ &\quad -\int_{0}^{y}\Big\{\frac{1}{s+i\xi\cdot\bU_h(z)}+\frac{i\xi\cdot\big[\bU_h(y)-\bU_h(z)\big]}{\big[s+i\xi\cdot\bU_h(z)\big]^2}\Big\}~\big[i\xi\cdot\widehat{\bu_{h0}}(\xi,z)\big]dz\\ ~&=~\mathcal{m}athcal{P}d_y\tilde{\ta}(s,\xi,y)-\mathcal{m}athcal{P}d_y\tilde{\ta}(s,\xi,0)+\int_{0}^{y}\big[i\xi\cdot\big(\bU_h(y)-\bU_h(z)\big)\big]\cdot\big[\tilde{\ta}(s,\xi,z)-\frac{\widehat{\ta_0}(\xi,z)}{s+i\xi\cdot\bU_h(z)}\big]dz\\ &\quad -\int_{0}^{y}\Big\{\frac{1}{s+i\xi\cdot\bU_h(z)}+\frac{i\xi\cdot\big(\bU_h(y)-\bU_h(z)\big)}{\big[s+i\xi\cdot\bU_h(z)\big]^2}\Big\}~\big[i\xi\cdot\widehat{\bu_{h0}}(\xi,z)\big]dz, \end{split} \end{equation} by using the second equation given in \eqref{pr_FL}. Then, inverting the Fourier-Laplace transform in \eqref{FL_v1} we obtain the expression of $u_d(t,x',y)$ given in \eqref{formu-u}. 
Plugging the relation \eqref{FL_v} into the first equation of \eqref{pr_FL} and using the second equation of \eqref{pr_FL}, we get \begin{equation}\label{FL_u} \widetilde{\bu_h}(s,\xi,y)~=~\frac{\widehat{\bu_{h0}}(\xi,y)}{s+i\xi\cdot\bU_h(y)}+\Big\{\int_{0}^{y}\frac{i\xi\cdot\widehat{\bu_{h0}}(\xi,z)}{\big[s+i\xi\cdot\bU_h(z)\big]^2}dz+\int_{0}^{y}\frac{\widehat{\ta_0}(\xi,z)}{s+i\xi\cdot\bU_h(z)}dz-\int_{0}^{y}\tilde{\ta}(s,\xi,z)dz\Big\}~\bU_h'(y). \end{equation} Then, by inverting the Fourier-Laplace transform in this equality we deduce the expression of $\bu_h(t,x',y)$ given in \eqref{formu-u} immediately. \iffalse (1) From the problem \eqref{pr_linear}, we know that $\mathcal{m}athcal{P}d_yu_d$ satisfies the equation \begin{equation}\label{eq_w} \big(\mathcal{m}athcal{P}d_t+\bU_h(y)\cdot\nabla_h\big)(\mathcal{m}athcal{P}d_yu_d-\mathcal{m}athcal{P}d_y^2\ta)-\big(\bU_h'(y)\cdot\nabla_h\big)u_d=0, \end{equation} and the initial data \begin{equation}\label{ini_w} \mathcal{m}athcal{P}d_yu_d(0,x',y)~=~\mathcal{m}athcal{P}d_y^2\ta_0(x',y)-(\nabla_h\cdot\bu_{h0})(x',y). \end{equation} Let \begin{equation}\label{tran_lag} \tau=t,\quad \xi=x'-t\bU_h(y),\quad\eta=y \end{equation} with $\xi=(\xi_1,\cdots,\xi_{d-1})\in{\mathcal{m}athbb{R}}^{d-1}$. Obviously, we have \begin{equation}\label{pd} \mathcal{m}athcal{P}d_\tau=\mathcal{m}athcal{P}d_t+\bU_h(y)\cdot\nabla_h,\quad\nabla_\xi=\nabla_h,\quad\mathcal{m}athcal{P}d_\eta=\mathcal{m}athcal{P}d_y+t\bU_h'(y)\cdot\nabla_h. 
\end{equation} Under the transformation \eqref{tran_lag}, the first two equations of \eqref{pr_linear} and the equation \eqref{eq_w} are { \red transformed into } \begin{equation}\label{pr_lag} \begin{cases} & \mathcal{m}athcal{P}d_\tau\bu_h+\bU_h'(\eta)u_d=0, \\ & \mathcal{m}athcal{P}d_\tau\ta=(\mathcal{m}athcal{P}d_y^2\ta)\big(\tau,\xi+\tau\bU_h(\eta),\eta\big),\\ & \mathcal{m}athcal{P}d_\tau(\mathcal{m}athcal{P}d_yu_d-\mathcal{m}athcal{P}d_y^2\ta)-\bU_h'(\eta)\cdot\nabla_\xi u_d=0, \end{cases} \end{equation} where $\nabla_\xi=(\mathcal{m}athcal{P}d_{\xi_1},\cdots,\mathcal{m}athcal{P}d_{\xi_{d-1}})^T.$ Moreover, we have the initial data: \begin{equation}\label{ini_lag} (\bu_h,\ta,\mathcal{m}athcal{P}d_yu_d)(0,\xi,\eta)~=~(\bu_{h0},\ta_0,\mathcal{m}athcal{P}d_y^2\ta_0-\nabla_h\cdot\bu_{h0})(\xi,\eta). \end{equation} Similar to \cite{H-H}, let the Lagrangian stream function $\Psi$ satisfy \begin{equation}\label{Psi} \Psi_\eta~=~\mathcal{m}athcal{P}d_yu_d,\quad \Psi|_{\eta=0}=0. \end{equation} From \eqref{ini_lag}, we have \begin{equation}\label{ini_Psi} \Psi_\eta|_{\tau=0}~=~(\mathcal{m}athcal{P}d_y^2\ta_0-\nabla_h\cdot\bu_{h0})(\xi,\eta). \end{equation} Combining \eqref{Psi} with the third equation of \eqref{pr_lag}, and using the second equation of \eqref{pr_lag} we have \begin{equation}\label{eq_Psi} \mathcal{m}athcal{P}d_\tau\Psi_\eta-\bU_h'(\eta)\cdot\nabla_\xi u_d=\mathcal{m}athcal{P}d_\tau^2\ta, \end{equation} which implies that by virtue of \eqref{tran_lag}, \begin{equation}\label{eq_Psi1} \mathcal{m}athcal{P}d_\tau(\tau \Psi_\eta)=\mathcal{m}athcal{P}d_\eta u_d+\tau\mathcal{m}athcal{P}d_\tau^2\ta,\quad{\mathcal{m}box or}\quad \mathcal{m}athcal{P}d_\eta u_d=\mathcal{m}athcal{P}d_\eta(\tau\Psi)_\tau-\tau\mathcal{m}athcal{P}d_\tau^2\ta. 
\end{equation} Integrating \eqref{eq_Psi1} in $\eta$ and using the boundary values of $u_d$ and $\Psi$, it yields that \begin{equation}\label{eq_Psi2}\begin{split} u_d~&=~(\tau\Psi)_\tau-\tau\mathcal{m}athcal{P}d_\tau^2\big(\int_{0}^{\eta}\ta\big(\tau,\xi+\tau\bU_h(\zeta),\zeta\big) d\zeta\big)\\ ~&=~\mathcal{m}athcal{P}d_\tau\Big(\tau\Psi-\tau\mathcal{m}athcal{P}d_\tau\int_{0}^{\eta}\ta\big(\tau,\xi+\tau\bU_h(\zeta),\zeta\big) d\zeta+\int_{0}^{\eta}\ta\big(\tau,\xi+\tau\bU_h(\zeta),\zeta\big)d\zeta\Big). \end{split}\end{equation} Then, substituting the expression \eqref{eq_Psi2} of $u_d$ into the equation of $\bu_h$ given in \eqref{pr_lag}, and combining with the initial data \eqref{ini_lag}, we have \begin{equation}\label{for_u}\begin{split} \bu_h=\bu_{h0}-\bU_h'(\eta)\Big[&\tau\Psi-\tau\mathcal{m}athcal{P}d_\tau\int_{0}^{\eta}\ta\big(\tau,\xi+\tau\bU_h(\zeta),\zeta\big) d\zeta+\int_{0}^{\eta}\ta\big(\tau,\xi+\tau\bU_h(\zeta),\zeta\big)d\zeta- \int_0^\eta\ta_0(\xi,\zeta)d\zeta\Big]. \end{split}\end{equation} (2) Next, we study the terms of $\Psi$ given on the right hand sides of \eqref{eq_Psi2} and \eqref{for_u}. 
Plugging the expression \eqref{eq_Psi2} of $u_d$ into \eqref{eq_Psi}, it follows \begin{equation*}\begin{split} &\quad\mathcal{m}athcal{P}d_\tau\Big(\Psi_\eta-\tau\bU_h'(\eta)\cdot\nabla_\xi\Psi\Big)\\ &=\mathcal{m}athcal{P}d_\tau^2\ta-\mathcal{m}athcal{P}d_\tau\Big(\tau\bU_h'(\eta)\cdot\nabla_\xi\big(\mathcal{m}athcal{P}d_\tau\int_{0}^{\eta}\ta\big(\tau,\xi+\tau\bU_h(\zeta),\zeta\big) d\zeta\big)\Big)+\bU_h'(\eta)\cdot\nabla_\xi\Big(\mathcal{m}athcal{P}d_\tau\int_{0}^{\eta}\ta\big(\tau,\xi+\tau\bU_h(\zeta),\zeta\big) d\zeta\Big)\\ &=\mathcal{m}athcal{P}d_\tau\Big[\mathcal{m}athcal{P}d_\eta\Big(\mathcal{m}athcal{P}d_\tau\int_{0}^{\eta}\ta\big(\tau,\xi+\tau\bU_h(\zeta),\zeta\big) d\zeta\Big)-\tau\bU_h'(\eta)\cdot\nabla_\xi\Big(\mathcal{m}athcal{P}d_\tau\int_{0}^{\eta}\ta\big(\tau,\xi+\tau\bU_h(\zeta),\zeta\big) d\zeta\Big)\Big]\\ &\qquad+\mathcal{m}athcal{P}d_\tau\Big[\bU_h'(\eta)\cdot\nabla_\xi\Big(\int_{0}^{\eta}\ta\big(\tau,\xi+\tau\bU_h(\zeta),\zeta\big) d\zeta\Big)\Big], \end{split}\end{equation*} which implies that by virtue of\eqref{pd}, \begin{equation}\label{eq_Psi3} \mathcal{m}athcal{P}d_\tau\Big(\Psi-\mathcal{m}athcal{P}d_\tau\int_{0}^{\eta}\ta\big(\tau,\xi+\tau\bU_h(\zeta),\zeta\big) d\zeta\Big)_y=\mathcal{m}athcal{P}d_\tau\Big[\bU_h'(\eta)\cdot\nabla_\xi\Big(\int_{0}^{\eta}\ta\big(\tau,\xi+\tau\bU_h(\zeta),\zeta\big) d\zeta\Big)\Big]. 
\end{equation} Integrating this equation with respect to $\tau$, using the initial data \eqref{ini_Psi} and noting $\mathcal{m}athcal{P}artial_\eta=\mathcal{m}athcal{P}artial_y$ at $t=0$, we have \begin{equation*}\begin{split} &\quad\Big(\Psi-\mathcal{m}athcal{P}d_\tau\int_{0}^{\eta}\ta\big(\tau,\xi+\tau\bU_h(\zeta),\zeta\big) d\zeta\Big)_y\\ &=-(\nabla_h\cdot\bu_{h0})(\xi,\eta)-\bU_h'(\eta)\cdot\nabla_\xi\big(\int_{0}^{\eta}\ta_0(\xi,\zeta) d\zeta\big)+\bU_h'(\eta)\cdot\nabla_\xi\Big(\int_{0}^{\eta}\ta\big(\tau,\xi+\tau\bU_h(\zeta),\zeta\big) d\zeta\Big)\\ &=-(\nabla_h\cdot\bu_{h0})\big(x'-t\bU_h(y),y\big)-\bU_h'(y)\cdot\nabla_h\Big(\int_0^y\ta_0\big(x'-t\bU_h(y),z\big)dz\Big)\\ &\quad+\bU_h'(y)\cdot\nabla_h\Big(\int_0^y\ta\big(t,x'-t\bU_h(y)+t\bU_h(z),z\big)dz\Big). \end{split}\end{equation*} Then, integrating the above quality in the variable $y$ and using the boundary condition $\Psi|_{y=0}=0$, it yields that \[\begin{split} &\quad\Psi-\mathcal{m}athcal{P}d_\tau\int_{0}^{\eta}\ta\big(\tau,\xi+\tau\bU_h(\zeta),\zeta\big) d\zeta\\ &=-\int_0^y(\nabla_h\cdot\bu_{h0})\big(x'-t\bU_h(z),z\big)dz-\int_0^y\int_0^{\tilde y}\Big(\bU_h'(\tilde y)\cdot\nabla_h\ta_0\big(x'-t\bU_h(\tilde y),z\big)\Big)dzd\tilde y\\ &\quad+\int_0^y\int_0^{\tilde y}\Big(\bU_h'(\tilde y)\cdot\nabla_h\ta\big(t,x'-t\bU_h(\tilde y)+t\bU_h(z),z\big)\Big)dzd\tilde y, \end{split}\] which implies that \begin{equation}\label{ex_Psi}\begin{split} &\quad \tau\Big(\Psi-\mathcal{m}athcal{P}d_\tau\int_{0}^{\eta}\ta\big(\tau,\xi+\tau\bU_h(\zeta),\zeta\big) d\zeta\Big)+\int_{0}^{\eta}\ta\big(\tau,\xi+\tau\bU_h(\zeta),\zeta\big)d\zeta\\ &=-t\int_0^y(\nabla_h\cdot\bu_{h0})\big(x'-t\bU_h(z),z\big)dz +\int_0^y\ta_0\big(x'-t\bU_h(y),z\big)dz-\int_0^y\ta_0\big(x'-t\bU_h(z),z\big)dz +\int_0^y\ta\big(t,x',z\big)dz. 
\end{split}\end{equation} Substituting \eqref{ex_Psi} into \eqref{eq_Psi2} gives \begin{equation}\label{ex_w}\begin{split} u_d(t,x',y) &=-\int_0^y\Big[\nabla_h\cdot\bu_{h0}+\big(\bU_h(y)-\bU_h(z)\big)\cdot\nabla_h\big(t\nabla_h\cdot\bu_{h0}+\ta_0\big)\Big]\big(x'-t\bU_h(z),z\big)dz \\ &\quad+\int_0^y\big(\ta_t+\bU_h(y)\cdot\nabla_h\ta\big)(t,x',z)dz\\ &=-\int_0^y\Big[\nabla_h\cdot\bu_{h0}+\big(\bU_h(y)-\bU_h(z)\big)\cdot\nabla_h\big(t\nabla_h\cdot\bu_{h0}+\ta_0\big)\Big]\big(x'-t\bU_h(z),z\big)dz \\ &\quad+\int_0^y\big[\big(\bU_h(y)-\bU_h(z)\big)\cdot\nabla_h\ta\big](t,x',z)dz+\ta_y(t,x',y)-\ta(t,x',0), \end{split}\end{equation} where we use the equation of $\ta$ given in \eqref{pr_linear} and the boundary condition $u_d|_{y=0}=0.$ Finally, plugging \eqref{ex_Psi} into \eqref{for_u}, it follows that \begin{equation}\label{ex_u}\begin{split} \bu_h(t,x',y)=&\bu_{h0}\big(x'-t\bU_h(y),y\big)+\bU_h'(y)\int_0^y\big[t\nabla_h\cdot\bu_{h0}+\ta_0\big]\big(x'-t\bU_h(z),z\big)dz-\bU_h'(y)\int_0^y\ta(t,x',z)dz. \end{split}\end{equation} Thus, we obtain the proof of this proposition. \fi \end{proof} \begin{remark} (1) One can also obtain the expression \eqref{formu-u} by solving the problem \eqref{pr_u} through the method of characteristics as introduced in \cite{H-H}. (2) From the expression of $\bu_h(t,x',y)$ given in \eqref{formu-u}, we know that when the initial data $\bu_{h0}(x',y)$ decays faster than the background shear flow $\bU_h(y)$ as $y\to +\infty$, the decay rate of the solution $\bu_h(t,x',y)$ of the linearized problem \eqref{pr_linear} is mainly dominated by that of $\bU_h'(y)$ when $y\rightarrow+\infty$. (2) The representation \eqref{formu-u} given in Proposition \ref{prop_linear} shows that in general, there is a loss of derivatives with respect to the tangential variables $x'$ for the solution $(\bu_h, u_d)(t,x',y)$ of the problem \eqref{pr_linear}. 
\end{remark} From the expression \eqref{formu-u}, we divide the solution $(\bu_h,u_d)(t,x',y)$ into two parts: \begin{equation}\label{decom} (\bu_h, u_d)(t,x',y)~:=~(\tilde\bu_h, \tilde u_d)(t,x',y)+(\bar\bu_h, \bar u_d)(t,x',y), \end{equation} where \begin{equation}\label{formu-u1}\begin{cases} \tilde\bu_h(t,x',y)= & \bu_{h0}\big(x'-t\bU_h(y),y\big)+t\bU_h'(y)\int_{0}^{y}(\nabla_h\cdot \bu_{h0})\big(x'-t\bU_h(z),z\big)dz ,\\ \tilde u_{d}(t,x',y) = & -\int_{0}^{y}\Big\{(\nabla_h\cdot\bu_{h0})\big(x'-t\bU_h(z),z\big)dz\\ &-t\int_{0}^{y}\big[\bU_h(y)-\bU_h(z)\big]\cdot\nabla_h(\nabla_h\cdot\bu_{h0})\big(x'-t\bU_h(z),z\big)\Big\}dz,\\ \end{cases}\end{equation} and \begin{equation}\label{formu-u2} \begin{cases} \bar\bu_h(t,x',y)= & \bU_h'(y)\int_{0}^{y}\ta_0\big(x'-t\bU_h(z),z\big)dz-\bU_h'(y)\int_{0}^{y}\ta\big(t,x',z\big)dz,\\ \bar u_d(t,x',y) = &\ta_y(t,x',y)-\ta_y(t,x',0) -\int_{0}^{y}\Big\{\big[\bU_h(y)-\bU_h(z)\big]\cdot\nabla_h\ta_0\big(x'-t\bU_h(z),z\big)\Big\}dz\\%+\ta_y(t,x',y)\\ &+\int_{0}^{y}\Big\{\big[\bU_h(y)-\bU_h(z)\big]\cdot\nabla_h\ta\big(t,x',z\big)\Big\}dz. \end{cases}\end{equation} Then, it is easy to know that $(\tilde\bu_h,\tilde u_d)(t,x',y)$ and $(\bar\bu_h, \bar u_d)(t,x',y)$ satisfy the following intial-boundary value problems, respectively, \begin{equation}\label{invis_prandtl} \begin{cases} &\mathcal{m}athcal{P}d_t\tilde\bu_h+\bU_h(y)\cdot\nabla_h\tilde\bu_h+\bU_h'(y)\tilde u_d=0,\\ &\nabla_h\cdot\tilde\bu_h+\mathcal{m}athcal{P}d_y\tilde u_d=0, \\ &\tilde u_d|_{y=0}=0,\quad \tilde\bu_h|_{t=0}=\bu_{h0}(x',y), \end{cases} \end{equation} and \begin{equation}\label{pr_bar} \begin{cases} &\mathcal{m}athcal{P}d_t\bar\bu_h+\bU_h(y)\cdot\nabla_h\bar\bu_h+\bU_h'(y)\bar u_d=0,\\ &\nabla_h\cdot\bar\bu_h+\mathcal{m}athcal{P}d_y\bar u_d=\mathcal{m}athcal{P}d_y^2\ta, \\ &\bar u_d|_{y=0}=0,\quad \bar\bu_h|_{t=0}=0. 
\end{cases} \end{equation} Moreover, we note that \eqref{invis_prandtl} is the linearization of the inviscid Prandtl equations at the shear flow $\big(\bU_h(y),0\big)$. Denote by \begin{equation}\label{def_h} \|\bu_h\|(t,y)~:=~\Big(\int_{{\mathcal{m}athbb{R}}^{d-1}}|\bu_h(t,x',y)|^2dx'\Big)^{\frac{1}{2}}, \end{equation} and the following anisotropic space: \[ L^{p,q}~:=~\{f=f(x',y)~\mathcal{m}box{measurable}:~\|f\|_{L^{p,q}} :=\|\|f\|_{L^p(dx')}\|_{L^q(dy)}<\infty\} \] for $1\leq p,q\leq\infty$, and $$ H^{m,k}~:=~\{f=f(x',y)~\mathcal{m}box{measurable}:~\|f\|_{H^{m,k}}:=\Big(\sum\limits_{|\alpha|\leq m,0\leq i\leq k}\|\mathcal{m}athcal{P}d_{x'}^\alpha\mathcal{m}athcal{P}d_y^if\|^2_{L^2(dx'dy)}\Big)^{\frac{1}{2}}<\infty\} $$ with \[\mathcal{m}athcal{P}d_{x'}^\alpha=\mathcal{m}athcal{P}d_{x_1}^{\alpha_1}\cdots\mathcal{m}athcal{P}d_{x_{d-1}}^{\alpha_{d-1}},\qquad \alpha=(\alpha_1,\cdots,\alpha_{d-1}),\quad|\alpha|=\alpha_1+\cdots+\alpha_{d-1}.\] Next, we have the following result on the boundedness estimates of the solution to the problem \eqref{pr_linear}. \begin{prop}\label{prop_est} Assume that $\bU_h\in W^{2,\infty}({\mathcal{m}athbb{R}}_+)$, the initial data of the problem \eqref{pr_linear} are bounded in the sense that all norms of the initial data appeared in the following estimates are finite, and also satisfy the compatibility conditions of the problem \eqref{pr_linear}. 
Let $(\bu_h,u_d,\ta)$ be the solution of the problem \eqref{pr_linear}, then there exist positive constants $M_0=M_0(\|\bU_h(y)\|_{L^\infty({\mathcal{m}athbb{R}}_+)})$ and $M_1=M_1(\|\bU_h(y)\|_{W^{2,\infty}({\mathcal{m}athbb{R}}_+)})$ independent of $t,$ such that \begin{equation}\label{est_ta0} \|\ta(t,\cdot)\|_{L^2({\mathcal{m}athbb{R}}^d_+)}\leq\|\ta_0\|_{L^2({\mathcal{m}athbb{R}}^d_+)},\quad\|\nabla_h\ta(t,\cdot)\|_{L^2({\mathcal{m}athbb{R}}^d_+)} \leq\|\nabla_h\ta_0\|_{L^2({\mathcal{m}athbb{R}}^d_+)}, \end{equation} and \begin{equation}\label{est_ta1}\begin{split} &\|\ta\|(t,y)+\|\ta_y\|(t,y)\leq M_0 \big(\|\ta_0\|_{H^{1,0}}+\|\ta_0\|_{H^{0,2}}\big),\\ &\|\nabla_h\ta\|(t,y)\leq M_0\big(\|\ta_0\|_{H^{2,0}}+\|\ta_0\|_{H^{1,2}}\big),\\ &\|\mathcal{m}athcal{P}d_y^2\ta\|(t,y)\leq M_1\big(\|\ta_0\|_{H^{2,0}}+\|\ta_0\|_{H^{1,2}}+\|\ta_0\|_{H^{0,4}}\big) \end{split}\end{equation} hold for all $t\ge 0$ and $y\ge 0$. Moreover, one has the following estimates: \begin{equation}\label{est_linear}\begin{split} \|\bu_h\|(t,y)\leq&\|\bu_{h0}\|(y) +t|\bU_h'(y)|\cdot\int_{0}^{y}\|\nabla_h\cdot\bu_{h0}\|(z)dz+2\|\ta_0\|_{L^{2}({\mathcal{m}athbb{R}}^d_+)}\cdot\big|\sqrt{y}\bU_h'(y)\big|,\\ \|u_d\|(t,y)\leq &\int_{0}^{y}\|\nabla_h\cdot\bu_{h0}\|(z)dz+t\int_{0}^{y} \Big[\big|\bU_h(y)-\bU_h(z)\big|\cdot\|\nabla_h\big(\nabla_h\cdot\bu_{h0} \big)\|(z)\Big]dz\\ &+2\|\nabla_h\ta_0\|_{L^2({\mathcal{m}athbb{R}}^d_+)}\Big(\int_0^y\big|\bU_h(y)-\bU_h(z)\big|^2dz\Big)^{\frac{1}{2}}+M_0\Big(\|\ta_0\|_{H^{1,0}}+\|\ta_0\|_{H^{0,2}}\Big). \end{split}\end{equation} \end{prop} \begin{proof}[\bf{Proof.}] (1) Firstly, from Proposition \ref{prop_linear} we know that $\ta(t,x',y)$ satisfies the linear problem \eqref{pr_ta}. 
Then, it is easy to obtain that by energy estimate, \begin{equation*} \frac{d}{2dt}\|\ta(t,\cdot)\|^2_{L^2({\mathcal{m}athbb{R}}^d_+)}+\|\mathcal{m}athcal{P}d_y\ta(t,\cdot)\|^2_{L^2({\mathcal{m}athbb{R}}^d_+)}~=~0, \end{equation*} which implies that \begin{equation}\label{energy_ta} \|\ta(t,\cdot)\|_{L^2({\mathcal{m}athbb{R}}^d_+)}^2+2\int_{0}^{t}\|\mathcal{m}athcal{P}d_y\ta(s,\cdot)\|_{L^2({\mathcal{m}athbb{R}}_+^d)}^2ds~=~\|\ta_0\|_{L^2({\mathcal{m}athbb{R}}^d_+)}^2,\quad \forall t\geq0. \end{equation} Denote by the operator \[\mathcal{m}athcal{P}d_{\mathcal{m}athcal{T}}^\alpha~:=~\mathcal{m}athcal{P}d_t^{\alpha_1}\mathcal{m}athcal{P}d_{x_1}^{\alpha_2} \cdots \mathcal{m}athcal{P}d_{x_{d-1}}^{\alpha_d},\qquad\alpha=(\alpha_1,\cdots,\alpha_d),\quad|\alpha|=\alpha_1+\cdots+\alpha_d.\] Applying the operator $\mathcal{m}athcal{P}d_{\mathcal{m}athcal{T}}^\alpha,~|\alpha|=1$ to the equation of \eqref{pr_ta}, and similarly we have that, \begin{equation}\label{energy_x} \|\mathcal{m}athcal{P}t^\alpha\ta(t,\cdot)\|_{L^2({\mathcal{m}athbb{R}}^d_+)}^2+2\int_{0}^{t}\|\mathcal{m}athcal{P}d_y\mathcal{m}athcal{P}t^\alpha\ta(s,\cdot)\|^2_{L^2({\mathcal{m}athbb{R}}^d_+)}ds~=~\|\mathcal{m}athcal{P}t^\alpha\ta(0,\cdot)\|_{L^2({\mathcal{m}athbb{R}}_+^d)}^2. 
\end{equation} (2) Combining the equation of \eqref{pr_ta} with the estimate \eqref{energy_x}, and noting that \begin{equation}\label{ini_pdta} \ta_t(0,x',y)=\mathcal{m}athcal{P}d_y^2\ta_0(x',y)-\bU_h(y)\cdot\nabla_h\ta_0(x',y), \qquad\ta_{x_i}(0,x',y)=\ta_{0x_i}(x',y),\quad 1\leq i\leq d-1, \end{equation} it follows that \begin{equation}\label{energy_yy}\begin{split} \|\mathcal{m}athcal{P}d_y^2\ta(t,\cdot)\|_{L^2({\mathcal{m}athbb{R}}^d_+)}&~\leq~\|\ta_t(t,\cdot)\|_{L^2({\mathcal{m}athbb{R}}^d_+)}+\|\bU_h(y)\|_{L^\infty({\mathcal{m}athbb{R}}_+)}\cdot\|\nabla_h\ta(t,\cdot)\|_{L^2({\mathcal{m}athbb{R}}^d_+)}\\ &~\leq~2\|\bU_h(y)\|_{L^\infty({\mathcal{m}athbb{R}}_+)}\cdot\|\nabla_h\ta_0\|_{L^2({\mathcal{m}athbb{R}}^d_+)}+\|\mathcal{m}athcal{P}d_y^2\ta_0\|_{L^2({\mathcal{m}athbb{R}}^d_+)}. \end{split}\end{equation} By the classical interpolation inequality we obtain that from \eqref{energy_ta} and \eqref{energy_yy}, \begin{equation}\label{energy_y}\begin{split} \|\ta_y(t,\cdot)\|_{L^2({\mathcal{m}athbb{R}}^d_+)}&\leq C\Big(\|\ta(t,\cdot)\|_{L^2({\mathcal{m}athbb{R}}^d_+)}+\|\mathcal{m}athcal{P}d_y^2\ta(t,\cdot)\|_{L^2({\mathcal{m}athbb{R}}^d_+)}\Big)\\ &\leq C\Big(\|\ta_0\|_{L^2({\mathcal{m}athbb{R}}^d_+)}+\|\bU_h(y)\|_{L^\infty({\mathcal{m}athbb{R}}_+)}\cdot\|\nabla_h\ta_0\|_{L^2({\mathcal{m}athbb{R}}^d_+)}+\|\mathcal{m}athcal{P}d_y^2\ta_0\|_{L^2({\mathcal{m}athbb{R}}^d_+)}\Big), \end{split}\end{equation} where $C$ is a positive constant independent of $t$. 
Then, from the estimates \eqref{energy_ta}, \eqref{energy_yy} and \eqref{energy_y} it implies by the imbedding inequality that there is a positive constant $M_0=M_0(\|\bU_h(y)\|_{L^\infty({\mathcal{m}athbb{R}}_+)})$ independent of $t$, such that \begin{equation}\label{est_tay}\begin{split} &\|\ta(t,\cdot)\|_{L^{2,\infty}}\leq \|\ta(t,\cdot)\|_{H^{0,1}}\leq M_0\big(\|\ta_0\|_{H^{1,0}}+\|\ta_0\|_{H^{0,2}}\big),\\ &\|\ta_y(t,\cdot)\|_{L^{2,\infty}}\leq \|\ta_y(t,\cdot)\|_{H^{0,1}}\leq M_0\big(\|\ta_0\|_{H^{1,0}}+\|\ta_0\|_{H^{0,2}}\big). \end{split}\end{equation} (3) Next, we apply $\mathcal{m}athcal{P}t^\alpha,~|\alpha|=1$ to the equation in \eqref{pr_ta} and get \begin{equation}\label{eq_pdta} \mathcal{m}athcal{P}d_t\mathcal{m}athcal{P}t^\alpha\ta+\bU_h(y)\cdot\nabla_h\mathcal{m}athcal{P}t^\alpha\ta-\mathcal{m}athcal{P}d_y^2\mathcal{m}athcal{P}t^\alpha\ta=0, \end{equation} moreover, we have the initial data \eqref{ini_pdta} and the following boundary value of $\mathcal{m}athcal{P}t^\alpha\ta(t,x',y)$: \begin{equation}\label{bd_pdta} \mathcal{m}athcal{P}t^\alpha\ta(t,x',0)~=~0. \end{equation} Thus, by using the same argument as above for the solution $\mathcal{m}athcal{P}t^\alpha\ta$ of the problem \eqref{eq_pdta}-\eqref{bd_pdta}, we can obtain that there exist positive constants $C_1=C_1(\|\bU_h(y)\|_{L^\infty({\mathcal{m}athbb{R}}_+)})$ and $C_2=C_2(\|\bU_h(y)\|_{W^{2\infty}({\mathcal{m}athbb{R}}_+)})$ independent of $t,$ such that \begin{equation}\label{est_tax}\begin{split} &\|\nabla_h\ta(t,\cdot)\|_{L^{2,\infty}}\leq C_1\big(\|\ta_0\|_{H^{2,0}}+\|\ta_0\|_{H^{1,2}}\big),\\ &\|\ta_t(t,\cdot)\|_{L^{2,\infty}}\leq C_2\big(\|\ta_0\|_{H^{2,0}}+\|\ta_0\|_{H^{1,2}}+\|\ta_0\|_{H^{0,4}}\big). 
\end{split}\end{equation} Furthermore, from the equation given in \eqref{pr_ta} we obtain that there is a positive constant $C_3=C_3(\|\bU_h(y)\|_{W^{2\infty}({\mathcal{m}athbb{R}}_+)})$ independent of $t$, such that \begin{equation}\label{est_tayy}\begin{split} \|\mathcal{m}athcal{P}d_y^2\ta(t,\cdot)\|_{L^{2,\infty}}&\leq\|\ta_t(t,\cdot)\|_{L^{2,\infty}}+\|\bU_h(y)\|_{L^\infty({\mathcal{m}athbb{R}}_+)}\cdot\|\nabla_h\ta\|_{L^{2,\infty}}\\ &\leq C_3\big(\|\ta_0\|_{H^{2,0}}+\|\ta_0\|_{H^{1,2}}+\|\ta_0\|_{H^{0,4}}\big). \end{split} \end{equation} Combining \eqref{est_tay}, \eqref{est_tax} and \eqref{est_tayy}, we obtain the estimates given in \eqref{est_ta1}. \iffalse multiply the above equation by $\ta_y$ and integrate the resulting equation over ${\mathcal{m}athbb{R}}^d_+$ to obtain that by integration by parts, \begin{equation}\label{energy_y}\begin{split} \frac{d}{2dt}\|\ta_y\|^2(t)+\|\mathcal{m}athcal{P}d_y\ta_y\|^2(t)&=-\int_{{\mathcal{m}athbb{R}}^d_+}\bU_h'(y)\cdot\nabla_h\ta\cdot\ta_ydx'dy\\ &\leq \frac{1}{2}\|\ta_y\|^2(t)+\frac{1}{2}\|\bU_h'\|^2_{L^\infty({\mathcal{m}athbb{R}}_+)}\cdot\|\nabla_h\ta\|^2(t), \end{split}\end{equation} Combining \eqref{energy_ta}, \eqref{energy_ta1} and \eqref{energy_y}, it yields that \fi (4) From the representation \eqref{formu-u} of $(\bu_h, u_d)$ given in Proposition \ref{prop_linear}, it is easy to obtain: \begin{equation}\label{energy_uh}\begin{split} \|\bu_h\|(t,y)&\leq\|\bu_{h0}\|(y)+|\bU_h'(y)|\int_{0}^{y}\Big[\|\ta_0\|(z)+\|\ta\|(t,z)\Big]dz+t|\bU_h'(y)|\int_{0}^{y}\|\nabla_h\cdot\bu_{h0}\|(z)dz\\ &\leq\|\bu_{h0}\|(y)+t|\bU_h'(y)|\int_{0}^{y}\|\nabla_h\cdot\bu_{h0}\|(z)dz+\big|\sqrt{y}\bU_h'(y)\big|\Big(\|\ta_0\|_{L^2({\mathcal{m}athbb{R}}^d_+)}+\|\ta(t,\cdot)\|_{L^2({\mathcal{m}athbb{R}}^d_+)}\Big), \end{split}\end{equation} and \begin{equation}\label{energy_ud}\begin{split} 
\|u_d\|(t,y)\leq&2\|\ta_y(t,\cdot)\|_{L^{2,\infty}}+\int_{0}^{y}\Big[\big|\bU_h(y)-\bU_h(z)\big|\cdot\Big(\|\nabla_h\ta_0\|(z)+\|\nabla_h\ta\|(t,z)\Big)\Big]dz\\ & +\int_{0}^{y}\|\nabla_h\cdot\bu_{h0}\|(z)dz+t\int_{0}^{y} \Big[\big|\bU_h(y)-\bU_h(z)\big|\cdot\|\nabla_h\big(\nabla_h\cdot\bu_{h0} \big)\|(z)\Big]dz\\ \leq&2\|\ta_y(t,\cdot)\|_{L^{2,\infty}}+\Big(\int_{0}^{y}\big|\bU_h(y)-\bU_h(z)\big|^2dz\Big)^{\frac{1}{2}}\cdot\Big(\|\nabla_h\ta_0\|_{L^2({\mathcal{m}athbb{R}}^d_+)}+\|\nabla_h\ta(t,\cdot)\|_{L^2({\mathcal{m}athbb{R}}^d_+)}\Big)\\ & +\int_{0}^{y}\|\nabla_h\cdot\bu_{h0}\|(z)dz+t\int_{0}^{y} \Big[\big|\bU_h(y)-\bU_h(z)\big|\cdot\|\nabla_h\big(\nabla_h\cdot\bu_{h0} \big)\|(z)\Big]dz. \end{split}\end{equation} Combining \eqref{energy_ta} with \eqref{energy_uh}, we obtain the estimate of $\bu_h$ given in \eqref{est_linear}. Substituting \eqref{est_ta1} and \eqref{energy_x} into \eqref{energy_ud}, the estimate of $u_d$ given in \eqref{est_linear} follows immediately. \end{proof} \begin{remark} From the computation given in the above proposition, and the expressions \eqref{formu-u1} and \eqref{formu-u2} of $(\tilde{\bu}_h,\tilde u_d)$ and $(\bar{\bu}_h,\bar u_d)$ respectively, indeed we can get the following more precise estimates for all $t\geq0,y\geq0$, \begin{equation}\label{est_linear1}\begin{cases} \|\tilde\bu_h\|(t,y)\leq\|\bu_{h0}\|(y) +t|\bU_h'(y)|\cdot\int_{0}^{y}\|\nabla_h\cdot\bu_{h0}\|(z)dz,\\ \|\tilde u_d\|(t,y)\leq \int_{0}^{y}\|\nabla_h\cdot\bu_{h0}\|(z)dz+t\int_{0}^{y} \Big[\big|\bU_h(y)-\bU_h(z)\big|\cdot\|\nabla_h\big(\nabla_h\cdot\bu_{h0} \big)\|(z)\Big]dz, \end{cases}\end{equation} and \begin{equation}\label{est_linear2}\begin{cases} \|\bar\bu_h\|(t,y)\leq 2\|\ta_0\|_{L^{2}({\mathcal{m}athbb{R}}^d_+)}\cdot\big|\sqrt{y}\bU_h'(y)\big|,\\ \|\bar u_d\|(t,y)\leq 2\|\nabla_h\ta_0\|_{L^2({\mathcal{m}athbb{R}}^d_+)}\Big(\int_0^y\big|\bU_h(y)-\bU_h(z)\big|^2dz\Big)^{\frac{1}{2}}+M_0\Big(\|\ta_0\|_{H^{1,0}}+\|\ta_0\|_{H^{0,2}}\Big). 
\end{cases}\end{equation}
\end{remark}
\subsection{Linearized stability of shear flows in two-dimensional problems}
The next main goal is to improve the estimates given in \eqref{est_linear} by obtaining a lower bound on the growth rate of $(\bu_h, u_d)$ as $t\to+\infty$, under a certain structural condition on the shear flow $\bU_h(y)$, which implies the asymptotic instability of the linearized problem \eqref{pr_linear}. Hong and Hunter studied a similar problem for the linearized two-dimensional inviscid Prandtl equations in \cite{H-H}. Firstly, we consider the problem \eqref{pr_linear} in two space variables. Note that from \eqref{invis_prandtl}, $(\tilde\bu_h,\tilde u_d)(t,x',y)$ is the solution to the linearized problem of the two-dimensional inviscid Prandtl equations; thus, from the relation \eqref{decom} and the estimate \eqref{est_linear2}, we claim that $\bu_h(t,x',y)$ satisfies estimates similar to those given in \cite[Proposition 6.1]{H-H}. Indeed, we have the following result.
\begin{prop}\label{prop_sta2d}
Under the assumptions of Proposition \ref{prop_est}, let $(\bu_h,u_d,\ta)(t,x',y)$ be the solution of \eqref{pr_linear} in $\{t>0, x'\in {\mathbb{R}}, y>0\}$.
(1) If $\bU_h(y)$ has no critical point, then $\|\bu_h\|(t,y)$ and $\|u_d\|(t,y)$ are bounded uniformly in $t$ with the following estimates:
\begin{equation}\label{est_uv}\begin{split}
\|\bu_h\|(t,y)\leq&\Big|\frac{\bU_h'(y)}{\bU_h'(0)}\Big|\|\bu_{h0}\|(0) +|\bU_h'(y)|\int_{0}^{y}\Big\{\Big|\frac{\|\partial_y\bu_{h0}\|(z)}{\bU_h'(z)}\Big|+\Big|\frac{\bU_h''(z)}{\big(\bU_h'(z)\big)^2}\Big|\|\bu_{h0}\|(z)\Big\}dz+2\|\ta_0\|_{L^2({\mathbb{R}}^d_+)}|\sqrt{y}\bU_h'(y)|,\\
\|u_d\|(t,y)\leq&\Big|\frac{\bU_h(y)-\bU_h(0)}{\bU_h'(0)}\Big|\|\partial_{x'}\bu_{h0}\|(0) +\int_{0}^{y}\Big|\frac{\bU_h(y)-\bU_h(z)}{\bU_h'(z)}\Big|\Big[\|\partial_{x'y}^2\bu_{h0}\|(z)+\Big|\frac{\bU_h''(z)}{\bU_h'(z)}\Big|\|\partial_{x'}\bu_{h0}\|(z)\Big]dz\\
&+2\|\partial_{x'}\ta_0\|_{L^2({\mathbb{R}}^d_+)}\Big(\int_0^y\big|\bU_h(y)-\bU_h(z)\big|^2dz\Big)^{\frac{1}{2}}+M_0\Big(\|\ta_0\|_{H^{1,0}}+\|\ta_0\|_{H^{0,2}}\Big).
\end{split}\end{equation}
(2) If $\bU_h(y)$ has a single, non-degenerate critical point at $y=y_0>0$, and the initial data $\bu_{h0}(x',y)$ satisfies
\begin{equation}
\|\bu_{h0}\|_{\frac{i}{2}}(y_0)~:=\Big(\int_{{\mathbb{R}}_\xi}|\xi|^i\cdot|\widehat{\bu_{h0}}(\xi,y_0)|^2d\xi\Big)^{\frac{1}{2}}<\infty, \qquad i=1,2,3,
\end{equation}
where $\widehat{\bu_{h0}}(\xi,y)$ is the Fourier transform of $\bu_{h0}(x',y)$ with respect to $x'$, then when $y> y_0$, it holds that for sufficiently large $t$,
\begin{equation}\label{est-uv}\begin{split}
&\|\bu_h\|(t,y)\geq C\sqrt{t}~\frac{|\bU_h'(y)|}{\sqrt{|\bU_h''(y_0)|}},\qquad\|u_d\|(t,y)\geq C\sqrt{t}~\frac{|\bU_h(y)-\bU_h(y_0)|}{\sqrt{|\bU_h''(y_0)|}},
\end{split}\end{equation}
where the positive constant $C$ depends only on $y_0$ and $\bu_{h0}$.
Furthermore, we have similar results as above for $\mathcal{m}athcal{P}d_y\bu_h.$ \end{prop} \begin{proof}[\bf{Proof.}] Combining \eqref{decom} with \eqref{est_linear2}, we only need to estimate $\|(\tilde\bu_h,\tilde u_d)\|(t,y)$. As we know, $(\tilde\bu_h,\tilde u_d)(t,x',y)$ solves the linearized inviscid Prandtl equation \eqref{invis_prandtl}, so we can follow the method given in the proof of Proposition 6.1 in \cite{H-H} to have the estimates of $(\tilde\bu_h,\tilde u_d)(t,x',y)$, and we sketch the process in the following. By taking the Fourier transform with respect to $x'\in{\mathcal{m}athbb{R}}$ in the representation \eqref{formu-u1} of $(\tilde \bu_h,\tilde u_d)(t,x',y)$, it follows that \begin{equation}\label{fourier_tu}\begin{split} \widehat{\tilde{\bu}_h}(t,\xi,y)=&\widehat{\bu_{h0}}(\xi,y)e^{-it\xi\bU_h(y)}+it\xi\bU_h'(y)\int_{0}^{y}\widehat{{\bu}_{h0}}(\xi,z)e^{-it\xi\bU_h(z)}dz, \\ \widehat{\tilde u_d}(t,\xi,y)= &-\int_{0}^{y} \Big\{i\xi\widehat{\bu_{h0}}(\xi,y)-t\xi^2\big(\bU_h(y)-\bU_h(z)\big)\widehat{\bu_{h0}}(\xi,z)\Big\}e^{-it\xi\bU_h(z)}dz. 
\end{split}\end{equation}
(1) If $\bU_h(y)$ has no critical point, we integrate by parts in \eqref{fourier_tu} to obtain that
\begin{equation*} \begin{split}
\widehat{\tilde{\bu}_h}(t,\xi,y)=& \frac{\bU'_h(y)}{\bU'_h(0)} \widehat{\bu_{h0}}(\xi,0)e^{-it\xi\bU_h(0)}+\bU_h'(y)\int_{0}^{y}\big[\frac{\partial_y\widehat{{\bu}_{h0}}(\xi,z)}{\bU_h'(z)}-\frac{\bU_h''(z)}{\big(\bU_h'(z)\big)^2}\widehat{{\bu}_{h0}}(\xi,z)\big]e^{-it\xi\bU_h(z)}dz, \\
\widehat{\tilde u_d}(t,\xi,y)= &-i\xi\frac{\bU_h(y)-\bU_h(0)}{\bU_h'(0)}\widehat{\bu_{h0}}(\xi,0)e^{-it\xi\bU_h(0)}\\
&-i\xi\int_{0}^{y} \big[\frac{\partial_y\widehat{{\bu}_{h0}}(\xi,z)}{\bU_h'(z)} -\frac{\bU_h''(z)}{\big(\bU_h'(z)\big)^2}\widehat{{\bu}_{h0}}(\xi,z)\big]\big(\bU_h(y)-\bU_h(z)\big) e^{-it\xi\bU_h(z)}dz,
\end{split} \end{equation*}
which implies, by Parseval's identity, that
\begin{equation}\label{est_uv1}\begin{split}
\|\tilde{\bu}_h\|(t,y)\leq& \Big|\frac{\bU'_h(y)}{\bU_h'(0)}\Big| \|\bu_{h0}\|(0) +|\bU_h'(y)|\int_{0}^{y}\Big[\frac{\|\partial_y\bu_{h0}\|(z)}{|\bU_h'(z)|}+\frac{|\bU_h''(z)|}{\big(\bU_h'(z)\big)^2}\|\bu_{h0}\|(z)\Big]dz,\\
\|\tilde u_d\|(t,y)\leq&\Big|\frac{\bU_h(y)-\bU_h(0)}{\bU_h'(0)}\Big|\|\partial_{x'}\bu_{h0}\|(0) +\int_{0}^{y}\Big|\frac{\bU_h(y)-\bU_h(z)}{\bU_h'(z)}\Big|\Big[\|\partial_{x'y}^2\bu_{h0}\|(z)+\Big|\frac{\bU_h''(z)}{\bU_h'(z)}\Big|\|\partial_{x'}\bu_{h0}\|(z)\Big]dz.
\end{split}\end{equation}
Substituting \eqref{est_linear2} and \eqref{est_uv1} into \eqref{decom}, the estimates given in \eqref{est_uv} follow immediately.
(2) If $\bU_h(y)$ has a single, non-degenerate critical point at $y=y_0$, through the method of stationary phase we obtain that for $y>y_0$ and as $|t\xi|\rightarrow+\infty,$ \begin{equation}\label{st-ph}\begin{split} &\int_{0}^{y}\widehat{\bu_{h0}}(\xi,z)e^{-it\xi\bU_h(z)}dz=\sqrt{\frac{2\mathcal{m}athcal{P}i}{|t\xi\bU_h''(y_0)|}}\cdot\widehat{\bu_{h0}}(\xi,y_0)e^{-it\xi\bU_h(y_0)-\frac{i\mathcal{m}athcal{P}i}{4}sgn\big(\xi\bU_h''(y_0)\big)}+o(\frac{1}{|t\xi|}),\\%\quad |t\xi|\rightarrow+\infty,\\ &\int_{0}^{y}\big[\bU_h(y)-\bU_h(z)\big]\widehat{\bu_{h0}}(\xi,z)e^{-it\xi\bU_h(z)}dz\\ &=\sqrt{\frac{2\mathcal{m}athcal{P}i}{|t\xi\bU_h''(y_0)|}}\cdot\big[\bU_h(y)-\bU_h(y_0)\big]\widehat{\bu_{h0}}(\xi,y_0)e^{-it\xi\bU_h(y_0)-\frac{i\mathcal{m}athcal{P}i}{4}sgn\big(\xi\bU_h''(y_0)\big)}+o(\frac{1}{|t\xi|}). \end{split} \end{equation} Then, combining \eqref{fourier_tu} with \eqref{st-ph} yields that for $\xi$ being in a bounded interval, $\xi\in[a,b]$ with $0<a<b$, the following inequalities hold for sufficiently large $t$ (independent of $\xi$), \begin{equation}\label{as-ex}\begin{split} &\big|\widehat{\tilde{\bu}_h}(t,\xi,y)-\widehat{\bu_{h0}}(\xi,y)e^{-it\xi\bU_h(y)}\big|\geq\sqrt{\frac{\mathcal{m}athcal{P}i|t\xi|}{|\bU_h''(y_0)|}}\cdot\big|\bU_h'(y)\widehat{\bu_{h0}}(\xi,y_0)\big|,\\ &\big|\widehat{\tilde u_d}(t,\xi,y)\big|\geq\sqrt{\frac{\mathcal{m}athcal{P}i|t\xi|}{|\bU_h''(y_0)|}}\cdot\big|\xi\big[\bU_h(y)-\bU_h(y_0)\big]\widehat{\bu_{h0}}(\xi,y_0)\big|. 
\end{split} \end{equation} Therefore, by Parseval's identity we obtain that for sufficiently large $t$, \begin{equation}\label{est-uv1}\begin{split} &\|\tilde \bu_h\|(t,y)\geq\sqrt{\frac{\mathcal{m}athcal{P}i t}{2|\bU_h''(y_0)|}}|\bU_h'(y)|\cdot\big\|\sqrt{|\xi|}\widehat{\bu_{h0}}(\xi,y_0)\big\|_{L^2_\xi([a,b])}\triangleq C_0 \sqrt{t}~\frac{|\bU_h'(y)|}{\sqrt{|\bU_h''(y_0)|}},\\ &\|\tilde u_d\|(t,y)\geq\sqrt{\frac{\mathcal{m}athcal{P}i t}{2|\bU_h''(y_0)|}}\big|\bU_h(y)-\bU_h(y_0)\big|\cdot\Big\||\xi|^{\frac{3}{2}}\widehat{\bu_{h0}}(\xi,y_0)\Big\|_{L^2_\xi([a,b])}\triangleq C_1 \sqrt{t}~\frac{|\bU_h(y)-\bU_h(y_0)|}{\sqrt{|\bU_h''(y_0)|}},\\ \end{split}\end{equation} where the positive constants $C_0$ and $C_1$ depend only on $y_0$ and $\bu_{h0}$. Finally, it is easy to obtain \eqref{est-uv} by substituting \eqref{est_linear2} and \eqref{est-uv1} into \eqref{decom}. \end{proof} \iffalse If we add some restrictions on the initial data $\bu_{h0}(x',y)$ for the linearized problem \eqref{pr_linear}, then the solution $\bu_h(t,x',y)$ admits uniform (in $t$) estimate of the $L^2-$norm $\|\bu_h(t,\cdot)\|_{L^2({\mathcal{m}athbb{R}}^2_+)}$, even the shear flow $\bU_h(y)$ has a single, non-degenerate critical point $y_0>0$. To obtain the uniform estimate in $t$ for $(\bu_h, u_d)(t,x',y)$, we need some auxiliary work for $\bU_h(y)$ first. As the smooth function $\bU_h(y)$ has a single, non-degenerate critical point $y_0>0$, then there exists $\delta>0$ such that $[y_0-3\delta,y_0+3\delta]\subseteq(0,2y_0)$ and \begin{equation}\label{delta} \frac{|\bU_h''(y_0)|}{2}~\leq~|\bU_h(y)|~\leq~2|\bU_h''(y_0)|,\qquad \forall~ y\in[y_0-3\delta,y_0+3\delta]. \end{equation} Also, there exists a constant $C_0>1$ such that $\|\bU_h\|_{W^{2,\infty}({\mathcal{m}athbb{R}}_+)}\leq C_0$, and \begin{equation}\label{bound_U} |\bU_h'(y)|~\geq~C_0^{-1},\quad\mathcal{m}box{for}~y\in[0,y_0-\delta]\cup[y_0+2\delta,y_0+3\delta]. 
\end{equation} We construct three cut-off functions: $$\mathcal{m}athcal{P}hi_1(y),\mathcal{m}athcal{P}hi_2(y),\mathcal{m}athcal{P}si(y)\in C^\infty({\mathcal{m}athbb{R}}_+),\qquad 0\leq\mathcal{m}athcal{P}hi_1,\mathcal{m}athcal{P}hi_2,\mathcal{m}athcal{P}si\leq1$$ with \begin{equation}\label{cutoff}\begin{split} &\mathcal{m}athcal{P}hi_1(y)=1~\mathcal{m}box{for}~0\leq y\leq y-2\delta,\quad\mathcal{m}athcal{P}hi_1(y)=0,~\mathcal{m}box{for}~y\geq y_0-\delta;\\ &\mathcal{m}athcal{P}hi_2(y)=0~\mathcal{m}box{for}~0\leq y\leq y+\delta,\quad\mathcal{m}athcal{P}hi_2(y)=1,~\mathcal{m}box{for}~y\geq y_0+2\delta;\\ &\mathcal{m}athcal{P}si(y)=1~\mathcal{m}box{for}~|y-y_0|\leq 2\delta,\quad\mathcal{m}athcal{P}si(y)=0,~\mathcal{m}box{for}~|y-y_0|\geq 3\delta. \end{split}\end{equation} Moreover, we choose a constant $C_1>0$ such that \begin{equation}\label{cutoff_der} |\mathcal{m}athcal{P}hi_1'|,~|\mathcal{m}athcal{P}hi_2'|,~|\mathcal{m}athcal{P}si'|~\leq~C_1. \end{equation} Then, let us list the support of those cut-off fuctions as follows: \begin{equation}\label{support}\begin{split} &I_{\mathcal{m}athcal{P}hi_1}~=~\{y:~0\leq y\leq y_0-\delta\},\qquad I_{\mathcal{m}athcal{P}hi_1'}~=~\{y:~y_0-2\delta\leq y\leq y_0-\delta\};\\ &I_{\mathcal{m}athcal{P}hi_2}~=~\{y:~ y\geq y_0+\delta\},\qquad I_{\mathcal{m}athcal{P}hi_2'}~=~\{y:~y_0+\delta\leq y\leq y_0+2\delta\};\\ &I_{\mathcal{m}athcal{P}si}~=~\{y:~| y-y_0|\leq 3\delta\},\qquad I_{\mathcal{m}athcal{P}hi_1'}~=~\{y:~2\delta\leq |y-y_0|\leq 3\delta\}. 
\end{split}\end{equation} \begin{prop}\label{prop_sta} Under the assumptions of Proposition \ref{prop_est}, let $\bU_h(y)$ have a single, non-degenerate critical point $y_0>0$, and the initial data $\bu_{h0}(x',y)$ of problem \eqref{pr_linear} satisfy \begin{equation}\label{ass_sta} \|\bu_{h0}\|(0),~\|\mathcal{m}athcal{P}hi_1\bu_{h0}(\cdot)\|_{L^2({\mathcal{m}athbb{R}}_+^2)},~\|\mathcal{m}athcal{P}hi_1\mathcal{m}athcal{P}d_y\bu_{h0}(\cdot)\|_{L^2({\mathcal{m}athbb{R}}_+^2)},~\|\mathcal{m}athcal{P}si\bu_{h0}(\cdot)\|_{L^2({\mathcal{m}athbb{R}}_+^2)},~\|\mathcal{m}athcal{P}si\mathcal{m}athcal{P}d_y\bu_{h0}(\cdot)\|_{L^2({\mathcal{m}athbb{R}}_+^2)},~\|y^l\mathcal{m}athcal{P}hi_2\big(\frac{\bu_{h0}(x',y)}{\bU'_h(y)}\big)_y\|_{L^2({\mathcal{m}athbb{R}}_+^2)}\leq\widetilde{M}_0 \end{equation} for some constants $\widetilde{M}_0>0$ and $l>\frac{1}{2}$. Then, for the solution $(\bu_h,u_d)(t,x',y)$ of \eqref{pr_linear} there is a constant $M=M(C_0,C_1,y_0,\delta,\widetilde{M}_0)>0$, independent of $t$, such that the following estimates hold for any $t,y>0:$ \begin{equation}\label{est_sta} \|\bu_h\|(t,y)~\leq M+2\|\ta_0\|_{L^2({\mathcal{m}athbb{R}}_+^2)}\cdot|\sqrt{y}\bU'_h(y)|. \end{equation} \end{prop} \begin{proof}[\bf{Proof.}] Recall the decomposition \eqref{decom} of $(\bu_h,u_d)(t,x',y)$ and the uniform estimates \eqref{est_linear2} in $t$ for the component $(\bar\bu_h,\bar u_d)(t,x',y)$, it leads us to focus on the component $(\tilde\bu_h,\tilde u_d)(t,x',y)$. Note that $(\tilde\bu_h,\tilde u_d)(t,x',y)$ satisfies the linearized problem \eqref{invis_prandtl} of two-dimensional inviscid Prandtl equations, we will use the approach given in \cite{X-Z} (see also \cite{GM}) to obtain the estimates of $(\tilde\bu_h,\tilde u_d)(t,x',y)$. To show the uniform estimates in $t$ of $(\tilde{\bu_h},\tilde u_d)(t,x',y)$, we divide the proof process into three steps. 
{\underline{\bf Step 1.}} For $y\in I_{\mathcal{m}athcal{P}hi_1}$, we have $\bU_h'(y)\neq0$ and then, obtain that from the expression \eqref{formu-u1} of $(\tilde{\bu_h},\tilde u_d)(t,x',y)$ by integration by parts, \begin{equation}\label{tu} \begin{split} \tilde{\bu}_h(t,x',y)=&\frac{\bU_h(y)}{\bU_h'(0)}\bu_{h0}\big(x'-t\bU_h(0),0\big)+\bU_h'(y)\int_{0}^{y}\Big[\frac{\mathcal{m}athcal{P}d_y\bu_{h0}\big(x'-t\bU_h(z),z\big)}{\bU'_h(z)}-\frac{\bU''_h(z)}{\big(\bU_h'(z)\big)^2}\bu_{h0}\big(x'-t\bU_h(z)\big)\Big]dz,\\ \end{split} \end{equation} Thus, the above equalities \eqref{tu} imply that for $0\leq y\leq y_0-2\delta$ and $t\geq0,$ \begin{equation}\label{est_tu1}\begin{split} \|\tilde{\bu}_h\|(t,y)\leq&\Big|\frac{U_1(y)}{U_1'(0)}\Big|\cdot\|\bu_{h0}\|(0) +|\bU_h'(y)|\int_{0}^{y}\Big[\frac{\|\mathcal{m}athcal{P}d_y\bu_{h0}\|(z)}{|U_1'(z)|}+\frac{|\bU_h''(z)|}{\big(\bU_h'(z)\big)^2}\cdot\|\bu_{h0}\|(z)\Big]dz\\ \leq&C_0^2~\|\bu_{h0}\|(0)+C_0^2\sqrt{y_0}~\big(\|\mathcal{m}athcal{P}hi_1\mathcal{m}athcal{P}d_y\bu_{h0}\|_{L^2({\mathcal{m}athbb{R}}_+^2)}+C_0\|\mathcal{m}athcal{P}hi_1\bu_{h0}\|_{L^2({\mathcal{m}athbb{R}}_+^2)}\big)~\triangleq~M_1.\\ \end{split}\end{equation} {\underline{\bf Step 2.}} For $y\in I_{\mathcal{m}athcal{P}hi_2}$, we have $\bU_h'(y)\neq0$ and introduce the transformation \begin{equation}\label{trans} v(t,x',y)~:=~\mathcal{m}athcal{P}d_y\big(\frac{\tilde\bu_h(t,x',y)}{\bU_h'(y)}\big)\end{equation} to the problem \eqref{invis_prandtl}. Then, from \eqref{invis_prandtl} we have the following problem for $w(t,x',y)$: \begin{equation}\label{pr_v} \mathcal{m}athcal{P}d_t v+\bU_h(y)\mathcal{m}athcal{P}d_{x'}v=0,\qquad v(0,x',y)=\mathcal{m}athcal{P}d_y\big(\frac{\bu_{h0}(x',y)}{\bU_h'(y)}\big)\triangleq v_0(x',y), \end{equation} which implies that \begin{equation}\label{formu_v} v(t,x',y)~=~v_0\big(x'-t\bU_h(y),y\big),\qquad y\in I_{\mathcal{m}athcal{P}hi_2}. 
\end{equation} An application of the energy method to \eqref{pr_v} yields that for any $l>\frac{1}{2},$ \begin{equation}\label{est_v} \|y^l\mathcal{m}athcal{P}hi_2v(t,\cdot)\|_{L^2({\mathcal{m}athbb{R}}^2_+)}~=~\|y^l\mathcal{m}athcal{P}hi_2v_0\|_{L^2({\mathcal{m}athbb{R}}^2_+)}~=~\big\|y^l\mathcal{m}athcal{P}hi_2(y)~\big(\frac{\bu_{h0}(x',y)}{\bU_h'(y)}\big)_y\big\|_{L^2({\mathcal{m}athbb{R}}^2_+)}. \end{equation} Note that from \eqref{trans}, we have $\tilde \bu_h(t,x',y)=-\bU_h'(y)\int_{y}^{+\infty}v(t,x',z)dz$ with $y\in I_{\mathcal{m}athcal{P}hi_2},$ and then, \[ |\tilde \bu_h|(t,x',y)~\leq~|\bU_h'(y)|\big(\int_{y}^{+\infty}z^{-2l}dz\big)^{\frac{1}{2}}\big(\int_{y}^{+\infty}z^{2l}v^2(t,x',z)dz\big)^{\frac{1}{2}}~\leq~\frac{y_0^{1-2l}}{2l-1} |\bU_h'(y)|\cdot\big(\int_{y}^{+\infty}z^{2l}v^2(t,x',z)dz\big)^{\frac{1}{2}}, \] which implies that for $y\geq y_0+2\delta,$ \begin{equation}\label{est_tu2} \|\tilde \bu_h\|(t,y)~\leq~C_0\frac{y_0^{1-2l}}{2l-1} \cdot\|y^l\mathcal{m}athcal{P}hi_2v(t,\cdot)\|_{L^2({\mathcal{m}athbb{R}}_+^2)}. \end{equation} It follows that by combining \eqref{est_v} with \eqref{est_tu2}, \begin{equation}\label{est_tu3} \|\tilde \bu_h\|(t,y)~\leq~C_0\frac{y_0^{1-2l}}{2l-1} \cdot\|y^l\mathcal{m}athcal{P}hi_2(y)\big(\frac{\bu_{h0}(x',y)}{\bU_h'(y)}\big)_y\|_{L^2({\mathcal{m}athbb{R}}_+^2)}~\triangleq~M_2, \quad \mathcal{m}box{for}~y\geq y_0+2\delta,~t\geq0. \end{equation} {\bf \underline{Step 3.}} For $y\in I_{\mathcal{m}athcal{P}si},$ let \begin{equation}\label{def_w} w(t,x',y)~:=~\mathcal{m}athcal{P}d_y\tilde\bu_h(t,x',y), \end{equation} then from \eqref{invis_prandtl} we obtain that $w(t,x',y)$ satisfies the equation: \begin{equation}\label{eq-w} \mathcal{m}athcal{P}d_t w+\bU_h(y)\mathcal{m}athcal{P}d_{x'}w+\bU''_h(y)\tilde u_d=0 \end{equation} and the initial data $w(0,x',y)=\mathcal{m}athcal{P}d_y\bu_{h0}(x',y)$. 
Multiplying \eqref{eq-w} by $sgn\big(\bU''_h(y_0)\big)\frac{\mathcal{m}athcal{P}si^2(y)w(t,x',y)}{\bU_h''(y)}$ and integrating the resultion equation over ${\mathcal{m}athbb{R}}_+^2$, we obtain that by integration by parts, \begin{equation}\label{est_w} \frac{d}{2dt}\Big\|\frac{\mathcal{m}athcal{P}si(y)w(t,x',y)}{\sqrt{|\bU''_h(y)|}}\Big\|^2_{L^2({\mathcal{m}athbb{R}}_+^2)}+sgn\big(\bU''_h(y_0)\big)\int_{{\mathcal{m}athbb{R}}_+^2}\mathcal{m}athcal{P}si^2\tilde u_d\cdot wdx'dy=0. \end{equation} Plugging \eqref{def_w} into the second term of the above equality \eqref{est_w}, it implies that by integration by parts and the divergence-free condition in \eqref{invis_prandtl}, \begin{equation}\label{est_w1} \begin{split} sgn\big(\bU''_h(y_0)\big)\int_{{\mathcal{m}athbb{R}}_+^2}\mathcal{m}athcal{P}si^2\tilde u_d\cdot wdx'dy&=-sgn\big(\bU''_h(y_0)\big)\int_{{\mathcal{m}athbb{R}}_+^2}2\mathcal{m}athcal{P}si\mathcal{m}athcal{P}si'\tilde u_d\cdot \tilde \bu_hdx'dy+sgn\big(\bU''_h(y_0)\big)\int_{{\mathcal{m}athbb{R}}_+^2}\mathcal{m}athcal{P}si^2\mathcal{m}athcal{P}d_{x'}\tilde \bu_h\cdot \tilde{\bu}_hdx'dy\\ &=-2sgn\big(\bU''_h(y_0)\big)\int_{{\mathcal{m}athbb{R}}_+^2}\mathcal{m}athcal{P}si\mathcal{m}athcal{P}si'\tilde u_d\cdot \tilde{\bu}_hdx'dy. \end{split}. 
\end{equation} Note that $I_{\mathcal{m}athcal{P}si}\cap I_{\mathcal{m}athcal{P}si'}=\{y:~2\delta\leq|y-y_0|\leq3\delta\}$ and $\bU'_h(y)\neq0$ for $y\in I_{\mathcal{m}athcal{P}si}\cap I_{\mathcal{m}athcal{P}si'}$, so we have that from the first equation of \eqref{invis_prandtl}, \begin{equation}\label{def_ud} \tilde u_d(t,x',y)~=~-\big(\bU'_h(y)\big)^{-1}\cdot\big(\mathcal{m}athcal{P}d_t\tilde{\bu}_h+\bU_h(y)\mathcal{m}athcal{P}d_{x'}\tilde{\bu}_h\big),\quad \mathcal{m}box{for}~y\in I_{\mathcal{m}athcal{P}si}\cap I_{\mathcal{m}athcal{P}si'}, \end{equation} Substituting \eqref{def_ud} into \eqref{est_w1} yields that by integration by parts, \begin{equation}\label{est_w2} \begin{split} sgn\big(\bU''_h(y_0)\big)\int_{{\mathcal{m}athbb{R}}_+^2}\mathcal{m}athcal{P}si^2\tilde u_d\cdot wdx'dy&=2sgn\big(\bU''_h(y_0)\big)\int_{{\mathcal{m}athbb{R}}_+^2}\frac{\mathcal{m}athcal{P}si\mathcal{m}athcal{P}si'}{\bU'_h}\big(\mathcal{m}athcal{P}d_t\tilde{\bu}_h+\bU_h\mathcal{m}athcal{P}d_{x'}\tilde{\bu}_h\big)\cdot \tilde{\bu}_hdx'dy\\ &=sgn\big(\bU''_h(y_0)\big)\frac{d}{dt}\int_{{\mathcal{m}athbb{R}}_+^2}\frac{\mathcal{m}athcal{P}si\mathcal{m}athcal{P}si'}{\bU'_h}\tilde{\bu}_h^2dx'dy. \end{split} \end{equation} Combining \eqref{est_w} with \eqref{est_w2} and integrating with respect to $t$, it follows that \begin{equation}\label{est_w3}\begin{split} &\Big\|\frac{\mathcal{m}athcal{P}si(y)w(t,x',y)}{\sqrt{|\bU''_h(y)|}}\Big\|^2_{L^2({\mathcal{m}athbb{R}}_+^2)}+2sgn\big(\bU''_h(y_0)\big)\int_{{\mathcal{m}athbb{R}}_+^2}\frac{\mathcal{m}athcal{P}si(y)\mathcal{m}athcal{P}si'(y)}{\bU'_h(y)}\tilde\bu_h^2(t,x',y)dx'dy\\ &=\Big\|\frac{\mathcal{m}athcal{P}si(y)\mathcal{m}athcal{P}d_y\bu_{h0}(x',y)}{\sqrt{|\bU''_h(y)|}}\Big\|^2_{L^2({\mathcal{m}athbb{R}}_+^2)}+2sgn\big(\bU''_h(y_0)\big)\int_{{\mathcal{m}athbb{R}}_+^2}\frac{\mathcal{m}athcal{P}si(y)\mathcal{m}athcal{P}si'(y)}{\bU'_h(y)}\bu_{h0}^2(x',y)dx'dy. 
\end{split}\end{equation} Note that \[ \frac{|\bU''_h(y_0)|}{2}\leq |\bU''_h(y)|\leq2|\bU''_h(y_0)|\quad\mathcal{m}box{for} ~y\in I_{\mathcal{m}athcal{P}si},\qquad |\bU_h'(y)|\geq C_0^{-1}\quad\mathcal{m}box{for}~y\in I_{\mathcal{m}athcal{P}si}\cap I_{\mathcal{m}athcal{P}si'}, \] and \[ I_{\mathcal{m}athcal{P}si}\cap I_{\mathcal{m}athcal{P}si'}~\subset~\{y:~\mathcal{m}athcal{P}hi_1(y)=1\}\cup\{y:~\mathcal{m}athcal{P}si_2(y)=1\}, \] then, by using the estimates \eqref{est_tu1} and \eqref{est_tu3} we have that for any $t\geq0,$ \[ \Big|\int_{{\mathcal{m}athbb{R}}_+^2}\frac{\mathcal{m}athcal{P}si(y)\mathcal{m}athcal{P}si'(y)}{\bU'_h(y)}\tilde\bu_h^2(t,x',y)dx'dy\Big|\leq C_0C_1\int_{2\delta\leq|y-y_0|\leq3\delta}\big[\|\bu_h\|(t,y) \big]^2dy\leq C_0C_1\delta\big(M_1^2+M_2^2\big). \] Therefore, we obtain that by substituting the above estimate into \eqref{est_w3}, \begin{equation*} \begin{split} \frac{1}{2|\bU''_h(y_0)|}\|\mathcal{m}athcal{P}si(y)w(t,x',y)\|^2_{L^2({\mathcal{m}athbb{R}}_+^2)}\leq&4C_0C_1\delta\big(M_1^2+M_2^2\big)+2|\bU''_h(y_0)|\|\mathcal{m}athcal{P}si\mathcal{m}athcal{P}d_y\bu_{h0}(\cdot)\|_{L^2({\mathcal{m}athbb{R}}_+^2)}^2,\\ &+2C_0C_1\|\mathcal{m}athcal{P}si\bu_{h0}(\cdot)\|^2_{L^2({\mathcal{m}athbb{R}}_+^2)}, \end{split} \end{equation*} which implies that \begin{equation}\label{est_w4} \|\mathcal{m}athcal{P}si(y)w(t,x',y)\|^2_{L^2({\mathcal{m}athbb{R}}_+^2)}\leq8C_0C_1\delta|\bU''_h(y_0)|\big(M_1^2+M_2^2\big)+4|\bU''_h(y_0)|^2\|\mathcal{m}athcal{P}si(y)\mathcal{m}athcal{P}d_y\bu_{h0}(x',y)\|_{L^2({\mathcal{m}athbb{R}}_+^2)}^2~\triangleq~M_3. 
\end{equation} As \[ |\tilde{\bu}_h(t,x',y)-\tilde{\bu}_h(t,x',y_0-2\delta)|=\big|\int_{y_0-2\delta}^{y}w(t,x',z)dz\big|\leq2\sqrt{\delta}\Big(\int_{{\mathbb{R}}_+^2}\psi^2(y)w^2(t,x',y)dy\Big)^{\frac{1}{2}},\quad \mbox{for}~|y-y_0|\leq2\delta, \] then, it implies that by virtue of \eqref{est_tu1} and \eqref{est_w4}, \begin{equation}\label{est_tu4} \|\tilde \bu_h\|(t,y)\leq\|\tilde{\bu}_h\|(t,y_0-2\delta)+2\sqrt{\delta}\|\psi(y)w(t,x',y)\|_{L^2({\mathbb{R}}_+^2)}\leq M_1+2\sqrt{\delta M_3},\quad \mbox{for}~|y-y_0|\leq2\delta,~t\geq0. \end{equation} Finally, combining \eqref{est_tu1}, \eqref{est_tu3} and \eqref{est_tu4} and setting \begin{equation}\label{def_M} M~:=~\max\{M_2,M_1+2\sqrt{\delta M_3}\}, \end{equation} we know that the constant $M>0$, independent of $t$, depends on $C_0,C_1,y_0,\delta$ and $\widetilde{M}_0$, and for any $t,y>0,$ \begin{equation}\label{est_tu5} \|\tilde{\bu}_h\|(t,y)~\leq~M, \end{equation} which implies \eqref{est_sta} immediately by virtue of \eqref{decom} and \eqref{est_linear2}. \end{proof} \begin{remark} From the above proof, we deduce that the result in Proposition \ref{prop_sta} also holds if $\bU_h(y)$ has finitely many non-degenerate points $0<y_0<y_1<\cdots<y_N<+\infty.$ \end{remark} \begin{question} Comparing the result given in the second part of Proposition \ref{prop_sta2d} with the one in Proposition \ref{prop_sta}, these two results seem to be contradictory. How can this be explained? \end{question} \fi \subsection{Linearized stability of shear flows in three-dimensional problems} We shall see that the results on the linear stability of shear flows in the three-dimensional case are different from the ones in the two-dimensional case given in the above subsection.
By using the decomposition \eqref{decom} and the estimates of $(\bar{\bu}_h,\bar u_d)(t,x',y)$ given in \eqref{est_linear2}, we will focus on the component $(\tilde{\bu}_h,\tilde u_d)(t,x',y)$ which satisfies the three-dimensional inviscid linearized Prandtl system \eqref{invis_prandtl}. In analogy with the well-posedness result and ill-posedness result on the three-dimensional viscous Prandtl system given in \cite{LWY1} and \cite{LWY3} respectively, we will deduce that the structure of the shear flow $\bU_h(y)$ plays an important role in its linear stability. In fact, we have the following result: \begin{prop}\label{thm_sta} Consider the linearized problem \eqref{pr_linear} in three space variables, suppose that $\bU_h(y)=\big(U_1(y),U_2(y)\big)$ and the initial data are smooth, and the norms appearing on the right-hand side of the following estimate \eqref{est_ud1} are finite. Let $(\bu_h,u_d,\ta)(t,x',y)$ be the solution of \eqref{pr_linear}. (1) If there is $k\in {\mathbb{R}}$ such that $U_2(y)=kU_1(y)$ holds for all $y\ge 0$, and $U_1(y)$ has no critical point in $y\ge 0$, then $\|u_d\|(t,y)$ is bounded uniformly in $t$, and satisfies the estimate: \begin{equation}\label{est_ud1}\begin{split} \|u_d\|(t,y)\leq&\Big|\frac{U_1(y)-U_1(0)}{U_1'(0)}\Big|\cdot\|\nabla_h\cdot\bu_{h0}\|(0) +\int_{0}^{y}\Big\{\Big|\frac{U_1(y)-U_1(z)}{U_1'(z)}\Big|\cdot\|\nabla_h\cdot\partial_y\bu_{h0}\|(z)\\ &\qquad+\Big|\frac{U_1''(z)\big(U_1(y)-U_1(z)\big)}{\big(U_1'(z)\big)^2}\Big|\cdot\|\nabla_h\cdot\bu_{h0}\|(z)\Big\}dz\\ &+2\|\nabla_h\ta_0\|_{L^2({\mathbb{R}}^d_+)}\Big(\int_0^y\big|\bU_h(y)-\bU_h(z)\big|^2dz\Big)^{\frac{1}{2}}+M_0\Big(\|\ta_0\|_{H^{1,0}}+\|\ta_0\|_{H^{0,2}}\Big).
\end{split}\end{equation} (2) Assume that the initial data of \eqref{pr_linear} admits \[ \|\nabla_h\cdot\bu_{h0}\|_{\frac{i}{2}}(y)~:=~\Big(\int_{{\mathbb{R}}^2}|\xi|^i\cdot\big|\xi\cdot\widehat{\bu_{h0}}\big|^2(\xi,y)d\xi\Big)^{\frac{1}{2}}<+\infty,\quad i=0,1, \] where $\widehat{\bu_{h0}}(\xi, y)$ denotes the Fourier transform of ${\bu}_{h0}(x_1,x_2, y)$ with respect to $(x_1,x_2)$. (2a) If there is $k\in {\mathbb{R}}$ such that $U_2(y)=kU_1(y)$ holds for all $y\ge 0$, and $U_1(y)$ has a single, non-degenerate critical point at $y=y_0>0$, then when $y> y_0$, there exists a constant $C=C\big(y,y_0,\bU_h,\bu_{h0}\big)>0$ independent of $t$, such that for sufficiently large $t$, \begin{equation}\label{est_ud4} \|u_d\|(t,y)~\geq~ C\sqrt{t}~\frac{|U_1(y)-U_1(y_0)|}{\sqrt{|U_1''(y_0)|}} . \end{equation} (2b) If for any given $k\in {\mathbb{R}}$, $U_2(y)=kU_1(y)$ does not hold for all $y\ge 0$, then there is a point $y_0> 0$ such that, when $y>y_0$, we have that for sufficiently large $t,$ \begin{equation}\label{est_ud5}\begin{split} \|u_d\|(t,y)~\geq~C\sqrt{t} \end{split}\end{equation} with the constant $C=C\big(y,y_0,\bU_h,\bu_{h0}\big)>0$ independent of $t.$ Moreover, we have similar results as above for $\partial_yu_d$ and $\nabla_h\cdot\bu_h.$ \end{prop} \begin{proof}[\bf{Proof.}] As in the proof of Proposition \ref{prop_sta2d}, by using \eqref{decom} and \eqref{est_linear2} we only need to study $(\tilde\bu_h,\tilde u_d)(t,x',y)$, which solves the three-dimensional linearized inviscid Prandtl equations \eqref{invis_prandtl}. By taking the Fourier transform with respect to $x'=(x_1, x_2)^T$ in the representation \eqref{formu-u1} of $\tilde u_d(t,x',y)$, we obtain that \begin{equation}\label{fourier_u}\begin{split} \widehat{\tilde u_d}(t,\xi,y)= &-\int_{0}^{y} \Big\{i\xi\cdot\widehat{\bu_{h0}}(\xi,z)-t\big[\xi\cdot\big(\bU_h(y)-\bU_h(z)\big)\big]\cdot\big[\xi\cdot\widehat{\bu_{h0}}(\xi,z) \big]\Big\}e^{-it\xi\cdot\bU_h(z)}dz.
\end{split}\end{equation} (1) When $U_2(y)=kU_1(y)$ for some constant $k\in{\mathbb{R}}$, then \eqref{fourier_u} is reduced to \begin{equation}\label{four}\begin{split} &\widehat{\tilde u_d}(t,\xi,y)=-\int_{0}^{y}\big[1-t(\xi_1+k\xi_2)\big(U_1(y)-U_1(z)\big)\big]\big[ \xi\cdot\widehat{\bu_{h0}}(\xi,z)\big]e^{-it(\xi_1+k\xi_2)U_1(z)}dz. \end{split}\end{equation} If $U_1(y)$ has no critical point for all $y\ge 0$, then when $\xi_1+k\xi_2\neq 0$, we obtain by integration by parts that \begin{equation}\label{four_1} \begin{split} \widehat{\tilde u_d}(t,\xi,y)=&-\frac{U_1(y)-U_1(0)}{U_1'(0)}\big[i \xi\cdot\widehat{\bu_{h0}}(\xi,0)\big]e^{-it(\xi_1+k\xi_2)U_1(0)} -\int_{0}^{y}\Big\{\frac{U_1(y)-U_1(z)}{U_1'(z)}\big[i \xi\cdot\partial_y\widehat{\bu_{h0}}(\xi,z)\big]\\ &\qquad\qquad-\frac{U_1''(z)\big(U_1(y)-U_1(z)\big)}{\big(U_1'(z)\big)^2}\big[i \xi\cdot\widehat{\bu_{h0}}(\xi,z)\big]\Big\}e^{-it(\xi_1+k\xi_2)U_1(z)}dz, \end{split}\end{equation} which implies, by Parseval's identity, \begin{equation}\label{estimate_ud}\begin{split} \|\tilde u_d\|(t,y)\leq&\Big|\frac{U_1(y)-U_1(0)}{U_1'(0)}\Big|\cdot\|\nabla_h\cdot\bu_{h0}\|(0) +\int_{0}^{y}\Big\{\Big|\frac{U_1(y)-U_1(z)}{U_1'(z)}\Big|\cdot\|\nabla_h\cdot\partial_y\bu_{h0}\|(z)\\ &\qquad+\Big|\frac{U_1''(z)\big(U_1(y)-U_1(z)\big)}{\big(U_1'(z)\big)^2}\Big|\cdot\|\nabla_h\cdot\bu_{h0}\|(z)\Big\}dz. \end{split}\end{equation} Thus, by plugging \eqref{est_linear2} and the above estimate \eqref{estimate_ud} into \eqref{decom} we conclude \eqref{est_ud1}.
\iffalse {\blue On the other hand, from \eqref{four_1}, we have \begin{equation}\label{3.44} \begin{split} &\mathcal{m}athcal{A}at{u}_d(t,\xi,y)-\int_0^y(U_h(y)-U_h(z))\cdot \widehat{\nabla_h\theta}(t,\xi,z)dz-\mathcal{m}athcal{A}at{\theta}_y(t,\xi,y)\\ &-\frac{U_1(y)-U_1(0)}{U_1'(0)}\widehat{{\rm div}_h\bu_{h0}}(\xi,0)e^{-it(\xi_1+k\xi_2)U_1(0)}\\ &= -\int_{0}^{y}\Big\{(U_1(y)-U_1(z))\frac{\mathcal{m}athcal{P}artial}{\mathcal{m}athcal{P}artial z}\left( \frac{{\rm div}_h\bu_{h0}(\xi,z)}{U_1'(z)}\right)+(U_h(y)-U_h(z))\cdot \widehat{\nabla_h \theta_0}(\xi, z)\Big\} e^{-it\xi\cdot\bU_h(z)}dz\\ &= \Big\{(U_1(y)-U_1(0))\frac{\mathcal{m}athcal{P}artial}{\mathcal{m}athcal{P}artial z}\left( \frac{{\rm div}_h\bu_{h0}(\xi,0)}{U_1'(0)}\right)+(U_h(y)-U_h(0))\cdot \widehat{\nabla_h \theta_0}(\xi, 0)\Big\} \frac{e^{-it\xi\cdot\bU_h(0)}}{it\xi\cdot U'_h(0)}\\ &-\int_0^y \frac{e^{-it\xi\cdot\bU_h(z)}}{it\xi\cdot U'_h(z)}a(\xi,z) dz \end{split}\end{equation} with $$a(\xi, z)=\frac{\mathcal{m}athcal{P}artial}{\mathcal{m}athcal{P}artial z}\Big\{(U_1(y)-U_1(z))\frac{\mathcal{m}athcal{P}artial}{\mathcal{m}athcal{P}artial z}\left( \frac{{\rm div}_h\bu_{h0}(\xi,z)}{U_1'(z)}\right)+(U_h(y)-U_h(z))\cdot \widehat{\nabla_h \theta_0}(\xi, z)\Big\} .$$ Thus, from \eqref{3.44} we obtain \begin{equation}\label{3.45} \begin{split} &\lim\limits_{t\to +\infty}\left( u_d(t,x',y)-\int_0^y(U_h(y)-U_h(z))\cdot \nabla_h\theta(t,x',z)dz-\theta_y(t,x',y)\right.\\ & \mathcal{m}athcal{A}space{1.5in} \left.-\frac{U_1(y)-U_1(0)}{U_1'(0)}({\rm div}_h\bu_{h0})(x'-tU_h(0),0)\right)=0 \end{split}\end{equation} in the sense.... 
when } {\red Moreover, from \eqref{four_1} we have \begin{equation}\label{four_2} \begin{split} &\mathcal{m}athcal{A}at{u}_d(t,\xi,y)-\int_{0}^{y}\big[i\xi\cdot\big(\bU_h(y)-\bU_h(z)\big)\big]\cdot\mathcal{m}athcal{A}at{\ta}(t,\xi,z)dz -\mathcal{m}athcal{A}at{\ta}_y(t,\xi,y)\\ &\rightarrow-\frac{U_1(y)-U_1(0)}{U_1'(0)}\big[i \xi\cdot\mathcal{m}athcal{A}at{\bu}_{h0}(\xi,0)\big]e^{-it(\xi_1+k\xi_2)U_1(0)},\qquad\mathcal{m}box{as}~t\rightarrow+\infty. \end{split}\end{equation} Why??? Thereby, \eqref{est_ud2} follows from \eqref{four_2} and Parseval's identity. } \begin{equation}\label{fourier_u} \begin{split} \mathcal{m}athcal{A}at{\bu}_h(t,\xi,y)= & \mathcal{m}athcal{A}at{\bu}_{h0}(\xi,y)e^{-t\xi\cdot\bU_h(y)}+\bU_h'(y)\int_{0}^{y}\big[i t\xi\cdot\mathcal{m}athcal{A}at{\bu}_{h0}+\mathcal{m}athcal{A}at{\ta}_0\big](\xi,z)e^{-it\xi\cdot\bU_h(z)}dz\\ &-\bU_h'(y)\int_{0}^{y}\mathcal{m}athcal{A}at{\ta}(t,\xi,z)dz. \end{split}\end{equation} If $U_i(y)$ (we may assume that $i=1$) has no critical points and $U_2(y)=kU_1(y)$ for some constant $k$, then \eqref{fourier_u} is reduced as \begin{equation*} \begin{split} \mathcal{m}athcal{A}at{\bu}_h(t,\xi,y)= & \mathcal{m}athcal{A}at{\bu}_{h0}(\xi,y)e^{-tU_1(y)(\xi_1+k\xi_2)}+\bU_h'(y)\int_{0}^{y}\big[i t\xi\cdot\mathcal{m}athcal{A}at{\bu}_{h0}+\mathcal{m}athcal{A}at{\ta}_0\big](\xi,z)e^{-itU_1(z)(\xi_1+k\xi_2)}dz\\ &-\bU_h'(y)\int_{0}^{y}\mathcal{m}athcal{A}at{\ta}(t,\xi,z)dz. 
\end{split}\end{equation*} For $\xi_1\neq -k\xi_2$ in the above equality, we obtain that by integration by parts, \begin{equation}\label{four_1} \begin{split} \hat{\bu}_h(t,\xi,y)= &\Big[ \hat{\bu}_{h0}(\xi,y)-\frac{\xi\cdot\hat{\bu}_{h0}(\xi,y)}{\xi\cdot\bU_h'(y)}\bU_h'(y)\Big]e^{-tU_1(y)(\xi_1+k\xi_2)}+\bU_h'(y)\frac{\xi\cdot\hat{\bu}_{h0}(\xi,0)}{\xi\cdot\bU_h'(0)}e^{-itU_1(0)(\xi_1+k\xi_2)}\\ &+\bU_h'(y)\int_{0}^{y}\Big[\frac{\xi\cdot\widehat{\partial_y\bu}_{h0}(\xi,z)}{\xi\cdot\bU_h'(z)}-\frac{\big(\xi\cdot\bU_h''(z)\big)\big(\xi\cdot\hat{\bu}_{h0}(\xi,z)\big)}{\big(\xi\cdot\bU_h'(z)\big)^2}\Big]e^{-itU_1(y)(\xi_1+k\xi_2)}dz\\ &+\bU_h'(y)\int_{0}^{y}\hat{\ta}_0(\xi,z)e^{-itU_1(z)(\xi_1+k\xi_2)}dz-\bU_h'(y)\int_{0}^{y}\hat{\ta}(t,\xi,z)dz \end{split}\end{equation} \fi (2) If $U_2(y)=kU_1(y)$ for some constant $k$ and $U_1(y)$ has a non-degenerate critical point at $y=y_0$, then for $y>y_0$ and $\xi_1\neq-k\xi_2$, the method of stationary phase yields that, as $|t(\xi_1+k\xi_2)|\rightarrow+\infty$, \begin{equation}\label{mop}\begin{split} &\int_{0}^{y}\big(U_1(y)-U_1(z)\big)\big[\xi\cdot\widehat{\bu_{h0}}(\xi,z)\big]e^{-it(\xi_1+k\xi_2)U_1(z)}dz\\ &=\sqrt{\frac{2\pi}{|t(\xi_1+k\xi_2)U''_1(y_0)|}}~\big(U_1(y)-U_1(y_0)\big)\big[\xi\cdot\widehat{\bu_{h0}}(\xi,y_0)\big]\exp\big\{-it(\xi_1+k\xi_2)U_1(y_0)-\frac{i\pi}{4}sgn\big((\xi_1+k\xi_2)U_1''(y_0)\big)\big\}\\ &\quad+o(\frac{1}{|t(\xi_1+k\xi_2)|}).
\end{split}\end{equation} Substituting the above estimate into \eqref{four} we obtain that for $\xi\in S$, with a bounded domain $S\subset{\mathbb{R}}^2$ being away from the line $\{\xi|~ \xi_1+k\xi_2=0\}$, \begin{equation}\label{estimate_ud1} \|\widehat{\tilde u_d}\|(t,y)~\geq~\sqrt{\frac{\pi|\xi_1+k\xi_2| t}{|U''_1(y_0)|}}\cdot\Big|\big(U_1(y)-U_1(y_0)\big)\big[\xi\cdot\widehat{\bu_{h0}}(\xi,y_0)\big]\Big|,\qquad \mbox{for~sufficiently~large}~t, \end{equation} which implies, by using Parseval's identity, \begin{equation}\label{estimate_ud2} \|\tilde u_d\|(t,y)~\geq~\sqrt{\pi t}~\frac{|U_1(y)-U_1(y_0)|}{\sqrt{|U''_1(y_0)|}}~ \|\sqrt{|\xi_1+k\xi_2|}~\xi\cdot\widehat{\bu_{h0}}(\xi,y_0) \|_{L^2_\xi(S)} \end{equation} for $y>y_0$ and $t$ large. Then, from the uniform boundedness of $\|\bar u_d\|(t,y)$ with respect to $t$ obtained in \eqref{est_linear2}, we deduce \eqref{est_ud4} immediately. (3) If for any given $k\in {\mathbb{R}}$, $U_2(y)=kU_1(y)$ does not hold for all $y\ge 0$, and both of $U_1(y), U_2(y)$ vanish at infinity, then by a contradiction argument, one can show that there is a point $y_0$ such that \begin{equation}\label{condition} U_1'(y_0)U_2''(y_0)~\neq~U_2'(y_0)U_1''(y_0). \end{equation} Without loss of generality, we may assume that $U_1'(y_0)>0$ and $U_1'(y_0)U_2''(y_0)-U_2'(y_0)U_1''(y_0)>0.$ Then, we affirm that for any $\delta>0$, there is an interval $S_\delta\subseteq(y_0-\delta,y_0+\delta)$ such that \begin{equation}\label{def_S} U_1'(y)>0,\quad U_2'(y)\neq0,\quad U_1'(y)U_2''(y)-U_2'(y)U_1''(y)>0,\qquad\forall~y\in S_\delta, \end{equation} which implies that the function $\frac{U_2'(y)}{U_1'(y)}$ is monotonically increasing in $S_\delta.$ Denote by \begin{equation}\label{def_I} I_\delta^R~:=~\Big\{\xi=(\xi_1,\xi_2)\in{\mathbb{R}}^2\setminus\{0\}:~|\xi|\leq R,~\mbox{and}~\exists y\in S_\delta,~s.t.~~\xi\cdot\bU_h'(y)=0\Big\}.
\end{equation} From the monotonicity of $\frac{U_2'(y)}{U_1'(y)}$ in $S_\delta$, we know that for fixed $\xi\in I_\delta^R$, there is only one point $y\in S_\delta$ satisfying $\xi\cdot\bU_h'(y)=0$. Moreover, by virtue of the continuity of $\bU_h'(y)$, it is easy to see that the Lebesgue measure of $I_\delta^R$ is positive, i.e., $m (I_\delta^R)>0.$ Thus, when $y>y_0$ and for any $\xi\in I_\delta^R$ with $\delta\leq y-y_0$, we have $S_\delta\subseteq(0,y),$ and there exists a unique $y_\xi\in S_\delta$ such that $\xi\cdot\bU_h'(y_\xi)=0$ and $\xi\cdot\bU_h''(y_\xi)\neq0$ by using \eqref{def_S}. For such $(\xi,y)$, the method of stationary phase yields that, as $t\rightarrow+\infty$, \begin{equation}\label{estimate_ud3}\begin{split} &\int_{0}^{y}\big[\xi\cdot\big(\bU_h(y)-\bU_h(z)\big)\big]\cdot\big[\xi\cdot\widehat{\bu_{h0}}(\xi,z)\big]~e^{-it\xi\cdot\bU_h(z)}dz\\ &=\sqrt{\frac{2\pi}{t\big|\xi\cdot\bU_h''(y_\xi)\big|}}~\big[\xi\cdot\big(\bU_h(y)-\bU_h(y_\xi)\big)\big]\cdot\big[\xi\cdot\widehat{\bu_{h0}}(\xi,y_\xi)\big]~e^{-it\xi\cdot\bU_h(y_\xi)-\frac{i\pi}{4}sgn\big(\xi\cdot\bU''_h(y_\xi)\big)}+o(\frac{1}{t}). \end{split}\end{equation} Note that when $\delta$ is small enough, we have that for any $\xi\in I_\delta^R,$ \[ \Big|\frac{\xi\cdot\big(\bU_h(y)-\bU_h(y_\xi)\big)}{\sqrt{\big|\xi\cdot\bU_h''(y_\xi)\big|}}\big[\xi\cdot\widehat{\bu_{h0}}(\xi,y_\xi)\big]\Big|~\geq~\Big|\frac{\xi\cdot\big(\bU_h(y)-\bU_h(y_0)\big)}{2\sqrt{\big|\xi\cdot\bU_h''(y_0)\big|}}\big[\xi\cdot\widehat{\bu_{h0}}(\xi,y_0)\big]\Big|, \] and then, substituting \eqref{estimate_ud3} into \eqref{fourier_u} implies that for $\xi\in I_\delta^R$ and $t$ large enough, \begin{equation}\label{mop1}\begin{split} \big|\widehat{\tilde u_d}(t,\xi,y)\big|~\geq~& \frac{\sqrt{\pi t}}{2}~\Big|\frac{\xi\cdot\big(\bU_h(y)-\bU_h(y_0)\big)}{\sqrt{\big|\xi\cdot\bU_h''(y_0)\big|}}\big[\xi\cdot\widehat{\bu_{h0}}(\xi,y_0)\big]\Big|.
\end{split}\end{equation} Thus, for sufficiently large $t$ we obtain, by using Parseval's identity in \eqref{mop1}, \[\begin{split} &\|\tilde u_d\|(t,y)\geq\frac{\sqrt{\pi t}}{2}~\Big\|\frac{\xi\cdot\big(\bU_h(y)-\bU_h(y_0)\big)}{\sqrt{\big|\xi\cdot\bU_h''(y_0)\big|}}\big[\xi\cdot\widehat{\bu_{h0}}(\xi,y_0)\big]\Big\|_{L^2_\xi(I_\delta^R)}, \end{split}\] and then, combining with the uniform boundedness of $\|\bar u_d\|(t,y)$ given in \eqref{est_linear2}, it implies that \[ \|u_d\|(t,y)\geq\frac{\sqrt{\pi t}}{4}~\Big\|\frac{\xi\cdot\big(\bU_h(y)-\bU_h(y_0)\big)}{\sqrt{\big|\xi\cdot\bU_h''(y_0)\big|}}\big[\xi\cdot\widehat{\bu_{h0}}(\xi,y_0)\big]\Big\|_{L^2_\xi(I_\delta^R)}. \] Consequently, we get the estimate \eqref{est_ud5}. Through analogous arguments as above, we can obtain similar results for $\partial_yu_d$ and $\nabla_h\cdot\bu_h.$ \end{proof} \begin{remark}\label{rem_sta} From Proposition \ref{thm_sta}, we see that when the velocity field direction of the background shear flow $(U_1(y), U_2(y), 0)$ is invariant in the normal variable, the linearized problem \eqref{pr_linear} is asymptotically stable when the tangential velocity $U_1(y)$ is monotonic, and unstable when it has a non-degenerate critical point; on the other hand, when the velocity field direction of $(U_1(y), U_2(y), 0)$ changes with respect to $y$, then the problem \eqref{pr_linear} is always asymptotically unstable. This interesting phenomenon is analogous to the stability and instability results obtained by the authors in \cite{LWY1, LWY2, LWY3} for the three-dimensional incompressible Prandtl equations.
\iffalse (2) For the two-dimensional problem \eqref{pr_linear}, in a way similar to that given in \cite[Proposition 6.1]{H-H}, we can prove that, if $U(y)$ has no critical point, then $\|\bu_h\|(t,y)$ is bounded uniformly in $t:$ \begin{equation}\label{est_uh}\begin{split} \|\bu_h\|(t,y)\leq&\Big|\frac{\bU_h(y)}{\bU_h'(0)}\Big|\cdot\|\bu_{h0}\|(0) +\int_{0}^{y}\Big\{\Big|\frac{\bU_h'(y)}{\bU_h'(z)}\Big|\cdot\|\mathcal{m}athcal{P}d_y\bu_{h0}\|(z)\\ &\quad+\Big|\frac{\bU_h''(z)}{\big(\bU_h'(z)\big)^2}\Big|\cdot\|\bu_{h0}\|(z)\Big\}dz+2\|\ta_0\|_{L^2({\mathcal{m}athbb{R}}^d_+)}|\sqrt{y}\bU_h'(y)|, \end{split}\end{equation} and as $t\rightarrow+\infty,$ \begin{equation}\label{est_uh1}\begin{split} \Big\|\bu_h+\bU_h'(y)\int_{0}^{y}\ta(t,x',z)dz\Big\|(t,y) \rightarrow\|\bu_{h0}\|(0)\cdot\Big|\frac{\bU_h'(y)}{\bU_h'(0)}\Big|. \end{split}\end{equation} If $U(y)$ has a single, non-degenerate critical point at $y=y_0$, then when $y> y_0$, we have that when $t\rightarrow+\infty,$ \begin{equation}\label{est_uh2}\begin{split} \Big\|\bu_h+\bU_h'(y)\int_{0}^{y}\ta(t,x',z)dz\Big\|(t,y) \sim\sqrt{2\mathcal{m}athcal{P}i t}\|\bu_{h0}\|_{\frac{1}{2}}(y_0)\frac{|\bU_h'(y)|}{\sqrt{|\bU_h''(y_0)|}}, \end{split}\end{equation} with $ \|\bu_{h0}\|_{\frac{1}{2}}(y)~:=~\Big(\int_{{\mathcal{m}athbb{R}}^2}|\xi|\cdot\big|\mathcal{m}athcal{A}at{\bu}_{h0}\big|^2(\xi,y)d\xi\Big)^{\frac{1}{2}}. $ Also, for sufficiently large $t$ we have \begin{equation}\label{est_uh3} \|\bu_h\|(t,y)\geq\sqrt{\frac{\mathcal{m}athcal{P}i t}{2}}~\|\bu_{h0}\|_{\frac{1}{2}}(y_0)\frac{|\bU_h'(y)|}{\sqrt{|U_1''(y_0)|}}. 
\end{equation} Similar estimates hold for $\mathcal{m}athcal{P}d_y\bu_h, u_d$ and $\mathcal{m}athcal{P}d_yu_d.$ \fi \end{remark} \iffalse \section{Local existence for two-dimensional problem with monotonic initial data} In this section, we consider two-dimensional case of the problem \eqref{pr_invis} in ${\mathcal{m}athbb{R}}_+\times{\mathcal{m}athbb{R}}_+^2$ with monotonic initial data, i.e., \begin{equation}\label{pr_2d}\begin{cases} \mathcal{m}athcal{P}d_t u+(u\mathcal{m}athcal{P}d_x+v\mathcal{m}athcal{P}d_y)u=0,\\ \mathcal{m}athcal{P}d_t \ta+(u\mathcal{m}athcal{P}d_x+v\mathcal{m}athcal{P}d_y)\ta =\frac{ \ka}{P}\ta\big(\mathcal{m}athcal{P}d_y^2 \ta+P_t\big),\\ \mathcal{m}athcal{P}d_x u+\mathcal{m}athcal{P}d_y v=\frac{\ka }{P}\mathcal{m}athcal{P}d_y^2 \ta -\frac{(1-\ka)P_t}{P},\\ v|_{y=0}=0,~\mathcal{m}athcal{P}d_y \ta|_{y=0}=0,\quad \liy\ta(t,x,y)=\Ta(t,x),\\ (u,\ta)|_{t=0}=(u_0,\ta_0)(x,y), \end{cases}\end{equation} where we assume that for the initial data: \begin{equation}\label{ass_mono} \mathcal{m}athcal{P}d_y u_0(x,y)>0,\qquad\forall~(x,y)\in{\mathcal{m}athbb{R}}^2_+. \end{equation} Here we still take the boundary condition of $\ta$ in \eqref{bd_ta} with $\alpha\neq0$, the case of $\alpha=0$ is similar. Recall that from \eqref{Ber}, \begin{equation}\label{ber}\begin{cases} U_t+UU_x=0,\\ \Ta_t+U\Ta_x-\frac{\ka\Ta P_t}{P}=0, \end{cases}\end{equation} where the known functions $U(t,x)$ and $\Ta(t,x)$ are from Euler flow. We have the following result for the problem \eqref{pr_2d}: \begin{prop} Let $(u_0,\ta_0)(x,y)$ be smooth initial data of \eqref{pr_2d} such that \begin{equation}\label{ass_1} \liy u_0(x,y)=U(0,x),~\mathcal{m}athcal{P}d_yu_0>0,\quad t_*:=-\Big[\inf\limits_{x\in{\mathcal{m}athbb{R}}}u_{0x}(x,0)\Big]^{-1}>0, \end{equation} and \begin{equation}\label{ass_2} C^{-1}\leq\ta_0\leq C,\quad|u_{0x}|,|u_{0y}|,|\ta_{0x}|\leq C,\quad |u_{0xy}|\leq C,\quad |u_{0yy}|\leq Cu_{0y}. 
\end{equation} Then there exists a time $t_0:0<t_0\leq t_*$ and a unique classical solution to the problem \eqref{pr_2d} in $0\leq t< t_0$, satisfying that $\mathcal{m}athcal{P}d_yu>0$ and $\ta\geq\frac{1}{2C}$. \end{prop} \begin{proof}[\bf Proof.] We will prove this proposition by the Crocco transformation. First of all, we establish the boundary conditions of $u(t,x,y)$ at $y=0$ and $y\rightarrow+\infty.$ From \eqref{ber} and the first equation $U_t+UU_x=0$ of \eqref{pr_2d}, it is easy to get that \begin{equation}\label{bd_infi} \liy u(t,x,y)~=~U(t,x) \end{equation} by using $\liy u_0(x,y)=U(0,x).$ Let $u^b(t,x):=u(t,x,0)$ be the boundary data on $\{y=0\},$ then the restriction of first equation in \eqref{pr_2d} on $\{y=0\}$ and the condition $v|_{y=0}=0$ yield that $u^b(t,x)$ satisfies the following Burgers equation: \begin{equation}\label{pr_ini}\begin{cases} \mathcal{m}athcal{P}d_tu^b+u^b\mathcal{m}athcal{P}d_xu^b=0,\\ u_b(0,x)=u_0(x,0). \end{cases}\end{equation} It follows that the problem \eqref{pr_ini} has a smooth solution $u^b(t,x)$ for $t\in[0,t_*).$ Moreover, from $\mathcal{m}athcal{P}d_yu_0>0$ and $\liy u_0(x,y)=U(t,x)$ we get $u^b(0,x)=u_0(x,0)<U(0,x),$ and then $u^b(t,x)<U(t,x)$ for $t\in[0,t_*).$ We introduce the Crocco transformation: \begin{equation}\label{Crocco} \tau=t,\quad\xi=x,\quad\eta=u(t,x,y), \end{equation} and let \begin{equation*} w(\tau,\xi,\eta)~=~\mathcal{m}athcal{P}d_yu(t,x,y),\quad\tta(\tau,\xi,\eta)~=~\ta(t,x,y). 
\end{equation*} Then, the domain $\{(t,x,y):t,y>0,x\in{\mathcal{m}athbb{R}}\}$ becomes \begin{equation*} \Omega~:=~\big\{(\tau,\xi,\eta): \tau>0,\xi\in{\mathcal{m}athbb{R}},u^b(\tau,\xi)<\eta<U(\tau,\xi)\big\}, \end{equation*} and the problem can be reduced as follows: \begin{equation}\label{pr_crocco}\begin{cases} \mathcal{m}athcal{P}d_\tau w+\eta\mathcal{m}athcal{P}d_\xi w-\frac{(1-\ka)P_\tau}{P}w+\frac{\ka}{P}w^2\mathcal{m}athcal{P}d_\eta(w\mathcal{m}athcal{P}d_\eta \tta)=0,\quad&{\rm in}~\Omega,\\ \mathcal{m}athcal{P}d_\tau \tta+\eta\mathcal{m}athcal{P}d_\xi \tta-\frac{\ka P_\tau}{P}\tta-\frac{\ka}{P}\tta w\mathcal{m}athcal{P}d_\eta(w\mathcal{m}athcal{P}d_\eta \tta)=0,\quad&{\rm in}~\Omega,\\ w\mathcal{m}athcal{P}d_\eta\tta|_{\eta=u^b(\tau,\xi)}=0,\quad \tta|_{\eta=U(\tau,\xi)}=\Ta(\tau,\xi),\\ (w,\tta)|_{\tau=0}=(w_0,\tta_0)(\xi,\eta), \end{cases}\end{equation} where the initial data \begin{equation}\label{ini_crocco} w_0(\xi,\eta):=\mathcal{m}athcal{P}d_yu_0(x,y),~ \tta_0(\xi,\eta):=\ta_0(x,y)\geq C^{-1},\quad{\rm with}~\xi=x,\eta=u_0(x,y). \end{equation} From the equations in \eqref{pr_crocco}, we get \[ \mathcal{m}athcal{P}d_\tau(w\tta)+\eta\mathcal{m}athcal{P}d_\xi(w\tta)-\frac{P_\tau}{P}w\tta=0, \] which implies that \begin{equation}\label{wta} (w\tta)(\tau,\xi,\eta)~=~\frac{P(\tau)}{P(0)}(w_0\tta_0)(\xi-\tau\eta,\eta)~\triangleq~a(\tau,\xi,\eta). 
\end{equation} We know that from \eqref{ini_crocco}, \begin{equation}\label{ini_va} w_0(\xi,\eta)>0,~{\rm for}~u^b(0,\xi)\leq\eta<U(0,\xi);\quad w_0\Big(\xi,U(0,\xi)\Big)=0, \end{equation} and $U(\tau,\xi)=U\Big(0,\xi-\tau U(\tau,\xi)\Big)$ by using that $U_\tau+UU_\xi=0$ in \eqref{ber}, then it follows that \[ w_0\big(\xi-\tau\eta,\eta\big)|_{\eta=U(\tau,\xi)}=w_0\Big(\xi-\tau U(\tau,\xi),U\big(0,\xi-\tau U(\tau,\xi)\big)\Big)=0, \] which implies that by combining \eqref{wta} with \eqref{ini_va}, \begin{equation}\label{a_va} a(\tau,\xi,\eta)>0,~{\rm for}~u^b(\tau,\xi)\leq\eta<U(\tau,\xi);\quad a\Big(\tau,\xi,U(\tau,\xi)\Big)=0. \end{equation} Thus, if we suppose that $\tta>0$, then from \eqref{wta} and \eqref{a_va} we have \begin{equation}\label{formu_w} w(\tau,\xi,\eta)~=~\frac{a(\tau,\xi,\eta)}{\tta(\tau,\xi,\eta)}>0,~{\rm for}~u^b(\tau,\xi)\leq\eta<U(\tau,\xi);\quad w(\tau,\xi,\eta)|_{\eta=U(\tau,\xi)}=0, \end{equation} and plugging \eqref{formu_w} into \eqref{pr_crocco} yields that \begin{equation}\label{pr-tta}\begin{cases} \mathcal{m}athcal{P}d_\tau\tta+\eta\mathcal{m}athcal{P}d_\xi\tta-\frac{\ka P_\tau}{P}\tta-\frac{\ka a}{P}\mathcal{m}athcal{P}d_\eta\big(\frac{a}{\tta}\mathcal{m}athcal{P}d_\eta\tta\big)=0,\quad {\rm in}~\Omega,\\ \mathcal{m}athcal{P}d_\eta\tta|_{\eta=u^b(\tau,\xi)}=0,\quad\tta|_{\eta=U(\tau,\xi)}=\Ta(\tau,\xi),\\ \tta|_{\tau=0}=\tta_0(\xi,\eta). \end{cases}\end{equation} It isn't difficult to obtain that there exists a time $0<t_0\leq t_*$ and a unique classical solution $\tta(\tau,\xi,\eta)$ to the problem \eqref{pr-tta} in $0\leq \tau<t_0$, such that $\tta\geq\frac{1}{2C}$ and has bounded derivative $\mathcal{m}athcal{P}d_\eta\tta.$ Thus, we get the unique solution $(w,\tta)(\tau,\xi,\eta)$, given by \eqref{formu_w} and \eqref{pr-tta}, to problem \eqref{pr_crocco} for $\tau\in[0,t_0)$. Next, we turn to the original problem \eqref{pr_2d} of $(u,\ta)(t,x,y)$. 
From the Crocco transformation \eqref{Crocco} and the formula \eqref{formu_w}, we determine $u(t,x,y)$ by \begin{equation}\label{formu-y} y~=~\int_{u^b(t,x)}^{u}\frac{d\eta}{w(t,x,\eta)}, \end{equation} or we solve the following ODE in $y$ for $u(t,x,y),$ \begin{equation}\label{ode_u}\begin{cases} \mathcal{m}athcal{P}d_yu(t,x,y)~=~(a/\tta)\big(t,x,u(t,x,y)\big),\\%\frac{a\big(t,x,u(t,x,y)\big)}{\tta\big(t,x,u(t,x,y)\big)},\\ u(t,x,0)~=~u^b(t,x). \end{cases}\end{equation} It's known that there is a global solution in $y$ of \eqref{ode_u} if the right-hand side $\frac{a}{\tta}(t,x,u)$ is a globally Lipschitz function of $u$. From \eqref{ini_crocco} we have \[(w_0,\tta_0)(x,u)~=~(u_{0y},\ta_0)\big(x,y(x,u)\big), \] where $y=y(x,u)$ is the inverse function of $u=u_0(x,y)$, then \begin{equation} \mathcal{m}athcal{P}d_u a(t,x,u)=\frac{P(t)}{P(0)}\Big[-t\mathcal{m}athcal{P}d_x(u_{0y}\ta_0)+(1+tu_{0x})\frac{\mathcal{m}athcal{P}d_y(u_{0y}\ta_0)}{u_{0y}}\Big]\Big(x-tu,y(x-tu,u)\Big), \end{equation} thus it implies that $\mathcal{m}athcal{P}d_u a(t,x,u)$ is bounded by virtue of the conditions in \eqref{ass_2}. Note that $\tta(t,x,u)\geq\frac{1}{2C}$ with bounded derivative $\mathcal{m}athcal{P}d_u\tta(t,x,u)$, so $\frac{a}{\tta}(t,x,u)$ is globally Lipschitz in $u$, which shows that we obtain the solution $u(t,x,y)$ to the problem \eqref{ode_u}. Now, we will determine the functions $v(t,x,y)$ and $\ta(t,x,y)$ for \eqref{pr_2d}. Firstly, for the function $u(t,x,y)$ we obtained above, and combining \eqref{formu-y} with \eqref{formu_w}, we get that $y\in{\mathcal{m}athbb{R}}_+$, and the inverse transformation of $\eta=u(t,x,y)$ changes the boundary $\eta=u^b(t,x)$, $\eta=U(t,x)$ respectively, into $y=0$, $y\rightarrow+\infty$ respectively. 
Moreover, we have that from \eqref{formu-y}, \begin{equation}\label{tr_u}\begin{split} &u|_{y=0}=u^b(t,x),\quad\mathcal{m}athcal{P}d_y u=w(t,x,u)>0,\quad \mathcal{m}athcal{P}d_{yt}^2u=w_t(t,x,u)+w_\eta(t,x,u)\cdot\mathcal{m}athcal{P}d_tu,\\ &\mathcal{m}athcal{P}d_{yx}^2u=w_x(t,x,u)+w_\eta(t,x,u)\cdot\mathcal{m}athcal{P}d_xu,\quad\mathcal{m}athcal{P}d_t u=w(t,x,u)\int_{u^b}^u\frac{w_t}{w^2}(t,x,\eta)d\eta+\frac{w(t,x,u)}{w(t,x,u^b)}u^b_t,\\ &\mathcal{m}athcal{P}d_x u=w(t,x,u)\int_{u^b}^u\frac{w_x}{w^2}(t,x,\eta)d\eta+\frac{w(t,x,u)}{w(t,x,u^b)}u^b_x. \end{split}\end{equation} Let \begin{equation}\label{formu_ta} \ta(t,x,y)~=~\tta(t,x,u(t,x,y)), \end{equation} then from the boundary conditions of $\tta$ in \eqref{pr-tta}, it implies that \begin{equation}\label{bd-ta} \mathcal{m}athcal{P}d_y\ta|_{y=0}=w\mathcal{m}athcal{P}d_\eta\tta|_{\eta=u^b(t,x)}=0,\quad \liy\ta(t,x,y)=\Ta(t,x). \end{equation} Also, the initial value $(u,\ta)|_{t=0}=(u_0,\ta_0)(x,y)$ is fulfilled by $(w_0,\tta_0)(x,u_0)=(u_{0y},\ta_0)(x,y).$ Furthermore, we have \begin{equation}\label{tr_ta}\begin{split} &\mathcal{m}athcal{P}d_y\ta=(w\tta_\eta)(t,x,u),\quad \mathcal{m}athcal{P}d_y^2\ta=(ww_\eta\tta_\eta+w^2\tta_{\eta\eta})(t,x,u),\\ &\mathcal{m}athcal{P}d_t\ta=\tta_t(t,x,u)+\tta_\eta(t,x,u)\cdot\mathcal{m}athcal{P}d_tu,\quad\mathcal{m}athcal{P}d_x\ta=\tta_x(t,x,u)+\tta_\eta(t,x,u)\cdot\mathcal{m}athcal{P}d_xu. 
\end{split}\end{equation} Next, let us set \begin{equation}\label{formu_v} v(t,x,y)~=~-\frac{\partial_tu(t,x,y)+u(t,x,y)\partial_xu(t,x,y)}{\partial_yu(t,x,y)}, \end{equation} and then, we know that the function $v(t,x,y)$ is smooth and bounded in $[0,t_0)\times{\mathbb{R}}^2_+.$ Also, by using $u|_{y=0}=u^b(t,x)$ in \eqref{tr_u} and the equation of $u^b$ in \eqref{pr_ini} it yields that \[v(t,x,0)~=~\frac{\partial_tu^b(t,x)+u^b(t,x)\partial_xu^b(t,x)}{\partial_yu(t,x,0)}~=~0.\] Thus, the rest of our work is to show that $(u,v,\ta)(t,x,y)$, given by \eqref{ode_u}, \eqref{formu_ta} and \eqref{formu_v}, satisfies the equations of problem \eqref{pr_2d}. Obviously, the first equation of \eqref{pr_2d} holds because of the definition of $v$ in \eqref{formu_v}. \end{proof} \fi \section*{Appendix. Derivation of the boundary layer problem} \setcounter{equation}{0} \renewcommand{\theequation}{A.\arabic{equation}} In the appendix, we give a formal derivation of the problem \eqref{pr_invis} for the thermal layer profiles in the zero heat conductivity limit of inviscid compressible flows. Analogously, this problem of the thermal layer profiles can also be derived from the compressible Navier--Stokes equations when the viscosity coefficients are of higher order with respect to the heat conductivity coefficient.
Consider the following problem of the compressible Euler--Fourier equations in the domain ${\mathbb{R}}_+\times{\mathbb{R}}^d_+$ with $d=2,3,$ \begin{equation}\label{oreq} \begin{cases} \partial_t\rho+\nabla\cdot(\rho \textbf{u})=0,\\ \rho\{ \partial_t\textbf{u}+(\textbf{u}\cdot\nabla)\textbf{u}\}+\nabla p(\rho,\theta) =0,\\%\mu\Delta \textbf{u}+(\lambda+\mu)\nabla(\nabla\cdot \textbf{u}),\\
c_V\rho\{\partial_t\theta+(\textbf{u}\cdot\nabla)\theta\}+p(\rho,\theta)\nabla\cdot \textbf{u} =\ep\Delta\ta, \end{cases}\end{equation} where the spatial variables $x=(x',x_d)\in{\mathbb{R}}_+^d$ with $x'=(x_1,\cdots,x_{d-1})\in{\mathbb{R}}^{d-1}$ and $x_d>0$, $\rho$ is the density, $\textbf{u}=(u_1,\cdots,u_d)^T$ is the velocity, $\theta$ is the absolute temperature, $p(\rho,\theta)$ is the pressure, the constant $c_V>0$ is the specific heat capacity, and $\ep$ is the coefficient of heat conduction. For the equations \eqref{oreq}, we endow them with the following boundary conditions: \begin{equation}\label{bd_oreq} u_d|_{x_d=0}=0,\quad \big[\alpha\partial_{x_d}\ta+\beta\ta\big]\big|_{x_d=0}=\gamma, \end{equation} where $\alpha=\alpha(t,x'),\beta=\beta(t,x')$ and $\gamma=\gamma(t,x')$ are given functions. For simplicity, we consider the ideal gas model for the problem \eqref{oreq}-\eqref{bd_oreq}, i.e., \(p(\rho,\theta)=R\rho\theta\) with a positive constant $R$. We are concerned with the asymptotic behavior of the solution $(\rho,\textbf{u},\ta)(t,x)$ to the problem \eqref{oreq}-\eqref{bd_oreq} when the heat conduction coefficient $\ep$ tends to zero.
Formally, when $\epz$, the equations \eqref{oreq} converge to the following compressible non-isentropic Euler equations in ${\mathbb{R}}_+\times{\mathbb{R}}^d_+:$ \begin{equation}\label{eq_e} \begin{cases} \partial_t\rho^e+\nabla\cdot(\rho^e \textbf{u}^e)=0,\\ \rho^e\{ \partial_t\textbf{u}^e+(\textbf{u}^e\cdot\nabla)\textbf{u}^e\}+R\nabla (\rho^e\theta^e) =0,\\ c_V\rho^e\{\partial_t\theta^e+(\textbf{u}^e\cdot\nabla)\theta^e\}+R\rho^e\theta^e(\nabla\cdot \textbf{u}^e)=0. \end{cases}\end{equation} From the impenetrable condition given in \eqref{bd_oreq}, we know that the condition \begin{equation}\label{bd_e} u_d^e|_{x_d=0}=0 \end{equation} is a reasonable one to determine the flow described by \eqref{eq_e}, as the flow slips along the boundary $\{x_d=0\}$. In particular, we do not impose any constraint on the temperature at the boundary. The inconsistency of boundary conditions between \eqref{bd_oreq} and \eqref{bd_e} leads to the appearance of boundary layers near the physical boundary $\{x_d=0\}$, in which the temperature shall change rapidly. Since in the problem \eqref{oreq}-\eqref{bd_oreq}, the heat diffusion is important in the boundary layer and should be balanced by the convection, meanwhile note that the vertical component of the velocity field vanishes at the boundary, then as in \cite{prandtl, LWY4, Gerard, Gerard2}, the boundary layer is of the characteristic type, and the size of the boundary layer is of order $\mathcal{O}(\sep)$. Therefore, we assume that near the boundary, the solution of \eqref{oreq}-\eqref{bd_oreq} has the form of \begin{equation}\label{form} (\rho,\bu,\ta)(t,x)=\Big(\rho^\ep,\bu_h^\ep,\sep u_d^\ep,\ta^\ep\Big)(t,x',\frac{x_d}{\sep}) \end{equation} with \(\bu_h^\ep=(u_1^\ep,\cdots,u_{d-1}^\ep)^T\).
In these new variables, the problem \eqref{oreq}-\eqref{bd_oreq} is transformed into the following one in $\{(t,x',y): t>0,x'\in{\mathcal{m}athbb{R}}^{d-1},y>0\}$ with $y=\frac{x_d}{\sep}$: \begin{equation}\label{pr_ep} \begin{cases} \mathcal{m}athcal{P}artial_t\rho^\ep+\nabla_h\cdot(\rho^\ep \bu_h^\ep)+\mathcal{m}athcal{P}d_y(\rho^\ep u_d^\ep)=0,\\ \rho^\ep\{ \mathcal{m}athcal{P}artial_t\textbf{u}^\ep_h+(\textbf{u}^\ep_h\cdot\nabla_h+u_d^\ep\mathcal{m}athcal{P}d_y)\textbf{u}^\ep_h\}+R\nabla_h (\rho^\ep \theta^\ep) =0,\\%\mathcal{m}u\mathcal{m}athcal{D}elta \textbf{u}+(\lambda+\mathcal{m}u)\nabla(\nabla\cdot \textbf{u}),\\ \rho^\ep\{ \mathcal{m}athcal{P}artial_t u_d^\ep+(\textbf{u}^\ep_h\cdot\nabla_h+u_d^\ep\mathcal{m}athcal{P}d_y)u_d^\ep\}+\frac{R\mathcal{m}athcal{P}d_y (\rho^\ep \theta^\ep) }{\ep}=0,\\ c_V\rho^\ep\{\mathcal{m}athcal{P}artial_t\theta^\ep+(\textbf{u}^\ep_h\cdot\nabla_h+u_d^\ep\mathcal{m}athcal{P}d_y)\theta^\ep\}+R\rho^\ep\theta^\ep(\nabla_h\cdot \textbf{u}_h^\ep+\mathcal{m}athcal{P}d_yu_d^\ep) =\ep\mathcal{m}athcal{D}elta_h\ta^\ep+\mathcal{m}athcal{P}d_y^2\ta^\ep,\\%\kappa\mathcal{m}athcal{D}elta\theta+\lambda(\nabla\cdot \textbf{u})^2+2\mathcal{m}u D\cdot D\}, u_d^\ep|_{y=0}=0,~\big[\frac{\alpha}{\sep}\mathcal{m}athcal{P}d_y\ta^\ep+ \beta\ta^\ep\big]|_{y=0}=\gamma, \end{cases}\end{equation} where $\nabla_h=(\mathcal{m}athcal{P}d_{x_1},\cdots,\mathcal{m}athcal{P}d_{x_{d-1}})^T$, $\mathcal{m}athcal{D}elta_h=\mathcal{m}athcal{P}d_{x_1}^2+\cdots+\mathcal{m}athcal{P}d_{x_{d-1}}^2$. 
Inspired by the Prandtl boundary layer theory of incompressible flows given in \cite{prandtl}, we assume that the solution of \eqref{pr_ep} can be approximated as follows: \begin{equation}\label{ansatz} (\rho^\ep,\bu_h^\ep,u_d^\ep,\ta^\ep)(t,x',y)=(\rho^e,\bu_h^e,\frac{u_d^e}{\sep},\ta^e)(t,x',\sep y)+(\rho^b,\bu_h^b,u_d^b,\ta^b)(t,x',y)+O(\sep), \end{equation} where $(\rho^e,\bu^e,\ta^e)$ denotes the Euler flow given by \eqref{eq_e}-\eqref{bd_e} with $\bu^e=(\bu_h^e,u_d^e)^T,$ and the boundary layer profiles $(\rho^b,\bu_h^b,u_d^b,\ta^b)(t,x',y)$ decrease rapidly as $y\rightarrow+\infty$. Obviously, from \eqref{ansatz} we have \begin{equation}\label{ansatz1} (\rho^\ep,\bu_h^\ep,u_d^\ep,\ta^\ep)(t,x',y)=(\rho,\bu_h,u_d,\ta)(t,x',y)+O(\sep). \end{equation} where \[(\rho,\bu_h,u_d,\ta)(t,x',y)~:=~(\rho^e,\bu_h^e,y\mathcal{m}athcal{P}d_{x_d}u_d^e,\ta^e)(t,x',0) +(\rho^b,\bu_h^b,u_d^b,\ta^b)(t,x',y)\] are the boundary layer profiles. Plugging the ansatz \eqref{ansatz1} into the problem \eqref{pr_ep} and collecting the leading terms in $\ep$, we obtain the following problem in $\{(t,x',y)|~ t>0, x'\in {\mathcal{m}athbb{R}}^{d-1}, y>0\}$: \begin{equation}\label{pr_p} \begin{cases} \mathcal{m}athcal{P}artial_t\rho+\nabla_h\cdot(\rho \bu_h)+\mathcal{m}athcal{P}d_y(\rho u_d)=0,\\ \rho\{ \mathcal{m}athcal{P}artial_t\textbf{u}_h+(\textbf{u}_h\cdot\nabla_h+u_d\mathcal{m}athcal{P}d_y)\textbf{u}_h\}+R\nabla_h (\rho \theta) =0,\\%\mathcal{m}u\mathcal{m}athcal{D}elta \textbf{u}+(\lambda+\mathcal{m}u)\nabla(\nabla\cdot \textbf{u}),\\ \mathcal{m}athcal{P}d_y(\rho\ta)=0,\\ c_V\rho\{\mathcal{m}athcal{P}artial_t\theta+(\textbf{u}_h\cdot\nabla_h+u_d\mathcal{m}athcal{P}d_y)\theta\}+R\rho\theta(\nabla_h\cdot \textbf{u}_h+\mathcal{m}athcal{P}d_yu_d) =\mathcal{m}athcal{P}d_y^2\ta,\\%\kappa\mathcal{m}athcal{D}elta\theta+\lambda(\nabla\cdot \textbf{u})^2+2\mathcal{m}u D\cdot D\}, u_d|_{y=0}=0,\quad\liy(\rho,\bu_h,\ta)=(\rho^e,\bu_h^e,\ta^e)(t,x',0), \end{cases}\end{equation} and the boundary 
values for $\ta:$ \begin{equation}\label{bd_ta}\begin{cases} \mathcal{m}athcal{P}d_y\ta|_{y=0}=0,\qquad&{\rm when}\quad \alpha\neq0,\\ \ta|_{y=0}=\ta^0(t,x'),\qquad&{\rm when}\quad \alpha=0, \end{cases}\end{equation} with $\ta^0(t,x'):=\frac{\gamma(t,x')}{\beta(t,x')}$ provided $\beta\neq0.$ Firstly, we immediately obtain that from the third equation and boundary conditions given in \eqref{pr_p}, \begin{equation}\label{bd_pre} (\rho\ta)(t,x',y)~\equiv~(\rho^e\ta^e)(t,x',0)~=~\frac{p^e(t,x',0)}{R}, \end{equation} where $p^e$ is the pressure of the Euler flow. It means that the leading term of the pressure does not change in boundary layers. Next, for the problem \eqref{pr_p} endowed with the Neumann boundary condition for $\ta$ given in \eqref{bd_ta}, i.e., $\alpha\neq0, \mathcal{m}athcal{P}d_y\ta|_{y=0}=0,$ one can check that \[ (\rho,\bu_h,\ta)(t,x',y)~=~(\rho^e,\bu_h^e,\ta^e)(t,x',0),\quad u_d(t,x',y)=y\mathcal{m}athcal{P}d_{x_d}u_d^e(t,x',0) \] is a special solution to \eqref{pr_p}. Indeed, it can be easily verified by restricting the equations \eqref{eq_e} to the boundary $\{x_d=0\}$ and using the boundary condition \eqref{bd_e}. This shows that when $\alpha\neq 0$ in the boundary condition \eqref{bd_oreq}, the leading term of boundary layer profiles does not appear and the thermal layer for the compressible system \eqref{oreq} shall be `weak'. Usually, it is not true when we use the Dirichlet boundary condition $\ta|_{y=0}=\theta^0(t,x')$ given in \eqref{bd_ta} for the problem \eqref{pr_p}, that is to say, the thermal layer for the system \eqref{oreq} is `strong' when it is endowed with the boundary condition \eqref{bd_oreq} with $\alpha=0$. Therefore, we focus on the case of $\alpha=0$ in the following. 
Plugging \eqref{bd_pre} into the problem \eqref{pr_p}-\eqref{bd_ta}, it follows that $(\bu_h,u_d,\ta)(t,x',y)$ satisfies the following problem in ${\mathcal{m}athbb{R}}_+\times{\mathcal{m}athbb{R}}^d_+$: \begin{equation}\label{pr_p1}\begin{cases} \mathcal{m}athcal{P}d_t \bu_h+(\bu_h\cdot\nabla_h+u_d\mathcal{m}athcal{P}d_y)\bu_h+\frac{R\ta}{P}\nabla_hP=0,\\%~&in ~\Omega_T,\\ \mathcal{m}athcal{P}d_t \ta+(\bu_h\cdot\nabla_h+u_d\mathcal{m}athcal{P}d_y)\ta =\frac{R}{(R+c_V)P}\ta\big( \mathcal{m}athcal{P}d_y^2\ta+\bu_h\cdot\nabla_hP+P_t\big),\\%~&in ~\Omega_T,\\ \nabla_h\cdot\bu_h+\mathcal{m}athcal{P}d_y u_d=\frac{ R}{(R+c_V)P} \mathcal{m}athcal{P}d_y^2\ta-\frac{c_V}{(R+c_V)P}\big(\bu_h\cdot\nabla_hP+ P_t\big),\\%~&in ~\Omega_T,\\ (u_d,\ta)|_{y=0}=\big(0,\ta^0(t,x')\big), \quad \lim\limits_{\yinf}(\bu_h,\ta)=(\textbf{U}_h,\Ta)(t,x), \end{cases}\end{equation} where $$(P,\textbf{U}_h,\Ta)(t,x')~=~(p^e,\bu_h^e,\ta^e)(t,x',0)$$ are given by the Euler flow, and satisfy the following equations derived from \eqref{eq_e}-\eqref{bd_e}, \begin{equation}\label{Ber}\begin{cases} \mathcal{m}athcal{P}d_t\textbf{U}_h+\textbf{U}_h\cdot\nabla_h\textbf{U}_h+\frac{R\Ta}{P}\nabla_hP=0,\\ \mathcal{m}athcal{P}d_t\Ta+\textbf{U}_h\cdot\nabla_h\Ta-\frac{R\Ta}{(R+c_V)P}\cdot(P_t+\textbf{U}_h\cdot\nabla_hP)=0. \end{cases}\end{equation} Then, we endow the problem \eqref{pr_p1} with the initial data \begin{equation}\label{initial} (\bu_h,\ta)(0,x',y)~=~(\bu_{h0},\ta_0)(x',y). \end{equation} If the initial data $\bu_{h0}$ satisfies the compatibility condition \begin{equation}\label{ass_init} \liy \bu_{h0}~=~\textbf{U}_h(0,x'), \end{equation} we observe that the constrain of $\bu_h$ as $y\rightarrow+\infty$ in \eqref{pr_p1} can be removed, since the condition $\liy\bu_h=\textbf{U}_h(t,x')$ holds trivially from $\eqref{Ber}_1$ and \eqref{ass_init}, provided that $\bu_h$ has a limit when $y\to +\infty$. 
Therefore, we conclude the following initial-boundary value problem for the inviscid Prandtl equations coupled with a degenerate parabolic equation in ${\mathcal{m}athbb{R}}_+\times{\mathcal{m}athbb{R}}^d_+$: \begin{equation}\label{pr_bd1}\begin{cases} \mathcal{m}athcal{P}d_t \bu_h+(\bu_h\cdot\nabla_h+u_d\mathcal{m}athcal{P}d_y)\bu_h+\frac{R\ta}{P}\nabla_hP=0,\\%~&in ~\Omega_T,\\ \mathcal{m}athcal{P}d_t \ta+(\bu_h\cdot\nabla_h+u_d\mathcal{m}athcal{P}d_y)\ta =\frac{\ka\ta}{P}\big( \mathcal{m}athcal{P}d_y^2\ta+\bu_h\cdot \nabla_hP+P_t\big),\\%~&in ~\Omega_T,\\ \nabla_h\cdot\bu_h+\mathcal{m}athcal{P}d_y u_d=\frac{\ka}{P} \mathcal{m}athcal{P}d_y^2\ta-\frac{1-\ka}{P}\big(\bu_h\cdot \nabla_hP+ P_t\big),\\%~&in ~\Omega_T,\\ (u_d,\ta)|_{y=0}=\big(0,\ta^0(t,x')\big), \quad\lim\limits_{\yinf}\ta(t,x,y)=\Ta(t,x'),\\ (\bu_h,\ta)|_{t=0}=(\bu_{h0},\ta_0)(x',y) \end{cases}\end{equation} with the constant \(\ka:=\frac{R}{R+c_V}.\) Finally, we point out that the theoretic study developed in previous sections is focused on a simple case of the problem \eqref{pr_bd1}, i.e., the pressure $P(t,x')$ of the outflow is a positive function depending only on the time variable $t$, \[P(t,x')~\equiv~P(t)>0,\] and thus, the problem \eqref{pr_bd1} is simplified as the following one, \begin{equation}\label{pr_bd2}\begin{cases} \mathcal{m}athcal{P}d_t \bu_h+(\bu_h\cdot\nabla_h+u_d\mathcal{m}athcal{P}d_y)\bu_h=0,\\%~&in ~\Omega_T,\\ \mathcal{m}athcal{P}d_t \ta+(\bu_h\cdot\nabla_h+u_d\mathcal{m}athcal{P}d_y)\ta =\frac{\ka}{P}\ta \mathcal{m}athcal{P}d_y^2\ta+\frac{\ka P_t}{P}\ta,\\%~&in ~\Omega_T,\\ \nabla_h\cdot\bu_h+\mathcal{m}athcal{P}d_y u_d=\frac{\ka}{P} \mathcal{m}athcal{P}d_y^2\ta-\frac{(1-\ka)P_t}{P},\\%\big(\bu_\tau\cdot \nabla_hP+ P_t\big),\\%~&in ~\Omega_T,\\ (u_d,\ta)|_{y=0}=\big(0,\ta^0(t,x')\big), \quad\lim\limits_{\yinf}\ta(t,x,y)=\Ta(t,x'),\\ (\bu_h,\ta)|_{t=0}=(\bu_{h0},\ta_0)(x',y). 
\end{cases}\end{equation} \begin{remark} In \cite{LWY4}, the authors have studied the small viscosity and heat conductivity limit for the compressible Navier-Stokes-Fourier equations with nonslip boundary condition on velocity and the same condition as given in \eqref{bd_oreq} for the temperature, and obtained that the thermal layer profiles satisfy the same problem as given in \eqref{pr_bd1}, when the viscosity goes to zero faster than the heat conductivity. \end{remark} {\bf Acknowledgements:} The first two authors' research was supported in part by National Natural Science Foundation of China (NNSFC) under Grant Nos. 91230102 and 91530114, and the second author's research was also supported by Shanghai Committee of Science and Technology under Grant No. 15XD1502300. The last author's research was supported by the General Research Fund of Hong Kong, CityU No. 103713. \end{document}
\begin{document} \title[]{Information transmission and control in a chaotically kicked spin chain} \author{Lucile Aubourg and David Viennot} \address{Institut UTINAM (CNRS UMR 6213, Universit\'e de Bourgogne-Franche-Comt\'e, Observatoire de Besan\c con), 41bis Avenue de l'Observatoire, BP1615, 25010 Besan\c con cedex, France.} \ead{[email protected]} \maketitle \begin{abstract} We study spin chains submitted to disturbed kick trains described by classical dynamical processes. The spin chains are coupled by Heisenberg and Ising-Z models. We consider chaotic processes by using the kick irregularity in the multipartite system (the spin chain). We show that the two couplings transmit the chaotic disorder along the spin chain differently but both conserve the horizon of coherence (the time when the disorder of the kick bath is transmitted to the spin chain). An example of information transmission between the spins of the chain coupled by a Heisenberg interaction shows the interest of the horizon of coherence. The use of some chosen stationary kicks disturbed by a chaotic environment allows one to modify the information transmission between the spins and to perform a free control during the horizon of coherence. \end{abstract} \pacs{03.65.Yz, 05.45.Mt, 75.10.Jm, 75.10.Pq} \section{Introduction} The emergence of quantum information protocols (to perform logic gates and for the transport and the teleportation of information) and the nanosciences have raised interest in the dynamics and control of multipartite quantum systems. A key problem is the understanding of the effects of dynamical processes on the whole multipartite quantum system. They can have consequences on each component of the system and could induce decoherence, relaxation and chaotic processes (\cite{breuer,lages,gedik,lages2,rossini,zhou,castanino,xu,brox}). In order to understand these problems we consider a spin chain, i.e. a set of $N$ $\frac{1}{2}$-spins two by two coupled to form a line chain.
The coupling is modelled by the Heisenberg or the Ising-Z interaction which allows a ``cohesion'' into the spin chain. Each spin of the chain is submitted to a train of ultra-short kicks which are disturbed by a chaotic dynamical process. The subjects concerning decoherence and chaotic processes of regularly kicked spin chains have been studied by some authors \cite{prosen,prosen2,prosen3,prosen4,lakshmin,boness,pineda}. In a previous paper \cite{viennot2013} we have extended the analyses to irregular kicks on spin ensembles without any coupling between the spins. Some interesting behaviours of the density matrix of the spin ensemble have been observed as for example an ``horizon of coherence'' for chaotic dynamics (it corresponds to the time from which the disorder of the kick bath is transmitted to the spins). However, this ensemble cannot be considered as a multipartite system (no information is exchanged between the spins) but only as a set of independent systems dephased during the evolution. A goal of the present paper is to see the behaviours and to understand the state modifications of a kicked spin chain coupled by the Heisenberg or the Ising-Z interaction when the dynamics of the ultra-short kick trains is chaotically disturbed. A main question is to know if the horizon of coherence remains in spite of the coupling between the spins and if it is possible to control the spins before this horizon using appropriate stationary kicks. In the paper \cite{aubourg} we have already seen the general behaviour of a ten spin chain coupled by a nearest-neighbour Heisenberg, Ising-Z and Ising-X interaction and submitted to various ultra-short kick dynamics (stationary, drift, microcanonical and Markovian). It results from this model that the coupling between the spins of the chain allows a better transmission of the disorder (the disorder into a spin chain being defined as a large difference between the states of the different spins) whatever the coupling. 
An initial dispersion of the kicks induces a disorder and an entanglement between the spins. The entanglement between two spins increases with the increase of the disorder, and so of the decoherence. The Ising-X coupling always induces decoherence, even if there is no kick. It is the worst model to realize quantum controls. So, the spin chain coupled by an Ising-X interaction will not be studied here. The behaviour of a spin chain coupled by an Ising-Z interaction is nearly identical to the one of a chain coupled by the Ising-X interaction except that there is an initial ``plateau'' of coherence. It allows a conservation of the coherence (which is not maximum) during a little number of kicks (see section \ref{consplateau}). The Heisenberg coupling seems to be the most efficient to realize quantum controls. This coupling is isotropic. Two neighbour spins tend to be in the same state due to the coupling if there is no kick. For this coupling, all spins follow the behaviour of the average spin of the chain. The dynamical processes describing the trains of ultra-short kicks can be considered as being induced by an environment which disturbs a primary train of kicks. The disturbance can attenuate the kick strengths and/or delay the arrival kicks. Since each kick train can be irregular, the spins can feel different trains. The set of kick trains is called a kick bath since we can assimilate the model to a spin chain in contact with a kind of classical bath. For a chaotic kick bath, the chaotic process is defined by continuous automorphisms of the torus, i.e a dynamics characterised by its matrix and by its modulo $2 \pi$ (this process have a lot of interesting properties, it is chaotic, ergodic, Anosov...). One of the advantages of the chaotic process is the property of sensitivity to initial conditions. This notion means that two points initially really close, do not remain close during the dynamics. 
They separate from each other after a time called the horizon of predictability. In our model, this horizon is the time from which the initially similar kick trains on different spins become different. In a spin ensemble, we have seen that this irregularity of the kicks induces an irregularity of the spin states from the horizon of coherence. This last horizon is larger than the horizon of predictability and corresponds to an initial conservation of the coherence. A spin can be assimilated to a qubit. The up state is supposed to be the value 1 and the down state the value 0. So a variation of the spin population can be identified as a variation of the quantum information and a fall of the coherence can be a loss of information. During the horizon of coherence, the coherence is conserved and so is all the information. But after it, the coherence falls to 0 and the information is completely lost. But we have only seen this phenomenon for a spin ensemble, in which the spins (or the qubits) cannot exchange any information. This paper studies the use of the interaction between the spins to show the possibility of realizing information transport from one spin to another and of controlling the spins (using stationary kicks) during the horizon of coherence.\\ This paper is organized as follows. Section II presents the model of the disturbed kicked spin chain. Section III is devoted to the behaviours of the spin chain submitted to chaotic kicks according to the kind of coupling: Heisenberg or Ising-Z. The last section deals with information transmission and control along the chain. The use of chaotic kick processes with the Heisenberg coupling allows us to give an interesting example of information transmission. We can extend the previous example to a control of a closed spin chain using stationary kicks disturbed by a chaotic dynamical process. \section{Dynamics of kicked spin chain} We consider an open chain of $N$ spins coupled by nearest-neighbour interactions.
A constant and uniform magnetic field $\vec B$ is applied on the spin chain inducing an energy level splitting by Zeeman effect. We denote by $\frac{\hbar \omega_1}{2}$ the energy splitting. At the initial time $t=0$, the chain can be coherent or incoherent. In a coherent case, the spins are in the same quantum state $|\psi_0\rangle = \alpha |\uparrow \rangle+ \beta |\downarrow \rangle$ ($|\alpha|^2+|\beta|^2=1$ with $\alpha,\beta\not=0$ -- $|\psi_0\rangle$ is a ``Schr\"odinger's cat state'' -- ). For $t>0$ the chain is submitted to a train of ultrashort pulses kicking the spins. Each pulse can be disturbed by a classical environment such that each spin ``views'' a different train (fig.~\ref{kickedspinbath2}). \begin{figure} \caption{\label{kickedspinbath2} \label{kickedspinbath2} \end{figure} Let $\omega_0 = \frac{2\pi}{T}$ be the kick frequency of the primary train. We suppose that the classical environment can attenuate kick strengths and can delay kicks. We denote by $\lambda_n^{(i)}$ and by $\tau_n^{(i)}$ the strength and the delay of the $i$-th kick on the $n$-th spin of the chain. Let ${H_{0_n} = \mathrm{id}^{\otimes (n -1)} \otimes \frac{\hbar \omega_1}{2} |\downarrow \rangle \langle \downarrow|} \otimes \mathrm{id}^{\otimes (N-n)}$ be the quantum Hamiltonian of the $n$-th spin with the Zeeman effect (where we have removed a constant value without significance) and $H_I$ be the nearest-neighbour interaction Hamiltonian which can be for the $n$-th spin of the chain one of the following operators \begin{enumerate} \item Heisenberg coupling \begin{equation} \label{heisenberg} H_{I_n} = -J \mathrm{id}^{\otimes (n -1)} \otimes (S_x \otimes S_x + S_y \otimes S_y + S_z \otimes S_z) \otimes \mathrm{id}^{\otimes (N-n-1)} \end{equation} where $S_i= \frac{\hbar}{2}\sigma_i$, $\{\sigma_{i}\}_{i=x,y,z}$ are the Pauli matrices and $\mathrm{id^{\otimes n}}$ is the tensor product of ``$n$'' identity matrices of order two. 
\item Ising-Z coupling \begin{equation} \label{isingz} H_{I_n} = -J \mathrm{id}^{\otimes (n-1)} \otimes S_z \otimes S_z \otimes \mathrm{id}^{\otimes (N-n-1)} \end{equation} \end{enumerate} Let $\theta = \frac{2\pi t}{T} = \omega_0 t$ be the reduced time. The quantum Hamiltonian of a kicked spin chain is \begin{equation} \label{dynamics} H(\theta) = \sum_{n=1}^N \Big( H_{0_n} + H_{I_n} + \mathrm{id}^{\otimes (n -1)} \otimes \hbar W \sum_{i \in \mathbb N} \lambda_n^{(i)} \delta\left(\theta - 2i \pi +\varphi_n^{(i)} \right)\otimes \mathrm{id}^{\otimes (N-n)} \Big) \end{equation} where $\delta(t)$ is the Dirac distribution and where the kick operator $W$ is a rank one projection : $W = |w \rangle \langle w|$ with the kick direction ${|w \rangle = \cos \vartheta |\uparrow \rangle + \sin \vartheta |\downarrow \rangle}$ (for the sake of simplicity we do not consider a relative phase between the two components of $|w \rangle$). $\varphi_n^{(i)} = \omega_0 \tau_n^{(i)}$ is the angular delay. The $i$-th monodromy operator (the evolution operator from $t=\frac{2 i \pi}{\omega_0}$ to $\frac{2(i+1)\pi}{\omega_0}$) \cite{viennot}, for the spins organized from the smallest delay (for $n=1$) to the greatest one (for $n=N$) is \begin{multline} \label{monodromy} U^{(i)} = e^{-\frac{\imath H_{0,I}}{\hbar \omega_0} (2\pi - \varphi_N^{(i)})} \prod_{n=1}^{N-1} \left[ \mathrm{id}^{\otimes (N-n)}\right. \otimes (id+(e^{-\imath \lambda_{N-n+1}^{(i)}}-1)W) \otimes \mathrm{id}^{\otimes (n-1)}\\ \left. \times e^{-\frac{\imath H_{0,I}}{\hbar \omega_0} (\varphi_{N-n+1}^{(i)} - \varphi_{N-n}^{(i)})}\right] \mathrm{id}^{\otimes (N-1)} \otimes (\mathrm{id}+(e^{-\imath \lambda_{ 1}^{(i)}}-1)W) e^{-\frac{\imath H_{0,I}}{\hbar \omega_0} \varphi_1^{(i)}} \end{multline} with $H_{0,I} = \sum_{n=1}^N H_{0_n} + \sum_{n=1}^{N-1} H_{I_n}$. We see that the monodromy operator is $2\pi$-periodic with respect to the kick strength. 
$\lambda_n^{(i)}$ is then defined modulo $2\pi$ from the viewpoint of the quantum system. Thus the strength-delay pair $(\lambda,\varphi)$ defines a point on a torus $\mathbb T^2$ which plays the role of a classical phase space for the kick train. So, we can consider the kick dynamics as being continuous automorphisms of the torus $\mathbb T^2$ like the Arnold's cat map (in \cite{viennot2013}). Let $|\psi^{(i)} \rangle \in \mathbb{C}^{2N}$ be the state of the chain at time $t=iT$ ($|\psi^{(i)} \rangle$ represents the ``stroboscopic'' evolution of the chain). By definition of the monodromy operator we have \begin{equation} |\psi^{(i+1)} \rangle = U^{(i)} |\psi^{(i)} \rangle \end{equation} The density matrix of the chain is then \begin{equation} \rho^{(i)} = \frac{1}{N} |\psi^{(i)}\rangle \langle \psi^{(i)}| \end{equation} and the density matrix of the $n$-th spin is \begin{equation} \rho_{n}^{(i)} = \Tr_{i=1,...,n-1,n+1,...,N}(\rho^{(i)}) \end{equation} $ \Tr_{i=1,...,n-1,n+1,...,N}$ is the partial trace on all the spin Hilbert spaces except the $n$-th one. It encodes two fundamental informations. The first information concerns the diagonal elements of the density matrix. They represent the occupation probabilities of the state $|\uparrow\rangle$ and $|\downarrow \rangle$ for the $n$-th spin and are called the populations ($\langle \uparrow|\rho_n^{(i)}|\uparrow \rangle$ and $\langle \downarrow|\rho_n^{(i)}|\downarrow \rangle$). The second one is associated with the non-diagonal terms. It is a measure of the entanglement of the $n$-th spin with the others of the chain \cite{breuer,bengtsson} and is called the coherence ($|\langle \uparrow| \rho_n^{(i)} |\downarrow \rangle|$). 
We deduce from $\rho_{n}^{(i)}$ the density matrix of the average spin of the chain for the $i$-th kick \begin{equation} \rho_{tot}^{(i)} = \frac{1}{N} \sum_{n=1}^N \rho_{n}^{(i)} \end{equation} The kick baths are also defined by the initial distribution of the first kicks $\{ (\lambda_n^{(0)},\varphi_n^{(0)}) \}_{n=1,...,N}$. Since the dynamical processes are considered as being chaotic, the kicks are characterised by the sensitivity to initial conditions. The first kicks are randomly chosen in $[\lambda_*,\lambda_*+d_0] \times [\varphi_*,\varphi_*+d_0]$ (with uniform probabilities) with a small $d_0$. $(\lambda_*,\varphi_*)$ can be viewed as the parameters of the primary kick train. The length of the support of the initial distribution (the initial dispersion) $d_0$ is the magnitude of the disturbance on the first kick.\\ Using the model described in this section, we want to know the effects of chaotic kick trains on a spin chain coupled by an Ising-Z or a Heisenberg interaction. Especially we are interested in controlling the informations of the system in spite of the kicks and of the coupling. But for a control, it is necessary that the spins of the chain remain coherent. For a sake of simplicity, in the following analyses, we consider that $\hbar=1$. \section{The chaos} We have shown in \cite{viennot2013} with the model of kicked spins without interaction, that a large coherence plateau appears when the classical kick dynamics is chaotic. 
In the same paper, we have found an empirical expression of the length of the plateau (corresponding to a kick number) which determines the horizon of coherence \begin{equation} \label{horizon} n_* = n_{\square} + \frac{1}{2} \sqrt{1 + \frac{8 S_{max}}{\ln |\lambda_+|}} - \frac{1}{2} \end{equation} $S_{max}$ is the maximum entropy, $\ln |\lambda_+|$ is the Lyapunov exponent of the dynamical system in the instable direction and $n_{\square}$ is the horizon of predictability of the kick bath and is given by the sensitivity to initial conditions of the chaotic dynamics. For a continuous automorphism of the torus, the horizon of predictability is given by \begin{equation} n_{\square} = \frac{\ln d_{\square} - \ln \frac{d_0}{\sin \gamma}}{\ln |\lambda_+ |} \end{equation} where $\gamma = \arctan \frac{e_+^{(\phi)}}{e_+^{(\lambda)}}$ is the angle between $e_+$ the instable direction of the automorphism matrix of the torus $\mathbb{T}^2$ ($\lambda_+$) and the strength axis ($\lambda$) of $\mathbb{T}^2$. The dispersion ($d_0$) of the projection of the initial distribution on the unstable axis is approximately $\frac{d_0 }{\sin \gamma}$. $d_{\square}$ is the microstate length of an equipartition of $\mathbb{T}^2$ ($\mathbb{T}^2$ is covered by a set of disjoint cells of dimensions $d_{\square} \times d_{\square} $ which constitute the classical microstates). It is important to note that with a spin ensemble, the horizon of coherence does not correspond to the horizon of predictability. It is larger and allows a conservation of the coherence. \\ This section studies the robustness of this horizon of coherence regarding to the interactions between the spins and the validity of eq.~\ref{horizon} in this context. This is a very important question, because the horizon of coherence is a time during which it could be possible to control the spins before the decoherence disturbs their quantum behaviours. 
\subsection{An almost destruction of the plateau} \label{sectionisingz} We consider a spin chain coupled by a nearest-neighbour Ising-Z interaction. Without any kind of kick, there is an oscillation of the coherence below the initial coherence value which is generally less important for the edge spin than for the others (because an edge spin has only one neighbour and so is ``less coupled'') as we can see on fig.~\ref{isingzfreeevolution}. \begin{figure} \caption{\label{isingzfreeevolution} \label{isingzfreeevolution} \end{figure} There is no modification of the population. Each one remains at its initial value even if the states of the spins are not the same. This is due to the fact that the coupling is completely diagonal and only induces a dephasing in the absence of kicks (see \ref{isingzdemo}). Thus, there is no information transmission between the spins. In order to see more precisely what happens, we consider a semi-classical analysis \footnote{A spin could be viewed as a classical magnetic moment vector, inducing a local magnetic field $\vec B_{loc} \propto \langle \vec S \rangle = \tr(\rho \vec S)$ (where $\vec S$ are the spin operators and $\rho$ is spin density matrix) which is felt by their neighbours. We talk about the (classical) spin orientation in place of the (quantum) spin state (a quantum spin state $\alpha |\uparrow \rangle + \beta |\downarrow \rangle$ being equivalent to the classical spin orientation $\theta = 2 \arctan \left| \frac{\beta}{\alpha} \right|$ and $\varphi = \arg \beta - \arg \alpha$, or in other words we identify the Bloch sphere (the space of the spin states without global phase) with a sphere of classical vector directions)} of the spin chain by the use of the Husimi distribution \cite{husimi}. 
This distribution is defined by : \begin{equation} H_n^{(i)}(\theta,\varphi) = | \langle \theta, \varphi|\rho_n^{(i)} |\theta, \varphi\rangle |^2 \end{equation} where $|\theta, \phi \rangle= \cos( \frac{\theta}{2})| \uparrow \rangle + e^{\i \varphi} \sin(\frac{\theta}{2})|\downarrow \rangle $ is the spin coherent state. The Husimi distribution measures the quasiprobability distribution of a quantum state onto the classical phase space (here, the sphere of the classical spin direction). This sphere will be represented by an azimuthal projection map (north pole at the center and south pole as being the limit circle). The entanglement processes are also shown by the Husimi distribution. The distribution becomes uniform for a maximal entanglement state. Figure \ref{husimiIsingZ} represents the evolution of the Husimi distribution with respect to the spin and to the kick number. We see that periodically, the spins become entangled (the distribution goes to the green colour). In \ref{isingzdemo}, we have obtained the value of the coherence of two spins coupled by the Ising-Z interaction. This term is $2\pi$ periodic (which explains the oscillation) and is inherent to the quantum aspect. Every time that there is a system where the coupling is completely diagonal, these oscillations appear. They are due to the interferences between the phases of the energies of each spin. \\ \begin{figure} \caption{\label{husimiIsingZ} \label{husimiIsingZ} \end{figure} For a spin chain coupled by an Ising-Z interaction and submitted to a kick bath disturbed by a chaotic process (a continuous automorphism of the torus), the coupling induces disorder and entanglement. The coherence and the populations go toward a microcanonical distribution (relaxation of the population toward $\frac{1}{2}$ and fall of the coherence to 0) and the entropy increases a lot. 
Figure \ref{Husimisingzentanglement} presents the evolution of the entropy (up) and of the Husimi distribution (down) of a seven spin chain chaotically kicked in a direction different from that of the eigenvectors. We see that the entropy increases rapidly and that the Husimi distribution tends to become entirely green, which is a sign of entanglement. The value of the maximum entropy corresponds to the moment when the Husimi distribution is closest to the green colour (about 13 kicks). \begin{figure} \caption{\label{Husimisingzentanglement} \label{Husimisingzentanglement} \end{figure} However, an interesting phenomenon appears for the coherence, which can be seen in fig.~\ref{plateau}: a small initial coherence conservation. This coherence plateau is described by the presence of some oscillations of the coherence before going to the microcanonical distribution (the coherence falls to nearly 0). This low coherence conservation is more visible for the individual spin coherence than for the coherence of the average spin of the chain because of the addition of the oscillations of the individual spins. During this plateau, there are some oscillations of the population before it relaxes toward $\frac{1}{2}$, the microcanonical distribution, when the coherence goes to zero. So, before the coherence goes to $0$, there is a small conservation of the spin information. The coherence plateau does not depend on the dynamics, on the initial dispersion nor, apparently, on the number of spins. It does not correspond to a maximal coherence and its value is about 0.2--0.3; it only depends on the coupling value. The larger the coupling is, the smaller the plateau is and the less visible it is for the average spin of the chain (the plateau always appears on the population and the coherence for an individual spin of the chain). This is not the plateau due to the chaotic process, because it ends before 23 kicks (obtained using eq.~\ref{horizon}).
This is a result of the Ising-Z coupling.\\ \begin{figure} \caption{\label{plateau} \label{plateau} \end{figure} The plateau linked to the horizon of coherence is only seen in one case for a spin chain coupled by an Ising-Z interaction : when the kick direction is $|\uparrow \rangle$ or $|\downarrow \rangle$, i.e. when the kick is in the direction of an eigenvector. Figure \ref{isingzgreatplate} is realized in this condition. It shows that each spin conserves a coherence with strong down oscillations whatever the kick number. There is no modification of the population. The behaviour of the coherence of the average spin is a little different. The coherence is conserved with large down oscillations only before the horizon of coherence delimited by the green vertical line on fig.~\ref{isingzgreatplate}. After it, the average of these oscillations falls to zero. The comparison between fig.~\ref{isingzfreeevolution} and \ref{isingzgreatplate} shows that before the horizon of coherence, the kicked chain has the same behaviour than a free chain. It is as if the spins do not feel the kicks. If we consider a spin without any interaction with its neighbours, we see in \ref{withoutcoupling} that the strength and/or the delay do not influence the population when the kick is in the direction of an eigenvector. The strength only induces a pure dephasing. But here, we have in addition a coupling between the spins. We have demonstrated on \ref{isingzdemo2} that two coupled and kicked spins never feel the delay (it does not appear in the evolution operator). For the strengths, two cases appear. If the kick strengths on two spins are the same, there is no effect on the coherence, but, if the kick strengths are different, the coherence is modified. This can be easily extended to a larger number of spins. The coupling induces a ``cohesion'' between the spins. If the cohesion is complete (same strength and delay) the system has a free behaviour. 
But if the cohesion is lost, when the kick bath disorder (different strengths) is transmitted to the spin chain, there are some coherence interferences which can induce a loss of the quantum property. Before the horizon of coherence, the spins are quantum and in a state superposition. After it, the fall of the coherence means that the spins become classical: they are either in the up or in the down state with a probability given by the up and the down population.\\ \begin{figure} \caption{\label{isingzgreatplate}} \end{figure} \begin{figure} \caption{\label{isingzplateauinteraction}} \end{figure} The comparison with the results obtained without interaction (the yellow point curve in fig.~\ref{heisenbergplate} for fifty spins), gives the same coherence plateau and the same fall of the average coherence after the horizon of coherence but without the down oscillations. In order to know if eq.~\ref{horizon} is still correct (the green vertical line in fig.~\ref{isingzgreatplate} and \ref{isingzplateauinteraction}) for the average spin of a chain coupled by an Ising-Z interaction we have to see the evolution of the coherence plateau with the interaction parameter. Figure~\ref{isingzplateauinteraction} shows the coherence (up) and the entropy (down) of the average spin of a ten spin chain with respect to the kick number and to the interaction parameter. If the interaction parameter is too small, the plateau disappears. We are nearly in the case of ten spins without interaction. This spin number is not sufficient to see the coherence plateau when the spins are not coupled. If $\frac{J}{w_0}$ is large enough, we see coherence and entropy oscillations before the horizon of coherence. After it, they fall to 0. In the entropy graphic, we see that the horizon of coherence always corresponds to the kick number for which the entropy oscillations begin to decrease and so the average oscillations begin to increase.
For an Ising-Z coupling, the empirical formula also corresponds to the kick number for which the entropy begins to increase. \\ If we chaotically kick the spins in a direction which does not correspond to an eigenvector, there is a loss of the information and the coherence goes to $0$. In this condition it is impossible to realize a control of the information even during the initial little plateau. The kicks in the direction of an eigenvector allow, before the horizon of coherence, a kind of conservation of the coherence with large down oscillations. In addition, whatever the strengths and the delays of the kicks, there is no modification of the populations, so nothing can be controlled. Thus this model is not efficient to realize quantum control and information transmission. This coupling could eventually be interesting if we want to conserve the spin state and if we can force the environment to be in an eigenvector direction. In this case and only in this one there is a conservation of the spin state. \subsection{Conservation of the plateau} \label{consplateau} Consider now a spin chain coupled by a Heisenberg interaction. We have reminded in the introduction that this interaction is isotropic (two coupled spins tend to be in the same state or to become entangled if they cannot), and each spin follows the average evolution. Figure~\ref{heisenbergplate} shows the evolution of the coherence of the average spin of five chains of ten spins, of one spin of one chain and of an ensemble of fifty spins. The spins of the chains are submitted to the Heisenberg interaction and the classical dynamics is chosen to be Arnold's cat map. The up graphic is for a kick in the direction of an eigenvector ($|\uparrow \rangle$ or $|\downarrow \rangle$) and the down one is when the kick direction is a superposition of both eigenvectors of a spin. For both cases, all the coherence curves are merged.
We have the same behaviour for a spin chain than for a spin ensemble with the particularity that each spin exhibits the coherence plateau. The Heisenberg coupling allows to conserve for spin chains and for each spin of the chains, the interesting result obtained for a spin ensemble. \begin{figure} \caption{\label{heisenbergplate} \label{heisenbergplate} \end{figure} The length of the coherence plateau does not change for a coupled spin chain and corresponds to the kick number given by the eq.~\ref{horizon}. This can be better seen using the entropy, fig.~\ref{entropy}. The quantum entropy, the entropy into the spin chain is measured by the von Neumann entropy \begin{equation} S_{vN,n}=- \gamma tr(\rho_n \log \rho_n) \end{equation} The factor $\gamma$ is arbitrary. To define the classical entropy it is necessary to introduce a partition of the phase space $\mathbb{T}^2$. Let X be this partition. The dimension of the phase space is $2 \pi \times 2 \pi$ and the partition is chosen to be $\{i\frac{\pi}{64}\}_{i = 0, ..., 128} \times \{j\frac{\pi}{64}\}_{j = 0, ..., 128}$. A cell of $X$ constitutes one of the classical microstates for one kick train. The classical entropy, the entropy into the kick bath, is defined by the Shannon entropy \begin{equation} S_{Sh,n}=\theta \sum_{i,j} - p_{ij,n} \ln p_{ij,n} \end{equation} where $p_{ij,n}$ is the fraction of kick trains which are in the microstate $(i,j)$ at the $n$-th iteration and $\theta$ is another arbitrary factor. The arbitrary factor in the von Neumann and in the Shanon entropy is chosen in order to have a same maximum for the classical and the quantum entropy. \begin{figure} \caption{\label{entropy} \label{entropy} \end{figure} \begin{figure} \caption{\label{entropieheisenberg} \label{entropieheisenberg} \end{figure} The entropy is a measure of disorder. In the kick bath, the disorder is given by a variation of the kick strengths and delays received by the spins. 
In the spin chain, the quantum entropy corresponds to a large difference between the states of the spins into the chain and/or to a large entanglement and comes from the kicks (since we choose all spins initially in the same state). The classical entropy (the disorder of the kick bath) begins from the horizon of predictability and the quantum entropy (the disorder of the spin chain), begins from the horizon of coherence. Even if the interaction and the entanglement allow a better transmission of the disorder into the spin chain (see \cite{aubourg}), the time required for the transmission of the disorder from the classical bath to the quantum one is the same than without interaction. We also see that the entropy of the spin chain increases only if the cumulated classical entropy exceeds a threshold value, $S_{max}$. \\ For only five or seven coupled spins, the quantum entropy follows the evolution obtained for fifty spins without interaction, which is not the case for the classical entropy. The classical entropy can be modeled by the Kolmogorov-Sina\"i entropy \cite{benoist} which requires a large number of kick trains. From fifty kick trains (and so fifty spins), the evolution of the classical entropy corresponds to the Kolmogorov-Sina\"i prediction. So, the notion of ``a large number of spins'' is different according to the disorder is quantum or classical. The disorder into the kicks, which is a classical disorder, requires a large number of kick trains to be in conformity with the prediction (Kolmogorov-Sina\"i) whereas a lower number of spins is sufficient to see the disorder into the spin chain. \\ In order to know the modifications of the horizon of coherence when the spins are coupled by a Heisenberg interaction, we have to see the evolution of the coherence and of the entropy with respect to $\frac{J}{w_0}$. Figure \ref{entropieheisenberg} shows the coherence (up) and the quantum entropy (down) evolution with respect to the kick number. 
When $J<w_0$ the entropy and the coherence behave as a spin ensemble and the empirical formula given by eq.~\ref{horizon} can be used here. The dynamics induced by the kicks dominates the internal dynamics of the chain. In this condition, the results can be compared with the one of a spin ensemble. But, if $J>w_0$ the entropy increase and the fall of the coherence begin earlier and earlier. There is still a horizon of coherence but it cannot be predicted by eq.~\ref{horizon}. The interne dynamics induces more disorder and other phenomena which are not taken into account in eq.~\ref{horizon}. \\ Contrary to the results obtained for a spin chain coupled by an Ising-Z interaction, the Heisenberg coupling allows to conserve for each spin and for the average spin of the chain, the coherence during the horizon of coherence. The analysis of the entropy allows to confirm the length of the plateau given by eq.~\ref{horizon} only when $J\leq w_0$. In this way, we can think that the information transmission between the spins can be conserved and well performed and maybe that some controls can be realized during this horizon. In the next section, we choose to stay in the case where $J\leq w_0$ in order to know the value of the horizon of coherence. \section{The Heisenberg coupling, an appropriate interaction to realize information transmission and control during the horizon of coherence} We have just seen that for a spin chain coupled by a Heisenberg interaction and submitted to a chaotic kick bath, there is a time during which the spins conserve their coherence. The conservation of the coherence is linked to a conservation of the information. For kicks no in a direction on an eigenvector, there is an oscillation of the population and of the coherence due to the kicks. So, before the horizon of coherence, the coherence is conserved with down oscillations. 
But, if the kicks are in the direction of an eigenvector, during the horizon, even if the spins are kicked, there is a complete conservation of the coherence without any oscillation. Since the spin could represent a qubit, we will consider the wave observed on the density graphics as an information transmission along the spin chain. The first subsection is devoted to the means to conserve the information during the horizon of coherence. The second one talks about the manner of transmitting an information. The last one uses the information obtained in the second subsection in order to realize a control during the horizon of coherence using stationary kicks on a closed spin chain. For all the following analysis, we consider that $J\leq w_0$. In this case, we can predict the value of the horizon of coherence and the information transmissions in the density graphic are visible. We also choose this condition in order that the control of the dynamics by the kicks dominates compared to the internal dynamics of the chain. \subsection{Information conservation} We consider a spin chain coupled by a nearest-neighbour Heisenberg interaction. Figure \ref{heisenbergdensiteonde} represents the evolution of the up population of each spin with respect to the time. All spins are in the initial state $\frac{1}{\sqrt{5}} (|\uparrow \rangle + 2|\downarrow \rangle)$ except the center one (here the fourth) which is in the up state. Since there is no kick, we see an information transmission between the spins due to the Heisenberg coupling represented by the orange-yellow colour (the Heisenberg coupling is isotropic and induces a same state for the coupled spins). This figure presents density peaks (yellow, orange and white colour) which result from the interferences between the various waves. 
The more yellow point at the end of the graphic seems to be a revival of the initial wave (at $t=0$) and at the middle, the peak looks like an inverse revival of the information (the populations are inverted) : this graphic looks like a wave revival. However the Fourier transform of the population with respect to the time, right graphic on fig.~\ref{heisenbergdensiteonde}, presents a broadband which is a signature of chaotic oscillations \cite{broad}. Thus, the wave packet does not have a complete revival. We called this phenomenon an almost-revival. \begin{figure} \caption{\label{heisenbergdensiteonde} \label{heisenbergdensiteonde} \end{figure} \begin{figure} \caption{\label{heisenbergdensitechaotiqueonde} \label{heisenbergdensitechaotiqueonde} \end{figure} The up graphic on fig.~\ref{heisenbergdensitechaotiqueonde} is the same than the left one on fig.~\ref{heisenbergdensiteonde} except that all spins are kicked in the direction of an eigenvector. The kicks are disturbed by a chaotic dynamics. We clearly see that the information is completely transmitted along the spin chain until a certain number of kicks, in exactly the same manner than when there is no kick. The kick number for which the information transmission is stopped corresponds to the duration of the horizon of coherence. The second graphic of fig.~\ref{heisenbergdensitechaotiqueonde} shows that the average spin of the chain has a coherence which falls after the horizon of coherence. It is the same thing for one spin of the chain but with large oscillations. These oscillations appear because the coupling is chosen to be not too large in order to see the information transmission and to have a prediction of the value of the horizon of coherence (for a large coupling, the oscillations are really fast). The spins are kicked differently from the horizon of coherence, which explains the lost of information transmission. 
However, if the kick direction does not correspond to an eigenvector, the information wave cannot be seen, as in fig.~\ref{heisenbergdensitechaotiquew}. A kick in the direction of an eigenvector allows a transmission of information before the horizon of coherence as if there is no kick. The demonstration is made on \ref{force} and shows that if the strengths are the same for all spins, they do not affect the population. In the case of a chaotically kicked spin chain, all trains of kicks are almost similar until the horizon of predictability. The spins only feel the difference at the horizon of coherence. So before the horizon, the population is not modified by the kicks and we only conserve the coupling variations. Inversely, if there is a modification of the strength kicks between two kick trains or more, the spin populations are not modified in the same manner. This induces a lost of coherence. Since the coupling induces a ``cohesion'' between the spins, if the kicks are all the same the cohesion remains, so the population and the coherence do not change. But if the kicks are modified, when the spins feel this modification, the cohesion into the chain is disturbed and some interferences between the coherence wave appears. If the kicks are not in an eigenvector direction, they modify the spin states and so the spin populations. The interaction can also add some population modifications because it induces a same state for the coupled spin. It produces a modification of the states and same an entanglement between the spins if their states are too different. For a kick not in the direction of an eigenvector (down graphic of fig.~\ref{heisenbergplate}), some oscillations appear during the coherence plateau. At the beginning of the dynamics, all spins are in the same state. The kicks on the spins are approximately the same (the initial dispersion of the initial strengths and delays of the kicks is small). 
So, no disorder is transmitted from the kick bath to the spin chain. But, the kick direction (for a superposition) modifies the spin states and disturbs the transmission of information (up graphic of fig.~\ref{heisenbergdensitechaotiqueonde}). The states of the spins can be more or less close to a classical state and then can lose or gain some coherence. This explains the presence of some coherence oscillations before the horizon of coherence and the lost of information in fig.~\ref{heisenbergdensitechaotiquew}. But, if the kick direction is the one of the eigenvectors, we only conserve the modification due to the interaction and not the one due to the kicks. The up graphic of fig.~\ref{heisenbergplate} shows that kicks in the direction of an eigenvector do not modify the coherence before the horizon of coherence, i.e before the dispersion induced by the sensitivity to initial conditions.\\ \begin{figure} \caption{\label{heisenbergdensitechaotiquew} \label{heisenbergdensitechaotiquew} \end{figure} If the kicks are not in a direction of an eigenvector, the states of the spins are completely modified. Since the automorphism of the torus induces all the time a variation of the strength and of the delay, sometimes the strength is larger than other times, and so sometimes the spins are more in the direction of the kicks than other times. It is really complicated to realize a control in this condition. However a spin chain coupled by a Heisenberg interaction and kicked in an eigenvector direction transmits all the information (until the horizon of coherence) like a no kicked chain. So we can realize some information transmission during the horizon of coherence. \subsection{Information transmission} We consider a spin chain coupled by a nearest-neighbour Heisenberg interaction where $J \leq w_0$. Because of the Heisenberg coupling, two neighbour spins tend to be aligned in the same direction. 
This allows to obtain an information transmission if two neighbour spins are not in the same initial state. Let the spins be submitted to a chaotic kick bath and be initially in the state $\frac{1}{\sqrt{17}} (|\uparrow \rangle + 4|\downarrow \rangle)$ except the first one which is in the up state. We have just seen that for a spin chain chaotically kicked in the direction of an eigenvector, before the horizon of coherence, the spin state evolution is only due to the coupling. But after this horizon there is a modification of the population, a fall of the coherence and an increase of the entropy. If the kick is not in an eigenvector direction, there exist two kinds of oscillations of the population. The first oscillation is due to the kick and the spin frequency and corresponds to the carrier wave. The second one results from the coupling and is the envelope. For a kick not in a direction of an eigenvector, both of these oscillations describe the population behaviours. But, for a kick in a direction of an eigenvector, there are only the oscillations due to the coupling if all spins are initially similarly kicked. We want to know the number of spins through which the information passes during the horizon of coherence, with respect to $w_0$ (the kick frequency). We remind that the horizon of coherence can be predicted using eq.~\ref{horizon} (because we are in the condition $J\leq w_0$). Consider fig.~\ref{informationtransmission}. We see a variation of the number of spins reached by the information before the horizon of coherence with respect to $w_0$. Especially, the more $w_0$ increases, the smaller the number of reached spins is. In the monodromy operator eq.~\ref{monodromy}, $w_0$ is only included in $e^{-\imath \frac{H_{0, I}}{\hbar w_0}}$. When $w_0$ tends to zero, this exponential presents a lot of fast oscillations and behaves as if it were equal to zero (Riemann-Lebesgue lemma), and if $w_0$ is large it tends to one.
If the exponential tends to one, the impact of this factor on the spin states is lower than if this factor tends to 0, which is in agreement with our observations. We can also make this analysis by considering the variation of the interaction parameter. The results will be the same. Physically, larger the interaction parameter is, faster the spins tend to be in the same state and so faster they transmit their information. \begin{figure} \caption{\label{informationtransmission} \label{informationtransmission} \end{figure} \begin{figure} \caption{\label{spintransmission} \label{spintransmission} \end{figure} In order to obtain the number of spins reached by the information before the horizon of coherence, we need to know the transmission velocity. Figure \ref{spintransmission} presents the up population evolution of the seven spins of a chain coupled by a Heisenberg interaction with respect to the kick number and corresponds to the up left graphic of fig.~\ref{informationtransmission}. Each spin transmits its information to its neighbours. The state of the first spin is up. It transmits its information to the second spin. The state of the second spin depends on the state of its two neighbours and it tends to be a superposition of them. It is the same thing for the other spins. In addition, the up populations of the spins do not decrease to $0$ but to a value upper than $0$ at the end of an oscillation. So each spin conserves a little information which explains the decrease of the peak height of the up state from one spin to the following one with the kick number. The last spin has a single neighbour, it is only influenced by it. This is like a wave in a box, it has an increase of the information of the previous spins (a kind of concentration of the wave). We observe the classical phenomenon of signal scattering during its propagation (the spreading of the signal with an attenuation of its maximal intensity). 
Here the signal corresponds to the population with the maximal up state which spreads along the chain.\\ We want to obtain the oscillation period of one spin coupled with only one spin (so an edge spin in our case). The interaction Hamiltonian of two coupled spins is given by the following matrix \begin{equation} \begin{pmatrix} -\frac{J \hbar}{4 w_0} & 0 & 0 & 0 \\ 0 & \frac{J \hbar}{4 w_0} & -\frac{J \hbar}{2 w_0}& 0 \\ 0 & -\frac{J \hbar}{2 w_0} & \frac{J \hbar}{4w_0}& 0 \\ 0 & 0 & 0 & -\frac{J \hbar}{4 w_0} \end{pmatrix} \end{equation} The coupling part is in the middle of this matrix with the non-diagonal terms. We consider the matrix block associated with the states $(|\uparrow \downarrow \rangle, |\downarrow \uparrow \rangle)$, $\begin{pmatrix} \frac{J \hbar}{4 w_0} & -\frac{J \hbar}{2 w_0} \\ -\frac{J \hbar}{2 w_0} & \frac{J \hbar}{4w_0} \end{pmatrix}$ for which the eigenvalues are $\lambda_{\pm}= \frac{J \hbar}{4 w_0} \pm \frac{J \hbar}{2 w_0}$. Then the frequency of the Rabi oscillations for a spin which has only one neighbour corresponds to $\lambda_{+}-\lambda_{-} = \frac{J \hbar}{ w_0}$. An edge spin has an oscillation period of \begin{equation} T^{eff}_{edge}=\frac{w_0}{J \hbar}. \end{equation} A spin with two neighbours has its frequency multiplied by two and so its period divided by two \begin{equation} T^{eff}_{mid}=\frac{w_0}{2 J \hbar} \end{equation} During the information propagation, there is a wave packet spreading. So, the oscillation period of each spin increases during the propagation of the information. This phenomenon can be seen in fig.~\ref{dispersiononde}. The more the time increases, the more the wave packet is spread on a larger number of spins. The oscillation period of the first spin is $T^{eff}_{edge}$. But the second one, which has two neighbours, receives the information from a spin which only has one neighbour and so does not have the same oscillation period as itself.
The oscillation period of the second spin is then the average between the one of one spin with two neighbours and the one for one spin with only one neighbour \begin{equation} T^{eff,2}_{average}=\frac{1}{2} \left(\frac{w_0}{J \hbar} +\frac{w_0}{2 J \hbar}\right) \end{equation} In the same way, for the other spins, we obtain the oscillation period \begin{equation} T^{eff,n}_{average}=\frac{1}{2} (T^{eff,n-1}_{average}+T^n) \end{equation} with $T^n= T^{eff}_{edge}$ or $T^{eff}_{mid}$ the oscillation period of the $n$-th spin only induced by the nearest-neighbours. \begin{figure} \caption{\label{dispersiononde}} \end{figure} Now, we know the oscillation period of all spins of the chain. If we obtain the time to transmit the information from the maximal up population of one spin to the maximal up population of the following spin, we have all data that we need. Consider a spin called ``$sp$'' which has two nearest-neighbours. This spin has its maximal information when the one before it and the one after it cross each other, which is well seen in fig.~\ref{spintransmission} and \ref{dispersiononde}. This is only seen for the first transmission from the first spin to the last one, i.e.\ only for a one-way transmission of the information and not for the return way because of the scattering and the interferences. On fig.~\ref{spintransmission}, at $t=0$ only the first spin has the information. When $t$ increases, the number of spins reached by the information increases, but also, the wave spreads on a larger number of spins. The up population of the spin before the spin $sp$ decreases whereas the one after it increases. The maximal information that the spin $sp$ can obtain is when its neighbours have the same information and so when they cross each other. Since the shape of the wave packet is symmetric with respect to the maximal up population, the spin $sp+1$ has the maximal information when the spin $sp$ is at a quarter of its oscillation.
The dispersion and the interferences of the wave packet induces that it is hard to obtain the value of the up population of all spins with the time. The dispersion is not only between three spins but more. One spin population has its maximum at half of its oscillation period and transmits it at three-quarter of it. Then, \begin{equation} T_{Trans}^n = \frac{1}{4} T^{eff,n}_{average} \end{equation} This does not concern the last spin of the chain in the transmission direction. The last spin has twice the period of a middle spin. So $T ^N_{Trans}$ has to be multiplicated by two. Finally a complete period of information transmission from the first spin to the last one is (a one-way) \begin{equation} P= \frac{3}{4} T^{eff,2}_{average} + \sum_{n=3}^{N-2} T_{Trans}^n + 2T_{Trans}^{N} \end{equation} where $N$ corresponds to the number of spins. The first term gives the time to obtain the maximum information of the third spin, the second gives the quarter of an oscillation period of the spins from the third to the second to last one, and the last term is linked to the maximal information of the last spin of the chain. 
For the model chosen, \begin{multline} P=\frac{3}{4}\left[\frac{1}{2}\left(\frac{w_0}{\hbar J} +\frac{w_0}{2\hbar J} \right) \right] + \frac{1}{4}\left[\frac{1}{2}\left(\left[\frac{1}{2}\left(\frac{w_0}{\hbar J} +\frac{w_0}{2\hbar J} \right) \right] +\frac{w_0}{2\hbar J}\right) \right]\\ + \frac{1}{4}\left[\frac{1}{2} \left(\left[\frac{1}{2}\left(\left[\frac{1}{2}\left(\frac{w_0}{\hbar J} +\frac{w_0}{2\hbar J} \right) \right] +\frac{w_0}{2\hbar J}\right) \right] +\frac{w_0}{2\hbar J} \right)\right]\\ + \frac{1}{4}\left[\frac{1}{2}\left( \left[\frac{1}{2} \left(\left[\frac{1}{2}\left(\left[\frac{1}{2}\left(\frac{w_0}{\hbar J} +\frac{w_0}{2\hbar J} \right) \right] +\frac{w_0}{2\hbar J}\right) \right] +\frac{w_0}{2\hbar J} \right)\right] +\frac{w_0}{2\hbar J} \right) \right]\\ + \frac{1}{2}\left[\frac{1}{2}\left(\left[\frac{1}{2}\left( \left[\frac{1}{2} \left(\left[\frac{1}{2}\left(\left[\frac{1}{2}\left(\frac{w_0}{\hbar J} +\frac{w_0}{2\hbar J} \right) \right] +\frac{w_0}{2\hbar J}\right) \right] +\frac{w_0}{2\hbar J} \right)\right] +\frac{w_0}{2\hbar J} \right) \right] + \frac{w_0}{\hbar J} \right)\right] \end{multline} Finally, to obtain the number of spins ($nsp$) reached by the information, we calculate \begin{equation} nsp=\frac{n_*}{P} \times N -NTurn \end{equation} $NTurn$ is the number of one-way transmissions from the first spin of the chain to the last one in the direction of the transmission. This number has to be removed in order to not add the last spin or the first one two times. In order to know the value of the horizon of coherence ($n_*$), we use eq.~\ref{horizon}. To obtain it, we realize a simulation with 700 classical systems (700 trains of kicks). For this study we need a large number of classical systems in order that the Kolmogorov Sina\"i analyses would be efficient. With this number of spins, the horizon of coherence of fig.~\ref{informationtransmission} is approximately 50. 
On the entropy graphic (the down one) of fig.~\ref{informationtransmission}, we see that the entropy begins to increase at 50 kicks. However, the increase is relatively low. The large increase begins approximately at 55 kicks. This value is in accordance with when the disorder begins to be visible on the density graphics of the same figure. So let $n_* = 55$. The prediction of the number of spins reached by the information, for the down graphic of fig.~\ref{informationtransmission} is 3.5, the prediction for the second one is 7 and for the upper one, the prediction is 13. These values correspond to what we obtain on the graphics. But it is necessary to watch out. We use a nearest-neighbour interaction. So the information of one spin is transmitted to two spins. To simplify the calculation the possibility of an information revival caused by the wave interferences is not taken into account. We only consider the transmission of the information of the first spin.\\ \begin{figure} \caption{\label{heisenberg}} \end{figure} We can also observe another behaviour in fig.~\ref{heisenberg}. In this one we have alternated the spin states. If the position of the spin in the chain is odd, then the spin state is $|\psi_{2n+1} \rangle =\frac{1}{\sqrt{10}}(3 |\uparrow\rangle + |\downarrow \rangle)$, and if it is even, $|\psi_{2n} \rangle=\frac{1}{\sqrt{10}}( |\uparrow\rangle + 3 |\downarrow \rangle)$. The horizon of coherence is about 13. As previously, we can know the states of the spins at the moment of the horizon of coherence with respect to $w_0$ and to the interaction parameter. There is also another effect well seen in this graphic. There is a kind of state freezing. The up state of the fourth spin at the horizon of coherence is conserved for a large number of kicks. We can also see this effect for the other density graphics.
This phenomenon is explained on the next section.\\ Note : For a sake of simplicity, we choose to not consider the case where the spins are almost all in the state $|\uparrow \rangle$ and/or $|\downarrow \rangle$, i.e. in the direction of an eigenvector. If the spin direction is initially near to an eigenvector, at $t=0$, there is no coherence between the spins because they are in a classical direction. So the effect of the horizon of coherence like we have described it in the third section (with a fall of the coherence and a large increase of the entropy) is not present. However, the results will be the same. From the time which corresponds to the horizon of coherence and at each kicks, the spins feel different kick strengths and delays. So the spins react to the kicks which induce that the information transmission is stopped. So the effect is the same.\\ We can now determinate the information transmission time between the spins coupled by a nearest-neighbour Heisenberg interaction. Using these analyses, it can be interesting to see if it is possible to realize a control experience. We have just observed what happened if we kick chaotically the system. Since the strength and the delay are modified all the time from one kick to another due to the automorphism of the torus, it may be interesting to considered other kind of kicks. \subsection{Control of the information transmission} We consider a closed spin chain where each spin is submitted to a nearest-neighbour Heisenberg interaction. This model requires a modification to complete the interaction Hamiltonian as follows \begin{equation} H_{I} = \sum_{n=1}^{N-1} H_{I_n} -J(S_x\otimes \mathrm{id}^{\otimes N-1} \otimes S_x + S_y \otimes \mathrm{id}^{\otimes N-1} \otimes S_y + S_z \otimes \mathrm{id}^{\otimes N-1} \otimes S_z) \end{equation} where we have just added the interaction term between the first and the last spin. 
\\ Let all spins be in the initial state $\frac{1}{\sqrt{17}}(|\uparrow\rangle + 4 |\downarrow \rangle)$ except the first one which is in the state $|\uparrow\rangle$. Without any kick we obtain a free information transmission between the spins as we can see on the left density graphic of fig.~\ref{evolution_ss_frappe}. \begin{figure} \caption{\label{evolution_ss_frappe}} \end{figure} Since the chain is closed, the information of the first spin is transmitted to the second and to the ninth spin. For the control, we would like the information to go only in one direction, toward the second spin. For this, we calculate the oscillation period of the first spin as if it had one neighbour, i.e. $T^{eff}_{edge} = \frac{\omega_0}{J \hbar}$. Here, we have the half of its oscillation, so $T^1 = \frac{\omega_0}{2J \hbar} = 10$. We choose to kick the ninth spin in order that it remains in a state near to $|\downarrow\rangle$ during the first oscillation of the first spin (approximately ten kicks). Thus, the first spin can only transmit its information to the second spin and behaves as if it had only one neighbour (this explains the calculation of the oscillation period of the first spin). For this control model, it is not interesting to use the dynamic of the chaotic kicks before the horizon of coherence. If we kick a spin chaotically, there is a modification of the strength and of the delay. Thus sometimes the spin is less kicked and can oscillate more. If the kicks are stationary, they are all similar and with a large strength. This forces the spins to stay in a state near to the down one. This control is represented on the right density graphic of fig.~\ref{evolution_ss_frappe}, which is what we want to obtain.\\ This model is interesting because it does not present any interference between the spins. We force the initial information to go in one direction, toward the second spin. We introduce a way to transmit the information. 
In addition there is a quarter of period during which the spin $n+1$ has its information which decreases and the spin $n$ has no information. So the probability that the spin $n+1$ transmits again an information to the spin $n$ is really low. \\ We can now stop the information transmission. We choose to stop the information when it is on the fifth spin and when it has crossed it two times. We obtain fig.~\ref{information_transmission}. In order to obtain this graphic, we have to calculate the oscillation period of each spin and more precisely the period of two oscillations for the spins 1, 2, 3 and 4, and the period of only one oscillation for the spins 6, 7, 8 and 9. After that, we have to kick them with stationary kicks in the down direction when their information is the lowest. This allows us to concentrate the information on the fifth spin. \\ \begin{figure} \caption{\label{information_transmission}} \end{figure} Note : Here each spin is kicked at an appropriate time in order that each one is nearly in the state of the kick. However if we kick all spins (except the fifth one) at the same time, each spin can have a state near to or different from the kick state. This induces an oscillation of the population, and a lower or no information concentration. An example is given on fig.~\ref{informationtransmissionmminstant}. In order to perform a control, it is more efficient to kick the spins always in their states. It is also better to kick them in the direction of an eigenvector so that they oscillate less.\\ \begin{figure} \caption{\label{informationtransmissionmminstant}} \end{figure} This last analysis allows us to understand why there is a kind of freezing of the last spin state in fig.~\ref{heisenbergdensitechaotiqueonde}, \ref{informationtransmission} and \ref{heisenberg}. We have just seen above that if we kick one spin at the appropriate time, we can force it to stay in the kicked state. 
Consider for example the right density graphic of fig.~\ref{heisenbergdensitechaotiqueonde}. At the beginning, there is a free variation of the spin oscillations. We only see the oscillation due to the interaction between the spin because all spins are nearly kicked similarly. But, after the horizon of coherence, all spins are kicked differently. So the spins react to the kicks. At the time of the horizon of coherence, all spins are nearly in the down state except the fourth one which is in a superposition $\alpha |\uparrow> + \beta |\downarrow>$ with $\alpha > \beta$. So all spins around the fourth one are forced to stay in their directions. This shows a kind of freezing. Thus we can conserve the information, like the control in this part, but with chaotic kicks.\\ \begin{figure} \caption{\label{evolution_avec_frappe} \label{evolution_avec_frappe} \end{figure} In this section, until now, we have just made a perfect control, i.e. nothing disturbed the kicks. We now introduce a chaotical disruption of the kick. Let the two kinds of data $(\lambda_n^i, \phi_n^i)$ be respectively the strength and the delay of the $i$-th kick on the $n$-th spin associated with the perfect control solution. We introduce another kick set $(\lambda_n^{dist.,i},\phi_n^{dist,i})$ which corresponds to the disruption induced by a chaotic dynamical process. The initial dispersion is chosen to be really small and the initial kick parameters are near to $0$ (we only want the chaotic effect and not the parameter propagations on the phase space induced by the automorphism on the torus). The new kicks are defined by $(\lambda_n^i, \phi_n^i) + (\lambda_n^{dist.,i},\phi_n^{dist,i})$. We obtain fig.~\ref{evolution_avec_frappe}. On the up graphic, we see that before 80 kicks, there is no modification of the control information. After it, the information stopped on the fifth spin begins to be scattered on the other spins. 
This is also seen on the down graphic which represents the entropy evolution with respect to the kick number. If there is no disruption of the control kicks, at the time when the information is stopped, the spin five has a large entropy whereas for all the others it is lower. When we add the chaotic disruption, the evolution is the same until the horizon of coherence where the entropy is large for all spins with a lot of oscillations (because the interaction is low).\\ This section allows us to perform a control. We have just seen that a free transmission of information during the horizon of coherence appears when the spins are chaotically kicked in the direction of an eigenvector. We have also realised a control by changing or concentrating a spin information. For this we have used some stationary kicks before the horizon of coherence. In this subsection, for the control, the propagation of the kick parameters on the phase space before the horizon of predictability is considerably reduced because we choose to begin from $\lambda=0$ and $\phi=0$ (we only conserved the effects of the chaos). In this condition we can think that we can take other directions of kicks. However, other kick directions generally produce some population and coherence oscillations. For a kick on only one spin, as for example what we have made on fig.~\ref{evolution_ss_frappe}, we can use another kick direction and the results are generally correct, or better if the spin state is really different from an eigenvector. For example, if the spin states are $\frac{1}{\sqrt{2}}(|\uparrow\rangle + |\downarrow \rangle)$ except the first one which is in the up state, kicking in the direction of an eigenvector destroys all the spin information which stays near to $\frac{1}{2}$. In this case it is better to kick in the spin direction. But if we kick several spins, different oscillations appear which are transmitted to the other spins by the interaction. The spin information is then completely lost. 
\section{Conclusion} In this paper, we have studied the behaviours of a spin chain submitted to a kick bath. The kicks are disturbed by chaotic dynamics which are given by the continuous automorphisms of the torus. The spins of the chain are coupled by a nearest-neighbour Heisenberg or Ising-Z interaction. With the Ising-Z coupling the system evolution is characterised by a coherence plateau and a horizon of coherence which are present when the kicks are in the direction of an eigenvector. The length of this plateau is well predicted by eq.~\ref{horizon}. The most interesting case in order to control the system is the Heisenberg coupling. This coupling presents the coherence plateau and the horizon of coherence which can be predicted by eq.~\ref{horizon} for the condition $J \leq w_0$. The coherence conservation is present for all kick directions. We have seen that this coupling allows a conservation and a transmission of information during the horizon of coherence when the kicks are in an eigenvector direction. It is also possible to make some predictions concerning the spin chain evolution with respect to the interaction and/or to the kick frequency parameter and so to know the information evolution. We can speed up or slow down the transmission of information using these parameters. It is also possible to realise an interesting control of the spin information during the horizon of coherence using stationary kicks : we can stop the evolution of the information and concentrate it on only one spin during the horizon of coherence. If we can find a chaotic environment which presents a large horizon of coherence and that we can force the kicks to be in the direction of an eigenvector, it is possible to control freely the system. Other analyses will consist of finding an expression of the value of the horizon of coherence for the spins coupled by the Heisenberg interaction for the condition $J>w_0$ and so extending this work. 
\appendix \section{Coherence oscillation and stationary population of a spin chain coupled by an Ising-Z interaction} \label{isingzdemo} We have seen in section \ref{sectionisingz} that the spins of a chain coupled by an Ising-Z interaction and not submitted to kicks have a stationary population (at its initial value) and a coherence which oscillates. To understand and to prove it, consider two spins coupled by an Ising-Z interaction. Let all matrices be defined in the base $\{|\uparrow \uparrow\rangle, |\uparrow \downarrow\rangle, |\downarrow \uparrow\rangle, |\downarrow \downarrow\rangle \}$. In this case, the evolution operator becomes \begin{multline} U^{(\mathrm{i})} = e^{-\imath \frac{ H_{0,I}}{\hbar w_0}(2 \pi - \varphi_2^{(\mathrm{i})})} \left[ \mathrm{id} \otimes \left(\mathrm{id} + \left(e^{- \imath \lambda_2^{(\mathrm{i})}}-1\right) W\right)\right]e^{-\imath \frac{ H_{0,I}}{\hbar w_0} (\varphi_2^{(\mathrm{i})} - \varphi_1^{(\mathrm{i})})}\\ \left[\left(\mathrm{id} + \left(e^{- \imath \lambda_1^{(\mathrm{i})}}-1\right) W\right)\otimes \mathrm{id} \right] e^{-\imath \frac{ H_{0,I}}{\hbar w_0} \varphi_1^{(\mathrm{i})}} \end{multline} \begin{eqnarray} U^{(\mathrm{i})} &=& e^{-\imath \frac{ H_{0,I}}{\hbar w_0}(2 \pi) } \left[ \mathrm{id} \otimes\mathrm{id} \right]e^{-\imath \frac{ H_{0,I}}{\hbar w_0} \times 0} \left[\mathrm{id} \otimes \mathrm{id} \right] e^{-\imath \frac{ H_{0,I}}{\hbar w_0} \times 0}\\ &=&e^{-\imath \frac{ H_{0,I}}{\hbar w_0}(2 \pi) } \begin{pmatrix} 1 & 0 & 0 & 0\\ 0 & 1 & 0 & 0\\ 0 & 0 & 1 & 0\\ 0 & 0 & 0 & 1 \end{pmatrix}\\ &=&e^{-\imath \frac{ H_{0,I}}{\hbar w_0}(2 \pi) } \end{eqnarray} With \begin{eqnarray} H_{0,I} &=& \begin{pmatrix} 0 & 0\\ 0 & \frac{\hbar w_1}{2} \end{pmatrix} \otimes \mathrm{id} + \mathrm{id} \otimes\begin{pmatrix} 0 & 0\\ 0 & \frac{\hbar w_1}{2} \end{pmatrix} - J S_z \otimes S_z \\ &=& \begin{pmatrix} 0 & 0 & 0 & 0\\ 0 & \frac{\hbar w_1}{2} & 0 & 0\\ 0 & 0 & \frac{\hbar w_1}{2} & 0\\ 0 & 0 & 0 & \hbar w_1 
\end{pmatrix}-J \frac{\hbar^2}{4} \begin{pmatrix} 1 & 0 & 0 & 0\\ 0 & -1 & 0 & 0\\ 0 & 0 & -1 & 0\\ 0 & 0 & 0 & 1 \end{pmatrix}\\ &=& \begin{pmatrix} - J \frac{\hbar^2}{4} & 0 & 0 & 0\\ 0 & J \frac{\hbar^2}{4}+ \frac{\hbar w_1}{2} & 0 & 0\\ 0 & 0 & J \frac{\hbar^2}{4} + \frac{\hbar w_1}{2} & 0\\ 0 & 0 & 0 & -J \frac{\hbar^2}{4}+ \hbar w_1 \end{pmatrix} \end{eqnarray} \begin{equation} \Leftrightarrow U^{(\mathrm{i})} = e^{-\imath \frac{ H_{0,I}}{\hbar w_0}2 \pi } = \begin{pmatrix} e^{\imath \frac{\hbar J}{4 w_0}2 \pi} & 0 & 0 & 0\\ 0 & e^{- \imath (\frac{\hbar J}{4 w_0} + \frac{ w_1}{2 w_0})2 \pi} & 0 & 0\\ 0 & 0 & e^{- \imath( \frac{\hbar J}{4 w_0} + \frac{ w_1}{2 w_0})2 \pi} & 0\\ 0 & 0 & 0 & e^{\imath (\frac{\hbar J}{4 w_0} - \frac{ w_1}{w_0})2 \pi} \end{pmatrix} \end{equation} Let the complete wave function be \begin{equation} |\psi^{(0)}\rangle = |\Psi_1\rangle \otimes |\Psi_2 \rangle = \begin{pmatrix} \alpha \\ \beta \\ \gamma\\ \delta \end{pmatrix} \end{equation} with $|\alpha|^2+|\beta|^2+|\gamma|^2+|\delta|^2=1$. The wave function evolution is \begin{eqnarray} |\psi^{(\mathrm{i})} \rangle &=& U^{(\mathrm{i})} |\psi^{(\mathrm{i}-1)}\rangle \\ &=& \begin{pmatrix} \alpha e^{\mathrm{i}\imath \frac{\hbar J}{4 w_0}(2 \pi)}\\ \beta e^{-\mathrm{i} \imath (\frac{\hbar J}{4 w_0} + \frac{ w_1}{2 w_0})(2 \pi)} \\ \gamma e^{-\mathrm{i} \imath( \frac{\hbar J}{4 w_0} + \frac{ w_1}{2 w_0})(2 \pi)}\\ \delta e^{\mathrm{i} \imath (\frac{\hbar J}{4 w_0} - \frac{ w_1}{w_0})(2 \pi)} \end{pmatrix} \end{eqnarray} To obtain the coherence and the population of the first spin, we have to calculate the density matrix and the partial trace on the second spin. 
The density matrix is defined by \begin{equation} \rho^{(\mathrm{i})} = |\psi^{(\mathrm{i})} \rangle \langle \psi^{(\mathrm{i})} | \end{equation} \begin{equation} \rho^{(\mathrm{i})} = \begin{pmatrix} \alpha e^{\mathrm{i}\imath \frac{\hbar J}{4 w_0}2 \pi}\\ \beta e^{-\mathrm{i} \imath (\frac{\hbar J}{4 w_0} + \frac{ w_1}{2 w_0})2 \pi} \\ \gamma e^{-\mathrm{i} \imath( \frac{\hbar J}{4 w_0} + \frac{ w_1}{2 w_0})2 \pi}\\ \delta e^{\mathrm{i}\imath (\frac{\hbar J}{4 w_0} - \frac{ w_1}{w_0})2 \pi} \end{pmatrix} \begin{pmatrix} \alpha^* e^{-\mathrm{i}\imath \frac{\hbar J}{2 w_0} \pi}& \beta^* e^{\mathrm{i}\imath (\frac{\hbar J}{2 w_0} + \frac{ w_1}{ w_0}) \pi} & \gamma^* e^{\mathrm{i}\imath( \frac{\hbar J}{2 w_0} + \frac{ w_1}{ w_0}) \pi}& \delta^* e^{-\mathrm{i}\imath (\frac{\hbar J}{4 w_0} - \frac{ w_1}{w_0})2 \pi} \end{pmatrix} \end{equation} \begin{equation} \rho^{(\mathrm{i})}= \begin{pmatrix} \alpha\alpha^* & \alpha\beta^* e^{\mathrm{i} \imath (\frac{\hbar J}{2 w_0} + \frac{ w_1}{2 w_0})2 \pi} & \alpha \gamma^* e^{\mathrm{i} \imath( \frac{\hbar J}{2 w_0} + \frac{ w_1}{2 w_0})2 \pi}& \alpha \delta^* e^{\mathrm{i}\imath\frac{ w_1}{w_0} 2 \pi}\\ \beta \alpha^* e^{-\mathrm{i} \imath (\frac{\hbar J}{2 w_0} + \frac{ w_1}{2 w_0})2 \pi} & \beta \beta^* & \beta \gamma^* & \beta \delta^* e^{-\mathrm{i} \imath (\frac{\hbar J}{2 w_0} - \frac{ w_1}{2 w_0})2 \pi}\\ \gamma \alpha^* e^{-\mathrm{i} \imath( \frac{\hbar J}{2 w_0} + \frac{ w_1}{2 w_0})2 \pi} & \gamma \beta^* & \gamma \gamma^* & \gamma \delta^* e^{-\mathrm{i} \imath( \frac{\hbar J}{2 w_0} - \frac{ w_1}{2 w_0})2 \pi}\\ \delta \alpha^* e^{- \mathrm{i}\imath \frac{ w_1}{w_0}2 \pi} & \delta\beta^* e^{\mathrm{i}\imath (\frac{\hbar J}{2 w_0} - \frac{ w_1}{2 w_0})2 \pi} & \delta \gamma^* e^{\mathrm{i}\imath (\frac{\hbar J}{2 w_0} - \frac{ w_1}{2 w_0})2 \pi} & \delta \delta^* \end{pmatrix} \end{equation} The coherence of the first spin is \begin{eqnarray} \rho^{cohe,(\mathrm{i})}_1 &=& |\langle \uparrow \uparrow 
|\rho^\mathrm{i} | \downarrow \uparrow \rangle + \langle \uparrow \downarrow|\rho^\mathrm{i} | \downarrow \downarrow \rangle|\\ &=& |\left( \gamma \alpha^* e^{- \mathrm{i}\imath (\frac{\hbar J}{2 w_0} + \frac{ w_1}{2 w_0})(2 \pi)} + \delta\beta^* e^{\mathrm{i}\imath (\frac{\hbar J}{2 w_0} - \frac{ w_1}{2w_0})(2 \pi)} \right)| \end{eqnarray} We see that the coherence only depends on the exponential which is $2 \pi$ periodic. The up population of the first spin is given by \begin{eqnarray} \rho^{pop,(\mathrm{i})}_1 &=& \langle \uparrow \uparrow |\rho^\mathrm{i} | \uparrow \uparrow \rangle + \langle \uparrow \downarrow|\rho^\mathrm{i} | \uparrow \downarrow \rangle\\ &=& \left( \alpha \alpha^* + \beta \beta^* \right) \end{eqnarray} The population is not modified with the time if there is no kick.\\ The extension of these analyses to $N$ coupled spins give the same results. \section{Effect of the kick strength on an uncoupled spin kicked in a direction of an eigenvector} \label{withoutcoupling} If we consider a spin without any interaction with its neighbours, we have the following evolution operator for the $i$-th kick \begin{equation} U^{(\mathrm{i})} = e^{-\imath \frac{H_0}{\hbar w_0} (2 \pi - \varphi^{(\mathrm{i})})} \left[ \mathrm{id} + \left(e^{-\imath \lambda^{(\mathrm{i})}} -1 \right)W \right] e^{-\imath \frac{H_0}{\hbar w_0} \varphi^{(\mathrm{i})}} \end{equation} All matrices are defined in the base $\{|\uparrow \uparrow\rangle, |\uparrow \downarrow\rangle, |\downarrow \uparrow\rangle, |\downarrow \downarrow\rangle \}$. We suppose that the kicks are in the direction of an eigenvector $W = |w\rangle \langle w| = \begin{pmatrix} 1 \\ 0 \end{pmatrix}\begin{pmatrix} 1 & 0 \end{pmatrix} = \begin{pmatrix} 1 & 0\\ 0 & 0 \end{pmatrix}$. 
If we calculate the evolution of the evolution operator until the $m$-th kick, we obtain \begin{eqnarray} \prod_{j=1}^m U^{(\mathrm{j})} &=& \prod_{j=1}^m \begin{pmatrix} 1 & 0\\ 0 & e^{-\imath \frac{w_1}{2 w_0}(2\pi- \varphi^{(\mathrm{j})})} \end{pmatrix} \begin{pmatrix} e^{-\imath \lambda^{(\mathrm{j})}} & 0\\ 0 & 1 \end{pmatrix} \begin{pmatrix} 1 & 0\\ 0 & e^{-\imath \frac{w_1}{2 w_0}\varphi^{(\mathrm{j})}} \end{pmatrix} \\[.2cm] &=& \prod_{j=1}^m \begin{pmatrix} e^{-\imath \lambda^{(\mathrm{j})}} & 0\\ 0 & e^{- \imath \frac{w_1}{w_0} \pi} \end{pmatrix} \\[.4cm] &=&\begin{pmatrix} e^{-\imath \sum_{j=1}^m \lambda^{(\mathrm{j})}} & 0\\ 0 & e^{- \imath \sum_{j=1}^m \frac{w_1}{w_0} \pi } \end{pmatrix} \end{eqnarray} The initial state is chosen to be $|\psi^{(0)} \rangle =\begin{pmatrix} \alpha \\ \beta \end{pmatrix}$ with $|\alpha|^2 + |\beta|^2=1$. The wave function at the $i$-th kick is \begin{equation} |\psi^{(i)} \rangle = \begin{pmatrix} \alpha e^{-\imath \sum_{j=1}^i \lambda^{(j)}}\\ \beta e^{- \imath \sum_{j=1}^i \frac{w_1}{w_0} \pi} \end{pmatrix} \end{equation} The density matrix is then \begin{equation} \rho^{(\mathrm{i})} = |\psi^{(\mathrm{i})} \rangle \langle \psi^{(\mathrm{i})}| = \begin{pmatrix} \alpha \alpha^* & \alpha \beta^* e^{-\imath \sum_{j=1}^i \lambda^{(\mathrm{i})}} e^{\imath \sum_{k=1}^i \frac{w_1}{w_0} \pi}\\ \alpha^* \beta e^{\imath \sum_{j=1}^i \lambda^{(j)}} e^{-\imath \sum_{k=1}^i \frac{w_1}{w_0} \pi} & \beta \beta^* \end{pmatrix} \end{equation} We see that there is no effect of the strength or of the delay on the population. The strength only induces a pure dephasing. \section{Effect of the coupling on the kick strength when the kick is in the direction of an eigenvector} We have seen in section \ref{sectionisingz} that if two spins are kicked with the same strength in the direction of an eigenvector, the coherence and the population are not modified. However, if the spins are kicked with various strengths, a modification appears. 
In order to understand mathematically what happens, we choose to make the calculation for two coupled spins. All matrices are defined in the base $\{|\uparrow \uparrow\rangle, |\uparrow \downarrow\rangle, |\downarrow \uparrow\rangle, |\downarrow \downarrow\rangle \}$. \subsection{When the spins are coupled by an Ising-Z interaction} \label{isingzdemo2} The evolution operator is characterised at the $i$-th kick by \begin{multline} U^{(\mathrm{i})} = e^{-\imath \frac{ H_{0,I}}{\hbar w_0}(2 \pi - \varphi_2^{(\mathrm{i})})} \left[ \mathrm{id} \otimes \left(\mathrm{id} + \left(e^{- \imath \lambda_2^{(\mathrm{i})}}-1\right) W\right)\right]e^{-\imath \frac{ H_{0,I}}{\hbar w_0} (\varphi_2^{(\mathrm{i})} - \varphi_1^{(\mathrm{i})})}\\ \left[\left(\mathrm{id} + \left(e^{- \imath \lambda_1^{(\mathrm{i})}}-1\right) W\right)\otimes \mathrm{id} \right] e^{-\imath \frac{ H_{0,I}}{\hbar w_0} \varphi_1^{(\mathrm{i})}} \end{multline} In order to simplify the calculation, this demonstration does not take into account the possible variation of the strength and of the delay from one kick to another, i.e. $U^{(\mathrm{i})}=U$, $\varphi_1^{(\mathrm{i})}=\varphi_1$, $\varphi_2^{(\mathrm{i})}=\varphi_2$, $\lambda_1^{(\mathrm{i})}=\lambda_1$ and $\lambda_2^{(\mathrm{i})}=\lambda_2$. 
Let the kicks be in the direction of an eigenvector, so \begin{center} $W = |w\rangle \langle w| = \begin{pmatrix} 1 \\ 0 \end{pmatrix}\begin{pmatrix} 1 & 0 \end{pmatrix} = \begin{pmatrix} 1 & 0\\ 0 & 0 \end{pmatrix}$ \end{center} We have obtained in \ref{isingzdemo} the exponential of the Hamiltonian $H_{0,I}$ \begin{equation} e^{-\imath \frac{ H_{0,I}}{\hbar w_0} } = \begin{pmatrix} e^{\imath \frac{\hbar J}{4 w_0}} & 0 & 0 & 0\\ 0 & e^{- \imath (\frac{\hbar J}{4 w_0} + \frac{ w_1}{2 w_0})} & 0 & 0\\ 0 & 0 & e^{- \imath( \frac{\hbar J}{4 w_0} + \frac{ w_1}{2 w_0})} & 0\\ 0 & 0 & 0 & e^{\imath (\frac{\hbar J}{4 w_0} - \frac{ w_1}{w_0})} \end{pmatrix} \end{equation} Let $\alpha = \frac{\hbar J}{4 w_0}$ and $\beta=\frac{ w_1}{2 w_0}$. The Hamiltonian becomes \begin{equation} e^{-\imath \frac{ H_{0,I}}{\hbar w_0}} = \begin{pmatrix} e^{\imath \alpha} & 0 & 0 & 0\\ 0 & e^{- \imath (\alpha + \beta)} & 0 & 0\\ 0 & 0 & e^{- \imath(\alpha + \beta)} & 0\\ 0 & 0 & 0 & e^{\imath (\alpha - 2\beta)} \end{pmatrix} \end{equation} \begin{multline} \Leftrightarrow U= \begin{pmatrix} e^{\imath \alpha (2\pi - \varphi_2)} & 0 & 0 & 0\\ 0 & e^{- \imath (\alpha + \beta)(2\pi - \varphi_2)} & 0 & 0\\ 0 & 0 & e^{- \imath(\alpha + \beta)(2\pi - \varphi_2)} & 0\\ 0 & 0 & 0 & e^{\imath (\alpha - 2\beta)(2\pi - \varphi_2)} \end{pmatrix} \\ \left[\begin{pmatrix} 1 & 0\\ 0 & 1 \end{pmatrix} \otimes \begin{pmatrix} e^{- \imath \lambda_2} & 0\\ 0 & 1 \end{pmatrix}\right] \begin{pmatrix} e^{\imath \alpha (\varphi_2 - \varphi_1)} & 0 & 0 & 0\\ 0 & e^{- \imath (\alpha + \beta)(\varphi_2 - \varphi_1)} & 0 & 0\\ 0 & 0 & e^{- \imath(\alpha + \beta)(\varphi_2 - \varphi_1)} & 0\\ 0 & 0 & 0 & e^{\imath (\alpha - 2\beta)(\varphi_2 - \varphi_1)} \end{pmatrix} \\ \left[ \begin{pmatrix} e^{- \imath \lambda_1} & 0\\ 0 & 1 \end{pmatrix}\otimes \begin{pmatrix} 1 & 0\\ 0 & 1 \end{pmatrix} \right]\begin{pmatrix} e^{\imath \alpha \varphi_1} & 0 & 0 & 0\\ 0 & e^{- \imath (\alpha + \beta) \varphi_1} & 0 & 0\\ 0 & 0 & 
e^{- \imath(\alpha + \beta)\varphi_1} & 0\\ 0 & 0 & 0 & e^{\imath (\alpha - 2\beta)\varphi_1} \end{pmatrix} \end{multline} \begin{equation} \Leftrightarrow U=\begin{pmatrix} e^{\imath \alpha 2\pi}e^{-\imath (\lambda_2 + \lambda_1)} & 0 & 0 & 0 \\ 0 & e^{-\imath (\alpha +\beta)2\pi}e^{-\imath \lambda_1} & 0 & 0\\ 0 & 0 & e^{-\imath (\alpha +\beta)2\pi}e^{-\imath \lambda_2} & 0\\ 0 & 0 & 0 & e^{\imath (\alpha - 2\beta) 2\pi} \end{pmatrix} \end{equation} We choose the initial state to be : $|\Psi_1\rangle = \begin{pmatrix} \chi \\ \zeta \end{pmatrix}$ and $|\Psi_2\rangle = \begin{pmatrix} \gamma\\ \delta \end{pmatrix}$ \begin{equation} |\psi^{(0)}\rangle = |\Psi_1\rangle \otimes |\Psi_2\rangle =\begin{pmatrix} \chi \gamma\\ \chi \delta\\ \zeta \gamma\\ \zeta \delta \end{pmatrix} \end{equation} The evolution of wave function at the $i$-th kick is \begin{equation} |\psi^{(\mathrm{i})}\rangle = U |\psi^{(\mathrm{i}-1)}\rangle = \begin{pmatrix} \chi \gamma e^{\mathrm{i} \imath \alpha 2\pi}e^{-\mathrm{i} \imath (\lambda_2 + \lambda_1)}\\ \chi \delta e^{-\mathrm{i} \imath (\alpha +\beta)2\pi}e^{-\mathrm{i} \imath \lambda_1} \\ \zeta \gamma e^{-\mathrm{i} \imath (\alpha +\beta)2\pi}e^{-\mathrm{i} \imath \lambda_2}\\ \zeta \delta e^{\mathrm{i} \imath (\alpha - 2\beta) 2\pi} \end{pmatrix} \end{equation} The density matrix is then \begin{equation} \rho^{(\mathrm{i})} = |\psi^{(\mathrm{i})}\rangle \langle \psi^{(\mathrm{i})}| \end{equation} The coherence of the first spin is \begin{eqnarray} \rho^{cohe,(\mathrm{i})}_1 &=& |\langle \uparrow \uparrow |\rho^{(\mathrm{i})} | \downarrow \uparrow \rangle + \langle \uparrow \downarrow|\rho^{(\mathrm{i})} | \downarrow \downarrow \rangle|\\ &=& |\chi \gamma \zeta^* \gamma^* e^{\mathrm{i} \imath (2 \alpha +\beta) 2\pi}e^{-\mathrm{i} \imath \lambda_1} + \chi \delta \zeta^* \delta^* e^{-\mathrm{i} \imath (2\alpha - \beta) 2\pi} e^{-\mathrm{i} \imath \lambda_1}| \\ &=& | \chi \gamma \zeta^* \gamma^* e^{\mathrm{i} \imath (2 \alpha +\beta) 
2\pi} + \chi \delta \zeta^* \delta^* e^{-\mathrm{i} \imath (2\alpha - \beta) 2\pi}| \end{eqnarray} and the first spin up population \begin{eqnarray} \rho^{pop,(\mathrm{i})}_1 &=& \langle \uparrow \uparrow |\rho^{(\mathrm{i})} | \uparrow \uparrow \rangle + \langle \uparrow \downarrow|\rho^{(\mathrm{i})} | \uparrow \downarrow \rangle\\ &=& \chi \chi^* \gamma \gamma^* + \chi \chi^* \delta \delta^* \end{eqnarray} The coherence and the population of the first spin does not change with respect to the kick number for a kick in a direction of an eigenvector as we have seen in section \ref{sectionisingz}. For the coherence of the average spin we have \begin{equation} \rho^{cohe,(\mathrm{i})}_{tot} = \frac{1}{2} |\langle \uparrow \uparrow |\rho^{(\mathrm{i})} | \downarrow \uparrow \rangle + \langle \uparrow \downarrow|\rho^{(\mathrm{i})} | \downarrow \downarrow \rangle + \langle \uparrow \uparrow |\rho^{(\mathrm{i})} | \uparrow \downarrow \rangle + \langle \downarrow \uparrow |\rho^{(\mathrm{i})} | \downarrow \downarrow \rangle| \end{equation} \begin{multline} \rho^{cohe,(\mathrm{i})}_{tot} = \frac{1}{2} \left| \chi \gamma\zeta^* \gamma^* e^{\mathrm{i} \imath (2\alpha +\beta)2\pi}e^{-\mathrm{i} \imath \lambda_1} + \chi \delta \zeta^* \delta^* e^{-\mathrm{i} \imath \lambda_1} e^{-\mathrm{i} \imath (2\alpha - \beta) 2\pi}\right.\\ \left.+ \chi \gamma \chi^* \delta^* e^{-\mathrm{i} \imath \lambda_2 } e^{\mathrm{i} \imath (2\alpha +\beta)2\pi} + \zeta \gamma \zeta^* \delta^* e^{-\mathrm{i} \imath (2\alpha -\beta)2\pi}e^{-\mathrm{i} \imath \lambda_2} \right| \end{multline} In the case where $\lambda = \lambda_1 = \lambda_2$, we obtain \begin{multline} \rho^{cohe,(\mathrm{i})}_{tot} = \frac{1}{2} \left| \chi \gamma\zeta^* \gamma^* e^{\mathrm{i} \imath (2\alpha +\beta)2\pi} + \chi \delta \zeta^* \delta^* e^{-\mathrm{i} \imath (2\alpha - \beta) 2\pi}\right.\\ \left.+ \chi \gamma \chi^* \delta^* e^{\mathrm{i} \imath (2\alpha +\beta)2\pi} + \zeta \gamma \zeta^* \delta^* e^{-\mathrm{i} 
\imath (2\alpha -\beta)2\pi}\right| \end{multline} We see that if the spins are similarly kicked (same strength) in a direction of an eigenvector, there is no modification of the coherence of the average spin of the chain. However, when the strengths are different, the spins feel the kicks. The kick delay never influences the coherence and the population. The extension to a large number of spins and to a variation of the strength and of the delay from one kick to another one gives the same results. \subsection{When the spins are coupled by a Heisenberg interaction} \label{force} The evolution operator is characterised by \begin{multline} U^{(\mathrm{i})} = e^{-\imath \frac{ H_{0,I}}{\hbar w_0}(2 \pi - \varphi_2^{(\mathrm{i})})} \left[ \mathrm{id} \otimes \left(\mathrm{id} + \left(e^{- \imath \lambda_2^{(\mathrm{i})}}-1\right) W\right)\right]e^{-\imath \frac{ H_{0,I}}{\hbar w_0} (\varphi_2^{(\mathrm{i})} - \varphi_1^{(\mathrm{i})})}\\ \left[\left(\mathrm{id} + \left(e^{- \imath \lambda_1^{(\mathrm{i})}}-1\right) W\right)\otimes \mathrm{id} \right] e^{-\imath \frac{ H_{0,I}}{\hbar w_0} \varphi_1^{(\mathrm{i})}} \end{multline} As previously we simplify the calculation in not taking into account the possible variation of the strength and of the delay from one kick to another, i.e. $U^{(\mathrm{i})}=U$, $\varphi_1^{(\mathrm{i})}=\varphi_1$, $\varphi_2^{(\mathrm{i})}=\varphi_2$, $\lambda_1^{(\mathrm{i})}=\lambda_1$ and $\lambda_2^{(\mathrm{i})}=\lambda_2$. We choose to kick in the direction of an eigenvector, so \begin{center} $W = |w\rangle \langle w| = \begin{pmatrix} 1 \\ 0 \end{pmatrix}\begin{pmatrix} 1 & 0 \end{pmatrix} = \begin{pmatrix} 1 & 0\\ 0 & 0 \end{pmatrix}$ \end{center} We are only interested by the variation of the strength between the first and the second spin. For a sake of simplicity, we suppose that $\varphi_1 = \varphi_2 = 0$. 
The evolution operator is modified as follows \begin{multline} U = e^{-\imath \frac{ H_{0,I}}{\hbar w_0}2 \pi} \left[\begin{pmatrix} 1 & 0\\ 0 & 1 \end{pmatrix} \otimes \left[ \begin{pmatrix} 1 & 0\\ 0 & 1 \end{pmatrix} + \left( e^{- \imath \lambda_2}-1 \right) \begin{pmatrix} 1 & 0\\ 0 & 0 \end{pmatrix} \right] \right] e^{H_{0,I} \times 0} \\ \left[ \left[ \begin{pmatrix} 1 & 0\\ 0 & 1 \end{pmatrix} + \left( e^{- \imath \lambda_1}-1 \right) \begin{pmatrix} 1 & 0\\ 0 & 0 \end{pmatrix} \right] \otimes \begin{pmatrix} 1 & 0\\ 0 & 1 \end{pmatrix}\right]e^{H_{0,I} \times 0} \end{multline} with $ e^{H_{0,I} \times 0} = \begin{pmatrix} 1 & 0 & 0 & 0\\ 0 & 1 & 0 & 0\\ 0 & 0 & 1 & 0\\ 0 & 0 & 0 & 1 \end{pmatrix}$. \begin{equation} \Leftrightarrow U = e^{-\imath \frac{ H_{0,I}}{\hbar w_0}2 \pi} \begin{pmatrix} e^{-\imath (\lambda_1 + \lambda_2)} & 0 & 0 & 0\\ 0 & e^{- \imath \lambda_1} & 0 & 0\\ 0 & 0 & e^{- \imath \lambda_2} & 0 \\ 0 & 0 & 0 & 1 \end{pmatrix} \end{equation} $H_{0,I} = H_0 + H_I$ with $H_0$ a diagonal matrix and $H_I$ a matrix with non-diagonal terms associated with the coupling. The matrix $H_{0,I}$ can be written as \begin{equation} H_{0,I} = \begin{pmatrix} a & 0 & 0 & 0\\ 0 & b & c & 0\\ 0 & c & b & 0\\ 0 & 0 & 0 & d \end{pmatrix} \end{equation} with $a \neq d$ because of the shape of $H_0$. 
The exponential of such a matrix becomes \begin{equation} e^{-\imath \frac{H_{0,I}}{\hbar w_0} 2 \pi} = \begin{pmatrix} u & 0 & 0 & 0\\ 0 & v & w & 0\\ 0 & w & v & 0\\ 0 & 0 & 0 & x \end{pmatrix} \end{equation} and the evolution operator becomes \begin{equation} U = \begin{pmatrix} u e^{-\imath (\lambda_1 + \lambda_2)} & 0 & 0 & 0\\ 0 & v e^{- \imath \lambda_1} & w e^{- \imath \lambda_2} & 0\\ 0 & w e^{- \imath \lambda_1} & v e^{- \imath \lambda_2} & 0 \\ 0 & 0 & 0 & x \end{pmatrix} \end{equation} Let the spin state be at $t=0$ \begin{center} $|\Psi_1\rangle = \begin{pmatrix} 1\\ 0 \end{pmatrix}$ \\ $|\Psi_2\rangle = \frac{1}{\sqrt{2}} \begin{pmatrix} 1\\ 1 \end{pmatrix}$. \end{center} \begin{equation} \Leftrightarrow |\psi^{(0)}\rangle = | \Psi_1 \rangle\otimes |\Psi_2 \rangle = \frac{1}{\sqrt{2}} \begin{pmatrix} 1 \\ 1\\ 0\\ 0 \end{pmatrix} \end{equation} The evolution with respect to the kick number is given by \begin{equation} |\psi^{(i+1)}\rangle = U |\psi^{(i)} \rangle \end{equation} In order to know the effect of the kick on the population, we calculate the three first up populations of the first spin ($\rho_1^{pop,(i)}$). For this, we have to calculate the complete wave function $|\psi^{(i)}\rangle$ and the density matrix $\rho^{(i)}$. \begin{center} $|\psi^{(0)}\rangle = | \Psi_1 \rangle\otimes |\Psi_2 \rangle = \frac{1}{\sqrt{2}} \begin{pmatrix} 1 \\ 1\\ 0\\ 0 \end{pmatrix}$ \end{center} \begin{equation} \Leftrightarrow \rho^{(0)} = |\psi^{(0)}\rangle \langle \psi^{(0)}| = \frac{1}{2} . \begin{pmatrix} 1 & 1 & 0 & 0\\ 1 & 1 & 0 & 0\\ 0 & 0 & 0 & 0\\ 0 & 0 & 0 & 0 \end{pmatrix} \end{equation} The up population of the first spin is given by the partial trace on the second spin. 
\begin{equation} \Leftrightarrow \psi_1^{(0)} = \langle \uparrow \downarrow | \rho^{(0)} |\uparrow \downarrow \rangle + \langle \uparrow \uparrow | \rho^{(0)} |\uparrow \uparrow \rangle= 1 \end{equation} \begin{equation} |\psi^{(1)}\rangle = U |\psi^{(0)} \rangle = \frac{1}{\sqrt{2}} \begin{pmatrix} u e^{- \imath (\lambda_1 + \lambda_2)} \\ v e^{- \imath \lambda_1}\\ w e^{- \imath \lambda_1} \\ 0 \end{pmatrix} \end{equation} \begin{equation} \Leftrightarrow \rho^{(1)} = |\psi^{(1)}\rangle \langle \psi^{(1)}| = \frac{1}{2} \begin{pmatrix} uu^* & uv^* e^{- \imath \lambda_2} & uw^* e^{-\imath \lambda_2} & 0\\ vu^* e^{\imath \lambda_2} & vv^* & vw^* & 0\\ wu^* e^{\imath \lambda_2} & wv^* & ww^* & 0\\ 0 & 0 & 0 & 0 \end{pmatrix} \end{equation} \begin{equation} \Leftrightarrow \rho_1^{pop,(1)} = \langle \uparrow \downarrow | \rho^{(1)} |\uparrow \downarrow \rangle + \langle \uparrow \uparrow | \rho^{(1)} |\uparrow \uparrow \rangle= \frac{1}{2} (uu^* + vv^*) \end{equation} \begin{equation} |\psi^{(2)}\rangle = U |\psi^{(1)} \rangle = \frac{1}{\sqrt{2}} \begin{pmatrix} u^2 e^{- 2 \imath (\lambda_1 + \lambda_2)} \\ v^2 e^{- 2 \imath \lambda_1} + w^2 e^{-\imath (\lambda_1 + \lambda_2)}\\ vw e^{- 2 \imath \lambda_1} + wv e^{-\imath (\lambda_1 + \lambda_2)} \\ 0 \end{pmatrix} \end{equation} \begin{multline} \Leftrightarrow \rho_1^{pop,(2)} = \langle \uparrow \downarrow | \rho^{(2)} |\uparrow \downarrow \rangle + \langle \uparrow \uparrow | \rho^{(2)} |\uparrow \uparrow \rangle\\ = \frac{1}{2} (u^2(u^2)^* + v^2(v^2)^* + w^2(w^2)^* + v^2(w^2)^* e^{-\imath (\lambda_1 - \lambda_2)}+ w^2(v^2)^* e^{\imath (\lambda_1 - \lambda_2)}) \end{multline} In the same way, we obtain \begin{multline} \Leftrightarrow \rho_1^{pop,(3)} = \langle \uparrow \downarrow | \rho^{(3)} |\uparrow \downarrow \rangle + \langle \uparrow \uparrow | \rho^{(3)} |\uparrow \uparrow \rangle\\ = \frac{1}{2} \left(u^3(u^3)^* + vv^* \left[ v^2(v^2)^* + 5 w^2(w^2)^* + v^2(w^2)^* e^{2 \imath (\lambda_2 - \lambda_1)} + 2 
v^2 (w^2)^* e^{-\imath (\lambda_1-\lambda_2)} \right. \right.\\ \left. \left.+ w^2(v^2)^* e^{2 \imath (\lambda_1 - \lambda_2)} + 2 w^2 (w^2)^* e^{\imath (\lambda_1 - \lambda_2)}+ 2 w^2 (v^2)^* e^{\imath(\lambda_1- \lambda_2)} + 2 w^2 (w^2)^* e^{-\imath (\lambda_1 - \lambda_2)}\right] \right) \end{multline} Across the three up states of the first spin, we see that the strength can affect the population only if it is different for the two coupled spins. If the strength is the same, we can easily see that it disappears and the spin evolution is only due to the coupling. This demonstration can also be made for a kick strength which is not the same for every kick and for more coupled spins. The conclusion will be the same. \end{document}
\begin{document} \title[Archimedes' principle]{Archimedes' principle for ideal gas} \author{Krzysztof Burdzy and Jacek Ma\l{}ecki} \address{KB: Department of Mathematics, Box 354350, University of Washington, Seattle, WA 98195} \email{[email protected]} \address{JM: Department of Mathematics \\ Wroc{\l}aw University of Science and Technology \\ ul. Wybrze{\.z}e Wyspia{\'n}\-skiego 27 \\ 50-370 Wroc{\l}aw, Poland} \email{[email protected]} \thanks{KB's research was supported in part by Simons Foundation Grant 506732. J. Ma\l{}ecki was supported by the Polish National Science Centre (NCN) grant no. 2018/29/B/ST1/02030.} \keywords{Archimedes' principle, ideal gas} \subjclass[2010]{82B21; 82C21} \begin{abstract} We prove Archimedes' principle for a macroscopic ball in ideal gas consisting of point particles with non-zero mass. The main result is an asymptotic theorem, as the number of point particles goes to infinity and their total mass remains constant. We also show that, asymptotically, the gas has an exponential density as a function of height. We find the asymptotic inverse temperature of the gas. We derive an accurate estimate of the volume of the phase space using the local central limit theorem. \end{abstract} \maketitle \section{Introduction} There seems to be no rigorous proof of Archimedes' principle in the mathematical literature. The most likely reason for this omission is that Archimedes' principle is trivial given a few natural assumptions. The principle can be easily derived using the divergence theorem, assuming that the formula for the pressure as a function of height is known. The ``barometric formula'' which says that the pressure in gas has an exponential density as a function of height can be easily derived from the ideal gas law.
While this derivation of Archimedes' principle is sufficient for the scientific applications, one could ask whether the principle can be derived from a more fundamental model of the matter, as in Hilbert's 6-th problem. This is what we will do in the present article. Perhaps the most significant difference between our model and the derivation of Archimedes' principle alluded to above is that the floating object is allowed to move in our case. We are not aware of an existing proof of Archimedes' principle, rigorous or not, based on a model with a moving floating object. We will consider a container with a bounded base, vertical side walls and no top. The container will hold point particles (ideal gas) and a floating object in the shape of a ball and no internal structure (the mass will be uniformly spread over the ball). The spherical shape of the floating object allows us to avoid the discussion of the energy going into rotation---the collisions of the ball with point particles and the walls of the container will not induce rotation of the ball. The point particles and the ball will move according to Newton's laws in a gravitational field with constant acceleration. We will assume conservation of energy and momentum but this assumption does not have a unique interpretation in the case of collisions of point particles with the infinitely heavy walls of the container. We will consider two types of reflections of point particles from the walls of the container: (i) specular reflections where the angle of reflection is equal to the angle of incidence, and (ii) random reflections according to the Lambertian distribution also known as the Knudsen law. We will assume that the system is in equilibrium so that its density is given by the microcanonical ensemble formula. We will prove that this distribution is the unique invariant measure in case (ii). Simple examples show that there are multiple invariant measures if we assume specular reflections. 
In our asymptotic results we will assume that the following objects and quantities are fixed: the container, the mass and radius of the macroscopic ball, the total mass of the gas (all point particles), the total (potential and kinetic) energy of all moving objects (point particles and the ball), and the gravitational acceleration. The number of point particles will go to infinity so the mass of a single particle will go to zero. On the way to the main result, Archimedes' principle, we will derive a few other results that may have independent interest. We will present an accurate formula for the volume of the phase space. Our calculation is based on the local central limit theorem. This theorem was proved a long time ago but the literature is hard to follow so we hope that our short review of that literature will help those readers who might need this result in their own research. The microcanonical ensemble formula is well known, see, e.g., \cite[Sect.~1.2]{Ruelle}. We could not find a version of the formula needed here in the literature so a derivation was supplied in a parallel project \cite{fermi}. We will not reproduce that proof here but we will present a brief review. We will prove a version of the barometric formula, i.e., we will show that the density of the gas has, asymptotically, an exponential density as a function of height. We will show that the parameter of the exponential distribution can be identified with the inverse temperature. \subsection{Literature review} A version of Archimedes' principle was proved in \cite{archim} but that model was completely different from the present one. The ``gas'' consisted of hard spheres with strictly positive radius. Their centers moved according to independent Brownian motions (except for the collisions). The number of ``gas molecules'' was constant and the asymptotic theorems were proved by sending the ``gravitational acceleration'' to infinity.
Our article is concerned with a model in which a macroscopic object interacts with microscopic molecules according to Newtonian mechanics. In this sense, our model is closest to the ``piston problem'' proposed in \cite{Lieb99}. A large number of papers were inspired by \cite{Lieb99} and devoted to the piston problem; see, for example, \cite{Sin99,LPS,CL2002,CLS2002,LSC2002,NeiSin,Gorel,IS} and references therein. Several different models were considered in those papers. In one of the models, a piston moves along a tube and is bombarded by microscopic molecules from both sides. \subsection{Limitations of the model} Our model is, obviously, an oversimplified representation of reality and there is no hope that it could be modified to be very realistic. Still, from the mathematical point of view, some aspects of the model might be worth generalizing in future research. (i) In our model, the macroscopic ball has no internal structure so it has negligibly small heat capacity. It might be possible to analyze a model in which the ball is replaced with a ``balloon,'' i.e., an infinitely thin sphere holding inside (mobile) point particles with different masses than those of the outside particles. (ii) Since we assume that the gas is ideal, the point particles do not interact and, therefore, their collisions with the macroscopic ball are the only way in which they can exchange energy between each other. This is the only way in which the energy may become approximately equidistributed in the stationary regime. To generalize our results to gases consisting of hard spheres with positive radius one would need accurate estimates of the volume of the phase space. The virial expansion might be useful in this context, see \cite[Sect. 8i]{Mayer} or \cite[Sect. 4.3]{Ruelle}. If this approach works, it will require a whole new set of calculations. (iii) In our model, the container has a flat bottom and vertical side walls. Many of our calculations depend on this assumption.
\subsection{Organization of the paper} The rigorous presentation of the model and the statement of our main results are in Section \ref{y19.2}. We will review known results on the local central limit theorem in Section \ref{y19.3}. Section \ref{y19.4} contains a review of the results on the microcanonical ensemble formula developed in a parallel paper. We will derive an accurate estimate of the phase space volume in Section \ref{y19.5}. The proof of Archimedes' principle will be given in Section \ref{y19.6}. The inverse temperature will be identified in Section \ref{y19.7}. The uniqueness of the stationary probability distribution under Lambertian reflections will be proved in Section \ref{y19.8}. \section{Model and main results}\label{y19.2} We will consider $n$ point particles (ideal gas) and one macroscopic hard ball in a $d$-dimensional container $D$ with vertical walls, a bounded base, extending upward to infinity in the vertical direction. Suppose that $d\geq 2$ and let $D_b\subset \mathbb{R}^{d-1}$ denote the bottom of the container $D=D_b\times[0,\infty)$ in $\mathbb{R}^d$. The radius of the $(n+1)$-st (macroscopic) ball will be fixed and denoted $R >0$. Obviously, the point particles will have radii equal to $0$. We will assume that $D_b$ has a smooth boundary and satisfies the inner ball condition with radius $R$, i.e., for every point $x\in \partial D_b$, there is a unique $(d-1)$-dimensional open ball with radius $R$ inside $D_b$ whose boundary is tangent to $\partial D_b$ at $x$ and, moreover, $x$ is the only point in the intersection of the ball boundary and $\partial D_b$. We will also assume that a closed ball of radius $R$ fits in the interior of $D$, so that each point particle has room to move from below to above the ball, and vice versa. We will assume that the mass of the $i$-th point particle is $m_i=m/n>0$ for $i=1,\dots, n$, for some $m>0$.
When we let $n\to \infty$ in our theorems, the total mass of point particles will remain constant and equal to $m$. The mass of the macroscopic ball will be denoted $M = m_{n+1}>0$. We will assume that the mass is evenly spread over the volume of the macroscopic ball. The walls of the container will be assumed to have infinite mass---this assumption is a way of specifying the meaning of ``totally elastic collisions'' of point particles and the ball with the walls of the container. We will assume that the point particles and the ball are moving within a gravitational field with the constant acceleration $g>0$ pointing downwards. The point particles will not interact with each other. They will reflect from the walls of the container and they will undergo totally elastic collisions with the ball. The collisions of the ball with the walls of the container will be totally elastic. Time will be suppressed in the notation, except for Section \ref{y19.8}. We will assume that the system is in equilibrium. Random objects will represent the state of the system at time 0 (or any other fixed time). Let $(X_i,Y_i)\in\mathbb{R}^d$ denote the random position of the $i$-th point particle or the center of the macroscopic ball (for $i=n+1$), where $X_i\in \mathbb{R}^{d-1}$ represents the horizontal coordinates and $Y_i\geq 0$ is the vertical coordinate ($Y_{n+1} \geq R$). By $V_i\in\mathbb{R}^d$ we will denote the random velocity of the $i$-th point particle or the ball. Let \begin{align}\label{a1.4} \mathbf{x}_k&= (x_1,\ldots,x_{k}), \qquad \mathbf{y}_k= (y_1,\ldots,y_{k}),\qquad \mathbf{v}_k=(v_1,\ldots,v_{k}),\\ \mathbf{X}_{k}&= (X_1,\ldots,X_{k}), \qquad \mathbf{Y}_{k}= (Y_1,\ldots,Y_{k}), \qquad \mathbf{V}_{k}= (V_1,\ldots,V_{k}), \label{j24.4} \end{align} where $x_i \in \mathbb{R}^{d-1}$, $y_i \in \mathbb{R}_+$, and $v_i \in \mathbb{R}^d$. In the notation given above, upper case letters represent random variables and lower case letters represent their values. 
We will consider $(\mathbf{X}_{n+1},\mathbf{Y}_{n+1},\mathbf{V}_{n+1})$ to be a random vector distributed according to the microcanonical ensemble formula \eqref{j10.4} stated below, although we will give different distributions to these random vectors in some of the proofs. We will assume that the total energy of our system, $E$, is fixed. Hence, a.s., \begin{align}\label{y19.1} E= \sum_{i=1}^{n+1} \left( m_igY_i+\frac{m_i||V_i||^2}{2}\right)\/, \end{align} with the convention that the zero level represents zero potential energy. We will always assume that $E>MgR $ so that the ball and point particles cannot rest motionless at the bottom. A ball in $\mathbb{R}^d$ with center $(x,y)$ and radius $r$ will be denoted $\mathcal{B}((x,y), r)$. Let \begin{align}\label{j10.1} &D_b' = \{x\in D_b: \mathcal{B}((x, y), R)\subset D \text{ for all } y>R\},\\ \label{j9.4} &\mathbf{D}_n = \Big\{( \mathbf{x}_n,x_{n+1},\mathbf{y}_n,y_{n+1},\mathbf{v}_{n+1})\in D_b^{n}\times D_b'\times \mathbb{R}_+^n \times [R,\infty) \times \mathbb{R}^{nd}: \\ & \qquad \qquad( x_k, y_k) \in D\setminus \mathcal{B}((x_{n+1},y_{n+1}),R), \ k=1,\dots n\Big\},\notag\\ &\mathbf{D}_nE = \left\{(\mathbf{x}_{n+1},\mathbf{y}_{n+1},\mathbf{v}_{n+1})\in\mathbf{D}_n: \sum_{i=1}^{n+1}\left( m_iy_ig+\frac 1 2 m_i||v_i||^2\right)=E\right\} .\notag\\ \label{eq:DnEy:defn} &\mathbf{D}_nEy = \bigg\{(\mathbf{x}_{n},x_{n+1},\mathbf{y}_{n})\in D_b^{n}\times D_b'\times \mathbb{R}_+^n: \sum_{i=1}^{n+1} m_iy_ig\leq E, \\ &\qquad \qquad \qquad( x_k, y_k) \in D\setminus \mathcal{B}((x_{n+1},y),R), \ k=1,\dots n\bigg\}.\notag \end{align} Let $\mu_{\mathbf{y}_{n+1}}(\rd x)$ denote the uniform probability measure on the sphere in $\mathbb{R}^{(n+1)d}$ (so that the sphere is $((n+1)d-1)$-dimensional), centered at the origin, with the radius $\left(2E-2\sum_{i=1}^{n+1} m_iy_ig\right)^{1/2}$.
Consider the following measure $\mathbb{P}_n$ on $\mathbf{D}_nE$, \begin{align}\label{j10.4} \mathbb{P}_n&(\rd \mathbf{x}_{n+1} \rd \mathbf{y}_{n+1} \rd \mathbf{v}_{n+1})\\ & = C\left(E-\sum_{i=1}^{n+1} m_iy_ig\right)^{((n+1)d-2)/2} \mu_{\mathbf{y}_{n+1}}\left(\frac{\rd v_1}{\sqrt{m_1}},\ldots,\frac{\rd v_{n+1}}{\sqrt{m_{n+1}}}\right)\rd \mathbf{x}_{n+1}\rd \mathbf{y}_{n+1},\notag \end{align} where $C$ is the normalizing constant so that $\mathbb{P}_n$ is a probability measure. The measure $\mathbb{P}_n$ is a special case of the ``microcanonical ensemble formula,'' see, e.g., \cite[Sect.~1.2]{Ruelle}. We will say that a point particle undergoes a Lambertian reflection from a surface or that the particle reflects according to the Knudsen law if the reflection occurs at a point where the inner normal to the surface is uniquely defined, the probability density of the angle between the reflected trajectory and the normal vector is proportional to the cosine of the angle, and the law is invariant under rotations about the normal vector. Note that the Lambertian distribution of the outgoing velocity vector is independent from the incoming velocity vector, except that the two velocities have the same norm, so that energy is conserved. This law for random reflections was considered by Lambert in the context of light reflection from rough surfaces (\cite{L1760}) and by Knudsen (\cite{K1934}) as a model for gas molecule reflections. By \cite[Cor. 3.2]{fermi} (which can be derived from \cite[Thm. 4.1]{Plakh} or \cite[Thm. 2.2]{ABS}), the Lambertian reflection law is the unique random reflection law that does not depend on the angle of incidence and is consistent with the standard (specular) reflection law from a rough surface consisting of small crystals with smooth reflecting surfaces. 
\begin{theorem}\label{j18.1} (i) The measure $\mathbb{P}_n$ (microcanonical ensemble formula) is invariant for the dynamical system defined above and two types of reflections: (a) totally elastic reflections between any pair of objects, (b) independent Lambertian reflections for point particles reflecting from the container walls (including the bottom) and totally elastic reflections between any other pair of objects. (ii) In case (b), $\mathbb{P}_n$ is the unique non-degenerate stationary probability distribution for the system of point particles and the macroscopic ball. The following are the only classes of ``degenerate'' invariant distributions: (1) Invariant distributions such that no point particle ever hits a wall of the container. (2) Invariant distributions such that at least one point particle has no energy, so that it is resting at the bottom of the container. \end{theorem} The proof of Theorem \ref{j18.1} will be given in Sections \ref{y19.4} and \ref{y19.8}. \begin{remark} (i) Part (ii) of Theorem \ref{j18.1} is not true under assumption (a); see Remark \ref{a1.1} (i). (ii) We call invariant distributions in (1) degenerate because Lambertian reflections are never activated and the system has no opportunity to mix. See Remark \ref{a1.1} (ii) for an example of such a distribution. (iii) Invariant distributions in (2) are degenerate because in this case the number of point particles is effectively less than $n$. \end{remark} We will often use integrals of the form $\int_{D\setminus\mathcal{B}((\widetilde x, y), R)} \lambda e^{-\lambda r} \rd x \rd r $. In cases like this (and similar), $\widetilde x$ should be interpreted as any point in $D_b'$, $\rd x$ will represent Lebesgue measure in $\mathbb{R}^{d-1}$, and $\rd r$ will represent Lebesgue measure in $\mathbb{R}$. Note that the integral does not depend on $\widetilde x$ as long as $\widetilde x \in D_b'$. \begin{theorem}\label{j24.1} Fix $d,D,R,M,m,g$ and $E$. 
Consider the equations \begin{align}\label{a20.2} &M = m\frac {\int_{\mathcal{B}((\widetilde x, y), R)} \lambda e^{-\lambda r} \rd x \rd r} {\int_{D\setminus\mathcal{B}((\widetilde x, y), R)} \lambda e^{-\lambda r} \rd x \rd r } ,\\ &\frac{dmg}{2\lambda} + mg \frac { \int_{D\setminus\mathcal{B}((\widetilde x, y), R)} r\lambda e^{-\lambda r} \rd x \rd r} { \int_{D\setminus\mathcal{B}((\widetilde x, y), R)} \lambda e^{-\lambda r} \rd x \rd r} + Mg y = E, \label{j27.10} \end{align} with unknowns $y\geq R$ and $\lambda>0$. (i) There exists a unique $\lambda_*$ which satisfies \eqref{j27.10} with $y=R$. (ii) Suppose that \begin{align}\label{a8.12} &M < m\frac {\int_{\mathcal{B}((\widetilde x, R), R)} \lambda_* e^{-\lambda_* r} \rd x \rd r} {\int_{D\setminus\mathcal{B}((\widetilde x, R), R)} \lambda_* e^{-\lambda_* r} \rd x \rd r }. \end{align} Then \eqref{a20.2}-\eqref{j27.10} have a unique solution that will be denoted $(y_{\mathrm{A}}, \lambda_A)$. Moreover $R< y_A< E/(Mg)$. \end{theorem} The proof of Theorem \ref{j24.1} will be given in Section \ref{y19.5}. For $\widetilde x \in D_b'$, $y'\geq R$ and $\lambda >0$, let $ \nu_{\widetilde x,y',\lambda}$ be the probability distribution defined by \begin{align}\label{j24.3} \nu_{\widetilde x,y',\lambda}(\rd x,\rd y)=\frac { \mathbf{1}_{D\setminus \mathcal{B}((\widetilde x,y'),R)}(x,y) \lambda e^{-\lambda y}} {\int_{D\setminus\mathcal{B}((\widetilde x, y'), R)} \lambda e^{-\lambda r} \rd z \rd r}\rd x\rd y , \qquad x\in D_b, y>0 . \end{align} Let $\delta_{(X_i, Y_i)}$ denote the probability measure on $D$ which consists of a single atom at $(X_i,Y_i)$. The normalized (probability) empirical distribution $\mathbb{Q}_n$ of the gas (point particles) is defined as \begin{align}\label{j13.1} \mathbb{Q}_n = \frac 1 n \sum_{i=1}^n \delta_{(X_i, Y_i)} . \end{align} \begin{theorem}\label{a20.4} (Archimedes' principle) Fix $d,D,R,M,m,g$ and $E$. 
Assume that the distribution of $(\mathbf{X}_{n+1},\mathbf{Y}_{n+1}, \mathbf{V}_{n+1})$ is $\mathbb{P}_n$. Recall the notation from Theorem \ref{j24.1} and assume that \eqref{a8.12} holds. (i) For every $\varepsilon>0$, \begin{align}\label{j8.1} \lim_{n\to \infty} \mathbb{P}_n( |Y_{n+1} -y_{\mathrm{A}}|>\varepsilon)=0. \end{align} (ii) The marginal distribution of $X_{n+1}$ under $\mathbb{P}_n$ is uniform in $D_b'$. Given $\{X_{n+1}=x\}$, the conditional distribution of $\mathbb{Q}_n$ converges to $\nu_{x,y_{\mathrm{A}},\lambda_A}$ weakly, in probability as $n\to \infty$. \end{theorem} The proof of Theorem \ref{a20.4} will be given in Section \ref{y19.7}. \begin{remark} (i) Theorem \ref{a20.4} is a mathematical representation of Archimedes' principle. Part (ii) of the theorem identifies the limiting empirical distribution of gas molecules when $n\to \infty$. The gas density is constant on the horizontal hyperplanes and it is exponential as a function of the height, outside the ball. Part (i) of Theorem \ref{a20.4} and \eqref{a20.2} say that the ball is likely to float at the height such that the weight of the ball is equal to the weight of the displaced gas (i.e., \eqref{a20.2} holds), assuming that the gas is distributed as in part (ii). (ii) We need an extra equation \eqref{j27.10} to identify $y_{\mathrm{A}}$ and $\lambda_A$ uniquely. The equation is an expression of additivity and conservation of energy. The three terms on the left hand side represent (asymptotically) the kinetic energy of the gas, the potential energy of the gas, and the potential energy of the ball. The kinetic energy of the ball is negligibly small asymptotically. The form of the first term on the left hand side of \eqref{j27.10}, i.e., $dmg/(2\lambda_A)$, representing the kinetic energy of the gas, is a manifestation of the virial theorem, which states how the energy is distributed between potential and kinetic forms. 
The virial theorem is by now a classical result, discovered by Clausius in 1870 (see, for example \cite{Collins}). Formula \eqref{j27.10} reduces to \begin{align*} \frac{dmg}{2\lambda_A} + \frac{mg} {\lambda_A} = E \end{align*} in the case when there is no ball, i.e., $R=0$ and $M=0$. In other words, the ratio of potential to kinetic energy is $2/d$ for pure gas. Alternatively one could say that $dmg/(2\lambda_A)$ represents the kinetic energy, hence the heat energy, because $\lambda_A$ can be identified with the inverse temperature (see \eqref{j15.5} for a precise formula). (iii) The heuristic meaning of the first claim of Theorem \ref{j24.1} is that there is a unique asymptotic distribution of gas if the ball rests at the bottom of the container. The second part says that if the weight of the ball is smaller than the weight of the gas displaced by the ball when it is placed at the bottom then, asymptotically, there is a unique level at which the ball will float and the corresponding unique distribution of the gas. \end{remark} \section{Local Central Limit Theorem}\label{y19.3} For any random variable $A$, let $f_A$ denote the density of $A$ (if it exists). Suppose that random variables $\xi_k$, $k\geq 1$, are i.i.d. with $\mathbb{E} \xi_k =0$, $\mathbb{E} \xi_k^2 =1$ and density $f_{\xi_k}$. Let $M_3 = \mathbb{E} |\xi_k|^3 $ and $S_n= \sum_{k=1}^n \xi_k/(\sqrt{n} \sigma)$. Let $\varphi(x)$ denote the standard normal density, i.e., $\varphi(x) = \frac 1 {\sqrt{2\pi}} e^{-x^2/2}$. The following result is Theorem 1 in \cite{SiraSaha}. \begin{lemma}\label{j10.2} There exists an absolute constant $C$ such that if $f_{\xi_k}(x) \leq C_1$ for all $x\in \mathbb{R}$ then for all $n\geq 1$, \begin{align}\label{j9.2} \sup_{x\in\mathbb{R}} \left| f_{S_n}(x) - \varphi(x)\right| \leq \frac {C M_3^2 \max(1, C_1^3)}{\sqrt{n} }.
\end{align} \end{lemma} \begin{remark} There is a considerable literature on the local central limit theorem but the results are hard to extract from that literature for a number of reasons, including absence of some journals from accessible libraries and lack of proofs in a number of publications. The bound in Lemma \ref{j10.2} is given in Remark 5, Section 4, Chapter VII in \cite{Petrov}. The bound is given there without a proof and it is attributed to \cite{Saha}. Unfortunately, \cite{Saha} does not contain a proof. A similar bound is a special case of Theorem 1 in \cite{Serva} but, once again, that paper does not contain a proof. One can derive Lemma \ref{j10.2} from Theorem 4 in \cite{Statu}. The book \cite{Petrov} contains several versions of the local central limit theorem, see, e.g., Theorem 15 in Chapter VII. However, in each of these theorems the error is given in the form $o(1/\sqrt{n})$. One could extract a bound of the form $c/\sqrt{n}$ with an explicit formula for $c$ depending on the moments of the summands from the proof but that would be a very tedious task. \end{remark} \section{Microcanonical ensemble formula}\label{y19.4} \begin{proof}[Proof of Theorem \ref{j18.1} (i)] The formula \eqref{j10.4} is a version of a well known ``microcanonical ensemble formula,'' see, e.g., \cite[Sect.~1.2]{Ruelle}. We could not find a rigorous proof that this distribution is invariant in the contemporary literature. A proof has been given in \cite{fermi}, a parallel project. The result proved in \cite[Prop. 5.3]{fermi} implies that \begin{equation*} f(\mathbf{x}_{n+1},\mathbf{y}_{n+1},\mathbf{v}_{n+1}):= \left(E-\sum_{i=1}^{n+1} m_iy_ig\right)^{((n+1)d-2)/2} \frac{ \rd \mu_{\mathbf{y}_{n+1}}}{\rd \mathbf{v}_{n+1}} \left(\frac{v_1}{\sqrt{m_1}},\ldots,\frac{v_{n+1}}{\sqrt{m_{n+1}}}\right) \end{equation*} is the density of an invariant measure on the whole space $\mathbb{R}^{2(n+1)d}$. 
We claim that this density restricted to $\mathbf{D}_n$ is invariant for our dynamical system with reflections. It suffices to show that the specular and Lambertian reflections between the point particles, the ball and the walls of the container leave the measure invariant. For the specular reflections, the proof is essentially identical to that of the proof of \cite[Prop. 2.3]{fermi}. The result can be extended to Lambertian reflections of point particles from the wall, as shown in \cite[Prop. 3.3, Remark 3.4]{fermi} (see the remarks preceding and following that result). \end{proof} \section{Phase space volume}\label{y19.5} We will use two different sets of notations for some quantities. While this may create some confusion, we will explain why this convention has some advantages. For $n=0,1,2,\ldots$, $\gamma>0$ and $y\geq R$ we let \begin{align} \label{eq:hn:def} \mathscr{h}_n=\mathscr{h}_n(\gamma,y) = \int_{D\setminus \mathcal{B}((\widetilde x,y),R)}\gamma r^ne^{-\gamma r}\rd x\rd r\/, \end{align} where $\widetilde x \in D_b'$. The definition of and notation for $\mathscr{h}_n$ lead to the following very simple formula, obtained using the dominated convergence theorem, \begin{align} \label{eq:hn:der} \dfrac{\partial \mathscr{h}_n}{\partial \gamma} = \frac{\mathscr{h}_n}{\gamma}-\mathscr{h}_{n+1}\/. \end{align} Some of the functions $\mathscr{h}_n$ have physical meaning, so we find it easier to memorize them if different notation is used in some cases.
Specifically, \begin{align}\label{j27.1} \mathscr{q}(\gamma,y) &= \int_{D\setminus \mathcal{B}((\widetilde x,y),R)} \gamma e^{-\gamma r} \rd x \rd r = \mathscr{h}_0(\gamma,y),\\ \mathscr{u}(\gamma,y) &= \frac 1 {\mathscr{q}(\gamma,y)} \int_{D\setminus\mathcal{B}((\widetilde x,y),R)} r\gamma e^{-\gamma r} \rd x \rd r = \frac{\mathscr{h}_1(\gamma,y)}{\mathscr{h}_0(\gamma,y)},\label{j28.2}\\ \mathscr{w}(\gamma,y) &= \int_{D\setminus \mathcal{B}((\widetilde x,y),R)} \gamma r^2 e^{-\gamma r} \rd x \rd r = \mathscr{h}_2(\gamma,y).\label{a1.5} \end{align} We will prove in Lemma \ref{j24.2} that for given $u>0$ and $y\geq R$, the equation $u=\mathscr{u}(\lambda,y)$ uniquely defines $\lambda=\lambda(u,y)$. We let \begin{align}\label{a3.1} h_n(u,y)&= \mathscr{h}_n(\lambda(u,y),y),\quad q(u,y)= \mathscr{q}(\lambda(u,y),y),\quad w(u,y)= \mathscr{w}(\lambda(u,y),y). \end{align} \begin{lemma}\label{y21.1} For all $y'\geq R$ and $\gamma>0$, \begin{align}\label{y22.3} &\frac 1 8 (1- e^{-1/2}) \leq\frac {\mathscr{q}(\gamma,y')} {|D_b|} \leq 1,\\ & \frac 1 {200} \leq \gamma \mathscr{u}(\gamma,y') \leq \frac 8 {1- e^{-1/2}}<21,\label{y22.4}\\ & 2^{-12} e^{-1} \leq \gamma^2 \left(\frac{\mathscr{w}(\gamma,y')}{\mathscr{q}(\gamma,y')} - \mathscr{u}(\gamma,y')^2\right) \leq \frac {16} {1- e^{-1/2}} ,\label{y22.5}\\ & 2^{-21}<\frac{1- e^{-1/2}} {16} \frac 1 {200^2} \leq \frac{\mathscr{u}(\gamma,y')^2}{\mathscr{w}(\gamma,y')/\mathscr{q}(\gamma,y') - \mathscr{u}(\gamma,y')^2} \leq \frac { 2^{18} e} {(1- e^{-1/2}) ^2} < 2^{23}.\label{y22.6} \end{align} \end{lemma} \begin{proof} Let $\mathcal{B}_*=\mathcal{B}((\widetilde x,y),R)$. Elementary geometry shows that if \begin{align*} A= \left\{ x\in D_b: |x- \widetilde x| < \frac{\sqrt{3}}2 R \right\} \end{align*} then $(D_b \setminus A ) \times (0,y'-R/2) \subset D\setminus \mathcal{B}_*$.
If $\sigma(r)$ denotes the volume of a $(d-1)$-dimensional ball then, using our assumption that $d\geq 2$, \begin{align}\label{y21.2} \frac{|D_b \setminus A|}{|D_b|} = 1 - \frac{| A|}{|D_b|} \geq 1 - \frac {\sigma\left(\sqrt{3}R/2\right)}{\sigma(R)} = 1 - \left(\frac {\sqrt{3}}{2}\right)^{d-1} \geq 1 - \frac {\sqrt{3}}{2} >\frac 1 8 . \end{align} We have \begin{align}\label{y21.3} \mathscr{q} = \int_{D\setminus \mathcal{B}_*} \gamma e^{-\gamma r} \rd x \rd r \geq \int_{D_b} \rd x \int_{y'+ R}^\infty \gamma e^{-\gamma r} \rd r = |D_b| \exp( - \gamma (y'+R)). \end{align} By \eqref{y21.2}, \begin{align}\label{y21.4} \mathscr{q} &= \int_{D\setminus \mathcal{B}_*} \gamma e^{-\gamma r} \rd x \rd r \geq \int_{D_b \setminus A} \rd x \int_0^{y'- R/2} \gamma e^{-\gamma r} \rd r = |D_b \setminus A|(1- \exp( - \gamma (y'-R/2)))\\ & \geq \frac 1 8 |D_b| (1- \exp( - \gamma (y'-R/2))).\notag \end{align} Recall that $y'\geq R$. If $\gamma \leq 1/y'$ then \eqref{y21.3} yields \begin{align}\label{y21.5} \mathscr{q} \geq |D_b| \exp( - \gamma (y'+R)) \geq |D_b| \exp( - (1/y') (y'+R)) = |D_b| \exp( - 1 - R/y') \geq |D_b| e^{-2}. \end{align} In the case when $\gamma \geq 1/y'$, we use \eqref{y21.4} to see that \begin{align*} \mathscr{q} &\geq \frac 1 8 |D_b| (1- \exp( - \gamma (y'-R/2))) \geq \frac 1 8 |D_b| (1- \exp( - (1/y') (y'-R/2)))\\ &= \frac 1 8 |D_b| (1- \exp( - 1 + R/(2y'))) \geq \frac 1 8 |D_b| (1- \exp( - 1 + 1/2)) = \frac 1 8 |D_b| (1- e^{-1/2}). \end{align*} Since $e^{-2} > \frac 1 8 (1- e^{-1/2})$, the above estimate and \eqref{y21.5} give \begin{align}\label{y22.7} \mathscr{q} \geq \frac 1 8 |D_b| (1- e^{-1/2}). \end{align} This proves the lower bound in \eqref{y22.3}. The upper bound follows directly from the definition of $\mathscr{q}$.
We use \eqref{y22.7} to derive the upper bound in \eqref{y22.4} as follows, \begin{align}\label{y21.6} \mathscr{u} &= \frac { \int_{D\setminus\mathcal{B}_*} r\gamma e^{-\gamma r} \rd x \rd r} {\int_{D\setminus \mathcal{B}_*} \gamma e^{-\gamma r} \rd x \rd r} = \frac 1 \mathscr{q} \int_{D\setminus\mathcal{B}_*} r\gamma e^{-\gamma r} \rd x \rd r \leq \frac 1 \mathscr{q} \int_{D} r\gamma e^{-\gamma r} \rd x \rd r\\ &= \frac 1 \mathscr{q} |D_b| \frac 1 \gamma \leq \frac 8 {(1- e^{-1/2}) \gamma}.\notag \end{align} Let $W=(W_1,W_2)$ be a random vector with the distribution $\nu= \nu_{\widetilde x,y',\gamma}$ defined in \eqref{j24.3}, where $W_1\in D_b$ and $W_2>0$. Note that \begin{align}\label{y22.2} \mathscr{w}/\mathscr{q} - \mathscr{u}^2 =\var W_2 . \end{align} In view of \eqref{y21.2} and \eqref{y22.3}, the marginal density $f_{W_2}(y)$ satisfies \begin{align}\label{y22.8} f_{W_2}(y) &\leq \frac {|D_b|} \mathscr{q} \gamma e^{-\gamma y} \leq \frac 8 {1- e^{-1/2}} \gamma e^{-\gamma y}, \qquad y >0,\\ \label{y21.7} f_{W_2}(y) &\geq \gamma e^{-\gamma y}, \qquad y \geq y'+R,\\ f_{W_2}(y) &\geq \frac 1 8 \gamma e^{-\gamma y}, \qquad y \leq y'-R/2. \label{y21.8} \end{align} It follows from \eqref{y22.8} that \begin{align*} \mathscr{w}/\mathscr{q} - \mathscr{u}^2 =\var W_2 \leq \mathbb{E} W_2^2 = \int_0^\infty y^2 f_{W_2}(y) \rd y \leq \int_0^\infty y^2 \frac 8 {1- e^{-1/2}} \gamma e^{-\gamma y} \rd y = \frac 8 {1- e^{-1/2}} \frac 2 {\gamma^2}. \end{align*} This gives the upper bound in \eqref{y22.5}. Suppose that $1/\gamma \leq y'$ and let $y'' = 1/\gamma$. Since $y'\geq R$ and $y''\leq y'$, we have $ y''/2 \leq y'-R/2$. 
Hence, we can apply \eqref{y21.8} to $y\in(0, y''/2)$ and obtain \begin{align}\label{y22.9} \mathscr{u} &= \mathbb{E} W_2 \geq \int_{0}^{y''/2} y f_{W_2}(y) \rd y \geq \int_{0}^{y''/2} y \frac 1 8 \gamma e^{-\gamma y} \rd y = \frac 1{8\gamma} (1-e^{-y'' \gamma/2} (y'' \gamma/2+1))\\ &= \frac 1{8\gamma} (1-(3/2)e^{-1/2} )> \frac 1 {200 \gamma}.\notag \end{align} Suppose that $1/\gamma \geq y'$ and let $y'' = 1/\gamma$. Since $y'\geq R$ and $y''\geq y'$, we have $2 y'' \geq y'+R$. Hence, we can apply \eqref{y21.7} to $y>2 y''$ and obtain \begin{align*} \mathscr{u} &= \mathbb{E} W_2 \geq \int_{2y''}^\infty y f_{W_2}(y) \rd y \geq \int_{2y''}^\infty y \gamma e^{-\gamma y} \rd y = \frac 1{\gamma} e^{-2y'' \gamma} (2y'' \gamma+1) = \frac 1{\gamma} 3e^{-2} > \frac 1 {3 \gamma}.\notag \end{align*} This and \eqref{y22.9} yield the lower bound in \eqref{y22.4}. Suppose that $1/\gamma \leq y'$ and let $y'' = 1/\gamma$. Then for any $a_0\in \mathbb{R}$ there exists an interval $(a_1,a_2)\subset (0, y''/2)$ such that $a_2-a_1 \geq y''/8$ and for any $y\in(a_1,a_2)$, we have $|y - a_0| \geq y''/8$. We will apply this observation with $a_0 = \mathbb{E} W_2$. Since $y'\geq R$ and $y''\leq y'$, we have $a_2 \leq y''/2 \leq y'-R/2$. Hence, we can apply \eqref{y21.8} to $y\in(a_1,a_2)$ and obtain \begin{align}\label{y22.1} \var W_2 &\geq \int_{a_1}^{a_2} |y - \mathbb{E} W_2| ^2 f_{W_2}(y) \rd y \geq \int_{a_1}^{a_2} (y''/8) ^2 \frac 1 8 \gamma e^{-\gamma y} \rd y \geq (a_2-a_1) (y''/8) ^2 \frac 1 8 \gamma e^{-\gamma a_2}\\ &\geq (y''/8) ^3 \frac 1 8 (1/y'') e^{-\gamma y''} = (y'')^2 2^{-12} e^{-1} = 2^{-12} e^{-1} \frac 1 {\gamma^2} .\notag \end{align} Next suppose that $1/\gamma \geq y'$ and let $y'' = 1/\gamma$. Then for any $b_0\in \mathbb{R}$ there exists an interval $(b_1,b_2)\subset (2 y'', 6y'')$ such that $b_2-b_1 \geq y''$ and for any $y\in(b_1,b_2)$, we have $|y - b_0| \geq y''$. We will apply this observation with $b_0 = \mathbb{E} W_2$. 
Since $y'\geq R$ and $y''\geq y'$, we have $b_1 \geq 2y'' \geq y'+R$. Hence, we can apply \eqref{y21.7} to $y\in(b_1,b_2)$ and obtain \begin{align*} \var W_2 &\geq \int_{b_1}^{b_2} |y - \mathbb{E} W_2| ^2 f_{W_2}(y) \rd y \geq \int_{b_1}^{b_2} (y'') ^2 \gamma e^{-\gamma y} \rd y \geq (b_2-b_1) (y'') ^2 \gamma e^{-\gamma b_2}\\ &\geq (y'') ^3 (1/y'') e^{-6\gamma y''} = (y'')^2 e^{-6} = e^{-6} \frac 1 {\gamma^2} . \end{align*} Since $e^{-6} > 2^{-12} e^{-1}$, the above estimate, \eqref{y22.2} and \eqref{y22.1} imply that \begin{align*} \mathscr{w}/\mathscr{q} - \mathscr{u}^2 =\var W_2 &\geq 2^{-12} e^{-1} \frac 1 {\gamma^2} . \end{align*} This yields the lower bound in \eqref{y22.5}. The bound in \eqref{y22.6} follows from \eqref{y22.4} and \eqref{y22.5}. \end{proof} \begin{lemma}\label{j24.2} (i) For any $y\geq R$, the function $\lambda \to \mathscr{u}(\lambda,y)$ is strictly decreasing. (ii) For every $u>0$ and $y\geq R$ there exists a unique $\lambda>0$ satisfying $ u = \mathscr{u}(\lambda,y)$. \end{lemma} \begin{proof} Recall $\mathscr{h}_n$ defined in \eqref{eq:hn:def} and formula \eqref{eq:hn:der}. We have \begin{align*} \dfrac{\partial }{\partial \lambda}\left(\frac { \int_{D\setminus\mathcal{B}_*} r\lambda e^{-\lambda r} \rd x \rd r} {\int_{D\setminus \mathcal{B}_*} \lambda e^{-\lambda r} \rd x \rd r}\right) = \dfrac{\partial }{\partial \lambda}\left(\frac{\mathscr{h}_1}{\mathscr{h}_0}\right) = \frac{\mathscr{h}_1^2-\mathscr{h}_2\mathscr{h}_0}{\mathscr{h}_0^2}\/, \end{align*} which is strictly negative by the Cauchy-Schwarz inequality and, consequently, $\lambda \to \mathscr{u}(\lambda,y) =\mathscr{h}_1(\lambda,y)/\mathscr{h}_0(\lambda,y)$ is strictly decreasing for every $y\geq R$. This proves (i). Note that \begin{align*} \mathscr{h}_1(\lambda,y) = \int_{D\setminus \mathcal{B}_*}\lambda r e^{-\lambda r}\rd x\rd r \leq \int_{D}\lambda r e^{-\lambda r}\rd x\rd r = \frac {|D_b|} \lambda. 
\end{align*} This and \eqref{y22.3} imply that \begin{align*} \mathscr{u}(\lambda,y)= \frac{\mathscr{h}_1(\lambda,y)}{\mathscr{h}_0(\lambda,y)} &\leq \frac {|D_b|} {\lambda \mathscr{q}} \leq \frac{8}{\lambda(1-e^{-1/2})}\to 0\/, \quad \text{ as }\lambda\to\infty. \end{align*} On the other hand, using \eqref{y22.3} again, when $\lambda \to 0$, \begin{align*} \mathscr{u}(\lambda,y)= \frac{\mathscr{h}_1(\lambda,y)}{\mathscr{h}_0(\lambda,y)} & = \frac {\mathscr{h}_1(\lambda,y)}{\mathscr{q}(\lambda,y)}\geq \frac{|D_b|}{\mathscr{q}} \int_{y+R}^\infty \lambda re^{-\lambda r}\rd r \geq \frac{1}{\lambda} \int_{\lambda(y+R)}^\infty se^{-s}\rd s \to \infty\/. \end{align*} We conclude that for given $u>0$ and $y\geq R$ there exists a unique $\lambda=\lambda(u,y)>0$ such that $ u = \mathscr{u}(\lambda,y)$ holds. \end{proof} \begin{lemma} \label{lem:der} Recall that $\lambda(u,y)$ denotes the solution to $ u = \mathscr{u}(\lambda,y)$, and recall \eqref{j27.1}-\eqref{a3.1}. We have \begin{align} \label{eq:l:der:u} \dfrac{\partial \lambda}{\partial u} &= \frac{q}{qu^2-w}\/,\\ \label{eq:l:der:y} \dfrac{\partial \lambda}{\partial y} &= \dfrac{\lambda u |D_b|-q}{qu^2-w}\/,\\ \label{eq:p:der:u} \dfrac{\partial q}{\partial u} &= \frac{q}{\lambda}(1-\lambda u)\dfrac{\partial \lambda}{\partial u}\/,\\ \label{eq:p:der:y} \dfrac{\partial q}{\partial y} &= \frac{q}{\lambda}(1-\lambda u)\dfrac{\partial \lambda}{\partial y}+\lambda (|D_b|-q)\/,\\ \label{eq:pel:der:u} \dfrac{\partial }{\partial u}\left(\frac{qe^{\lambda u}}{\lambda}\right) &= qe^{\lambda u}\/. \end{align} \end{lemma} \begin{proof} Comparing \eqref{eq:hn:def} and \eqref{j27.1}-\eqref{a3.1}, we see that $q=h_0$, $uh_0=h_1$ and $w=h_2$. 
Using \eqref{eq:hn:der} and differentiating the identity \begin{align} \label{eq:h01:rel} u\mathscr{h}_0(\lambda(u,y),y) = \mathscr{h}_1(\lambda(u,y),y) \end{align} with respect to $u$ we obtain \begin{align*} &\mathscr{h}_0+u\left(\frac{\mathscr{h}_0}{\lambda}-\mathscr{h}_1\right)\dfrac{\partial \lambda}{\partial u} = \left(\frac{\mathscr{h}_1}{\lambda}-\mathscr{h}_2\right)\dfrac{\partial \lambda}{\partial u},\\ &\dfrac{\partial \lambda}{\partial u} = \mathscr{h}_0\left(\frac{\mathscr{h}_1}{\lambda}-\mathscr{h}_2-\frac{u\mathscr{h}_0}{\lambda}+u\mathscr{h}_1\right)^{-1} = \frac{q}{qu^2-w}\/, \end{align*} where we used the facts that $w=h_2$ and $uq=uh_0=h_1$. This proves \eqref{eq:l:der:u}. We prove \eqref{eq:p:der:u} as follows, \begin{align*} \dfrac{\partial q}{\partial u} = \dfrac{\partial \mathscr{h}_0}{\partial \lambda}\dfrac{\partial \lambda}{\partial u} = \left(\frac{\mathscr{h}_0}{\lambda}-\mathscr{h}_1\right)\dfrac{\partial \lambda}{\partial u} = \frac{q}{\lambda}(1-\lambda u)\dfrac{\partial \lambda}{\partial u}\/. \end{align*} We use the above formula in the following proof of \eqref{eq:pel:der:u}, \begin{equation*} \dfrac{\partial}{\partial u}\left(\frac{qe^{\lambda u}}{\lambda}\right) =\frac{e^{\lambda u}}{\lambda^2}\left(q(1-\lambda u)\dfrac{\partial \lambda}{\partial u}+q\lambda \left(\lambda+u\dfrac{\partial \lambda}{\partial u}\right)-q\dfrac{\partial \lambda}{\partial u}\right)=qe^{\lambda u}\/. 
\end{equation*} Since $\int_0^\infty \lambda r^n e^{-\lambda r} \rd r = n!/\lambda^n$ and the volume of the $(d-1)$-dimensional sphere with radius $\rho$ is $\pi^{(d-1)/2} \rho^{d-1}/ \Gamma(\frac{d+1}{2})$, \begin{align*} \mathscr{h}_n(\lambda,y) &= \frac{|D_b|n!}{\lambda^n} - \frac{\pi^{(d-1)/2}}{\Gamma(\frac{d+1}{2})}\int_{y-R}^{y+R}\lambda r^ne^{-\lambda r}(R^2-(y-r)^2)^{(d-1)/2}\rd r\\ &= \frac{|D_b|n!}{\lambda^n} - \frac{\pi^{(d-1)/2}}{\Gamma(\frac{d+1}{2})}e^{-\lambda y}\int_{-R}^{R}\lambda (s+y)^ne^{-\lambda s}(R^2-s^2)^{(d-1)/2}\rd s. \end{align*} Consequently, by the dominated convergence theorem, we obtain \begin{align} \dfrac{\partial \mathscr{h}_n}{\partial y} &= \lambda\left(\frac{|D_b|n!}{\lambda^n}-\mathscr{h}_n\right)-n\left(\frac{|D_b|(n-1)!}{\lambda^{n-1}}-\mathscr{h}_{n-1}\right) . \end{align} In particular, \begin{align}\label{a1.6} \dfrac{\partial \mathscr{h}_0}{\partial y} = \lambda(|D_b|-\mathscr{h}_0)\/,\quad \dfrac{\partial \mathscr{h}_1}{\partial y} = (|D_b|-\lambda \mathscr{h}_1)-(|D_b|-\mathscr{h}_0) = \mathscr{h}_0 -\lambda \mathscr{h}_1\/. \end{align} Differentiation of \eqref{eq:h01:rel} with respect to $y$ gives \begin{align*} &u\left(\dfrac{\partial \mathscr{h}_0}{\partial \lambda}\dfrac{\partial \lambda}{\partial y}+\dfrac{\partial \mathscr{h}_0}{\partial y}\right) = \dfrac{\partial \mathscr{h}_1}{\partial \lambda}\dfrac{\partial \lambda}{\partial y}+\dfrac{\partial \mathscr{h}_1}{\partial y},\\ &\dfrac{\partial \lambda}{\partial y} = \left(\dfrac{\partial \mathscr{h}_1}{\partial y}-u\dfrac{\partial \mathscr{h}_0}{\partial y}\right)\left(u\dfrac{\partial \mathscr{h}_0}{\partial \lambda}-\dfrac{\partial \mathscr{h}_1}{\partial \lambda}\right)^{-1} \/. \end{align*} We now use \eqref{eq:hn:der} and \eqref{a1.6} to see that \begin{align*} &\dfrac{\partial \lambda}{\partial y} = \frac{\lambda u |D_b|-q}{qu^2-w}\/. 
\end{align*} This proves \eqref{eq:l:der:y}. We apply \eqref{eq:hn:der} and \eqref{a1.6} once again to prove \eqref{eq:p:der:y}, \begin{align*} \dfrac{\partial q}{\partial y} = \dfrac{\partial \mathscr{h}_0}{\partial \lambda}\dfrac{\partial \lambda}{\partial y}+\dfrac{\partial \mathscr{h}_0}{\partial y} = \frac{q}{\lambda}(1-\lambda u)\dfrac{\partial \lambda}{\partial y}+\lambda(|D_b|-q)\/. \end{align*} \end{proof} \begin{proof}[Proof of Theorem \ref{j24.1}] In view of \eqref{j27.1}-\eqref{j28.2}, the equations \eqref{a20.2}-\eqref{j27.10} can be written in this form, \begin{align}\label{j28.3} &M = K(\lambda,y) := m\frac {|D_b| - \mathscr{q}( \lambda,y)} {\mathscr{q}( \lambda,y) } = m\left(\frac{|D_b| }{ \mathscr{q}( \lambda,y)} -1\right) ,\\ &G(\lambda,y):=\frac{dmg}{2\lambda} + mg \mathscr{u}(\lambda,y) + Mg y = E. \label{j28.4} \end{align} It is easy to see that all partial derivatives of any order of the functions $K(\lambda,y)$ and $G(\lambda,y)$ exist and are continuous. Consider any $y\geq R$ such that $Mgy < E$. For sufficiently small $\lambda >0$, $dmg/(2\lambda) >E$, so for some $\lambda>0$, $G(\lambda,R) >E$. Our assumption that $Mgy < E$ and \eqref{y22.4} imply that for very large $\lambda$, $G(\lambda,R ) < E$. By continuity of $G(\lambda,y)$, there exists $\lambda$ such that $G(\lambda,y) = E$. Let $\lambda_y$ denote the smallest $\lambda$ with this property. Part (i) of the lemma holds true because we have assumed that $MgR < E$. Therefore, we can take $\lambda_*=\lambda_R$. We have $\lim_{y \uparrow E/(Mg)} \lambda _y = \infty$ because the term $Mgy$ in the formula for $G(\lambda,y)$ approaches $E$, so the first term, $dmg/(2 \lambda_y)$, must go to 0. By the assumptions of part (ii) of the lemma, $K( \lambda_R,R)=K( \lambda_*,R) >M$. It is easy to see that the function $q(\lambda,y)$ converges to $|D_b|$ when $\lambda \to \infty$, no matter how $y$ and $\lambda$ are related. 
Hence $\lim_{y\uparrow E/(Mg)} K(\lambda_y,y) =0$. By continuity of $K(\lambda,y)$, there exists $y$ such that $K(\lambda_{y},y)=M$. Let $y_A$ be the smallest $y$ with this property and let $\lambda_A = \lambda_{y_A}$. Note that $R < y_A < E/(Mg)$. It remains to prove uniqueness of the solution $(y,\lambda)$ to \eqref{a20.2}-\eqref{j27.10}. Let $z(y) = \frac{E-Mgy}{mg}$. By Lemma \ref{j24.2}, the function $$ \kappa_y(u) := u+\dfrac{d}{2\lambda(u,y)}-z(y) $$ is a strictly increasing function of $u$. By \eqref{y22.4}, $\kappa_y(0^+)=-z(y)$ and $\kappa_y(z(y))> 0$, so for every given $y\geq R$ there exists a unique $u=u(y)$ such that \begin{align}\label{a3.2} u(y)=z(y)-\dfrac{d}{2\lambda(u(y),y)}. \end{align} Comparing this formula to \eqref{j28.4}, we see that we must have $\mathscr{u}(\lambda,y) = u(y)$. It will suffice to show that there is at most one $y$ such that $\mathscr{q}(\lambda(u(y),y),y)=q(u(y),y)$ satisfies \eqref{j28.3}. Assuming that $u(y)$ satisfies \eqref{a3.2}, $$ \dfrac{\rd u}{\rd y} = -\frac{M}{m}+\frac{d}{2\lambda^2}\left(\dfrac{\partial \lambda}{\partial u}\dfrac{\partial u}{\partial y}+\dfrac{\partial \lambda}{\partial y}\right). 
$$ Hence, \begin{align}\label{a7.10} & \dfrac{\rd u}{\rd y}\left(1-\frac{d}{2\lambda^2}\dfrac{\partial \lambda}{\partial u}\right) = -\frac{M}{m}+\frac{d}{2\lambda^2}\dfrac{\partial \lambda}{\partial y},\\ & \dfrac{\rd u}{\rd y} = \dfrac{-\dfrac{M}{m}+\dfrac{d}{2\lambda^2}\dfrac{\partial \lambda}{\partial y}}{1-\dfrac{d}{2\lambda^2}\dfrac{\partial \lambda}{\partial u}}.\notag \end{align} We use this formula and \eqref{eq:p:der:u}-\eqref{eq:p:der:y} to see that \begin{align}\label{j31.2} \dfrac{\rd }{\rd y}q(u(y),y) &= \dfrac{\partial q}{\partial u}\dfrac{\partial u}{\partial y}+\dfrac{\partial q}{\partial y}= \frac{q}{\lambda}(1-\lambda u) \left(\dfrac{\partial \lambda}{\partial u}\dfrac{\rd u}{\rd y}+\dfrac{\partial \lambda}{\partial y}\right)+\lambda(|D_b|-q)\\ &= \frac{q}{\lambda}(1-\lambda u) \left(\dfrac{\partial \lambda}{\partial u}\dfrac{-\dfrac{M}{m}+\dfrac{d}{2\lambda^2}\dfrac{\partial \lambda}{\partial y}}{1-\dfrac{d}{2\lambda^2}\dfrac{\partial \lambda}{\partial u}}+\dfrac{\partial \lambda}{\partial y}\right)+\lambda(|D_b|-q).\notag \end{align} Lemma \ref{j24.2} (i) implies that $\partial \lambda/ \partial u <0$ so $1-\frac{d}{2\lambda^2}\frac{ \partial \lambda}{ \partial u}>0$. Since we are interested only in the sign of $\rd q/\rd y$, it will suffice to analyze $A := \frac{\rd q}{\rd y}\left(1-\frac{d}{2\lambda^2}\frac{ \partial \lambda}{ \partial u}\right)$. 
Multiplying both sides of \eqref{j31.2} by $1-\frac{d}{2\lambda^2}\frac{ \partial \lambda}{ \partial u}$, we obtain \begin{align*} &A=\frac{q}{\lambda}(1-\lambda u) \left(\dfrac{ \partial \lambda}{ \partial u}\left(-\frac{M}{m}+\dfrac{d}{2\lambda^2}\dfrac{ \partial \lambda}{ \partial y}\right)+\dfrac{ \partial \lambda}{ \partial y}\left(1-\dfrac{d}{2\lambda^2}\dfrac{ \partial \lambda}{ \partial u}\right)\right)+\lambda(|D_b|-q)\left(1-\dfrac{d}{2\lambda^2}\dfrac{ \partial \lambda}{ \partial u}\right)\\ &= \frac{q}{\lambda}(1-\lambda u)\left(-\frac{M}{m}\dfrac{ \partial \lambda}{ \partial u}+\dfrac{ \partial \lambda}{ \partial y}\right)+\lambda(|D_b|-q)\left(1-\dfrac{d}{2\lambda^2}\dfrac{ \partial \lambda}{ \partial u}\right)\\ &= \frac{q}{\lambda(w-qu^2)}\left((1-\lambda u)\left(\frac{Mq}{m}+q-\lambda u |D_b|\right)+\frac{d}{2}(|D_b|-q)\right)+\lambda(|D_b|-q)\\ &=\frac{q|D_b|}{\lambda(w-qu^2)}\left((1-\lambda u)^2+(1-\lambda u)\left(\frac{q}{|D_b|}(1+M/m)-1\right)+\frac{d}{2}\left(1-\frac{q}{|D_b|}\right)\right)+\lambda(|D_b|-q). \end{align*} If $(q/|D_b|)(1+M/m)-1=0$ then \begin{align*} A=\frac{q|D_b|}{\lambda(w-qu^2)}\left((1-\lambda u)^2+\frac{d}{2}\left(1-\frac{q}{|D_b|}\right)\right)+\lambda(|D_b|-q). \end{align*} According to \eqref{y22.2}, $w-qu^2 >0$. Since we assume that $R>0$, $|D_b|-q>0$. It follows that $A>0$ and, therefore, $\rd q/\rd y>0$ if $(q/|D_b|)(1+M/m)-1=0$. In other words, $\rd q/\rd y>0$ whenever $q(u(y),y) = |D_b|/(1+M/m)$. A smooth function cannot cross a level multiple times if its derivative is strictly positive at every crossing point. We have proved that there is at most one $y$ such that $q(u(y),y)$ satisfies \eqref{j28.3}. 
\end{proof} Recall \eqref{a1.4} and for $u>0$, $y\geq R$ and $x\in D_b'$ let \begin{align}\notag \mathbf{D}_n(y,u) &=\Bigg \{( \mathbf{x}_n,x_{n+1},\mathbf{y}_n)\in D_b^{n}\times D_b' \times \mathbb{R}^n: \frac 1 n \sum_{i=1}^n y_i = u,\\ &\qquad ( x_k, y_k) \in D\setminus \mathcal{B}((x_{n+1},y),R), \ k=1,\dots n\Bigg\}, \notag\\ \mathbf{D}_n(x,y,u) &=\Bigg \{( \mathbf{x}_n,\mathbf{y}_n)\in D_b^{n} \times \mathbb{R}^n: \frac 1 n \sum_{i=1}^n y_i = u,\label{j13.2}\\ &\qquad ( x_k, y_k) \in D\setminus \mathcal{B}((x,y),R), \ k=1,\dots n\Bigg\} .\notag \end{align} \begin{proposition}\label{j11.2} Consider $u>0$, $y\geq R$ and $x\in D_b'$. Let $\Vol_n(x,y,u)$ be the $(nd-1)$-dimensional volume of $\mathbf{D}_n(x,y,u)$ and $\Vol_n(y,u)$ be the $((n+1)d-2)$-dimensional volume of $\mathbf{D}_n(y,u)$. Let $\lambda=\lambda(u,y)>0$ be the solution to $ u = \mathscr{u}(\lambda,y)$ and recall $q=q(u,y)$ defined in \eqref{j27.1} and \eqref{a3.1}. Then for some absolute constants $0<c_1,c_2,c_3,c_4<\infty$ and $n_1$, for all $n\geq n_1$, \begin{align}\label{j19.1} c_1|D_b| n^{1-n} \left( \frac{ q e^{\lambda u}}{\lambda}\right)^{n-1}\leq \Vol_n(x,y,u) &\leq c_2|D_b| n^{1-n} \left( \frac{ q e^{\lambda u}}{\lambda}\right)^{n-1}, \\ c_3 |D_b'|\cdot|D_b| n^{1-n} \left( \frac{ q e^{\lambda u}}{\lambda}\right)^{n-1}\leq \Vol_n(y,u) &\leq c_4 |D_b'|\cdot|D_b| n^{1-n} \left( \frac{ q e^{\lambda u}}{\lambda}\right)^{n-1}.\label{j10.6} \end{align} \end{proposition} \begin{proof} First note that \eqref{j10.6} is an immediate corollary of \eqref{j19.1} so it will suffice to prove \eqref{j19.1}. Let $\mathcal{B}_*=\mathcal{B}((x,y),R)$, \begin{align*} H &=\{(z_1,t_1, \dots,z_n, t_n):( t_1+ \dots + t_n)/n=u; t_k>0, z_k \in D_b,\text{ for } 1\leq k \leq n\},\\ H_*&=H \cap (D\setminus \mathcal{B}_*)^n . \end{align*} Let $ (Z^{1 }_1,T^{1 }_1),\ldots,(Z^{1 }_{n},T^{1 }_n)$ be i.i.d., with $Z^{1 }_i \in \mathbb{R}^{d-1}$ and $T^{1 }_i >0$ for $1\leq i \leq n$. 
Assume that $T^{1 }_i$ and $Z^{1 }_i$ are independent, $Z^{1 }_i$ has the uniform distribution in $D_b$, and $T^{1 }_i$ has the exponential distribution with parameter $\lambda$. Let $f_1(z_1,t_1, \dots,z_n, t_n)$ be the density of $\left((Z^{1 }_1,T^{1 }_1),\ldots,(Z^{1 }_{n},T^{1 }_n)\right)$. We have \begin{align}\label{j23.3} f_1(z_1,t_1, \dots,z_n, t_n) = |D_b|^{-n} \lambda ^n \exp(-\lambda(t_1+ \dots + t_n)), \end{align} for $z_i \in D_b$, $t_i >0$, $1\leq i \leq n$. Note that the density $f_1$ is constant on $H$. Let $(Z^{2 }_1,T^{2 }_1),\ldots,(Z^{2 }_{n},T^{2 }_n)$ be i.i.d., with $( Z^{2 }_i, T^{2 }_i)$ being distributed as $( Z^{1 }_i, T^{1 }_i)$ conditioned by $\{(Z^{1 }_i,T^{1 }_i)\notin \mathcal{B}_*\}$. By the definition of $q$ and \eqref{j23.3}, the density $f_2$ of $((Z^{2 }_1,T^{2 }_1),\ldots,(Z^{2 }_{n},T^{2 }_n))$ is given by \begin{align}\label{j23.4} f_2(z_1,t_1, \dots,z_n, t_n) = \left(\frac{q}{|D_b|}\right)^{-n} |D_b|^{-n} \lambda^n \exp(-\lambda(t_1+ \dots + t_n)) \end{align} on the set $(D\setminus \mathcal{B}_*)^n$. Let $S^1_n = \frac 1 n \sum_{k=1}^n T^1_k = \sum_{k=1}^n (T^1_k/n)$. The distribution of $T^1_k/n$ is exponential with mean $(n\lambda)^{-1}$, so the distribution of $S^1_n$ is gamma with the density $f_{S^1_n}(s) =(( n\lambda)^n/\Gamma(n)) s^{n-1} e^{-n\lambda s}$. Hence, \begin{align}\label{j23.5} f_{S^1_n}(u) = \frac{( n\lambda)^n}{\Gamma(n)} u^{n-1} e^{-n\lambda u}. \end{align} Let $S^2_n = \frac 1 n \sum_{k=1}^n T^2_k $ and $T^3_j = T^2_j -u$ for $1\leq j \leq n$. It follows from the fact that $ u = \mathscr{u}(\lambda,y)$ that $\mathbb{E} T^2_j=u$ and $\mathbb{E} T^3_j=0$. Let $\sigma^2 = \var T^2_j=\var T^3_j$ and $S^3_n = \frac 1 {n^{1/2}\sigma} \sum_{k=1}^n T^3_k =n^{1/2}\sigma^{-1}(S_n^2-u) $. We have \begin{align*} f_{S_n^2}(s) = \sigma^{-1} n^{1/2} f_{S_n^3}(\sigma^{-1} n^{1/2}(s-u)). 
\end{align*} By Lemma \ref{j10.2}, \begin{align}\label{j23.6} f_{S_n^2}(u) =\sigma^{-1} n^{1/2} \left( \frac 1 {\sqrt{2 \pi}} +A\right), \end{align} where \begin{align}\label{y22.10} |A| &\leq \frac {C \left(\mathbb{E} |T^3_j/\sigma|^3\right)^2 \max(1, C_1^3)}{\sqrt{n} },\\ C_1 &= \sup_{u\in \mathbb{R}} f_{T^3_j/\sigma}(u).\label{y22.11} \end{align} In view of \eqref{j23.3}, \eqref{j23.4}, \eqref{j23.5} and \eqref{j23.6}, \begin{align}\label{j23.8} \frac{\Vol(H_*)}{\Vol(H)} = \frac{q^n f_{S_n^2}(u)}{|D_b|^n f_{S_n^1}(u)} = \frac{q^n\sigma^{-1} n^{1/2} \left( \frac 1 {\sqrt{2 \pi}} +A\right)}{|D_b|^n \frac{( n\lambda)^n}{\Gamma(n)} u^{n-1} e^{-n\lambda u}}. \end{align} The volume of a regular $n$-simplex with unit side length is $\sqrt{n}/((n-1)! 2^{(n-1)/2})$. The volume of a regular $n$-simplex with the side length $\sqrt{2}$ is $(\sqrt{2})^{n-1}\sqrt{n}/((n-1)! 2^{(n-1)/2}) =\sqrt{n}/(n-1)! $. Hence $\Vol(H) = |D_b|^n u^{n-1} \sqrt{n}/(n-1)!$. This and \eqref{j23.8} imply that \begin{align}\label{y22.12} \Vol(H_*) &=\Vol(H) \frac{q^n\sigma^{-1} n^{1/2} \left( \frac 1 {\sqrt{2 \pi}} +A\right)}{|D_b|^n\frac{( n\lambda)^n}{\Gamma(n)} u^{n-1} e^{-n\lambda u}} =\frac{|D_b|^n u^{n-1} \sqrt{n}}{(n-1)! }\frac{q^n\sigma^{-1} n^{1/2} \left( \frac 1 {\sqrt{2 \pi}} +A\right)}{|D_b|^n\frac{( n\lambda)^n}{\Gamma(n)} u^{n-1} e^{-n\lambda u}}\\ &= \frac{ q^n\sigma^{-1} n \left( \frac 1 {\sqrt{2 \pi}} +A\right)}{( n\lambda)^n e^{-n\lambda u}} =\left( \frac{ q e^{\lambda u}}{\lambda}\right)^n \left[ n^{1-n}\sigma^{-1} \left( \frac 1 {\sqrt{2 \pi}} +A\right)\right].\notag \end{align} In this proof, for any functions $a$ and $b$ of any number of variables, we will write $a\approx b$ to indicate that there exist universal constants $0< c', c'' < \infty$ such that $c' \leq a/b \leq c''$ for all values of the arguments. 
By Lemma \ref{y21.1}, $q \approx |D_b|$, $\lambda u \approx 1$ and $\lambda \sigma = \lambda \sqrt{w/q - u^2} \approx 1$, so \eqref{y22.12} implies that \begin{align}\label{a12.1} \Vol(H_*)\approx \left( \frac{ q e^{\lambda u}}{\lambda}\right)^{n-1} \left[|D_b| n^{1-n} \left( \frac 1 {\sqrt{2 \pi}} +A\right)\right]. \end{align} Next we will find an upper bound for $A$ defined in \eqref{j23.6} using \eqref{y22.10}-\eqref{y22.11}. We use the bound $f_{T_j^2}(y) \leq \frac{|D_b|}{q}\lambda e^{-\lambda y}$ and the substitution $\lambda r = s$ to obtain \begin{equation*} \mathbb{E} |T^3_j/\sigma|^3 \leq \sigma^{-3}\frac{|D_b|}{q}\int_0^\infty |r-u|^3\lambda e^{-\lambda r}\rd r = \frac{1}{(\lambda \sigma)^3} \frac{|D_b|}{q} \int_0^\infty |s-u\lambda|^ 3 e^{-s}\rd s. \end{equation*} We have already pointed out that $\lambda \sigma \approx 1$, $|D_b|/q\approx 1$ and $u \lambda \approx 1$. The last formula shows that $\mathbb{E} |T^3_j/\sigma|^3 \leq c_5< \infty$, where $c_5$ is a universal constant. Recall that $\lambda \sigma \approx 1$ and $|D_b|/q\approx 1$ to see that for some universal constant $c_6 < \infty$, \begin{equation*} f_{T_j^3/\sigma}(s) \leq q^{-1}\sigma\lambda e^{-\lambda(s\sigma + u)} \leq \sigma\lambda q^{-1}\leq \frac{c_6}{|D_b|}. \end{equation*} This bound and $\mathbb{E} |T^3_j/\sigma|^3 \leq c_5$ imply that $|A| \leq c_7/\left(\sqrt{n}\min(1,|D_b|^3)\right)$ where $c_7$ is an absolute constant. Consider the case $|D_b|= 1$. Then $|A| \leq c_7/\sqrt{n}$. Let $n_1$ be so large that for $n\geq n_1$, \begin{align*} \frac 1 {2\sqrt{2 \pi}} < \frac 1 {\sqrt{2 \pi}} + \frac {c_7}{\sqrt{n}} <\frac 2 {\sqrt{2 \pi}}. \end{align*} This and \eqref{a12.1} prove \eqref{j19.1} in the case $|D_b| = 1$. For $|D_b|\ne 1$, we use scaling. If $x,y, D, R$ and $u$ are multiplied by $c_*>0$, it is elementary to verify that $\Vol_n(x,y,u)$, $q$, $\lambda$ and $u$ are rescaled by powers of $c_*$ such that \eqref{j19.1} remains true. 
\end{proof} \begin{remark} We believe that Proposition \ref{j11.2} holds for all $n$. One could prove the bounds for $n<n_1$ using elementary estimates similar to those in the proof of Lemma \ref{y21.1}. We do not provide a proof because it is not needed for our main theorem. \end{remark} \section{Archimedes' principle}\label{y19.6} In this section, for any functions $a_1(\,\cdot \,)$ and $a_2(\,\cdot \,)$ of any arguments we will write \begin{align}\label{a5.1} a_1(\,\cdot \,) \approx a_2(\,\cdot \,)\qquad \Longleftrightarrow \qquad \exists\, 0< c_1, c_2<\infty: c_1 a_1(\,\cdot \,) \leq a_2(\,\cdot \,) \leq c_2 a_1(\,\cdot \,). \end{align} The constants $c_1$ and $c_2$ may depend only on ``fixed'' parameters in our model: $d,D,D_b,R,M,m,g$ and $E$. \begin{proposition}\label{j13.3} Recall the notation from Theorem \ref{j24.1} and assume that \eqref{a8.12} holds. Recall \eqref{j28.2} and let $u_A = \mathscr{u}(\lambda_A,y_A)$. For every $\varepsilon >0$, \begin{align*} \lim_{n\to\infty} \mathbb{P}_n \left(|Y_{n+1} - y_A|> \varepsilon \text{ or } \left| \frac 1 n \sum_{i=1}^n Y_i - u_A\right| >\varepsilon\right) =0. \end{align*} \end{proposition} \begin{proof} Recall notation from \eqref{eq:DnEy:defn}. The following formula for the marginal density $ f_{Y_{n+1}}(y)$ of $Y_{n+1}$ follows from \eqref{j10.4}, \begin{equation}\label{a8.8} f_{Y_{n+1}}(y) = \frac{1}{Z_n}\int_{\mathbf{D}_nEy}\left(E-mg\frac{1}{n}\sum_{i=1}^n y_i-Mgy\right)^{((n+1)d-2)/2}\rd \mathbf{x}_{n+1}\rd \mathbf{y}_{n}\/, \end{equation} where $Z_n$ is the normalizing constant. Let $z(y)=(E-Mgy)/(mg)$ and note that $z(y) >0$ for $y\in[R, E/(Mg))$ because we have assumed that $E> MgR$. Recall notation from Proposition \ref{j11.2}. 
The proposition implies that \begin{align} f_{Y_{n+1}}(y) &= \frac{1}{Z_n}\int_0^{z(y)}\left(E-Mgy-mgu\right)^{((n+1)d-2)/2} \Vol_n(y,u)\rd u \label{a8.9}\\ &\approx \frac{1}{Z_n'}|D_b| n^{1-n} \int_0^{z(y)}(z(y)-u)^{((n+1)d-2)/2} \left(\frac{q(u,y)e^{\lambda(u,y) u}}{\lambda(u,y)}\right)^{n-1}\rd u\label{eq:fY:int} \end{align} where \begin{equation*} Z_n' = |D_b| n^{1-n} \int_R^{E/(Mg)}\rd y\int_0^{z(y)}(z(y)-u)^{((n+1)d-2)/2} \left(\frac{q(u,y)e^{\lambda(u,y) u}}{\lambda(u,y)}\right)^{n-1}\rd u\/. \end{equation*} The integral in \eqref{eq:fY:int} can be written as \begin{equation} \label{eq:integral} \int_0^{z(y)}\alpha_y^{n-1}(u) (z(y)-u)^{d-1}\rd u\/, \end{equation} where $\alpha_y(u) = (z(y)-u)^{d/2}q e^{\lambda u}/\lambda$. Let \begin{align}\label{a6.4} \beta_y(u) = \lambda(u,y)-\frac d{2 (z(y)-u)}. \end{align} By \eqref{eq:pel:der:u}, \begin{equation} \label{eq:hy:der} \dfrac{\partial}{\partial u}\alpha_y(u) = (z(y)-u)^{d/2}\frac{qe^{\lambda u}}{\lambda}\left(\lambda-\frac{d}{2(z(y)-u)}\right) = \alpha_y(u)\beta_y(u)\/. \end{equation} By Lemma \ref{j24.2}, the function $u\to \lambda(u,y)$ is strictly decreasing. Hence $\beta_y(u) $ is strictly decreasing. When $u\downarrow 0$, $\lambda \to \infty$ by \eqref{y22.4}. Thus $\beta_y(0^+)=\infty$ for $y\in[R, E/(Mg))$. The estimate \eqref{y22.4} implies that $\beta_y(z(y)^-)=-\infty$. We conclude that there is a unique $u_0=u_0(y)\in (0,z(y))$ such that $\beta_y(u_0)=0$, $\beta_y(u)$ is positive on $(0,u_0)$ and negative on $(u_0,z(y))$. In view of \eqref{eq:hy:der}, $\alpha_y(u)$ attains its only maximum at $u_0$ on the interval $(0,z(y))$, and the maximum is strict. Note that \begin{equation}\label{a6.5} \beta_y(u) = \beta_y(u)-\beta_y(u_0) = \lambda(u,y)-\lambda(u_0,y)+\frac{d}{2}\frac{u_0-u}{(z(y)-u_0)(z(y)-u)}\/. 
\end{equation} This, and the facts that the function $u\to \lambda(u,y)$ is decreasing and $z(y) \geq z(y)-u >0$ for $u\in(0,z(y))$, imply that \begin{align}\label{a6.2} |\beta_y(u)| &= -\beta_y(u) \geq \frac{d}{2z^2(y)}(u-u_0) = \frac{d}{2z^2(y)}|u-u_0|\/,\quad u\in( u_0,z(y))\/,\\ |\beta_y(u)| &= \beta_y(u) \geq \frac{d}{2z^2(y)}(u_0-u) = \frac{d}{2z^2(y)}|u-u_0|\/,\quad u\in(0,u_0)\/.\label{a6.3} \end{align} To simplify notation, we will write $k=n-1$. From \eqref{a6.2}-\eqref{a6.3}, we obtain for $u \notin [u_0-z(y)k^{-1/2},u_0+z(y)k^{-1/2}]$, \begin{align*} \frac{2 \sqrt{k} z(y) |\beta_y(u)|}{ d} = \frac 2 d \frac{ z^2(y) |\beta_y(u)|}{ z(y)k^{-1/2}} \geq \frac 2 d \frac{ z^2(y) |\beta_y(u)|}{ |u-u_0|} & \geq 1\/. \end{align*} This, \eqref{eq:integral} and \eqref{eq:hy:der}, and the bound $z(y)-u<z(y)$ imply that \begin{align}\label{a8.10} &\left(\int_0^{u_0-z(y)k^{-1/2}}+\int_{u_0+z(y)k^{-1/2}}^{z(y)}\right)\alpha_y^{k}(u) (z(y)-u)^{d-1}\rd u\\ &\leq \left(\int_0^{u_0-z(y)k^{-1/2}}+\int_{u_0+z(y)k^{-1/2}}^{z(y)}\right)\alpha_y^{k}(u) (z(y)-u)^{d-1} \frac{2 \sqrt{k} z(y) |\beta_y(u)|}{ d} \rd u\notag\\ &= \frac{2z^{d}(y)\sqrt{k}}{d} \left(\int_0^{u_0-z(y)k^{-1/2}}+\int_{u_0+z(y)k^{-1/2}}^{z(y)}\right)\alpha_y^{k-1}(u)\alpha_y(u)\beta_y(u)\rd u\notag\\ & = \frac{2z^{d}(y)}{d\sqrt{k}}\left(\alpha^{k}_y\left(u_0-\frac{z(y)}{\sqrt{k}}\right)+\alpha^{k}_y\left(u_0+\frac{z(y)}{\sqrt{k}}\right)-\alpha_y^{k}(0)-\alpha_y^{k}(z(y))\right) \leq \frac{4}{d\sqrt{k}}z^{d}(y)\alpha_y^{k}(u_0)\/.\notag \end{align} We combine this estimate with the observation that $z(y) \leq E/mg$ and the following bound \begin{equation}\label{a8.6} \int_{u_0-z(y)k^{-1/2}}^{u_0+z(y)k^{-1/2}}\alpha_y^{k}(u)(z(y)-u)^{d-1}\rd u\leq \frac{2z^{d}(y)}{\sqrt{k}}\alpha_y^{k}(u_0)\/, \end{equation} to arrive at \begin{equation} \label{eq:integral:upper} \int_0^{z(y)}\alpha_y^{k}(u)(z(y)-u)^{d-1}\rd u \leq 
\left(2+\frac{4}{d}\right)\,\frac{z^{d}(y)}{\sqrt{k}}\alpha_y^{k}(u_0)\/. \end{equation} Since $\beta_y(u)\leq0$ for $u\in[ u_0,z(y))$, \eqref{a6.4} implies that, for $u\in[ u_0,z(y))$, \begin{align*} u\geq\frac{z(y)}{1+\frac d {2u\lambda(u,y)}}. \end{align*} This and \eqref{y22.4} yield, \begin{align}\label{a7.6} u&\geq \frac{z(y)}{1+100 d},\qquad u\in[ u_0,z(y)). \end{align} Since $\beta_y(u_0)=0$, we obtain from \eqref{a6.4}, \begin{align*} u_0(y)=\frac{z(y)}{1+\frac d {2u_0(y)\lambda(u_0(y),y)}}, \end{align*} and, by \eqref{y22.4}, \begin{align} u_0(y) &\leq \frac{z(y)} {1 + d/42} \leq \frac{21}{22} z(y). \label{a7.7} \end{align} Using \eqref{eq:l:der:u}, \eqref{y22.4}, \eqref{y22.5} and \eqref{a7.6}, we get for $u\in[ u_0,z(y))$, \begin{align}\label{a7.1} \left|\dfrac{\partial \lambda}{\partial u}\right| &= -\dfrac{\partial \lambda}{\partial u} = \frac{1}{(w/q-u^2)\lambda^2}(\lambda u)^2\frac{1}{u^2} \leq 2^{12}e\cdot 21^2 \frac{(1+100d)^2}{z^2(y)}=\frac{c_1}{z^2(y)}\/, \end{align} where $c_1$ depends only on $d$. For $u\in(u_0,u_0+z(y)k^{-1/2})$ and $k\geq 44^2=1936$, in view of \eqref{a7.7}, \begin{align*} &\frac{z^2(y)}{(z(y)-u_0)(z(y)-u)} \leq \frac{z^2(y)}{\left(z(y)-\frac{21}{22} z(y)\right)\left(z(y)-\frac{21}{22}z(y) - z(y) k^{-1/2}\right)} \\ &\leq \frac 1 {(1/22)(1/44)} \leq 968. \end{align*} This estimate, \eqref{a6.5} and \eqref{a7.1} show that for $u\in(u_0,u_0+z(y)k^{-1/2})$ and $k\geq 1936$, \begin{align}\notag -\beta_y(u) &= \lambda(u_0,y)-\lambda(u,y)+\frac{d}{2}\frac{u-u_0}{(z(y)-u_0)(z(y)-u)}\\ &\leq \frac{u-u_0}{z^2(y)}\left(c_1+484d\right)\leq \frac{c_2}{z(y)\sqrt{k}},\label{a7.2} \end{align} where $c_2>0$ depends only on $d$. For $u\in(u_0,u_0+z(y)k^{-1/2})$ and $k\geq 1936$, \begin{align*} z(y)-u \geq z(y)-\frac{21}{22}z(y) - z(y) k^{-1/2}\geq z(y)/44. 
\end{align*} This implies that, for $u\in(u_0,u_0+z(y)k^{-1/2})$ and $k\geq 1936$, \begin{align}\label{a7.3} &\int_0^{z(y)}\alpha_y^{k}(u) (z(y)-u)^{d-1}\rd u\\ &\quad\geq \alpha_y^{k}(u_0) \int_{u_0}^{u_0+z(y)k^{-1/2}}(z(y)-u)^{d-1} \left(1-\frac{\alpha_y(u_0)-\alpha_y(u)}{\alpha_y(u_0)}\right)^{k}\rd u\notag\\ &\quad\geq z^{d-1}(y) \alpha_y^{k}(u_0)\left(1/44\right)^{d-1}\int_{u_0}^{u_0+z(y)k^{-1/2}} \left(1-\frac{\alpha_y(u_0)-\alpha_y(u)}{\alpha_y(u_0)}\right)^{k}\rd u\/.\notag \end{align} Recall that $u_0$ is the maximum of $\alpha_y$, and also \eqref{eq:hy:der} and \eqref{a7.2}. There exists $\tilde{u}\in (u_0,u_0+z(y)k^{-1/2})$ such that, \begin{align*} \frac{\alpha_y(u_0)-\alpha_y(u)}{\alpha_y(u_0)} = -\frac{\alpha_y(\tilde{u})}{\alpha_y(u_0)}\beta_y(\tilde{u})(u-u_0)\leq \frac{c_2}{z(y)\sqrt{k}}(u-u_0)\leq \frac{c_2}{k}\/. \end{align*} Hence, for $u\in(u_0,u_0+z(y)k^{-1/2})$ and $k\geq 1936$, \begin{align*} \left(1-\frac{\alpha_y(u_0)-\alpha_y(u)}{\alpha_y(u_0)}\right)^{k} \geq c_3. \end{align*} We combine this with \eqref{a7.3} to see that \begin{equation}\label{a7.8} \int_0^{z(y)}\alpha_{y}^k(u)(z(y)-u)^{d-1}\rd u\geq \frac{c_4}{\sqrt{k}}z^d(y)\alpha_y^k(u_0)\/. \end{equation} Let $\lambda_0=\lambda(u_0(y),y)$, $q_0=q(u_0(y),y)$ and \begin{align}\label{a7.9} \psi(y) &= (2/d)^{d/2}\alpha_y(u_0)=(2/d)^{d/2}(z(y)-u_0(y))^{d/2} \frac{q_0e^{\lambda_0u_0}}{\lambda_0} \/. \end{align} Since $u_0(y)$ makes the right hand side of \eqref{a6.4} equal to 0, \begin{align}\label{a7.11} \psi(y) &= (2/d)^{d/2} (z(y)-u_0(y))^{d/2} \frac{q_0e^{\lambda_0u_0}}{\lambda_0} = \frac{q_0e^{\lambda_0u_0}}{\lambda_0^{d/2+1}}\/. \end{align} Recall that $k=n-1$. 
It follows from \eqref{eq:fY:int}, \eqref{eq:integral}, \eqref{eq:integral:upper}, \eqref{a7.8} and \eqref{a7.9} that \begin{equation}\label{a8.3} f_{Y_{n+1}}(y) \approx \frac{1}{Z_n''}\,\psi^{n-1}(y)z^{d}(y)\/, \end{equation} with the normalizing constant $Z_n''= \int_R^{E/(Mg)}\psi^{n-1}(y)z^{d}(y)\rd y$. Comparing \eqref{a3.2} and \eqref{a6.4}, we see that we can apply \eqref{a7.10} to $u_0(y)$, i.e., \begin{equation}\label{a8.5} \dfrac{\rd u_0}{\rd y}\left(1-\frac{d}{2\lambda^2_0}\dfrac{\partial \lambda}{\partial u}\right) = -\frac{M}{m}+\frac{d}{2\lambda^2_0}\dfrac{\partial \lambda}{\partial y}\/. \end{equation} We use this formula, \eqref{eq:p:der:u}, \eqref{eq:p:der:y} and \eqref{a7.11} in the following calculation, \begin{align*} \lambda_0^{d/2+2}&e^{-\lambda_0 u_0}\dfrac{\rd \psi}{\rd y} = \lambda_0\left(\dfrac{\partial q}{\partial u}\dfrac{\rd u_0}{\rd y}+\dfrac{\partial q}{\partial y}\right) +\lambda_0^2 q_0 \dfrac{\rd u_0}{\rd y}+q_0\left(\lambda_0u_0-\frac{d+2}{2}\right)\left(\dfrac{\partial \lambda}{\partial u}\dfrac{\rd u_0}{\rd y}+\dfrac{\partial \lambda}{\partial y}\right)\\ & = \dfrac{\rd u_0}{\rd y}\left(\lambda_0\dfrac{\partial q}{\partial u}+q_0\left(\lambda_0u_0-\frac{d+2}{2}\right)\dfrac{\partial \lambda}{\partial u}+\lambda_0^2q_0\right)+\lambda_0\dfrac{\partial q}{\partial y}+ q_0\left(\lambda_0u_0-\frac{d+2}{2}\right)\dfrac{\partial \lambda}{\partial y}\\ & = \dfrac{\rd u_0}{\rd y}\left(\lambda_0 \frac{q_0}{\lambda_0}(1-\lambda_0 u_0)\dfrac{\partial \lambda}{\partial u} +q_0\left(\lambda_0u_0-\frac{d+2}{2}\right)\dfrac{\partial \lambda}{\partial u}+\lambda_0^2q_0\right)\\ &\qquad+\lambda_0\left( \frac{q_0}{\lambda_0}(1-\lambda_0 u_0)\dfrac{\partial \lambda}{\partial y}+\lambda_0 (|D_b|-q_0) \right) + q_0\left(\lambda_0u_0-\frac{d+2}{2}\right)\dfrac{\partial \lambda}{\partial y}\\ & = \lambda_0^2q_0\dfrac{\rd u_0}{\rd
y}\left(1-\frac{d}{2\lambda_0^2}\dfrac{\partial \lambda}{\partial u}\right) +\lambda_0^2(|D_b|-q_0)-\frac{dq_0}{2}\dfrac{\partial \lambda}{\partial y}\\ & =\lambda_0^2q_0\left(-\frac{M}{m}+\frac{d}{2\lambda^2_0}\dfrac{\partial \lambda}{\partial y}\right)+\lambda_0^2(|D_b|-q_0)-\frac{dq_0}{2}\dfrac{\partial \lambda}{\partial y}\\ & = \frac{\lambda_0^2 q_0}{m}\left(m\frac{|D_b|-q_0}{q_0}-M\right)\/. \end{align*} Recall the usual $\operatorname{sgn}$ function that takes values $-1,0$ or $1$. The formula given above implies that \begin{align}\label{a8.2} \operatorname{sgn}\left(\dfrac{\rd \psi(y)}{\rd y}\right) = \operatorname{sgn}\left(m\frac{|D_b|-q_0(y)}{q_0(y)}-M\right) = \operatorname{sgn}\left(m\frac {\int_{\mathcal{B}((\widetilde x, y), R)} \lambda_0 e^{-\lambda_0 r} \rd x \rd r} {\int_{D\setminus\mathcal{B}((\widetilde x, y), R)} \lambda_0 e^{-\lambda_0 r} \rd x \rd r } -M\right). \end{align} According to \eqref{a6.4} and the definition of $u_0(y)$, \begin{align}\label{a8.1} \lambda(u_0(y),y)-\frac d{2 (z(y)-u_0(y))} =0. \end{align} This is equivalent to \eqref{j27.10} (see also \eqref{j28.4} and \eqref{a3.2}). In view of \eqref{a20.2}-\eqref{a8.12}, \eqref{a8.2} and \eqref{a8.1}, by Theorem \ref{j24.1} (ii), there exists a unique $y_A\in( R, E/(Mg))$ such that $\left.\dfrac{\rd \psi(y)}{\rd y}\right|_{y=y_A} =0$ and \eqref{a8.1} holds. Hence $\psi(y)$ attains its maximum at $y_A$ and it is strictly increasing on $(R,y_A)$ and strictly decreasing on $(y_A,E/(Mg))$. For later reference we note that the above argument also shows that $\lambda_0=\lambda(u_0(y_A),y_A) = \lambda_A$, with $\lambda_A$ as defined in Theorem \ref{j24.1}, and, therefore, $u_0(y_A) = u_A$, with $u_A$ as defined in the statement of the present theorem.
According to \eqref{a8.3}, for some $c_5$ and any $\varepsilon\in(0, \min(y_A-R,E/(Mg)-y_A))$, \begin{align*} \mathbb{P}_n(|Y_{n+1}-y_A|\geq \varepsilon) &\leq \frac{c_5}{Z_n''}\left(\int_R^{y_A-\varepsilon}+\int_{y_A+\varepsilon}^{E/(Mg)}\right)\psi^n(y)z^{d/2}(y)\rd y\/. \end{align*} Since \begin{align*} Z_n''&\geq \int_{y_A}^{y_A+\varepsilon/2}\psi^n(y)z^{d/2}(y)\rd y +\int_{y_A-\varepsilon/2}^{y_A}\psi^n(y)z^{d/2}(y)\rd y\\ &\geq \frac{\varepsilon}{2}\psi^n(y_A+\varepsilon/2)z^{d/2}(y_A+\varepsilon/2) + \frac{\varepsilon}{2}\psi^n(y_A-\varepsilon/2)z^{d/2}(y_A)\/, \end{align*} and \begin{align*} & \left(\int_R^{y_A-\varepsilon}+\int_{y_A+\varepsilon}^{E/(Mg)}\right)\psi^n(y)z^{d/2}(y)\rd y \leq (E/(Mg) - R) z^{d/2}(R) (\psi^n(y_A-\varepsilon) + \psi^n(y_A+\varepsilon)), \end{align*} we have \begin{align*} \mathbb{P}_n(|Y_{n+1}-y_A|\geq \varepsilon) &\leq \frac {(E/(Mg) - R) z^{d/2}(R) (\psi^n(y_A-\varepsilon) + \psi^n(y_A+\varepsilon))} {\frac{\varepsilon}{2}\psi^n(y_A+\varepsilon/2)z^{d/2}(y_A+\varepsilon/2) + \frac{\varepsilon}{2}\psi^n(y_A-\varepsilon/2)z^{d/2}(y_A)} \\ &\leq \frac{2E}{\varepsilon Mg}\left(\frac{z(R)}{z(y_A+\varepsilon/2)}\right)^{d/2}\left[\left(\frac{\psi(y_A+\varepsilon)}{\psi(y_A+\varepsilon/2)}\right)^n +\left(\frac{\psi(y_A-\varepsilon)}{\psi(y_A-\varepsilon/2)}\right)^n\right]. \end{align*} The right-hand side goes to $0$ as $n\to \infty$, i.e., \begin{align}\label{a8.7} \lim_{n\to\infty} \mathbb{P}_n(|Y_{n+1}-y_A|\geq \varepsilon)=0.
\end{align} A calculation analogous to \eqref{a8.10} gives for $\delta >0$, \begin{align}\label{a8.11} &\left(\int_0^{u_0-\delta}+\int_{u_0+\delta}^{z(y)}\right)\alpha_y^{k}(u) (z(y)-u)^{d-1}\rd u\\ & \quad \leq\frac{2z^{d}(y)}{d\sqrt{k}}\left(\alpha^{k}_y\left(u_0-\delta\right)+\alpha^{k}_y\left(u_0+\delta\right)-\alpha_y^{k}(0)-\alpha_y^{k}(z(y))\right) \notag\\ &\quad\leq \frac{2z^{d}(y)}{d\sqrt{k}}\left(\alpha^{k}_y\left(u_0-\delta\right)+\alpha^{k}_y\left(u_0+\delta\right)\right).\notag \end{align} Recall that $k=n-1$ and, by \eqref{a8.8} and \eqref{a8.9}, that $\frac 1 n \sum_{i=1}^n y_i = u$. The following remark made above about $\psi$ applies also to $\alpha_y$ because of \eqref{a7.9}: ``$\psi(y)$ attains its maximum at $y_A$ and it is strictly increasing on $(R,y_A)$ and strictly decreasing on $(y_A,E/(Mg))$.'' Combining \eqref{a7.8} and \eqref{a8.11}, we obtain for every fixed $y$, \begin{align}\label{a8.20} \limsup_{n\to\infty} \mathbb{P}_n& \left(\left|\frac 1 n \sum_{i=1}^n Y_i - u_0(y)\right| >\delta \mid Y_{n+1} = y\right) \\ &\leq\limsup_{k\to\infty} \frac {\frac{2z^{d}(y)}{d\sqrt{k}}\left(\alpha^{k}_y\left(u_0-\delta\right)+\alpha^{k}_y\left(u_0+\delta\right)\right)} {\frac{c_4}{\sqrt{k}}z^d(y)\alpha_y^k(u_0)} =0.\notag \end{align} Using the fact that $u_0(y)$ is a continuous function of $y$ (see, e.g., \eqref{a8.5}), \eqref{a8.7}, \eqref{a8.20} and applying the dominated convergence theorem to the indicator function of the event $\left\{ \left|\frac 1 n \sum_{i=1}^n Y_i - u_0(y)\right| >\varepsilon \right\}$ we obtain from \eqref{a8.7} and \eqref{a8.20}, \begin{align*} \lim_{n\to\infty} \mathbb{P}_n \left(|Y_{n+1} - y_A|> \varepsilon \text{ or } \left|\frac 1 n \sum_{i=1}^n Y_i - u_0(y_A)\right| >\varepsilon\right) =0. \end{align*} It remains to recall that we have shown that $u_0(y_A) = u_A$.
\end{proof} Recall definitions \eqref{j24.4} of $( \mathbf{X}_n,\mathbf{Y}_n)$, \eqref{j24.3} of $\nu_{x,y,\lambda}$, and \eqref{j13.2} of $\mathbf{D}_n(x,y,u)$. \begin{lemma}\label{j15.1} Fix any $\widehat x\in D_b'$, $u>0$ and $\widehat y\geq R$, and let $\widehat{\mathcal{B}}= \mathcal{B}(( \widehat x, \widehat y), R)$. Let $\mathbb{P}_n^{\widehat x,\widehat y,u}$ denote the uniform distribution on $\mathbf{D}_n(\widehat x,\widehat y,u)$. Suppose that $( \mathbf{X}_n,\mathbf{Y}_n)$ has the distribution $\mathbb{P}_n^{\widehat x,\widehat y,u}$. (i) When $n\to \infty$ then for every fixed $j\geq 1$, the distribution of $(X_j,Y_j)$ converges to $\nu_{\widehat x,\widehat y,\lambda}$, where $\lambda$ is the solution to $ u = \mathscr{u}(\lambda,y)$. Moreover, for any $j_1\ne j_2$, the joint distribution of $((X_{j_1},Y_{j_1}),(X_{j_2}, Y_{j_2}))$ converges to $\nu_{\widehat x,\widehat y,\lambda} \times \nu_{\widehat x,\widehat y,\lambda}$. (ii) The convergence in (i) is uniform in the sense that for any bounded $d$-dimensional rectangular parallelepipeds $A_1,A_2\subset \mathbb{R}^d$ with non-empty interior, any $0< u_1 < u_2 < \infty$, and any $\varepsilon>0$, there exists $n_1$ such that for all $n\geq n_1$, $\widehat x \in D_b'$, $y>R$ and $u\in[u_1,u_2]$, \begin{align}\label{j13.6} \left| \mathbb{P}_n^{\widehat x,y,u} (((X_{j_1},Y_{j_1}),(X_{j_2}, Y_{j_2})) \in A_1 \times A_2) - \nu_{\widehat x,y,\lambda} \times \nu_{\widehat x,y,\lambda} (A_1 \times A_2) \right| < \varepsilon. \end{align} \end{lemma} \begin{proof} We will reuse some ideas from the proof of Proposition \ref{j11.2}. Let $ (Z^{1 }_1,T^{1 }_1),\ldots,(Z^{1 }_{n},T^{1 }_n)$ be i.i.d., with $Z^{1 }_i \in \mathbb{R}^{d-1}$ and $T^{1 }_i >0$ for $1\leq i \leq n$. Assume that $T^{1 }_i$ and $Z^{1 }_i$ are independent, $Z^{1 }_i$ has the uniform distribution in $D_b$, and $T^{1 }_i$ has the exponential distribution with parameter $\lambda$.
Let $f_1(z_1,t_1, \dots,z_n, t_n)$ be the density of $\left((Z^{1 }_1,T^{1 }_1),\ldots,(Z^{1 }_{n},T^{1 }_n)\right)$. We have \begin{align}\label{j11.3} f_1(z_1,t_1, \dots,z_n, t_n) = |D_b|^{-n} \lambda ^n \exp(-\lambda(t_1+ \dots + t_n)), \end{align} for $z_i \in D_b$, $t_i >0$, $1\leq i \leq n$. Let $(Z^{2 }_1,T^{2 }_1),\ldots,(Z^{2 }_{n},T^{2 }_n)$ be i.i.d., with $( Z^{2 }_i, T^{2 }_i)$ being distributed as $( Z^{1 }_i, T^{1 }_i)$ conditioned by $\{(Z^{1 }_i,T^{1 }_i)\notin \widehat{\mathcal{B}}\}$. Hence, $(Z^{2 }_i,T^{2 }_i)$ has the distribution $\nu_{\widehat x, \widehat y, \lambda}$. By the definition of $q$ and \eqref{j11.3}, the density $f_2$ of $((Z^{2 }_1,T^{2 }_1),\ldots,(Z^{2 }_{n},T^{2 }_n))$ is given by \begin{align}\label{j11.4} f_2(z_1,t_1, \dots,z_n, t_n) = \left(\frac q {|D_b|}\right)^{-n} |D_b|^{-n} \lambda^n \exp(-\lambda(t_1+ \dots + t_n)) \end{align} on the set $(D\setminus \widehat{\mathcal{B}})^n$. Let $S^2_n = \frac 1 n \sum_{k=1}^n T^2_k $. Let $((Z^{3 }_1,T^{3 }_1),\ldots,(Z^{3 }_{n},T^{3 }_n))$ be the sequence $((Z^{2 }_1,T^{2 }_1),\ldots,(Z^{2 }_{n},T^{2 }_n))$ conditioned by $\{S^2_n = u\}$. Note that the distribution of $((Z^{3 }_1,T^{3 }_1),\ldots,(Z^{3 }_{n},T^{3 }_n))$ is the uniform distribution on $\mathbf{D}_n(\widehat x,\widehat y,u)$. Hence, the distribution of $(X_j,Y_j)$ under $\mathbb{P}_n^{\widehat x,\widehat y,u}$ is the same as the distribution of $(Z^{3 }_{j},T^{3 }_j)$. We will show that the distribution of $(Z^{3 }_{j},T^{3 }_j)$ converges to that of $(Z^{2 }_{j},T^{2 }_j)$ as $n\to \infty$. This is equivalent to the weak convergence of $\mathbb{P}_n^{\widehat x,\widehat y,u}$ to $\nu_{\widehat x, \widehat y, \lambda}$ since the distribution of $(Z^{2 }_i,T^{2 }_i)$ has the distribution $\nu_{\widehat x, \widehat y, \lambda}$. Fix $j\geq 1$ and consider $n> j$. Let $S^{2,j}_n = \frac 1 {n-1} \sum_{k=1, \dots, n; k\ne j} T^2_k $. Since $(Z^{2 }_1,T^{2 }_1),\ldots,(Z^{2 }_{n},T^{2 }_n)$ is an i.i.d.
sequence, the density $f_{(Z^{3 }_{j},T^{3 }_j)}$ has the form \begin{align}\label{j11.5} f_{(Z^{3 }_{j},T^{3 }_j)}(z,t) = c_1 f_{(Z^{2 }_{j},T^{2 }_j)}(z,t) f_{S^{2,j}_n} \left(u+ \frac 1 {n-1} (u-t)\right), \end{align} where $c_1$ is the normalizing constant. Let $\sigma^2 = \var T^2_i$ and $T^4_i = (T^2_i - u)/(\sigma\sqrt{n-1})$. Note that, since $ u = \mathscr{u}(\lambda,y)$, $\mathbb{E} T^4_i=0$. Let \begin{align*} S^{4,j}_n = \sum_{k=1, \dots, n; k\ne j} T^4_k = \frac 1 {\sigma\sqrt{n-1}} \sum_{k=1, \dots, n; k\ne j} (T^2_k - u) = \frac{\sqrt{ n-1}}{\sigma} (S^{2,j}_n-u). \end{align*} Hence \begin{align*} f_{S^{2,j}_n} (u+s) = \frac{\sqrt{ n-1}}{\sigma} f_{S^{4,j}_n}\left(\frac{\sqrt{ n-1}}{\sigma} s\right). \end{align*} By Lemma \ref{j10.2}, \begin{align}\label{j24.5} f_{S^{2,j}_n}&\left(u+ \frac 1 {n-1} (u-t)\right) = \frac{\sqrt{ n-1}}{\sigma} f_{S^{4,j}_n}\left(\frac{\sqrt{ n-1}}{\sigma} \frac 1 {n-1} (u-t)\right)\\ &= \frac{\sqrt{ n-1}}{\sigma} f_{S^{4,j}_n} \left(\frac 1 {\sigma\sqrt{n-1}} (u-t)\right) =\frac{\sqrt{ n-1}}{\sigma} \left(\varphi\left( \frac 1 {\sigma\sqrt{n-1}} (u-t)\right) + A\right) .\notag \end{align} Since the random variable $T^2_i $ has the same distribution as the random variable with the same name in the proof of Proposition \ref{j11.2}, the estimates \eqref{y22.10}-\eqref{y22.11} for $A$, and those at the end of the proof of Proposition \ref{j11.2} apply in the present case. Thus \begin{align}\label{j26.1} |A| \leq \frac {c_2}{\sqrt{n-1}}, \end{align} where $c_2$ depends only on $|D_b|$. Fix any $u_1 >0$. Then, by \eqref{y22.6} and \eqref{y22.2}, there exists $c_3>0$ such that for all $u \geq u_1$, \begin{align}\label{j26.2} \sigma^2 \geq c_3. \end{align} Fix any $u_2\in(u_1,\infty)$.
It follows from \eqref{j24.5}, \eqref{j26.1} and \eqref{j26.2} that for any fixed $0 \leq t_1 < t_2 < \infty$, $u\in[u_1,u_2]$ and $\varepsilon>0$ there exists $n_1$ such that for $n\geq n_1$, $t_3,t_4\in[t_1,t_2]$, and $t_5\notin[t_1,t_2]$, \begin{align}\label{j11.6} 1-\varepsilon <\ &\frac{ f_{S^{2,j}_n}\left(u+ \frac 1 {n-1} (u-t_3)\right)} {f_{S^{2,j}_n}\left(u\right)} <1+\varepsilon,\\ &\frac{ f_{S^{2,j}_n}\left(u+ \frac 1 {n-1} (u-t_5)\right)} {f_{S^{2,j}_n}(u)} <2.\label{j11.7} \end{align} This, \eqref{y22.8} and \eqref{j11.5} imply that the distribution of $(Z^{3 }_{j},T^{3 }_j)$ converges to that of $(Z^{2 }_{j},T^{2 }_j)$ as $n\to \infty$. Moreover, for any bounded $d$-dimensional rectangular parallelepiped $A_1\subset \mathbb{R}^d$ with non-empty interior, any $0< u_1 < u_2 < \infty$, and any $\varepsilon>0$, there exists $n_1$ such that for all $n\geq n_1$, $\widehat x \in D_b'$, $y>R$ and $u\in[u_1,u_2]$, \begin{align*} \left| \mathbb{P}_n^{\widehat x,y,u} ((X_{j_1},Y_{j_1}) \in A_1) - \nu_{\widehat x,y,\lambda} (A_1 ) \right| < \varepsilon. \end{align*} A completely analogous argument shows that for any $j_1\ne j_2$, the distribution of $\left((Z^{3 }_{j_1},T^{3 }_{j_1}),(Z^{3 }_{j_2},T^{3 }_{j_2})\right)$ converges to that of $\left((Z^{2 }_{j_1},T^{2 }_{j_1}),(Z^{2 }_{j_2},T^{2 }_{j_2})\right)$ as $n\to \infty$, and also part (ii) of the lemma holds true. \end{proof} Recall $\nu_{\widetilde x,y,\lambda}$ defined in \eqref{j24.3}, the empirical measure $\mathbb{Q}_n$ defined in \eqref{j13.1}, and $\mathbf{D}_n(x,y,u)$ defined in \eqref{j13.2}. \begin{lemma}\label{j14.1} The marginal distribution of $X_{n+1}$ under $\mathbb{P}_n$ is uniform in $D_b'$. Given $\{X_{n+1}=x\}$, the conditional distribution of $\mathbb{Q}_n$ converges to $\nu_{x,y_{\mathrm{A}},\lambda_A}$ weakly, in probability as $n\to \infty$.
\end{lemma} \begin{proof} It follows easily from the microcanonical ensemble formula \eqref{j10.4} that the marginal distribution of $X_{n+1}$ under $\mathbb{P}_n$ is uniform in $D_b'$. The same formula \eqref{j10.4} implies that one can represent $\mathbb{P}_n$ as follows. Let $\mathbb{P}_n^{x,y,u}$ denote the uniform distribution on $\mathbf{D}_n(x,y,u)$. Then there exists a probability measure $\mu_n$ on $D_b'\times [R,\infty) \times \mathbb{R}_+$ such that $\mathbb{P}_n = \int \mathbb{P}_n^{x,y,u} \rd \mu_n(x,y,u)$. Let $\mathbb{P}_n^x = \int \mathbb{P}_n^{x,y,u} \mu_n(x,\rd y,\rd u)$. Let $\mathbb{E}_n^x$ and $\mathbb{E}_n^{x,y,u} $ denote expectations corresponding to $\mathbb{P}_n^x$ and $\mathbb{P}_n^{x,y,u}$. Fix any $x\in D_b'$ and write $\nu_x'= \nu_{x,y_A,\lambda_A}$. Fix any bounded $d$-dimensional rectangular parallelepiped $A\subset \mathbb{R}^d$ with non-empty interior. We have \begin{align}\label{j13.4} &\mathbb{E}_n^x \left( \mathbb{Q}_n(A) - \nu_x'(A)\right)^2 = \mathbb{E}_n^x \left( \frac 1 n \sum_{i=1}^n \mathbf{1}_{A} (X_i,Y_i) - \nu_x'(A)\right)^2\\ &= \mathbb{E}_n^x \left( \frac 1 n \sum_{i=1}^n \left(\mathbf{1}_{A} (X_i,Y_i) - \nu_x'(A)\right)\right)^2 \notag\\ &= \frac 1 {n^2} \sum_{i=1}^n \mathbb{E}_n^x \left(\mathbf{1}_{A} (X_i,Y_i) - \nu_x'(A)\right)^2 \notag\\ &\qquad+ \frac 2 {n^2} \sum_{i=1}^{n-1} \sum_{j=i+1}^n \mathbb{E}_n^x \left[ \left(\mathbf{1}_{A} (X_i,Y_i) - \nu_x'(A)\right) \left(\mathbf{1}_{A} (X_j,Y_j) - \nu_x'(A)\right) \right] \notag\\ &= \frac 1 n \mathbb{E}_n^x \left(\mathbf{1}_{A} (X_1,Y_1) - \nu_x'(A)\right)^2 \notag\\ &\qquad+ \frac {2(n^2-n)}{n^2} \mathbb{E}_n^x \left[ \left(\mathbf{1}_{A} (X_1,Y_1) - \nu_x'(A)\right) \left(\mathbf{1}_{A} (X_2,Y_2) - \nu_x'(A)\right) \right] \notag\\ &= \frac 1 n \mathbb{E}_n^x \left(\mathbf{1}_{A} (X_1,Y_1) - \nu_x'(A)\right)^2 \notag\\ &\qquad+ \frac {2(n^2-n)}{n^2} \Big( \mathbb{E}_n^x \left[ \mathbf{1}_{A} (X_1,Y_1)\mathbf{1}_{A} (X_2,Y_2)\right] -\mathbb{E}_n^x \left[
\mathbf{1}_{A} (X_1,Y_1)\nu_x'(A)\right] \notag\\ &\qquad \qquad-\mathbb{E}_n^x \left[ \nu_x'(A)\mathbf{1}_{A} (X_2,Y_2)\right] + (\nu_x'(A))^2\Big).\notag \end{align} It is easy to see that the function $(x,y,u) \to \mathbb{P}_n^{x,y,u}$ is continuous in the weak topology. This, Proposition \ref{j13.3} and \eqref{j13.6} imply that \begin{align}\label{j13.7} &\lim_{n\to \infty} \mathbb{E}_n^x \left[ \mathbf{1}_{A} (X_1,Y_1)\mathbf{1}_{A} (X_2,Y_2)\right] \\ &\quad= \lim_{n\to \infty}\int \mathbb{E}_n^{x,y,u} \left[ \mathbf{1}_{A} (X_1,Y_1)\mathbf{1}_{A} (X_2,Y_2)\right] \mu_n(x,\rd y,\rd u) = (\nu_x'(A))^2.\notag \end{align} For the same reason, \begin{align}\label{j13.8} \lim_{n\to \infty} \mathbb{E}_n^x \left[ \mathbf{1}_{A} (X_k,Y_k)\right] &= \nu_x'(A), \qquad k=1,2. \end{align} The bound $\mathbb{E}_n^x \left(\mathbf{1}_{A} (X_1,Y_1) - \nu_x'(A)\right)^2 \leq 1$, \eqref{j13.4}, \eqref{j13.7} and \eqref{j13.8} imply that \begin{align*} \lim_{n\to \infty}\mathbb{E}_n^x \left( \mathbb{Q}_n(A) - \nu_x'(A)\right)^2 =0. \end{align*} The lemma follows because this statement holds for every $A$. \end{proof} \begin{proof}[Proof of Theorem \ref{a20.4}] The theorem follows from Proposition \ref{j13.3} and Lemma \ref{j14.1}. \end{proof} \section{Inverse temperature}\label{y19.7} \begin{proposition} The inverse temperature of the gas is asymptotically proportional to $\lambda_A$ defined in Theorem \ref{j24.1}. More precisely, for every fixed $j\geq 1$, \begin{align} \lim_{n\to\infty} \mathbb{E}_n \left(\frac 1 2 m \|V_j\|^2\right)= \lim_{n\to\infty} \mathbb{E}_n \left(\frac 1 n\sum_{i=1}^{n}\frac 1 2 m \|V_i\|^2\right)&=\frac{dmg}{2\lambda_A}.\label{j15.5} \end{align} \end{proposition} \begin{proof} Since the total energy $E$ is fixed, we have \begin{align*} \frac 1 n\sum_{i=1}^{n} mg Y_i \leq E, \qquad MgY_{n+1} \leq E, \qquad \frac 1 {n+1}\sum_{i=1}^{n+1}\frac 1 2 m \|V_i\|^2\leq E. \end{align*} Hence, these sequences of random variables are tight.
Recall $\mathbb{P}_n^{x,y,u}$ and $\mu_n$ from the proof of Lemma \ref{j14.1}. By Proposition \ref{j13.3}, \begin{align}\label{a8.14} \lim_{n\to\infty} \mathbb{E}_n &\left(\frac 1 n\sum_{i=1}^{n} mg Y_i\right) = \lim_{n\to\infty}\int \mathbb{E}_n^{x,y,u} \left(\frac 1 n\sum_{i=1}^{n} mg Y_i\right)\rd \mu_n(x,y,u)\\ &= \lim_{n\to\infty}\int u \rd \mu_n(x,y,u) = mg u_A =mg \frac { \int_{D\setminus\mathcal{B}((\widetilde x, y_A), R)} y\lambda_A e^{-\lambda_A y} \rd x \rd y} { \int_{D\setminus\mathcal{B}((\widetilde x, y_A), R)} \lambda_A e^{-\lambda_A y} \rd x \rd y}. \notag \end{align} Proposition \ref{j13.3} and tightness also imply that \begin{align}\label{a8.13} \lim_{n\to\infty} \mathbb{E}_n MgY_{n+1} = Mgy_A. \end{align} This and \eqref{a8.14} show that the expected value of the potential energy of the point particles and the ball converges to a fixed number. Since the total energy $E$ is fixed, the expectation of the kinetic energy converges weakly to a fixed number as well, i.e., for some $\sigma>0$, \begin{align}\label{j15.3} \lim_{n\to\infty} \mathbb{E}_n\left(\frac 1 {n+1}\sum_{i=1}^{n+1}\frac 1 2 m \|V_i\|^2\right) = \sigma^2. \end{align} The total energy is fixed so \eqref{a8.14}, \eqref{a8.13} and \eqref{j15.3} show that \begin{align*} \sigma^2 + mg \frac { \int_{D\setminus\mathcal{B}((\widetilde x, y_A), R)} y\lambda_A e^{-\lambda_A y} \rd x \rd y} { \int_{D\setminus\mathcal{B}((\widetilde x, y_A), R)} \lambda_A e^{-\lambda_A y} \rd x \rd y} + Mg y_A = E. \end{align*} Since $y_A$ and $\lambda_A$ solve \eqref{j27.10}, we must have $\sigma^2 = \frac{dmg}{2\lambda_A}$. It remains to note that due to the symmetry of $\mu_{\mathbf{y}_{n+1}}$ in \eqref{j10.4}, \begin{align*} \lim_{n\to\infty} \mathbb{E}_n \left(\frac 1 n\sum_{i=1}^{n}\frac 1 2 m \|V_i\|^2\right)= \lim_{n\to\infty} \mathbb{E}_n\left(\frac 1 {n+1}\sum_{i=1}^{n+1}\frac 1 2 m \|V_i\|^2\right) = \sigma^2= \frac{dmg}{2\lambda_A}.
\end{align*} \end{proof} \section{Uniqueness of the stationary distribution}\label{y19.8} \begin{proof}[Proof of Theorem \ref{j18.1} (ii)] The idea of the proof is the following. If there were more than one invariant measure, at least two of them would be mutually singular by Birkhoff's ergodic theorem (\cite{Sin94}). Given any two starting configurations we will exhibit two deterministic trajectories meeting at the same point in the phase space at some time $t_1>0$. Then we will argue that due to the random nature of some reflections, both processes have densities that are strictly positive in some neighborhood of that point in the phase space. Hence, there are no mutually singular invariant measures. Much of the proof will be presented in a very informal way. This is because our argument is totally elementary but it would be extremely tedious to write (or read) in a fully rigorous way. {\it Step 1}. Assume that the initial condition of the system does not belong to any of the families (1) and (2) described in the statement of the theorem. We will construct a single trajectory of the system. The trajectory will respect the laws of elastic collisions when they are assumed, i.e., for all collisions of the ball with the point particles and the walls of the container. For any reflection of a point particle from a wall of the container, we will choose a direction after the reflection from all possible directions in a way that meets the needs of the argument. Recall that ``walls'' of the container include its bottom so point particles reflect according to the Lambertian law from the side walls and the bottom of the container. Fix distinct points $z_1, \dots, z_{n+1}$ in $D_b$, such that the distance of $z_{n+1}$ from the side wall of the container is greater than $R$. We let the system evolve according to the original dynamics until one of the point particles hits a wall at a time $s_1$. 
We assume that the particle that hit the wall is labeled 1, since the labeling of point particles is irrelevant. The first point particle can reflect in any direction, including directions arbitrarily close to the boundary. So it can stay arbitrarily close to the boundary for an arbitrarily long time and move towards any point on the boundary, with the only limitation being its constant energy (the sum of potential and kinetic energies), see Figs. \ref{fig1}-\ref{fig2}. We let the system evolve according to the original dynamics after time $s_1$, except that the first point particle will follow its own trajectory, constructed independently. Let $s_2$ be the next time when a particle different from the first one hits a wall. We choose a trajectory for the first particle very close to the wall and moving towards $z_1$ in such a way that it avoids a collision with the ball on $[s_1,s_2]$ (recall that point particles do not interact). \begin{figure} \caption{Side view of a point particle trajectory reflecting from the bottom of the container. } \label{fig1} \end{figure} \begin{figure} \caption{View from above of a point particle trajectory reflecting from the bottom of the container. } \label{fig2} \end{figure} We proceed by induction. Suppose that, for some $j<n$, a deterministic trajectory of the system has been constructed on $[0,s_j]$, including the trajectories of point particles labeled $1,\dots , j$. These point particles stay close to the walls from the time of the first hit of a wall until $s_j$. Particle $k$ moves towards $z_k$ from the first time it hits a wall until $s_j$, for $k=1,\dots,j$. Given this inductive assumption, we let the system evolve according to the original dynamics after time $s_j$, except that point particles labeled $1,\dots, j$ will follow their own trajectories, constructed independently. Let $s_{j+1}$ be the next time when a particle different from $1,\dots,j$ hits a wall. We will call this particle $j+1$. 
We choose a trajectory for each of the particles $1,\dots, j+1$ very close to the wall. The $k$-th particle is moving towards $z_k$ in such a way that it avoids a collision with the ball on $[s_j,s_{j+1}]$, for $k=1,\dots, j+1$. We stop the construction when we have a trajectory of the system on $[0,s_{n}]$. We will continue after discussing a delicate point in the next step. {\it Step 2}. It is possible that fewer than $n$ point particles hit the walls of the container. This can happen only if some particles always reflect from the top part of the ball. In this case, we let one of the point particles that are staying close to the boundary move towards the point where the ball reflects from the bottom of the container and then we let the point particle hit the ball slightly off center. That will nudge the ball off its trajectory. The result will be that the point particles formerly reflecting from the top of the ball will move to the side and eventually hit a wall. {\it Step 3}. At this step of the construction of the trajectory of the system, point particles will come close to the point at the bottom where the ball reflects and they will hit the ball, one at a time, see Fig. \ref{fig3}. All other point particles will keep close to the bottom and stay away from the ball. \begin{figure} \caption{Side view of the container. A point particle collides with the ball. } \label{fig3} \end{figure} There are two major goals of the construction that can be achieved by this procedure. First, we can achieve equipartition of the energy. Second, we can put the ball above $z_{n+1}$ and make it move vertically. We will explain how we can arrange for equipartition of energy between the point particles and the ball, so that $(E-MgR)/(n+1)$ of energy is given to each point particle and $(E-MgR)/(n+1) + MgR$ of energy is given to the ball. Note that the minimal amount of energy a point particle can have is 0, assuming that it is sitting motionless at the bottom. 
For the ball, the minimal amount of energy is $MgR$. The point particles do not interact with each other so the only way to transfer the energy between them is via collisions with the macroscopic ball. We let point particles approach the point where the ball reflects from the bottom, one at a time. Then we make the direction of the velocity of the point particle close to vertical (or at least considerably different from the horizontal; see Fig. \ref{fig3}). We make the point particle hit the ball either when the ball is moving up or down. In the first case, the point particle will lose energy and in the second case it will acquire energy. By manipulating the place of the collision and the velocity direction of the point particle before the collision, and by repeating the procedure, if necessary, many times, we can partition the energy between particles and the ball in an arbitrary way. We need to add a few words clarifying the algorithm described above. If a particle and the ball have the same amount of kinetic energy and $n$ is large then the speed of the particle is much larger than the speed of the ball. Hence, we can start by transferring energy to the ball from all point particles that have more than $(E-MgR)/(n+1)$ of energy. Then the energy can be transferred from the ball to the particles that had less than $(E-MgR)/(n+1)$ of energy, one by one. {\it Step 4}. We make the $n$-th point particle collide with the ball as depicted in Fig. \ref{fig3} to change the trajectory of the ball so it reflects vertically at $z_{n+1}\in D_b$. After this is done, energy might not be equidistributed. If necessary, we induce energy transfer between the $n$-th particle and the ball by collisions of the particle with the bottom of the ball, as in Fig. \ref{fig4}. \begin{figure} \caption{Side view of the container. A point particle hits the ball at the lowest point on the surface. 
} \label{fig4} \end{figure} Fix some time $t_1$ greater than the duration of the trajectory constructed so far, such that the ball hits $z_{n+1}$ at time $t_1$. Make all point particles follow trajectories such that they all hit their own base points $z_k$, $k=1,\dots, n$, at time $t_1$. For this to be possible, it may be necessary to move $t_1$ to one of the later times when the ball hits $z_{n+1}$. For future reference, let the deterministic trajectory constructed above be called $\Gamma = \{\Gamma(t), 0\leq t \leq t_1\}$. {\it Step 5}. To finish the proof, we will argue as follows. We have shown that the system can get to the same configuration at time $t_1$ for every initial configuration (the time $t_1$ may depend on the initial conditions---this is not a problem). The trajectory that we constructed is deterministic but it ``agrees'' with the dynamics of the system, including Lambertian reflections. Lambertian reflections introduce randomness. They make the state of the system at time $t_1$ random, with a density. The densities for different initial configurations overlap so there is only one stationary measure. The subtle point is that the state density at time $t_1$ is not with respect to Lebesgue measure on $\mathbb{R}^{2nd}$ but on a hypersurface of dimension $2nd-1$ because the total energy is fixed (see \eqref{y19.1}). We will now outline an argument addressing this concern. Recall notation from Section \ref{y19.2} and let \begin{align*} \mathbf{Z}_j(t) &= ( (X_1(t), \dots, X_j(t)), (Y_1(t), \dots, Y_j(t)), (V_1(t), \dots, V_j(t))) ,\\ \mathbf{Z}_{j+}(t) &= ( (X_j(t), \dots, X_{n+1}(t)), (Y_j(t), \dots, Y_{n+1}(t)), (V_j(t), \dots, V_{n+1}(t))) ,\\ \mathbf{z}_j &= ( (x_1, \dots, x_j), (y_1, \dots, y_j), (v_1, \dots, v_j)) ,\\ \mathbf{z}_{j+} &= ( (x_j, \dots, x_{n+1}), (y_j, \dots, y_{n+1}), (v_j, \dots, v_{n+1})) , \end{align*} where $x_k\in \mathbb{R}^{d-1}$, $y_k\in R$ and $v_k \in \mathbb{R}^d$. 
For an arbitrarily thin tube around $\Gamma$ there is a strictly positive probability that the system with random reflections $\mathbf{Z}_{n+1}$ will stay inside the tube until time $t_1$. Consider a tube so thin that point particles undergoing random reflections in the tube collide with the ball in the same order as along the deterministic trajectory $\Gamma$. We will assume without loss of generality that the point particles exchange energy with the ball along $\Gamma$ in the order $1,\dots, n$. Let $u_1$ be the last time the first particle hits a wall of the container before starting the process of exchanging the energy with the ball. If the tube is very thin, the first particle will not collide with the ball after exchanging the energy with the ball. Let $u_1'$ be the last time the first particle collides with the ball before time $t_1$. We claim that for some small neighborhood $U_1$ of $\Gamma (t_1)$, some $c_1>0$ and $\mathbf{z}_{n+1}=(\mathbf{z}_1, \mathbf{z}_{2+}) \in U_1$, \begin{align}\label{a1.2} &\mathbb{P}_n(\mathbf{Z}_1(t_1)\in \rd \mathbf{z}_1 \mid \mathbf{Z}_{n+1}(u_1))/\rd \mathbf{z}_1 \geq c_1,\\ &\mathbb{P}_n(\mathbf{Z}_{n+1}(t_1)\in \rd \mathbf{z}_{n+1} \mid \mathbf{Z}_{n+1}(u_1'))/\rd \mathbf{z}_{n+1} \geq c_1 \mathbb{P}_n(\mathbf{Z}_{2+}(t_1)\in \rd \mathbf{z}_{2+} \mid \mathbf{Z}_{n+1}(u_1'))/\rd \mathbf{z}_{2+}.\label{a1.3} \end{align} The claim \eqref{a1.2} is true because the first particle acquires a random amount of energy, and its position and velocity direction are random due to Lambertian reflections following $u_1$. The claim \eqref{a1.3} is a form of independence for conditional processes (conditioned to stay in separate tubes). We proceed by induction. Suppose $2\leq j \leq n-1$. Let $u_j$ be the last time the $j$-th particle hit a wall of the container before starting the process of exchanging the energy with the ball. Let $u_j'$ be the last time the $j$-th particle collided with the ball before time $t_1$.
Then for some small neighborhood $U_j\subseteq U_{j-1}$ of $\Gamma (t_1)$, some $c_j>0$ and $\mathbf{z}_{n+1}=(\mathbf{z}_j, \mathbf{z}_{j+}) \in U_j$,
(ii) The following example of an invariant distribution illustrates family (1) in Theorem~\ref{j18.1}.
\begin{document} \title{ space{-1em} \begin{abstract} Consider the acoustic wave equation with unknown wave speed $c$, not necessarily smooth. We propose and study an iterative control procedure that erases the history of a wave field up to a given depth in a medium, without any knowledge of $c$. In the context of seismic or ultrasound imaging, this can be viewed as removing multiple reflections from normal-directed wavefronts. \end{abstract} \section{Introduction} \label{s:intro} \gdef\@thefnmark{}\@footnotetext{\!\!$^{\text{*}}$ \url{[email protected]}\qquad $^{\text{\textdagger}}$ \url{[email protected]} \qquad $^{\text{\textdaggerdbl}}$ \url{[email protected]} \qquad $^\parallel$ \url{[email protected]}} \gdef\@thefnmark{}\@footnotetext{\!\!$^{\text{*}}$ $^{\text{\textdagger}}$ $^{\text{\textdaggerdbl}}$ Department of Computational and Applied Mathematics, Rice University.} \gdef\@thefnmark{}\@footnotetext{\!\!$^\parallel$ Department of Mathematics, University of Washington and Institute for Advanced Study, Hong Kong University of Science and Technology.} Consider the acoustic wave equation with an unknown wave speed $c$, not necessarily smooth, on a finite or infinite domain $\Omega\subset\RR^n$. Assume that we can probe our domain $\Omega$ with arbitrary Cauchy data outside of $\Omega$, and measure the reflected waves outside $\Omega$ for sufficiently large time. The inverse problem is to deduce $c$ from these reflection data, and this is the basis for many wave-based imaging methods, including seismic and ultrasound imaging. Toward this goal, we will define and study a time reversal-type iterative process, the \emph{scattering control series}. We were inspired by the work of Rose~\cite{Rose02} in one dimension, who developed a ``single-sided autofocusing'' procedure and identified it as Volterra iteration for the classical Marchenko equation. 
The Marchenko equation solves the inverse problem for the one-dimensional acoustic wave equation\footnote{More precisely, the Marchenko equation treats the constant-speed wave equation with potential, to which the one-dimensional acoustic wave equation can be reduced by a change of coordinates.}, recovering $c$ on a half-line from measurements made on the boundary. In the course of our research, it became evident that the new procedure is quite closely linked to boundary control problems~\cite{Belishev97,DKO}, and has similar properties to Bingham et al.'s iterative time-reversal control procedure~\cite{BKLS}. In essence, scattering control allows us to isolate the deepest portion of a wave field generated by given Cauchy data--- behavior we demonstrate with both an exact and microlocal (asymptotically high-frequency) analysis. Along the way we present several applications of scattering control, including the removal of multiple reflections and the measurement of energy content of a wave field at a particular depth in $\Omega$. In a future paper, we anticipate illustrating how to locate discontinuities in $c$ and recover $c$ itself. In the mathematical literature, the inverse problem's data are typically given on the boundary of $\Omega$, in terms of the Dirichlet-to-Neumann map or its inverse. We find that the Cauchy data-based reflection map allows us a much cleaner analysis. It is not hard to see (cf.~Proposition~\ref{p:DN-determines-Cauchy}) that the Dirichlet-to-Neumann map determines the Cauchy data reflection map, so no extra information is needed. We start with an informal, graphical introduction to the problem. Section~\ref{s:exact} defines the scattering control series rigorously and provides an exact analysis of its behavior and convergence properties. Section~\ref{s:microlocal} pursues the same questions from a microlocal perspective. 
The discrepancy that arises between the exact and microlocal analyses allows us to provide more insight on convergence in Section~\ref{s:compare}. Section~\ref{s:marchenko} concludes by connecting our work to that of Rose and Marchenko. \subsection{Motivation} \label{s:motivation} Before defining the scattering control equation and series, we begin by motivating our problem with a graphical example. In Figure~\ref{f:mr-demo}, the domain is $\Omega=\{x>0\}\subset\RR$, with a piecewise constant wave speed $c$ having two discontinuities. We extend $c$ to all of $\RR$, but assume it is known only outside $\Omega$. Now consider the solution of the acoustic wave equation on $\RR$ for time $t\in[0,2T]$, with rightward-traveling Cauchy data $h_0$ supported outside $\Omega$. The initial wave scatters from the discontinuities in $c$, producing an infinite sequence of reflections (Figure~\subref*{f:mr-demo-original}). In imaging, one attempts to recover $c$ or some proxy for it. In many imaging algorithms currently in use, only waves having undergone a single reflection (so-called \emph{primary reflections}) are typically desired, while the remaining \emph{multiple reflections} only complicate the interpretation of the data. As a result, much research in seismic imaging has been directed toward removing or attenuating multiple reflections. \begin{figure} \caption{(a) A domain $\Omega$ (shaded) with unknown wave speed $c$ is probed by exterior Cauchy data $h_0$. Two discontinuities in $c$ (dashed) scatter the incoming wave. (b) An appropriate trailing pulse added to $h_0$ suppresses multiple reflections.} \label{f:mr-demo-original} \label{f:mr-demo-ma} \label{f:mr-demo} \end{figure} For the problem at hand, it is plausible (and can be proven) that by adding a proper control, or \emph{trailing pulse} to the initial data, the multiple reflections may be suppressed, at the cost of a harmless additional outgoing pulse (Figure~\subref*{f:mr-demo-ma}). 
If $c$ were known inside the domain (cf.~\sref{s:ml-construct}), an appropriate control may be constructed microlocally under some geometric conditions. The issue, of course, is to find the control knowing only the reflection response of $\Omega$. Rather than attacking the multiple reflection suppression problem, however, we consider a related problem obtained by focusing on the interior, rather than exterior, of $\Omega$. Returning to Figure~\subref{f:mr-demo-ma}, we note that the wave field rightmost portion of the medium contains a single, purely transmitted wave, which we call the \emph{direct transmission} of the initial data $h_0$. Slightly more precisely, the wave field inside $\Omega$ at time $2T$ is generated exactly by the direct transmission at time $T$. The control has therefore isolated the direct transmission; our problem is to find such a control for a given $h_0$ using only information available outside $\Omega$. \subsection{Almost direct transmission} \label{s:adt} At its heart, the direct transmission is a geometric optics construction, and is valid only in the high-frequency limit where geometric optics holds. Consequently, the directly transmitted wave field can be isolated only microlocally (modulo smooth functions). We will consider the geometric optics viewpoint later, but initially avoid a microlocal approach, as follows. Informally, suppose $h_0$ creates a wave that enters $\Omega$ at time 0, travelling normal to the boundary. At a later time $T$, the directly transmitted wave may be singled out from all others by its distance from the boundary: namely, $T$ (as long as it has not crossed the cut locus). By \emph{distance} we mean the travel time distance, which for $c$ smooth is Riemannian distance in the metric $c^{-2}dx^2$. 
With this in mind, given Cauchy data $h_0$ supported just outside $\Omega$ we substitute for the direct transmission the \emph{almost direct transmission}, the part of the wave field of $h_0$ at time $T$ of depth at least $T$. More precisely, let $\Theta$ be a domain containing $\Omega$ and $\supp h_0$; then let $\Theta_T\subset\Theta$ be the set of points in $\Theta$ greater than distance $T$ from the boundary. The almost direct transmission of initial data $h_0$ at time $T$ is the restriction to $\Theta_T$ of its wave field at $t=T$ (Figure~\ref{f:adt}). \begin{figure} \caption{Almost direct transmission of initial data $h_0$ at time $T>0$.} \label{f:adt} \end{figure} The nonzero volume of $\Theta\setminus\Omega$ means that some multiply reflected rays may still reach $\Theta_T$. Hence, we have in mind taking a limit as $\Theta\to\Omega$ and the support of $h_0$ approaches a point on $\bdy\Omega$. In this limit, the support of the almost direct transmission converges to a point along the normal directly-transmitted ray, for sufficiently small $T$ (at least in the absence of caustics and before reaching the cut locus); see Figure~\ref{f:adt-shrink}. \begin{figure} \caption{Shrinking the support of the initial data $h_0$ to a point. The dashed line indicates the normal geodesic from that point; the support of the almost direct transmission shrinks to a point on the geodesic.} \label{f:adt-shrink} \end{figure} \section{Exact scattering control} \label{s:exact} We set up the problem and our notation in~\sref{s:exact-setup}, then introduce the scattering control procedure in~\sref{s:exact-maf}, where we study its behavior and convergence properties. The final result, expressed in Corollary~\ref{c:dt-wave field}, is that scattering control recovers the almost direct transmission's wave field outside $\Theta$, modulo harmonic extensions. 
In~\sref{s:energy}, we apply this to recover the energy (with a harmonic extension) and kinetic energy of this portion of the wave field. Proofs for the results in these sections follow in~\sref{s:exact-proofs}. \subsection{Setup} \label{s:exact-setup} \subsubsection{Unique continuation} \label{s:uc} Let $\Omega\subseteq\RR^n$ be a Lipschitz domain, and let $c$ be a wave speed satisfying $c,c^{-1} \in L^\infty(\RR^n)$. Initially, the sole extra restriction we impose on $c$ is that it satisfy a certain form of unique continuation. More precisely, assume there is a Lipschitz distance function $d(x,y)$ such that any $u\in C(\RR, H^1(\RR^n))$ satisfying either: \pagebreak \begin{itemize} \item $u,\d_t u=0$ for $t=0$ and $d(x,x_0)<T$ (finite speed of propagation) \item $u=0$ on a neighborhood of $[-T,T]\times\{x_0\}$ (unique continuation) \end{itemize} is also zero on the \emph{light diamond} \[ D(x_0,T) = \set{(t,x)}{d(x,x_0)<T-\tabs{t}}\!, \] if $(\d_t^2-c^2\Delta) u=0$ on a neighborhood of $D(x_0,T)$, for any $x_0\in\RR^n$, $T>0$. While the set of wavespeeds with this property has not been settled in general, several large classes of $c$ are eligible, stemming from the well-known work of Tataru~\cite{Tataru}. Originally known for smooth sound speeds~\cite[Theorem 4]{SU-TATVariable}, Stefanov and Uhlmann later extended this to piecewise smooth speeds with conormal singularities~\cite[Theorem 6.1]{SU-TATBrain}, and Kirpichnikova and Kurylev to a class of piecewise smooth speeds in a certain kind of polyhedral domain~\cite[\textsection5.1]{KK}. The corresponding travel time $d(x,y)$ is the infimum of the lengths of all $C^1$ curves $\gamma(s)$ connecting $x$ and $y$, measured in the metric $c^{-2}dx^2$, such that $\gamma^{-1}(\singsupp c)$ has measure zero. \subsubsection{Geometric setup} \label{s:geometric-setup} Next, let us set up the geometry of our problem. 
We will probe $\Omega$ with Cauchy data (an \emph{initial pulse}) concentrated close to $\Omega$, in some Lipschitz domain $\Theta\supset\Omega$. We will add to this initial pulse a Cauchy data control (a \emph{tail}) supported outside $\Theta$, whose role is to remove multiple reflections up to a certain depth, controlled by a time parameter $T\in (0,\frac12\diam\Omega)$. This will require us to consider controls supported in a Lipschitz neighborhood $\Upsilon$ of $\clsr\Theta$ that satisfies $d(\bdy\Upsilon,\clsr\Theta)>2T$ and is otherwise arbitrary. While we are interested in what occurs inside $\Omega$, the initial pulse region $\Theta$ will actually play a larger role in the analysis. First, define the \emph{depth} $d^*_\Theta(x)$ of a point $x$ inside $\Theta$: \begin{equation} d^*_\Theta(x) = \when{+d(x,\bdy\Theta), & x \in \Theta,\\ -d(x,\bdy\Theta), & x \notin \Theta.} \label{e:exact-depth} \end{equation} Larger values of $d^*_\Theta$ are therefore deeper inside $\Theta$. For each $t$, define\footnote{We tacitly assume throughout that $\Theta_t$, $\Theta_t^\star$ are Lipschitz.} the open sets \begin{nalign} \Theta_t^{\phantom\star} &= \set{x\in\Upsilon}{d^*_\Theta(x) > t}\!,\\ \Theta_t^\star &= \set{x\in\Upsilon}{d^*_\Theta(x) < t}\!. \label{e:def-Theta-t-star} \end{nalign} As in~\eqref{e:def-Theta-t-star} above, we use a superscript $\star$ to indicate sets and function spaces lying outside, rather than inside, some region. \subsubsection{Acoustic wave equation} \label{s:wave-setup} Let $\tilde{\mathbf C}$ be the space of Cauchy data of interest: \begin{align} \tilde{\mathbf C} = H_0^1(\Upsilon) \oplus L^2(\Upsilon), \end{align} considered as a Hilbert space with the \emph{energy inner product} \begin{equation} \big\langle{(f_0,f_1),\,(g_0,g_1)\big\rangle} = \int_{\Upsilon} \left(\gradient f_0(x)\cdot\gradient \conj g_0(x) + c^{-2}f_1(x)\conj g_1(x)\right)\,dx. 
\end{equation} Within $\tilde{\mathbf C}$ define the subspaces of Cauchy data supported inside and outside $\Theta_{t}$: \begin{nalign} \mathbf H_t &= H_0^1(\Theta_{t})\oplus L^2(\Theta_{t}), & \hspace{1in} \mathbf H &= \mathbf H_0,\\ \tilde{\mathbf H}_t^{\mathrlap\star} &= H_0^1(\Theta_{t}^\star) \oplus L^2(\Theta_{t}^\star), & \tilde{\mathbf H}^{\star}\! &= \tilde{\mathbf H}_0^\star. \end{nalign} Define the energy and kinetic energy of Cauchy data $h=(h_0,h_1)\in\tilde{\mathbf C}$ in a subset $W\subseteq\RR^n$: \begin{align} \En_W(h) &= \int_W \left(\dabs{\gradient h_0}^2 + c^{-2} \dabs{h_1}^2\right)\,dx, & \KE_W(h) &= \int_W c^{-2}\dabs{h_1}^2\,dx. \end{align} Next, define $F$ to be the solution operator~\cite{LionsMagenes1} for the acoustic wave initial value problem: \begin{align} &F\colon H^1(\RR^n)\oplus L^2(\RR^n)\to C(\RR,H^1(\RR^n)), & & F(h_0,h_1) = u \text{\; s.t. } \begin{pdebracketed} (\partial_t^2-c^2\Delta)u &= 0,\\ \drestr{u}_{t=0} &= h_0,\\ \drestr{\d_t u}_{t=0} &= h_1. \end{pdebracketed} \label{e:wave-ivp} \end{align} Let $R_s$ propagate Cauchy data at time $t=0$ to Cauchy data at $t=s$: \begin{align} R_s = \left(F,\d_t F\right)\!\Big|_{t=s} \mspace{-8mu} \colon H^1(\RR^n)\oplus L^2(\RR^n)\to H^1(\RR^n)\oplus L^2(\RR^n). \end{align} Now combine $R_s$ with a time-reversal operator $\nu\colon \tilde{\mathbf C}\to\tilde{\mathbf C}$, defining for a given $T$ \begin{align} R &= \nu\circ R_{2T}, & \nu&\colon (f_0,f_1)\mapsto(f_0,-f_1). \end{align} In our problem, only waves interacting with $(\Omega,c)$ in time $2T$ are of interest. Consequently, let us ignore Cauchy data not interacting with $\Theta$, as follows. Let $\mathbf G=\tilde{\mathbf H}^\star\cap\big( R_{2T}(H^1_0(\RR^n\setminus\clsr\Theta)\oplus L^2(\RR^n\setminus\clsr\Theta))\big)$ be the space of Cauchy data in $\tilde{\mathbf C}$ whose wave fields vanish on $\Theta$ at $t=0$ and $t=2T$. 
Let $\mathbf C$ be its orthogonal complement inside $\tilde{\mathbf C}$, and ${\mathbf H}_t^\star$ its orthogonal complement inside $\tilde{\mathbf H}_t^\star$. With this definition, $R$ maps $\mathbf C$ to itself isometrically. \subsubsection{Projections inside and outside $\Theta_t$} \label{s:projections-setup} The final ingredients needed for the iterative scheme are restrictions of Cauchy data inside and outside $\Theta$. While a hard cutoff is natural, it is not a bounded operator in energy space: a jump at $\bdy\Theta$ will have infinite energy. The natural replacements are Hilbert space projections. More generally, we consider projections inside and outside $\Theta_t$. Let $\pi_t$, $\pi_t^\star$ be the orthogonal projections of $\mathbf C$ onto $\mathbf H_t$, $\mathbf H_t^\star$ respectively; let $\clsr\pi_t=1-\pi_t^\star$. As usual, write $\clsr\pi=\clsr\pi_0$, $\pi^\star=\pi_0^\star$. The complementary projection $I-\pi_t-\pi^\star_t$ is the orthogonal projection onto $\mathbf I_t$, the orthogonal complement to $\mathbf H_t \oplus\mathbf H_t^\star$ in $\mathbf C$. It may be described by the following lemma, which is in essence the Dirichlet principle. \begin{lemma} $\mathbf I_t$ consists of all functions of the form $(i_0,0)$, where $i_0\in H_0^1(\Upsilon)$ is harmonic in $\Upsilon\setminus\bdy\Theta_t$. \label{l:characterization-of-It} \end{lemma} \noindent Lemma~\ref{l:characterization-of-It} provides two useful pieces of information. First, $\mathbf I=\mathbf I_0$ is independent of $c$. Secondly, we can identify the behavior of the projections $\clsr\pi_t$, $\pi^\star_t$. Inside $\Theta_t$ the projection $\clsr\pi_t h$ equals $h$, while outside $\Theta_t$, it agrees with the $\mathbf I_t$ component of $h$, which is the harmonic extension of $h\restrictto{\bdy\Theta_t}$ to $\Upsilon$ (with zero trace on $\bdy\Upsilon$). Similarly, $\pi^\star_t h$ is zero on $\Theta_t$, and outside $\Theta_t$ equals $h$ with this harmonic extension subtracted. 
It will be useful to have a name for the behavior of $\clsr\pi_t h$, and so we define the notion of \emph{stationary harmonicity:} \begin{defn} Cauchy data $(h_0,h_1)$ are \emph{stationary harmonic} on $W\subseteq\RR^n$ if $h_0\restrictto W$ is harmonic and $h_1\restrictto W=0$. \end{defn} \subsection{Scattering control} \label{s:exact-maf} Suppose we have Cauchy data $h_0\in\mathbf H$. We can probe $\Omega$ with $h_0$ and observe $Rh_0$ outside $\Omega$. In particular, the reflected data $\pi^\star R$ can be measured, and from these data, we would like to procure information about $c$ inside $\Omega$. However, multiple scattering as waves travel into and out of $\Omega$ makes $\pi^\star Rh_0$ difficult to interpret. In this section, we construct a control in $\mathbf H^\star$ that eliminates multiple scattering in the wave field of $h_0$ up to a depth $T$ inside $\Theta$. More specifically, consider the \emph{almost direct transmission} of $h_0$: \begin{defn} The \emph{almost direct transmission} of $h_0\in\mathbf H$ at time $T$ is the restriction $R_Th_0\restrictto{\Theta_{T}}$. \end{defn} Ideally, we would like to recover (indirectly) this restricted wave field. If considered as Cauchy data on the ambient space $\Upsilon$, the almost direct transmission has infinite energy in general due to the sharp cutoff at the boundary of $\Theta_T$. As a workaround, consider the almost direct transmission's minimal-energy extension to $\Upsilon$. This involves a harmonic extension of the first component of Cauchy data: \begin{defn} The \emph{harmonic almost direct transmission} of $h_0$ at time $T$ is \begin{equation} h\DT = h\DT(h_0,T)=\clsr\pi_T R_T h_0. \label{e:adt-def} \end{equation} \end{defn} By Lemma~\ref{l:characterization-of-It}, $h\DT$ is equal to $R_Th_0$ inside $\Theta_T$; outside $\Theta_T$, its first component is extended harmonically from $\bdy\Theta_T$, while the second component is extended by zero. 
\subsubsection{Scattering control series} Our major tool is a Neumann series, the \emph{scattering control series} \begin{equation} h_\infty = \sum_{i=0}^\infty (\pi^\star R\pi^\star R)^i h_0, \label{e:neumann} \end{equation} formally solving the \emph{scattering control equation} \begin{equation} (I-\pi^\star R\pi^\star R)h_\infty = h_0. \label{e:maf} \end{equation} The series in general does not converge in $\mathbf C$; but it does converge in an appropriate weighted space, as we show in Theorem~\ref{t:limit-maf}. Applying $\clsr\pi$ to~\eqref{e:neumann}, we see that $h_\infty$ consists of $h_0$ plus a control in $\mathbf H^\star$. Our first theorem characterizes the behavior of the series. \begin{theorem} Let $h_0\in\mathbf H$ and $T\in(0,\frac12\diam\Theta)$. Then isolating the deepest part of the wave field of $h_0$ is equivalent to summing the scattering control series: \begin{nalign} (I-\pi^\star R\pi^\star R)h_\infty = h_0 &\iff R_{-T} \clsr\pi R_{2T}h_\infty = h\DT \text{ and } h_\infty \in h_0+\mathbf H^\star. \label{e:basic-maf-behavior} \end{nalign} Above, $R_{-T} \clsr\pi R_{2T}h_\infty$ may also be replaced by $R_{-s} \clsr\pi_{T-s} R_{T+s}h_\infty$ for any $s\in[0,T]$. Such an $h_\infty$, if it exists, is unique in $\mathbf C$. As for the harmonic extension in $h\DT$, it is equal to $\clsr\pi R_{2T}h_\infty$ outside $\Theta$: \begin{align} h\DT\big|_{\Theta^\star} &= j_0\big|_{\Theta^\star}, & \text{where\quad}\clsr\pi R_{2T}h_\infty = (j_0,j_1), \label{e:harmonic-ext-identity} \end{align} and is bounded: \begin{equation} \En_{\Theta_T^\star}(h\DT) \leq C\norm{h_0} \label{e:harmonic-ext-bound} \end{equation} for some $C=C(c,T)$ independent of $h_0$. \label{t:basic-maf} \end{theorem} Equation~\eqref{e:basic-maf-behavior} tells us that the wave field created by $h_\infty$ inside $\Theta$ at $t=2T$ is entirely due to the harmonic almost direct transmission at $t=T$ (Figure~\ref{f:basic-maf}). 
More generally, the wave field of $h_\infty$ agrees with that of $h\DT$ on its domain of influence. This is not true of $h_0$'s wave field, where other waves, including multiple reflections, will pollute the wave field at time $2T$. It follows that the tail $h_\infty-h_0$ enters $\Omega$ and carries all of the scattered energy of $h_0$ out with it. We will see this from an energy standpoint in Section~\ref{s:energy} and from a microlocal (geometric optics) standpoint in Section~\ref{s:microlocal}. \begin{figure} \caption{Illustration of the wave field generated by scattering control, as given by Theorem~\ref{t:basic-maf} \label{f:basic-maf} \end{figure} The question now is to study whether the Neumann series~\eqref{e:neumann} converges at all. Since $R$ is an isometry and $\pi^\star$ a projection, we have $\norm{\pi^\star R\pi^\star R}\leq 1$. From our later spectral characterization, we know that $\norm{\pi^\star R h}<\norm h$, strictly, for all $h\in\mathbf H^\star$. This is also true for a completely trivial reason: we eliminated $\mathbf G$ when constructing $\mathbf C$. What hinders convergence is that $\norm{h}-\norm{\pi^\star Rh}$ might be arbitrarily small; in other words, almost all the energy could be reflected off $\Theta$. Note that if the series fails to converge, no other finite energy control in $\mathbf H^\star$ can isolate the harmonic almost direct transmission of $h_0$; see Proposition~\ref{p:only-neumann}. In the next theorem, we investigate convergence via the spectral theorem. It turns out that the only problem is outside $\Theta$; inside $\Theta$ the partial sums' wave fields at $t=2T$ do converge, and their energies are in fact monotonically decreasing. We will also demonstrate that the Neumann series converges in $\mathbf H$ for a dense set of $h_0$, and identify a larger space in which the Neumann series converges for any $h_0$. 
For the statement of the theorem, define $\mathbf J$ to be the following space of Cauchy data, which, roughly speaking, remains completely inside or completely outside $\Theta$ in time $2T$: \begin{equation} \mathbf J=\big(\mathbf H\cap R(\mathbf H)\big) \oplus \big(\mathbf H^\star \cap R(\mathbf H^\star)\big). \end{equation} Let $\chi\colon\mathbf C\to\mathbf J$ be the orthogonal projection onto $\mathbf J$. \begin{theorem} With $h_0, T$ as in Theorem~\ref{t:basic-maf}, define the partial sums \begin{equation} h_k = \sum_{i=0}^k (\pi^\star R\pi^\star R)^i h_0. \label{e:partial-sums} \end{equation} Then the deepest part of the wave field can be (indirectly) recovered from $\{h_k\}$ regardless of convergence of the scattering control series: \begin{align} \lim_{k\to\infty} R_{-T}\clsr\pi R_{2T}h_k &= R_T\chi h_0 = h\DT, & \norm{\clsr\pi Rh_k}&\searrow\norm{h\DT}\!. \label{e:maf-interior-limit} \end{align} The set of $h_0$ for which the scattering control series converges in $\mathbf C$, \begin{equation} \mathcal Q = \set{h_0\in\mathbf H}{(I-\pi^\star R\pi^\star R)^{-1} h_0\in\mathbf C}\!, \label{e:Q-definition} \end{equation} is dense in $\mathbf H$. For all $h_0\in\mathbf H$, the partial sum tails $h_k-h_0$ converge in a weighted space that can be formally written as \begin{align} &\frac{I}{\sqrt{I-N^2}}(1-\chi)\mathbf C, & N &= \clsr\pi R\clsr\pi + \pi^\star R\pi^\star. \label{e:space-of-maf-convergence} \end{align} \label{t:limit-maf} \end{theorem} As an immediate corollary of~\eqref{e:maf-interior-limit}, we recover in the limit the wave field generated by the harmonic almost direct transmission outside $\Theta$, using only observable data. \begin{corollary} Let $F\DT(t,x) = (Fh\DT)(t-T,x)$ be the harmonic almost direct transmission's wave field. Then \begin{align} (Fh_k)(t,x) - (F\pi^\star R_{2T}h_k)(t-2T,x)&\to F\DT(t,x) & \text{ as } k\to\infty, \end{align} the convergence being $H^1$ in space, uniformly in $t$. 
\label{c:dt-wave field} \end{corollary} We end this section with three small propositions. The first states that the scattering control equation has no solution if the Neumann series diverges. \begin{proposition} Let $h_0, T$ be as in Theorem~\ref{t:basic-maf}, and suppose $(I-\pi^\star R\pi^\star R)k=h_0$ for some $k\in \mathbf H^*$. Then the scattering control series~\eqref{e:neumann} converges. \label{p:only-neumann} \end{proposition} The second proposition characterizes the space $\mathbf H^\star$ containing the Cauchy data controls. Essentially, each control is supported in a $2T$-neighborhood of $\Theta$ and its wave field is contained in this neighborhood for $t\in[0,2T]$, up to harmonic functions. \begin{proposition} \nobelowdisplayskip The control space $\mathbf H^\star$ consists of Cauchy data supported outside $\Theta$ whose wave fields are stationary harmonic outside a $2T$-neighborhood of $\Theta$ at $t=0,2T$: \begin{equation} \mathbf H^\star = \set{h\in\tilde{\mathbf C}}{\pi_{-2T}^\star h = \pi_{-2T}^\star R^{}_{2T} h = \clsr\pi h = 0}\!. \label{e:H-star-characterization} \end{equation} \label{p:H-star-characterization} \end{proposition} The third proposition shows that our reflection data (the Cauchy solution operator $F$, restricted to the exterior of $\Omega$) is determined by the Dirichlet-to-Neumann map, which is the data usually assumed given in boundary control problems and the inverse problem. As a result, our method requires no additional information, from a theoretical standpoint. \begin{proposition} Let $c_1,c_2$ be $L^\infty$ wave speeds on a $C^1$ domain $\Omega\subseteq\RR^n$. Extend $c_1,c_2$ to $\Omega^\star=\RR^n\setminus\clsr\Omega$ by setting them equal to some $c_0\in C^\infty(\RR^n)$. 
Define solution operators $F_1,F_2$ corresponding to $c_1,c_2$ as in~\eqref{e:wave-ivp}, and \emph{Dirichlet-to-Neumann} maps \begin{align} \Lambda_i&\colon g \mapsto \drestr{\d_\nu u}_{\RR\times\bdy\Omega}, \text{ where } \begin{pdebracketed} (\d_t^2-c_i^2\Delta) u &= 0,\\ \drestr u_{\RR\times\bdy\Omega} &= g,\\ \drestr u_{t=0} = \drestr{\d_t u}_{t=0} &= 0. \end{pdebracketed} \label{e:DN-def} \end{align} If $\Lambda_1=\Lambda_2$, then $\drestr{F_1h}_{\RR\times\Omega^\star}=\drestr{F_2h}_{\RR\times\Omega^\star}$ for all $h\in H^1(\Omega^\star)\oplus L^2(\Omega^\star)$. \label{p:DN-determines-Cauchy} \end{proposition} \subsection{Recovering internal energy} \label{s:energy} As a direct application of the results in~\sref{s:exact-maf}, we show how scattering control can recover the energy of the harmonic almost direct transmission using only data outside $\Omega$, assuming $\supp h_0\subset \Theta\setminus\clsr\Omega$. If the Neumann series converges to some $h_\infty\in\mathbf C$, we can recover the energy directly from $h_\infty$, but if not, Theorem~\ref{t:limit-maf} allows us to recover the same quantities as a convergent limit involving the Neumann series' partial sums. In a forthcoming paper we demonstrate how these energies may be used in inverse boundary value problems for the wave equation that arise in imaging. \begin{proposition} Let $h_0\in\mathbf H$, $T>0$, and suppose $(I-\pi^\star R\pi^\star R)h_\infty = h_0$. Then we can recover the harmonic almost direct transmission's energy from data observable on $\Theta^\star\cup\supp h_0$: \begin{align} \En_{\RR^n}(h\DT) &= \En_{\RR^n} \big(h_\infty\big) - \En_{\RR^n} \big(\pi^\star Rh_\infty\big). \label{e:basic-E-recovery} \intertext{We can also recover the kinetic energy of the almost direct transmission (with no harmonic extension) from data observable on $\Theta^\star\cup\supp h_0$:} \KE_{\Theta_T}(R_Th_0) &= \frac12\tform{h_0,h_0-R \pi^\star Rh_\infty - R h_\infty}. 
\label{e:basic-KE-recovery} \end{align} \label{p:basic-energy} \end{proposition} \begin{proposition} Let $h_0\in\mathbf H$ and $T>0$, and $h_k$ as before. We can recover the energy of the harmonic almost direct transmission as a convergent limit involving data observable on $\Theta^\star\cup\supp h_0$: \begin{equation} \En_{\RR^n}(h\DT) = \lim_{k\to\infty}\left[\En_{\RR^n}(h_k) - \En_{\RR^n}(\pi^\star R h_k)\right]. \label{e:limit-E-recovery} \end{equation} Similarly, for the kinetic energy of the almost direct transmission, \begin{nalign} 4\,\KE_{\Theta_{T}}(R_Th_0) &= \lim_{k\to\infty}\Big[\En(h_k)+\En(h_0)-\En(\pi^\star R \pi^\star R h_k) \\ &\qquad\qquad + 2\tform{\pi^\star Rh_k,\, h_k-R\pi^\star Rh_k} - 2\tform{h_0,\,R \pi^\star Rh_k + R h_k}\Big]. \label{e:limit-KE-recovery} \end{nalign} \label{p:limit-energy} \end{proposition} \subsection{Proofs} \label{s:exact-proofs} \begin{proof}[Proof of Theorem~\ref{t:basic-maf}] The proof is mostly a simple application of unique continuation and finite speed of propagation. \paragraph{Equation~\eqref{e:basic-maf-behavior} ($\Rightarrow$)} Let $v(t,x)=FR_{-2T}\clsr\pi R_{2T} h_\infty$ be the solution of the wave equation with Cauchy data $\clsr\pi R_{2T}h_\infty$ at $t=2T$. We will often consider Cauchy data at a particular time, and so define $\mathbf v=(v,\d_t v)$. Applying $\bar\pi$ to the defining equation $(I-\pi^\star R\pi^\star R)h_\infty=h_0$ implies $\clsr\pi h_\infty=h_0$; also $(\pi^\star \mathbf v)(0,\cdot)=0$, since \begin{nalign} 0 = \pi^\star h_0 &= \pi^\star (I-\pi^\star R_{-2T}\pi^\star R_{2T}) h_\infty\\ &= \pi^\star R_{-2T}\clsr\pi R_{2T} h_\infty\\ &= (\pi^\star \mathbf v)(0,\cdot). \end{nalign} Outside of $\Theta$, then, $\mathbf v(0,\cdot)$ and $\mathbf v(2T,\cdot)$ are equal to their projections in $\mathbf I$, and therefore are stationary harmonic. Equivalently, $\partial_t v$ and $\partial_{tt} v$ are zero on $\Theta^\star$ for $t=0,2T$. 
Because $c$ is time-independent, $\partial_t v$ is also a (distributional) solution to the wave equation. If $\partial_t v\in C(\RR, H^1(\RR^n))$, then Lemma~\ref{l:uc-top-and-bottom} applied to $\partial_t v$ gives $\partial_t v(T,\cdot)=\partial_{tt} v(T,\cdot)=0$ on $\Theta_T^\star$; it follows that $\mathbf v(T,\cdot)$ is stationary harmonic on $\Theta_T^\star$. For the general case, choose a sequence of mollifiers $\rho_\eps\to\delta$ in $\mathcal E'(\RR)$ and apply Lemma~\ref{l:uc-top-and-bottom} to $\rho_\eps'(t) \ast v$ to obtain the same conclusion. \begin{figure} \caption{Finite speed of propagation applied twice to wave field $v$.} \label{f:double-fsp} \end{figure} By finite speed of propagation (FSP), $\bar\pi_{\abs s}R_s\bar\pi=\bar\pi_{\abs s}R_s$ for any $s\in\RR$. Applying this twice, we find that in $\Theta_T$ at time $T$, the solution $v$ is equal to $h_\infty$'s wave field, which in turn is equal to $h_0$'s wave field (Figure~\ref{f:double-fsp}): \begin{equation} \clsr\pi_T \mathbf v(T,\cdot) = \clsr\pi_T R_{-T} \clsr\pi R_{2T} h_\infty \eqFSP \clsr\pi_T R_{-T} R_{2T} h_\infty = \clsr\pi_T R_T h_\infty \eqFSP \clsr\pi_T R_T \bar\pi h_\infty = \clsr\pi_T R_Th_0 \eqdef h\DT. \label{e:double-FSP} \end{equation} However, since $\mathbf v(T,\cdot)$ is stationary harmonic on $\Theta_T^\star$, we can remove the projection on the left-hand side: $\clsr\pi_T R_{-T} \clsr\pi R_{2T} h_\infty=R_{-T} \clsr\pi R_{2T} h_\infty$. This proves the forward direction of~\eqref{e:basic-maf-behavior}. More generally, it follows that $\clsr\pi_{T-s} R_{T+s} h_\infty=\mathbf v(T+s,\cdot)=R_s h\DT$ for $s\in[0,T]$. Indeed, $\mathbf v(T+s,\cdot)=R_{T+s} h_\infty$ on $\Theta_{T-s}$ by finite speed of propagation, and using Lemma~\ref{l:uc-top-and-bottom} as above implies $\mathbf v(T+s,\cdot)$ is stationary harmonic on $\Theta_{T-s}^\star$ for $s\in[0,T]$. \paragraph{Equation~\eqref{e:harmonic-ext-identity}} As above, apply Lemma~\ref{l:uc-top-and-bottom} to $\d_t v$. 
This implies that $\d_t v\restrictto{[0,2T]\times\Theta^\star}=0$. Hence $v$ is constant in time in $\Theta^\star$. At time $T$, we have $\mathbf v(T,\cdot)=\clsr\pi_T R_Th_0$, and the pressure field $v(T,\cdot)$ is the harmonic extension of the first component of $R_Th_0\restrictto{\bdy\Theta_T}$. At time $2T$, $\mathbf v$ equals $\clsr\pi R_{2T}h_\infty$ on $\Theta^\star$ by construction, proving~\eqref{e:harmonic-ext-identity}. \paragraph{Equation~\eqref{e:basic-maf-behavior} ($\Leftarrow$)} Conversely, suppose $R_{-T}\clsr\pi R_{2T} h_\infty=h\DT$. Let $v(t,x)=(Fh\DT)(t-T,x)$ be the wave field generated by the harmonic almost direct transmission. Since $\mathbf v(T,\cdot)$ is stationary harmonic in $\Theta_T^\star$ we have $(\d_t\mathbf v)(T,\cdot)=0$ there. Applying finite speed of propagation, $(\d_t \mathbf v)(0,\cdot)=0$ on $\Theta^\star$, so $(\pi^\star \mathbf v)(0,\cdot)=0$. Because $R_{-T}\clsr\pi R_{2T} h_\infty=h\DT$, the solution $v$ is equal to $(F\clsr\pi R_{2T}h_\infty)(t-2T,x)$, the wave field generated by $\clsr\pi R_{2T}h_\infty$. Hence $\pi^\star R_{-2T}\clsr\pi R_{2T}h_\infty = 0$, and we have \begin{equation} (I - \pi^\star R \pi^\star R)h_\infty = (I - \pi^\star R (\pi^\star+\clsr\pi) R)h_\infty = (I - \pi^\star)h_\infty = \clsr\pi h_\infty. \label{e:some-maf-solution} \end{equation} Therefore $h_\infty$ is a solution of the scattering control equation for some initial pulse $\clsr\pi h_\infty$; by hypothesis, this initial pulse is $h_0$. \paragraph{Uniqueness of $h_\infty$} Since $R$ is unitary and $\pi$ is a projection, any $g\in\mathbf C$ satisfies \begin{equation} \norm{\pi^\star R\pi^\star Rg} \leq \norm{\pi^\star R g} \leq \dnorm g\!. \label{e:maf-iteration-no-energy-gain} \end{equation} Now, suppose that $(I-\pi^\star R\pi^\star R)g=0$ for some $g\in\mathbf C$. 
As $g=\pi^\star R\pi^\star Rg$ no energy can be lost in either application of $\pi^\star$, and both inequalities of~\eqref{e:maf-iteration-no-energy-gain} are in fact equalities. Hence $\clsr\pi g$ and $\clsr\pi R_{2T}g$ must be zero, implying $g\in \mathbf G$. But by construction $\mathbf G\cap\mathbf C=\{0\}$, establishing uniqueness. Conversely, any $g\in\mathbf G$ satisfies $g=\pi^\star R\pi^\star R g$ by finite speed of propagation, so in fact $\mathbf G = \ker(I-\pi^\star R\pi^\star R)$. \paragraph{Equation~\eqref{e:harmonic-ext-bound}} Finally, since $i=h\DT\restrictto{\Theta^\star}=\clsr\pi_T R_Th_0\restrictto{\Theta^\star}$, it follows immediately that \begin{equation} \norm i \leq\norm{\clsr\pi_T R_Th_0}\leq\norm{R_Th_0}=\norm{h_0}. \end{equation} The proof is complete. \end{proof} \noindent In the proof of Theorem~\ref{t:basic-maf}, we used the following corollary of finite speed of propagation and unique continuation: \begin{lemma} \nobelowdisplayskip Let $u\in C(\RR,H^1(\RR^n))$ be a solution of $(\partial_t^2-c^2\Delta)u=0$ such that $u(0,\cdot)=u(2T,\cdot)=\d_t u(0,\cdot)=\d_t u(2T,\cdot)=0$ on $\Theta^\star$. Then $u$ is zero on the set \[ \mathcal D = \set{(t,x)}{d^*_\Theta(x)<T-\tabs{t-T}}\!. \] \label{l:uc-top-and-bottom} \end{lemma} \begin{proof} By finite speed of propagation, $u$ is zero on a neighborhood of $[0,2T]\times \Theta_{-T-\delta}$ for all $\delta>0$, and thus by unique continuation, also zero on the union of open light diamonds centered at points in $[0,2T]\times\bdy\Theta_{-T-\delta}$. This includes $[0,2T]\times \Theta_{-T/2-\delta}$, and repeating the argument, we find that $u=0$ on all open light diamonds centered at points in $[0,2T]\times \Theta_{-T/2^n-\delta}$ for all $n\in\ZZ$ and $\delta>0$. The union of these open light diamonds is $\mathcal D$. \end{proof} \begin{proof}[Proof of Theorem~\ref{t:limit-maf}] The proof is via the spectral theorem, which will also shed further light on the behavior of the Neumann series. 
First, note $R=\nu\circ R_{2T}$ is self-adjoint as well as unitary, since $R^*=R_{2T}^*\circ\nu^*=R_{-2T}\circ\nu=\nu\circ R_{2T}$. Divide $R$ into two self-adjoint parts, $N$ and $Z$: \begin{align} N &= \pi^\star R\pi^\star + \clsr\pi R\clsr\pi, & Z &= \pi^\star R\clsr\pi + \clsr\pi R\pi^\star. \end{align} In other words, thinking of $\im\pi^\star=\mathbf H^\star$ and $\im\clsr\pi=\mathbf H\oplus\mathbf I$ as two halves of $\mathbf C$, the operator $N$ describes wave movement within one half, while $Z$ describes movement from one half to the other. For any $f\in\mathbf H$ the identity $f = R^2f = (N^2+Z^2)f + (NZ+ZN)f$ holds. If $f\in\mathbf H^\star$ or $f\in\mathbf H\oplus\mathbf I$, then $(NZ+ZN)f$ is in the opposite half from $f$, so $NZ+ZN=0$, and $N^2+Z^2=I$ when the domain is restricted to either half. Applying the spectral theorem to $N$, identify $\mathbf C$ with $L^2(X,\mu)$ for some set $X$ and measure $\mu$, upon which $N$ acts as a multiplication operator $n(x)$. As $Z$ and $N$ do not commute, $Z$ has no special form with respect to this spectral representation. Since $\norm N\leq \norm R=1$, we have $\abs n\leq 1$. Split $X$ into two sets \begin{nalign} X' &= n^{-1}(\{-1,1\}), \\ X'' &= n^{-1}((-1,1)) = X\setminus X'. \end{nalign} For $h\in L^2(X',\mu)$, \begin{equation} \norm{Nh}=\left(\int_X n^2\abs h^2\,d\mu \right)^{\mathrlap{1/2}} \;\; = \norm{h}=\norm{Rh}\!, \end{equation} implying $Zh=0$. Conversely, if $Zh=0$, then $\norm{Nh}=\norm h$, implying $n=\pm 1$ on $\supp h$. In consequence, $L^2(X',\mu)=\ker Z=\mathbf J$, and hence $\chi$ is multiplication by the characteristic function of $X'$. Returning to the Neumann series, since $(\pi^\star)^2 = \pi^\star$, rewrite $h_k$ as \begin{nalign} h_k - h_0 = \sum_{i=0}^{k-1} (\pi^\star R\pi^\star R\pi^\star )^i (\pi^\star R\pi^\star)(\pi^\star R\bar\pi) h_0 = \sum_{i=0}^{k-1} n^{2i+1} Z h_0 &= n\frac{1-n^{2k}}{1-n^2} Zh_0. 
\label{e:spectral-tail} \end{nalign} Turning to $\clsr\pi R h_k$ now, since $Zn=-nZ$ on $\im\pi^\star\owns n^iZh_0$ and $Z^2=1-n^2$, \begin{nalign} \clsr\pi R h_k = Z(h_k-h_0)+nh_0 &= Zn\frac{1-n^{2k}}{1-n^2}Zh_0 + nh_0\\ &= -n\frac{1-n^{2k}}{1-n^2}Z^2h_0 + nh_0\\ &= n^{2k+1}h_0. \end{nalign} $n^{2k+1}h_0$ converges pointwise, monotonically, as a function in $L^2(X,\mu)$: \begin{equation} (\clsr\pi Rh_k)(x) = n^{2k+1}h_0(x) \to \when{nh_0(x), & \abs{n(x)}=1;\\ 0, & \abs{n(x)}<1.} \qquad \forall x\in X. \label{e:lim-pibar-Rhk} \end{equation} The convergence holds not only pointwise but also in $L^2(X,\mu)$ by dominated convergence. Its limit function is exactly $n\chi h_0=R\chi h_0$, the projection of $Rh_0$ onto $\mathbf J$, proving the first limit in~\eqref{e:maf-interior-limit}. Also, as a consequence of the monotonicity, $\norm{\clsr\pi Rh_k}\searrow \norm{R\chi h_0}=\norm{\chi h_0}$. Hence, while the Neumann series $\{h_k\}$ may diverge, the component of $Rh_k$ in $\mathbf H\oplus\mathbf I$ (and therefore inside $\Theta$) converges and is actually decreasing in energy. \paragraph{Proof of~\eqref{e:space-of-maf-convergence}} Starting from~\eqref{e:spectral-tail}, we wish to commute $Z$ and the powers of $n$. In the weighted space $L^2(X'',(1-n^2)^2\mu)$, \begin{equation} h_k-h_0\to \frac{n}{1-n^2}Zh_0 = \frac{n}{1-n^2}Z(1-\chi) h_0 = -Z\frac{n}{1-n^2}(1-\chi) h_0. \end{equation} The factor $(1-\chi)$ is a projection away from the kernel of $Z$, where $(1-n^2)^{-1}$ blows up. We may insert it because $\mathbf J=\ker Z$, and therefore $Z\chi=0$. After doing so, the second equality holds because $(1-\chi)h_0$ lies in the inside half $\mathbf H\oplus\mathbf I$. Any $j\in\mathbf H$ (or $\mathbf H^\star$) satisfies $\norm j^2 = \norm{Rj}^2=\norm{Zj}^2+\norm{Nj}^2$, so \begin{equation} \dnorm{Zj}^2 = \int_X (1-n^2)\abs j^2\,d\mu = \dnorm{\sqrt{1-n^2}\,j}^2\!. 
\end{equation} Applying this relation to $h_k-h_0$, \begin{equation} \norm{h_k-h_0} = \norm{n\frac{1-n^{2k}}{\sqrt{1-n^2}}(1-\chi) h_0}. \end{equation} Therefore, $h_k-h_0$ lies in the weighted space $L^2(X'',(1-n^2)\mu)$, and, by dominated convergence, converges to a function $h_\infty-h_0\in L^2(X'',(1-n^2)\mu)$. Formally, this latter space can be written $(I-N^2)^{-1/2} (1-\chi) \mathbf C$, establishing~\eqref{e:space-of-maf-convergence}. \paragraph{Density of $\mathcal Q$} Decompose $X$ as the disjoint union of the family of sets \begin{nalign} X_{-1} &= n^{-1}(\{-1,0,1\});\\ X_i &= n^{-1}((-1+2^{-i-1},-1+2^{-i})\cup(1-2^{-i},1-2^{-i-1})) \qquad i=0,1,\dotsc. \end{nalign} Let $h_0^{\smash{(i)}}=h_0\cdot\mathbf 1_{X_{-1}\sqcup\dotsb\sqcup X_i}$, where $\mathbf 1_A$ denotes the indicator function of $A\subseteq X$. Then $h_0^{\smash{(i)}}\to h_0$ in $L^2(X,\mu)$. Using the fact that $Zn=-nZ$ on $\mathbf H^\star$, as before the $k\mith$ partial sum of the Neumann series for $h_0^{\smash{(i)}}$ is \begin{equation} h^{(i)}_k = h^{(i)}_0 + n\frac{1-n^{2k}}{1-n^2} Zh^{(i)}_0 = h^{(i)}_0 - Zn\frac{1-n^{2k}}{1-n^2} (1-\chi) h^{(i)}_0. \end{equation} Since either $n=\pm1$ (so that $1-\chi=0$) or $\abs n<1-2^{-i-1}$, the multiplier $n\frac{1-n^{2k}}{1-n^2} (1-\chi)$ is bounded in $k$ and the Neumann series converges in $\mathbf C$. Hence $h^{\smash{(i)}}_0\in\mathcal Q$ for all $i$, proving $\mathcal Q$ is dense. \paragraph{Proof of $R \chi h_0=h\DT$} When $h_k$ converges in $\mathbf C$, by Theorem~\ref{t:basic-maf} we have \begin{equation} \lim_{k\to\infty} R_{-T}\clsr\pi R_{2T}h_k=h\DT. \end{equation} The left-hand side is equal to $R\chi h_0$; hence for $h_0\in\mathcal Q$, \begin{equation} R\chi h_0=h\DT. \label{e:restricted-chi-dt-link} \end{equation} By the unitarity of $R$ and~\eqref{e:harmonic-ext-bound}, $h_0\mapsto h\DT$ is a continuous map from $\mathbf H$ to $\mathbf C$. The left-hand side is likewise continuous in $h_0$. 
So, since $\mathcal Q$ is dense in $\mathbf H$,~\eqref{e:restricted-chi-dt-link} holds for all $h_0\in\mathbf H$. This together with our earlier work establishes~\eqref{e:maf-interior-limit}. By the same argument, $h\DT=\lim_{k\to\infty} R_{-s}\clsr\pi_{T-s} R_{T+s}h_k$ for any $s\in[0,T]$. \end{proof} \begin{proof}[Proof of Proposition~\ref{p:basic-energy}] Equation~\eqref{e:basic-E-recovery} follows directly from~\eqref{e:basic-maf-behavior}: \begin{nalign} \En(h\DT) = \En(R_{-T} \clsr\pi R_{2T} h_\infty) = \En(\clsr\pi R h_\infty) = \En(R h_\infty) - \En(\pi^\star R h_\infty) = \En(h_\infty) - \En(\pi^\star R h_\infty). \end{nalign} For \eqref{e:basic-KE-recovery}, let $v(t,x)=(F\clsr\pi R_{2T}h_\infty)(t-2T,x)$, as in the proof of Theorem~\ref{t:basic-maf}. Subtract its time-reversal to get the solution $w(t,x)=v(t,x)-v(2T-t,x)$, and as before write $\mathbf v=(v,\d_t v)$, $\mathbf w=(w,\d_t w)$. Consider the energy of $\mathbf w$ at $t=T$. Now $w(T,\cdot)=0$ everywhere and $\d_tw = 2\d_t v=0$ on $\Theta_T^\star$ (as shown by the proof of Theorem~\ref{t:basic-maf}), so the only energy of $\mathbf w$ at time $T$ is inside $\Theta_{T}$: \begin{nalign} \En(\mathbf w(T,\cdot)) &= \int_{\RR^n} c^{-2} \dabs{\d_t w(T,\cdot)}^2\,dx = \int_{\RR^n} c^{-2} \dabs{2\d_t v(T,\cdot)}^2\,dx\\ &\qquad\qquad\quad = 4\,\KE_{\Theta_T}(\mathbf v(T,\cdot)) \eqFSP 4\,\KE_{\Theta_T}(R_Th_\infty) \eqFSP 4\,\KE_{\Theta_T}(R_Th_0). \label{e:KE-w-link} \end{nalign} The last two equalities are by finite speed of propagation, as in~\eqref{e:double-FSP}. By conservation of energy, \begin{align} \En(\mathbf w(T,\cdot)) = \En(\mathbf w(2T,\cdot)) &= \En(\clsr\pi R h_\infty - \clsr\pi R\clsr\pi R h_\infty). \label{e:w-energy} \end{align} Expanding out the energy norm on the right hand side, \begin{equation} 4\,\KE_{\Theta_T}(R_Th_0) = \dnorm{\clsr\pi R h_\infty}^2 + \dnorm{\clsr\pi R \clsr\pi Rh_\infty}^2 - 2\tform{\clsr\pi R h_\infty,\clsr\pi R \clsr\pi Rh_\infty}. 
\end{equation} Using $\clsr\pi R\clsr\pi Rh_\infty + \clsr\pi R\pi^\star Rh_\infty=\clsr\pi h_\infty=h_0$, and $\pi^\star R\clsr\pi Rh_\infty=0$, \begin{nalign} \dnorm{\clsr\pi R h_\infty}^2 &= \dnorm{Rh_\infty}^2 - \dnorm{\pi^\star Rh_\infty}^2 \\ &= \dnorm{h_\infty}^2 - \dnorm{\pi^\star Rh_\infty}^2;\\ \dnorm{\clsr\pi R \clsr\pi Rh_\infty}^2 &= \dnorm{h_0 - \clsr\pi R \pi^\star Rh_\infty}^2\\ &= \dnorm{h_0}^2 + \dnorm{\clsr\pi R\pi^\star Rh_\infty}^2 - 2\form{h_0, \clsr\pi R\pi^\star Rh_\infty}\\ &= \dnorm{h_0}^2 + \dnorm{\pi^\star Rh_\infty}^2 - \dnorm{\pi^\star R\pi^\star Rh_\infty}^2 - 2\tform{h_0, R\pi^\star Rh_\infty};\\ \form{\clsr\pi R h_\infty,\clsr\pi R \clsr\pi Rh_\infty} &= \form{R h_\infty, R \clsr\pi Rh_\infty} - \form{\pi^\star R h_\infty,\pi^\star R \clsr\pi Rh_\infty} \\ &= \form{h_\infty, \clsr\pi Rh_\infty}\\ &= \tform{h_0, Rh_\infty}. \label{e:expand-ke-energy-diff} \end{nalign} Recalling $\pi^\star R\pi^\star R h_\infty = h_0-h_\infty$ and simplifying yields~\eqref{e:basic-KE-recovery}. \end{proof} \begin{proof}[Proof of Proposition~\ref{p:limit-energy}] \paragraph{Proof of~\eqref{e:limit-E-recovery}} The energy recovery formula follows directly from Theorem~\ref{t:limit-maf}: \begin{nalign} \lim_{k\to\infty}\left[\En(h_k)-\En(\pi^\star Rh_k)\right] &= \lim_{k\to\infty} \dnorm{Rh_k}^2-\dnorm{\pi^\star Rh_k}^2\\ &= \lim_{k\to\infty} \dnorm{\clsr\pi Rh_k}^2\\ &= \dnorm{h\DT}^2. \end{nalign} \paragraph{Proof of~\eqref{e:limit-KE-recovery}} The proof is similar to~\eqref{e:basic-KE-recovery}, but with extra terms. By~\eqref{e:KE-w-link}--\eqref{e:expand-ke-energy-diff}, $h_\infty$ satisfies \begin{align} 4\,\KE_{\Theta_T}(R_Th_0) &= \En(\clsr\pi R h_\infty - \clsr\pi R\clsr\pi R h_\infty) \label{e:ke-expansion-h-infinity-1} \\ &= \En(h_\infty)+\En(h_0)-\En(\pi^\star R \pi^\star R h_\infty) - 2\tform{h_0,R \pi^\star Rh_\infty + R h_\infty}. 
\label{e:ke-expansion-h-infinity-2} \end{align} For $h_k$, we must modify the second equality as $\pi^\star R\clsr\pi R h_k$ is no longer zero. Instead, write $\pi^\star R\clsr\pi Rh_k$ as $\pi^\star h_k-\pi^\star R\pi^\star Rh_k$ to obtain \begin{nalign} \En(\clsr\pi R h_k - \clsr\pi R\clsr\pi R h_k) &= \En(h_k)+\En(h_0)-\En(\pi^\star R \pi^\star R h_k) \\ &\qquad\qquad + 2\tform{\pi^\star Rh_k,\pi^\star h_k- \pi^\star R\pi^\star Rh_k}- 2\tform{h_0,R \pi^\star Rh_k + R h_k}. \label{e:ke-expansion-h-k} \end{nalign} The right-hand side is the quantity in the limit in~\eqref{e:limit-KE-recovery}. As $k\to\infty$, it converges to~\eqref{e:ke-expansion-h-infinity-2} by continuity as long as $h_0\in\mathcal Q$; hence its limit is $4\,\KE_{\Theta_T}(R_Th_0)$. This proves~\eqref{e:limit-KE-recovery} when $h_0\in\mathcal Q$. Then, by continuity and the density of $\mathcal Q$, \eqref{e:limit-KE-recovery} must hold for all $h_0\in\mathbf H$. Interestingly, to obtain kinetic energy we used initial data \begin{nalign} \lim_{k\to\infty} \left[\clsr\pi R h_k - \clsr\pi R\clsr\pi R h_k\right]= R\chi h_0-\clsr\pi\chi h_0 = (n-1)\chi h_0, \end{nalign} equal to $-2$ times the projection of $h_0$ onto $L^2(n^{-1}(\{-1\}),\mu)$. \end{proof} \begin{proof}[Proof of Lemma~\ref{l:characterization-of-It}] The proof is essentially that of the Dirichlet principle. First, while ${\mathbf H} = {\mathbf H}_t^\star \oplus \mathbf I_t\oplus {\mathbf H}_t$, we note that also (with tildes) \begin{equation} \tilde{\mathbf H} = \tilde{\mathbf H}_t^\star \oplus \mathbf I_t\oplus {\mathbf H}_t. \end{equation} This is true simply because $\mathbf I_t$ is orthogonal to $\mathbf G$ and hence to $\tilde{\mathbf H}_t^\star={\mathbf H}_t^\star\oplus \mathbf G$. Now, for one direction of the proof, consider an arbitrary $i=(i_0,i_1)\in\mathbf I_t$. Since $\Theta_t$ is Lipschitz, its boundary has measure zero, so $L^2(\Upsilon)=L^2(\Theta_{t}^\star)\oplus L^2(\Theta_{t})$. Hence $i_1$ must be zero. 
Let $\phi\in \mathbf H_t$ be nonzero and $a>0$. Then $\norm{i+a\phi}^2=\norm i^2+a^2\norm\phi^2>\norm i^2$ by orthogonality. Hence $a=0$ is a local minimum of $\norm{i+a\phi}^2$, and the derivative of this quantity with respect to $a$ is zero at $a=0$: \begin{nalign} 0 = \left.\smd a \dnorm{i+a\phi}^2\right|_{a=0} = 2\dform{i,\phi} = 2\int_{\mathrlap{\Upsilon}} \quad \gradient i_0\cdot\gradient\phi_0. \label{e:i0-weakly-harmonic} \end{nalign} Since $i_0$ is weakly harmonic on $\Theta_{t}$, it is strongly harmonic; in the same way it is harmonic on $\Theta_{t}^\star$. Conversely, if $i_0\in H^1_0(\Upsilon)$ is harmonic on $\Upsilon\setminus\bdy\Theta_t$, it is weakly harmonic, immediately implying $(i_0,0)$ is orthogonal to $\mathbf H_t$ and $\mathbf H_t^\star$. \end{proof} \begin{proof}[Proof of Proposition~\ref{p:only-neumann}] First, we have the equivalence \begin{equation} (I-\pi^\star R\pi^\star R)h_\infty=h_0 \iff (I-\pi^\star R\pi^\star R)(h_\infty-h_0)=\pi^\star R\pi^\star Rh_0. \end{equation} Since $\pi^\star R\pi^\star$ is self-adjoint and $\norm{\pi^\star R\pi^\star}\leq 1$ (cf.\ the proof of Theorem~\ref{t:limit-maf}), it suffices to apply the following lemma. \end{proof} \begin{lemma} Let $A$ be a self-adjoint linear operator on a Hilbert space $X$ with $\norm A\leq 1$. If $x,y\in X$ satisfy $(I-A^2)y=x$, then the Neumann series $\sum_{k=0}^\infty A^{2k}x$ converges to the minimal-norm solution $y=y^*$ to $(I-A^2)y=x$. \label{l:only-neumann} \end{lemma} \begin{proof} By the spectral theorem, $X$ can be identified with $L^2(W,\mu)$ for some set $W$ and measure $\mu$, upon which $A$ acts as a (real-valued) multiplication operator $a(w)$; also $\norm A\leq 1$ implies $\abs a\leq1$ for all $w\in W$. If $i(w)$ denotes the indicator function of $a^{-1}(\pm1)$, then $y=y^*=iy$ is the minimal-norm solution of $(I-A^2)y=x$. 
Let $y_n=y_n(w)=\sum_{k=0}^n a^{2k} x$ be the $n\mith$ partial sum of the Neumann series; then $y_n(w)$ converges monotonically away from zero to $yi$ for each $w$. Hence $y_n\to y^*$ in $L^2(W,\mu)$. \end{proof} \begin{proof}[Proof of Proposition~\ref{p:H-star-characterization}] Our first task is to characterize $\mathbf G$, the space of functions staying outside $\Theta$ in time $2T$. We make a guess $\mathbf G_1$ for $\mathbf G$ and show that the two are equal by unique continuation, using Lemma~\ref{l:uc-top-and-bottom}. After identifying $\mathbf G$, it will be easy to identify $\mathbf H^\star$, its complement in $\tilde{\mathbf H}^\star$. First, define \begin{nalign} \mathbf G_0 &= H_0^1(\Theta^\star_{-2T})\oplus L^2(\Theta^\star_{-2T}),\\ \mathbf G_1 &= \mathbf G_0 + R_{2T}\mathbf G_0. \end{nalign} By finite speed of propagation, $\mathbf G_0,\, R_{2T}\mathbf G_0\subseteq\mathbf G$, so $\mathbf G_1\subseteq \mathbf G$. We want to show that in fact $\mathbf G=\mathbf G_1$. Accordingly, suppose $g\in\mathbf G$ and $g\perp\mathbf G_1$. Having $g\perp\mathbf G_0$ implies $\pi^\star_{-2T}g = 0$; similarly $g\perp R_{2T}\mathbf G_0$ implies $\pi^\star_{-2T}Rg=0$. That is, the wave field of $g$ is stationary harmonic outside a $2T$-neighborhood of $\Theta$ at $t=0,2T$. As in the proof of Theorem~\ref{t:basic-maf}, we can apply Lemma~\ref{l:uc-top-and-bottom} to (a smoothed version of) $\d_t Fg$ to conclude that $R_Tg$ is stationary harmonic outside a $T$-neighborhood of $\Theta$ at time $T$; i.e., \begin{equation} \pi^\star_{-T}R_T g=0. \label{e:perp-to-G0-implies} \end{equation} On the other hand, $g\in \mathbf G$ implies that $\clsr\pi g = \clsr\pi Rg=0$; the wave field of $g$ is zero on $\Theta$ at $t=0,2T$. Applying Lemma~\ref{l:uc-top-and-bottom}, we can conclude that the wave field of $g$ is zero on a $T$-neighborhood of $\Theta$ at time $T$; i.e., \begin{equation} \clsr\pi_{-T} R_T g = 0. 
\label{e:perp-to-RG0-implies} \end{equation} Hence $R_T g = \pi^\star_{-T}R_Tg + \clsr\pi_{-T}R_Tg = 0$; we conclude that $g=0$, and therefore $\mathbf G = \mathbf G_1$. Now, we can prove~\eqref{e:H-star-characterization}. $\mathbf H^\star$ is the complement of $\mathbf G$ in $\tilde{\mathbf H}^\star$. For Cauchy data $h\in \tilde{\mathbf C}$, \begin{align} h \in \tilde{\mathbf H}^\star &\iff \clsr\pi h = 0, \end{align} and since $\mathbf G = \mathbf G_1$, equations~(\ref{e:perp-to-G0-implies}--\ref{e:perp-to-RG0-implies}) imply \begin{align} h \perp \mathbf G &\iff h\perp\mathbf G_0 \text{ and } h\perp R\mathbf G_0 \iff \pi^\star_{-2T} h = 0 \text{ and } \pi^\star_{-2T}Rh = 0. \qedhere \end{align} \end{proof} \begin{proof}[Proof of Proposition~\ref{p:DN-determines-Cauchy}] Let $h\in H^1(\Omega^\star)\oplus L^2(\Omega^\star)$, and let $u_1=F_1h$ be the solution with respect to $c_1$. Define $u_2$ to be the solution of the IBVP~\eqref{e:DN-def} with boundary data $\restr{u_1}_{\RR\times\bdy\Omega}$. Since $c_1$ and $c_2$ have identical Dirichlet-to-Neumann maps, it follows that $\restr{\d_\nu u_1}_{\RR\times\bdy\Omega}=\restr{\d_\nu u_2}_{\RR\times\bdy\Omega}$. Therefore, $u_2$ may be extended to $\RR\times\RR^n$ by setting it equal to $u_1$ outside $\Omega$, and both $u_2$ and $\d_\nu u_2$ will be continuous on $\RR\times\bdy\Omega$. Hence $u_2$ satisfies the wave equation with respect to $c_2$ inside and outside $\Omega$, and satisfies the interface conditions at $\bdy\Omega$. Therefore, it is a solution of the $c_2$ wave equation on all of $\RR^n$~\cite[Theorem 2.7.3]{StolkThesis}. By uniqueness of the Cauchy problem, $u_2 = F_2h$, and by definition $u_2=u_1=F_1h$ on $\Omega^\star$. 
\end{proof} \section{Microlocal analysis of scattering control} \label{s:microlocal} In this section, we turn from our exact analysis of scattering control to a study of its microlocal (high-frequency limit) behavior, allowing us to study reflections and transmissions of wavefronts naturally. To accommodate the microlocal analysis, we first narrow the setup somewhat, and consider a microlocally-friendly version of the scattering control equation in~\sref{s:ml-maf}. Section~\ref{s:ml-adt} introduces a natural analogue of the almost direct transmission, based on depths of singularities (covectors), rather than points. Just as before, isolating the microlocal almost direct transmission is sufficient for solving the microlocal scattering control equation (\sref{s:ml-isolating}). If the wave speed $c$ is known, it is not hard, as~\sref{s:ml-construct} shows, to construct solutions assuming some natural geometric conditions. Our main result, Theorem~\ref{t:ml-convergence}, is that the scattering control iteration converges to a similar solution, to leading order in amplitude, under the same conditions. Finally,~\sref{s:ml-uniqueness} discusses uniqueness for the microlocal scattering control equation. Proofs of the key results follow in~\sref{s:ml-proofs}. \paragraph{Notation} Throughout, ``$\eqml$'' denotes equality modulo smooth functions or smoothing operators, and $\To^*M=T^*M\setminus 0$ ($M$ a manifold). A \emph{graph FIO} is a Fourier integral operator associated with a canonical graph. Finally, for a set of covectors $W\subseteq T^*M$, let $\mathcal D'_W$, $\mathcal E'_W$ denote the spaces of distributions with wavefront set in $W$. \subsection{Microlocal scattering control} \label{s:ml-maf} In this section, we begin by restricting $\Omega$ and $c$ suitably in order to study reflection and transmission of singularities. 
We also adjust the scattering control equation slightly, replacing projections with smooth cutoffs, and employing a parametrix for wave propagation. Let $\Omega\subseteq\RR^n$ be a smooth open submanifold, and $c$ a piecewise smooth\footnote{As usual, ``smooth'' means $C^\infty$ throughout.} wave speed that is singular only on a set of disjoint, closed\footnote{If $c$ is singular on some non-closed hypersurface $\Gamma_i$, we may be able to ``close up'' $\Gamma_i$ in such a way that it does not intersect the other hypersurfaces.}, connected, smooth hypersurfaces $\Gamma_i$ of $\clsr\Omega$, called \emph{interfaces}. Let $\Gamma=\bigcup\Gamma_i$; let $\{\Omega_j\}$ be the connected components of $\RR^n\setminus\Gamma$. Also assume each smooth piece of $c$ extends smoothly to $\RR^n$. The projections $\clsr\pi$, $\pi^\star$ arose quite naturally in the exact setting, taking the roles of cutoffs inside and outside $\Theta$. Because they introduce singularities along $\bdy\Theta$, it is natural to replace them by smooth cutoffs for a microlocal study. We will also separate the initial data $h_0$ from the cutoff region. To accommodate both aims, choose nested open sets $\Theta',\,\Theta''$ between $\Omega$ and $\Theta$: \begin{equation} \Omega\subseteq\Theta' \subseteq\clsr{\Theta'}\subseteq\Theta''\subseteq\clsr{\Theta''}\subseteq\Theta, \end{equation} and smooth cutoffs $\sigma,\sigma^\star\colon\RR^n\to[0,1]$ such that \begin{align} \sigma(x) &= \when{1, & x\in\Theta''\!,\\ 0, & x\notin\Theta,} & &\supp\sigma = \Theta, \\ \sigma^\star &= 1 - \sigma, & &\supp\sigma^\star = \RR^n\setminus\Theta''. \end{align} The sets $\Theta',\,\Theta''$ should be thought of as arbitrarily close to $\Theta$; we will write $\Theta'^\star=\RR^n\setminus\clsr{\Theta'}$. Finally, a standard parametrix $\tilde R$ accounting for reflections and refractions will frequently replace the exact propagator $R$, discussed at greater length in Appendix~\ref{s:parametrix-construction}. 
Most importantly, $\tilde R$ includes microlocal cutoffs along glancing rays, so that $Rh_0\eqml\tilde Rh_0$ as long as $\WF(h_0)$ is disjoint from a set of covectors $\mathcal W\subset T^*(\RR^n\setminus\Gamma)$ producing near-glancing broken bicharacteristics. The object of study is now the \emph{microlocal scattering control equation} \begin{equation} (I - \sigma^\star R\sigma^\star R) h_\infty \eqml h_0, \label{e:ml-maf} \end{equation} and accompanying formal Neumann series \begin{equation} h_\infty \eqml \sum_{i=0}^\infty (\sigma^\star R)^{2i} h_0. \label{e:ml-maf-series} \end{equation} In general, the operator $(\sigma^\star R)^2$ preserves but does not improve Sobolev regularity, preventing us from assigning any meaning to this infinite sum \emph{a priori}.\footnote{Were $(\sigma^\star R)^2$ to have negative Sobolev order,~\eqref{e:ml-maf-series} may be interpreted as an asymptotic series. This situation occurs, for example, for $c$ with $C^{1,\alpha}$ or weaker singularities~\cite{dHUV}, in the absence of diving rays.} Instead, we will consider the limiting behavior of its partial sums. \subsection{Microlocal almost direct transmission} \label{s:ml-adt} \begin{figure} \caption{Microlocal almost direct transmission. (a) The wavefront set of the solution with point source $h_0$ includes reflected and refracted singularities due to an interface $\Gamma$. (b) The microlocal almost direct transmission does not include the reflected singularities; their depth is less than $T$. (c) Wavefront set of the (non-microlocal) almost direct transmission, for comparison.} \label{f:ml-adt} \end{figure} The almost direct transmission played a central role in the exact analysis of scattering control. We begin by studying its natural microlocal analogue. 
Intuitively, the \emph{microlocal almost direct transmission} $h\MDT$ is the microlocal restriction of the solution at time $T$ to singularities in $\To^*\Theta$ whose distance from the surface $\bdy T^*\Theta$ is at least $T$ (Figure~\ref{f:ml-adt}). The distance here should be defined as the length of the shortest broken bicharacteristic segment connecting a covector to the boundary (Figure~\ref{f:cotangent-depth}). In general, our $h\MDT$ is not equivalent to the ideal direct transmission, which would contain only transmitted waves, but it may still serve as a useful proxy. In the remainder of the section, we briefly define distance in the cotangent bundle, then use it to define the microlocal almost direct transmission $h\MDT$. \paragraph{Distance in the Cotangent Bundle} Let $V = \RR\times(\RR^n\setminus\Gamma)$. For brevity, we shall simply say $\gamma\colon(s_-,s_+)\to \To^*V$ is a \emph{bicharacteristic} if it is a bicharacteristic for $\d_t^2-c^2\Delta$; is \emph{unit speed}, i.e., $dt/ds=1$ on $\gamma$; and is \emph{maximal}, i.e., cannot be extended. Here $s_\pm$ may be infinite. \begin{figure} \caption{Depth of a singularity. The broken bicharacteristic segments joining covector $\xi$ to the boundary are shown, projected to $\RR^n$ (solid); they reflect and refract at interfaces (dotted lines). The depth of $\xi$ in $T^*\Theta$ is defined as the length of the shortest of these paths to the boundary (bold).} \label{f:cotangent-depth} \end{figure} A \emph{broken bicharacteristic} $\gamma\colon(s_0,s_1)\cup(s_1,s_2)\cup\dotsb\cup(s_{k-1},s_k)\to \To^*V$ is a sequence of bicharacteristics connected by reflections and refractions obeying Snell's law: for $i=1,\dotsc,k-1$, \begin{align} \gamma(s_i^-), \; \gamma(s_i^+) &\in \To^*([0,2T]\times\Gamma), & (di_\Gamma)^*\gamma(s_i^-)&=(di_\Gamma)^*\gamma(s_i^+), \label{e:broken-bichar-interfaces} \end{align} where $i_\Gamma\colon\Gamma\hookrightarrow\Omega$ is inclusion. 
Since any broken bicharacteristic may be parameterized by time, we will often abuse notation and consider $\gamma$ as a map from $t\in\RR$ into $\To^*(\RR^n\setminus\Gamma)$. The \emph{distance} of a covector $\xi\in\To^*(\RR^n\setminus\Gamma)$ from the boundary of $M\subseteq\RR^n$ is \begin{equation} d(\xi,\,\bdy T^*M) = \min \cset{\abs{a-b}}{\gamma(a) = \xi,\ \gamma(b)\in\bdy T^*M}, \end{equation} the minimum taken over broken bicharacteristics $\gamma$. Extend $d(\cdot,\bdy T^*M)$ to all $\xi\in\To^*\RR^n$ by lower semicontinuity. In general, $d$ will not be continuous at $\To^*(\RR\times\Gamma)$. \emph{Depth} is the same as distance, but with a sign indicating whether $\xi$ is inside or outside $M$: \begin{equation} d^*_{T^*M}(\xi) = \when{ +d(\xi,\,\bdy T^*M), & \xi\in T^*M,\\ -d(\xi,\,\bdy T^*M), & \text{otherwise}. } \end{equation} \begin{figure} \caption{Example of a depth sublevel set $(T^*\Theta)_T$, with wave speed $c=1$. Each marked circle describes the unit covectors based at its center point: those inside $(T^*\Theta)_T$ are marked in black, those outside in white. Near the boundary, $(T^*\Theta)_T$ contains only nearly horizontal covectors, while below $\Theta_T$ it contains covectors in all directions, as the distance to the surface in any direction is greater than $T$.} \label{f:ml-depth-set} \end{figure} \paragraph{Microlocal Almost Direct Transmission} Let $(T^*M)_t$ be the set of covectors of depth greater than $t$ in a manifold $M$: \begin{equation} (T^*M)_t = \set{\xi\in T^*M}{d^*_{T^*M}(\xi)>t}. \end{equation} Figure~\ref{f:ml-depth-set} illustrates $(T^*M)_t$ in a simple case. Note $(T^*M)_t\supsetneq T^*(M_t)$ in general, where $M_t$ is defined as in~\eqref{e:def-Theta-t-star}. A \emph{microlocal almost direct transmission} of $h_0$ at time $T$ is a distribution $h\MDT$ satisfying \begin{align} h\MDT &\eqml R_Th_0 \quad\text{on $(T^*\Theta')_T$} & \WF(h\MDT) &\subseteq \clsr{(T^*\Theta'')_T}. 
\label{e:ml-adt} \end{align} Essentially, $h\MDT$ is any sufficiently sharp microlocal cutoff of $R_Th_0$ outside $(T^*\Theta')_T$. Note that there is a gap $G=\clsr{(T^*\Theta'')_T}\setminus (T^*\Theta')_T$ in which we do not characterize $h\MDT$; the gap is needed in case $\WF(R_Th_0)$ intersects $\bdy(T^*\Theta')_T$, since then the cutoff may not be infinitely sharp. The solutions of~\eqref{e:ml-adt} form an equivalence class modulo $\mathcal D'_G+C^\infty(\RR^n)$, since any two choices of $h\MDT$ differ exactly by a distribution with wavefront set in $G$. With this equivalence class in mind, we denote by $h\MDT$ any solution of~\eqref{e:ml-adt} and refer to it simply as \emph{the} microlocal almost direct transmission. Note that \begin{equation} \begin{array}{r@{\;}c@{\;}l} \WF(h\MDT) & \subset & (T^*\Theta)_T\\ & & \quad\rotatebox{90}{$\subset$}\\ \WF(h\DT) & \subset & T^*(\Theta_T).\\ \end{array} \end{equation} \begin{figure} \caption{Microlocal almost direct transmission: $h\MDT$ contains the singularities in $R_Th_0$ of depth at least $T$ in $T^*\Theta'$. (a) Depth diagram; interfaces marked with small circles. (b) Projection onto $\RR^n$; interfaces dotted.} \label{f:ml-mdt-depth} \end{figure} It is natural to visualize $h\MDT$ with a \emph{depth diagram} plotting the depths of the wave field's singularities over time (Figure~\ref{f:ml-mdt-depth}). The depth of a singularity traveling along any broken bicharacteristic $\gamma$ is a piecewise linear function of time, with derivative $\pm1$ almost everywhere, so a depth diagram consists of line segments of slope $\pm1$. Note that the depth of $\gamma(t)$ is (up to sign) the shortest distance from $\gamma(t)$ to the surface along \emph{any} broken bicharacteristic, not only along $\gamma$. \begin{figure} \caption{Depth discontinuity at interfaces. (a) Covectors $\alpha_3$, $\alpha_4$ are closer to the boundary (via $\gamma_1$) than $\alpha_2$, which cannot take this path. 
(b) Depths of the positive bicharacteristics $\gamma_i$ through these $\alpha_i$, meeting the interface at time $t_0$. A jump occurs at the interface along either broken bicharacteristic through $\alpha_2$.} \label{f:discont-depth-rd-diagram} \label{f:discont-depth} \end{figure} \begin{remarks} \begin{itemize} \item Along a broken bicharacteristic, $d^*_{T^*\Theta'}$ is often discontinuous at interfaces, as illustrated in Figure~\ref{f:discont-depth}. To see why, consider a bicharacteristic $\gamma_1$ encountering an interface; let $\gamma_3,\gamma_4$ be the reflected and transmitted bicharacteristics, and let $\gamma_2$ be the opposite incoming bicharacteristic. In general, one of the $\gamma_i$, say $\gamma_1$, provides the shortest route from the interface to the boundary. Singularities along $\gamma_3$ or $\gamma_4$ can reach the boundary along $\gamma_1$, while those along $\gamma_2$ cannot and must take a longer path. Consequently, a jump in depth occurs when passing from $\gamma_2$ to either $\gamma_3$ or $\gamma_4$. \item Along a singly reflected bicharacteristic, depth does not switch from increasing to decreasing at the moment of reflection in general. Instead, depth will change from increasing to decreasing halfway along; compare the broken bicharacteristic $\gamma_1\cup\gamma_3$ in Figure~\ref{f:discont-depth}. \item Depth (and hence $h\MDT$) cannot intrinsically distinguish reflections from transmissions. This is possible only under geometric assumptions ensuring that reflected waves travel toward the boundary, and transmitted waves travel away from it; e.g., $\Theta=\{x_n>0\}$ a half\-space, and $c$ a function of $x_n$ alone. 
\end{itemize} \end{remarks} \subsection{Isolating the microlocal almost direct transmission} \label{s:ml-isolating} One of our earlier key facts, expressed in Theorem~\ref{t:basic-maf}, is that solving the (exact) scattering control equation $(I-\pi^\star R\pi^\star R)h_\infty=h_0$ for $h_\infty$ is equivalent to isolating the almost direct transmission: $\clsr\pi R_{2T}h_\infty=R_Th\DT$ (assuming $h_\infty=h_0$ on $\clsr{\Theta}$). In other words, the wave field of $h_\infty$ at $t=2T$ inside the domain $\Theta$ is exactly the almost direct transmission's wave field, undisrupted by any waves from shallower regions. \begin{figure} \caption{Isolating $h\MDT$. A singularity from $h_0$ travels inward, reflecting and refracting from two interfaces (indicated by open circles). The multiply-reflected ray (dotted) will enter the domain of influence of $h\MDT$ (shaded). To prevent this, $h_\infty$ must include an appropriate singularity to eliminate the multiply-reflected ray. The horizontal axis is depth in the cotangent bundle. } \label{f:isolate-mdt-before} \label{f:isolate-mdt-after} \label{f:isolate-mdt} \end{figure} Our main goal now is to consider the microlocal version of this equivalence: is solving the microlocal scattering control equation~\eqref{e:ml-maf} equivalent to isolating $h\MDT$? As before, one direction is easy: if a tail $h_\infty$ is found that isolates $h\MDT$ (in the sense that $R_{2T}h_\infty\eqml R_Th\MDT$ on $\Theta$) it is a solution of~\eqref{e:ml-maf}. The idea behind crafting such an $h_\infty$ we have seen already in Figure~\ref{f:mr-demo}: $h_\infty$ should include appropriate extra singularities that ensure singularities in the wave field of $h_0$ at depth less than $T$ do not interfere with $h\MDT$'s wave field. Figure~\ref{f:isolate-mdt} illustrates the situation. \begin{lemma} Let $h_0\in \mathcal E'(\Theta'\setminus\Gamma)\oplus \mathcal E'(\Theta'\setminus\Gamma)$. 
Suppose $h_\infty\in \mathcal E'(\RR^n\setminus\Gamma)\oplus \mathcal E'(\RR^n\setminus\Gamma)$ isolates the microlocal almost direct transmission, in the sense that \begin{equation} \drestr{h_\infty}_{\Theta}\eqml \drestr{h_0}_{\Theta} \text{\ \ and } \drestr{R_{2T}h_\infty}_{\Theta}\eqml\drestr{R_Th\MDT}_{\Theta}. \end{equation} Then $h_\infty$ satisfies the microlocal scattering control equation, $(I-\sigma^\star R\sigma^\star R)h_\infty \eqml h_0$. The same holds true with $\tilde R$ replacing $R$. \label{l:isolate-mdt} \end{lemma} \begin{proof} Let $v(t,x)=(F\sigma R_{2T}h_\infty)(t-2T,x)$ be the wave field generated by $\sigma R_{2T}h_\infty$, and $\mathbf v=(v,\d_t v)$. Since $\WF(h\MDT)\subseteq \clsr{(T^*\Theta'')_T}$, propagation of singularities limits the wavefront set of $R_Th\MDT$ to $\clsr{T^*\Theta''}$, where the cutoff $\sigma$ is identity. Hence $\mathbf v$ at time $2T$ agrees with $R_Th\MDT$. Moving to time $T$, we have $\mathbf v(T,\cdot)\eqml h\MDT$; by propagation of singularities again, $\WF(\mathbf v(0,\cdot))\subseteq \clsr{T^*\Theta''}$. In particular, $\sigma^\star R\sigma Rh_\infty = \sigma^\star \mathbf v(0,\cdot)$ is smooth. We conclude that \begin{equation} \sigma^\star R\sigma^\star R h_\infty = \sigma^\star R(1 - \sigma) Rh_\infty \eqml \sigma^\star h_\infty - 0 \eqml h_\infty - h_0. \end{equation} The same argument holds with the parametrix $\tilde R$ in place of $R$. \end{proof} Just like Theorem~\ref{t:basic-maf}, Lemma~\ref{l:isolate-mdt} assures us that solving the microlocal scattering control equation is necessary for producing a tail $h_\infty-h_0$ that isolates $h\MDT$. The other direction of the problem (does a solution of the microlocal scattering control equation isolate $h\MDT$?) is a more subtle question, taken up in the following sections. Our overarching goal is to show that $h\MDT$, like its non-microlocal version $h\DT$, may be found by the Neumann-type iteration~\eqref{e:ml-maf-series}.
We start by explicitly constructing a Fourier integral operator $A$ that isolates $h\MDT$, given $c$. By Lemma~\ref{l:isolate-mdt} this FIO is a microlocal inverse for $I-\sigma^\star R\sigma^\star R$. Now, Neumann iteration also provides a (formal) microlocal inverse for this operator. The existence of $A$ can be used to show that Neumann iteration isolates $h\MDT$ as well, in a principal symbol sense. This leads to the question of injectivity for $I-\sigma^\star R\sigma^\star R$, explored in greater depth in Section~\ref{s:ml-uniqueness}. \subsection{Constructive parametrix for $I-\sigma^\star R\sigma^\star R$} \label{s:ml-construct} In this section, we lay out conditions on $\Theta$, $c$, $h_0$ under which we can show the existence of an $h_\infty$ isolating $h\MDT$, and thereby a microlocal inverse for $I-\sigma^\star R\sigma^\star R$. The motivation for this relatively straightforward task is that it enables the study of the convergence behavior of the microlocal Neumann iteration in the following section. We start by making a number of definitions, most of which are illustrated in Figure~\ref{f:ml-constructive-terms}.\footnote{Note that for simplicity Figure~\ref{f:ml-constructive-terms} is not generic; in light of the remarks in \sref{f:ml-adt}, the behavior of $d^*_{T^*\Theta'}$ is typically much more complicated.} \begin{defn} \renewcommand\labelenumi{(\alph{enumi})} \renewcommand\labelenumii{\roman{enumii}.} \begin{enumerate} \item The forward and backward \emph{microlocal domains of influence} $\mathcal D^+\MDT$, $\mathcal D^-\MDT$ are defined by: \begin{nalign} \mathcal D^-\MDT &= \set{(t,\eta)\in[0,T]\times \To^*\RR^n}{d^*_{T^*\Theta'}(\eta)>t}\!,\\ \mathcal D^+\MDT &= \set{(t,\eta)\in[T,2T]\times \To^*\RR^n}{d^*_{T^*\Theta'}(\eta)>2T-t}\!. \end{nalign} By propagation of singularities, every $\eta\in\WF(h\MDT)$ is connected to some $\eta'\in\WF(h_0)$ by a broken bicharacteristic inside $\mathcal D^-\MDT$.
\item A \emph{returning} bicharacteristic $\gamma:(t_-,t_+)\to\To^*(\RR^n\setminus\Gamma)$ is one that leaves $\mathcal D^-\MDT$ before $t=T$. More precisely, $\gamma(t_0)\in\mathcal D^-\MDT$ and $\lim_{t\to t_1}\gamma(t)\notin\mathcal D^-\MDT$ for some $t_0,t_1\in(t_-,t_+]$, $t_0<t_1$. \begin{figure} \caption{Terminology for constructing an inverse of $I-\sigma^\star R\sigma^\star R$. Here $\Theta$ is a halfspace $\{x_n>0\}$.} \label{f:ml-constructive-terms} \end{figure} \item Bicharacteristics $\gamma_1$, $\gamma_2$ are \emph{connected} if their union $\gamma_1\cup\gamma_2$ is a broken bicharacteristic. A bicharacteristic $\gamma_1$ terminating in an interface may have one (totally reflected), or two (reflected and transmitted) connecting bicharacteristics there. If it has two, there exists an \emph{opposite} bicharacteristic $\gamma_3$ sharing $\gamma_1$'s connecting bicharacteristics. \item A bicharacteristic $\gamma\colon(t_-,t_+)\to\To^*(\RR^n\setminus\Gamma)$ is \emph{$(\pm)$-escapable} if either: \begin{enumerate} \item it has \emph{escaped}: $\gamma$ is defined at $t=T\pm T$ and $\gamma(T\pm T)\notin T^*\Theta$, \end{enumerate} or recursively, after only finitely many recursions, either \begin{enumerate} \stepcounter{enumii} \item all of its connecting bicharacteristics at $t_\pm$ are $(\pm)$-escapable; \item one of its connecting bicharacteristics at $t_\pm$ is $(\pm)$-escapable, and the opposite bicharacteristic is $(\mp)$-escapable. \end{enumerate} In the final case, if the $(\pm)$-escapable connecting bicharacteristic is a reflection, we also require $c$ to be discontinuous at $\lim_{t\to t_\pm}\gamma(t)$ to ensure the reflection operator has nonzero principal symbol there. \end{enumerate} \end{defn} Roughly speaking, we may ensure a singularity traveling along a $(+)$-escapable bicharacteristic never creates a singularity in $\mathcal D^+\MDT$ by choosing $h_\infty$ appropriately.
Similarly, we may \emph{produce} a singularity along a $(-)$-escapable bicharacteristic without introducing any extra singularities inside $\mathcal D^+\MDT$. Now, if every returning bicharacteristic in $\WF(Fh_0)$ is $(+)$-escapable, we can find an $h_\infty$ isolating $h\MDT$ with an FIO construction, leading to a microlocal inverse of $I-\sigma^\star R\sigma^\star R$. Accordingly, let $\mathcal S\subset T^*\Theta'$ be the set of $\xi\notin\mathcal W$ such that every returning bicharacteristic belonging to a broken bicharacteristic through $\xi$ is $(+)$-escapable.\footnote{Recall from~\sref{s:ml-maf} that $\mathcal W$ is the set of covectors for which the parametrix $\tilde R$ is valid.} We then have the following result: \begin{proposition} There is an FIO $A\colon \mathcal E'(\Theta')\oplus \mathcal E'(\Theta')\to\mathcal D'(\RR^n)\oplus\mathcal D'(\RR^n)$ of order 0 satisfying \begin{nalign} (I-\sigma^\star R\sigma^\star R) A &\eqml I \quad\text{on $\mathcal D'_{\mathcal S}$}. \end{nalign} Furthermore, $R_{2T}Ah_0\eqml R_T h\MDT$ for any $h_0$ with $\WF(h_0)\subset\mathcal S$. \label{p:constructive-parametrix} \end{proposition} Note that, because any broken ray intersects only finitely many interfaces in the time interval $t\in[0,2T]$, the condition of being $(\pm)$-escapable is open, and in particular $\mathcal S$ is open. \subsection{Convergence of microlocal Neumann iteration} \label{s:ml-convergence} With the microlocal inverse $A$ constructed for $I-\sigma^\star R\sigma^\star R$ (knowing $c$), we may now examine the behavior of Neumann iteration (which does not require knowing $c$). Recalling~\eqref{e:ml-maf-series}, define the Neumann iteration operators \begin{equation} N_k = \sum_{i=0}^k (\sigma^*\tilde R)^{2i}. \label{e:ml-neumann-partial-sum-ops} \end{equation} In this section we present our main microlocal theorem: the operators $N_k$ isolate $h\MDT$ in a particular leading order sense as $k\to\infty$.
Throughout, as in~\eqref{e:ml-neumann-partial-sum-ops} we substitute for $R$ the parametrix $\tilde R$ having cutoffs near glancing rays. Since $\lim N_k$ has no microlocal interpretation in general we will instead consider the convergence of the partial sum operators' principal symbols. Technically, of course, these symbols belong to separate spaces, since each $N_k$ is associated with a different Lagrangian in general. Hence, we first define a suitable symbol space containing the principal symbols of $A$ and $N_k$, and any reasonable FIO parametrix of~\eqref{e:ml-maf}. We then introduce a natural $\ell^2$ norm, which acts as a \emph{microlocal energy norm}, on restrictions of the symbol space, and state the convergence theorem. To describe the principal symbols of $A$ and $N_k$, we split them into finite sums of \PsiDO{}s composed with fixed unitary FIO, then record the \PsiDO{}s' principal symbols; this is a kind of polar decomposition. As is well-known (see appendix~\ref{s:parametrix-construction}), after a standard microlocal splitting of the wave equation into positive and negative wave speeds, $\tilde R$ is a sum of graph FIO $R_s$, one for each finite sequence $s\in\{\text R,\text T\}^j$, $j\geq 0$ of reflections and transmissions. For each $s$, let $C_s$ be the canonical transformation of $R_s$; form the set of all possible compositions \begin{equation} \sC=\set{C_{s^{(1)}}\circ\dotsb\circ C_{s^{(m)}}\!}{m\geq 0}, \end{equation} and enumerate the resulting set with a single index $i$: \begin{equation} \sC=\set{\sC_i}{i\in\mathcal I}. \end{equation} Hence, each composition of reflections, transmissions, and time-reversals leads to a canonical transformation $\sC_i$; in general, a single $\sC_i$ might be represented by (infinitely many) different compositions $C_{s^{(1)}}\circ\dotsb\circ C_{s^{(m)}}$. We term an FIO \emph{$\sC$-compatible} if it is associated with a finite union of $\sC_i$.
Next, fix a set of elliptic FIO $(J_i)_{i\in\mathcal I}$ associated with the $\sC_i$ that are microlocally unitary, that is, $J_i^*J_i\eqml I$. Any $\sC$-compatible FIO $\mathcal Z$ may now be written in the form $\mathcal Z = \sum_{i\in\mathcal I} P_iJ_i$ for appropriate \PsiDO{}s $P_i$. Define the \emph{principal symbol of $\mathcal Z$ with respect to $(J_i)_{i\in\mathcal I}$} to be the tuple of principal symbols of the $P_i$, restricted to the cosphere bundle: \begin{equation} \sigma_0=\sigma_0(\mathcal Z) = \big(\sigma_0(P_i)\big)_{i\in\mathcal I} \in C^\infty\big(S^*(\bRR^n\setminus\bGamma)\times \mathcal I\big), \end{equation} The boldface $\bRR^n\setminus\bGamma$ denotes a doubled space containing two copies of $\RR^n\setminus\Gamma$; due to the microlocal splitting this is a natural space for Cauchy data. For convenience, we consider the tuple $\sigma_0$ as a function on a single domain having one copy of $S^*(\bRR^n\setminus\bGamma)$ for each $i\in\mathcal I$. Note that a full symbol for $\mathcal Z$ (not needed here) could be defined analogously. Now, for $\eta\in S^*(\bRR^n\setminus\bGamma)$ define \begin{equation} \mathcal G_{\eta} = \set{(\sC_i(\eta),i)}{i\in\mathcal I,\,\eta\in\mathcal D(\sC_i)} \subset S^*(\bRR^n\setminus\bGamma)\times \mathcal I, \end{equation} where $\mathcal D(\sC_i)$ is the domain of $\sC_i$. That is, $\mathcal G_{\eta}$ contains all covectors reachable from $\eta$, together with a knowledge of the paths $i$ taken for each. Consider the restriction of a principal symbol $\sigma_0(\mathcal Z)$ to the space $\mathcal G_\eta$. Here, $\sigma_0(\mathcal Z)$ may be viewed both as an element of $\mathcal G_\eta$ and the unique linear operator on $\mathcal G_\eta$ defined by left-composition: \begin{equation} \sigma_0(\mathcal Z)\colon \sigma_0(\mathcal Z')\big|_{\mathcal G_\eta} \mapsto \sigma_0(\mathcal Z\mathcal Z')\big|_{\mathcal G_\eta}, \end{equation} for $\sC$-compatible FIOs $\mathcal Z'$. 
The composition $\mathcal Z\mathcal Z'$ is well-defined as an FIO since all operators involved are sums of graph FIO. The key idea is that the $\ell^2$ norm on $\mathcal G_\eta$ provides a natural microlocal energy operator norm for $\mathcal Z$. In particular (see Lemma~\ref{l:ml-energy-conservation} in~\sref{s:ml-proofs}), just as $\norm R=1$ w.r.t.\ the exact operator norm, so composition with $\tilde r$ has operator norm 1 on the $\ell^2(\mathcal G_\eta)$ principal symbol space, in the absence of glancing ray cutoffs. Combining this norm with existence of an $\ell^2$-bounded microlocal inverse of $I-\sigma^\star\tilde R\sigma^\star\tilde R$, we can prove principal symbol convergence for Neumann iteration. In the limit, furthermore, the wave field produced by Neumann iteration at $t=2T$ inside $\Theta'$ agrees with that produced by the given microlocal inverse, modulo $C^\infty$. \begin{theorem} Suppose $\tilde{\mathcal S} \subset\To^*(\bRR^n\setminus\bGamma)$ is a conic set on which $I-\sigma^\star \tilde R\sigma^\star \tilde R$ has a $\sC$-compatible right parametrix $\tilde A$ on $\tilde{\mathcal S}$; that is, $(I-\sigma^\star \tilde R\sigma^\star \tilde R)\tilde A\eqml I$ on $\tilde{\mathcal S}$. Assume that $\sigma_0(\tilde A)$ restricts to a bounded operator on $\ell^2(\mathcal G_\eta)$ for each $\eta\in\tilde{\mathcal S}\cap S^*(\bRR^n\setminus\bGamma)$. Then, for every $\eta\in\tilde{\mathcal S}\cap S^*(\bRR^n\setminus\bGamma)$, the Neumann series principal symbols $\sigma_0(N_k)$ converge to some $n_\infty\in\ell^2(\mathcal G_{\eta})$. Furthermore, $\sigma_0(\tilde RN_k)\to \sigma_0(\tilde R\tilde A)$ in $\ell^2(\mathcal G_\eta\cap S^*\bTheta')$. \label{t:ml-convergence} \end{theorem} Of course, we have in mind for $\tilde A$ the concrete parametrix $A$ of Proposition~\ref{p:constructive-parametrix}. 
This parametrix is $\sC$-compatible~(cf.~\sref{s:ml-construct-proof}); it also has finitely many graph FIO components, so it is a bounded operator on $\ell^2(\mathcal G_\eta)$. Taking $\tilde A=A$ we have the following direct corollary of Proposition~\ref{p:constructive-parametrix} and Theorem~\ref{t:ml-convergence}: \begin{corollary} For every $\eta\in\mathcal S\cap S^*(\bRR^n\setminus\bGamma)$, the Neumann series principal symbols $\sigma_0(N_k)$ converge in $\ell^2(\mathcal G_{\eta})$. Furthermore, $\sigma_0(RN_k)\to \sigma_0(RA)$ in $\ell^2(\mathcal G_\eta\cap S^*\bTheta')$. \label{c:ml-convergence-mdt} \end{corollary} According to Proposition~\ref{p:constructive-parametrix}, we have $R_{2T}Ah_0\eqml R_T h\MDT$ on $T^*\bTheta'$. Hence, the corollary implies that to leading order, the same is true of the $N_k$ as $k\to\infty$; they also isolate $h\MDT$. Note that Theorem~\ref{t:ml-convergence} does not claim that the principal symbol limit $n_\infty$ is itself the principal symbol of some FIO. In particular, the support of $n_\infty$ on some fiber $\mathcal G_\eta$ may be infinite, that is, $n_\infty$ maps $\eta$ to infinitely many singularities. In this case it is not obvious that $n_\infty$ corresponds to any FIO. Conversely, if $n_\infty$ is smooth and its restriction to every $\mathcal G_\eta$ has finite support, an FIO $N_\infty$ with principal symbol $n_\infty$ is easily constructed. \subsection{Microlocal uniqueness} \label{s:ml-uniqueness} The previous two sections treated the solution of $(I-\sigma^\star R\sigma^\star R)h_\infty\eqml h_0$, both constructively and iteratively. In this section we turn to the question of uniqueness; i.e.~the solutions of $g\eqml \sigma^\star R\sigma^\star Rg$. As we will see, the microlocal scattering control equation displays two distinct kinds of nonuniqueness: a normal type, due to diving rays and total reflections, and a pathological type, involving an infinite-energy sequence of reinforcing singularities. 
The first type is analogous to the nonuniqueness seen in the exact setting. In the exact case, the kernel $\mathbf G$ of $I-\pi^\star R\pi^\star R$ consists only of initial data whose wave fields are supported outside $\Theta$, due to unique continuation. In other words, no waves can enter $\Theta$, completely reflect, and leave in finite time $2T$. Microlocally, however, there is a much richer space of completely reflecting wave fields, including totally reflecting and diving rays. Note that these rays do not affect $\trestr{h_\infty}_{\Theta'}$ and in particular do not interfere with the wave field of $h\MDT$, up to smoothing. The second type of nonuniqueness is unique to the microlocal setting. In this case, the wave field produced by initial data $g$ does include singularities inside $\bTheta'$ at time $2T$, which $\sigma^\star$ cuts off. The (microlocal) energy lost in this cutoff must be replenished by a second singularity in the initial data, which in turn must be replenished by a third, and so on, necessitating an infinite chain of singularities. Since $Rg$ is not smooth in $\bTheta'$, the converse of Lemma~\ref{l:isolate-mdt} fails. In the following examples, we illustrate these two nonuniqueness types at length. \begin{figure} \caption{Regular nonuniqueness for microlocal scattering control; interfaces are marked with discs. (a) An appropriate combination of singularities at $a$ and $b$ is smooth on the dashed bicharacteristic and reflects from $\Theta$. (b) A singularity from $h_0$ can be cancelled at either $a$ or $b$.} \label{f:regular-ml-nonuniqueness-kernel} \label{f:regular-ml-nonuniqueness-h0} \label{f:regular-ml-nonuniqueness} \end{figure} \begin{example} Figure~\subref*{f:regular-ml-nonuniqueness-kernel} presents an element of the microlocal kernel of $(I-\sigma^\star R\sigma^\star R)$, with a diving or totally reflecting ray and one interface.
If $g$ has singularities at $a$ and $b$ satisfying an appropriate pseudodifferential relation, its wave field will be smooth along the dashed ray. Thus the cutoffs $\sigma^\star$ have no effect, and $\sigma^\star R\sigma^\star R g\eqml RRg=g$, implying $(I-\sigma^\star R\sigma^\star R)g\eqml 0$. Figure~\subref*{f:regular-ml-nonuniqueness-h0} illustrates how this lack of injectivity leads to multiple solutions $h_\infty$. Here, a stray ray from the direct transmission can be cancelled by an appropriate singularity at either $a$ or $b$, or a linear combination of them. The proof of Theorem~\ref{t:ml-convergence} shows that Neumann iteration converges in principal symbol to a solution operator having ``least microlocal energy'' in the sense of a weighted $\ell^2$ norm on its principal symbol. \label{x:ml-nonuniqueness-1} \end{example} \begin{example} \begin{figure}\label{f:pathological-ml-nonuniqueness-1D} \end{figure} \begin{figure} \caption{Two-dimensional version of Figure~\ref{f:pathological-ml-nonuniqueness-1D}.} \label{f:pathological-ml-nonuniqueness-2D} \end{figure} Figure~\ref{f:pathological-ml-nonuniqueness-1D} shows a one-dimensional setup exhibiting the second type of nonuniqueness. While this example is contrived, Figure~\ref{f:pathological-ml-nonuniqueness-2D} shows how an equivalent and more realistic higher-dimensional version may be constructed. (Both examples involve non-compact domains, and we conjecture noncompactness is required for this type of nonuniqueness.) Here $\Theta$ consists of an infinite series of disconnected open intervals $(-\infty,w_0)\cup(v_1,w_1)\cup(v_2,w_2)\cup\dotsb$. On each finite interval $c$ has two jump discontinuities; assume $\Theta'$ is sufficiently close to $\Theta$ to contain these singularities.
Two sequences of unit covectors $\{a_i\}_{i=0}^\infty,\,\{b_i\}_{i=1}^\infty\subset S^*\bTheta^\star\setminus\mathcal W$ are chosen so that the canonical relation of $\sigma^\star\tilde R$ sends $a_i$ to $\{b_i,b_{i+1}\}$ and $b_i$ to $\{a_{i-1},a_i\}$. We now construct a $g$ in the microlocal kernel of $I-\sigma^\star R\sigma^\star R$ with an infinite sequence of singularities at $a_0,a_1,a_2,\dotsc$. First, note that the canonical relation of $\sigma^\star \tilde R\sigma^\star \tilde R$ sends $a_i$ $(i>1)$ to $\{a_{i-1},a_i,a_{i+1}\}$. Suppose now that we choose some initial data $g$ with a singularity at $a_0$. After applying $\sigma^\star R\sigma^\star R$, some portion of this singularity's amplitude will be lost due to the $\sigma^\star$ cutoffs. We may, however, restore the lost amplitude by adding an appropriate singularity to $g$ at $a_1$. In turn, some of this new singularity's amplitude will be lost under $\sigma^\star R\sigma^\star R$, which we make up for with an appropriate singularity at $a_2$, and so on. Rigorously, decompose $\sigma^\star \tilde R\sigma^\star\tilde R$ near each $a_i$ as the sum of three graph FIO $A_{-1}$, $A_0$, $A_1$ whose canonical graphs map $a_i$ to $a_{i-1}$, $a_i$, and $a_{i+1}$ respectively. Modify $A_0$, say, by a smooth operator so that $\sigma^\star R\sigma^\star R=A_{-1}+A_0+A_1$ exactly. It can be shown (cf.~\eqref{e:geometric-ps-refl-trans}) that the $A_k$ are elliptic. Now, choosing any $g_0\in L^2(\Theta^\star)$ with $\WF(g_0)=\RR^+ a_0$, we look for $g_i$, $i=1,2,\dotsc$ with wavefront sets at $\RR^+a_i$ such that the sum $g=\sum g_i$ satisfies $(I-\sigma^\star R\sigma^\star R)g\eqml 0$. 
This leads to the infinite matrix equation \begin{equation} \setlength\arraycolsep{3pt} \left( I - \begin{bmatrix*}[l] A_0 & A_{-1} & & \\ A_1 & A_0 & A_{-1} & \phantom\ddots\\ \phantom{A_{-1}} & A_1 & A_0 & \ddots\\ & & \ddots & \ddots \end{bmatrix*} \right) \begin{bmatrix} g_0 \\ g_1 \vphantom\vdots \\ g_2 \vphantom\vdots \\ \vdots \end{bmatrix} \eqml 0. \label{e:path-eg-matrix-eqn} \end{equation} By ellipticity,~\eqref{e:path-eg-matrix-eqn} has a solution, namely $g_{i+1} \eqml (A_{-1})^{-1} \big((I-A_0)g_i+A_1g_{i-1}\big)$. To construct an associated $g$, we use the fact that the $\{a_i\}$ are discrete in $S^*(\bTheta^\star)$ (which implies $\Theta$ is unbounded). Each $g_i$ is locally $L^2$, so after multiplying by a smooth cutoff near the base point of $a_i$, we may assume $g_i\in L^2$. Applying radial cutoffs in the Fourier domain, we may assume that $\norm{g_i}_{L^2}\leq 2^{-i}$, so $g=\sum g_i$ converges in $L^2$. Defining $g_{-1}=0$, consider \begin{equation} (I - \sigma^\star R\sigma^\star R)g = \sum_{i=0}^\infty - A_{1}g_{i-1} + (I-A_0)g_i - A_{-1}g_{i+1}. \label{e:error-path-nonuniqueness} \end{equation} Each summand is smooth by construction, and compactly supported near the base point of $a_i$. Because the $\{a_i\}$ are discrete, we can ensure only finitely many summands of~\eqref{e:error-path-nonuniqueness} are nonzero at any given point. Hence the entire sum is smooth, showing $g$ is in the microlocal kernel of $I-\sigma^\star R\sigma^\star R$. As expected, $Rg$ is not smooth in $\Theta'$; it is not hard to see it must be singular at every $b_i$. Hence, solving $(I-\sigma^\star R\sigma^\star R)h_\infty\eqml h_0$ is not sufficient for isolating $h\MDT$. \label{x:ml-nonuniqueness-2} \end{example} \paragraph{Uniqueness and Isolating $h\MDT$} We now close the circle, and return to the question of whether solving $(I-\sigma^\star R\sigma^\star R)h_\infty\eqml h_0$ is equivalent to isolating $h\MDT$. 
Of our two types of nonuniqueness, only the second interferes with isolating $h\MDT$. We may rule it out, to leading order, by assuming the same kind of microlocal energy boundedness seen earlier in Theorem~\ref{t:ml-convergence}: namely, $\ell^2$ boundedness of the parametrix's principal symbol. Assuming this condition, we reach a partial converse of Lemma~\ref{l:isolate-mdt}: a solution of the microlocal scattering control equation isolates $h\MDT$ to leading order as long as this is possible. We frame our proposition as a uniqueness result. \begin{proposition} \nobelowdisplayskip Suppose $B_1,B_2$ are $\sC$-compatible microlocal right inverses for $I-\sigma^\star \tilde R\sigma^\star \tilde R$ on a conic subset $\tilde{\mathcal S}\subset\To^*(\bRR^n\setminus\bGamma)$. If their principal symbols restrict to elements of $\ell^2(\mathcal G_\eta)$ for all $\eta\in\tilde{\mathcal S}$, \begin{align} \trestr{\tilde RB_1h_0}_{\bTheta'}&\eqml\trestr{\tilde RB_2 h_0}_{\bTheta'} \bmod{H^{s+1}(\bRR^n\setminus\bGamma)} & \text{for all } h_0\in H^s(\bRR^n\setminus\bGamma)\cap\mathcal D'_{\tilde{\mathcal S}}. \end{align} \label{p:ml-uniqueness} \end{proposition} In particular, as long as there is some ``finite microlocal energy'' parametrix isolating $h\MDT$ on a conic set $\tilde{\mathcal S}\subset\To^*(\bRR^n\setminus\bGamma)$, all other finite microlocal energy parametrices on $\tilde{\mathcal S}$ also isolate $h\MDT$. \subsection{Proofs} \label{s:ml-proofs} \subsubsection{Microlocal convergence (\sref{s:ml-convergence})} The major task in proving Theorem~\ref{t:ml-convergence} is to show that composition with $\tilde R$ has operator norm at most 1 on $\ell^2(\mathcal G_{\eta})$ for any $\eta$ --- a microlocal version of energy conservation. We begin with its proof. To present the energy conservation lemma, note that composition with $\tilde R$ is linear and well-defined on $\sC$-compatible FIO. 
It therefore induces a linear operator $\tilde r$ on their principal symbols in the space $C^\infty\left(S^*(\bRR^n\setminus\bGamma)\times\mathcal I\right)$. Since $\mathcal G_\eta$ is closed under the canonical relation of $\tilde R$, operator $\tilde r$ restricts to a linear operator on $\ell^2(\mathcal G_\eta)$ for any $\eta\in S^*(\bRR^n\setminus\bGamma)$. \begin{lemma}[Microlocal Energy Conservation] Let $\eta\in S^*(\bRR^n\setminus\bGamma)$. Then $\norm{\tilde r}\leq 1$ with respect to the operator norm on $\ell^2(\mathcal G_\eta)$. \label{l:ml-energy-conservation} \end{lemma} \begin{proof} First, assume that there are no cutoffs in the parametrix $\tilde R$ due to glancing rays originating in $\mathcal G_\eta$. In this case, $\tilde R^2\eqml R^2=I$, so $\tilde r^2=I$ likewise. If $\tilde r$ were self-adjoint, it would follow that $\norm{\tilde r}_{\ell^2}=1$. Certainly $\tilde R$ is microlocally self-adjoint, since $\tilde R^*\eqml R^*=R\eqml \tilde R$. This property does not immediately carry over to $\tilde r$ due to the presence of Maslov factors; fortunately, it is still possible to show $\tilde r$ is self-adjoint. Let $(\alpha,i),\,(\beta,j)\in \mathcal G_{\eta}$, and let $e_{\alpha,i},\,e_{\beta,j}\in \ell^2(\mathcal G_\eta)$ be the vectors having 1 in the $(\alpha,i)$ or $(\beta,j)$ position respectively and zeros elsewhere. It suffices to show that \begin{equation} \form{\tilde r e_{\alpha,i},\,e_{\beta,j}} = \overline{\form{\tilde r e_{\beta,j},\,e_{\alpha,i}}}. \label{e:tilde-r-sa-comparison} \end{equation} To compute each side, we choose \PsiDO{}s $P,P'\in\Psi^0$ with $\sigma_0(P)=\sigma_0(P')=1$ near $\alpha,\,\beta$ respectively. Decompose \begin{align} \tilde RPJ_i &\eqml \sum_{j\in\mathcal I} Q_j J_j, & \tilde RP'J_j &\eqml \sum_{i\in\mathcal I} Q'_iJ_i. \label{e:tilde-r-sa-decomp} \end{align} The left- and right-hand sides of~\eqref{e:tilde-r-sa-comparison} then become $\overline{\sigma_0(Q_j)(\beta)}$ and $\sigma_0(Q'_i)(\alpha)$. 
If there is no $C_s$ carrying $(\alpha,i)$ to $(\beta,j)$ (that is, $C_s(\alpha)=\beta$ and $C_s\circ\sC_i=\sC_j$ on their common domain of definition), there is also no $C_{s'}$ carrying $(\beta,j)$ to $(\alpha,i)$, and vice versa. In this case, both sides of~\eqref{e:tilde-r-sa-comparison} are zero. Otherwise, there are unique $C_s$ and $C_{s'}$ satisfying the above; let $R_s$ and $R_{s'}$ be the microlocal restrictions of $\tilde R$ to each of these canonical relations near $\alpha$ and $\beta$ respectively. We may replace $\tilde R$ in the first and second equations of~\eqref{e:tilde-r-sa-decomp} by $R_s$ and $R_{s'}$, respectively. Furthermore, $R_{s'}\eqml R_s^*$ since $\tilde R$ is microlocally self-adjoint and $C_{s'}=(C_{s})^{-1}$. Now we apply singular symbol calculus (see~\cite{C}) to both sides of the first equation of~\eqref{e:tilde-r-sa-decomp} and evaluate at $\beta$ and $\alpha$. Let lowercase letters ($r_s$, $j_i$, etc.) denote singular principal symbols (of $R_s$, $J_i$, etc.). This yields \begin{nalign} r_s(\beta)j_i(\eta)i^{\kappa(\ud\sC_i(V_\eta),\,V_\alpha,\,\ud C_s^{-1}(V_\beta))/2} &= q_j(\beta) j_j(\eta), \\ r_{s'}(\alpha)j_j(\eta)i^{\kappa(\ud\sC_j(V_\eta),\,V_\beta,\,\ud C_s(V_\alpha))/2} &= q'_i(\alpha) j_i(\eta), \end{nalign} where $V_\gamma$ denotes the vertical subspace in $T_\gamma T^*(\bRR^n\setminus\bGamma)$, and $\kappa$ is the Kashiwara index~\cite{LV,S}. Solving for $\overline{q_j(\beta)}$ and $q'_i(\alpha)$ we obtain \begin{nalign} \form{\tilde re_{\alpha,i},\,e_{\beta,j}} &= \overline{q_j(\beta)} = \overline{r_s(\beta)}\frac{\;\overline{j_i(\eta)}\;}{\overline{j_j(\eta)}} i^{-\kappa(\ud\sC_i(V_\eta),\,V_\alpha,\,\ud C_s^{-1}(V_\beta))/2}, \\ \overline{\form{\tilde r e_{\beta,j},\,e_{\alpha,i}}} &= q'_i(\alpha) = r_{s'}(\alpha)\frac{j_j(\eta)}{j_i(\eta)} i^{\kappa(\ud\sC_j(V_\eta),\,V_\beta,\,\ud C_s(V_\alpha))/2}.
\end{nalign} Comparing terms, $\overline{r_s(\beta)}=r_{s'}(\alpha)$ since $R_{s'}=R_s^*$, and similarly $\overline{j_i(\eta)}/\overline{j_j(\eta)}=j_j(\eta)/j_i(\eta)$, because $J_i$ being unitary implies $\abs{j_i}=1$. As for the Kashiwara indices, since $\kappa$ is coordinate-invariant and alternating, \begin{nalign} \kappa(\ud\sC_i(V_\eta),V_\alpha,\ud C_s^{-1}(V_\beta)) &= \kappa(\ud\sC_j(V_\eta),\ud C_s(V_\alpha),V_\beta) \\ &= -\kappa(\ud\sC_j(V_\eta),V_\beta,\ud C_s(V_\alpha)). \end{nalign} The conclusion is that $\tilde r$ is self-adjoint, and therefore $\norm{\tilde r}=1$, since $\norm{\tilde r^2}=\norm{I}=1$. In the presence of near-glancing rays in $\mathcal G_\eta$, the parametrix constructed in appendix~\ref{s:parametrix-construction} includes pseudodifferential cutoffs away from glancing rays (in constructing $\varphi^+$ and $\JBS$). In a neighborhood of any $\alpha\in\mathcal G_\eta$ for which some broken ray is at least partially cut off, $\tilde R$ is microlocally equivalent to a composition of propagators and pseudodifferential cutoffs \begin{equation} \tilde R \eqml \upsilon\circ \tilde R_{t_m} \circ P_{m-1} \circ \tilde R_{t_{m-1}} \circ \dotsb \circ P_{1} \circ \tilde R_{t_1}, \end{equation} where $t_1+\dotsb+t_m=2T$ and $P_1,\dotsc,P_{m-1}\in\Psi^0$ have principal symbols of magnitude at most 1, and none of the intermediate propagators $\tilde R_{t_k}$ involve glancing ray cut offs when $\tilde R$ is restricted to the neighborhood of $\alpha$. For each $k=0,\dotsc,m$, we let $\sC^{(k)}=\{\smash{C^{\smash{(k)}}_s}\circ \sC_i\}$ be the set of compositions of $\sC_i$'s with canonical graphs $C^{\smash{(k)}}_s$ defined as in~\sref{s:ml-convergence} but with $2T$ replaced by $t_1+\dotsb+t_k$. Naturally, $\sC^{(0)}\!=\sC^{(m)}\!=\sC$. Choose sets of corresponding unitary operators $\{J^{\smash{(k)}}_i\}$ as before for each $k$. 
Then composition by each $\tilde R_{t_k}$ sends $\sC^{(k)}$- to $\sC^{(k+1)}$-compatible FIO, and as before induces a map between their principal symbol spaces; the argument above shows it is an isometry with respect to the $\ell^2$ norms. Composition with the pseudodifferential cutoffs $P_k$ acts by pointwise multiplication by $p_k$ on these $\ell^2$ spaces, and hence has operator norm at most 1. Since $\sC^{(m)}=\sC$, operator $\tilde r$ is given by the composition of all these operators $\tilde r_{t_m}\circ p_{m-1}\circ \tilde r_{t_{m-1}}\circ\dotsb$, and thus $\norm{\tilde r}\leq 1$. \end{proof} \begin{proof}[Proof of Theorem~\ref{t:ml-convergence}] We begin with the first statement of the theorem: convergence of the $N_k$'s principal symbols in $\ell^2(\mathcal G_{\eta})$. Since composition with $\sigma^\star$ multiplies principal symbols pointwise by $\sigma^\star$, it is a linear operator on $\ell^2(\mathcal G_\eta)$ with norm at most 1. Therefore $\sigma^\star\tilde r\sigma^\star\tilde r$, the operation of principal symbol composition with $\sigma^\star\tilde R\sigma^\star\tilde R$, has norm at most 1 as an operator on $\ell^2(\mathcal G_\eta)$. Let $n_k$, $\tilde a$, and $i$ denote the principal symbols of $N_k$, $\tilde A$, and the identity with respect to the $J_i$. We will see that $\tilde a$'s existence implies the convergence of $n_k$ by the spectral theorem, applied to a symmetrization of $\sigma^\star\tilde r$. Restricting to $\mathcal G_\eta$, suppose \begin{equation} (I-\sigma^\star \tilde r\sigma^\star\tilde r)u=i \text{\qquad for some } u\in \ell^2(\mathcal G_\eta). \label{e:posited-u} \end{equation} Then $u=i+v$ for some $v$ in the range of $\sigma^\star$. In particular, $v$ is supported in $\mathcal G_\eta\cap T^*\bTheta'^\star$. 
Solving~\eqref{e:posited-u} for $w=v/\sqrt{\sigma^\star}$ gives \begin{align} (I-\sqrt{\sigma^\star} \tilde r\sigma^\star\tilde r \sqrt{\sigma^\star})\frac{v}{\sqrt{\sigma^\star}} &=\sqrt{\sigma^\star}\tilde r\sigma^\star\tilde ri. \label{e:posited-w} \end{align} As the process is reversible, $u$ is a solution of~\eqref{e:posited-u} if and only if $w=(u-i)/\sqrt{\sigma^\star}$ solves~\eqref{e:posited-w} in the weighted space $\ell^2(\mathcal G_\eta\cap T^*\bTheta'^\star,\sigma^\star)$. Now, if there is any solution to~\eqref{e:posited-w}, applying Lemma~\ref{l:only-neumann} to the self-adjoint operator $\sqrt{\sigma^\star}\tilde r \sqrt{\sigma^\star}$ shows that the Neumann series \begin{equation} w_0 = \sum_{k=0}^\infty \big[\,\sqrt{\sigma^\star}\tilde r\sigma^\star \tilde r\sqrt{\sigma^\star}\,\big]^k \sqrt{\sigma^\star}\tilde r\sigma^\star\tilde ri \end{equation} converges in $\ell^2(\mathcal G_\eta\cap T^*\bTheta'^\star,\sigma^\star)$ to the minimal-norm solution of~\eqref{e:posited-w}. The corresponding $u_0=i+\sqrt{\sigma^\star} w_0\in\ell^2(\mathcal G_\eta)$ is exactly $\lim n_k$. In particular, $u=\tilde a$ is a solution of~\eqref{e:posited-u} and it is in $\ell^2(\mathcal G_\eta)$ since its support in $\mathcal G_\eta$ is finite. Hence, the Neumann series partial sum principal symbols converge in $\ell^2(\mathcal G_\eta)$. They may not converge to $\tilde a$, as $I-\sigma^\star \tilde r\sigma^\star \tilde r$ may have a nontrivial nullspace. Consider this nullspace. Suppose $(I-\sigma^\star\tilde r\sigma^\star\tilde r)g=0$ for some $g\in\ell^2(\mathcal G_\eta)$, so that $g = \sigma^\star\tilde r\sigma^\star\tilde r g$. But since the operator norms of $\sigma^\star$ and $\tilde r$ are at most 1, we must have \begin{equation} \norm{g} = \norm{\tilde r g} = \norm{\sigma^\star\tilde r g} = \norm{\tilde r\sigma^\star \tilde r g} = \norm{\sigma^\star\tilde r\sigma^\star\tilde rg}. 
\end{equation} The second equality implies that $\tilde r g$ is supported in $T^*\bTheta'^\star$. Taking $g=\tilde a-\lim n_k$, we conclude $\tilde r\tilde a$ and $\tilde r\circ \lim n_k$ are equivalent in $T^*\bTheta'^\star$, finishing the proof. \end{proof} \subsubsection{Constructive parametrix (\sref{s:ml-construct})} \label{s:ml-construct-proof} \begin{proof}[Proof of Proposition~\ref{p:constructive-parametrix}] The proof is purely technical, specifying a recursive procedure for constructing a set of incoming singularities that ensure that only the directly-transmitted singularity reaches $D^+\MDT$. The notation of Appendix~\ref{s:parametrix-construction} will be used throughout. Our key constructions will be order-0 FIO $\Xi\supi_\pm,\Xi\supo_\pm\colon C^\infty(\RR\times\bdy Z)\to \mathcal D'(\mathbf Z)$ producing tails outside $\Theta$ for $(\pm)$-escapable bicharacteristics. Following~\sref{s:ml-construct}, the $\Xi_+\supio$-constructed tail for a singularity on a $(+)$-escapable bicharacteristic ensures this singularity escapes $\Theta$ at time $2T$, without generating any singularities in $h\MDT$'s microlocal forward domain of influence, $D^+\MDT$. The $\Xi\supio_-$-constructed tail generates a given singularity on a $(-)$-escapable bicharacteristic, again without causing any singularities to enter $D^+\MDT$. The $\Xi_\pm\supo$ are defined on outgoing boundary data while the $\Xi_\pm\supi$ are defined on incoming data, microlocally near the final, resp., initial covectors of $(\pm)$-escapable bicharacteristics. Let $\gamma\colon(t_-,t_+)\to T^*\mathbf Z$ be a $(\pm)$-escapable bicharacteristic. Denote by $\beta\supo$ the pullback to the boundary of its final point: $\beta\supo=(di_\Gamma)^*\gamma(t_\pm)$, where by abuse of notation we consider $\gamma(t_\pm)$ as a space-time covector, in $\To^*(\RR\times\mathbf Z)$. Define $\beta\supi=(di_\Gamma)^*\gamma(t_\mp)$ similarly.
We now define $\Xi\supio_\pm$ microlocally near $\beta\supio$, starting with the incoming maps $\Xi\supi_\pm$. \begin{itemize} \item \emph{If $t_\pm\in(0,2T)$:} We simply follow the bicharacteristic and apply $\Xi\supo_\pm$ at the other end. In the $(+)$ case define $\Xi\supi_+\eqml \Xi\supo_+\JBB$ near $\beta\supi$. In the $(-)$ case, define $\Xi\supi_-\eqml \Xi\supo_- \JBB^- M$ near $\beta\supi$, where $\JBB^-=\upsilon\JBB\upsilon$ is like $\JBB$ but propagating backward in time. \item \emph{If $\gamma$ escapes, $t_\pm\notin[0,2T]$:} This is the terminal case. In the $(+)$ case, there is nothing to do: define $\Xi\supi_+\eqml 0$ near $\beta\supi$. For the $(-)$ case, define $\Xi\supi_-\eqml \JCB^{-1}$ near $\beta\supi$ to obtain the necessary Cauchy data. \end{itemize} \noindent We now turn to $\Xi\supo_\pm$, considering each case in the definition of $(\pm)$-escapability. \begin{itemize} \item \emph{If $\gamma$ escapes:} This case never arises: $\Xi\supi_\pm$ is not defined in terms of $\Xi\supo_\pm$ for such $\gamma$. \item \emph{If all outgoing bicharacteristics are $(\pm)$-escapable:} Recursively apply $\Xi\supi_\pm$ to the reflected and transmitted (if any) bicharacteristics, defining $\Xi\supo_\pm\eqml \Xi\supi_\pm M$ near $\beta\supo$. \item \emph{If one outgoing bicharacteristic is $(\pm)$-escapable, and the opposite incoming ray is $(\mp)$-escapable:} This is the core case. In the $(+)$ case, near $\beta\supo$ let \begin{equation} \Xi\supo_+\eqml \when{ -\Xi\supi_-M\subR^{-1} M^{}\subT + \Xi\supi_+ (M^{}\subR-M^{}\subT M\subR^{-1} M^{}\subT), & \qquad\text{case (R),}\\ -\Xi\supi_-M\subT^{-1} M^{}\subR + \Xi\supi_+ (M^{}\subT-M^{}\subR M\subT^{-1} M^{}\subR), & \qquad\text{case (T),} } \end{equation} according to whether the reflected (R) or transmitted (T) outgoing ray is $(+)$-escapable. The inverses are all microlocal.
The $(-)$ case is slightly different: near $\beta\supo$, \begin{equation} \Xi\supo_-\eqml \when{ \Xi\supi_-M\subR^{-1} + \Xi\supi_+ M^{}\subT M\subR^{-1}, & \qquad\text{case (R),}\\ \Xi\supi_-M\subT^{-1} + \Xi\supi_+ M^{}\subR M\subT^{-1}, & \qquad\text{case (T).} } \end{equation} For case (R), the requirement in the definition that $c$ be discontinuous at $\beta\supio$ implies that $M\subR$'s principal symbol is nonzero there (cf.~\eqref{e:geometric-ps-refl-trans}), guaranteeing the existence of a parametrix $M\subR^{-1}$ near $\beta\supio$. For case (T), $M\subT$ always has positive principal symbol, regardless of $c$. \end{itemize} While $\Xi\supio_\pm$ is defined recursively, by definition only finitely many recursions are needed to reach the non-recursive case where $\gamma$ escapes. Since all the cases are open conditions on $\beta$, operators $\Xi\supio_\pm$ are well-defined (assuming that in regions where both the second and third cases hold, we decide between them consistently). Furthermore, the $\Xi\supio_\pm$ are order-0 FIO, since they are microlocally sums of compositions of order-0 FIO associated with invertible canonical graphs. We now use $\Xi\supio_\pm$ to define a parametrix $A$. Given $\eta\in \mathcal S\subset \To^*\bTheta'$, consider the escaping bicharacteristics starting at $\eta$. Each is associated with a distinct sequence of reflections and transmissions $s=(s_1,\dotsc,s_k)\in \{R,T\}^k$ for some $k$, and a corresponding propagation operator \begin{equation} \mathcal P_s = \JBB M_{s_k}\dotsb\JBB M_{s_2}\JBB M_{s_1}\JCB. \end{equation} Let $\mathfrak S$ be the set of escaping bicharacteristic sequences $s$, and define \begin{equation} A_\eta = I + \Xi\supo_+\sum_{s\in\mathfrak S} \mathcal P_s, \label{e:def-A-eta} \end{equation} Then define $A$ by patching together the $A_\eta$ with a microlocal partition of unity. As $\Xi\supio_\pm$\!, $\mathcal P_s$ are FIO of order 0, so is $A$. 
We now check that $A$ isolates $h\MDT$ and is therefore a microlocal right inverse for $I-\sigma^\star R\sigma^\star R$ by Lemma~\ref{l:isolate-mdt}. Let $h_0$ be microsupported in a sufficiently small neighborhood of $\eta\in\mathcal S$ and let $h_\infty = Ah_0$. Define the outgoing boundary parametrix \begin{equation} \mathfrak B = \JBS \sum_{k=0}^\infty (M\JBB)^k. \end{equation} With $\mathcal P_s$, $\mathfrak S$ as before, define $\mathfrak S^\perp$ to be the set of sequences $s$ for which no $s'\in\mathfrak S$ is a prefix. Then $\tilde Fh_\infty$ splits into three components: \begin{equation} \tilde Fh_\infty = \tilde F(h_\infty-h_0) + \mathfrak B M \sum_{s\in\mathfrak S}\mathcal P_sh_0 + \sum_{s\in\mathfrak S^\perp} \tilde F_s. \end{equation} For $t\in[T,2T]$, the last term is the wave field of $h\MDT$; accordingly, it suffices to prove that the sum of the first two terms is smooth in $D^+\MDT$. Rewrite \begin{equation} \tilde F(h_\infty-h_0) + \mathfrak BM \sum_{s\in\mathfrak S}\mathcal P_sh_0 = \sum_{s\in\mathfrak S} (\tilde F\Xi\supo_+ + \mathfrak BM) \mathcal P_s h_0. \end{equation} By construction, $\tilde F\Xi\supo_+ + \mathfrak BM$ is smoothing at the terminal end of $(+)$-escapable bicharacteristics, and in particular on $\WF(\mathcal P_s h_0)$ for each $s\in\mathfrak S$, as desired. Hence $\tilde R_{2T} h_0\eqml\tilde R_T h\MDT$. Applying Lemma~\ref{l:isolate-mdt}, we conclude $(I-\sigma^\star\tilde R\sigma^\star\tilde R)Ah_0\eqml h_0$. The same result holds for all $h_0\in\mathcal D'_{\mathcal S}$ by a microlocal partition of unity. \end{proof} \subsubsection{Uniqueness (\sref{s:ml-uniqueness})} \begin{proof}[Proof of Proposition~\ref{p:ml-uniqueness}] Let $b_1$, $b_2$, $i$ be the principal symbols of $B_1$, $B_2$, and the identity.
Letting $\sigma^\star$ and $\tilde r$ denote the operators on the space of principal symbols induced by multiplication with $\sigma^\star$ and composition with $\tilde R$, respectively, $(I-\sigma^\star\tilde r\sigma^\star\tilde r)(b_1-b_2)=0$. As in the proof of Theorem~\ref{t:ml-convergence}, it follows that $\tilde r(b_1-b_2)$ is supported in $T^*\bTheta'^\star$. \end{proof} \section{Comparison of the exact and microlocal analyses} \label{s:compare} Both the exact analysis of Section~\ref{s:exact} and the microlocal analysis of Section~\ref{s:microlocal} prove that scattering control isolates a certain portion of the wave field of $h_0$ at $t=T$, while effectively erasing the rest. Our two analyses, however, predict the isolation of two \emph{different} portions of the wave field. Surprising at first glance, this disparity provides further insight on scattering control, which we explore in this section. While the arguments are quite general, we consider for simplicity two particular examples that illustrate the fundamental differences between dimensions $n=1$ and $n>1$. In the one-dimensional example, the microlocal and exact analyses align as $h\DT$ and $h\MDT$ are essentially equal; the result is unconditional convergence of the Neumann iteration, both exactly and microlocally. In higher dimensions, however, $h\DT$ and $h\MDT$ can be quite different, causing a loss of convergence in finite energy space. \subsection{Convergence in $n=1$ dimension} Let $\Omega=(\eps,\infty)$ and $\Theta=(0,\infty)$ for fixed $\eps>0$; let $\Theta',\Theta''$ be arbitrary. Let $c$ be piecewise smooth on $\RR$, and equal to 1 on $\Omega^\star$. In general, the distance of a point from $\bdy\Theta$ is the minimum distance of a singularity at that point from $\bdy\Theta$: \begin{equation} d(x,\bdy\Theta) = \min_{\xi\in \To^*_x\RR\vphantom{\mathring T}}d(\xi,\bdy T^*\Theta). 
\label{e:1D-cotangent-distance} \end{equation} In one dimension, this means $d^*_{T^*\Theta}(\xi)=d^*_\Theta(x)$ if $\xi\in\To^*_x\RR$. Hence, $h\DT$ and $h\MDT$ are essentially equivalent, differing only in their respective usage of harmonic extensions and smooth cutoffs. We now discuss the microlocal and exact behaviors that arise in scattering control. On the microlocal side,~\eqref{e:1D-cotangent-distance} implies every returning bicharacteristic is trivially $(+)$-escapable, as no glancing or totally reflected waves arise. Consequently, the constructive parametrix $A$ may be defined everywhere in $\To^*\bTheta'$, and hence by Theorem~\ref{t:ml-convergence} microlocal Neumann iteration always converges in principal symbol. On the exact side, the \emph{exact} Neumann series converges to a finite energy solution $h_\infty$ of $(I-\pi^\star R\pi^\star R)h_\infty=h_0$, thanks again to microlocal analysis. To see why, first separate the initial data into rightward- and leftward-traveling waves (possible since $c=1$ there). The rightward-traveling portion has a directly transmitted component inside $\Theta$, which is its image under an elliptic graph FIO. Due to the ellipticity this directly transmitted wave carries a positive fraction of the initial energy, by \Garding's inequality and unique continuation (compare Stefanov and Uhlmann's work~\cite{SU-TATBrain}). Leftward-traveling waves, meanwhile, may be safely ignored, since $c$ is constant for $x<0$. The full proof requires some care, and we defer it to~\sref{s:compare-proofs}. \begin{proposition} Let $\Omega$, $\Theta$, $c$ be as above, and $\eps<2T$. Then $\norm{\pi^\star R\pi^\star R}<1$ on $H^1(\Omega^\star)\oplus L^2(\Omega^\star)$; in particular $\sum_{k=0}^\infty (\pi^\star R)^{2k}h_0$ always converges. \label{p:1D-convergence} \end{proposition} \subsection{Convergence in $n>1$ dimensions} \begin{figure} \caption{A singularity in $h\MDT$ but not $h\DT$. 
Its distance along the slanted bicharacteristic is greater than $T$, but its base point is less than distance $T$ from the boundary. Hence $\eta\in (T^*\Theta)_T$ but $\eta\notin T^*(\Theta_T)$.} \label{f:slant-singularity} \end{figure} Consider a halfspace $\Theta=\{x_n\geq 0\}$, and let $c(x)=1$. Any $\eta=(x',x_n,\xi',\xi_n)\in \To^*\bTheta$ with $x_n>T$ then belongs to $T^*(\bTheta_T)$. However, if $\xi'\neq 0$, then $d^*_{T^*\bTheta}(\eta)>x_n$ and $\eta\notin(T^*\bTheta)_T$ if $T$ is sufficiently close to $x_n$ (Figure~\ref{f:slant-singularity}). This discrepancy, which of course occurs for general $\Theta$, $c$ when $n>1$, implies that $h\DT$ is fundamentally smaller than $h\MDT$. Furthermore, it prevents the exact Neumann series from converging (in finite energy space) for any $h_0$ producing singularities in the gap $(T^*\bTheta)_T\setminus \clsr{T^*(\bTheta_T)}$, as we now show. Suppose $\eta\in\WF(R_Th_0)\cap\big((T^*\bTheta)_T\setminus \clsr{T^*(\bTheta_T)}\big)$, and $\gamma$ is the bicharacteristic passing through $\eta$ at $t=T$. If there were a finite energy solution $h_\infty\in\mathbf C$ of the scattering control equation~\eqref{e:maf}, the proof of Theorem~\ref{t:basic-maf} implies (via unique continuation) that the wave field $v(t,x)=(F\clsr\pi Rh_\infty)(2T-t,x)$ is stationary harmonic at $t=T$ on $\bTheta_T^\star$, and in particular smooth at $\eta$. Propagation of singularities makes this impossible, since $\gamma([0,2T])$ lies completely inside $\Theta$. Hence no $h_\infty\in\mathbf C$ exists, and the Neumann series for $h_0$ must diverge, implying that $\norm{\pi^\star R\pi^\star R}=1$. Using this argument, a divergent Neumann series may be constructed whenever $(T^*\bTheta)_T \neq T^*(\bTheta_T)$. Hence we expect $\norm{\pi^\star R\pi^\star R}=1$ in general for $n>1$ dimensions, in opposition to Proposition~\ref{p:1D-convergence} in 1D. 
It is worth noting that in numerical tests the Neumann iteration appears to follow its microlocally predicted behavior (isolation of $h\MDT$) more closely than its exact behavior (isolation of $h\DT$). \subsection{Proof of convergence in one dimension} \label{s:compare-proofs} \begin{proof}[Proof of Proposition~\ref{p:1D-convergence}] This proof is inspired in large part by a proof of Stefanov and Uhlmann~\cite[Prop. 5.1]{SU-TATBrain}. Let $x(t)$ be the inverse function of the travel time $t=\int_0^x c(x')^{-1}\,\ud x'=d^*_\Theta(x)$; then $\Theta_t=(x(t),\infty)$. Choose $\delta>0$ small enough that $\abs{t_1-t_2}>\delta/2$ for any distinct $x(t_1),x(t_2)\in\singsupp c$. In $(-\infty,\eps)$ take the factorization $\d_t^2-\Delta=(\d_t+\d_x)(\d_t-\d_x)$ associated with d'Alembert solutions $u(t,x)=f(x-t)+g(x+t)$. Identifying $h_0$ with $(f,g)\in H^1\times H^1$, \begin{equation} \dnorm{h_0}^2 = \int_0^\eps c^{-2}\dabs{g'-f'}^2 + \dabs{f'+g'}^2\,dx = 2\big(\dnorm{f'}_{L^2}^2+\dnorm{g'}_{L^2}^2\big). \end{equation} The leftward-traveling component $g$ is trivially handled, since it is preserved by $R\pi^\star R$: indeed, if $f= 0$, then $\supp Rh_0\subset (-2T,-2T+\eps)$, and $\pi^\star R\pi^\star Rh_0=\pi^\star R^2h_0=0$. Hence we restrict attention to rightward-traveling initial data $h_0=(f,0)$. Intuitively, the energy of the direct transmission of $f$, that is, its image under the graph FIO components of $R$ involving only transmissions, should be bounded away from zero by \Garding's inequality since these components are elliptic. To start, assume $\supp h_0$ is contained in an interval $(a,b)$ of width $b-a\leq\delta$, so that no multiply-reflected rays enter the direct transmission region $I=(x(a+2T),x(b+2T))$. Furthermore, assume $c$ is constant on $I$, so that $Rh_0$ again divides into leftward- and rightward-travelling components $F,G$.
On $I$ we have $Rh_0 \eqml (R\DT^++R\DT^-) h_0$, where $R\DT^\pm$ are elliptic graph FIO (one for each family of bicharacteristics) associated with propagation along purely transmitted broken bicharacteristics; see Appendix~\ref{s:parametrix-construction}. Let $\pi_\pm=\fiv 2(I\pm iH)$ be the projections onto positive and negative frequencies (where $H$ is the Hilbert transform), and define the elliptic FIO $R\DT=R\DT^+\pi_+ + R\DT^-\pi_-$. Now on $I$ we have $F'\eqml \psi\d_x R\DT \d_x^{-1}f'$. Applying \Garding's inequality to the normal operator of $\d_x R\DT\d_x^{-1}$, with an appropriate spatial cutoff, \begin{nalign} \norm{h_0} = \sqrt 2\dnorm{f'}_{L^2} &\leq C_1\sqrt 2\dnorm{F'}_{L^2(I)} + \dnorm {Kf'}_{L^2}\\ & = C_1\left(\En_{I}( Rh_0)\right)^{1/2} + \tnorm {\tilde K h_0}\\ & \leq C_1\norm{\clsr\pi Rh_0} + \tnorm {\tilde K h_0}, \label{e:1D-garding} \end{nalign} where $K,\tilde K$ are compact operators. In fact, $h_0=(f,0)\perp\ker \clsr\pi R$, so the compact error term $\norm{\tilde Kh_0}$ may be eliminated. To see this, by unique continuation $h_1=(f_1,g_1)\in\ker\clsr\pi R$ implies $Fh_1=0$ along $\RR\times\bdy\Omega$ and $[\eps,2T]\times\bdy\Theta$. Since $Fh_1=f_1(x-t)+g_1(x+t)$ outside $\Omega$, we conclude $f_1=0$. Conversely, $\clsr\pi R(0,g_1)=0$ so that $\ker\clsr\pi R=\{(0,g_1)\}\perp h_0$. Hence on the subspace $g=0$, for some constant $C_2>0$, \begin{equation} \norm{\pi^\star R\pi^\star R} \leq \norm{\pi^\star R} \leq 1 - \frac 1{C_2}, \end{equation} and as $\pi^\star R\pi^\star R(f,g)=\pi^\star R\pi^\star R(f,0)$ this proves the result for all $h_0$. The same is true even if $c$ is not constant on $I$, since without affecting $\pi^\star R\pi^\star R$ we may modify $c$ so as to be constant on some deeper interval $(x(2T'),\infty)$, $T'>T+\epsilon/2$, and deduce an estimate analogous to~\eqref{e:1D-garding}, but at the later time $t=2T'$.
By finite speed of propagation and conservation of energy, we can move the estimate back to $t=2T$ to establish~\eqref{e:1D-garding}. Finally, if $\eps>\delta$, it is possible that the direct transmission of a shallower part of $h_0$ may be cancelled by that of a deeper part of $h_0$, derailing the \Garding{} estimate. However, if this occurs the shallower and deeper parts of $h_0$ must be related by an elliptic FIO; therefore, the shallower part's energy is controlled by the deeper part's direct transmission. To make a simpler version of this idea rigorous, cover $(-2T,\eps)$ with intervals of width $\delta$: \begin{align} I_j&=((j-1)\delta,j\delta), & j&=\lfloor -2T/\delta\rfloor,\dotsc,\lceil \eps/\delta\rceil=k. \end{align} Choose $f_j\in H^1\Loc$ with $f_j'=\mathbf 1_{I_j}f'$, where $\mathbf 1_{I_j}$ denotes the characteristic function. For each $j$, we have an estimate of the form~\eqref{e:1D-garding} with $h_0=(f_j,0)$. Let $E_j=\sqrt 2\norm{f_j'}_{L^2}$ be the energy of $f_j$. Now, let $j_0$ be the smallest $j$ for which $E_j\geq 2C_2^{-1}\sum_{i>j}E_i$; this is true of $j=k$ so such a $j_0$ always exists. By finite speed of propagation, the energy of $Rh_0$ in $I'' = (x(2T+(j_0-1)\delta), x(2T+j_0\delta))$ depends only on $f_i$ with $i\geq j_0$. But the direct transmission of $f_{j_0}$ contributes at least energy $2\sum_{i>j_0} E_i$, so by conservation of energy and \Garding's inequality \begin{equation} \big\lVert{f_{j_0}'}\big\rVert_{L^2} \lesssim \En_{I''}(Rh_0)+\tnorm{\tilde K h_0}. \end{equation} However, we may bound all of $f'$ in terms of $f_{j_0}'$. For, if $j>j_0$ certainly $\tnorm{f_j'}\lesssim \tnorm{f_{j_0}'}$; for $j<j_0$, this is also true as $E_j\not\geq 2C_2^{-1}E_{j_0}$. Hence \begin{equation} \dnorm{f'}_{L^2} < C_3\En_{I''}(Rh_0)+\tnorm{\tilde K h_0}, \end{equation} with a constant $C_3=C_3(C_2,\eps,\delta,T)$. The remainder of the proof follows as before.
\end{proof} \section{Connecting scattering control to the Marchenko equation} \label{s:marchenko} In this section, we illustrate the connection between Marchenko's integral equation and scattering control by first generalizing Rose's focusing algorithm~\cite{Rose02} to higher dimensions. This will show how one can eliminate multiple scattering in higher dimensions to eventually obtain a focused wave. We will start by summarizing Rose's approach in one space dimension to eliminate multiple scattering and obtain a focused wave. We will then explain the drawbacks to his approach, and provide our results that generalize his one-sided autofocusing results to higher dimensions. In addition, the one dimensional case will provide an accurate illustration of the microlocal solution $A$ constructed in Proposition~\ref{p:constructive-parametrix}. This will provide a clear distinction between the scattering control process and Rose's focusing algorithm where the advantages of scattering control are readily apparent. Lastly, we will connect our results with the 1D Marchenko equation used to solve the inverse scattering problem. \subsection{Rose's one-sided autofocusing} \label{sec Rose autofocusing} In \cite{Rose02}, Rose tries to focus an acoustic wave (working in $\RR_t \times \RR_x$) inside a medium occupying $\{ x > 0 \}.$ On the left side, $\{ x < 0 \}$, the wave speed is known, say $1$ for simplicity. Inside $x<0$, the total wave field $u$ may directly be decomposed into its incoming and outgoing components: $$ u(x,t) = u_{\text{in}}(x,t) + u_{\text{out}}(x,t).$$ One is given the reflection response operator that we denote $\R(t)$ which relates the incoming and outgoing waves at the boundary $\{x=0\}$. 
By linearity, one has exactly $$ u_{\text{out}}(x=0,t) = \int \R(t-t')\,u_{\text{in}}(0,t')\,\ud t'.$$ The goal of Rose is to determine a boundary control $u_{\text{in}}(x=0,t)$ such that the total wave field $u$ will be a distribution with support equal to $\{ x = x_f\}$ at time $t=0$ for some focusing point $x_f > 0$ one is interested in. Letting $t_f$ denote the focusing time, i.e.~$t_f = d_c(0,x_f)$, Rose uses the ansatz $u_{\text{in}}(x=0,t) = \delta(t+t_f) + \Omega\tail(t;t_f)$, and then finds an equation that $\Omega\tail$ must solve in order to obtain focusing. Rose shows that $\Omega\tail$ must solve (see \cite[Equation (8)]{Rose02}) \begin{equation} { \label{eq: Rose equation} \Omega\tail(-t;t_f) + \R(\Omega\tail(-t;t_f)) = -\R(\delta(-t+t_f)) \text{ for }t<t_f, }\end{equation} where the action of $\R$ applied to a test function $\phi$ is \begin{equation} { \R\phi = \int_{-\infty}^{\infty}\R(t+t')\phi(t')\,\ud t'. }\end{equation} Equation (\ref{eq: Rose equation}) for $\Omega\tail(-t;t_f)$ is the Marchenko equation encountered in 1D potential scattering, which we will describe in more detail later. Also, if one denotes $r_0 = \delta(t-t_f)$ and $\tilde{K}\tail = \Omega\tail(-t;t_f)$, then this equation reads $$ \tilde{K}\tail + \R \tilde{K}\tail = -\R r_0 \text{ for } t< t_f,$$ Note that this approach relies heavily on the directional decomposition of a wave field into incoming and outgoing waves. In higher dimensions, such a decomposition may only be done microlocally, and as such, the reflection response operator $R_{\text{Rose}}$ would only be defined microlocally (see \cite{Stolk04} for a detailed account on doing this direction decomposition). The seismic literature has avoided this issue by ignoring the presence of evanescent and glancing waves, so a rigorous mathematical proof to obtain exact focusing in the presence of conormal singularities in higher dimensions has never been done. 
The whole point of using Cauchy data rather than boundary data is to avoid such microlocal considerations and obtain an iteration method in an exact sense. Thus, based on the above equations, if we wanted to generalize this to higher dimensions in an exact sense using our Cauchy data setup, one may naively guess that the appropriate equation should be \[ K\tail + \pi^{\star}R K\tail = -\pi^{\star}Rr_0\] for $r_0, K\tail \in \mathbf{C}$, with $r_0$ having support in $\Theta$ and $K\tail$ having support outside $\Theta.$ Notice that no directional wave decomposition is necessary to write down this equation. This in fact turns out to be the correct equation, and we provide a rigorous analysis in the next section. \subsection{Elimination of multiple scattering via a generalized Marchenko equation using Cauchy data} We prove here a generalization to arbitrary dimension of Rose's equation (\ref{eq: Rose equation}) that allows one to eliminate multiple scattering of the pressure wave field. This is the key step that will allow one to focus a pressure field or velocity field at a given time. However, to avoid difficult microlocal issues with directional wave decompositions, we prove a theorem using Cauchy data rather than boundary data. Afterwards, we relate how this connects to Rose's algorithm for focusing discussed in the previous section as well as the classical Marchenko equation, which use boundary control rather than Cauchy data. We now state the following general theorem about eliminating multiple scattering above a certain depth level $T$ (given in travel time coordinates) inside the medium, i.e.~within $\Theta^\star_T$. \begin{theorem} \label{thm: focusing} Let $u$ be the solution to the wave equation with Cauchy data $r_{\infty} = r_0 + K\tail \in \mathbf{C}$, where $r_0$ has support in $\Theta$, and $K\tail$ has support outside $\Theta$. Let $T>0$. 
\begin{enumerate} \item[(i)](Necessity) If $u(T)$ has support in $\Theta_T$, then necessarily $K\tail$ satisfies the following equation \begin{equation} { \label{eq: generalized Rose equation} K\tail + \pi^\star RK\tail = -\pi^\star Rr_0 }\end{equation} \item[(ii)](Partial converse) Suppose $K\tail$ satisfies $$ K\tail + \pi^\star RK\tail = -\pi^\star Rr_0.$$ Then $\Pi^\star_T u(T) = 0$ and $u(T)\restrictto{\Theta_T} = R_Tr_0 \restrictto{\Theta_T}.$ \item[(iii)](Uniqueness of the tail) Any two tails may only differ by Cauchy data that is totally internally reflected, and does not penetrate $\Theta$ in time $2T$. That is, if $K\tail + \pi^\star RK\tail = 0,$ then $K\tail =0$ in $\mathbf{C}$. \item[(iv)](Almost Solvability) The set of $r_0 \in \mathbf H$ for which one has a convergent Neumann series solution for $K\tail$, $$ \mathfrak{Q} := \{ r_0 \in \mathbf H : (I+\pi^\star R)^{-1}r_0 \in \mathbf C \}$$ is dense in $\mathbf H$. \end{enumerate} \end{theorem} (Note that $\Pi_T^\star$ denotes the orthogonal projection from $H^1(\Theta_T^*)$ onto $H_0^1(\Theta_T^*)$.) \begin{rem}\label{rem: control of multiple scattering} The main content of this theorem is that once $r_0$ is given, then one has a formula to construct $K\tail$ that controls the multiple scattering inside $\Theta^\star_T$ at time $T$. The construction of $K\tail$ gives \emph{no information} on what happens inside $\Theta_T$ at time $T$ since $K\tail$ does not affect this region. What happens inside $\Theta_T$ is entirely determined by $r_0$. Thus, for the purposes of focusing, one needs to construct $r_0$ beforehand such that the associated pressure field restricted to $\Theta_T$ at time $T$ will have a singular support at a single point. In Wapenaar et al.~\cite{Wap}, the authors assume they have an approximate velocity profile to construct an approximation to the direct transmission (denoted $\mathcal{T}^{\text{inv}}_d$ in equation (16) there), which is analogous to the $r_0$ we have here. 
They then construct a tail (denoted by $M$) analogous to our $K\tail$ to control the multiple scattering. \end{rem} \begin{rem} Notice that this theorem never mentions a focusing point but rather an inside region $\Theta_T$. This is because in order to make the theorem more general, we did not specify any support conditions for $r_0$. Typically however, one sends an incident pulse $r_0$ that is supported close to but outside $\Omega$, which is meant to be the direct transmission. Then the domain of influence of $r_0$ inside $\Theta_T$ at time $T$ is only a small region in a neighborhood of $\partial \Theta_T$ containing the desired point of focus (see Figure \ref{f:adt}). We relate the above theorem to focusing via a corollary at the end of this section. \end{rem} \begin{rem} As mentioned in \cite{Rose02} as well, this result only describes how to control multiple scattering of the pressure field, but says nothing about the velocity field at time $T$; hence energy is not controlled and the wave field may still have a large kinetic energy even at time $T$. Also, after the time $t=T$, the Cauchy data inside $\Theta_T^\star$ generate waves that may and generally do enter the inner layer $\Theta_T$ even before time $t=2T$. The main advantage of scattering control is that it controls both the pressure \textit{and} velocity field so that for $T \leq t \leq 2T$, the wave generated by the time $T$ Cauchy data inside $\Theta^*_T$ will not penetrate the domain of influence of the direct transmission $\bar{\pi}_TR_Tr_{0}$. \end{rem} \begin{proof} We start with (i). Suppose we found a wave field $u$ such that $u(T)$ has support in $\Theta_T$, and Cauchy data $r_{\infty} = r_0 + K\tail$ as in the statement of the theorem. Let us denote $$ w(t) = u(T+t) + u(T-t).$$ Observe that $$ w(0) =0 \text{ outside }\Theta_T, \qquad \text{and} \qquad w_t(0) = 0. $$ By finite propagation speed, one also has $w(t,x) = 0$ when $d(x,\Theta_T) > t$. 
First, if one adds $r_0$ to both sides of (\ref{eq: Marchenko for the tail}), and brings $-\pi^*R r_0$ to the left-hand side, one obtains
The proof follows, almost verbatim, the proof showing the density of the set $\mathcal Q$ defined in (\ref{e:Q-definition}).
Thus we believe that it will be possible to satisfy the assumption in the following corollary using boundary control methods. \begin{cor} Suppose $r_0 \in \mathbf{C}$, a time $t=T$, and $\Theta\supset \Omega$ are such that $\supp(r_0) \subset \Theta$ and the singular support of $F(r_0)(T)\restrictto{\Theta_T}$ is nontrivial, contained inside $B_{\epsilon}(x_f)$ for some small $\epsilon>0$. Then if $K\tail$ solves (\ref{eq: Marchenko for the tail}), then the singular support of $u(T)$ is nontrivial and contained in $B_{\epsilon}(x_f)$. \end{cor} The corollary is stated using the energy spaces employed throughout the paper. However, we believe it can be refined to encompass general distributions and in particular a point singular support so that one has a focusing wave in the usual sense. \begin{rem} We emphasize again that despite the attractiveness of the corollary, it only gives focusing of the pressure field and says nothing about the velocity field. Thus, once one goes past time $t=T$, one has lost all control and one has no information on the wave field at such times, which is usually quite complex since $K\tail$ needs to be quite complicated in order to control the multiple scattering that allows focusing. Thus, the scattering control procedure is much more useful in this regard. \end{rem} We close this section with an analogous theorem to Theorem \ref{thm: focusing} which controls the multiple scattering of the velocity field instead. The proof is almost identical excepting sign changes so we omit it. \begin{theorem} (Multiple scattering control of velocity field)\label{thm: focusing velocity} Let $u$ be the solution to the wave equation with Cauchy data $r_{\infty} = r_0 + K\tail \in \mathbf{C}$, where $r_0$ has support in $\Theta$, and $K\tail$ has support outside $\Theta$. Let $T>0$. 
We note that an almost identical proof used to recover kinetic energy of the almost direct transmission in Propositions \ref{p:basic-energy} and \ref{p:limit-energy} may be used here to recover this energy from $K\tail$ instead.
Then any wave field inside $\Omega^\star$ is of the form \begin{equation} { u \restrictto{\Omega^\star} = f(t-x) + g(x+t) }\end{equation} We assume that $\supp(f(s))\subset \{ -T < s < T+\epsilon \}$ ($T$ is the focusing time; i.e.~we are focusing at a point $x_T$ which is distance $T$ away from $0$ using the metric determined by $c$) and that the left going wave $g$ is activated only after the right going wave $f$ hits the boundary $\{ x =0 \}.$ Precisely, this means that $$ \supp(g(s)) \subset \{ s > -T \}.$$ As described in the last section, one has \begin{equation} {\label{eq: CtoB g is related to f} g(t) = \R \ast f = \int_{-\infty}^{\infty} \R(t-t')f(t')dt'. }\end{equation} This is well-defined in an exact sense precisely since there are no glancing rays in 1 space dimension. See for example \cite{AR02} for details. To avoid dealing with harmonic extensions, as they do not add anything essential, we will assume that $R$ applied to any of our Cauchy data has $0$ trace on $\partial \Theta^*= \{ x=0\}$. This merely ensures that $$ \pi^*R = \mathbf{1}_{\Theta^*}R = \mathbf{1}_{\{{x<-\epsilon}\}}R$$ when applied to such Cauchy data. Next, observe that since $g(s) = 0 $ when $ s \leq 0$ and using the support condition of $f$, our Cauchy data (initially given at $t=-T$ as opposed to $t=0$) and its time-$2T$ propagation is \begin{nalign} \tilde{\f}(x):=\u(-T) &= \begin{pmatrix}f(-T-x)\\f'(-T-x)\end{pmatrix},\\ \pi^*R(\u(-T)) = \pi^*R\tilde{\f} &= \mathbf{1}_{\{ x< -\epsilon \}}\begin{pmatrix}g(T+x)\\-g'(T+x)\end{pmatrix}. \end{nalign} Then by $(\ref{eq: CtoB g is related to f})$ we have \begin{equation} { (\pi^*R\tilde{\f})(t-T) = \mathbf{1}_{\{{t<T-\epsilon}\}}\nu\g(t) = \mathbf{1}_{\{{t<T-\epsilon}\}}\nu(\R\star \f)(t), }\end{equation} where we get an equation for $g'(t)$ by differentiating (\ref{eq: CtoB g is related to f}), and we use the notation $\f, \g$ to represent a column vector of $f,g$ and their derivative. 
(translating everything by time $T$ and using the notation of boldface letters to represent a vector consisting of the function and its time derivative):
where the first equality is obtained by subtracting $\mathbf{r}_0(-t)$ from both sides of the first equation and writing $f = r_0+K\tail$.
(Recall that in 1 dimension, the acoustic wave equation may be put into this form by a change of variables as in \cite{Bur80}.)
One is given the data $\M(t) = K_1(x=0,t)$ (interpreted as a generalized trace), and the goal is to recover $K$ from $\M$.
\begin{figure} \caption{These figures correspond to the incident pulse in Figure~\ref{f:mr-demo-original}.} \label{f: 1d comparisons} \label{f: Roses tail} \label{f: pulse with ma tail} \label{f:rose-and-sc} \end{figure}
The full solution operator $F$ is then equivalent microlocally to a sum of solution operators $F^\pm$ corresponding to $\d_t\pm iQ$, with initial data related by a microlocally invertible matrix \PsiDO{} $P$: \begin{align} F(f_0,f_1) &\eqml F^+g_++F^-g_-, & \begin{bmatrix} g_+\\g_-\end{bmatrix}\eqml P\begin{bmatrix} f_0\\f_1\end{bmatrix}. \end{align} The Cauchy data $(g_+,g_-)$ may be interpreted as a single distribution $g$ on a doubled space $\mathbf Z=Z_+\sqcup Z_-$ containing two copies of $Z$. We now describe a parametrix $\tilde R$ for $R=\nu\circ R_{2T}$ as a sum of graph FIO on $\mathbf Z$ built from sequences of reflections and transmissions, along with operators propagating data from one boundary to another, or propagating the initial data to boundary data. The key feature of the propagators is that waves reaching the boundary of a subdomain $\Omega_j$ simply leave $\Omega_j$ rather than reflecting. To handle reflections and refractions, we record the outgoing boundary data left by waves escaping $\Omega_j$ and convert them to appropriate incoming boundary data on each side of the interface, which generate reflected and refracted waves. \paragraph{Cauchy Propagators: $\JCS$, $\JCSp$, $\JCB$} We first develop a reflectionless solution operator $\JCS$ for the Cauchy problem on $\mathbf Z$. To begin, extend each restriction $c_j= \restr{c}_{\Omega_j}$ to a smooth function on $\RR^n$. Let $E^\pm_j$ be the half-wave Lax parametrix associated to $\d_t\pm iQ$, $Q=(-c_j^2\Delta)^{\smash{1/2}}$. Each $\eta\in \To^*\Omega_{\pm,j}$ is associated with a unique $c_j$-bicharacteristic $\gamma_\eta(t)$ in $\To^*\RR^n$ passing through $\eta$ at $t=0$, which may escape and possibly re-enter $\Omega_{\pm,j}$ as $t\to\pm\infty$. To prevent re-entry of wavefronts, we introduce a pseudodifferential cutoff $\varphi(t,\xi)$, omitting some details for brevity. 
Let $t_{\mathrm e\pm}$, $t_{\mathrm r\pm}$ denote the first positive and negative escape and re-entry times; let $\varphi(t,\gamma_\eta(t))$ be identically one on $[t_{\mathrm e-},t_{\mathrm e+}]$ and supported in $(t_{\mathrm r-}, t_{\mathrm r+})$. Modify $\varphi$ on a small neighborhood of $\RR\times\To^*\bdy\Omega_{\pm,j}$ (the glancing rays) to ensure it is smooth. Finally, let $\JCS$ be the restriction of $\varphi(t,D_x)\circ E^\pm_j$ to $\RR\times\Omega_{\pm,j}$; this is the desired reflectionless propagator. We also require a variant $\JCSp$ of $\JCS$ in which waves travel only forward in time. For this replace $\varphi$ with some $\varphi^+$ supported in $(t_{\mathrm e-}, t_{\mathrm r+})$ and equal to 1 on $[0,t_{\mathrm e+}]$. Restricting $\JCSp$ to the boundary, we obtain the \emph{Cauchy-to-boundary} map $\JCB=\restr{\JCSp}^{\phantom+}_{\RR\times\bdy\mathbf Z}$. It can be shown (cf.~\cite{Chazarain}) that $\JCS,\JCSp\in I^{-1/4}(\mathbf Z\shortrightarrow \RR\times \mathbf Z)$, and $\JCB\in I^0(\mathbf Z\shortrightarrow\RR\times\bdy\mathbf Z)$. As desired, $\JCS$ and $\JCSp$ are parametrices: $(\d_t\pm iQ)\JCS h,(\d_t\pm iQ)\JCSp h\eqml 0$ for $\WF(h)$ lying in a set $\mathcal V\subset\To^*\mathbf Z$ whose bicharacteristics are sufficiently far from glancing. By a direct argument with oscillatory integral representations, it can also be shown that $\JCB$ is elliptic at covectors in $\mathcal V$ whose bicharacteristics intersect $\bdy\mathbf Z$. The near-glancing covector set $\mathcal W$ of~\sref{s:microlocal} is then $\To^*\mathbf Z\setminus\mathcal V$. \paragraph{Boundary Propagators} Outgoing solutions from boundary data $f\in\mathcal D'(\RR\times\mathbf Z)$ may be obtained by microlocally converting boundary data to Cauchy data, then applying $\JCS$. The boundary-to-Cauchy conversion can be achieved by applying a microlocal inverse of $\JCB$, conjugated by the time-reflecting map $S_s\colon t\mapsto s-t$ for an appropriate $s$. 
More precisely, near any covector $\beta=(t,x';\tau,\xi')\in\bdy\Omega_{\pm,j}$ in the hyperbolic region $\abs\tau>c_j\abs{\xi'}$ there exists a unique bicharacteristic $\gamma$ passing through\footnote{That is, $(di)^*\gamma(t)=\beta$, where $i\colon \bdy \mathbf Z\hookrightarrow \clsr {\mathbf Z}$.} $\beta$ and lying inside $\bOmega_{\pm,j}$ in some time interval $[s,t)$, $s<t$. Then $\JBS$ may be defined as $S_s\JCS \JCB^{-1}S_s$ microlocally near $\beta$. On the elliptic region $\abs\tau<c_j\abs{\xi'}$ define $\JBS$ as a parametrix for the elliptic boundary value problem; see e.g.~\cite[\textsection4.8]{SU-TATBrain}. Applying a microlocal partition of unity, we obtain a global definition of $\JBS$ away from a neighborhood of the glancing region $\abs\tau=c_j\abs{\xi'}$. It can be proven that $\JBS\in I^{-1/4}(\RR\times\bdy\mathbf Z\shortrightarrow \RR\times \mathbf Z)$. Its restriction to the boundary $r_\bdy\circ\JBS$ consists of a pseudodifferential operator equal to the identity on $\mathcal W$ and an elliptic graph FIO $\JBB\in I^0(\RR\times\bdy\mathbf Z\shortrightarrow\RR\times\bdy\mathbf Z)$ describing waves traveling from one boundary to another. \paragraph{Reflection and Transmission} It is well known that transmitted and reflected waves arise from requiring a weak solution to be $C^1$ at interfaces. Given incoming boundary data $f\in\mathcal E'(\RR\times\bdy\mathbf Z)$ (an image of $\JCB$ or $\JBB$) microsupported near $\beta$, we seek data $f\subR,\,f\subT$ satisfying the $C^1$ constraints \begin{nalign} f + f\subR &\eqml \iota f\subT,\\ \d_\nu (\upsilon\JBS\upsilon f + \JBS f\subR)\big|_{\RR\times\bdy\mathbf Z} &\eqml \iota \d_\nu\JBS f\subT\big|_{\RR\times\bdy\mathbf Z}. \label{e:C1-continuity} \end{nalign} Here, $\upsilon$ is time-reversal, so $\upsilon\JBS\upsilon$ is the outgoing solution that generated $f$. 
In the doubly hyperbolic region where $\abs\tau>c\abs{\xi'}$ on both sides of the interface,
\paragraph{Parametrix} With all the necessary components defined, we now set \begin{nalign} \tilde F &= \JCS + \JBS M\sum_{k=0}^\infty (\JBB M)^k\JCB,\\ \tilde R &= r_{2T}\circ\tilde F, \label{e:parametrices} \end{nalign} where $r_{2T}$ is restriction to $t=2T$, plus time-reversal. Again omitting the proof, it can be shown that $\tilde F\eqml F$ and $\tilde R\eqml R$ away from glancing rays; that is, for initial data $h_0$ such that every broken bicharacteristic originating in $\WF(h_0)$ is sufficiently far from glancing. Recalling that $M=M\subR+M\subT$, we may write $\tilde R$ as a sum of graph FIO indexed by sequences of reflections and transmissions: \begin{nalign} \tilde R &= \smash{\sum_{\mathclap{\substack{s\in\{R,T\}^k\\k\geq0}}} \,\tilde R_s,} \qquad\qquad& \tilde R_{()} &= r_{2T} \JCS,\\ &&\tilde R_{(s_1,\dotsc,s_k)} &= r_{2T} \JBS M_{s_k}\JBB\dotsb M_{s_2}\JBB M_{s_1} \JCB. \label{e:propagator-graph-components} \end{nalign} The solution operator $\tilde F$ likewise decomposes into analogous components $\tilde F_s$. \paragraph{Comparison with Layered Media Parametrices} The above construction is in fact the natural generalization from the flat interface case of a layered media. Indeed, suppose our space $\Theta$ is only a small perturbation of the flat layered media case (see \cite{Hij87} for notation and analysis in the flat case). This ensures that bicharacteristic segments starting from $\Gamma_i$ hit $\Gamma_{i-1}$ or $\Gamma_{i+1}$ first before hitting another interface (here, $\Omega_i$ lies below $\Gamma_i$ and above $\Gamma_{i+1}$). The full wave field may be microlocally decomposed into upgoing and downgoing components at each interface $\Gamma_i$ denoted $u^{(i)-}$, resp. $u^{(i)+}$ as described in \cite[proof of Theorem 3.1]{StDeHoop02}. Then localizing the construction of the boundary-to-boundary maps $\JBB$, we obtain $\JBB^{i,i+1}$ (resp. $\JBB^{i,i-1}$), which propagate $u^{i,+}$ (resp. $u^{i,-}$) to interface $\Gamma_{i+1}$ (resp. 
Thus, as done in \cite{Cist73}, we may combine the $R,T$ operators and the corresponding $\JBB$ occurring in the above formulas into one operator (for example, $R^{i,i}\JBB^{i+1,i}$ becomes a single operator). Then we form $T^{\pm}$ and $R^{\pm}$, each an $r \times r$ matrix of FIO's, to obtain the following recursive formula:
To connect this construction to (\ref{e:parametrices}), start with Cauchy data $\phi_{\text{Cauchy}} \in \mathbf{C}$ with microsupport close to a single covector, whose corresponding geodesic hits $\Gamma_1$ transversely. Then the solution restricted to $\Gamma_1$ near this first intersection is microlocally equal to $$\phi_{\Gamma_1} = \phi_{\text{incoming}} + \phi_{\text{source}},$$ where $\phi_{\text{incoming}}=\JCB \phi_{\text{Cauchy}}$ and $\phi_{\text{source}}^+ = T^{0,1}\JCB\phi_{\text{Cauchy}}$ and $\phi_{\text{source}}^- = R^{1,0}\JCB\phi_{\text{Cauchy}}$. So the upgoing and downgoing parts of the solution at the interfaces are given by \[ \col{u^+\\u^-} = \col{(\phi^+_{\text{incoming}},0,\dots,0)^T\\(\phi^-_{\text{incoming}},0,\dots,0)^T} + \sum_{k=0}^{\infty} S^k_{sc} \col{(\phi^+_{\text{source}},0,\dots,0)^T\\(\phi^-_{\text{source}},0,\dots,0)^T}. \] After applying the boundary to solution operator, we obtain a formula exactly analogous to (\ref{e:parametrices}), and one can use the scattering matrix to track the principal symbols of the wave field in each $\Omega_i$ separately. \paragraph{Funding Acknowledgements:} P.~C.~and V.~K.~were supported by the Simons Foundation under the MATH $+$ X program. M.~V.~dH.~was partially supported by the Simons Foundation under the MATH $+$ X program, the National Science Foundation under grant DMS-1559587, and by the members of the Geo-Mathematical Group at Rice University. G.~U.~is Walker Family Endowed Professor of Mathematics at the University of Washington, and was partially supported by the National Science Foundation, a Si-Yuan Professorship at Hong Kong University of Science and Technology, and a FiDiPro Professorship at the Academy of Finland. \end{document}
\begin{document} \title{\bf\Large{Minimal Time Generation of Density Matrices for a~Two-Level Quantum System Driven by Coherent and Incoherent Controls}} \author{\normalsize {\bf Oleg~V.~Morzhin}\footnote{E-mail: {\tt [email protected]}}~$^{,1}$ \quad and \quad {\bf Alexander~N.~Pechen}\footnote{E-mail: {\tt [email protected]} (corresponding author)}~$^{,1,2}$ \\ \small $^1$ Steklov Mathematical Institute of Russian Academy of Sciences,\\ \small Department of Mathematical Methods for Quantum Technologies, \\ \small 8 Gubkina Str., Moscow, 119991, Russia; \\ \small $^2$ National University of Science and Technology ``MISiS'',\\ \small 6 Leninskiy prospekt, Moscow, 119991, Russia} \date{ } \maketitle \makeatletter \renewcommand{\@makefnmark}{} \makeatother \begin{abstract} The article considers a two-level open quantum system whose dynamics is driven by a combination of coherent and incoherent controls. Coherent control enters into the Hamiltonian part of the dynamics whereas incoherent control enters into the dissipative part. The goal is to find controls which move the system from an initial density matrix to a given target density matrix as fast as possible. To achieve this goal, we reformulate the optimal control problem in terms of controlled evolution in the Bloch ball and then apply Pontryagin maximum principle and gradient projection method to numerically find minimal time and optimal coherent and incoherent controls. General method is provided and several examples of initial and target states are explicitly considered. {\bf Key words}: Quantum control, open quantum system, coherent control, incoherent control. 
\end{abstract} \tableofcontents \section{Introduction} Control of quantum systems of atomic and molecular scale is an important branch of modern science with existing and prospective applications in physics, chemistry and quantum technology~\cite{RiceBook2000, BrumerBook2003, TannorBook2007, FradkovBook2007, WisemanBook2010, Petersen2010, ZagoskinBook2011, Brif_Chakrabarti_Rabitz_article_2010, Glaser2015Report, CPKoch_2016_OpenQS, Borzi_book_2017, Lyakhov_Pechen_Lee_2018, Amosov_Mokeev_2018, Avanesov_Kronberg_Pechen_2018}. Two general types of quantum control exist. Coherent control drives essentially the Hamiltonian aspects of the dynamics, and is typically realized by a shaped laser pulse~\cite{RiceBook2000, BrumerBook2003, TannorBook2007, FradkovBook2007, Brif_Chakrabarti_Rabitz_article_2010}. Incoherent control drives non-Hamiltonian, i.e., dissipative aspects of the dynamics, and can be realized by reservoir engineering~\cite{Pechen_Rabitz_2006}, measurement apparatus~\cite{WisemanBook2010,Pechen_Trushechkin_2015}, etc. Control of quantum systems driven by a combination of coherent and incoherent controls was considered in~\cite{Pechen_Rabitz_2006, Pechen_Rabitz_2014}. Then it was shown that for any initial and target states of a general $n$-level quantum system there exists a combination of coherent and incoherent controls which moves the initial state arbitrarily close to the target state asymptotically as final time $T\to\infty$~\cite{Pechen_PhysRevA_2011}. Therefore the quantum system under coherent and incoherent controls is asymptotically controllable in the space of all density matrices. However, the method of~\cite{Pechen_PhysRevA_2011} does not guarantee that this state-to-state transfer is as fast as possible. This motivates the problem of finding a way to steer the initial state to a final state in a minimal possible time, which we study below for a two-level system. 
Time-optimal control problems were considered for open and closed two-level systems, for example, in~\cite{Lapert_2010,Boscain_Gronberg_Long_Rabitz_2014, Albertini_DAlessandro_2016}. Optimal control at the quantum speed limit for a two-level Landau--Zener system is analyzed in~\cite{Caneva2009}. Manipulation of states of a degenerate quantum system is considered in~\cite{Volovich_Kozyrev_2016}. In this article we consider minimal time steering of an initial density matrix of a two-level system into a target density matrix by coherent and incoherent controls. We reformulate the control problem as evolution of a real vector in the Bloch ball, then apply Pontryagin maximum principle~\cite{Pontryagin_et_al_book_1962} and a version of gradient projection method (GPM)~\cite{Nikolskii_2007, Demyanov_Rubinov_book_1970, Bertsekas_book_2016}. Using GPM in the functional space of piecewise continuous control functions, we compute sequential improvements of controls for various $\rho_0$ and $\rho_{\rm target}$. \section{Formulation of the Problem} The most general state of a two-level system is described by a density matrix, i.e., by a~$(2 \times 2)$ Hermitian matrix $\rho(t) \in \mathbb{C}^{2 \times 2}$ which is positive, $\rho(t) \geq 0$, and has unit trace, ${\rm Tr} \rho(t) = 1$. Evolution of the density matrix is described by the master equation (see \cite{Pechen_Rabitz_2006}) \begin{eqnarray} \dfrac{d \rho(t)}{dt} &=& -\dfrac{i}{\hbar} \Big[ \widehat{\bf H}_0 + \widehat{\bf V} v(t), \rho(t) \Big] + \gamma D(\rho(t), n(t)), \qquad \rho(0) = \rho_0. \label{f1} \end{eqnarray} Operators $\widehat{\bf H}_0$ and $\widehat{\bf V}$ are Hermitian, and $\widehat{\bf H}_0$ has two different eigenvalues. Without loss of generality we consider \[ \widehat{\bf H}_0 = \hbar \omega \begin{pmatrix} 0 & 0 \\ 0 & 1 \end{pmatrix},\qquad \widehat{\bf V} = \mu \begin{pmatrix} 0 & 1 \\ 1 & 0 \end{pmatrix}, \] where $\omega > 0$, $\mu \in \mathbb{R}$, $\mu \neq 0$. 
Dissipative superoperator describes interaction between the system and its environment and has the form \begin{eqnarray} D(\rho(t), n(t)) &=& n(t) \Big( \sigma^+ \rho(t) \sigma^- + \sigma^- \rho(t) \sigma^+ - \dfrac{1}{2} \Big\{ \sigma^- \sigma^+ + \sigma^+ \sigma^-, \rho(t) \Big\} \Big) + \nonumber \\ &+&\Big( \sigma^+ \rho(t) \sigma^- - \dfrac{1}{2} \left\{ \sigma^- \sigma^+, \rho(t)\right\}\Big). \label{f2} \end{eqnarray} The parameter $\gamma > 0$ determines strength of interaction with the environment. Matrices $\sigma^\pm$ are \[ \sigma^- = \begin{pmatrix} 0 & 0 \\ 1 & 0 \end{pmatrix},\qquad \sigma^+ = \begin{pmatrix} 0 & 1 \\ 0 & 0 \end{pmatrix}. \] We use the notations for commutator $[A, B] = AB - BA$ and anti-commutator $\{A, B\} = AB + BA$ of two operators $A$ and $B$. Function $v = v(t)$, $t \in [0, T]$ represents a coherent control (e.g., shaped laser field), where $T$ is the final time. Function $n(t)$, $t \in [0, T]$ represents incoherent control (for example, non-equilibrium spectral density or temperature of the environment). The incoherent control by its physical meaning is a non-negative function. Consider controls $v$ and $n$ as piecewise continuous functions bounded as $v_{\min} \leq v(t) \leq v_{\max}$, $0 \leq n(t) \leq n_{\max}$. Thus \begin{equation} \begin{array}{c} u = (v, n) \in \mathcal{U} = PC([0,T]; Q), \qquad Q = [v_{\min}, v_{\max}] \times [0, n_{\max}] \subset \mathbb{R}^2. \end{array} \label{f3} \end{equation} Consider for the system (\ref{f1}) --- (\ref{f3}) the following terminal constraint: \begin{equation} \rho(T) = \rho_{\rm target} \label{f4} \end{equation} where $\rho_{\rm target}$ is some given target density matrix. The reachable set $\mathcal{R}(T, \rho_0, \mathcal{U})$ for the system (\ref{f1}) --- (\ref{f3}) is the set of all states $\rho(t)$ which can be obtained from $\rho_0$ by controls from $\mathcal{U}$ to the time $T$. 
It can happen that for small enough $T$ the corresponding reachable set $\mathcal{R}(T, \rho_0, \mathcal{U})$ does not contain the target state $\rho_{\rm target}$. The problem of moving the system (\ref{f1}), (\ref{f3}) from a given initial density matrix $\rho_0$ to a given target density matrix $\rho_{\rm target}$ during as small as possible time $T$ can be formulated as minimization of the objective \begin{equation} J(u,T) = T \to \min \label{f5} \end{equation} subject to constraint (\ref{f4}). In other words, the goal is to find \begin{gather*} \overline{T} = \min\left\{T> 0~|~ \rho(T) = \rho_{\rm target} \right\} \end{gather*} and the corresponding control $\overline{u} \in \mathcal{U}$. \section{Evolution in the Bloch Ball} In this section we reformulate the original control problem as controlled evolution in the Bloch ball. Consider the representation of density matrix (e.g., \cite{Holevo_book_De_Gruyter_2012}) \begin{equation} \rho = \dfrac{1}{2} \left( \sigma_0 + \sum\limits_{j=1}^3 x_j \sigma_j \right) = \dfrac{1}{2} \begin{pmatrix} 1 + x_3 & x_1 - i x_2 \\ x_1 + i x_2 & 1 - x_3 \end{pmatrix}, \label{f6} \end{equation} where matrices $\sigma_0 = \mathbb{I}_2$, $\sigma_1 = \begin{pmatrix} 0 & 1 \\ 1 & 0 \end{pmatrix}$, $\sigma_2 = \begin{pmatrix} 0 & -i \\ i & 0 \end{pmatrix}$, $\sigma_3 = \begin{pmatrix} 1 & 0 \\ 0 & -1 \end{pmatrix}$ form the Pauli basis. Vector $x = (x_1, x_2, x_3) \in \mathbb{R}^3$ satisfies the condition $\| x \|^2 \leq 1$. For pure quantum states vector $x$ satisfies the condition $\| x \|^2 = 1$ for each $t$, i.e. the points $x$ evolve on the Bloch sphere. If $\| x\|^2 < 1$, then $x$ represent a mixed quantum state and such $x$ are located in the inner part of the Bloch ball. The point in the origin represents the completely mixed state. 
Using (\ref{f6}), we rewrite the system (\ref{f1}), (\ref{f2}) as \begin{eqnarray} \dfrac{dx_1}{dt} &=& -\dfrac{\gamma}{2} x_1 + \omega x_2 - \gamma x_1 n, \qquad x_1(0) = x_{1,0}, \label{f7} \\ \dfrac{dx_2}{dt} &=& -\omega x_1 - \dfrac{\gamma}{2} x_2 - 2\kappa x_3 v - \gamma x_2 n, \qquad x_2(0) = x_{2,0}, \label{f8} \\ \dfrac{dx_3}{dt} &=& 2 \kappa x_2 v - \gamma x_3 + \gamma - 2 \gamma x_3 n, \qquad x_3(0) = x_{3,0}, \label{f9} \end{eqnarray} where $\kappa = \mu/\hbar$. The terminal constraint (\ref{f4}) takes the form \begin{equation} x_i(T) = x_{i,\rm target}. \label{f10} \end{equation} The values $x_{i,0}$ and $x_{i,\rm target}$ are calculated for the given matrices $\rho_0$ and $\rho_{\rm target}$ as $x_{i,0} = {\rm Tr} \rho_0 \sigma_i$ and $x_{i, \rm target} = {\rm Tr} \rho_{\rm target} \sigma_i$. \section{Optimization Method} \subsection{Reducing to a Sequence of Fixed-Time Optimal Control Problems} We apply for solving the optimal control problem (\ref{f3}), (\ref{f5}), (\ref{f7}) --- (\ref{f10}) the following approach. Consider a series of optimal control problems $P_j$, $j = 1, 2, \dots K$, where $j$th problem has no terminal constraint and is considered with some final time $T = T_j \in \left\{ T_1, T_2, \dots, T_K \right\}$. Cost criterion for each optimal control problem $P_j$ is \begin{equation} J_j(u) = \| x(T_j) - x_{\rm target} \|^2 \to \min. \label{f11} \end{equation} Thus, instead of the problem (\ref{f3}), (\ref{f5}), (\ref{f7}) --- (\ref{f10}) we consider a series of the problems (\ref{f3}), (\ref{f7}) --- (\ref{f9}), (\ref{f11}) for $j = 1, 2, \dots, K$. The goal is to obtain the minimal possible $T_j$ for which $J_j = 0$. Setting some sufficiently large $T_1$, we solve the sequence of optimal control problems until we can move the system from the initial state $x_0$ to the target state $x_{\rm target}$. 
The constraint $\| x(t)\|^2 \leq 1$ is satisfied automatically by the evolution equation and there is no need to use a special method for taking into account this constraint. \subsection{Solving a Fixed-Time Optimal Control Problem} This subsection considers GPM for solving a particular optimal control problem of the type (\ref{f3}), (\ref{f7}) --- (\ref{f9}), (\ref{f11}). We omit the index $j$ in the final time $T_j$ to shorten the notation. We apply the Pontryagin maximum principle \cite{Pontryagin_et_al_book_1962} which uses the Pontryagin function and the conjugate variables. In this case, the Pontryagin function is \begin{eqnarray*} H(p,x,u) = \mathcal{K}_v(p,x) v + \mathcal{K}_n(p,x) n + \widetilde{H}(p,x), \label{Pontryagin_function_f1} \end{eqnarray*} where $p \in \mathbb{R}^3$, the switching functions \begin{eqnarray*} \mathcal{K}_v(p,x) &=& \dfrac{\partial H}{\partial v} = 2 \kappa \left(p_3 x_2 - p_2 x_3 \right), \label{Pontryagin_function_f2} \\ \mathcal{K}_n(p,x) &=& \dfrac{\partial H}{\partial n} = -\gamma \left( p_1 x_1 + p_2 x_2 + 2 p_3 x_3 \right), \label{Pontryagin_function_f3} \end{eqnarray*} and \begin{eqnarray*} \widetilde{H}(p,x) &=& p_1 \Big(-\dfrac{\gamma}{2} x_1 + \omega x_2 \Big) + p_2 \Big(-\omega x_1 - \dfrac{\gamma}{2} x_2 \Big) + p_3 \Big(\gamma - \gamma x_3 \Big). \label{Pontryagin_function_f4} \end{eqnarray*} The conjugate system is \begin{eqnarray} \dfrac{dp_1}{dt} &=& \dfrac{\gamma}{2} p_1 + \gamma p_1 n + \omega p_2, \label{f12} \\ \dfrac{dp_2}{dt} &=& - \omega p_1 + \dfrac{\gamma}{2} p_2 + \gamma p_2 n - 2 \kappa p_3 v, \label{f13} \\ \dfrac{dp_3}{dt} &=& 2 \kappa p_2 v + \gamma p_3 + 2 \gamma p_3 n, \label{f14} \\ \qquad p_i(T) &=& -2 \left(x_i(T) - x_{i, \rm target}\right), \qquad i = 1, 2, 3. 
\label{f15} \end{eqnarray} The gradient of the cost functional $J$ at some control $u \in \mathcal{U}$ is the following: \begin{eqnarray} \dfrac{\delta J}{\delta u(t)} &=& -\dfrac{\partial H}{\partial u}\left(p(t), x(t), u(t) \right) = -\left(\mathcal{K}_v\left(p(t), x(t) \right), \mathcal{K}_n\left(p(t), x(t) \right) \right), \label{gradient_f} \end{eqnarray} where $x$, $p$ are correspondingly the solutions of the systems (\ref{f7}) --- (\ref{f9}) and (\ref{f12}) --- (\ref{f15}) for the considered control $u$. Fix some value $\alpha > 0$ which defines the step of the method, and $0 < \varepsilon \ll 1$ which defines the stopping criterion (e.g., $\varepsilon = 10^{-9}$). At $k$th iteration, GPM is represented by the following operations: \begin{enumerate} \item For the current admissible process $(u^{(k)}, x^{(k)})$ compute the corresponding solution $p^{(k)}$ of the system (\ref{f12}) --- (\ref{f15}). \item Compute the gradient (\ref{gradient_f}) at the triple of the functions $u = u^{(k)}$, $x=x^{(k)}$, $p=p^{(k)}$. \item Form the function \begin{eqnarray} u^{(k)}(t; \alpha) = u^{(k)}(t) - \alpha \dfrac{\delta J}{\delta u(t)}\Big|_{u = u^{(k)}}. \label{PGM_f0} \end{eqnarray} The components $v^{(k)}$ and $n^{(k)}$ of the function $u^{(k)}(t; \alpha)$ are: \begin{eqnarray} v^{(k)}(t; \alpha) &=& v^{(k)}(t) + \alpha \mathcal{K}_v(p^{(k)}(t), x^{(k)}(t)), \label{PGM_f1} \\ n^{(k)}(t; \alpha) &=& n^{(k)}(t) + \alpha \mathcal{K}_n(p^{(k)}(t), x^{(k)}(t)). \label{PGM_f2} \end{eqnarray} Further, form the function \begin{gather} u^{(k)}_{\rm Pr}(t; \alpha) = {\rm Pr}_Q(u^{(k)}(t; \alpha)), \label{ff} \end{gather} where ${\rm Pr}_Q$ is the orthogonal projection which maps any point outside of $Q$ to a closest point in $Q$, and leaves unchanged points in $Q$ \cite{Bertsekas_book_2016}. 
Its explicit action on the vector $u^{(k)}(t; \alpha)$ is the following: \begin{eqnarray} v^{(k)}_{\rm Pr}(t; \alpha) &=& \begin{cases} v^{\min}, & v^{(k)}(t; \alpha) < v^{\min},\\ v^{(k)}(t; \alpha), & v^{\min} \leq v^{(k)}(t; \alpha) \leq v^{\max}, \\ v^{\max}, & v^{(k)}(t; \alpha) > v^{\max}, \end{cases} \label{PGM_f3} \\ n^{(k)}_{\rm Pr}(t; \alpha) &=& \begin{cases} 0, & n^{(k)}(t; \alpha) < 0,\\ n^{(k)}(t; \alpha), & 0 \leq n^{(k)}(t; \alpha) \leq n^{\max}, \\ n^{\max}, & n^{(k)}(t; \alpha) > n^{\max}. \end{cases} \label{PGM_f4} \end{eqnarray} \item Form the control $u^{(k)}(\cdot; \alpha, \beta) = \left(v^{(k)}(\cdot; \alpha, \beta), n^{(k)}(\cdot; \alpha, \beta)\right)$ whose components are \begin{eqnarray} v^{(k)}(t; \alpha, \beta) &=& v^{(k)}(t) + \beta \left(v^{(k)}_{\rm Pr}(t; \alpha) - v^{(k)}(t) \right), \label{PGM_f5} \\ n^{(k)}(t; \alpha, \beta) &=& n^{(k)}(t) + \beta \left(n^{(k)}_{\rm Pr}(t; \alpha) - n^{(k)}(t) \right), \label{PGM_f6} \end{eqnarray} where $\beta \in (0,1]$. \item Compute the value \begin{eqnarray} \beta^{(k)} &=& {\rm arg}\min\limits_{\beta \in (0,1]} f(\beta), \label{PGM_f7} \end{eqnarray} where $f(\beta)= J\left( u^{(k)}(\cdot; \alpha, \beta) \right)$. This iteration step is the hardest because the problem (\ref{PGM_f7}) requires global optimization and the function $f(\beta)$ is defined implicitly such that for each $\beta$ the value $f(\beta)$ is computed through solving the Cauchy problem (\ref{f7}) --- (\ref{f9}) with the corresponding $u(\cdot) = u^{(k)}(\cdot; \alpha, \beta)$. \item Construct the next approximation \begin{eqnarray} u^{(k+1)}(t) &=& u^{(k)}(t; \alpha, \beta^{(k)}), \label{PGM_f8} \end{eqnarray} the corresponding solution $x^{(k+1)}$ of the Cauchy problem (\ref{f7}) --- (\ref{f9}), and compute the value $J(u^{(k+1)})$. 
If the inequality \begin{eqnarray} \left| J(u^{(k+1)}) - J(u^{(k)}) \right| < \varepsilon \label{PGM_f9} \end{eqnarray} is satisfied, then stop the iteration process; otherwise, take $k:= k +1$ and go to the next iteration. \end{enumerate} In this article, we consider control of two-level quantum systems. For such systems $\rho(t)$ is a $(2 \times 2)$ matrix which admits convenient parametrization~(\ref{f6}) by a vector in the Bloch ball. In the general case of an $n$-level quantum system, $\rho(t)$ is an $(n \times n)$ matrix for which there is no such simple parametrization. The extension of our method to this general case is an important task for future work, which may require other parametrizations for $\rho(t)$, as for example, parametrization considered in~\cite{Ilin_Shpagina_Uskov_Lychkovskiy_article_2018}. GPM is a first order (based on gradient of the cost functional) method which contains at each iteration the computationally hard step~(\ref{PGM_f7}) for finding the most suitable variation of the control $u^{(k)}$. For the considered control problem, it may be useful to adapt the 2nd order Krotov method (e.g.,~\cite{Sklarz_Tannor_article_2002}), which, being a method for nonlocal improvements, in contrast with GPM does not use the computationally hard variation of the control~$u^{(k)}$. Both GPM and the Krotov methods can give sequential improvements in the functional space of controls and can take into account constraints on the control values. It may also be useful to exploit GRAPE (GRadient Ascent Pulse Engineering)~\cite{Khaneja_Reiss_Kehlet_SchulteHerbruggen_Glaser_2005} and CRAB (Chopped Random-Basis Quantum Optimization)~\cite{Caneva_Calarco_Montangero_2011} methods. Both GPM and GRAPE use gradient of the cost functional but in contrast to GPM, GRAPE operates in a finite-dimensional space of parameters describing piecewise-constant parametrization of control functions. CRAB also works in a finite-dimensional space of parameters. 
However, these parameters describe trigonometric parametrization of the control. This parametrization takes into account the wave nature of coherent control. For optimization of the parameters in CRAB, one can use zero-order methods such as the Nelder-Mead method. \section{Numerical Results} Consider in the system (\ref{f7}) --- (\ref{f9}), $\omega = 1$, $\gamma = 2 \times 10^{-3}$, $\kappa =10^{-2}$, and in (\ref{f3}), $v_{\min} = -10$, $v_{\max} = 10$, and $n_{\max} = 1$. Set the parameter $\alpha = 10^3$. The value of $\alpha$ is chosen sufficiently large to compensate small values of the switching functions. Figure~\ref{fig:1} shows the results of numerical optimization for moving the system from the initial state with vector $(0, 0, -1)$ to the target state with vector $(0, 0, 0.5)$ for different values of $T$. When $T$ is relatively small, the method gives solution such that $x_1$, $x_2$, $x_3$ have large amplitudes almost during the entire time period. Figure~\ref{fig:2} shows how the cost functional $J$ decreases (in logarithmic scale) vs the iteration number in the sequential updates of $u$ for $T = 70$. \begin{figure} \caption{Moving the system in the Bloch ball from the initial state $(0, 0, -1)$ to the target state $(0, 0, 0.5)$ for $T=400$ (top), $T = 200$ (middle), and $T = 70$ (bottom). Left: evolution of the state in the Bloch ball. Center: evolution of components $x_1$, $x_2$, $x_3$. Right: optimal coherent (solid blue line) and incoherent (dash orange line) controls.} \label{fig:1} \end{figure} \begin{figure} \caption{Decrease of the cost functional $J$ vs iteration number (case $T=70$ which corresponds to the bottom pictures at Figure~\ref{fig:1} \label{fig:2} \end{figure} Figure~\ref{fig:3} shows the numerical results for moving a pure state with $(0,-1,0)$ into the target completely mixed state $(0,0,0)$. \begin{figure} \caption{Moving the system from the initial state $(0, -1, 0)$ to the target state $(0, 0, 0)$. 
Left: evolution of the state in the Bloch ball. Center: evolution of components $x_1$, $x_2$, $x_3$. Right: optimal coherent (solid blue line) and incoherent (dash orange line) controls.} \label{fig:3} \end{figure} \section{Conclusions} In this work manipulation of states of a two-level open quantum system driven by coherent and incoherent controls is considered. The control goal is to steer an initial density matrix into a target density matrix in as small as possible final time. To achieve this goal, we consider a decreasing series of final times, starting from some large enough final time. For each final time the control problem is formulated as minimizing distance to the target state. Then Pontryagin maximum principle for fixed final time and gradient projection method are applied to construct a numerical method for solving this problem. Several examples of initial and final states are explicitly considered using this method and minimal time and corresponding controls are numerically found. {\bf Acknowledgements}. Sections~2,~4~--~6 of this work are performed within the Russian Science Foundation Project No.~17-11-01388. Derivation of the dynamical equations in Section~3 is performed within the project No.~1.669.2016/FPM of the Ministry of Science and Higher Education of the Russian Federation. \end{document}
\begin{document} \frontmatter \numberwithin{equation}{chapter} \selectlanguage{english} \begin{abstract} The image reconstruction problem consists in finding an approximation of a function $f$ starting from its Radon transform $Rf$. This problem arises in the field of medical imaging when one tries to reconstruct the internal structure of the body, starting from its X-ray tomography. The classical approach to this problem is based on the Back-Projection Formula. This formula gives an analytical inversion of the Radon transform, provided that all the values of $Rf$ are known. In applications only a discrete set of values of $Rf$ is given, thus, one can only obtain an approximation of $f$. Another class of methods, called ART, can be used to solve the reconstruction problem. Following the ideas contained in ART, we try to apply the Hermite-Birkhoff interpolation to the reconstruction problem. It turns out that, since the Radon transform of a kernel basis function can be infinity, a regularization technique is needed. The method we present here is then based on positive definite kernel functions and it is very flexible thanks to the possibility to choose different kernels and parameters. We study the behavior of the methods and compare them with classical algorithms. \end{abstract} \tableofcontents \chapter{Introduction and content} This thesis is the result of a three-month stay at the Universit\"{a}t Hamburg during which I studied the problem of clinical image reconstruction, i.e. the problem of obtaining the image of the internal structure of a sample starting from its X-ray tomography. From a mathematical point of view this corresponds to finding a function $f$ knowing its Radon transform $Rf$. In the first Chapter the problem of image reconstruction is defined; we formalize the concept of Computed Axial Tomography and the history behind it. In Chapter 2 we discuss the mathematical aspect of the problem and its relation with the Radon transform. 
Then we follow the classical approach for solving the problem and deduce an inversion formula for the Radon transform: the \emph{Back-Projection Formula}. Finally, we adapt the Back-Projection Formula to be used in real applications and thus we obtain the classical Fourier-based discrete image reconstruction algorithms. In Chapter 3 we introduce a different class of methods, called Algebraic Reconstruction Techniques (ART) and use them to solve our problem. Following the ART approach, in Chapter 4 we describe kernel based methods and show how they can be used to solve the image reconstruction problem. Chapters 5 and 6 are the original part of the work. In Chapter 5 we introduce a regularization technique that is necessary to implement kernel based image reconstruction and we realize such methods using specific positive definite kernel functions. In Chapter 6 we study from a numerical point of view the behavior of the methods as a function of particular shape parameters and compare these methods with the Fourier based algorithms. In order to use the algorithms in a simple way, we also implemented a graphical user interface that allows the user to test the algorithms on a set of predefined mathematical phantoms, with the possibility to choose options. 
\chapter{List of symbols} \begin{tabular}{ll} $l_{t,\theta}$ & line in the plane characterized by values $t$ and $\theta$\\ $Rf(t,\theta)$ & Radon transform of the function $f$ at a point $(t,\theta)$\\ $\mathcal{S}$ & Schwartz space of rapidly decreasing functions\\ $Bh(x,y)$ & back projection of the function $h$ at point $(x,y)$\\ $F_{n}f(\omega)$ & $n$-dimensional Fourier transform of the function $f$ at a point $\omega$\\ $Ff(\omega)$ & $1$-dimensional Fourier transform of the function $f$ at a point $\omega$\\ $R_{D}f$ & discrete Radon transform of the function $f$ \\ $B_{D}h$ & discrete back projection of the function $h$ \\ $F_{D}f$ & discrete Fourier transform of the function $f$ \\ $FWHM(\phi)$ & full width half maximum of the function $\phi$\\ $\lambda^{y}K(\cdot,y)$ & linear operator $\lambda$ applied to the function $K$ with respect to the variable $y$ \\ $R_{w}f$ & Radon transform of the function $f$ multiplied by the window function $w$\\ $\text{erf}(x)$ & error function evaluated at a point $x$\\ $\numberset{P}^{d}_{k}$ & space of polynomial of degree lower or equal to $k$ on $\numberset{R}^{d}$\\ $k(A)$ & 1-norm condition number of the matrix $A$ \end{tabular} \mainmatter \chapter{Computed axial tomography} Computed axial tomography (CAT or CT) is a method that generates images of the interior of the body by digital computation applied to the measured transmission of X-rays tomography. In this process, an X-ray source and a set of aligned X-ray detectors are rotated around the patient (see Figure \ref{fig: toshiba_ct_scanner}). The word tomography is derived from the Greek \emph{tomos} (slice) and \emph{graphein} (to write). The history of CT scan starts in Germany in 1895, when Wilhelm Conrad R\"ontgen (1859-1923; Figure \ref{fig: william_roentgen}) discovered a new type of radiation, which he called X-rays \cite{RON}. 
This type of electromagnetic radiation, which has shorter wavelength than visible light and the ability to penetrate matter, was immediately used to image the interior of the human body. Figure \ref{fig: roentgen_first_xray} shows one of the first X-ray images; this kind of image showed a two-dimensional projection of the inner structures. In 1901 R\"ontgen received the first Nobel prize for physics. Basic to the CT technology are the theoretical principles of reconstruction of a three-dimensional object from multiple two-dimensional views relying on a mathematical model formulated by Johann Radon (1887-1956) in 1917 \cite{RAD17}. \begin{figure} \caption{The discovery of X-ray} \label{fig: william_roentgen} \label{fig: roentgen_first_xray} \label{fig: dics_xray} \end{figure} In 1979 the Nobel Prize for Medicine and Physiology was awarded jointly to Allan McLeod Cormack (1924-1998) and Godfrey Newbold Hounsfield (1919-2004), the two scientists primarily responsible for the development of computerized axial tomography in the 1960s and early 1970s. Cormack developed certain mathematical algorithms that could be used to create an image from X-ray data \cite{COR}. Working completely independently of Cormack and at about the same time, Hounsfield, a research scientist at EMI Central Research Laboratories in the United Kingdom, designed the first operational CT scanner, the first commercially available model and presented the first pictures of a patient's head \cite{HOU}. Compared to a plain X-ray image, the CT image showed remarkable contrast between tissues with small differences in X-ray attenuation coefficient (Figure \ref{fig: ct_brain} shows the CT scan of a section of the brain). Since 1980, the number of CT scans performed every year in the United States has risen from about 3 million to over 67 million (for further details about X-ray history one should refer to \cite{BASIC} or \cite{HISTORY}). 
\begin{figure} \caption{Computed axial tomography today} \label{fig: toshiba_ct_scanner} \label{fig: ct_brain} \label{fig: ct_today} \end{figure} The problem behind CT scans is essentially mathematical: if we know the values of the integral of a two- or three-dimensional function along all possible cross-sections, then how can we reconstruct the function itself? This is a particular case of what is called an \emph{inverse problem} and it was studied by the Austrian mathematician Johann Radon in the early part of the twentieth century. Radon's work incorporated a sophisticated use of the theory of transforms and integral operators. The practical obstacles to implementing Radon's theories are several. First, Radon's inversion methods assume knowledge of the behavior of the function along every cross-section, while in practice only a discrete set of cross-sections can be sampled. Thus it is possible to construct only an approximation of the solution. Second, the computation power needed to process a multitude of discrete measurements and obtain from them a good approximation of the solution has been available for just a few decades. In order to overcome these obstacles, theoretical approaches and approximation methods have been developed. \section{X-rays} A CT scan is generated from a set of thousands of X-ray beams, consisting of 160 or more beams at each of 180 directions. When a single X-ray beam of known intensity passes through a medium, some of the energy present in the beam is absorbed by the medium and some passes through. The intensity of the beam as it emerges from the medium can be measured by a detector. The difference between the initial and final intensities tells us about the ability of the medium to absorb energy. 
The idea behind the CT scan is that, by measuring the changes in the intensity of X-ray beams passing through the medium in different directions and by comparing the measurements, we can determine which locations within the sample are more or less absorbent than others. In our analysis of the X-rays behavior we will make some assumptions: \begin{itemize} \item X-ray beam is \emph{monochromatic}. That is, each photon has the same energy level $E$ and the beam propagates at a constant frequency. If $N(x)$ denotes the number of photons per second passing through a point $x$, then the intensity of the beam at the point $x$ is \begin{equation*} I(x)=E\cdot N(x); \end{equation*} \item X-ray beam has \emph{zero width}; \item X-ray beams are \emph{not subject to refraction or diffraction}. \end{itemize} Every substance has the property of absorbing a part of the photons that pass through it. To quantify this property we define the \emph{attenuation coefficient} of a material: \begin{definition} The \emph{attenuation coefficient} of a substance is the fractional number of photons removed from a beam of radiation per unit thickness of material through which it is passing due to all absorption and scattering processes. \end{definition} In radiology a variant of the attenuation coefficient is used: the \emph{Hounsfield unit}. Developed by Godfrey Hounsfield, the Hounsfield unit represents a comparison of the attenuation coefficient of the medium with that of water. Specifically: \begin{definition} The \emph{Hounsfield unit} of a medium is \begin{equation*} H_{\text{medium }}=\frac{A_{\text{medium}}-A_{\text{water}}}{A_{\text{water}}}, \end{equation*} where $A$ denotes the attenuation coefficient. \end{definition} Suppose now an X-ray beam passes through some medium located between the position $x$ and the position $x+\Delta x$. Suppose $A(x)$ is the attenuation coefficient of the medium located there.
Then the portion of all photons that will be absorbed in the interval $[x,x+\Delta x]$ is $p(x)=A(x)\Delta x$. The number of photons absorbed per second by the medium is then $p(x)N(x)=A(x)N(x)\Delta x$. Multiplying both sides by the energy level $E$ of each photon, we see that the loss of intensity of the X-ray over this interval is \begin{equation*} \Delta I\approx -A(x)I(x)\Delta x. \end{equation*} Let $\Delta x\rightarrow0$ to get the differential equation known as \emph{Beer's law}: \begin{equation} \frac{dI}{dx}=-A(x)I(x). \label{eq: beerLaw} \end{equation} In other words: \emph{The rate of change of intensity per millimeter of a nonrefractive, monochromatic, zero-width X-ray beam passing through a medium is jointly proportional to the intensity of the beam and to the attenuation coefficient of the medium.} The differential equation \eqref{eq: beerLaw} is separable. If the beam starts at the point $x_{0}$ with initial intensity $I_{0}=I(x_{0})$ and is detected, after passing through the medium, at the point $x_{1}$ with final intensity $I_{1}=I(x_{1})$, we get \begin{equation*} \int_{x_{0}}^{x_{1}}{\frac{dI}{I}}=-\int_{x_{0}}^{x_{1}}{A(x)\,dx}, \end{equation*} from which it follows that \begin{equation} \int_{x_{0}}^{x_{1}}{A(x)\,dx}=\ln\left( \frac{I_{0}}{I_{1}}\right). \label{eq: averageA} \end{equation} Here we know the initial and final values of $I$ and we want to determine the coefficient function $A$. Thus, from the measured intensity of the X-ray we are able to compute not the values of $A$ itself, but the value of the integral of $A$ along the line of the X-ray. From equation \eqref{eq: averageA} it is easy to see that we can not discriminate two functions that have the same value of the integral along the X-ray path $[x_{0},x_{1}]$.
The fundamental question of image reconstruction asks if it is possible to do that knowing the value of the integral of $A$ along every line: \textbf{The fundamental question of image reconstruction}: \emph{Can we reconstruct the function $A(x,y,z)$ (within some finite region) if we know the average value of $A$ along every line that passes through the region?} (cfr. \cite{BASIC} pp. 7.) In our study of CT scans, we will consider a two dimensional slice of the sample, obtained as the intersection of the sample and some plane, which we will generally assume coincides with the $xy$-plane. In this context, we interpret the attenuation coefficient function as a function $A(x,y)$ of two variables. \chapter{Fourier based methods}\label{chap: fourier_methods} In this chapter we study the methods that are used nowadays in the CT scanner. The mathematical foundation of these methods is based on the work of J. Radon on an integral transform, called in his honor \emph{Radon transform}, and its inverse. Roughly speaking we can think that sending a set of X-ray beams through a sample and measuring the intensity of the beams after their passage through it, correspond to compute the Radon transform of the sample's attenuation coefficient. Thus applying an inversion formula of the Radon transform gives us the value of the attenuation coefficient within the sample. In theory this is possible if we know the value of the Radon transform in every point of the sample. In practice only a discrete set of values can be recorded by a X-ray machine, that's why we can only obtain an approximation of the original attenuation coefficient function and we will have to consider problems that arise working with discrete functions, such as sampling, filtering and interpolation. We start this chapter formalizing the concept of Radon transform. 
Since this operator involves the computation of the integral of a function along lines in the plane, we need first to define a suitable characterization of lines in $\numberset{R}^{2}$. \section{Characterization of lines in $\numberset{R}^{2}$} Consider again the equation \eqref{eq: averageA}: \begin{equation} \int_{x_{0}}^{x_{1}}{A(x)\,dx}=\ln\left( \frac{I_{0}}{I_{1}}\right). \label{eq: averageA2} \end{equation} Suppose a sample of material occupies a finite region in space. At each point $(x,y,z)$ within the sample, the material there has an attenuation coefficient $A(x,y,z)$. An X-ray beam passing through the sample follows a line $l$ from an initial point $P$ (assumed to be outside the region) to a final point $Q$ (also assumed to be outside the region). The emission/detection machine measures the initial and final intensities of the beam at $P$ and $Q$, from which the value $\ln(I_{0}/I_{1})$ is calculated. According to \eqref{eq: averageA2} this is equal to the value of the integral $\int_{\overline{PQ}}{A(x,y,z)\,ds}$, where $ds$ represents arclength units along the segment $\overline{PQ}$ of the line $l$. Thus the measurement of each X-ray beam gives us information about the average value of $A$ along the path of the beam and it is fundamental to find a useful representation of lines that can help us in solving the image reconstruction problem. For simplicity let assume that we are interested only in the cross-section of a sample that lies in the $xy$-plane. Each X-ray will follow a segment of a line in the plane and we look for a way of cataloging all such lines. The approach we adopt is characterizing every line in the plane by a point that the line passes through and a normal vector to the line. 
Then, let $\vec{\textsf{n}}$ be a vector that is normal to a given line $l$, then there exists some angle $\theta$ such that $\vec{\textsf{n}}$ is parallel to the line radiating out from the origin at an angle $\theta$ measured counterclockwise from the positive $x$-axis (Figure \ref{fig: charac_line}). This line is also perpendicular to $l$ and thus intersects $l$ at some point whose coordinates in the plane have the form $(t\cos{\theta},t\sin{\theta})$ for some real number $t$. The line $l$ is hence characterized by the values of $t$ and $\theta$ and so we denote $l=l_{t,\theta}$. \begin{figure} \caption{A line in the plane can be characterized by two real numbers $t$, $\theta$} \label{fig: charac_line} \end{figure} \begin{definition} For any real numbers $t$ and $\theta$, the line $l_{t,\theta}$ is the line passing through the point $(t\cos{\theta},t\sin{\theta})$ and perpendicular to the vector $\vec{\textsf{n}}=(\cos{\theta},\sin{\theta})$. \end{definition} Because of the relationships $l_{t,\theta+2\pi}=l_{t,\theta}$ and $l_{t,\theta+\pi}=l_{-t,\theta}$ for all $t,\theta$, there is not a unique representation of the form $l_{t,\theta}$ for a line. For this reason we will consider only the set of lines \begin{equation*} \{l_{t,\theta}\ : \ t\in\numberset{R}, \, 0\leq\theta<\pi\}. \end{equation*} If we consider the unit vector $(-\sin{\theta},\cos{\theta})$, perpendicular to $\vec{\textsf{n}}$, every point on $l_{t,\theta}$ can be written as \begin{equation*} (t\cos{\theta},t\sin{\theta})+s(-\sin{\theta},\cos{\theta}), \end{equation*} for some number $s\in\numberset{R}$. So we can parametrize a line $l_{t,\theta}$ as $(x(s),y(s))$, where $s\in\numberset{R}$ and \begin{equation*} \begin{cases} &x(s)=t\cos{\theta}-s\sin{\theta}\\ &y(s)=t\sin{\theta}+s\cos{\theta} \end{cases} \end{equation*} Note that for every point $(x(s),y(s))\in l_{t,\theta}$, we have $x(s)^{2}+y(s)^{2}=t^{2}+s^{2}$. 
With this parametrization the arclength element along the line $l_{t,\theta}$ is given by \begin{equation*} \sqrt{\left( \frac{dx}{ds}\right)^{2}+\left( \frac{dy}{ds}\right)^{2}}ds=\sqrt{(-\sin{\theta})^{2}+(\cos{\theta})^{2}}ds=ds. \end{equation*} Therefore for a given function $A(x,y)$ defined in the plane, we get \begin{equation} \int_{l_{t,\theta}}{A(x,y)\,ds}=\int_{\numberset{R}}{A(t\cos{\theta}-s\sin{\theta},t\sin{\theta}+s\cos{\theta})\,ds}. \label{eq: int_line} \end{equation} The value of this integral is exactly what an X-ray emission/detection machine measures when an X-ray is emitted along the line $l_{t,\theta}$. Finally note that for an arbitrary point $(x_{0},y_{0})$ in the plane and for a given value $\theta$, there is a unique value of $t$ such that $(x_{0},y_{0})\in l_{t,\theta}$. The value of $t$ is given by the solution of the system \begin{equation*} \begin{cases} &x_{0}=t\cos{\theta}-s\sin{\theta}\\ &y_{0}=t\sin{\theta}+s\cos{\theta}, \end{cases} \end{equation*} that is \begin{equation*} \begin{cases} &t=x_{0}\cos{\theta}+y_{0}\sin{\theta}\\ &s=-x_{0}\sin{\theta}+y_{0}\cos{\theta}. \end{cases} \end{equation*} This formula will be used in the next sections to operate some changes of variables that will be used in finding an inversion formula of the Radon transform. \section{The Radon transform} \subsection{Definition and basic properties} The fundamental question of image reconstruction is: is it possible to reconstruct a function $f$, representing the attenuation coefficient of a cross section of a sample, starting from the value of the integral of $f$ along \emph{every} line $l_{t,\theta}$ in the plane? We will consider the integral of $f$ for any values of $t$ and $\theta$; in other words, given a function $f$ we associate to every point $(t,\theta)$ a number representing the value of the integral $\int_{l_{t,\theta}}{f}$.
This leads us to the definition of the Radon transform: \begin{definition} For a given function $f:\numberset{R}^{2}\rightarrow\numberset{R}$, the Radon transform of $f$ is defined by \begin{equation*} Rf(t,\theta)=\int_{l_{t,\theta}}{f\,ds}=\int_{\numberset{R}}{f(t\cos{\theta}-s\sin{\theta},t\sin{\theta}+s\cos{\theta})\,ds}, \end{equation*} $\forall\,t\in\numberset{R},\,\theta\in[0,\pi).$ \end{definition} So the Radon transform is an operator that, to a given function $f$ of the Cartesian coordinates $(x,y)$, associates a function $Rf$ of the polar coordinates $(t,\theta)$. \begin{example} \label{es: radonCirc} Consider a circle of radius $r>0$ and a function $f_{r}$ defined as follows: \begin{equation*} f_{r}(x,y)=\left\{ \begin{aligned} &1 \qquad \text{if} \ x^{2}+y^{2}\leq r^{2}\\ &0 \qquad \text{otherwise}, \end{aligned} \right. \end{equation*} since $x^{2}+y^{2}=t^{2}+s^{2}$, the Radon transform of $f_{r}$ is \begin{equation*} Rf_{r}(t,\theta)=\int_{\numberset{R}}{f_{r}(t\cos{\theta}-s\sin{\theta},t\sin{\theta}+s\cos{\theta})\,ds}=\int_{t^{2}+s^{2}\leq r^{2}}{1\,ds}, \end{equation*} so we get \begin{equation} \label{eq: radonCirc} Rf_{r}(t,\theta)=\left\{ \begin{aligned} &2\sqrt{r^{2}-t^{2}} \quad &\text{if} \ |t|\leq r\\ &0 \quad &\text{if} \ |t|>r, \end{aligned} \right. \end{equation} \end{example} \begin{proposition} The Radon transform is a linear operator: for two functions $f$ and $g$ and constants $\alpha$ and $\beta$, \begin{equation*} R(\alpha f+\beta g)=\alpha Rf+\beta Rg. \end{equation*} \end{proposition} \begin{proof} It follows from the linearity of the integral. \end{proof} \begin{example} Consider the function \begin{equation*} f(x,y)=\left\{ \begin{aligned} &\frac{1}{2} \qquad \text{if} \ x^{2}+y^{2}\leq r_{1}^{2}\\ &1 \qquad \text{if} \ r_{1}^{2}<x^{2}+y^{2}\leq r_{2}^{2}\\ &0 \qquad \text{otherwise}, \end{aligned} \right. \end{equation*} with $0<r_{1}<r_{2}$.
We observe that $f$ can be rewritten as $f=f_{r_{2}}-\frac{1}{2}f_{r_{1}}$, where $f_{r_{1}}$ and $f_{r_{2}}$ are defined as in Example \ref{es: radonCirc}; by equation \eqref{eq: radonCirc} and the linearity of the Radon transform, we get \begin{equation*} Rf(t,\theta)=Rf_{r_{2}}(t,\theta)-\frac{1}{2}Rf_{r_{1}}(t,\theta) =\left\{ \begin{aligned} &2\sqrt{r_{2}^{2}-t^{2}}-\sqrt{r_{1}^{2}-t^{2}} \ &\text{if} \ |t|\leq r_{1}\\ &2\sqrt{r_{2}^{2}-t^{2}} \ &\text{if} \ r_{1}<|t|\leq r_{2}\\ &0 \ &\text{if} \ |t|>r_{2}. \end{aligned} \right. \end{equation*} \end{example} \subsubsection{Domain of Radon transform} As we see from the definition, the Radon transform is a linear operator acting on functions and it involves improper integrals along lines that can be infinite for some functions. It is then natural to ask ourselves for what kind of functions the Radon transform is defined and in particular which is the domain of this operator, i.e. which space of functions is composed of all and only the functions that admit a finite Radon transform. It can be proved (see \cite{SIGU}) that the space we are looking for is the Schwartz space \begin{equation*} \mathcal{S}=\left\{f:\ \sup_{x}{\left|\ |x|^{m}P(\partial_{1},\partial_{2})f(x)\right|<\infty}, \ \forall\,m\in\numberset{N}, P\ \text{polynomial} \right\}. \end{equation*} of rapidly decreasing functions, but for the moment we will not consider this problem since the functions involved in medical imaging correspond to attenuation coefficients of finite size samples and therefore are compactly supported. In Chapter \ref{chap: kernelMethods} we will face the problem of how to compute Radon transforms of functions that do not belong to $\mathcal{S}$ and we will find there some expedient to overcome this obstacle. \subsection{Back projection} Our aim is to recover a function $f$, representing the attenuation-coefficient of a sample, from the values of its Radon transform $Rf$. We start by considering a point $(x_{0},y_{0})$ in the plane.
For any value of $\theta$ there exists one and only one value of $t$ such that the line $l_{t,\theta}$ passes through the point $(x_{0},y_{0})$. In particular, the value of $t$ is $t=x_{0}\cos{\theta}+y_{0}\sin{\theta}$. In practice, any X-ray beam passing through a point $(x_{0},y_{0})$ follows the line $l_{(x_{0}\cos{\theta}+y_{0}\sin{\theta}),\theta}$ for some angle $\theta$. So the Radon transform $Rf ( x_{0}\cos{\theta}+y_{0}\sin{\theta},\theta )$ takes into account the value of the attenuation coefficient $f(x_{0},y_{0})$. The first way one can try to recover $f(x_{0},y_{0})$ is to compute the average of the Radon transform along all lines passing through $(x_{0},y_{0})$, that is \begin{equation*} \frac{1}{\pi}\int_{0}^{\pi}{Rf{(x_{0}\cos{\theta}+y_{0}\sin{\theta},\theta)\,d\theta}}. \end{equation*} This leads us to the definition of the following transform, called \emph{back projection}: \begin{definition} Let $h=h(t,\theta)$ be a function in polar coordinates. The \emph{back projection} of $h$ at the point $(x,y)$ is given by \begin{equation*} Bh(x,y)=\frac{1}{\pi}\int_{0}^{\pi}{h{(x\cos{\theta}+y\sin{\theta},\theta)\,d\theta}}. \end{equation*} \end{definition} Back projection is a linear transform: \begin{proposition} The back projection is a linear transform, i.e. for all functions $h_{1}$ and $h_{2}$ and for all constants $c_{1}$ and $c_{2}$, we have \begin{equation*} B(c_{1}h_{1}+c_{2}h_{2})=c_{1}Bh_{1}+c_{2}Bh_{2}. \end{equation*} \end{proposition} We observe that the back projection \begin{equation*} BRf(x,y)=\frac{1}{\pi}\int_{0}^{\pi}{Rf{(x\cos{\theta}+y\sin{\theta},\theta)\,d\theta}} \end{equation*} of the Radon transform does not give us the value of $f(x,y)$. Indeed, the value $Rf(x\cos{\theta}+y\sin{\theta},\theta)$ represents the total accumulation of the attenuation-coefficient $f$ along a particular line. The integral $BRf$ is computing the average of those accumulated values. Hence it gives us a smoothed version of $f$.
\begin{example} \label{ex: backPro} Consider $f_{1}$ a function corresponding to a disc of radius $1/2$ centered at the origin with constant density 1, that is \begin{equation*} f_{1}(x,y)=\left\{ \begin{aligned} &1 \quad \text{if} \ x^{2}+y^{2}<\frac{1}{4}\\ &0 \quad \text{otherwise}. \end{aligned} \right. \end{equation*} Then, for each line passing through the origin, we have $Rf_{1}(0,\theta)=1$ and consequently $BRf_{1}(0,0)=1$. Now let $f_{2}$ be defined by \begin{equation*} f_{2}(x,y)=\left\{ \begin{aligned} &1 \quad \text{if} \ \frac{1}{4}<x^{2}+y^{2}<1\\ &0 \quad \text{otherwise}. \end{aligned} \right. \end{equation*} Again, for every line $l_{0,\theta}$ passing through the origin, we have $Rf_{2}(0,\theta)=2\left(1-\frac{1}{2}\right)=1$ and $BRf_{2}(0,0)=1$. Thus $BRf_{1}(0,0)=BRf_{2}(0,0)=1$, but $f_{1}(0,0)=1$ and $f_{2}(0,0)=0$. This shows that the back projection of the Radon transform does not necessarily reproduce the original function. \end{example} \section{The Filtered Back-Projection Formula} In this section we will discuss the relationships between the Radon transform, the back projection and the Fourier transform. Thanks to these formulas we will obtain the inversion of the Radon transform. In other words we will be able to get the values of a function $f$, representing for example an X-ray attenuation coefficient, starting from the values of its Radon transform. In the next paragraphs we will consider successive transforms of a function; for example in the \emph{central slice theorem} in Section \ref{subsec: centralSlice} we will consider the Fourier transform of the Radon transform. In all these cases we will assume that all the transforms are well defined, i.e. we will assume that a function $f$ belongs to the Schwartz space of rapidly decreasing functions $\mathcal{S}$. For example one can think of $f$ as a compactly supported function.
\subsection{The Central Slice Theorem}\label{subsec: centralSlice} The interaction between the Radon transform and the Fourier transform is given by the \emph{Central Slice Theorem}, also known as the \emph{Central Projection Theorem}. We recall that the $n$-dimensional Fourier transform of a function $f:\numberset{R}^{n}\rightarrow\numberset{R}$ is defined as \begin{equation*} (F_{n}f)(\omega)=\int_{\numberset{R}^{n}}{f(x)e^{-ix\cdot\omega}\,dx} \quad \forall\,\omega\in\numberset{R}^{n}, \end{equation*} where $i$ denotes the imaginary unit and $x\cdot\omega$ the standard inner product in $\numberset{R}^{n}$. For a function in polar coordinates $f(t,\theta)$, we consider the 1-dimensional Fourier transform $F=F_{1}$, applied only to the variable $t$, i.e. \begin{equation*} (Ff(t,\theta))(\omega)=\int_{\numberset{R}}{f(t,\theta)e^{-it\omega}\,dt}, \quad \omega\in\numberset{R}. \end{equation*} We can now state the Central Slice Theorem in the case $n=2$: \begin{theorem}[The Central Slice Theorem] For a function $f$ defined in the plane and for all real numbers $r$, $\theta$, \begin{equation*} F_{2}f(r\cos{\theta},r\sin{\theta})=F(Rf)(r,\theta). \end{equation*} \end{theorem} \proof The definition of the Fourier transform gives \begin{equation} F_{2}f(r\cos{\theta},r\sin{\theta})=\int_{-\infty}^{+\infty}{\int_{-\infty}^{+\infty}{f(x,y)e^{-ir(x\cos{\theta}+y\sin{\theta})}\,dx}\,dy}. \label{eq: centralSlice1} \end{equation} Consider now the change of variables \begin{equation*} \begin{cases} &x=t\cos{\theta}-s\sin{\theta}\\ &y=t\sin{\theta}+s\cos{\theta} \end{cases} \qquad \begin{cases} &t=x\cos{\theta}+y\sin{\theta}\\ &s=-x\sin{\theta}+y\cos{\theta}. \end{cases} \end{equation*} Note that the equation $t=x\cos{\theta}+y\sin{\theta}$ is exactly the equation of the line $l_{t,\theta}$.
Moreover $dxdy=dtds$; indeed \begin{equation*} \left| \begin{array}{cc} \frac{\partial x}{\partial t} & \frac{\partial x}{\partial s} \\ \frac{\partial y}{\partial t} & \frac{\partial y}{\partial s} \end{array} \right|= \left| \begin{array}{cc} \cos{\theta} & -\sin{\theta} \\ \sin{\theta} & \cos{\theta} \end{array} \right|= \cos^{2}{\theta}+\sin^{2}{\theta}=1. \end{equation*} The integral in \eqref{eq: centralSlice1} becomes then \begin{align*} &\int_{-\infty}^{+\infty}{\int_{-\infty}^{+\infty}{f(t\cos{\theta}-s\sin{\theta},t\sin{\theta}+s\cos{\theta})e^{-irt}\,ds}\,dt}=\\ &=\int_{-\infty}^{+\infty}{\left(\int_{-\infty}^{+\infty}{f(t\cos{\theta}-s\sin{\theta},t\sin{\theta}+s\cos{\theta})\,ds}\right)e^{-irt}\,dt}, \end{align*} where we have factored out the inner integral since the term $e^{-irt}$ does not depend on $s$. Now the inner integral in the last equation is exactly the definition of the Radon transform of the function $f$ evaluated at the point $(t,\theta)$. Thus the last integral equals \begin{equation*} \int_{-\infty}^{+\infty}{Rf(t,\theta)e^{-irt}\,dt}, \end{equation*} that is the definition of the 1-dimensional Fourier transform of $Rf$ at the point $(r,\theta)$. In conclusion \begin{equation*} F_{2}f(r\cos{\theta},r\sin{\theta})=F(Rf)(r,\theta). \end{equation*} \endproof \subsection{The Filtered Back-Projection} Applying the back projection to the Radon transform gives a smoothed version of the original function. The following theorem, called the \emph{Filtered Back-Projection Formula}, shows how to correct the smoothing effect and recover the original function. \begin{theorem}[The Filtered Back-Projection Formula]\label{thm: filteredBP} For every function $f$ and all real numbers $x$, $y$, \begin{equation} f(x,y)=\frac{1}{2}B\{F^{-1}[|r|F(Rf(r,\theta))]\}(x,y).
\label{eq: filteredBP} \end{equation} \end{theorem} \proof By the Fourier inversion theorem, for any function $f$ and any point in the plane $(x,y)$, we have \begin{equation*} f(x,y)=F_{2}^{-1}F_{2}f(x,y). \end{equation*} Applying the definition we have \begin{equation*} f(x,y)=\frac{1}{4\pi^{2}}\int_{-\infty}^{+\infty}{\int_{-\infty}^{+\infty}{F_{2}f(X,Y)e^{i(xX+yY)}\,dX}\,dY}. \end{equation*} We pass now from Cartesian coordinates $(X,Y)$ to polar coordinates $(r,\theta)$, where $X=r\cos{\theta}$ and $Y=r\sin{\theta}$, with $r\in\numberset{R}$ and $\theta\in[0,\pi]$. Because of this change of coordinates in the integral, we have $dXdY=|r|drd\theta$ and so \begin{equation*} f(x,y)=\frac{1}{4\pi^{2}}\int_{0}^{\pi}{\int_{-\infty}^{+\infty}{F_{2}f(r\cos{\theta},r\sin{\theta})e^{ir(x\cos{\theta}+y\sin{\theta})}|r|\,dr}\,d\theta}. \end{equation*} Applying the central slice theorem to the factor $F_{2}f(r\cos{\theta},r\sin{\theta})=F(Rf(r,\theta))$, we get \begin{equation*} f(x,y)=\frac{1}{4\pi^{2}}\int_{0}^{\pi}{\int_{-\infty}^{+\infty}{F(Rf)(r,\theta)e^{ir(x\cos{\theta}+y\sin{\theta})}|r|\,dr}\,d\theta}. \end{equation*} In the last equation, the inner integral is by definition, $2\pi$ times the inverse Fourier transform of the function $|r|F(Rf)(r,\theta)$, evaluated at the point $(x\cos{\theta}+y\sin{\theta},\theta)$. So we can write \begin{equation*} f(x,y)=\frac{1}{2\pi}\int_{0}^{\pi}{F^{-1}[|r|F(Rf)(r,\theta)](x\cos{\theta}+y\sin{\theta},\theta)\,d\theta}, \end{equation*} that is half of the back projection of the function $F^{-1}[|r|F(Rf)(r,\theta)]$. Hence we finally obtain the desired formula \begin{equation*} f(x,y)=\frac{1}{2}B\{F^{-1}[|r|F(Rf)(r,\theta)]\}(x,y). \end{equation*} \endproof Observe that the factor $|r|$ in the formula \eqref{eq: filteredBP} is fundamental. 
Indeed without this factor, the Fourier transform and its inverse would cancel out and the result would be simply the back projection of the Radon transform of $f$, which, as shown in Example \ref{ex: backPro}, does not allow us to recover $f$. The Filtered Back-Projection formula is the basis for image reconstruction. However it assumes that the values of $Rf(t,\theta)$ are known for all possible values $(t,\theta)$. In practice only a finite number of X-ray samples are taken and we must approximate an image from the resulting data. \section{Filtering} \label{sec: filter} Consider the Filtered Back-Projection formula in \eqref{eq: filteredBP}: \begin{equation*} f(x,y)=\frac{1}{2}B\{F^{-1}[|r|F(Rf(r,\theta))]\}(x,y) \end{equation*} and suppose there exists a function $\phi(t)$ such that $F\phi(r)=|r|$. In this case we could write \begin{equation*} |r|F(Rf)(r,\theta)=[F\phi\cdot F(Rf)](r,\theta). \end{equation*} By the properties of the Fourier transform we would have \begin{equation*} |r|F(Rf)(r,\theta)=F(\phi\ast Rf)(r,\theta), \end{equation*} hence \begin{align*} F^{-1}[|r|F(Rf)(r,\theta)]&=F^{-1}[F(\phi\ast Rf)(r,\theta)]=\\ &=(\phi\ast Rf). \end{align*} We could then write equation \eqref{eq: filteredBP} as \begin{equation} f(x,y)=\frac{1}{2}B(\phi\ast Rf)(x,y). \label{eq: modFilteredBP} \end{equation} In this way the formula for the reconstruction of $f$ would be simpler. The problem is that such a function $\phi$ does not exist. However, the previous discussion will be useful if we consider the data $Rf$ to be affected by noise, which is the case when we have to work with real data from the X-ray machine. Consider the function $|r|F(Rf)(r,\theta)$. The variable $r$ represents a frequency that is present in a signal, so if the Radon transform has a component at high frequency, this component is magnified by the factor $|r|$. Since noise has high frequency, this means that the noise present in the image is amplified and this effect corrupts the reconstructed image.
In order to obtain a formula less sensitive to noise, instead of $|r|$ we use a function, actually a \emph{low-pass filter}, such that for $r$ close to 0, it is near to the absolute-value function $|r|$, but vanishes if the value of $|r|$ is large. Moreover, in order to use the formula \eqref{eq: modFilteredBP}, in place of $|r|$ we consider a function of the form $A=F\phi$, where $A$ has compact support, or in other words, we consider $\phi$ \emph{band-limited function}. In this way we obtain an approximation of $f$: \begin{equation*} f(x,y)\approx\frac{1}{2}B(F^{-1}A\ast Rf)(x,y). \end{equation*} Typically the function $A$ is of the form $A(\omega)=|\omega|F(\omega)\chi_{[-L,L]}(\omega)$, for some $L>0$, where $\chi_{I}$ represents the characteristic function of the set $I$. The function $F$ is even and $F(0)=1$ in order to have an approximation of the function $|\cdot|$ near the origin and $\phi$ real valued. Typical low-pass filters used in medical imaging are: \begin{itemize} \item The \emph{Ram-Lak filter}: \begin{equation*} A_{1}(\omega)=|\omega|\chi_{[-L,L]}(\omega), \end{equation*} is simply the truncation of the absolute-value function to a finite interval. \item The \emph{Shepp-Logan filter}: \begin{equation*} \begin{aligned} A_{3}(\omega)&=|\omega|\left(\frac{\sin(\pi\omega/(2L))}{\pi\omega/(2L)}\right)\chi_{[-L,L]}(\omega)=\\ &=\left\{ \begin{aligned} &\frac{2L}{\pi}|\sin(\pi\omega/(2L))| & &\text{if} \ |\omega|\leq L\\ &0 & &\text{otherwise}. \end{aligned} \right. \end{aligned} \end{equation*} \item The \emph{low-pass cosine filter}: \begin{equation*} A_{2}(\omega)=|\omega|\cos(\pi\omega/(2L))\chi_{[-L,L]}. \end{equation*} \end{itemize} The plot of these filters in the case $L=10$ is shown in Figure \ref{fig: lowpass_filter}. 
\begin{figure} \caption{Main low pass filters used in medical imaging} \label{fig: lowpass_filter} \end{figure} \subsection{Filter resolution} Consider a function $\phi$, suppose $\phi\geq0$, with a single maximum value $M$ in $x=0$ and increasing for $x<0$, decreasing for $x>0$ (for example $\phi$ can be a Gaussian). For another function $f$, the filtered version of $f$ with $\phi$ is given by $f\ast\phi$. Let now the numbers $x_{1}, x_{2}$ be such that $x_{1}<0<x_{2}$ and $\phi(x_{1})=\phi(x_{2})=M/2$, half of the maximum value of $\phi$. The distance $x_{2}-x_{1}$ is called the \emph{full width half maximum} of the function $\phi$, in symbols $FWHM(\phi)$. The resolution of the filter defined by the convolution with $\phi$ is set to be equal to $FWHM(\phi)$. \begin{figure} \caption{Full width half maximum of a Gaussian} \label{fig: fwhm} \end{figure} To understand the reason for this definition, consider a function $f$ that consists of two unit impulses separated by a distance of $d$. It is easy to show that if $d>FWHM(\phi)$, then the graph of $f\ast\phi$ has two peaks, but if $d\leq FWHM(\phi)$, then the graph of $f\ast\phi$ has only one peak and so we have a loss of detail. So we conclude that the smallest distance between two different features of $f$ that can still be seen in the filtered signal $f\ast\phi$ is $FWHM(\phi)$. One should choose the filter function $\phi$ in accordance with the resolution required. Intuitively we can think that a function $\phi$ with a small $FWHM$ is spikier than a function having large $FWHM$ and has better resolution. The following examples can help to understand this better. \begin{example}[$FWHM$ of a Gaussian] Let $F(\omega)=e^{-B\omega^{2}}$, where $\omega\in\numberset{R}$ and $B$ is a positive constant. The maximum value of $F$ is $F(0)=1$. Half maximum is hence achieved for $e^{-B\omega^{2}}=1/2$, i.e. $\omega=\pm\sqrt{\ln(2)/B}$.
Therefore \begin{equation*} FWHM=2\sqrt{\ln(2)/B}. \end{equation*} \end{example} \begin{example}[$FWHM$ of the Lorentz signal] The Lorentz signal is given by \begin{equation*} g(\omega)=\frac{T_{2}}{1+4\pi^{2}T_{2}^ {2}(\omega-\omega_{0})^{2}}, \end{equation*} where $\omega\in\numberset{R}$ and $T_{2},\omega_{0}$ are constants. The maximum of $g$ is given by $g(\omega_{0})=T_{2}$ and $g(\omega)=T_{2}/2$ if and only if $\omega=\omega_{0}\pm1/(2\pi T_{2})$. Therefore \begin{equation*} FWHM=\frac{1}{\pi T_{2}}. \end{equation*} \end{example} \begin{figure} \caption{Gaussian filter ($B=2,\ FWHM=1.1774$) and Lorentz signal ($T_{2} \label{fig: example_fwhm} \end{figure} \section{Discrete problem} By Theorem \ref{thm: filteredBP} we know that if complete continuous data are available, then we can exactly reconstruct a function $f$ starting from its Radon transform. In particular this is possible thanks to the back projection formula \eqref{eq: filteredBP} \begin{equation*} f(x,y)=\frac{1}{2}B\{F^{-1}[|r|F(Rf)(r,\theta)]\}(x,y). \end{equation*} We have also seen, in Section \ref{sec: filter}, that in practice it is convenient to replace the absolute-value function with a low-pass filter $A$. Thus, we may use the approximation \begin{equation} f(x,y)\approx\frac{1}{2}B[F^{-1}(A\ast Rf)](x,y). \label{eq: approxBP2} \end{equation} In the practical implementation of this formula, we have to consider that only a finite number of values of $Rf(r,\theta)$ are measured by the X-ray machine. As a consequence of this fact we have to answer some questions about accuracy and computation. First of all we have to understand the sampling process, i.e. the process of computing only a discrete set of values of a continuous function; then we have to find the corresponding form of formula \eqref{eq: approxBP2} for discrete functions; and finally we will use the process of interpolation to obtain values of the function we can not directly measure.
\subsection{Phantoms} \label{sec: phantoms} Different choices of filters, interpolation methods, and other parameters, will give us different reconstruction of the same image, thus we need a technique for testing the accuracy of one particular image reconstruction algorithm. In order to have a good accuracy test, we should know the original image we want to reconstruct. Moreover the method should be independent from the possible noise present in the data, but should depend only on the algorithm used in the reconstruction. To solve this problem, Shepp and Logan (\cite{SHEP}) introduced the concept of \emph{mathematical phantom}. A mathematical phantom (or simply a phantom) is a simulated object whose structure is defined by mathematical formulas. Thus no errors occur in collecting the data from the object and when an algorithm is applied to produce a reconstruction of the phantom, all inaccuracies are due to the algorithm. In this way we can compare different algorithms meaningfully. Figure \ref{fig: shepp_phantom} shows the well-known \emph{Shepp-Logan phantom}. This phantom is widely used to test the quality of an image reconstruction algorithm since it is a good imitation of the human brain. \begin{figure} \caption{The Shepp-Logan phantom} \label{fig: shepp_phantom} \end{figure} \subsection{Sampling} Sampling is the process of computing the values of a function, or a signal defined in $\numberset{R}$, only on a discrete set of points $\{x_{k}\}_{k\in\numberset{Z}}$. For example, points $x_{k}$ can be taken with uniformly spacing, i.e. $x_{k}=k\cdot d$ for some positive number $d$, called the sampling spacing. The sampling spacing $d$ determines the smallest detail of $f$ that can be seen after sampling: if $d$ is small we have a better resolution, while bigger values of $d$ give us less resolution. 
On the other hand, small values of $d$ generate a bigger amount of data and make algorithm slower, so we want to find an optimal values of $d$ which is a compromise for this trade-off. If we think to a signal as a sum of sinusoidal waves, the narrowest detail in the signal is given by the wave with the shortest wavelength (maximum frequency). If the signal is band limited, the Nyquist Theorem \ref{thm: nyquist} below tells us that the signal can be completely recovered starting from its sampled version, provided that the sampling spacing is small enough. Suppose $f$ band limited, i.e. its Fourier transform is zero outside a finite interval: $Ff(\omega)=0$ for $|\omega|>L$. If we extend $Ff$ periodically out of $[-L,L]$, its Fourier coefficients are given by \begin{equation*} c_{n}=\frac{1}{2L}\int_{-L}^{L}{Ff(\omega)e^{-i\omega n\frac{\pi}{L}}\,d\omega}, \qquad n\in\numberset{Z}, \end{equation*} thus \begin{align*} 2\pi f\left(n\frac{\pi}{L}\right)&=2\pi F^{-1}Ff(n\frac{\pi}{L})=\int_{\numberset{R}}{Ff(\omega)e^{i\omega n\frac{\pi}{L}}\,d\omega}=\\ &=\int_{-L}^{L}{Ff(\omega)e^{i\omega n\frac{\pi}{L}}\,d\omega}=2Lc_{-n}. \end{align*} Assuming $Ff$ continuous we have \begin{equation*} Ff(\omega)=\sum_{n=-\infty}^{\infty}{c_{-n}e^{-i\omega n\frac{\pi}{L}}}=\frac{\pi}{L}\sum_{n=-\infty}^{\infty}f\left(n\frac{\pi}{L}\right)e^{-i\omega n\frac{\pi}{L}} \end{equation*} and so \begin{equation*} f(x)=F^{-1}Ff(x)=\sum_{n=-\infty}^{\infty}f\left(\pi\frac{n}{L}\right)\frac{\sin{(Lx-n\pi)}}{Lx-n\pi} \end{equation*} that is $f$ can be exactly reconstructed from the values $f(n\pi/L)$, $n\in\numberset{Z}$. The optimal sampling spacing is therefore $d=\frac{\pi}{L}$, since $L$ is the maximum value of $|\omega|$ in $Ff$, the smallest wavelength is $\frac{2\pi}{L}$, hence the optimal sampling distance is equal to half the size of the smallest detail present in the signal. 
This result is resumed in the following \begin{theorem}[Nyquist Theorem]\label{thm: nyquist} If $f$ is a square integrable and band limited function, i.e. $Ff(\omega)=0$ for all $|\omega|>L$, then for all $x\in\numberset{R}$ \begin{equation} f(x)=\sum_{n=-\infty}^{\infty}f\left(n\frac{\pi}{L}\right)\frac{\sin{(Lx-n\pi)}}{Lx-n\pi}. \label{eq: nyquist} \end{equation} \end{theorem} We observe that formula \eqref{eq: nyquist} involves an infinite series and that its general term $\sin{(Lx-n\pi)}/(Lx-n\pi)$ converges slowly. So we need a large number of samples for a good approximation. To address this, we can use a smaller sampling distance $\frac{\pi}{R},\ R>L$ to gain a series with better convergence. This process is called \emph{oversampling}. \subsection{Discrete filters} The image reconstruction formula \eqref{eq: approxBP2} involves the inverse Fourier transform of a low pass filter. In practice also this function will be sampled like the Radon transform. Since the filters we consider are band limited, we use the Nyquist theorem \ref{thm: nyquist} to know how many samples are needed to get an accurate representation of the filter. Here, we reconsider the filters introduced in section \ref{sec: filter}: \begin{itemize} \item The \emph{Shepp-Logan filter} is defined by \begin{equation*} \begin{aligned} A_{3}(\omega)&=|\omega|\left(\frac{\sin(\pi\omega/(2L))}{\pi\omega/(2L)}\right)\chi_{[-L,L]}(\omega)=\\ &\left\{ \begin{aligned} &\frac{2L}{\pi}|\sin(\pi\omega/(2L))| \qquad \text{if} \ |\omega|\leq L\\ &0 \qquad \text{otherwise}. \end{aligned} \right. \end{aligned} \end{equation*} for some $L>0$. 
The inverse Fourier transform of $A_{3}$ is a band limited function and is given by \begin{align*} F^{-1}A_{3}(x)&=\frac{1}{\pi}\int_{0}^{L}{\frac{2L}{\pi}\sin{(\pi\omega/(2L))}\cos{(x\omega)}\,d\omega}=\\ &=\frac{L}{\pi^{2}}\left[\left(\frac{\cos{(Lx-\pi/2)}}{x-\pi/(2L)}-\frac{\cos{(Lx+\pi/2)}}{x+\pi/(2L)} \right)- \right.\\ &\left.\left(\frac{1}{x-\pi/(2L)}-\frac{1}{x+\pi/(2L)}\right) \right]. \end{align*} According to the Nyquist theorem, $F^{-1}A_{3}$ can be reconstructed exactly from its values taken at distance $\pi/L$. Setting $x=n\pi/L$, for $n\in\numberset{Z}$, we get \begin{equation*} F^{-1}A_{3}(\pi n/L)=\frac{4L^{2}}{\pi^{3}(1-4n^{2})}. \end{equation*} \item The \emph{Ram-Lak filter} is given by \begin{equation*} A_{1}(\omega)=|\omega|\chi_{[-L,L]}(\omega). \end{equation*} Proceeding as in the previous case we find that the inverse Fourier transform of the Ram-Lak filter satisfies \begin{equation*} F^{-1}A_{1}(x)=\frac{1}{\pi}\left[\frac{Lx\sin{(Lx)}}{x^{2}}-\frac{2\sin^{2}{(Lx/2)}}{x^{2}} \right]. \end{equation*} Setting again $x=\pi n/L$ we obtain \begin{equation*} F^{-1}A_{1}(\pi n/L)=\frac{L^{2}}{2\pi}\left[\frac{2\sin{(\pi n)}}{\pi n}-\left(\frac{\sin{(\pi n/2)}}{\pi n/2}\right)^{2} \right]. \end{equation*} \item Finally we consider the \emph{low-pass cosine filter}: \begin{equation*} A_{2}(\omega)=|\omega|\cos(\pi\omega/(2L))\chi_{[-L,L]}. \end{equation*} The inverse Fourier transform of $A_{2}$, evaluated at multiples of the Nyquist distance, is \begin{equation*} F^{-1}A_{2}(\pi n/L)=\frac{2L^{2}}{\pi^{2}}\left[\frac{\pi\cos{(\pi n)}}{1-4n^{2}}-\frac{2(1+4n^{2})}{(1-4n^{2})^{2}} \right]. \end{equation*} \end{itemize} Figure \ref{fig: ift} shows the sampled version of the inverse Fourier transform of these filters. 
\begin{figure} \caption{Sampled inverse Fourier transform} \label{fig: ift_rlf} \label{fig: ift_slf} \label{fig: ift_cos} \label{fig: ift} \end{figure} \subsection{Discrete functions} \subsubsection{Discrete convolution} In order to implement formula \eqref{eq: approxBP2} we have to decide what is convolution of discrete functions. A discrete function is a mapping from the integers into the set of real numbers. For a discrete function $g$, we write $g_{n}$ for $g(n)$, for all $n\in\numberset{Z}$. \begin{definition} The discrete convolution of two discrete functions $f$ and $g$ is defined by \begin{equation*} (f\ast g)_{m}=\sum_{j=-\infty}^{+\infty}{f_{j}g_{m-j}} \quad \forall m\in\numberset{Z}. \end{equation*} \end{definition} The discrete convolution satisfies all principal properties of the standard convolution (e.g. commutativity and linearity). If only a finite set of values $\{f_{k}=f(dk): k=0,\ldots,N-1\}$ is known, like in real applications, there exist two different ways to extend the sequence to all integers: \begin{enumerate} \item Set $f_{k}=0$ for all $k\notin\{0,\ldots,N-1\}$; \item Extend the sequence to be periodic with period $N$, $f_{m}=f_{m+nN}$, where for $m\in\numberset{Z}$, $n$ is the only integer such that $m+nN\in\{0,\ldots,N-1\}$. We call such a function \emph{$N$-periodic discrete function}. \end{enumerate} The convolution of two $N$-periodic discrete functions is also a $N$-periodic discrete function, defined by \begin{equation*} (f\ast g)_{m}=\sum_{j=0}^{N-1}{f_{j}g_{m-j}} \quad \forall m\in\numberset{Z}. \end{equation*} Some problem can arise using discrete functions. For example if we are sampling a non periodic function, the periodic model is not the best to be used. But, even if the function is periodic, it may be not clear what the appropriate period is and so we might sample the function on a set of values that do not correspond to one period, then extending data to form a discrete periodic function, we have the wrong one. 
The solution to these problems is a technique called \emph{zero padding}. We take a finite set of values of a function $g$, then we pad the sequence with a lot of zeros and finally we form a periodic discrete function. The following theorem tells us that the convolution between a zero padded function and another discrete function gives the same result of "true" discrete convolution at least at the points where the values has been sampled. \begin{theorem} \label{thm: discrete_convolution} Let $f,g$ be discrete functions and suppose that $\exists K\in\numberset{N}$ such that $g_{k}=0$ for $k<0$ and $k\geq K$. Let $M\in\numberset{Z}$, $M\geq K-1$ and let $\tilde{f},\tilde{g}$ $(2M+1)$-periodic discrete functions defined by $\tilde{f}_{m}=f_{m}$, $\tilde{g}_{m}=g_{m}$ for $-M\leq m\leq M$. Then for all $m$ such that $0\leq m \leq K-1$ we have $(f\ast g)_{m}=(\tilde{f}\ast\tilde{g})_{m}$. \end{theorem} \begin{remark} The proof of this theorem is just an application of the definition of convolution for discrete functions. For details we refer the reader to \cite{BASIC}. \end{remark} \subsubsection{Discrete Radon transform} In the context of a CT scan, the X-ray machine does not access the attenuation coefficient along every line, but the Radon transform is sampled for finite number of angles $\theta\in[0,\pi)$ and, for each angle, for a finite number of values of $t$. Values of $\theta$ and $t$ are equally spaced and we consider the \emph{parallel beam geometry}: the X-ray machine rotates by a fixed angle and, at each angle, the beams form a set of parallel lines (Figure \ref{fig: parallel_beam}). \begin{figure} \caption{The parallel beam geometry} \label{fig: parallel_beam} \end{figure} If $N$ is the number of angles at which the machine takes scans, then the values of $\theta$ that occur are $\{ k\frac{\pi}{N}, \ k=1,\ldots,N-1\}$. 
Assume that, at each angle, the set of parallel beams is composed of $2M+1$ equally spaced lines and let $d$ be the distance between two lines, with the object to be scanned centered at the origin. Then the corresponding values of $t$ are $\{jd: \ j=-M,\ldots,M\}$. The continuous Radon transform $Rf$ is then replaced by its discrete counterpart $R_{D}f$, defined by \begin{equation*} R_{D}f_{j,k}=Rf(jd,k\pi/N) \end{equation*} for $j=-M,\ldots,M$ and $k=0,\ldots,N-1$. Theorem \ref{thm: discrete_convolution} above applies to the discrete convolution of the sampled band-limited function $F^{-1}A$ and the sampled Radon transform $R_{D}f$. Since the scanned object has finite size, we can set $R_{D}f(j,\theta)=0$ for $|j|$ sufficiently large. Thus with enough zero padding the discrete Radon transform can be extended to be periodic in the radial variable $jd$. For discrete function in polar coordinates, the discrete convolution is carried out in the radial variable only, so in the reconstruction formula \eqref{eq: approxBP2} we have: \begin{equation*} (F^{-1}A\ast R_{D}f)_{m,\theta}=\sum_{j=0}^{N-1}{F^{-1}A_{j}R_{D}}f_{m-j,\theta}. \end{equation*} \subsubsection{Discrete Fourier transform} \begin{definition} The \emph{discrete Fourier transform} of a $N$-periodic discrete function is another $N$-periodic discrete function $F_{D}f$ defined by \begin{equation*} (F_{D}f)_{j}=\sum_{k=0}^{N-1}{f_{k}e^{-i2\pi k j/N}}, \quad \text{for}\ j=0,\ldots,N-1 \end{equation*} and extended to be periodic for other values of $j$. The \emph{discrete inverse Fourier transform} of $f$ is given by \begin{equation*} (F^{-1}_{D}f)_{j}=\frac{1}{N}\sum_{k=0}^{N-1}{f_{k}e^{i2\pi k j/N}}, \quad \text{for}\ j=0,\ldots,N-1 \end{equation*} and extended to be periodic for other values of $j$. \end{definition} The following theorems show that the properties of the Fourier transform are still valid for its discrete version. 
\begin{theorem} For a discrete function $f$ with period $N$, \begin{equation*} F^{-1}_{D}(F_{D}f)_{n}=f_{n}, \quad \text{for all integers $n$}. \end{equation*} \end{theorem} \begin{theorem} For two $N$ discrete functions $f=\{f_{k}:\ 0\leq k\leq N-1\}$ and $g=\{g_{k}:\ 0\leq k\leq N-1\}$, we have \begin{itemize} \item $F_{D}(f\ast g)=(F_{D}f)(F_{D}g)$; \item $F_{D}(fg)=\frac{1}{N}(F_{D}f)\ast (F_{D}g)$; \item $(F_{D}\bar{f})_{j}=(\overline{F_{D}f})_{-j}$; \item Parceval equality: \begin{equation*} \sum_{j=0}^{N-1}{|f_{j}|^{2}}=\frac{1}{N}\sum_{j=0}^{N-1}{|(F_{D}f)_{j}|^{2}} \end{equation*} \end{itemize} \end{theorem} For a proof of these facts we suggest to see \cite{BASIC}. \subsubsection{Discrete back projection} In the continuous setting the back projection has been defined by \begin{equation*} Bh(x,y)=\frac{1}{\pi}\int_{0}^{\pi}{h(x\cos{\theta}+y\sin{\theta})\,d\theta}. \end{equation*} Now, in the discrete case, we replace the continuous variable $\theta$ with angles $k\pi/N$ for $k=0,\ldots,N-1$ and state the following \begin{definition} The \emph{discrete back projection} of a function $h$ is defined by \begin{equation*} B_{D}h(x,y)=\frac{1}{N}\sum_{k=0}^{N-1}{h(x\cos{k\frac{\pi}{N}}+y\sin{k\frac{\pi}{N}},k\pi/N)}. \end{equation*} \end{definition} In our case, $B_{D}$ has to be applied to $h=(F^{-1}_{D}A)\ast (R_{D}f)$ and the reconstruction grid within which the final image is to be presented is a rectangular array of pixels located at $(x_{m},y_{n})$, each of which is to be assigned a color or a gray-scale value. Hence $B_{D}$ needs the values of $h$ at points $(x_{m}\cos{k\pi/N}+y_{n}\sin{k\pi/N},k\pi/N)$, while the Radon transform is sampled at points $(jd,k\pi/N)$ arranged in a polar grid. The solution to this problem is \emph{interpolation}. \section{Interpolation} The process to obtain a function $f(x)$, $x\in\numberset{R}$ starting form a discrete set of values $f_{k}=f(x_{k})$, $k=1,\ldots,N+1$ is called interpolation. 
There exist several interpolation schemes. Here we give a short introduction to the most commonly used. \begin{itemize} \item \textbf{Nearest neighbor}: $f(x)=f_{k}$, where $x_{k}$ is the closest point to $x$. This is the simplest method but generates a discontinuous function; \item \textbf{Linear}: $f$ is obtained connecting successive points $(x_{k},f_{k}),\ (x_{k+1},f_{k+1})$ with segment: \begin{equation*} f(x)=\frac{f_{k+1}-f_{k}}{x_{k+1}-x_{k}}(x-x_{k})+f_{k} \quad \text{for}\ x\in[x_{k},x_{k+1}]; \end{equation*} \item \textbf{Cubic polynomial spline}: successive points $(x_{k},f_{k}),\ (x_{k+1},f_{k+1})$ are connected by apiece of a cubic polynomial. The pieces are joint together asking for $C^{2}$ continuity of the resulting curve. Also values of $f'(x_{k})$ are prescribed; \item \textbf{Lagrange interpolation}: $f$ is given by a polynomial of degree $N$: \begin{equation*} f(x)=\sum_{j=1}^{N+1}{f_{j}\frac{\prod_{k\neq j}{(x-x_{k})}}{\prod_{k\neq j}{(x_{j}-x_{k})}}}. \end{equation*} \end{itemize} We notice that the nearest neighbor interpolation can be written as \begin{equation*} I_{f}(x)=\sum_{m}f_m\chi_{[-\frac{1}{2},\frac{1}{2})}\left(\frac{x}{d}-m\right), \end{equation*} where $\chi_{J}$ denotes the characteristic function of a set $J$ and $f_m$ is the value of the function $f$ at the sample point $md$. Similarly the linear interpolation can be written $I_{f}(x)=\sum_{m}f_m\Lambda\left(\frac{x}{d}-m\right)$, with \begin{equation*} \Lambda(x)= \begin{cases} 1-|x| &\quad\text{if}\ |x|\leq1\\ 0 &\quad \text{if}\ |x|>1. \end{cases} \end{equation*} Generalizing this approach we define, for a weighting function $W$ satisfying certain conditions, the $W$-interpolation $I_{W}(f)$ of a discrete function $f$ is \begin{equation*} I_{W}(f)=\sum_{m}{f_mW\left(\frac{x}{d}-m\right)} \quad \ x\in\numberset{R}. \end{equation*} We want $I_{W}f(kd)=f_k$. Then we choose $W$ such that $W(0)=1$ and $W(m)=0$ for all $m\in\numberset{Z}$, $m\neq0$. 
Moreover, if we want to preserve also the integral, we ask $W$ to be such that \begin{equation*} \int_{\numberset{R}}{I_{W}(f)(x)\,dx}=d\sum_{m}{f_{m}}. \end{equation*} Then, $W$ should satisfy \begin{equation*} \int_{\numberset{R}}{{W}(u)\,du}=1. \end{equation*} \begin{remark}[Interpolation and convolution]\label{rk: intconv} Suppose that a discrete function $g$ is given by the discrete convolution $g=\phi\ast f$ and let $W$ be a weighting function. Then the $W$-interpolation \begin{equation*} I_{W}g(x)=I_{W}(\phi\ast f)(x)\sum_{k}{\sum_{m}{\phi(m-k)W\left(\frac{x-kd}{d}-(m-k)\right)f(k)}} \end{equation*} \end{remark} can be approximated as \begin{equation*} I_{W}(\phi\ast f)(x)\approx \sum_{k}{I_{W}(\phi)(x-kd)f(k)}, \end{equation*} that is, we can approximate the interpolation of $\phi\ast f$ with a weighted sum of values $f(k)$ and the interpolation $I_{W}(\phi)$ of $\phi$ at points $x-kd$ (cfr. \cite{BASIC}, pages 82-86). \section{Discrete image reconstruction: Algorithms} Having examined the discrete version of all elements in the formula \eqref{eq: approxBP2}, we have now all the necessary tools for approximating $f$ starting from a discrete set of samples of its Radon transform. \begin{enumerate} \item \textbf{Image reconstruction algorithm I}. Let $I$ be the interpolation of $(F^{-1}_{D}A)\ast(R_{D}f)$, so that $I(t,k\pi/N)$ is interpolated from the computed values $(F^{-1}_{D}A)\ast(R_{D}f)(jd,k\pi/N)$. Then for all points $(x_{m},y_{n})$ in the grid, we approximate \begin{align*} f(x_{m},y_{n})&\approx\frac{1}{2}B_{D}I(x_{m},y_{n})=\\ &=\frac{1}{2N}\sum_{k=0}^{N-1}{I\left(x_{m}\cos{\left(k\frac{\pi}{N}\right)}+y_{n}\sin{\left(k\frac{\pi}{N}\right)},k\frac{\pi}{N}\right)}. \end{align*} \item \textbf{Image reconstruction algorithm II}. 
Instead of interpolating the filtered Radon transform, we interpolate the filter and then, as shown in remark \ref{rk: intconv}, we form a weighted sum of the sampled Radon transform: \begin{align*} &W(k)=\sum_{j}{I_{F^{-1}A}\left(x_{m}\cos{\left(k\frac{\pi}{N}\right)}+y_{n}\sin{\left(k\frac{\pi}{N}\right)}-jd,k\frac{\pi}{N}\right)}R_{D}f(jd,k\frac{\pi}{N}) \end{align*} \begin{align*} &f(x_{m},y_{n})\approx\frac{1}{2N}\sum_{k=0}^{N-1}{W(k)}. \end{align*} \end{enumerate} We conclude this chapter applying the reconstruction formula in a particular case. \subsubsection{Example: crescent-shaped phantom} \label{subsec: crescent_shape} We want to apply the reconstruction algorithm introduced in the previous section to a particular phantom called \emph{crescent-shaped phantom} (Figure \ref{fig: phantom}) whose analytic expression is \begin{equation*} f(x,y)=\left\{ \begin{aligned} &1 & &\text{if}\ x^{2}+y^{2}\leq\frac{1}{4}\,\wedge\,(x-\frac{1}{8})^{2}+y^{2}>\frac{9}{64}\\ &\frac{1}{2} & &\text{if}\ (x-\frac{1}{8})^{2}+y^{2}\leq\frac{9}{64}\\ &0 & &\text{if}\ x^{2}+y^{2}>\frac{1}{4}. \end{aligned} \right. \end{equation*} In order to compute samples of the Radon transform, we calculate $Rf$ analytically. We observe that $f$ can be written as a sum of two functions: $f=f_{1}-\frac{1}{2}f_{2}$, where $f_{1}$ and $f_{2}$ are given by \begin{equation*} f_{1}(x,y)=\left\{ \begin{aligned} &1 & &\text{if}\ x^{2}+y^{2}\leq\frac{1}{4}\\ &0 & &\text{otherwise} \end{aligned} \right.\qquad f_{2}(x,y)=\left\{ \begin{aligned} &1 & &\text{if}\ (x-\frac{1}{8})^{2}+y^{2}\leq\frac{9}{64}\\ &0 & &\text{otherwise} \end{aligned} \right. \end{equation*} for all $(x,y)\in\numberset{R}^{2}$. By the linearity of the Radon transform, we have \begin{equation} \boxed{ Rf=Rf_{1}-\frac{1}{2}Rf_{2}. 
} \label{eq: linearity} \end{equation} We know (see example \ref{es: radonCirc}) that for all fixed value $r>0$, the Radon transform of the function \begin{equation*} f_{r}(x,y)=\left\{ \begin{aligned} &1 & &\text{if}\ x^{2}+y^{2}\leq r^{2}\\ &0 & &\text{otherwise}, \end{aligned} \right.\qquad \end{equation*} is given by \begin{equation} Rf_{r}(t,\theta)=\left\{ \begin{aligned} &2\sqrt{r^{2}-t^{2}}& &\text{if}\ |t|\leq r\\ &0 & &\text{if}\ |t|>r \end{aligned} \right. \label{eq: radon_fr} \end{equation} and the function $f_{1}$ equals $f_{r_{1}}$ for $r_{1}=\frac{1}{2}$. So we can use equation \eqref{eq: radon_fr} to compute its Radon transform. Function $f_{2}$ is not of the form $f_{r}$ for some $r$, but can be obtained shifting such a function. More precisely $f_{2}(x,y)=f_{r_{2}}(x-c,y)$, where $r_{2}=\frac{3}{8}$ and $c=\frac{1}{8}$. So we can use the shift property of the Radon transform for computing $Rf_{2}$: \begin{theorem}[Shift property of the Radon transform] \label{thm: shiftProp} Let $g:\numberset{R}^{2}\rightarrow\numberset{R}$ a function and let $G(t,\theta)=Rg(t,\theta)$ it's Radon transform. If \begin{equation*} h(x,y)=g(x-c_{x},y-c_{y}), \end{equation*} then the Radon transform $H(t,\theta)=Rh(t,\theta)$ of $h$ is given by \begin{equation} H(t,\theta)=G(t-c_{x}\cos{\theta}-c_{y}\sin{\theta},\theta). \label{eq: shift} \end{equation} \end{theorem} See \cite{PEY} for more details about Radon transform shifting properties. Thus, from \eqref{eq: radon_fr} and \eqref{eq: shift}, we gain $Rf_{2}(t,\theta)=Rf_{r_{2}}(t-c\cos{\theta},\theta)$, that is \begin{equation*} Rf_{2}(t,\theta)=\left\{ \begin{aligned} &2\sqrt{r_{2}^{2}-(t-c\cos{\theta})^{2}}& &\text{if}\ |t-c\cos{\theta}|\leq r_{2}\\ &0 & &\text{if}\ |t-c\cos{\theta}|>r_{2}. \end{aligned} \right. 
\end{equation*} By equation \eqref{eq: linearity} we conclude that \begin{equation*} Rf(t,\theta)=\left\{ \begin{aligned} &2\sqrt{r_{1}^{2}-t^{2}} & &\text{if} \ |t|\leq r_{1} \, \wedge \, |t-c\cos{\theta}|>r_{2}\\ &2\sqrt{r_{1}^{2}-t^{2}}-\sqrt{r_{2}^{2}-(t-c\cos{\theta})^{2}}& &\text{if}\ |t-c\cos{\theta}|\leq r_{2}\\ &0 & &\text{if}\ |t|>r_{1}. \end{aligned} \right. \end{equation*} Figure \ref{fig: radon} shows the spectra of this function. \begin{figure} \caption{Crescent-shaped phantom and its Radon transform } \label{fig: phantom} \label{fig: radon} \label{fig: phantomRadon} \end{figure} Suppose now $M=20$ and $N=18$. We sample the domain $[-1,1]\times[0,\pi)$ with values $t_{k}=kd$, $k=-M,\ldots,M$ and $\theta_{j}=j\frac{\pi}{N}$, for $j=0,\ldots,N-1$, where $d=0.05$, obtaining the discrete Radon transform $Rf(kd,j\pi/N)$. We consider \emph{Shepp-Logan filter} as low pass filter and we consider $\frac{1}{2L}$ as sampling spacing. Indeed, when we used the continuous Fourier transform, we considered $\frac{\pi}{L}$ as sampling spacing, in accordance with Nyquist theorem. Now, to compensate the additional factor $2\pi$ in the definition of the discrete inverse Fourier transform, we use $\frac{\pi}{2\pi L}=\frac{1}{2L}$. To match this spacing with that of the Radon transform, we want $\frac{1}{2L}=d=0.05$ and so $L=10$, then \begin{equation*} A(\omega)= \begin{cases} \frac{\pi}{20}|\sin{(0.05\pi\omega)}| &\quad \ |\omega|\leq10\\ 0 &\quad \ |\omega|>10 \end{cases} \end{equation*} and \begin{equation*} (F_{D}^{-1}A)_{n}=\frac{400}{\pi^{3}(1-4n^{2})}. \end{equation*} Next we compute the discrete convolution $\gamma=F_{D}^{-1}A\ast Rf$: for $-20\leq m\leq20$, $0\leq j\leq17$ \begin{equation*} \gamma(m,j\pi/N)=\sum_{k=-20}^{20}{(F_{D}^{-1}A)_{m-k}Rf(0.05k,j\pi/N)}. \end{equation*} Applying \emph{linear interpolation} to the variable $t$ of $\gamma$, we obtain \begin{equation*} h(t,j\pi/N)=\sum_{m=-20}^{20}{\gamma(m,j\pi/N)\Lambda(20t-m)}, \quad t\in[-1,1]. 
\end{equation*} Finally we use the \emph{reconstruction algorithm I} and we have the approximation \begin{equation*} f(x,y)\approx\frac{1}{36}\sum_{j=0}^{17}{h(x\cos{(j\pi/N)}+y\sin{(j\pi/N)},j\pi/N)}. \end{equation*} Figure \ref{fig: reconstCshape} shows the reconstructed function. \begin{figure} \caption{Reconstruction of $f$} \label{fig: reconstCshape} \end{figure} \chapter{Algebraic Reconstruction Techniques}\label{chap: art} The Fourier based methods we have seen so far are the algorithms used in modern CT scanners. Another approach to image reconstruction is based on linear algebra. Algorithms that use this approach are known as \emph{algebraic reconstruction techniques}, or ART. For example, the first CT scanner designed in the late 1960s by Godfrey Hounsfield used these methods. While the Fourier transform approach solves the continuous problem and then passes to the discrete one, ART considers the discrete problem from the beginning. Let us start by recalling that an image is given by a grid of pixels (picture elements) and to each pixel is assigned a color (or a gray scale value) that represents the value of the attenuation coefficient in the region of the given pixel. Suppose that our image is formed by $K\times K$ pixels, each of them representing a small square in the plane. Define the pixel basis functions $b_{1},b_{2},\ldots,b_{K^{2}}$ as \begin{equation*} b_{i}(x,y)=\left\{ \begin{aligned} &1 & &\text{if $(x,y)$ lies in pixel number $i$}\\ &0 & &\text{otherwise,} \end{aligned} \right. \end{equation*} and let $x_{i}$ be the color value of the $i$-th pixel. Then the resulting image can be written as \begin{equation*} I(x,y)=\sum_{i=1}^{K^{2}}{b_{i}(x,y)x_{i}}. \end{equation*} Applying the Radon transform to both sides, we get \begin{equation*} RI(t,\theta)=\sum_{i=1}^{K^{2}}{Rb_{i}(t,\theta)x_{i}}. \end{equation*} The X-ray machine gives us the values of the attenuation coefficient function $f$ for some finite set of lines $l_{t_{j},\theta_{j}}$, $j=1,\ldots,J$. 
Let us denote by $p_{j}=Rf(t_{j},\theta_{j})$ these values. We want to approximate the attenuation coefficient $f$ with image $I$, so we set $p_{j}=RI(t_{j},\theta_{j})$ and $r_{j,i}=Rb_{i}(t_{j},\theta_{j})$, for $j=1,\ldots,J$ and $i=1,\ldots,K^{2}$, and we ask that \begin{equation} p_{j}=\sum_{i=1}^{K^{2}}{x_{i}r_{j,i}}, \quad j=1,\ldots,J. \label{eq: system} \end{equation} Thus we obtain a system of $J$ linear equations and $K^{2}$ unknowns. This system is very large but sparse and typically overdetermined or underdetermined. We then need specific techniques for the solution of such a system. Before looking at these methods, let us see in detail how to generate the linear system \eqref{eq: system}. \section{Generation of the linear system} In this section we consider the problem of generating the linear system $Ax=p$, i.e. we want to compute $A$ and $p$ starting from the values of the Radon transform $Rf$ of an attenuation coefficient function $f$ obtained from an X-ray machine working with parallel beam geometry. We know the values $Rf(t_{k},\theta_{l})$ with $t_{k}=kd$, $k=-M,\ldots,M$ and $\theta_{l}=l\frac{\pi}{N}$, $l=0,\ldots,N-1$. We want to compute $A=(r_{j,i})\ i=1,\ldots,K^{2}\ j=1,\ldots,J$ where $K^2$ is the dimension of the reconstructed gray-scale image $I=\{PX_{i}\}_{i=1,\ldots,K^{2}}$, with the components $x_{i}$ of the solution of the system representing the color of pixel $PX_{i}$; $J=(2M+1)N$ is the number of samples $(t_{j},\theta_{j})$ on which $Rf$ is measured and $r_{j,i}=Rb_{i}(t_{j},\theta_{j})$ is the Radon transform of the $i$-th pixel-basis function $b_{i}$, computed at point $(t_{j},\theta_{j})$, with $b_{i}$ defined by \begin{equation*} b_{i}(x,y)=\left\{ \begin{aligned} &1 &\text{if} \ (x,y)\in PX_{i}\\ &0 &\text{if} \ (x,y)\notin PX_{i} \end{aligned} \right. . 
\end{equation*} In order to solve this problem we assume that: \begin{enumerate} \item The support of the function $f$ and the samples points $(t_{j},\theta_{j})$ are contained in the unit square $[-1,1]\times[-1,1]$, this implies that $d=\frac{1}{M}$; \item The reconstructed image $I$ also lies in $[-1,1]\times[-1,1]$ and its center is at the origin $(0,0)$. If we consider $I$ as a matrix $I(r,s)$, $r=1,\ldots,K$, $s=1,\ldots,K$, whose components are the values $x_{i}$ of pixels $PX_{i}$, the center is the pixel of indexes $r=\lfloor\frac{K+1}{2}\rfloor$, $s=\lfloor\frac{K+1}{2}\rfloor$. We denote $c=\lfloor\frac{K+1}{2}\rfloor$; \item The $K^{2}$ pixels in $I$ are ordered as follows: \begin{equation*} I=\left( \begin{array}{cccc} x_{1} & x_{2} & \cdots &x_{K}\\ x_{K+1} & x_{K+2} & \cdots & x_{2K}\\ \vdots & & & \vdots\\ x_{K(K-1)+1} & x_{K(K-1)+2} & \cdots &x_{K^{2}}\\ \end{array} \right); \end{equation*} \item Considering $I$ as a function $I:\numberset{R}^2\rightarrow\numberset{R}$, i.e. $I(x,y)=\sum_{i}{b_{i}(x,y)x_{i}}$, the Cartesian coordinates of pixels are $PX_{i}=[x_{i},x_{i+1})\times(y_{i+1},y_{i}]$. Thus, we are considering a top-down, left-right enumeration of vertexes, in accord with the matrix indexing. Note that $PX_{i}$ includes the top horizontal side and the left vertical side, but not the right and the bottom sides (see Figure \ref{fig: pixel}), exception are the pixels in the last row and in the last column of $I$ that include all sides. We identify a pixel with the coordinates of its top-left vertex $(x_{i},y_{i})$; \begin{figure} \caption{Coordinates of a pixel} \label{fig: pixel} \end{figure} \item X-ray beams has zero width. \end{enumerate} Using these assumptions, we find that pixel $PX_{i}$, determinate by coordinates $(x_{i},y_{i})$, given by \begin{equation*} \boxed{ x_{i}=\frac{\bar{x}_{i}}{c}-1; \qquad y_{i}=\frac{-\bar{y}_{i}}{c}+1. 
} \end{equation*} In particular \begin{itemize} \item $PX_{i}=[x_{i},x_{i+1})\times(y_{i+1},y_{i}]$, $x_{i+1}=x_{i}+c^{-1}$, $y_{i+1}=y_{i}-c^{-1}$; \item If $i\in\{K,2K,\ldots,K^{2}\}\ \Rightarrow\ PX_{i}=[x_{i},x_{i+1}]\times(y_{i+1},y_{i}]$; \item If $i\in\{K(K-1)+1,\ldots,K^{2}\}\ \Rightarrow\ PX_{i}=[x_{i},x_{i+1})\times[y_{i+1},y_{i}]$. \end{itemize} \section{Construction of $A$ and $p$} Assume that we know $(t_{j},\theta_{j})$ and pixel coordinates $PX_{i}=[x_{i},x_{i+1})\times(y_{i+1},y_{i}]$. What we want to do now is to compute $r_{j,i}=Rb_{i}(t_{j},\theta_{j})$. Let \begin{equation*} A=(r_{j,i})=\left( \begin{array}{cccc} r_{11} & r_{12} & \cdots & r_{1K^{2}}\\ r_{21} & r_{22} & \cdots & r_{2K^{2}}\\ \vdots & & & \vdots\\ r_{J1} & r_{J2} & \cdots & r_{JK^{2}} \end{array} \right)=(A^{1},A^{2},\ldots,A^{K^{2}}). \end{equation*} and $p=Rf(t_{j},\theta_{j})$. For a fixed $i\in\{1,\ldots,K^{2}\}$ we define $\tilde{A}^{i}\in\numberset{R}^{2M+1}\times\numberset{R}^{N}$ to be the matrix such that $\tilde{A}^{i}(r,s)=Rb_{i}(rd,s\frac{\pi}{N})$, with $r=-M,\ldots,M,\ s=0,\ldots,N-1$, i.e. the columns of $\tilde{A}^{i}$ represent values of $Rb_{i}$ for a fixed value of $\theta$: \begin{equation*} \tilde{A}^{i}=\left( \begin{array}{ccc} Rb_{i}(-Md,0) & \cdots & Rb_{i}(-Md,(N-1)\frac{\pi}{N})\\ \vdots & & \vdots\\ Rb_{i}(Md,0) & \cdots & Rb_{i}(Md,(N-1)\frac{\pi}{N}) \end{array} \right). \end{equation*} If $R\in\numberset{R}^{2M+1}\times\numberset{R}^{N}$ is the matrix containing data $Rf(t_{k},\theta_{l})$ and if we set \begin{equation*} p=R(:) \end{equation*} then we have that the $i$-th column of $A$ is \begin{equation*} A^{i}:=\tilde{A}^{i}(:), \end{equation*} where the operator $(:)$ indicates the analogous Matlab operator (see \cite{MATLAB}). The problem can therefore be reduced to computation of the columns of $\tilde{A}^{i}$ for a fixed $i$. Let $i\in\{1,\ldots,K^{2}\}$ and $\theta\in[0,\pi)$ be fixed. 
For $t\in\numberset{R}$ let $r(t)=Rb_{i}(t,\theta)$. We observe that since $b_{i}\equiv1$ inside pixel $PX_{i}$ and $b_{i}\equiv0$ outside (and since we assume X-ray beams to have zero width), the value of $r$ is the length of the intersection between line $l_{t,\theta}$ and $PX_{i}$. Indeed: \begin{align*} Rb_{i}(t,\theta)&=\int_{\numberset{R}}{b_{i}(t\cos{\theta}-s\sin{\theta},t\sin{\theta}+s\cos{\theta})\,ds}=\\ &=\int_{\{s\in\numberset{R}: (t\cos{\theta}-s\sin{\theta},t\sin{\theta}+s\cos{\theta})\in PX_{i}\}}{ds}=m(C), \end{align*} where $m$ denotes the Lebesgue measure on $\numberset{R}$ and $C=\{s\in\numberset{R}:\ t\cos{\theta}-s\sin{\theta}\in[x_{i},x_{i+1}),\ t\sin{\theta}+s\cos{\theta}\in(y_{i+1},y_{i}] \}=l_{t,\theta}\cap PX_{i}$. To determine for which values of $t$ the line $l_{t,\theta}$ lies in $PX_{i}$, we consider lines that pass through the vertexes of $PX_{i}$. Let \begin{align*} &P_{1}=(x_{i},y_{i}) & &P_{2}=(x_{i},y_{i+1}) & &P_{3}=(x_{i+1},y_{i+1}) & &P_{4}=(x_{i+1},y_{i}) \end{align*} and let $t_{h}$ be such that the line $l_{t_{h},\theta}$ passes through point the $P_{h},\ h=1,2,3,4$ (Figure \ref{fig: calcolo_t0}). Since $(x_{0},y_{0})\in l_{x_{0}\cos{\theta}+y_{0}\sin{\theta},\theta}$, we have \begin{align*} &t_{1}=x_{i}\cos{\theta}+y_{i}\sin{\theta} & &t_{2}=x_{i}\cos{\theta}+y_{i+1}\sin{\theta} \\ &t_{3}=x_{i+1}\cos{\theta}+y_{i+1}\sin{\theta} & &t_{4}=x_{i+1}\cos{\theta}+y_{i}\sin{\theta} \end{align*} Moreover, to determine the length of the intersection $l_{t,\theta}\cap PX_{i}$, we need to know the intersections between $l_{t,\theta}$ and the sides of $PX_{i}$. Let \begin{align*} &E_{12}=l_{t,\theta}\cap P_{1}P_{2} & &E_{23}=l_{t,\theta}\cap P_{2}P_{3} & &E_{34}=l_{t,\theta}\cap P_{3}P_{4} & &E_{14}=l_{t,\theta}\cap P_{1}P_{4}. 
\end{align*} Let us compute for example $E_{12}$: \begin{equation*} l_{t,\theta}= (t\cos{\theta}-s\sin{\theta},t\sin{\theta}+s\cos{\theta})=(x(s),y(s)) \end{equation*} the line through $P_{1}P_{2}$ is $x=x_{i}$, so we want $x(s)=x_{i}$, $\Rightarrow$ \begin{align*} &s=\frac{t\cos{\theta}-x_{i}}{\sin{\theta}}, &y(s)=t\sin{\theta}+\frac{t\cos{\theta}-x_{i}}{\sin{\theta}}\cos{\theta}=\frac{t-x_{i}\cos{\theta}}{\sin{\theta}}. \end{align*} In a similar way we find $E_{23},E_{34},E_{14}$: \begin{align*} &E_{12}=\left(x_{i},\frac{t-x_{i}\cos{\theta}}{\sin{\theta}}\right) & &E_{23}=\left(\frac{t-y_{i+1}\sin{\theta}}{\cos{\theta}},y_{i+1}\right)\\ &E_{34}=\left(x_{i+1},\frac{t-x_{i+1}\cos{\theta}}{\sin{\theta}}\right) & &E_{14}=\left(\frac{t-y_{i}\sin{\theta}}{\cos{\theta}},y_{i}\right) \end{align*} Of course these values are different in the limit case $\cos{\theta}=0$ or $\sin{\theta}=0$, i.e. for $\theta=0,\pi/2$. In these cases intersections between $l_{t,\theta}$ and $PX_{i}$ coincide with the vertexes $P_{h}$. Depending on the values of $\theta$, the behavior of $l_{t,\theta}$ and $l_{t_{h},\theta}$ changes (see Figure \ref{fig: calcolo_t0}). \begin{figure} \caption{Lines $t_{h} \label{fig: calcolo_t1} \label{fig: calcolo_t2} \label{fig: calcolo_t0} \end{figure} Therefore one should distinguish the cases $\theta\in[0,\frac{\pi}{4}),\ \theta\in[\frac{\pi}{4},\frac{\pi}{2}),\ \theta\in[\frac{\pi}{2},\frac{3}{4}\pi)\ \text{and}\ \theta\in[\frac{3}{4}\pi,\pi)$. Consider for example $\theta\in[0,\frac{\pi}{4})$. 
In this case we have $t_{2}\leq t_{1}<t_{3}\leq t_{4}$, therefore \begin{align*} &\text{if} \ t<t_{2}\, \vee\, t\geq t_{4} \ \Rightarrow \ l_{t,\theta}\cap PX_{i}=\emptyset\\ &\text{if} \ t_{2}\leq t<t_{4} \ \Rightarrow \ l_{t,\theta}\cap PX_{i}=AB \end{align*} where $AB$ is given by \begin{enumerate} \item $AB=E_{23}E_{12} \quad$ if $t_{2}\leq t<t_{1}$; \item\label{case: 1b} $AB=E_{23}E_{14}\quad$ if $t_{1}\leq t\leq t_{3}$; \item $AB=E_{34}E_{14}\quad$ if $t_{3}<t<t_{4}$; \item\label{case: 1d} In the limit case $i\in\{K,2K,\ldots,K^{2}\}$, i.e. if we are considering a pixel in the last column of $I$, we have to take into account that side $P_{3}P_{4}\in PX_{i}$, so \begin{equation*} \text{if} \ x_{i}=x_{K^{2}}\, \wedge \, \theta=0 \, \wedge \, t=t_{4} \ \Rightarrow r=\overline{AB}=\overline{P_{3}P_{4}}=c^{-1}. \end{equation*} \end{enumerate} We notice that in case \ref{case: 1b}. $AB(t)=AB(t_{1})=AB(t_{3})=const$. Moreover for $\theta=0$, only cases \ref{case: 1b}. and \ref{case: 1d}. are possible, so we do not need to compute $1/\sin{\theta}$. The determination of $AB$ in the other 3 cases is similar. What remains to do now is to compute the length of $AB$. Let us start considering $AB=E_{12}E_{23}$ (see Figure \ref{fig: calcoloAB}). The coordinates of the points are \begin{align*} &E_{12}=\left(x_{i},\frac{t-x_{i}\cos{\theta}}{\sin{\theta}}\right) & &E_{23}=\left(\frac{t-y_{i+1}\sin{\theta}}{\cos{\theta}},y_{i+1}\right) \end{align*} We notice that the triangle $E_{12}\widetriangle{P}_{2}E_{23}$ has a right angle at $P_{2}$ and that, by definition, $\theta=P_{2}\hat{E}_{12}E_{23}$.
\begin{figure} \caption{Computation of $AB$ } \label{fig: calcolo_AB1} \label{fig: calcolo_AB2} \label{fig: calcoloAB} \end{figure} By trigonometry in this right triangle \begin{equation*} \overline{E_{12}E_{23}}=\frac{\overline{P_{2}E_{23}}}{\sin{\theta}}, \end{equation*} thus \begin{align*} P_{2}E_{23}=&x_{E_{23}}-x_{P_{2}}=\frac{t-y_{i+1}\sin{\theta}}{\cos{\theta}}-x_{i}=\\ &=\frac{t-y_{i+1}\sin{\theta}-x_{i}\cos{\theta}}{\cos{\theta}}=\\ &=\frac{t-t_{2}}{\cos{\theta}}. \end{align*} We conclude that \begin{equation*} \overline{E_{12}E_{23}}=\frac{t-t_{2}}{\sin{\theta}\cos{\theta}}, \qquad \text{for}\ \theta\in(0,\frac{\pi}{2}). \end{equation*} We now consider $AB=E_{23}E_{14}$: as stated before, for all $t\in[t_{1},t_{3}]$, $\theta\in[0,\frac{\pi}{4})$, $E_{23}E_{14}(t)=E_{23}E_{14}(t_{1})$, hence \begin{equation*} \overline{E_{23}E_{14}}=\frac{\overline{P_{1}P_{2}}}{\cos{\theta}}=\frac{c^{-1}}{\cos{\theta}}. \end{equation*} We can reduce the number of cases if we order $t_{h}$, $h=1,2,3,4$ in increasing order: $t_{\min}\leq t_{\min2}\leq t_{\max2}\leq t_{\max}$. Thus, we conclude that \begin{itemize} \item for $\theta\neq0,\frac{\pi}{2}$ we have \begin{equation} Rb_{i}(t,\theta)=\left\{ \begin{aligned} &r_{1} & &\text{if}\ t_{\min}<t<t_{\min2}\\ &r_{2} & &\text{if}\ t_{\min2}\leq t\leq t_{\max2}\\ &r_{3} & &\text{if}\ t_{\max2}<t<t_{\max}, \end{aligned} \right. \label{eq: Rgen} \end{equation} where \begin{align*} r_{1}&=\left| \frac{t-t_{\min}}{\sin{\theta}\cos{\theta}}\right| & r_{2}&=c^{-1}\min{\left(\frac{1}{|\cos{\theta}|},\frac{1}{|\sin{\theta}|}\right)} & r_{3}&=\left| \frac{t-t_{\max}}{\sin{\theta}\cos{\theta}}\right|. \end{align*} \item for $\theta=0$ \begin{equation} Rb_{i}(t,0)=\left\{ \begin{aligned} &c^{-1} & &\text{if}\ t_{\min}\leq t<t_{\max} \, \vee \, (x_{i}=x_{K^{2}}\,\wedge\,t=t_{\max})\\ &0 & &\text{otherwise} \end{aligned} \right.
\label{eq: R0} \end{equation} \item for $\theta=\frac{\pi}{2}$ \begin{equation} Rb_{i}(t,\frac{\pi}{2})=\left\{ \begin{aligned} &c^{-1} & &\text{if}\ t_{\min}< t\leq t_{\max} \, \vee \, (y_{i}=y_{K^{2}}\,\wedge\,t=t_{\min})\\ &0 & &\text{otherwise} \end{aligned} \right. . \label{eq: Rpi2} \end{equation} \end{itemize} \subsection{Algorithm} We now summarize in a pseudo-algorithm the principal steps involved in the computation of $A$ and $p$. Operations are indicated in Matlab language. \begin{enumerate} \item Input: \begin{verbatim} R: (2M+1)N matrix representing the Radon data K: dimension of the output image \end{verbatim} \item Initialization: \begin{verbatim} A=zeros((2M+1)*N,K^{2}); c=floor((K+1)/2); \end{verbatim} \item Compute pixels coordinates: \begin{verbatim} Y=repmat([0:K,K+1,1]); X=Y'; x=X/c-1; y=-Y/c+1; \end{verbatim} \item Compute values $t_{h}=t_{h}(x_{i},y_{i},\theta)$, $h=1,2,3,4$: \begin{verbatim} T=zeros(K+1,K+1,N); for j=1:N, T(:,:,j)=x*cos(theta(j))+y*sin(theta(j)); end \end{verbatim} \item Compute $A$: \begin{itemize} \item For all \texttt{i=1:K*K} extract sub-matrix \texttt{Ti} of \texttt{T} containing $t_{h}$ values corresponding to pixel $PX_{i}$; \item Sort \texttt{Ti} in increasing order; \item for all \texttt{j=1:N}, for all \texttt{k=1:2M+1}, compute $\tilde{A}^{i}(t_{k},\theta_{j})$ using equations \eqref{eq: Rgen},\eqref{eq: R0},\eqref{eq: Rpi2}; \item Fill $i$-th column of \texttt{A}: \begin{verbatim} A(:,i)=Atilde_i(:); \end{verbatim} \end{itemize} \item Compute $p$: \begin{verbatim} p=R(:); \end{verbatim} \end{enumerate} \section{Solving the system} In the previous section we saw how to compute matrix $A$ and the r.h.s. $p$ of the linear system generated from the algebraic approach to the image reconstruction problem. In this section we discuss other methods useful for the solution of this system. We start observing that the matrix $A$ can be very large. 
In fact, every sampling of the Radon transform produces an equation, while every pixel in the output image is associated with an unknown. Moreover the system can typically be either underdetermined (more unknowns than equations) or overdetermined (more equations than unknowns). Another important property of the system $Ax=p$ is that the matrix $A$ is sparse. Indeed every particular line $l_{t_{j},\theta_{j}}$ passes through relatively few pixels in the grid. Thus most of the values $r_{jk}$ are equal to zero. In order to solve the system we will use two different methods depending on whether the system is overdetermined or underdetermined. In the first case we will use the least square approximation, which means that the solution $x$ will be given by $x=\text{argmin}_{y}\norm{Ay-p}$. In the second case we will use an iterative method called \emph{Kaczmarz's method} that will be discussed in the next paragraph. \subsection{Kaczmarz's method} The Kaczmarz's method \cite{KACZ} is an iterative procedure for approximating a solution of a linear system $Ax=p$, $A\in\numberset{R}^{m\times n}$, $p\in\numberset{R}^{m}$. If we denote by $r_{i}$ the $i$-th row of $A$ and by $p_{i}$ the $i$-th component of $p$, we can say that a vector $x$ is a solution of $Ax=p$ if and only if \begin{equation*} r_{i}\cdot x=p_{i} \quad \forall i=1,\ldots,m. \end{equation*} We also notice that the set $L_{i}=\{ x\in\numberset{R}^{n}:\, r_{i}\cdot x=p_{i} \}$ is an affine subspace of $\numberset{R}^{n}$. The idea of Kaczmarz's method is to project an initial approximated solution $x_{0}$ on all these affine spaces, generating in this way a sequence of vectors, each of which satisfies one of the equations $r_{i}\cdot x=p_{i}$. \begin{definition} Let $L_{p,r}=\{ x\in\numberset{R}^{n}:\, r\cdot x=p \}$, for $r\in\numberset{R}^{n}$ and $p\in\numberset{R}$, an affine space, let $u\in\numberset{R}^{n}$.
The \emph{affine projection} of $u$ on $L_{p,r}$ is the vector $\bar{u}\in L_{p,r}$ such that \begin{equation*} \norm{\bar{u}-u}_{2}=\min_{x\in L_{p,r}}{\norm{x-u}_{2}}. \end{equation*} \end{definition} \begin{proposition} The affine projection $\bar{u}$ of a vector $u$ in the affine space $L_{p,r}$ is given by \begin{equation*} \bar{u}=u-\frac{r\cdot u-p}{\norm{r}_{2}^{2}}r. \end{equation*} \end{proposition} The Kaczmarz's method proceeds as follows. From an initial guess it computes its affine projection on the first affine space. This projection is then projected on the next affine space in our list and so on until the last affine space. These operations constitute one iteration, and the result of this iteration becomes the starting point of the next one. In detail the algorithm proceeds as follows: \begin{enumerate} \item Select $x_{0}$; \item for $k=1,\ldots,K_{\max}$, $x_{k}^{0}=x_{k-1}$ (where $K_{\max}$ is the maximum number of iterations allowed); \item for $i=1,\ldots,m$, \begin{equation} x_{k}^{i}=x_{k}^{i-1}-\frac{r_{i}\cdot x_{k}^{i-1}-p_{i}}{\norm{r_{i}}^{2}}r_{i} \label{eq: kaczmarz} \end{equation} \item $x_{k}=x_{k}^{m}$. \end{enumerate} The sequence $x_{0},x_{1},x_{2}\ldots$ generated by the method converges to a vector $x$ that satisfies $Ax=p$ (see Theorem 9.14 in \cite{BASIC} and references there). However the convergence can be slow and a lot of steps are needed to get a good approximation. Moreover if the system has no solution, like in many image reconstruction applications, then the behavior of the sequence is not clear and can be chaotic. In the field of medical imaging the size of the system can be a serious problem, but, as we know, the matrix $A$ is also sparse. This means that when we compute $x_{k}^{i}$ from $x_{k}^{i-1}$, we only change the components of $x_{k}^{i-1}$ that correspond to non zero entries of $r_{i}$. So we can increase efficiency by storing the location of these entries.
Another fact that is connected to the nature of the reconstruction problem is that adjacent X-ray beams transmitted along line $l_{t,\theta}$, for similar values of $t$ and $\theta$, will intersect many of the same pixels, thus the corresponding affine spaces will be almost parallel. As a consequence, the convergence is slow and many iterations are needed to reach a good approximate image. We conclude this section by introducing a variation of the Kaczmarz's method that involves the introduction of a relaxation parameter in the formula \eqref{eq: kaczmarz}. Let $\lambda_{i,k}$ be such that $0<\lambda_{i,k}<2$, then we replace formula \eqref{eq: kaczmarz} with \begin{equation*} x_{k}^{i}=x_{k}^{i-1}-\lambda_{i,k}\frac{r_{i}\cdot x_{k}^{i-1}-p_{i}}{\norm{r_{i}}^{2}}r_{i}. \end{equation*} The parameter $\lambda_{i,k}$ can accelerate the convergence of an indeterminate system. Note that if $\lambda_{i,k}=2$, then the vector $x_{k}^{i}$ is just the reflection of $x_{k}^{i-1}$ across $L_{i}$ and there is no improvement in the proximity to a solution. That's why we consider $0<\lambda_{i,k}<2$. \chapter{Kernel based methods} \label{chap: kernelMethods} In this chapter we present another approach for solving the image reconstruction problem based on kernel functions. Reproducing kernels have already been used in image reconstruction \cite{REI}. Here we use a different approach. As usual our data are the discrete Radon transform of a function $f:\numberset{R}^2\rightarrow\numberset{R}$ $\{Rf(t_{j},\theta_{j})\}_{j=1}^{n}$, from which we want to find an approximation of the function $f$. The basic idea is to seek the approximation $s$ of $f$ in a function space $S$ with finite dimension $n$, that is $S=span\{s_{1},s_{2},\ldots, s_{n}\}$. Thus a function $s\in S$ can be written as $s=\sum_{j=1}^{n}{c_{j}s_{j}}$ for some $c_{j}\in\numberset{R}$. Then we ask that $Rs$ coincides with $Rf$ on the points $(t_{j},\theta_{j})$ for all $j=1,\ldots,n$, i.e.
\begin{equation} (Rs)(t_{j},\theta_{j})=(Rf)(t_{j},\theta_{j}) \quad j=1,\ldots,n. \label{eq: kerProblem} \end{equation} By linearity of $R$ the coefficients $c_{j},\ j=1,\ldots,n$ are given by the solution of the linear system $Ac=b$, $c\in\numberset{R}^{n}$, where \begin{align*} A&=(Rs_{k}(t_{j},\theta_{j}))_{j,k=1,\ldots,n} & b&=(Rf(t_{j},\theta_{j}))_{j=1,\ldots,n}. \end{align*} \section{Hermite-Birkhoff interpolation} We generalize the image reconstruction problem \eqref{eq: kerProblem} considering the problem of finding a function $s\in S$ such that $f|_{\Lambda}=s|_{\Lambda}$ for some function $f$, where $\Lambda=\{\lambda_{1},\ldots,\lambda_{n}\}$ is a set of linearly independent linear functionals (see \cite{ISKE3,ISKE4}). In our specific case we will consider $\lambda_{j}f=Rf(t_{j},\theta_{j})$. We also assume $S=span\{s_{1},s_{2},\ldots, s_{n}\}$ with $|\Lambda|=n$. By linearity, the problem is equivalent to the linear system \begin{equation*} Ac=f_{\Lambda}, \label{eq: HermBirk} \end{equation*} where \begin{align*} A&=(\lambda_{j}s_{k})_{j,k=1,\ldots,n} & f_{\Lambda}&=(\lambda_{j}f)_{j=1,\ldots,n} & s=\sum_{k=1}^{n}{c_{k}s_{k}}. \end{align*} \begin{theorem}[Mairhuber-Curtis \cite{MAIR}, \cite{CURTIS}] Let $\Omega\subseteq\numberset{R}^{d}$, $d\geq2$, suppose $\Omega$ contains an interior point, then there is no Chebyshev system $s_{1},\ldots,s_{n}$, $n\geq2$ on $\Omega$, i.e. for all $s_{1},\ldots,s_{n}$ real valued functions on $\Omega$, there exists $\Lambda=\{\lambda_{1},\ldots,\lambda_{n}\}$, $|\Lambda|=n$ such that the matrix $(\lambda_{k}(s_{j}))_{j,k=1,\ldots,n}$ is singular, where $\lambda_{k}=\delta_{\xi_{k}}$ for pairwise distinct points $\xi_{k}\in\Omega$ are the functionals ``evaluation at $\xi_{k}$''. \end{theorem} This theorem tells us that if we want to find a basis $s_{1},\ldots,s_{n}$ of $S$ such that the system \eqref{eq: HermBirk} has a unique solution for all data $\Lambda$, the basis should depend on the location of the data, i.e.
on $\Lambda$ itself. For this reason we will choose \begin{equation*} s_{j}=\lambda_{j}^{y}K(\cdot,y), \end{equation*} where $K:\numberset{R}^{d}\times\numberset{R}^{d}\rightarrow\numberset{R}$ and $\lambda_{j}^{y}$ indicates that operator $\lambda_{j}$ is applied to variable $y$. \subsection{Positive definite kernels} The problem is then solving $f|_{\Lambda}=s|_{\Lambda}$ with $s=\sum_{j}{c_{j}\lambda_{j}^{y}K(\cdot,y)}$. It can also be written as a linear system $A_{K,\Lambda}c=f_{\Lambda}$, where $c\in\numberset{R}^{n}$ and $A_{K,\Lambda}=(\lambda_{k}^{x}\lambda_{j}^{y}K(x,y))_{j,k=1}^{n}$. In order to have a unique solution for every choice of $\Lambda$, $A_{K,\Lambda}$ must be non-singular. This is certainly true if we assume $K$ symmetric, i.e. $K(x,y)=K(y,x)$ for all $x,y\in\numberset{R}^{d}$, and positive definite, which means that $A_{K,\Lambda}$ is positive definite for all $\Lambda$. The following theorem gives us a characterization of positive definite functions. \begin{theorem}[Bochner] Assume $\Phi:\numberset{R}^{d}\rightarrow\numberset{R}$ even and continuous and that its Fourier transform $\hat{\Phi}$ is such that the Fourier inversion theorem holds: \begin{equation*} \Phi(x)=\frac{1}{(2\pi)^{d}}\int_{\numberset{R}^{d}}{\hat{\Phi}(\omega)e^{ix\omega}\,d\omega}, \end{equation*} then, if $\hat{\Phi}(\omega)\geq0$, $\forall\omega\in\numberset{R}^{d}$, $K(x,y)=\Phi(\norm{x-y})$ is positive definite. \end{theorem} \begin{example}[Gaussian] $\Phi(x)=e^{-\norm{x}^{2}}$ is positive definite since $\hat\Phi(\omega)=e^{-\frac{\norm{\omega}^{2}}{4}}>0$. \end{example} \begin{example}[Inverse multiquadric] $\Phi(x)=\frac{1}{\sqrt{1+\norm{x}^{2}}}$ is positive definite since $\hat\Phi(\omega)=K_{\frac{d-1}{2}}(\norm{\omega})\norm{\omega}^{-\frac{d-1}{2}}>0$, where $K_{\nu}$ denotes the modified Bessel function of the second kind of order $\nu$.
\end{example} \section{Conditionally positive definite kernels} \begin{definition} A set of functionals $\Lambda$ is said to be $k$-unisolvent (or unisolvent w.r.t $\numberset{P}^{k}_{d}$) if for $p\in\numberset{P}_{k}^{d}$ we have \begin{equation*} p|_{\Lambda}=0 \quad \Rightarrow \quad p\equiv0, \end{equation*} where $\numberset{P}_{k}^{d}$ denotes the set of all polynomials of degree less than or equal to $k$ on $\numberset{R}^{d}$. \end{definition} \begin{definition} The radial kernel $\Phi$ is conditionally positive definite of order $k$ and we write $\Phi\in cdp(k)$, if for $K(x,y)=\Phi(\norm{x-y})$ the quadratic form \begin{equation*} c^{T}A_{K,\Lambda}c=\sum_{i,j=1}^{n}{c_{i}c_{j}\lambda_{i}^{x}\lambda_{j}^{y}\Phi(\norm{x-y})} \end{equation*} is positive for all possible $\Lambda$, $|\Lambda|=n$ and vectors $c\in\numberset{R}^{n}\setminus\{0\}$ satisfying \begin{equation*} \sum_{j=1}^{n}{c_{j}\lambda_{j}(p)}=0, \qquad \forall\,p\in\numberset{P}_{k}^{d}. \end{equation*} \end{definition} \subsection{Reconstruction by conditionally positive kernel functions} We introduce a polynomial part in the interpolant $s$, so what we have to do now is to solve $f|_{\Lambda}=s|_{\Lambda}$ with $s$ of the form \begin{equation*} s=\sum_{j=1}^{n}{c_{j}\lambda_{j}^{y}\Phi(\norm{\cdot-y})}+p, \end{equation*} where $p\in\numberset{P}^{d}_{k-1}$ ($k=k(\Phi)$) and vector $c\in\numberset{R}^{n}$ satisfying the \emph{vanishing moment condition} \begin{equation*} \sum_{j=1}^{n}{c_{j}\lambda_{j}(p)}=0 \quad \forall\, p\in\numberset{P}_{k-1}^{d}. \end{equation*} \begin{theorem}[Micchelli, Wu\label{thm: unicity}] The reconstruction problem $f|_{\Lambda}=s|_{\Lambda}$ has, under the vanishing moment condition, a unique solution $s$, provided that $\Phi\in cdp(k)$ and the functionals $\Lambda$ are $(k-1)$-unisolvent.
\end{theorem} \proof Let $p_{1},\ldots,p_{m}\in\numberset{P}_{k-1}^{d}$ be a basis of $\numberset{P}_{k-1}^{d}$, where $m=\text{dim}(\numberset{P}_{k-1}^{d})=\left(\begin{array}{c} (k-1)+d\\ d \end{array}\right)$. Then $s$ can be written as \begin{equation*} s=\sum_{j=1}^{n}{c_{j}\lambda_{j}^{y}\Phi(\norm{\cdot-y})}+\sum_{l=1}^{m}{d_{l}p_{l}}, \end{equation*} for some $c\in\numberset{R}^{n},\ d\in\numberset{R}^{m}$. Condition $f|_{\Lambda}=s|_{\Lambda}$ and the vanishing moment condition are equivalent to the linear system \begin{equation*} \left\{ \begin{aligned} &\lambda_{i}^{x}s=\lambda_{i}^{x}f & &\forall\, i=1,\ldots,n\\ &\sum_{j=1}^{n}{c_{j}\lambda_{j}^{x}(p_{l})}=0 & &\forall\,l=1,\ldots,m \end{aligned} \right. \end{equation*} which in matrix form is \begin{equation} \left( \begin{array}{cc} \Phi & P\\ P^{T} & O \end{array} \right) \left( \begin{array}{c} c\\ d \end{array} \right)=\left( \begin{array}{c} f|_{\Lambda}\\ \textbf{0} \end{array} \right) \label{eq: recSyst} \end{equation} where $\Phi_{i,j}=\lambda_{i}^{x}\lambda_{j}^{y}\Phi(\norm{x-y})$, $i,j=1,\ldots,n$, $P_{j,l}=\lambda_{j}(p_{l})$, $j=1,\ldots,n$, $l=1,\ldots,m$ and $O\in\numberset{R}^{m\times m}$ and $\textbf{0}\in\numberset{R}^{m}$ are a matrix and a vector with all components equal to zero. We consider the homogeneous system \begin{equation*} \left\{ \begin{aligned} &\Phi c+Pd=\textbf{0}\\ &P^{T}c=\textbf{0} \end{aligned} \right.\Leftrightarrow \left\{ \begin{aligned} &c^{T}\Phi c+c^{T}Pd=0\\ &c^{T}P=\textbf{0} \end{aligned} \right. \end{equation*} substituting the second equation in the first one, we have $c^{T}\Phi c=0$. Since $\Phi\in cdp(k)$, $c=\textbf{0}$. The first equation then becomes $Pd=\textbf{0}$, but $\Lambda$ is $(k-1)$-unisolvent, which implies $d=\textbf{0}$. So the unique solution to the homogeneous system is the null solution.
\endproof \begin{example} Conditionally positive definite functions: \begin{enumerate} \item Polyharmonic splines: \begin{equation*} \varphi(r)=\left\{ \begin{aligned} &r^{2k-d}\log{r} & &\text{if $d$ is even}\\ &r^{2k-d} & &\text{if $d$ is odd} \end{aligned} \right. \end{equation*} $2k>d$; \item Gaussian: $\varphi(r)=e^{-r^{2}}$, $k=0$; \item Multiquadrics: $\varphi(r)=(1+r^{2})^{\nu}$, $\nu>0,\ \nu\notin\numberset{N}$, $k=\lceil\nu\rceil$; \item Inverse multiquadrics: $\varphi(r)=(1+r^{2})^{\nu}$, $\nu<0$, $k=0$; \item Power function: $\varphi(r)=r^{\beta}$, $0<\beta\notin2\numberset{N}$, $k=\lceil\frac{\beta}{2}\rceil$. \end{enumerate} \end{example} \section{Native function spaces} In this section we will show that the solution of the Hermite-Birkhoff interpolation is optimal in the sense that it is the function of minimum norm among all functions that interpolate data $f|_{\Lambda}$, where the norm is taken in a suitable Hilbert space. For a fixed positive definite function $K:\numberset{R}^{d}\times\numberset{R}^{d}\rightarrow\numberset{R}$, we define the function spaces \begin{align*} S_{\Lambda}&=span\{\lambda^{y}K(\cdot,y):\ \lambda\in\Lambda\} & S&=\{s\in S_{\Lambda}: \ |\Lambda|<\infty\} \end{align*} and the dual space \begin{equation*} L=\{\lambda\equiv\lambda_{c,\Lambda}=\sum_{j=1}^{n}{c_{j}\lambda_{j}: \ c\in\numberset{R}^{n}, \ |\Lambda|<\infty}\}. \end{equation*} We observe that, for all $s\in S$ there exists $\lambda\in L$ such that $s\equiv s_{\lambda}=\lambda^{y}K(\cdot,y)$. Indeed \begin{equation*} s=\sum_{j=1}^{n}{c_{j}\lambda_{j}^{y}K(\cdot,y)}=\left(\sum_{j=1}^{n}{c_{j}\lambda_{j}}\right)^{y}K(\cdot,y)=\lambda^{y}K(\cdot,y), \end{equation*} with $\lambda=\sum{c_{j}\lambda_{j}}\in L$.
We define an inner product on $L$: \begin{equation*} (\lambda,\mu)_{K}=\lambda^{x}\mu^{y}K(x,y)=\sum_{j=1}^{n}{\sum_{k=1}^{n}{c_{j}d_{k}\lambda_{j}^{x}\mu_{k}^{y}K(x,y)}}, \end{equation*} where $\lambda=\sum{c_{j}\lambda_{j}}$ and $\mu=\sum{d_{k}\mu_{k}}$, and the norm \begin{equation*} \norm{\lambda}_{K}=(\lambda,\lambda)_{K}^{1/2}. \end{equation*} Thanks to the duality relation between $L$ and $S$, we introduce a topology also on $S$ so that $(s_{\lambda},s_{\mu})_{K}=(\lambda,\mu)_{K}$, $\norm{\cdot}_{K}=(\cdot,\cdot)_{K}^{1/2}$ \begin{remark} $L\cong S$: $L$ and $S$ are isometric with respect to the norm $\norm{\cdot}_{K}$. Moreover, for all $\mu\in L$, $\mu$ is continuous on $S$. In fact \begin{equation*} |\mu(s_{\lambda})|=|\mu^{x}\lambda^{y}K(x,y)|=|(\mu,\lambda)_{K}|\leq\norm{\mu}_{K}\norm{\lambda}_{K}=\norm{\mu}_{K}\norm{s_{\lambda}}_{K}. \end{equation*} \end{remark} We now set $D=\bar{L}$ and $F=\bar{S}$ the topological closures with respect to $\norm{\cdot}_{K}$. The following theorem holds: \begin{theorem}[Madych-Nelson, 1983\label{thm: reprDF}] For all $s_{\mu}\in S$, for all $\lambda\in D$ we have \begin{equation} (\lambda^{y}K(\cdot,y),s_{\mu})_{K}=(s_{\lambda},s_{\mu})_{K}=(\lambda,\mu)_{K}=\lambda^{x}\mu^{y}K(x,y)=\lambda(s_{\mu}). \label{eq: reprDF} \end{equation} \end{theorem} \proof The statements holds for $\lambda,\mu\in L$ and the representation \eqref{eq: reprDF} follows by continuity. \endproof \begin{definition}\label{def: rep_kernel} Let $H=\{f:\ \Omega\subseteq\numberset{R}^{d}\rightarrow\numberset{R}\}$ a Hilbert space, $K:\Omega\times\Omega\rightarrow\numberset{R}$ is a \emph{reproducing kernel} for $H$ if: \begin{enumerate} \item $K(\cdot,x)\in H$ for all $x\in\Omega$; \item $f(x)=(f,K(\cdot,x))_{H}$, for all $f\in H,\ x\in\Omega$. \end{enumerate} \end{definition} \begin{corollary} $K:\numberset{R}^{d}\times\numberset{R}^{d}\rightarrow\numberset{R}$ is the reproducing kernel of the Hilbert space $F$. 
\end{corollary} \proof We prove properties 1. and 2. of Definition \ref{def: rep_kernel} \begin{enumerate} \item For $\delta_{z}\in L, \ z\in\numberset{R}^{d}$, $\delta_{z}^{y}K(\cdot,y)=K(\cdot,z)\in F$, for all $z\in\numberset{R}^{d}$; \item For $\lambda=\delta_{z}\in L$, by theorem \ref{thm: reprDF} $(K(\cdot,z),f)_{K}=f(z)$, for all $f\in F,\ z\in\numberset{R}^{d}$ \end{enumerate} \endproof \begin{corollary} The point evaluation $\delta_{z}:F\rightarrow\numberset{R}$ are continuous on $F$. \end{corollary} \proof $|\delta_{z}(f)|=|f(z)|\leq\norm{\delta_{z}}_{K}\norm{f}_{K}$ for all $z,f$. \endproof \begin{corollary} Let $s\equiv s_{f,\Lambda}\in S$ denote the unique interpolation to $f\in F$ on $\Lambda$: $s|_{\Lambda}=f|_{\Lambda}$, then the Pythagoras theorem \begin{equation*} \norm{f}^{2}_{K}=\norm{s}^{2}_{K}+\norm{f-s}^{2}_{K} \end{equation*} holds. \end{corollary} \proof For $s=\lambda^{y}K(\cdot,y)\in S$, $\lambda\in L$, we find $(s,g)_{K}=0$ for all $g$ such that $\lambda(g)=0$, i.e. $s\equiv s_{f,\Lambda}$ is orthogonal to the kernel of the functional $\lambda\in\Lambda$. Hence \begin{equation*} (s_{f,\Lambda},f-s_{f,\Lambda})_{K}=0, \end{equation*} i.e.the interpolant $s_{f,\Lambda}\in S$ is the orthogonal projection of $f\in F$ onto $S$. \endproof \subsection{Optimality of the interpolation method} The following results are consequences of Theorem \ref{thm: reprDF} and its corollaries. \begin{theorem} The interpolant $s\equiv s_{f\Lambda}\in S$ is the unique minimizer of the energy functional $\norm{\cdot}_{K}$ among all interpolants to data $f|_{\Lambda}$, i.e. \begin{equation*} \norm{s}_{K}\leq \norm{g}_{K} \ \forall g\in S \ \text{s.t.}\ g|_{\Lambda}=f|_{\Lambda}. \end{equation*} In this sense the interpolation scheme is optimal. \end{theorem} \begin{corollary} The interpolant $s\equiv s_{f\Lambda}\in S$ is the unique best approximation to $f\in F$ from $S$ with respect to $\norm{\cdot}_{K}$. 
\end{corollary} \chapter{Kernel based image reconstruction}\label{chap: kernelRec} In this chapter we apply the kernel based methods seen in Chapter \ref{chap: kernelMethods} to the problem of image reconstruction. We will see that the Hermite-Birkhoff interpolation cannot be applied to the original reconstruction problem because the Radon transform of a kernel basis function can be infinite. We will then overcome this obstacle by introducing a regularization of the integrals involved in the computation of the Radon transform. Thanks to this regularization it is possible to generate a linear system; by solving this linear system one can find an approximation of the image to reconstruct. This technique can be used with both parallel beam geometry and scattered data. This second case is useful when one wants to reduce the dosage of X-rays passing through the sample. Scattered data can then be interpolated using suitable methods. For example a radial functions method was recently introduced by Beatson and zu Castell \cite{ZUC} to obtain new values of the Radon transform. This technique can be combined with the methods introduced below to obtain a reconstruction using fewer initial data. \newline Let $f:\numberset{R}^{2}\rightarrow\numberset{R}$ be a function. Consider again the problem $s|_{\Lambda}=f|_{\Lambda}$, where \begin{align*} f|_{\Lambda}&=\{\lambda_{j}f\}_{j=1}^{n}, & \lambda_{j}f&=R[f(x_{1},x_{2})](t_{j},\theta_{j}), \ j=1,\ldots,n, \end{align*} and $s$ is an approximation of $f$ belonging to the space \begin{equation*} S=\left\{\sum_{j=1}^{n}{c_{j}\lambda_{j}^{y}K(\cdot,y)}:\ c_{j}\in\numberset{R},\ K(x,y)=\Phi(\norm{x-y})\, \text{positive definite}\right\}. \end{equation*} Let us denote by $b_{j}(x)=\lambda_{j}^{y}(K(x,y))=R[K(x,y)](t_{j},\theta_{j})$, $x\in\numberset{R}^{2}$, $1\leq j\leq n$, the basis of $S$, so that $s(x)=\sum_{j=1}^{n}{c_{j}b_{j}(x)}$ for some $c\in\numberset{R}^{n}$.
The interpolation conditions $s|_{\Lambda}=f|_{\Lambda}$ are equivalent to $\lambda_{k}s=\lambda_{k}f\ \forall k=1,\ldots,n$. By linearity of the Radon transform we obtain \begin{equation*} \sum_{j=1}^{n}{c_{j}\lambda^{x}_{k}\lambda_{j}^{y}K(x,y)}=\lambda_{k}f, \ k=1,\ldots,n, \end{equation*} or in matrix form \begin{align} Ac&=f_{\Lambda} \label{eq: sist_kernel} \end{align} with $ f_{\Lambda}=(\lambda_{1}f,\ldots,\lambda_{n}f)^{t}$ and $A=(a_{k,j})_{1\leq k,j \leq n}$ given by \begin{equation*} a_{k,j}=\lambda_{k}^{x}\lambda_{j}^{y}K(x,y)=R^{x}\left\{R^{y}[K(x,y)](t_{j},\theta_{j})\right\}(t_{k},\theta_{k}). \end{equation*} Thus, to determine $c$ and then solution $s$, we have to solve the linear system \eqref{eq: sist_kernel}. The first step in solving system \eqref{eq: sist_kernel} is of course computing the matrix $A$. We start by considering a generic basis function $b_{j}(x)=R^{y}[K(x,y)](t_{j},\theta_{j})$ (for simplicity of notation we omit index $j$ and so we denote $(t_{j},\theta_{j})=(t,\theta)$). We notice that since the kernel $K$ is of the form $K(x,y)=\Phi(\norm{x-y})$, we can use the shift property of the Radon transform to simplify the computation of $b_{j}$. Indeed, if we set $k(y)=K(0,y)=\Phi(\norm{y})$, then $K(x,y)=\Phi(\norm{x-y})=k(x-y)=k(y-x)$. Hence, by theorem \ref{thm: shiftProp} (shift property), if $g(t,\theta)=R[k(y)](y,\theta)$, we have \begin{equation*} R^{y}[K(x,y)](t,\theta)=R^{y}[k(y-x)](t,\theta)=g(t-x\cdot v,\theta), \end{equation*} where $v=(\cos{\theta},\sin{\theta})$. So, in order to obtain $b_{j}$ we have only to compute $R[k(y)](t,\theta)=R[K(0,y)](t,\theta)$. Notice that this property is independent of the particular kind of kernel (Gaussian, multiquadrics, etc.) used and so is applicable with any kernel function of the form $K(x,y)=\Phi(\norm{x-y})$. \section{Gaussian kernel reconstruction} \label{sec: gaussRec} We start considering the Gaussian kernel \begin{equation*} K(x,y)=e^{-\norm{x-y}^2}. 
\end{equation*} \begin{align*} R[k(y)](t,\theta)&=\int_{\numberset{R}}{k(t\cos{\theta}-s\sin{\theta},t\sin{\theta}+s\cos{\theta})\,ds}=\\ &=\int_{\numberset{R}}{\exp{(-(t\cos{\theta}-s\sin{\theta})^{2}-(t\sin{\theta}+s\cos{\theta})^{2})}\,ds}=\\ &=\int_{\numberset{R}}{e^{-(t^{2}+s^{2})}\,ds}=e^{-t^2}\int_{\numberset{R}}{e^{-s^{2}}\,ds}=\\ &=\sqrt{\pi}e^{-t^{2}}=g(t,\theta). \end{align*} Thus \begin{align*} R^{y}[K(x,y)](t,\theta)&=g(t-x\cdot v,\theta)=\sqrt{\pi}e^{-(t-x\cdot v)^{2}} \end{align*} and we conclude that \begin{align*} &\boxed{ b_{j}(x)=\sqrt{\pi}e^{-(t_{j}-x\cdot v_{j})^{2}} } & &\text{where}\ v_{j}=(\cos{\theta_{j}},\sin{\theta_{j}}). \end{align*} We now want to compute $a_{k,j}=R[b_{j}](t_{k},\theta_{k})$. Again for simplicity of notation, we write $(t_{j},\theta_{j})=(t,\theta)$ and $(t_{k},\theta_{k})=(r,\varphi)$, then \begin{align*} R[\sqrt{\pi}e^{-(t-x\cdot v)^{2}}](r,\varphi)&=\sqrt{\pi}\int_{\numberset{R}}\exp\left(-\left[t-(r\cos{\varphi}-s\sin{\varphi})\cos{\theta}\right.\right.&\\ &\qquad\left.\left.-(r\sin{\varphi}+s\cos{\varphi})\sin{\theta}\right]^{2}\right)\,ds=\\ &=\sqrt{\pi}\int_{\numberset{R}}\exp\left(-\left[t-r(\cos{\varphi}\cos{\theta}+\sin{\varphi}\sin{\theta})+\right.\right.\\ &\qquad\left.\left.+s(\sin{\varphi}\cos{\theta}-\cos{\varphi}\sin{\theta})\right]^{2}\right)\,ds=\\ &=\sqrt{\pi}\int_{\numberset{R}}{\exp{(-[t-r\cos{(\varphi-\theta)}+s\sin{(\varphi-\theta)}]^{2})}\,ds}. \end{align*} If we set $a=\sin{(\varphi-\theta)}$ and $b=t-r\cos{(\varphi-\theta)}$, we can write \begin{equation*} a_{k,j}=R[\sqrt{\pi}e^{-(t-x\cdot v)^{2}}](r,\varphi)=\sqrt{\pi}\int_{\numberset{R}}{e^{-(as+b)^{2}}\,ds}.
\end{equation*} Hence, if $a\neq0$, the substitution $u=as+b$ gives \begin{equation*} a_{k,j}=\frac{\sqrt{\pi}}{|a|}\int_{\numberset{R}}{e^{-u^{2}}\,du}=\frac{\pi}{|a|}, \end{equation*} while, in the case in which $a=0$, \begin{equation*} a_{k,j}=\sqrt{\pi}\int_{\numberset{R}}{e^{-b^{2}}\,ds}=\infty. \end{equation*} Since both $\varphi$ and $\theta$ are in $[0,\pi)$, $a=0$ if and only if $\varphi=\theta$, so we conclude \begin{equation*} \boxed{ a_{k,j}=\left\{ \begin{aligned} &\frac{\pi}{|\sin{(\theta_{k}-\theta_{j})}|} & &\text{if}\ \theta_{k}\neq\theta_{j}\\ &+\infty & &\text{if}\ \theta_{k}=\theta_{j}. \end{aligned} \right. } \end{equation*} \subsection{Regularization}\label{subsec: regularization} We saw that the matrix $A$ can have infinite entries. More precisely $R[b_{j}](t_{k},\theta_{k})=+\infty$ for some values of $j$ and $k$ (those values s.t. $\theta_{k}=\theta_{j}$), which means that for these values $b_{j}(x)$ is not integrable on the line $l_{t_{k},\theta_{k}}$. To overcome this obstacle we must find some regularization technique so that the value of the Radon transform of the basis elements $b_{j}$ is finite for all $k,j$. The simplest choice is to consider a truncation of the integral, i.e. computing \begin{equation*} \int_{-\bar{L}}^{\bar{L}}{b_{j}(x(s))\,ds} \qquad \bar{L}\gg0, \end{equation*} instead of the integral on the whole real line. This approach is equivalent to computing $R[b_{j}(x)\chi_{[-L,L]}(\norm{x})](t_{k},\theta_{k})$, where \begin{equation*} \chi_{[-L,L]}(r)=\left\{ \begin{aligned} &1 & &\text{if}\ -L\leq r\leq L\\ &0 & &\text{otherwise} \end{aligned} \right. \end{equation*} is the characteristic function of the set $[-L,L]$ for some $L>0$. Moreover, in general, we can multiply $b_{j}$ by a window function $w$, where $w$ is such that \begin{equation*} \int_{l_{t_{k},\theta_{k}}}{b_{j}w}<\infty \quad \forall\,j,k.
\end{equation*} Possible choices of $w$ are: \begin{itemize} \item the characteristic function of a compact set $w(x)=\chi_{[-L,L]}(\norm{x})$; \item the Gaussian function $w(x)=e^{-\varepsilon^{2}\norm{x}^{2}}$; \item the cosine window $w(x)=\cos{\frac{\pi\norm{x}}{2L}}\chi_{[-L,L]}(\norm{x})$. \end{itemize} This approach can also be interpreted as substituting the operator $R$ with another operator, say $R_{w}$, defined by \begin{equation*} R_{w}[f]=R[fw], \qquad \text{for all} \ f:\numberset{R}^{2}\rightarrow\numberset{R}. \end{equation*} Note that, since $R$ is a linear operator, $R_{w}$ is linear as well. Indeed, for all functions $f,g$ and for all constants $\alpha,\beta$ \begin{align*} R_{w}[\alpha f+\beta g]&=R[(\alpha f+\beta g)w]=R[\alpha fw+\beta gw]=\\ &=\alpha R[fw]+\beta R[gw]=\\ &=\alpha R_{w}[f]+\beta R_{w}[g]. \end{align*} Then if we approximate $f_{k}=R[f](t_{k},\theta_{k})$ with $f_{w,k}=R_{w}[f](t_{k},\theta_{k})$, we can consider the interpolation problem \begin{equation*} f_{k}\approx f_{w,k}=R_{w}[s](t_{k},\theta_{k})\qquad \forall\,k=1,\ldots,n. \end{equation*} By linearity of $R_{w}$ \begin{equation*} f_{k}\approx \sum_{j=1}^{n}{c_{j}R_{w}[b_{j}](t_{k},\theta_{k})}\qquad \forall\,k=1,\ldots,n, \end{equation*} which leads us to the linear system $A_{w}c=f$, where $(A_{w})_{k,j}=R_{w}[b_{j}](t_{k},\theta_{k})=R[b_{j}w](t_{k},\theta_{k})$. We notice that for all $k$, the difference between $f_{k}$ and $f_{w,k}$ is bounded by \begin{align*} |f_{w,k}-f_{k}|&=|R_{w}[f](t_{k},\theta_{k})-R[f](t_{k},\theta_{k})|=\\ &=\left| \int_{\numberset{R}}{f(x(s))w(x(s))\,ds}-\int_{\numberset{R}}{f(x(s))\,ds}\right|\leq\int_{\numberset{R}}{|f||w-1|\,ds}\leq\\ &\leq\norm{w-1}_{\infty}\norm{f}_{L^{1}(\numberset{R}^{2})} \end{align*} Thus, for $w\rightarrow1$, $|f_{w,k}-f_{k}|\rightarrow0$ but also $R[b_{j}w](t_{k},\theta_{k})\rightarrow R[b_{j}](t_{k},\theta_{k})$ and this quantity can be infinite.
For $w\rightarrow0$, $R[b_{j}w](t_{k},\theta_{k})\rightarrow0$ but the difference \begin{equation*} |f_{w,k}-f_{k}|\rightarrow\left|\int{f(x(s))\,ds}\right|\leq\norm{f}_{L^{1}}. \end{equation*} Before starting on computing $A_{w}$ consider the following example. Let $K$ be the inverse multiquadric kernel given by \begin{equation*} K(x,y)=\frac{1}{\sqrt{1+\norm{x-y}^{2}}}. \end{equation*} As before we can just consider $R[k(y)]=R[K(0,y)]$ because of the relation $k(y-x)=K(x,y)$ and the shift property of the Radon transform. What we obtain is \begin{equation*} R[k(y)](t,\theta)=\int_{\numberset{R}}{\frac{1}{\sqrt{1+t^{2}+s^{2}}}\,ds}=+\infty, \end{equation*} that means that in this case, not only $b_{j}(x)$ is not integrable on some line (as in the Gaussian kernel case), but even $K(x,y)$ is not integrable on any line $l_{t,\theta}$. In this case we have to consider a further regularization of the integral. The remedy we adopt is to multiply the function $K$ itself by a window function $w$ such that $R^{y}[K(x,y)w](t,\theta)$ exists finite for all $(t,\theta)$. In choosing the function $w$, we consider that we would like to still use the shift property of the Radon transform, therefore we take $w$ of the form $w=w(x,y)=\tilde{w}(\norm{x-y})$ so that, if we set now $k(y)=K(0,y)w(0,y)$, it is still true that $k(y-x)=K(x,y)w(x,y)$. Moreover we will choose $w$ to be positive definite, in this way, since the product of positive definite function is positive definite, also $K\cdot w$ is so. Notice that this kind of regularization does not correspond, as in the first case, to replace the operator $R$ with another operator. In fact now the function $w$ depends on $x$. What we are doing now is simply substituting the positive definite kernel $K(x,y)$ with another positive definite kernel given by $K(x,y)w(x,y)$ that is integrable on every line in the plane $(y_{1},y_{2})$. 
\subsection{Regularization by truncation} We first consider the regularization of the Gaussian reconstruction problem using $w_{L}(x)=\chi_{[-L,L]}(\norm{x})$ as window function. With this choice $R[b_{j}](t_{k},\theta_{k})$ is replaced by $R_{L}[b_{j}](t_{k},\theta_{k})=R[b_{j}w_{L}](t_{k},\theta_{k})$, where $b_{j}(x)=R^{y}[K(x,y)](t_{j},\theta_{j})$ and $K$ is the Gaussian kernel. In applications it is useful to use kernels depending on a shape parameter $\varepsilon$ so that, choosing suitable values of $\varepsilon>0$, one can obtain a system matrix with a better condition number. We will consider \begin{equation*} K(x,y)=e^{-\varepsilon^2\norm{x-y}^{2}}. \end{equation*} The basis $b_{j}$ then becomes \begin{equation} \boxed{ b_{j}(x)=\frac{\sqrt{\pi}}{\varepsilon}e^{-\varepsilon^{2}(t_{j}-x\cdot v_{j})^{2}}. } \label{eq: basis_gauss} \end{equation} Indeed \begin{align*} R[k(y)]=\int_{\numberset{R}}{e^{-\varepsilon^2(t^{2}+s^{2})}\,ds}=e^{-\varepsilon^{2}t^{2}}\int_{\numberset{R}}{e^{-\varepsilon^{2}s^{2}}\,ds}=\frac{\sqrt{\pi}}{\varepsilon}e^{-\varepsilon^{2}t^{2}}. \end{align*} Components of matrix $A_{L}$ are given by $a_{k,j}=R_{L}[b_{j}](t_{k},\theta_{k})$, so we have \begin{align*} R_{L}[b_{j}](r,\varphi)=\int_{\numberset{R}}{\frac{\sqrt{\pi}}{\varepsilon}e^{-\varepsilon^{2}(t-x(s)\cdot v)^{2}}\chi_{[-L,L]}(\norm{x(s)})\,ds}, \end{align*} where $x(s)=(r\cos{\varphi}-s\sin{\varphi},r\sin{\varphi}+s\cos{\varphi})$ and $v=(\cos{\varphi},\sin{\varphi})$. If we set again \begin{equation} a=\sin{(\varphi-\theta)} \qquad b=t-r\cos{(\varphi-\theta)} \label{eq: def_ab} \end{equation} we have \begin{align*} R_{L}[b_{j}](r,\varphi)&=\frac{\sqrt{\pi}}{\varepsilon}\int_{\numberset{R}}{e^{-\varepsilon^{2}(as+b)^{2}}\chi_{[-L,L]}(\sqrt{r^{2}+s^{2}})\,ds}=\\ &=\frac{\sqrt{\pi}}{\varepsilon}\int_{-\sqrt{L^{2}-r^{2}}}^{\sqrt{L^{2}-r^{2}}}{e^{-\varepsilon^{2}(as+b)^{2}}\,ds}, \end{align*} where we are assuming $L\gg0$ so that $|r|<L$. 
Now we distinguish two cases: \begin{enumerate} \item if $a=0$ then \begin{equation*} R_{L}[b_{j}](r,\varphi)=\frac{\sqrt{\pi}}{\varepsilon}\int_{-\sqrt{L^{2}-r^{2}}}^{\sqrt{L^{2}-r^{2}}}{e^{-\varepsilon^{2}b^{2}}\,ds}=\frac{\sqrt{\pi}}{\varepsilon}e^{-\varepsilon^{2}b^{2}}2\sqrt{L^{2}-r^{2}}; \end{equation*} \item if $a\neq0$ we set $u=\varepsilon(as+b)$ so that \begin{equation*} R_{L}[b_{j}](r,\varphi)=\frac{\sqrt{\pi}}{\varepsilon^{2}a}\int_{c_{1}}^{c_{2}}{e^{-u^{2}}\,du} \end{equation*} where $c_{1}=\varepsilon(-\sqrt{L^{2}-r^{2}}+b)$ and $c_{2}=\varepsilon(\sqrt{L^{2}-r^{2}}+b)$ and $\int_{c_{1}}^{c_{2}}{e^{-u^{2}}\,du}=\frac{\sqrt{\pi}}{2}\left(\text{erf}{(c_{2})}-\text{erf}{(c_{1})}\right)$, where \emph{erf} is the usual error function \begin{equation*} \text{erf}(x)=\frac{2}{\sqrt{\pi}}\int_{0}^{x}{e^{-u^{2}}\,du}. \end{equation*} \end{enumerate} In conclusion we have to solve the linear system $A_{L}c=f$ with components of $A_{L}$ given by \begin{equation*} \boxed{ a_{k,j}=\left\{ \begin{aligned} &\frac{\sqrt{\pi}}{\varepsilon^{2}a}\int_{c_{1}}^{c_{2}}{e^{-u^{2}}\,du} & &\text{if}\ \theta_{k}\neq\theta_{j}\\ &\frac{2\sqrt{\pi}}{\varepsilon}e^{-\varepsilon^{2}b^{2}}\sqrt{L^{2}-t_{k}^{2}} & &\text{if}\ \theta_{k}=\theta_{j} \end{aligned} \right. } \end{equation*} where \begin{align*} &a=\sin{(\theta_{k}-\theta_{j})} & &b=t_{j}-t_{k}\cos{(\theta_{k}-\theta_{j})}\\ &c_{1}=\varepsilon(-\sqrt{L^{2}-t_{k}^{2}}+b) & &c_{2}=\varepsilon(\sqrt{L^{2}-t_{k}^{2}}+b). \end{align*} Solving this system we obtain $c$ and we can then evaluate the solution $s$ by \begin{equation*} s(x)=\sum_{j=1}^{n}{c_{j}b_{j}(x)} \end{equation*} with $b_{j}$ given by \eqref{eq: basis_gauss}. Figure~\ref{fig: gauss_trunc} shows the results of applying this method to the crescent-shaped phantom for suitable values of $\varepsilon$ and $L$ in the case of parallel beam geometry, where samples of the Radon transform of $f$ are taken at angles $\theta_{p}=p\pi/N$, $p=1,\ldots,N-1$ and $t_{q}=q/M$, $q=-M,\ldots,M$. 
We also observe that without using the shape parameter $\varepsilon$, i.e. using $\varepsilon=1$, the matrix $A_{L}$ would have been highly ill-conditioned and the result very different. \begin{figure} \caption{Reconstruction of the crescent-shaped phantom with Gaussian kernel and truncation regularization : (a) Original phantom; (b) $\varepsilon=1$, $L=10$, $k_{1} \label{fig: orig_ph} \label{fig: gauss_trunc1} \label{fig: gauss_trunc2} \label{fig: mq_time3} \label{fig: gauss_trunc} \end{figure} \subsection{Regularization by Gaussian filtering}\label{subsec: reg_gauss} Consider again the Gaussian kernel $K(x,y)=e^{-\varepsilon^{2}\norm{x-y}^{2}}$ and the associated basis of the space $S$, defined in \eqref{eq: basis_gauss}. In this section we will use another window function in order to regularize the integral $R[b_{j}]$, i.e. we will multiply the function $b_{j}$ by another Gaussian function \begin{equation*} w_{\nu}(x)=e^{-\nu^{2}\norm{x}^{2}}. \end{equation*} In other words we will consider the operator $R_{\nu}$ given by $R_{\nu}[f]=R[fw_{\nu}]$ instead of the classical Radon transform. We have \begin{align*} R_{\nu}[b_{j}](r,\varphi)&=\int_{\numberset{R}}{b_{j}(x(s))e^{-\nu^{2}\norm{x(s)}^{2}}\,ds}=\frac{\sqrt{\pi}}{\varepsilon}\int_{\numberset{R}}{e^{-\varepsilon^{2}(as+b)^{2}}e^{-\nu^{2}(r^{2}+s^{2})}\,ds} \end{align*} where $a,b$ are defined by \eqref{eq: def_ab}. 
Then, \begin{align*} R_{\nu}[b_{j}](r,\varphi)&=\frac{\sqrt{\pi}}{\varepsilon}e^{-\varepsilon^{2}b^{2}-\nu^{2}r^{2}}\int_{\numberset{R}}{\exp{(-[(\varepsilon^{2}a^{2}+\nu^{2})s^{2}+2abs\varepsilon^{2}])}\,ds}=\\ &=\frac{\sqrt{\pi}}{\varepsilon}\exp{\left(-\varepsilon^{2}b^{2}-\nu^{2}r^{2}+\frac{a^{2}b^{2}\varepsilon^{4}}{a^{2}\varepsilon^{2}+\nu^{2}}\right)}\cdot\\ &\qquad\int_{\numberset{R}}{\exp{\left(-\left[\sqrt{\varepsilon^{2}a^{2}+\nu^{2}}s+\frac{ab\varepsilon^{2}}{\sqrt{\varepsilon^{2}a^{2}+\nu^{2}}}\right]^{2}\right)}\,ds}=\\ &=\frac{\pi}{\varepsilon\sqrt{a^{2}\varepsilon^{2}+\nu^{2}}}\exp{\left[-\nu^{2}\left(r^{2}+\frac{\varepsilon^{2}b^{2}}{a^{2}\varepsilon^{2}+\nu^{2}}\right)\right]}. \end{align*} We have now two options: \begin{enumerate} \item The regularization $R_{\nu}[b_{j}](t_{k},\theta_{k})$ for all values of $k,j$, that leads to a linear system with matrix $A_{1}^{\nu}$ whose components are \begin{equation*} \boxed{ a_{k,j}=\frac{\pi\exp{\left[-\nu^{2}\left(r^{2}+\frac{\varepsilon^{2}b^{2}}{a^{2}\varepsilon^{2}+\nu^{2}}\right)\right]}}{\varepsilon\sqrt{a^{2}\varepsilon^{2}+\nu^{2}}}. } \end{equation*} \item The regularization $R_{\nu}[b_{j}](t_{k},\theta_{k})$ only for those values of $k,j$ for which $b_{j}$ has not finite Radon transform, i.e. only when $a=0$, while for $a\neq0$ we consider the usual Radon transform. This corresponds to the matrix $A_{2}^{\nu}$ whose elements are \begin{equation*} a_{k,j}=\left\{ \begin{aligned} &\frac{\pi}{\varepsilon^{2}a} & &\text{if}\ a\neq0\\ &\frac{\pi\exp{[-(\nu^{2}r^{2}+\varepsilon^{2}b^{2})]}}{\varepsilon\nu} & &\text{if}\ a=0. \end{aligned} \right. \end{equation*} \end{enumerate} Numerical experiments (Figures \ref{fig: gauss_gauss_pm} and \ref{fig: gauss_gauss_s}) show that the first option gives better results, provided that the value of $\varepsilon$ is relatively big ($\approx30$) and value of $\nu$ is quite small $(\approx0.5)$ so that the condition number of the matrix $A^{\nu}_{1}$ is small. 
\begin{figure} \caption{Gaussian reconstruction, parallel beam geometry data} \label{fig: gauss_gauss_1pm} \label{fig: gauss_gauss_2pm} \label{fig: gauss_gauss_pm} \end{figure} \begin{figure} \caption{Gaussian reconstruction, scattered data} \label{fig: gauss_gauss_1s} \label{fig: gauss_gauss_2s} \label{fig: gauss_gauss_s} \end{figure} \section{Inverse multiquadrics reconstruction}\label{sec: imqRec} We now consider the same reconstruction problem by using the inverse multiquadrics as kernel function: \begin{equation*} K(x,y)=\frac{1}{\sqrt{1+\varepsilon^{2}\norm{x-y}^2}}. \end{equation*} As we saw in Section~\ref{subsec: regularization}, $K$ does not admit a finite Radon transform and so we have to multiply $K$ by another window function $w$ of the form $w=w(\norm{x-y})$, so that $R^{y}[K(x,y)w](t,\theta)<\infty$ for all $t,\theta$. The window function we consider is the characteristic function of a compact set: \begin{equation*} w=\chi_{[-L,L]}(\norm{x-y}), \qquad L\gg0. \end{equation*} As in the Gaussian case we use the shift property of the Radon transform and we first compute \begin{align*} R_{L}[k(y)](t,\theta)&=R[K(0,y)\chi_{[-L,L]}(\norm{y})](t,\theta)=\\ &=\int_{\numberset{R}}{\frac{1}{\sqrt{1+\varepsilon^{2}(t^{2}+s^{2})}}\chi_{[-L,L]}(\sqrt{t^{2}+s^{2}})\,ds}=\\ &=\int_{-\sqrt{L^{2}-t^{2}}}^{\sqrt{L^{2}-t^{2}}}{\frac{1}{\sqrt{c^{2}+(\varepsilon s)^{2}}}\,ds} \end{align*} where $c=\sqrt{1+\varepsilon^{2}t^{2}}$ and we assume $|t|<L$. 
We then apply the substitution $u=\varepsilon s$ in the integral \begin{align}\label{eq: asinh_sqrt} \frac{1}{\varepsilon}\int_{-\varepsilon\sqrt{L^{2}-t^{2}}}^{\varepsilon\sqrt{L^{2}-t^{2}}}{\frac{1}{\sqrt{c^{2}+u^{2}}}\,du}&=\left[\frac{2}{\varepsilon}\text{asinh}\left(\frac{u}{c}\right)\right]_{0}^{\varepsilon\sqrt{L^{2}-t^{2}}}=\frac{2}{\varepsilon}\text{asinh}\left(\varepsilon\sqrt{\frac{L^{2}-t^{2}}{1+\varepsilon^{2}t^{2}}}\right), \end{align} where \begin{equation*} \text{asinh}(x)=\log{(x+\sqrt{1+x^{2}})}, \end{equation*} so we can also write the integral \eqref{eq: asinh_sqrt} as \begin{align*} \frac{2}{\varepsilon}\left[\text{asinh}\left(\frac{u}{c}\right)\right]_{0}^{\varepsilon\sqrt{L^{2}-t^{2}}}&=\frac{2}{\varepsilon}\left[\log(u+\sqrt{c^{2}+u^{2}})\right]_{0}^{\varepsilon\sqrt{L^{2}-t^{2}}}=\\ &=\frac{2}{\varepsilon}\left(\log{(\varepsilon\sqrt{L^{2}-t^{2}}+\sqrt{1+\varepsilon^{2}L^{2}})}-\frac{1}{2}\log{(1+\varepsilon^{2}t^{2})} \right). \end{align*} We conclude that the basis associated to the kernel $Kw$ is \begin{equation*} \boxed{ b_{j}(x)=\frac{2}{\varepsilon}\text{asinh}\left(\varepsilon\sqrt{\frac{L^{2}-(t_{j}-x\cdot v_{j})^{2}}{1+\varepsilon^{2}(t_{j}-x\cdot v_{j})^{2}}}\right)\chi_{[-L,L]}(t_{j}-x\cdot v_{j}), } \end{equation*} where, as usual, $v_{j}=(\cos{\theta_{j}},\sin{\theta_{j}})$. We can now compute the matrix $A=(a_{k,j})_{k,j=1}^{n}$: \begin{equation*} a_{k,j}=R[b_{j}(x)](r,\varphi)=\int_{\numberset{R}}{\frac{2}{\varepsilon}\text{asinh}\left(\varepsilon\sqrt{\frac{L^{2}-(as+b)^{2}}{1+\varepsilon^{2}(as+b)^{2}}}\right)\chi_{[-L,L]}(as+b)\,ds}, \end{equation*} where $a,b$ are defined as usual and $r=t_{k}$, $\varphi=\theta_{k}$. We observe that if $a=0$, then \begin{equation*} a_{k,j}=\frac{2}{\varepsilon}\text{asinh}\left(\varepsilon\sqrt{\frac{L^{2}-b^{2}}{1+\varepsilon^{2}b^{2}}}\right)\int_{\numberset{R}}{\chi_{[-L,L]}(b)\,ds}=+\infty \end{equation*} when $|b|<L$, that is our case. 
So we have to consider a further regularization of $R$. We choose to truncate, i.e. we compute \begin{equation*} a_{k,j}=R_{H}[b_{j}(x)](r,\varphi)=R[b_{j}(x)\chi_{[-H,H]}(\norm{x})],\qquad H>0. \end{equation*} As for the Gaussian case, we have two options: consider the regularization $R_{H}$ for all values of $k$ and $j$ or use it only when $\theta_{k}=\theta_{j}$ i.e. when $a=0$. As before we consider the first option since the resulting matrix has a better condition number. Thus, we obtain \begin{equation*} a_{k,j}=\frac{2}{\varepsilon}\int_{\numberset{R}}{\text{asinh}\left(\varepsilon\sqrt{\frac{L^{2}-(as+b)^{2}}{1+\varepsilon^{2}(as+b)^{2}}}\right)\chi_{[-L,L]}(as+b)\chi_{[-H,H]}(\sqrt{s^{2}+r^{2}})\,ds} \end{equation*} that in the case $a=0$ becomes \begin{equation*} a_{k,j}=\frac{4}{\varepsilon}\text{asinh}\left(\varepsilon\sqrt{\frac{L^{2}-b^{2}}{1+\varepsilon^{2}b^{2}}}\right)\sqrt{H^{2}-r^{2}}, \end{equation*} provided that $|b|<L$ and $|r|<H$. In the case $a\neq0$ we consider the substitution $u=\varepsilon(as+b)$ that leads us to the integral \begin{align} \boxed{ a_{k,j}=\frac{2}{\varepsilon^{2}a}\int_{c_{1}}^{c_{2}}{\text{asinh}\left(\sqrt{\frac{\varepsilon^{2}L^{2}-u^{2}}{1+u^{2}}}\right)\,du} } \label{eq: int_asinh} \end{align} \begin{align*} &c_{1}=\varepsilon\max{(-L,-|a|\sqrt{H^{2}-r^{2}}+b)} & &c_{2}=\varepsilon\min{(L,|a|\sqrt{H^{2}-r^{2}}+b)}. \end{align*} All what we have to do now is to compute integrals \eqref{eq: int_asinh}. The computation of these integrals can be found in appendix A. Applying this method again to the crescent-shaped phantom, choosing $\varepsilon=30$, $H=L=20\max|t_{j}|$ we obtain the reconstruction shown in Figure \ref{fig: imq_trunc}. We observe that we have acceptable reconstruction only with parallel beam geometry data, to obtain good results also with scattered data we should consider another window function instead of the characteristic function. 
\begin{figure} \caption{Inverse multiquadric reconstruction of the crescent-shaped phantom using $\varepsilon=30$, $H=L=20$.} \label{fig: imq_trunc_pb} \label{fig: imq_trunc_s} \label{fig: imq_trunc} \end{figure} \section{Multiquadrics reconstruction} We now consider the multiquadric kernel \begin{equation*} K(x,y)=\sqrt{1+\rho^{2}\norm{x-y}^2}, \qquad \rho>0. \end{equation*} As in the case of inverse multiquadrics, $K$ is not integrable on any line in the $(y_{1},y_{2})$ plane. The approach we follow to regularize the problem is to consider a Gaussian weighting function for computing both basis functions $b_{j}$ and the matrix $A$. We start with the Gaussian-filtered basis \begin{equation*} b_{j}(x)=R^{y}[K(x,y)e^{-\varepsilon^{2}\norm{x-y}^{2}}](t_{j},\theta_{j}), \quad \varepsilon>0. \end{equation*} We recall that this operation corresponds to use kernel $\tilde{K}(x,y)=K(x,y)e^{-\varepsilon^{2}\norm{x-y}^{2}}$ in place of $K$. Proceeding as usual, we first compute $b_{j}(x)$ then the coefficients $a_{j,k}$. Now, \begin{align*} b_{j}(0)&=\int_{\numberset{R}}{\sqrt{1+\rho^{2}(t^{2}+s^{2})}e^{-\varepsilon^{2}(t^{2}+s^{2})}\,ds}=\\ &=2\rho e^{\frac{\varepsilon^{2}}{\rho^{2}}}\int_{0}^{+\infty}{\sqrt{\frac{1}{\rho^{2}}+t^{2}+s^{2}}\ e^{-\varepsilon^{2}(\frac{1}{\rho^{2}}+t^{2}+s^{2})}\,ds}. \end{align*} Setting $c=\sqrt{\frac{1}{\rho^{2}}+t^{2}}$ and then integrating by parts, we obtain \begin{align*} b_{j}(0)&=2\rho e^{\frac{\varepsilon^{2}}{\rho^{2}}}\left\{c^{2}\left[\frac{1}{4}\sinh{(2s)}+\frac{s}{2}\right]_{s=0}^{+\infty}+\right.\\ &\left. 
-\int_{0}^{+\infty}{c^{2}\left[\left(\frac{1}{4}\sinh{(2s)}+\frac{s}{2}\right)e^{-\varepsilon^{2}(c^{2}+s^{2})}\right](-2\varepsilon^{2}s)e^{-\varepsilon^{2}(c^{2}+s^{2})}\,ds}\right\}=\\ &=c^{2}\rho e^{\frac{\varepsilon^{2}}{\rho^{2}}}\left[\left(\frac{1}{2}\sinh{(2s)}+s\right) e^{-\varepsilon^{2}(c^{2}+s^{2})}\right]_{s=0}^{+\infty}+\\ &+2c^{2}\rho e^{\frac{\varepsilon^{2}}{\rho^{2}}}\varepsilon^{2}\int_{0}^{+\infty}{\left(\frac{1}{2}\sinh{(2s)}+s\right) se^{-\varepsilon^{2}(c^{2}+s^{2})}\,ds}=\\ &=c^{2}\rho e^{\frac{\varepsilon^{2}}{\rho^{2}}}I_{1}+2c^{2}\rho e^{\frac{\varepsilon^{2}}{\rho^{2}}}\varepsilon^{2}I_{2}, \end{align*} where \begin{align*} I_{1}=\lim_{r\rightarrow+\infty}{\left[\left(\frac{1}{2}\sinh{(2r)}+r\right) e^{-\varepsilon^{2}(c^{2}+r^{2})}\right]}-e^{-\varepsilon^{2}c^{2}}(\frac{1}{2}\sinh{(0)}+0)=0 \end{align*} and \begin{align*} I_{2}&=\frac{1}{2}\int_{0}^{+\infty}{s\sinh{(2s)}e^{-\varepsilon^{2}(c^{2}+s^{2})}\,ds}+\int_{0}^{+\infty}{s^{2}e^{-\varepsilon^{2}(c^{2}+s^{2})}\,ds}=\\ &=\frac{1}{2}e^{-\varepsilon^{2}c^{2}}\frac{\sqrt{\pi}}{2}\frac{e^{\varepsilon^{-2}}}{\varepsilon^{3}}+e^{-\varepsilon^{2}c^{2}}\frac{\sqrt{\pi}}{4}\frac{1}{\varepsilon^{3}}=\frac{\sqrt{\pi}}{4}\frac{e^{-\varepsilon^{2}c^{2}}}{\varepsilon^{3}}e^{\varepsilon^{-2}}. \end{align*} We conclude that \begin{equation*} b_{j}(0)=\frac{\sqrt{\pi}\rho}{2\varepsilon}\left(\frac{1}{\rho^{2}}+t^{2}\right)e^{-\varepsilon^{2}t^{2}+\varepsilon^{-2}} \end{equation*} and so \begin{equation*} \boxed{ b_{j}(x)=\frac{\sqrt{\pi}\rho}{2\varepsilon}\left(\frac{1}{\rho^{2}}+(t_{j}-x\cdot v_{j})^{2}\right)e^{-\varepsilon^{2}(t_{j}-x\cdot v_{j})^{2}+\varepsilon^{-2}}, \quad v_{j}=(\cos{\theta_{j}},\sin{\theta_{j}}). } \end{equation*} In order to compute the matrix $A$, we consider $R[b_{j}(x)](r,\varphi)$ . 
Setting $x_{s}=(r\cos{\varphi}-s\sin{\varphi},r\sin{\varphi}+s\cos{\varphi})$, we have \begin{align*} a_{k,j}&=R[b_{j}(x)](r,\varphi)=\int_{\numberset{R}}{\frac{\sqrt{\pi}\rho}{2\varepsilon}\left(\frac{1}{\rho^{2}}+(t-x_{s}\cdot v)^{2}\right)e^{-\varepsilon^{2}(t-x_{s}\cdot v)^{2}+\varepsilon^{-2}}\,ds}=\\ &=\frac{\sqrt{\pi}\rho}{2\varepsilon}e^{\varepsilon^{-2}}\left[ \int_{\numberset{R}}{\rho^{-2}e^{-\varepsilon^{2}(as+b)^{2}}\,ds}+\int_{\numberset{R}}{(as+b)^{2}e^{-\varepsilon^{2}(as+b)^{2}}\,ds} \right], \end{align*} where $a=\sin{(\varphi-\theta)}$ and $b=t-r\cos{(\varphi-\theta)}$. If $a=0$ this integral is infinite. To avoid this case we consider the regularization $R_{\nu}$ of $R$, that is \begin{align*} R_{\nu}[b_{j}(x)](r,\varphi)&=\frac{\sqrt{\pi}\rho}{2\varepsilon}e^{\varepsilon^{-2}}\int_{\numberset{R}}{\left(\frac{1}{\rho^{2}}+(t-x_{s}\cdot v)^{2}\right)e^{-\varepsilon^{2}(t-x_{s}\cdot v)^{2}}e^{-\nu^{2}\norm{x_{s}}^{2}}\,ds}=\\ &=\frac{\sqrt{\pi}\rho}{2\varepsilon}e^{\varepsilon^{-2}}\int_{\numberset{R}}{\left(\frac{1}{\rho^{2}}+(as+b)^{2}\right)e^{-\varepsilon^{2}(as+b)^{2}-\nu^{2}(r^{2}+s^{2})}\,ds}=\\ &=C_{\varepsilon,\nu}\left[ \int_{\numberset{R}}{\rho^{-2}e^{-(cs+d)^{2}}\,ds}+\int_{\numberset{R}}{(as+b)^{2}e^{-(cs+d)^{2}}\,ds} \right], \end{align*} where \begin{align*} c&=\sqrt{\nu^{2}+\varepsilon^{2}a^{2}}, & d&=\frac{ab\varepsilon^{2}}{c}, & C_{\varepsilon,\nu}&=\frac{\sqrt{\pi}\rho}{2\varepsilon}\exp{(\varepsilon^{-2}+d^{2}-\varepsilon^{2}b^{2}-\nu^{2}r^{2})}. 
\end{align*} Since \begin{equation*} \int_{\numberset{R}}{(as+b)^{2}e^{-(cs+d)^{2}}\,ds}=\frac{\sqrt{\pi}}{2|c|^{3}}(a^{2}(2d^{2}+1)-4abcd+2b^{2}c^{2}), \end{equation*} we conclude that \begin{equation*} \boxed{ a_{k,j}=\frac{\pi\exp{\left(\frac{1}{\varepsilon^{2}}-\frac{\nu^{2}\varepsilon^{2}b^{2}}{\nu^{2}+\varepsilon^{2}a^{2}}-\nu^{2}r^{2}\right)}}{2\varepsilon\sqrt{\nu^{2}+\varepsilon^{2}a^{2}}}\left[\frac{1}{\rho} +\frac{\rho}{2}\frac{a^{2}(\nu^{2}+\varepsilon^{2}a^{2})+2b^{2}\nu^{4}}{(\nu^{2}+\varepsilon^{2}a^{2})^{2}}\right] } \end{equation*} Figure \ref{fig: mq_gauss} shows the result of using the multiquadric kernel with Gaussian filtering for the reconstruction of the crescent-shaped phantom. \begin{figure} \caption{Multiquadric reconstruction of the crescent-shaped phantom using $\rho=1,\ \varepsilon=30,\ \nu=0.8.$} \label{fig: mq_gauss_pb} \label{fig: mq_gauss_s} \label{fig: mq_gauss} \end{figure} \section{Compactly supported radial basis functions} Another important class of positive definite functions we consider is that of the compactly supported radial basis functions. If a function has compact support, then it is automatically strictly positive definite and it is strictly positive definite on $\numberset{R}^{d}$ only for a fixed maximum value of the dimension $d$. Moreover one can show that there do not exist compactly supported radial functions that are strictly conditionally positive definite of order $m>0$ (see \cite{WU} for more information about compactly supported radial functions). \subsubsection{Wendland's compactly supported functions} A popular family of compactly supported functions was introduced by Wendland \cite{WEND}. 
Wendland starts with the truncated power function $\varphi_{l}(r)=(1-r)^{l}_{+}$, which is strictly positive definite and radial on $\numberset{R}^{d}$ for $d\leq2l-1$, and then applies repeatedly the integral operator $\mathcal{I}$ defined as follows: \begin{definition} Let $\varphi:[0,\infty)\rightarrow\numberset{R}$ such that $t\varphi(t)\in L^{1}[0,\infty)$, then we define \begin{equation*} \mathcal{I}\varphi(r)=\int_{r}^{\infty}{t\varphi(t)\,dt}, \quad r\geq0. \end{equation*} \end{definition} We can now define Wendland's compactly supported functions: \begin{definition} With $\varphi_{l}(r)=(1-r)^{l}_{+}$, we define \begin{equation*} \varphi_{d,k}=\mathcal{I}^{k}\varphi_{\lfloor d/2\rfloor+k+1} \end{equation*} \end{definition} \begin{example}\label{ex: wend} The explicit representations of $\varphi_{d,k}$ for $k=0,1,2,3$ are: \begin{align*} \varphi_{d,0}(r)&=(1-r)^{l}_{+},\\ \varphi_{d,1}(r)&=(1-r)^{l+1}_{+}[(l+1)r+1],\\ \varphi_{d,2}(r)&=(1-r)^{l+2}_{+}[(l^2+4l+3)r^2+(3l+6)r+3],\\ \varphi_{d,3}(r)&=(1-r)^{l+3}_{+}[(l^3+9l^2+23l+15)r^3+(6l^2+36l+45)r^2+(15l+45)r+15], \end{align*} where $l=\lfloor d/2\rfloor +k+1$ and equalities are up to a multiplicative constant. \end{example} We observe that all the functions in Example \ref{ex: wend} are compactly supported and have a polynomial representation on their support. This is true in general as stated in the following \begin{theorem} The functions $\varphi_{d,k}$ are strictly positive definite and radial on $\numberset{R}^{d}$ and are of the form \begin{equation*} \varphi_{d,k}(r)=\left\{ \begin{aligned} &p_{d,k}(r) & &\text{if}\ r\in[0,1]\\ &0 & &\text{if}\ r>1, \end{aligned} \right. \end{equation*} where $p_{d,k}$ is a polynomial of degree $\lfloor d/2\rfloor+3k+1$. Moreover $\varphi_{d,k}\in C^{2k}(\numberset{R}^{d})$ are unique up to a constant factor and the polynomial degree is minimal for given space dimension $d$ and smoothness $2k$. \end{theorem} The proof of this theorem can be found in \cite{WEND}. 
\begin{figure} \caption{Plot of Wendland's compactly supported functions.} \label{fig: compactlySupported} \end{figure} \subsection{Compactly supported kernel reconstruction}\label{subsec: compSupp} Compactly supported radial basis functions can be used as well in image reconstruction: if $\varphi(r)$ is a compactly supported function, one sets $K(x,y)=\varphi(\varepsilon\norm{x-y}), \ \varepsilon>0$, and uses $K$ as kernel function. The property of compact support can be useful in the computation of the Radon transform, in particular if $\varphi$ is compactly supported and of class at least $C^{0}$ on its support, then the Radon transform $Rf$ is well defined, indeed \begin{align*} |Rf|=&\left|\int_{\numberset{R}}{f(x_{s})\,ds}\right|=\left|\int_{Supp(f)}{f(x_{s})\,ds}\right|\leq\int_{Supp(f)}{|f(x_{s})|\,ds}\leq\\ &\leq\sup_{x\in Supp(f)}{|f(x)|}\cdot m(Supp(f))<\infty. \end{align*} For example Wendland's and Wu's compactly supported functions, having a polynomial representation on their domain, admit finite Radon transform. This means that for this class of functions the basis $b_{j}(x)=R^{y}[K(x,y)](t_{j},\theta_{j})$ is well defined. However the fact that the kernel $K$ is compactly supported does not imply that $b_{j}(x)$ has finite Radon transform and then matrix $A=(a_{j,k})$ can have non finite entries as shown in the following example. \begin{example}\label{ex: radon_wend} Consider the Wendland's function $\varphi_{2,1}(r)=(1-r)_{+}^{4}(4r+1)$ and set \begin{equation*} K(x,y)=\varphi_{2,1}(\varepsilon\norm{x-y})=(1-\varepsilon\norm{x-y})_{+}^{4}(4\varepsilon\norm{x-y}+1) \end{equation*} The support $\{(x,y):\ \varepsilon\norm{x-y}\leq1\}$ of $K$ is compact. 
We compute $b_{j}(x)$ using the shift property of the Radon transform: \begin{align*} R^{y}[K(0,y)](t,\theta)&=\int_{\numberset{R}}{(1-\varepsilon\sqrt{t^{2}+s^{2}})^{4}_{+}(4\varepsilon\sqrt{t^{2}+s^{2}}+1)\,ds}=\\ &=\int_{\sqrt{t^{2}+s^{2}}\leq\frac{1}{\varepsilon}}{4\varepsilon(1-\varepsilon\sqrt{t^{2}+s^{2}})^{4}\sqrt{t^{2}+s^{2}}\,ds}+\\ &+\int_{\sqrt{t^{2}+s^{2}}\leq\frac{1}{\varepsilon}}{(1-\varepsilon\sqrt{t^{2}+s^{2}})^{4}\,ds}, \end{align*} thus \begin{equation*} R^{y}[K(0,y)](t,\theta)=\left\{ \begin{aligned} &I_{1}+I_{2} & &\text{if}\ |t|\leq\frac{1}{\varepsilon}\\ &0 & &\text{if} \ |t|>\frac{1}{\varepsilon}, \end{aligned} \right. \end{equation*} with \begin{align*} I_{1}&=\int_{|s|\leq\sqrt{\frac{1}{\varepsilon^{2}}-t^{2}}}{4\varepsilon\sqrt{t^{2}+s^{2}}(1-\varepsilon\sqrt{t^{2}+s^{2}})^{4}\,ds}=\\ &=\frac{1}{\varepsilon}\left[p_{1}(\varepsilon t)\text{acosh}\left(\frac{1}{\varepsilon|t|}\right)-p_{2}(\varepsilon t)\sqrt{1-\varepsilon^{2}t^{2}}\right], \end{align*} where \begin{align*} &p_{1}(r)=\frac{r^2}{2}(5r^4+36r^2+8), & &p_{2}(r)=\frac{1}{30}(437r^4+360r^2-8)\\ &\text{acosh}(r)=\log(r+\sqrt{r^2-1}). \end{align*} While the second integral is \begin{align*} I_{2}&=\int_{|s|\leq\sqrt{\frac{1}{\varepsilon^{2}}-t^{2}}}{(1-\varepsilon\sqrt{t^{2}+s^{2}})^{4}\,ds}=\\ &=\frac{1}{\varepsilon}\left[-p_{3}(\varepsilon t)\text{acosh}\left(\frac{1}{\varepsilon|t|}\right)+p_{4}(\varepsilon t)\sqrt{1-\varepsilon^{2}t^{2}}\right], \end{align*} with \begin{align*} &p_{3}(r)=r^2(3r^2+4), & &p_{4}(r)=\frac{1}{15}(16r^4+83r^2+6). 
\end{align*} Then we obtain \begin{align*} I_{1}+I_{2}&=\frac{1}{\varepsilon}\left[(p_1-p_{3})(\varepsilon t)\text{acosh}\left(\frac{1}{\varepsilon|t|}\right)+(p_{4}-p_{2})(\varepsilon t)\sqrt{1-\varepsilon^{2}t^{2}}\right]=\\ &=\frac{1}{\varepsilon}\left[\frac{5}{2}\varepsilon^{4}t^{4}(\varepsilon^2t^2+6)\text{acosh}\left(\frac{1}{\varepsilon|t|}\right)-\frac{1}{6}(81\varepsilon^4t^4+28\varepsilon^2t^2-4)\sqrt{1-\varepsilon^{2}t^{2}}\right]. \end{align*} We observe that $I_{1}$ and $I_{2}$ are not well defined for $t=0$, in this particular case it is easy to prove that the value of the integral is $\frac{2}{3\varepsilon}$, moreover $\lim_{t\rightarrow0}{(I_{1}+I_{2})}=\frac{2}{3\varepsilon}$ so we can define the continuous function \begin{equation*} g_{\varepsilon}(t)=\left\{ \begin{aligned} &I_{1}+I_{2} & &\text{if} \ \varepsilon|t|\leq1, \ t\neq0\\ &\frac{2}{3\varepsilon} & &\text{if} \ t=0, \end{aligned} \right. \end{equation*} then $b_{j}(x)$ is given by \begin{equation*} b_{j}(x)=\left\{ \begin{aligned} &g_{\varepsilon}(t_j-x\cdot v_j) & &\text{if} \ \varepsilon|t_j-x\cdot v_j|\leq1\\ &0 & &\text{if} \ \varepsilon|t_j-x\cdot v_j|>1. \end{aligned} \right. \end{equation*} For fixed $x$, $R^{y}[K(x,y)](t,\theta)$ as function of $(t,\theta)$ has compact support, but if we consider fixed values $(t_j,\theta_j)$, then $R^{y}[K(x,y)](t,\theta)=b_{j}(x)$ as function of $x$ has $\{x\in\numberset{R}^{2}: \ \varepsilon|t_j-x\cdot v_j|\leq1\}$ as support, that is the strip between lines $\varepsilon(t_j-x\cdot v_j)=\pm1$ that is not limited and thus not compact. If we now compute $a_{k,j}=R[b_{j}(x)](t_{k},\theta_{k})$ we see that this quantity can be infinity: \begin{equation*} a_{k,j}=R[b(x)](r,\varphi)=\int_{\numberset{R}}{b(x_{s})\,ds}, \end{equation*} where $x_{s}=(r\cos{\varphi}-s\sin{\varphi},r\sin{\varphi}+s\cos{\varphi})$. We set as usual $t-x_{s}\cdot v=as+b$, then \begin{equation*} a_{k,j}=\int_{\varepsilon|as+b|\leq1}{g_{\varepsilon}(as+b)\,ds}. 
\end{equation*} If $a\neq0$ we can set $u=\varepsilon(as+b)$ obtaining \begin{align*} a_{k,j}&=\frac{1}{\varepsilon^{2}a}\int_{|u|\leq1}{\left[\frac{5}{2}u^{4}(u^2+6)\text{acosh}\left(\frac{1}{|u|}\right)-\frac{1}{6}(81u^4+28u^2-4)\sqrt{1-u^{2}}\right]\,du}=\\ &=-\frac{9}{112}\frac{\pi}{\varepsilon^2a} \end{align*} But if $a=0$ we have \begin{align*} a_{k,j}&=\frac{1}{\varepsilon^{2}}\left[\frac{5}{2}\varepsilon^{4}b^{4}(\varepsilon^2b^2+6)\text{acosh}\left(\frac{1}{\varepsilon|b|}\right)+\right.\\ &\left.-\frac{1}{6}(81\varepsilon^4b^4+28\varepsilon^2b^2-4)\sqrt{1-\varepsilon^{2}b^{2}}\right]\int_{\varepsilon|b|\leq1}{\,ds} \end{align*} that is 0 if $\varepsilon|b|>1$ but is $\infty$ if $\varepsilon|b|\leq1$. \end{example} In order to have a matrix $A$ with all finite entries, we again consider the regularization $R_{w}$ of $R$ for some weighting function $w$. Working with compactly supported radial basis functions it is natural to use another compactly supported function as weighting function, in this way we are sure that $R_{w}[b_{j}]=R[b_{j}w]$ is finite (indeed $b_{j}w$ is compactly supported) and it is possible to compute analytically the value of $a_{k,j}$. We consider the following case: \begin{align*} K(x,y)&=\varphi_{2,0}(\varepsilon\norm{x-y})=(1-\varepsilon\norm{x-y})_{+}^{2}, \quad \varepsilon>0,\\ w(x)&=(1-\nu^{2}\norm{x}^{2})_{+},\quad \nu>0. \end{align*} We start by computing \begin{align*} R[K(0,y)](t,\theta)&=\int_{\numberset{R}}{(1-\varepsilon\sqrt{t^2+s^2})_{+}^{2}\,ds}=\int_{\varepsilon\sqrt{t^2+s^2}\leq1}{(1-\varepsilon\sqrt{t^2+s^2})^2\,ds}=\\ &=\left\{ \begin{aligned} &g(t) & &\text{if}\ |t|\leq\frac{1}{\varepsilon}\\ &0 & &\text{if}\ |t|>\frac{1}{\varepsilon} \end{aligned} \right. 
\end{align*} where \begin{align*} g(t)&=\int_{|s|\leq\sqrt{\frac{1}{\varepsilon^2}-t^2}}{(1-\varepsilon\sqrt{t^2+s^2})^2\,ds}=\\ &=\left\{ \begin{aligned} &\frac{2}{\varepsilon}\left[\frac{\sqrt{1-\varepsilon^2t^2}}{3}(2\varepsilon^2t^2+1)-\varepsilon^2t^2\text{acosh}\left(\frac{1}{\varepsilon|t|}\right)\right] & &\text{if}\ t\neq0\\ &\frac{2}{3\varepsilon} & &\text{if}\ t=0 \end{aligned} \right. \end{align*} We conclude that \begin{align*} b_{j}(x)=R^{y}[K(x,y)](t_j,\theta_j)=\left\{ \begin{aligned} &g(t_j-x\cdot v_j) & &\text{if}\ |t_j-x\cdot v_j|\leq\frac{1}{\varepsilon}\\ &0 & &\text{if}\ |t_j-x\cdot v_j|>\frac{1}{\varepsilon} \end{aligned} \right. \end{align*} As in Example \ref{ex: radon_wend}, it is possible to show that $a_{k,j}=\infty$ if $a=0$ and $\varepsilon|b|\leq1$, where $a=\sin{(\theta_{k}-\theta_{j})}$ and $b=t_{j}-t_k\cos{(\theta_{k}-\theta_{j})}$, so we introduce the regularization of $R$ by multiplication for the weighting function $w=(1-\nu^{2}\norm{x}^{2})_{+}$ and we have \begin{equation*} a_{k,j}=R_{w}[b_{j}(x)](t_{k},\theta_{k}). \end{equation*} Using simpler notation, \begin{equation*} R[wb(x)](r,\varphi)=\int_{\numberset{R}}{b(x_{s})(1-\nu^{2}\norm{x_{s}}^2)_{+}\,ds}. \end{equation*} The computation of this integral can be found in appendix B. \section{Scaled problem}\label{sec: scaled_problem} The Hermite-Birkhoff reconstruction problem can be expressed as finding a function $s\in S$ satisfying \begin{equation} \lambda_{j}(s)=\lambda_{j}(f) \quad \forall\ j=1,\ldots,n, \label{eq: hb_prob} \end{equation} where $\lambda_{1},\ldots,\lambda_{n}$ are linearly independent linear operators and \begin{equation*} S=\left\{\sum_{j=1}^{n}{c_{j}\lambda_{j}^{y}K(\cdot,y)}:\ c_{j}\in\numberset{R},\ K \text{positive definite kernel}\right\}. 
\end{equation*} By linearity, equation~\eqref{eq: hb_prob} can be written as a linear system $Ac=f|_{\Lambda}$, where the elements of the matrix $A$ are given by $a_{k,j}=\lambda^{x}_{k}\lambda^{y}_{j}[K(x,y)]$ and $(f|_{\Lambda})_{j}=\lambda_{j}(f)$. Since the matrix $A$ can be highly ill-conditioned, it can be convenient to consider the scaled problem \cite{ISKE1,ISKE2}. For $h>0$ the scaled reconstruction problem is \begin{equation*} \lambda_{j}(s^{h}(h\cdot))=\lambda_{j}(f(h\cdot)) \quad \forall\ j=1,\ldots,n, \end{equation*} where \begin{equation*} s^{h}\in S^{h}=\left\{\sum_{j=1}^{n}{c_{j}\lambda_{j}^{y}K(\cdot,hy)}:\ c_{j}\in\numberset{R}\right\}. \end{equation*} In this way one obtains the linear system $A^{h}c=f^{h}|_{\Lambda}$ given by \begin{align*} a_{k,j}^{h}&=\lambda_{k}^{x}\lambda_{j}^{y}[K(hx,hy)], & &(f^{h}|_{\Lambda})_{j}=\lambda_{j}(f(h\cdot)). \end{align*} In the case of image reconstruction the operator $\lambda_{j}$ represents the Radon transform evaluated at point $(t_{j},\theta_{j})$. Thus, in order to compute $a_{k,j}^{h}$ and $f_{j}^{h}$ one has to understand the relationship between the Radon transform of a function $f$ and the Radon transform of the scaled function $f(h\cdot)$. This relationship is given by the following \begin{theorem}[Dilatation-property of the Radon transform]\label{thm: dilatation_radon} Let $f:\numberset{R}^{2}\rightarrow\numberset{R}$ be such that $R[f(x)](t,\theta)=g(t,\theta)$, then for all $h>0$ \begin{equation*} R[f(hx)](t,\theta)=\frac{1}{h}g(ht,\theta). \end{equation*} \proof \begin{align*} R[f(hx)](t,\theta)&=\int_{\numberset{R}}{f(ht\cos{\theta}-hs\sin{\theta},ht\sin{\theta}+hs\cos{\theta})\,ds}=\\ &=\int_{\numberset{R}}{f(ht\cos{\theta}-r\sin{\theta},ht\sin{\theta}+r\cos{\theta})\,\frac{dr}{h}}=\\ &=\frac{1}{h}R[f(x)](th,\theta). 
\end{align*} \endproof \end{theorem} Thanks to this property it is possible to compute $a_{k,j}^{h},\ f_{k}^{h}$ and $b_{j}^{h}(x)=\lambda_{j}^{y}K(x,hy)$: \begin{align*} &f_{k}^{h}=\lambda_{k}^{x}(f(hx))=\frac{1}{h}R[f(x)](ht_{k},\theta_{k})\\ &b_{j}^{h}(x)=\frac{1}{h}R^{y}[K(\cdot,y)](ht_{j},\theta_{j})\\ &a_{k,j}^{h}=\lambda_{k}^{x}\lambda_{j}^{y}[K(hx,hy)]=\frac{1}{h}\lambda_{k}^{x}[b_{j}^{h}(hx)]=\frac{1}{h^2}R[b_{j}(x)](ht_{k},\theta_{k}) \end{align*} We note that in order to compute $f_{k}^{h}$ we need to know the Radon transform of the unknown function $f$ at the points $(ht_{k},\theta_{k})$, which is not possible if the X-ray machine gives us data only at the points $(t_{k},\theta_{k})$. However, for our aim, we assume to know the analytical expression of $R[f]$. The solution of the scaled reconstruction problem is \begin{equation*} s^{h}(hx)=\sum_{j=1}^{n}{c_{j}b_{j}^{h}(hx)}, \end{equation*} with $c=(c_{1},\ldots,c_n)^{T}$ solution of the linear system $A^{h}c=f^{h}$. It is important to notice that thanks to the dilatation property (Theorem \ref{thm: dilatation_radon}) we do not have to compute the Radon transform for every different value of $h$: we only need to compute it in the case $h=1$, multiply by $\frac{1}{h}$ and then scale the evaluation points $(t_{k},\theta_{k})$ to $(ht_{k},\theta_{k})$. \chapter{Numerical results} In the previous chapters we saw some theoretical tools that can be used to obtain the value of a function $f:\numberset{R}^{2}\rightarrow\numberset{R}$ starting from a sampling of its Radon transform. In chapter \ref{chap: fourier_methods} we studied the continuous problem and we found an analytical inversion formula for the Radon transform: the back projection formula. Then, in order to use this formula in real applications, we introduced the process of linear filtering and interpolation.
In chapters \ref{chap: art} and \ref{chap: kernelMethods} we followed a different approach: starting from the discrete problem, we look for an approximation of a function, belonging to a particular finite-dimensional space of functions, such that its Radon transform coincides with the measured Radon transform of the unknown function $f$. Kaczmarz's method considers pixel basis functions to determine an approximation of $f$, while kernel-based methods use positive definite functions to generate a function space in which to find a solution. We also saw that in this second case the problem needs some kind of regularization so that the Radon transform of the kernel functions is well defined. What we want to do now is to compare all these methods from a numerical point of view, studying the behavior of the solution and the approximation error as a function of the data and the parameters involved in the algorithms. \section{Optimal parameters} In chapter \ref{chap: kernelRec} we introduced a regularization technique for solving the Hermite-Birkhoff interpolation problem of image reconstruction using kernel based methods. In particular, the original problem was to find $s=\sum_{j=1}^{n}{c_{j}\lambda_{j}^{y}K(\cdot,y)}$ such that $\lambda_{k}f=\lambda_{k}s$ for all $k=1,\ldots,n$, where $\lambda_{k}g=R[g(x)](t_{k},\theta_{k})$ and $K$ is a positive definite kernel. By linearity this problem is equivalent to solving the linear system \begin{equation*} \lambda_{k}f=\sum_{j=1}^{n}{c_{j}\lambda_{k}^{x}\lambda_{j}^{y}K(x,y)}, \quad k=1,\ldots,n. \end{equation*} The main problem found in applying this method is that the Radon transform $\lambda_{k}^{x}\lambda_{j}^{y}K(x,y)$ or $\lambda_{j}^{y}K(x,y)$ can be infinite. The solution we adopted was to consider kernel functions $K$ such that $b_{j}(x)=\lambda_{j}^{y}K(x,y)$ is well defined (e.g.
multiplying any kernel $K(x,y)$ by a suitable function $\phi(\norm{x-y})$) and to substitute the operator $R$ with another linear operator so that, when computing the matrix $A=(a_{k,j})=(\lambda_{k}b_{j}(x))$, we have $a_{k,j}<\infty$ for all $k,j=1,\ldots,n$. In our discussion we chose the operator $R_{w}$ defined by $R_{w}[g]=R[gw]$ where $w$ is an appropriate window function. Both the kernel $K$ and the window function $w$ depend on one or more parameters; in this section we will discuss, thanks to numerical experiments, how these parameters influence the quality of the reconstructed image. In order to do that we will apply our methods on predefined phantoms and by varying a parameter we will see the behavior of the solution. The error measure we will use to determine the quality of the result is the \emph{root mean square error} \begin{equation*} RMSE=\sqrt{\frac{\sum_{i=1}^{m}{(x_{i}-\hat{x}_{i})^{2}}}{m}}, \end{equation*} where $m$ is the dimension of the image, $x_{i}$ and $\hat{x}_{i}$ the gray scale value of pixel $i$ of the original and reconstructed image respectively. The closer the $RMSE$ is to zero, the more accurate the solution is considered. \subsection{Window function parameters} We begin our analysis considering the parameters that influence the operator $R_{w}$ and the window functions introduced in sections \ref{sec: imqRec} and \ref{sec: gaussRec}, i.e. $w_{\nu}(x)=\exp{(-\nu^{2}\norm{x}^{2})}$ and $w_{L}=\chi_{[-L,L]}(\norm{x})$. Let us start with the truncated inverse multiquadric kernel \begin{equation*}K(x,y)=(1+\varepsilon^{2}\norm{x-y}^{2})^{-1/2}\chi_{[-L_{1},L_{1}]}(\norm{x-y}) \end{equation*} and the characteristic window function $w_{L_{2}}=\chi_{[-L_{2},L_{2}]}(\norm{x})$, with $L_{1},L_{2}>2\max{|t_{j}|}$ (as we saw in section \ref{sec: imqRec}).
Varying the parameter $L_{2}$ for fixed values of $\varepsilon$, $L_{1}$ and data\footnote{In this chapter we will always consider the parallel beam geometry as acquisition method of the data.} $\{(t_{j},\theta_{j})\}_{j=1}^{n}$ and applying this reconstruction technique to three different phantoms we can see that the $RMSE$ presents a minimum for a particular value $L_{2opt}$ (Figure \ref{fig: mse_L2_imq56}). This optimal value is influenced by $\varepsilon$, $L_{1}$, $n$ (in particular if the number of data increases, also $L_{2opt}$ increases - cf. Figures \ref{fig: mse_L2_imq1} and \ref{fig: mse_L2_imq3}). We also notice that the $RMSE$ decreases very rapidly for $L_{2}<L_{2opt}$ but for $L_{2}\geq L_{2opt}$ the $RMSE$ is increasing with a very small rate, so one should choose $L_{2}$ in a way to be sure that $L_{2}\geq L_{2opt}$. \begin{figure} \caption{$RMSE$ of inverse multiquadric reconstruction as a function of the parameter $L_{2} \label{fig: mse_L2_imq5} \label{fig: mse_L2_imq6} \label{fig: mse_L2_imq56} \end{figure} \begin{figure} \caption{$RMSE$ of inverse multiquadric reconstruction as a function of the parameter $L_{2} \label{fig: mse_L2_imq1} \label{fig: mse_L2_imq3} \label{fig: mse_L2_imq13} \end{figure} Another advantage in choosing $L_{2}$ large is that the condition number of the matrix $A$ is smaller (see Figure \ref{fig: rcond_L2_imq16}). \begin{figure} \caption{Reciprocal of the condition number of $A$ for inverse multiquadric reconstruction as a function of the parameter $L_{2} \label{fig: rcond_L2_imq1} \label{fig: rcond_L2_imq6} \label{fig: rcond_L2_imq16} \end{figure} However the value of $L_{2}$ does not determine so drastically the behavior of the solution, whose quality remains acceptable for large values of $L_{2}$. More interesting is the case of the Gaussian window function $w(x)=e^{-\nu^{2}\norm{x}^{2}}$. Also in this case there exists an optimal value $\nu_{opt}$ such that $RMSE$ is minimum.
But now for $\nu>\nu_{opt}$ the $RMSE$ increases with a fast rate and so the quality of the reconstruction becomes worse (see for example Figures \ref{fig: mse_nu_gauss3} and \ref{fig: mse_nu_mq2}). The value $\nu_{opt}$ depends on the phantom used, i.e. on data. This is not surprising because we know that the approximation error depends on $|f_{w,k}-f_{k}|$ (see section \ref{subsec: regularization}). On the other hand $\nu_{opt}$ has only small variation w.r.t. the changing of other shape parameters (e.g. is independent on $\varepsilon$ in the case of Gaussian kernel - see Figures \ref{fig: mse_nu_gauss1} and \ref{fig: mse_nu_gauss2}). \begin{figure} \caption{$RMSE$ of Gaussian and multiquadric reconstruction as a function of the parameter $\nu$. (a) Gaussian kernel, bull's eye phantom, $N=50,\ M=40,\ K=64,\ \varepsilon=30$; (b) Gaussian kernel, bull's eye phantom, $N=50,\ M=40,\ K=64,\ \varepsilon=60$; (c) Gaussian kernel, Shepp-Logan phantom, $N=30,\ M=20,\ K=256,\ \varepsilon=50$ (d) Multiquadric kernel, crescent-shaped phantom, $N=30,\ M=20,\ K=64,\ \rho=1,\ \varepsilon=30$.} \label{fig: mse_nu_gauss1} \label{fig: mse_nu_gauss2} \label{fig: mse_nu_gauss3} \label{fig: mse_nu_mq2} \label{fig: mse_nu} \end{figure} The fact that the $RMSE$ is increasing for $\nu>\nu_{opt}$ can be explained considering the condition number $k(A)$ of the system matrix $A$. In fact, $k(A)$ increases with $\nu$ (see Figure \ref{fig: rcond_nu} where the reciprocal of $k(A)$ is plotted in function of $\nu$). The quantity $k(A)=k_{1}(A)$ is the 1-norm condition number of the matrix $A$. Its inverse is estimated using the MATLAB function \texttt{rcond} (see \cite{MATLAB}). 
\begin{figure} \caption{Reciprocal of the condition number of $A$ for Gaussian kernel reconstruction as a function of the parameter $\nu.$ (a) Parameters and data as in Figure \ref{fig: mse_nu_gauss1} \label{fig: rcond_nu_gauss1} \label{fig: rcond_nu_gauss3} \label{fig: rcond_nu} \end{figure} The case of the multiquadrics $K(x,y)=\sqrt{1+\rho^{2}\norm{x-y}^{2}}e^{-\varepsilon^{2}\norm{x-y}^{2}}$ with Gaussian window function is similar to the Gaussian kernel case, provided that $\rho$ is small enough, as explained in the next paragraph. At last we consider the case of compactly supported kernels. Let $K=(1-\varepsilon\norm{x-y})^{2}_{+}$ and $w(x)=(1-\nu^2\norm{x}^{2})_{+}$. Assume $\varepsilon\approx 1$, then it turns out that the $RMSE$ is minimal for $\nu\approx0$ (Figure \ref{fig: mse_nu_compact}). For small values of $\nu$ the reciprocal of the condition number of $A$ is also larger (Figure \ref{fig: rcond_nu_compact}), so it is convenient to use $\nu\approx0$. \begin{figure} \caption{$RMSE$ of compactly supported reconstruction as a function of the parameter $\nu$. $N=30$, $M=20,\ K=64,\ \varepsilon=1.1.$} \label{fig: mse_nu_compact_cre} \label{fig: mse_nu_compact_bull} \label{fig: mse_nu_compact} \end{figure} \begin{figure} \caption{Reciprocal of the condition number of $A$ for compactly supported kernel reconstruction as a function of the parameter $\nu.$} \label{fig: rcond_nu_cre} \label{fig: rcond_nu_bull} \label{fig: rcond_nu_compact} \end{figure} We observe that using small values of $\nu$ corresponds to using a compactly supported window function $w$ with a wide support; in this way one loses less information when filtering the basis functions $b_{j}$ with $w$. We can use the information that the approximation is optimal for $\nu\approx0$ to simplify the expression of the matrix $A$. Indeed, when $a\neq0$, if $\nu\rightarrow0$, then $a_{k,j}\rightarrow\frac{\pi}{6\varepsilon^2 a}$ and we can use this simpler expression of $a_{k,j}$ instead of \eqref{eq: matrix_cs} (see appendix B).
This option is equivalent to considering the regularization $R_{w}$ only when $a=0$, while using the original operator $R$ when $a\neq0$ (that is what we called option 2 in section \ref{subsec: reg_gauss}). Using this second option the behavior of the $RMSE$ becomes more regular (see Figure \ref{fig: mse_nu_compact2}). \begin{figure} \caption{$RMSE$ of compactly supported reconstruction as a function of the parameter $\nu$. Regularization only for $a=0$. Parameters and data as in Figure \ref{fig: mse_nu_compact} \label{fig: mse_nu_compact_cre2} \label{fig: mse_nu_compact_bull2} \label{fig: mse_nu_compact2} \end{figure} Finally we observe that when using more data, one should use a larger value of $\nu$, as shown in Figure \ref{fig: mse_nu_compact_data}. \begin{figure} \caption{$RMSE$ of compactly supported reconstruction as a function of the parameter $\nu$ for the Bull's eye phantom with $N=50$, $M=40,\ K=64,\ \varepsilon=1.1.$} \label{fig: mse_nu_compact_data} \end{figure} \subsection{Kernel shape parameters} We now consider the second main shape parameter involved in kernel methods, i.e. the kernel shape parameter $\varepsilon$. We will assume to work with an optimal window function parameter (as discussed in the previous paragraph). We observe that in the following cases the behavior of the $RMSE$ as a function of $\varepsilon$ is similar to that of the $RMSE$ as a function of $\nu$. \begin{itemize} \item \emph{Inverse multiquadrics}. There is an optimal value $\varepsilon_{opt}$ such that $RMSE$ is minimum, but for $\varepsilon>\varepsilon_{opt}$ the $RMSE$ increases slowly (see Figures \ref{fig: mse_ep_imq1} and \ref{fig: mse_ep_imq2}); \item \emph{Gaussian}. There is an optimal value $\varepsilon_{opt}$ such that $RMSE$ is minimum and for $\varepsilon>\varepsilon_{opt}$ the $RMSE$ increases fast. Moreover $\varepsilon_{opt}$ increases if the total number of data increases (see Figure \ref{fig: mse_ep_gauss1} and \ref{fig: mse_ep_gauss2}).
\end{itemize} \begin{figure} \caption{$RMSE$ of Gaussian and inverse multiquadric reconstruction as a function of the parameter $\varepsilon$. (a) Gaussian kernel, crescent-shaped phantom, $N=30,\ M=20,\ K=64,\ \nu=0.5$; (b) Gaussian kernel, bull's eye phantom, $N=50,\ M=40,\ K=64,\ \nu=0.7$; (c) Inverse multiquadric kernel, crescent-shaped phantom, $N=30,\ M=20,\ K=64,\ L_{1} \label{fig: mse_ep_gauss1} \label{fig: mse_ep_gauss2} \label{fig: mse_ep_imq1} \label{fig: mse_ep_imq2} \label{fig: mse_ep} \end{figure} The difference with the window parameter is that now the condition number $k(A)$ is smaller for large values of $\varepsilon$ (equivalently the reciprocal $k(A)^{-1}$ increases with $\varepsilon$ - Figure \ref{fig: rcond_ep}). \begin{figure} \caption{Reciprocal of the condition number of $A$ for Gaussian and inverse multiquadric kernel reconstruction as a function of the parameter $\varepsilon.$ (a) Parameters and data as in Figure \ref{fig: mse_ep_gauss1} \label{fig: rcond_ep_gauss1} \label{fig: rcond_ep_imq2} \label{fig: rcond_ep} \end{figure} Another parameter involved in the inverse multiquadric reconstruction is $L_{1}$. We observe that below a certain threshold the behavior of the error as a function of $L_{1}$ is chaotic, but above this threshold the $RMSE$ remains almost constant. The reciprocal of the condition number is instead decreasing (Figure \ref{fig: L1_imq}). \begin{figure} \caption{ $RMSE$ and $k^{-1} \label{fig: mse_L1_imq1} \label{fig: rcond_L1_imq1} \label{fig: L1_imq} \end{figure} Consider now the Gaussian-multiquadric kernel \begin{equation*} K(x,y)=\sqrt{1+\rho^{2}\norm{x-y}^{2}}e^{-\varepsilon^{2}\norm{x-y}^{2}}. \end{equation*} In this case we have two different shape parameters $\rho$ and $\varepsilon$. As in the Gaussian case, large values of $\varepsilon$ generate matrices with a better condition number ($\rho$ fixed).
For fixed $\varepsilon\gg\rho$ we have $K(x,y)\approx e^{-\varepsilon^{2}\norm{x-y}^{2}}$, a kernel similar to the Gaussian case, but values $\rho\approx 0$ lead to a larger condition number. \begin{figure} \caption{$RMSE$ and $k^{-1} \label{fig: mse_rho_mq2} \label{fig: rcond_rho_mq2} \label{fig: rho_mq} \end{figure} If $\rho$ is of the same order as $\varepsilon$ then $k(A)$ is small but the $RMSE$ becomes larger. So we have again a situation with an optimal value for $\rho$ and $\varepsilon$ that arises from the trade-off to have a well conditioned matrix and a good approximation of the non-regularized reconstruction problem (trade-off principle \cite{SCHAB}). Moreover, as well as $\varepsilon$, also the optimal value of $\rho$ depends on the number of data and on the phantom. For example, using the crescent-shaped phantom, the value of $\rho_{opt}$ varies from $\approx3$ (for $N=20,\ M=15$) to $\approx7$ (for $N=50,\ M=40$); while considering the Shepp-Logan phantom we obtain $\rho_{opt}<0.5$ (for $N=20,\ M=15$) and $\rho_{opt}<2$ (for $N=50,\ M=40$) (Figure \ref{fig: rho_mq_shepp}). \begin{figure} \caption{$RMSE$ for multiquadric reconstruction of the Shepp-Logan phantom as a function of $\rho$ with $K=256,\ \nu=1.3.$} \label{fig: mse_rho_sl1} \label{fig: mse_rho_sl2} \label{fig: rho_mq_shepp} \end{figure} At last we consider again the compactly supported kernel. Varying $\varepsilon$ we see that the $RMSE$ presents a minimum for an optimal value $\varepsilon_{opt}$ that is between 1 and 2 for both the crescent-shaped and the bull's eye phantoms (Figures \ref{fig: mse_ep_compact_c} and \ref{fig: mse_ep_compact_b}). Considering instead the Shepp-Logan phantom the $RMSE$ decreases if $\varepsilon$ increases (Figure \ref{fig: mse_ep_compact_sl}). However, in all the cases, the $RMSE$ remains almost constant for $\varepsilon$ large enough, while the reciprocal of the condition number increases with $\varepsilon$ (Figure \ref{fig: rcond_ep_compact}).
\begin{figure} \caption{$RMSE$ of compactly supported reconstruction as a function of the parameter $\varepsilon$. $N=30$, $M=20,\ K=64,\ \nu=10^{-6 } \label{fig: mse_ep_compact_c} \label{fig: mse_ep_compact_b} \label{fig: mse_ep_compact_sl} \label{fig: mse_ep_compact} \end{figure} \begin{figure} \caption{Reciprocal of the condition number of the matrix $A$ of compactly supported reconstruction as a function of the parameter $\varepsilon$ with $N=30$, $M=20,\ K=64,\ \varepsilon=1.1.$} \label{fig: rcond_ep_compact} \end{figure} \subsection{Scale parameter} Considering the scaled reconstruction problem of section \ref{sec: scaled_problem}, one has also to consider the behavior of the solution depending on the scale parameter $h$. Numerical experiments show that there is an optimal value of $h$ such that the $RMSE$ is minimum and $k^{-1}(A)$ is maximum (Figure \ref{fig: mse_h_gauss} and \ref{fig: rcond_h_gauss}). It turns out that, in most of the cases we examined, the optimal value of $h$ is $h\approx1$. Thus, in our discussion we will consider $h=1$. \begin{figure} \caption{Gaussian kernel reconstruction of the crescent-shaped phantom in function of $h$. Here $\varepsilon=50,\ \nu=0.5, \ N=30,\ M=20,\ K=64.$} \label{fig: mse_h_gauss} \label{fig: rcond_h_gauss} \label{fig: h_gauss} \end{figure} \section{Comparison of the methods} In this section we compare the classical Fourier methods, introduced in chapter \ref{chap: fourier_methods}, with the kernel-based methods of chapter \ref{chap: kernelRec}. In this second case we assume the use of optimal shape parameters. We compare the solutions of different algorithms varying the phantom and also testing their behavior when introducing some noise in the data. Again we use the $RMSE$ to measure how much the solutions differ from the original phantom. We start considering the behavior of the $RMSE$ in function of the number of the data $n$ (where again data are supposed to be taken using a parallel beam geometry). 
As one would expect, with both kernel and Fourier-based methods, the $RMSE$ decreases when $n$ increases. In particular it is interesting to notice that in the Fourier reconstruction, the $RMSE$ decreases at an exponential rate and so, for large $n$, there is no big improvement of the solution. For example in the case of the Shepp-Logan phantom, for $n>18090$ there is a variation of the $RMSE$ lower than $5.67\cdot 10^{-3}$ (Figure \ref{fig: fou_rec1}). \begin{figure} \caption{Reconstruction of the Shepp-Logan phantom using the back projection formula with the Shepp-Logan filter and linear interpolation. (a) $RMSE$ as a function of the number of data (without noise); (b) $RMSE$ as a function of the number of data (with Gaussian noise with mean $\mu=0.01$ and variance $\sigma=0.01$); (c) Reconstruction without noise with $N=180,\ M=200$; (d) Reconstruction with Gaussian noise with $N=180,\ M=200$.} \label{fig: fou_rec1} \label{fig: fou_noise1} \label{fig: fou_shepp} \label{fig: fou_shepp_noise} \label{fig: fou_rec_time} \end{figure} In the case of kernel reconstruction the use of a big amount of data must take CPU limits into account\footnote{All the computations shown in this chapter are made using a computer with a CPU core i5, 2.53 GHz and a RAM of 4 GB.}. Indeed the matrix $A$ used to compute the coefficients $c$ of the solution belongs to the space $Mat(n,n)$; moreover, to evaluate the solution on a grid of $K\times K$ pixels, we must multiply $c$ by a matrix $B\in Mat(K^{2},n)$ representing the basis functions of the space our solution belongs to. Thus, for example, with $K=256,\ N=50,\ M=40$, one obtains two (non-sparse) matrices, one with $(N*(2M+1))^{2}=16{,}402{,}500$ elements and the other with $K^{2}\cdot(N*(2M+1))=265{,}420{,}800$ elements. Furthermore we notice that if $n$ increases, also $k(A)$ increases (see Figure \ref{fig: mq_rcond1}). Considering a problem with reasonable values of $n$ and $K$ (e.g.
$n<130,\ K<256$), we observe that the $RMSE$ of the kernel methods behaves in the same way as the Fourier based methods and has a comparable magnitude (Figure \ref{fig: fou_mq_rec1}). In particular, Figure \ref{fig: cshape_fou_gau_rec} shows how the Gaussian kernel reconstruction applied to the crescent-shaped phantom gives better results with respect to the back projection formula. \begin{figure} \caption{Comparison of Fourier and Gaussian kernel reconstruction methods of the crescent-shaped phantom: (a) Root mean square error; (b) Fourier method $N=50,\ M=40;$ (c) Gaussian kernel $N=50,\ M=40$.} \label{fig: fou_gau_rec1} \label{fig: fou_cshape1} \label{fig: gau_cshape} \label{fig: cshape_fou_gau_rec} \end{figure} Thus we conclude that kernel based methods can be useful in the context of a limited number of available data. \begin{figure} \caption{Comparison of the Fourier and multiquadric kernel reconstruction of the Shepp-Logan phantom: (a) Root mean square error; (b) Reciprocal of the condition number of matrix $A$ of the kernel method; (c) Elapsed time for Fourier based method; (d) Elapsed time for multiquadric reconstruction method.} \label{fig: fou_mq_rec1} \label{fig: mq_rcond1} \label{fig: fou_time2} \label{fig: mq_time1} \label{fig: fou_mq} \end{figure} Comparing the reconstruction of a phantom using different kernels, we see that the Gaussian-multiquadric kernel and the Gaussian kernel give similar results while the truncated-multiquadric kernel and the compactly supported kernel are less accurate (Figure \ref{fig: bull_ker_rec}).
\begin{figure} \caption{Comparison of kernel reconstruction methods of the bull's eye phantom using different kernel functions: (a) Root mean square error; (b) Reconstruction with inverse multiquadrics kernel $N=50,\ M=35;$ (c) Reconstruction with Gaussian kernel $N=50,\ M=40$.} \label{fig: g_mq__imq_c_rec1} \label{fig: imq_bull1} \label{fig: gau_bull1} \label{fig: bull_ker_rec} \end{figure} The biggest problem in the kernel methods, apart from the memory limitation, is the computational time. Referring to Figure \ref{fig: fou_time2} and \ref{fig: mq_time1}, we can see how the elapsed time (in sec) during the execution of the algorithm grows exponentially with the number of data, while using Fourier techniques the time depends linearly on the dimension of the problem. We believe that this is due to the implementation in MATLAB of the Fourier transform by the FFTW algorithm \cite{FFTW}. Finally we test our methods after the introduction of noise in the data. We first notice that for large $n$ the $RMSE$ of the Fourier methods increases (Figure \ref{fig: fou_noise1}). Considering a kernel based method we observe (in the range of acceptable $n$) a behavior similar to the Fourier case, where the $RMSE$ values computed with different methods have the same order of magnitude (Figure \ref{fig: cfr_noise} and \ref{fig: cfr_cshape_noise}).
\begin{figure} \caption{Comparison of the Fourier and Gaussian kernel reconstruction of the crescent-shaped phantom with noise-free data and Gaussian noise data with 0.001 mean and 0.001 variance: (a) Fourier method; (b) Gaussian kernel method.} \label{fig: fou_cfr_noise1} \label{fig: gau_cfr_noise1} \label{fig: cfr_noise} \end{figure} \begin{figure} \caption{Comparison of the Fourier and Gaussian kernel reconstruction of the crescent-shaped phantom with Gaussian noise data (mean $\mu= 0.001$ and variance $\sigma=0.001$): (a) Fourier method; (b) Gaussian kernel method.} \label{fig: fou_cshape_noise} \label{fig: gau_cshape_noise} \label{fig: cfr_cshape_noise} \end{figure} \section{Graphical user interface} In order to test the various methods on different phantoms we developed a graphical user interface (GUI) allowing the user to choose the options of the reconstruction and the parameters using the mouse and the keyboard and to access all the output information of the method. The GUI has been realized in MATLAB (version 7.7.0 R2008b). Figure \ref{fig: gui} shows the main window of the GUI when it is started. Referring again to Figure \ref{fig: gui} we can see that it presents two windows where the original phantom and the reconstructed image of the phantom will be displayed. \begin{figure} \caption{Graphical user interface} \label{fig: gui} \end{figure} On the bottom of the window there is a panel that, thanks to pop-up menus, allows to choose the phantom and the reconstruction algorithm. There are three available phantoms: the crescent-shaped phantom (introduced in section \ref{subsec: crescent_shape}), the bull's eye phantom (Figure \ref{fig: bull_eye}) and the Shepp-Logan phantom (Figure \ref{fig: shepp_phantom} in section \ref{sec: phantoms}). \begin{figure} \caption{Bull's eye phantom} \label{fig: bull_eye} \end{figure} Clicking on the options button a second window will be opened (Figure \ref{fig: option_figure}).
\begin{figure} \caption{Options window} \label{fig: option_figure} \end{figure} Thanks to this options window it is possible to modify the number of sampled angles $N$, the number of samples on the $t$ axis ($2M+1$) and the dimension of the output image ($K^{2}$). Moreover, depending on the selected algorithm, it is possible to change the predefined parameters used in the methods. For example, when using the back projection formula, one can choose both the interpolation technique (nearest neighbor, linear, cubic) and the low pass filter (Ram-Lak, Shepp-Logan, cosine filter). Finally one can add a certain amount of noise to the Radon data to test the robustness of a method under the action of noise. Possible choices of noise are the Gaussian noise (with mean and variance decided by the user), Poisson noise and shot (or ``salt and pepper'') noise. Figure \ref{fig: gui_shepp_noise} shows the result of applying the back projection formula to the Shepp-Logan phantom adding Gaussian noise to the data. \begin{figure} \caption{Shepp-Logan phantom reconstruction with back-projection formula and Gaussian noise-data (mean $\mu=0$, variance $\sigma=0.001$).} \label{fig: gui_shepp_noise} \end{figure} It is also always possible to decide whether to plot in a new figure the sinogram of the phantom, i.e. the sampled Radon transform used for the reconstruction. Figure \ref{fig: sinogram_kaczmarz} shows the case of Kaczmarz's method. \begin{figure} \caption{Sinogram and sparsity of the system matrix of the Kaczmarz method.} \label{fig: sinogram_kaczmarz} \end{figure} In addition, depending on the algorithm a further plot will be displayed: \begin{itemize} \item Using the back projection formula, the interpolation of the convolution between the low pass filter and the Radon transform is shown; \item With Kaczmarz's method, the sparsity of the system matrix (Figure \ref{fig: sinogram_kaczmarz}); \item In the case of kernel methods, the image of the system matrix.
\end{itemize} The reconstruction begins by pushing the start button. Figures \ref{fig: gui_cshape_mq} and \ref{fig: gui_bulleye_art} show two different applications of the GUI. \begin{figure} \caption{Example of reconstruction using the graphical user interface. (a) Multiquadric reconstruction of the crescent-shaped phantom; (b) Kaczmarz's reconstruction of the bull's eye phantom.} \label{fig: gui_cshape_mq} \label{fig: gui_bulleye_art} \label{fig: gui_examples} \end{figure} During the computation of the solution, the status of the process is displayed in the MATLAB command window, e.g. for Kaczmarz's method \begin{verbatim} Start reconstruction.. Radon transform computed.. computing ART system.. applying Kaczmarz's method.. residual: 0.5895 done. \end{verbatim} Here \texttt{residual} indicates the norm of the difference $Ax_{k}-b$ of the solution at the final iteration $k$. Closing the GUI window or saving the workspace from the menu, one can find all the information about the solution and the algorithm used in the output structure \texttt{out}. This structure contains the following fields: \begin{itemize} \item \texttt{radon}: sampled Radon transform of the phantom ($N\times (2M+1)$ matrix); \item \texttt{reconstruction}: Reconstructed image of the phantom ($K\times K$ matrix); \item \texttt{phantom}: name of the used phantom (string); \item \texttt{algorithm}: name of the used algorithm (string); \item \texttt{options}: options used in the reconstruction (structure depending on the algorithm). \end{itemize} The \texttt{options} structure contains information about the sampling ($N,M,K$), on the noise (type of noise and mean and variance in the Gaussian case) and a logical value that determines if the sinogram is plotted or not.
Additional parameters depend on the algorithm: \begin{itemize} \item Back projection formula: interpolation technique and the low pass filter; \item Kaczmarz's method: relaxation parameter $\lambda$ and a maximum number of iterations and a tolerance that decide when to stop the reconstruction process; \item Kernel methods: the name of the kernel used (Gaussian, multiquadric, inverse multiquadric), the shape parameter of the kernel and which window function has been used. \end{itemize} The graphical user interface is designed in such a way that it is easy to extend its functionalities, for example adding new phantoms or new reconstruction methods. Another improvement that can be added is the possibility to directly compare the results of a reconstruction using two different methods or different parameters. \backmatter \chapter{Conclusions} In this thesis we studied the problem of clinical image reconstruction. The problem was faced both from an analytical point of view, considering the mathematical aspect of the problem and the classical methods used to solve it, and also with a numerical approach, implementing new algorithms to solve it and comparing the behavior of the different methods. In the first part we focused on classical Fourier based methods. These methods are founded on the back projection formula and its discretization. We saw that in a discrete context it is possible to obtain only an approximated solution because of the presence of noise and the constraint of having only a finite amount of data. In the second part we introduced a different approach for solving the image reconstruction problem, called ART. With this kind of methods the solution is obtained solving a linear system. In particular, positive definite kernels can be used for this purpose, provided a regularization of the Radon transform functional. The regularization we used is to multiply the kernel function by a window function so that the Radon transform of the product function is finite.
Then, we implemented these algorithms using particular kernel and window functions and studied their behavior as a function of the shape parameters. In the last part of the thesis we compared kernel-based with Fourier-based methods. We saw that the quality of the reconstruction of the two methods is similar, also in the case of noisy data. The main limits of kernel-based methods are the computational time, which grows exponentially with the size of the problem, and a bound on the number of usable data; indeed, the linear system involved in the problem can become huge. Possible improvements and further work consist in using other kinds of kernels and window functions. In this case the main difficulties can be finding the analytical expression of the Radon transform, or trying other regularization techniques for the Radon transform integral. Implementing faster algorithms for solving the linear system, for example generating structured or sparse matrices, can also be another improvement. Moreover, an accurate study of the approximation error can give useful information, e.g. in the determination of optimal shape parameters. Finally, one can introduce a polynomial term in the expression of the solution and then use conditionally positive definite kernels. The large number of applications and the vastness of possibilities that can be followed in using the kernel-based approach show why this research field has become so important in the last years. \appendix \chapter{Appendix A: Inverse multiquadrics kernel matrix}\label{app: invMul} We compute the elements of the matrix $A$ of the inverse multiquadrics reconstruction problem (section \ref{sec: imqRec}). We recall that in that case \begin{align*} a_{k,j}=\frac{2}{\varepsilon^{2}a}\int_{c_{1}}^{c_{2}}{\text{asinh}\left(\sqrt{\frac{\varepsilon^{2}L^{2}-u^{2}}{1+u^{2}}}\right)\,du}, \end{align*} with \begin{align*} &c_{1}=\varepsilon\max{(-L,-|a|\sqrt{H^{2}-r^{2}}+b)} & &c_{2}=\varepsilon\min{(L,|a|\sqrt{H^{2}-r^{2}}+b)}.
\end{align*} Hence, all we have to do is the compute \begin{equation*} I=\int{\text{asinh}\left( \sqrt{\frac{M^{2}-u^{2}}{1+u^{2}} }\right)\,du}, \qquad M>0. \end{equation*} Using the logarithmic representation of asinh we can write \begin{align*} I=&\int{\log{\left( \sqrt{M^{2}-u^{2}}+\sqrt{1+M^{2}}\right)}\,du}-\frac{1}{2}\int{\log{(1+u^{2})}\,du}=\\ &=I_{1}-\left(\text{atan}{u}+\frac{u}{2}\log{(1+u^{2})}-u\right). \end{align*} Integrating $I_{1}$ by parts \begin{align*} I_{1}&=u\log{\left( \sqrt{M^{2}-u^{2}}+\sqrt{1+M^{2}}\right)}+\\ &\qquad+\int{\frac{u^{2}}{(M^{2}-u^{2})-\sqrt{M^{2}-u^{2}}\sqrt{M^{2}+1}}\,du}=\\ &=u\log{\left( \sqrt{M^{2}-u^{2}}+\sqrt{1+M^{2}}\right)}+I_{2}. \end{align*} Adding and subtracting $M^{2}$ in the numerator, we get \begin{align*} I_{2}&=-\int{\frac{M^{2}-u^{2}}{(M^{2}-u^{2})-\sqrt{M^{2}-u^{2}}\sqrt{M^{2}+1}}\,du}+\\ &+M^{2}\int{\frac{1}{(M^{2}-u^{2})-\sqrt{M^{2}-u^{2}}\sqrt{M^{2}+1}}\,du}=\\ &=I_{3}+M^{2}I_{4}. \end{align*} Setting $\alpha=\text{acos}{\frac{u}{M}}$ and $c=\sqrt{1+\frac{1}{M^{2}}}$ in $I_{3}$, we have \begin{align*} I_{3}&=-\int{\frac{\sqrt{M^{2}-u^{2}}}{\sqrt{M^{2}-u^{2}}+\sqrt{M^{2}+1}}\,du}=M\int{\frac{\sin^{2}{\alpha}}{\sin{\alpha}+c}\,d\alpha}=\\ &=M\int{\frac{\sin^{2}{\alpha}-c^{2}}{\sin{\alpha}+c}\,d\alpha}+Mc^{2}\int{\frac{1}{\sin{\alpha}+c}\,d\alpha}=\\ &=M(-\cos{\alpha}-c\alpha)+Mc^{2}\frac{2\text{atan}\left( \frac{\cos{\alpha}}{\sin{\alpha}+(\sqrt{c-1}+\sqrt{c+1})^{2}} \right) +\alpha}{\sqrt{c-1}\sqrt{c+1}}=\\ &=-u-\left(\sqrt{M^{2}+1}\right)\text{acos}\left( \frac{u}{M}\right)+\\ &\qquad+(M^{2}+1)\left[2\text{atan}\left( \frac{u}{\sqrt{M^{2}-u^{2}}+\sqrt{M^{2}+1}+1}\right)+ \text{acos}\left( \frac{u}{M}\right)\right]. 
\end{align*} Finally $I_{4}$: \begin{align*} I_{4}=\int{\frac{1}{(M^{2}-u^{2})-\sqrt{M^{2}-u^{2}}\sqrt{M^{2}+1}}\,du}=\text{atan}\left(u\sqrt{\frac{M^{2}+1}{M^{2}-u^{2}}} \right)-\text{atan}u \end{align*} Putting together the results, since $I=I_{1}-I_{3}-M^{2}I_{4}$, we obtain \begin{align*} &\int{\text{asinh}\left( \sqrt{\frac{M^{2}-u^{2}}{1+u^{2}} }\right)\,du}=\frac{u}{2}\text{asinh}\left( \sqrt{\frac{M^{2}-u^{2}}{1+u^{2}} }\right)-(1+M^{2})\text{atan}u+\\ &\qquad+\sqrt{M^{2}+1}\left(\sqrt{M^{2}+1}-1\right)\text{acos}\left( \frac{u}{M}\right)+M^{2}\text{atan}\left(u\sqrt{\frac{M^{2}+1}{M^{2}-u^{2}}} \right)+\\ &\qquad+2(M^{2}+1)\text{atan}\left( \frac{u}{\sqrt{M^{2}-u^{2}}+\sqrt{M^{2}+1}+1}\right). \end{align*} Where, of course, this formula is valid for $|u|<M$. \chapter{Appendix B: Compactly supported kernel matrix}\label{app: compSupp} We compute the elements of the matrix $A$ of the compactly supported function reconstruction problem (section \ref{subsec: compSupp}). We recall that in that case \begin{equation*} a_{k,j}=\int_{\numberset{R}}{b(x_{s})(1-\nu^{2}\norm{x_{s}})_{+}\,ds}, \end{equation*} where \begin{align*} b(x)=\left\{ \begin{aligned} &g(t_j-x\cdot v_j) & &\text{if}\ |t_j-x\cdot v_j|\leq\frac{1}{\varepsilon}\\ &0 & &\text{if}\ |t_j-x\cdot v_j|>\frac{1}{\varepsilon} \end{aligned} \right. \end{align*} and \begin{align*} g(t)&=\left\{ \begin{aligned} &\frac{2}{\varepsilon}\left[\frac{\sqrt{1-\varepsilon^2t^2}}{3}(2\varepsilon^2t^2+1)-\varepsilon^2t^2\text{acosh}\left(\frac{1}{\varepsilon|t|}\right)\right] & &\text{if}\ t\neq0\\ &\frac{2}{3\varepsilon} & &\text{if}\ t=0. \end{aligned} \right. \end{align*} Since $\norm{x_{s}}^{2}=r^2+s^2$, one obtains \begin{align*} a_{kj}&=(1-\nu^{2}r^2)\int_{\nu\sqrt{r^2+s^2}\leq1}{b(x_{s})\,ds}-\nu^2\int_{\nu\sqrt{r^2+s^2}\leq1}{b(x_{s})s^2\,ds}=\\ &=\left\{ \begin{aligned} &(1-\nu^{2}r^2)I_{1}-\nu^2I_{2} & &\text{if}\ |r|\leq\frac{1}{\nu}\\ &0 & &\text{if}\ |r|>\frac{1}{\nu} \end{aligned} \right. 
\end{align*} Setting $t-x_{s}\cdot v=as+b$ and $D_{s}=\left\{s:\ |s|\leq\sqrt{\frac{1}{\nu^2}-r^2},\ \varepsilon|as+b|\leq1\right\}$, \begin{align*} I_{1}&=\frac{2}{\varepsilon}\int_{D_{s}}{\left[\frac{\sqrt{1-\varepsilon^2(as+b)^2}}{3}(2\varepsilon^2(as+b)^2+1)\right]\,ds}+\\ &-\frac{2}{\varepsilon}\int_{D_{s}}{\varepsilon^2(as+b)^2\text{acosh}\left(\frac{1}{\varepsilon|as+b|}\right)\,ds}\\ I_{2}&=\frac{2}{\varepsilon}\int_{D_{s}}{\left[\frac{\sqrt{1-\varepsilon^2(as+b)^2}}{3}(2\varepsilon^2(as+b)^2+1)\right]s^2\,ds}+\\ &-\frac{2}{\varepsilon}\int_{D_{s}}{\varepsilon^2(as+b)^2\text{acosh}\left(\frac{1}{\varepsilon|as+b|}\right)s^2\,ds}. \end{align*} We distinguish the cases $a=0$ and $a\neq0$: \begin{itemize} \item If $a=0$ \begin{equation*} I_{1}=\left\{ \begin{aligned} &\frac{4}{\varepsilon}\sqrt{\frac{1}{\nu^2}-r^2}\left[\frac{\sqrt{1-\varepsilon^2b^2}(2\varepsilon^2b^2+1)}{3}-2\varepsilon^2b^2\text{acosh}\left(\frac{1}{\varepsilon|b|}\right)\right] & &\text{if} \ |b|\leq\frac{1}{\varepsilon}\\ &0 & &\text{if} \ |b|>\frac{1}{\varepsilon} \end{aligned} \right. \end{equation*} \begin{equation*} I_{2}=\left\{ \begin{aligned} &\frac{4}{3\varepsilon}\left(\frac{1}{\nu^2}-r^2\right)^{\frac{3}{2}}\left[\frac{\sqrt{1-\varepsilon^2b^2}(2\varepsilon^2b^2+1)}{3}-2\varepsilon^2b^2\text{acosh}\left(\frac{1}{\varepsilon|b|}\right)\right] & &\text{if} \ |b|\leq\frac{1}{\varepsilon}\\ &0 & &\text{if} \ |b|>\frac{1}{\varepsilon} \end{aligned} \right. 
\end{equation*} thus we conclude that \begin{itemize} \item if $\nu|r|\leq1$, $\varepsilon|b|\leq1$ and $b\neq0$ \begin{equation*} a_{kj}=\frac{8}{3}\frac{(1-\nu^2r^2)^{3/2}}{\varepsilon\nu}\left[\frac{\sqrt{1-\varepsilon^2b^2}(2\varepsilon^2b^2+1)}{3}-2\varepsilon^2b^2\text{acosh}\left(\frac{1}{\varepsilon|b|}\right)\right]; \end{equation*} \item if $\nu|r|\leq1$ and $b=0$ \begin{equation*} a_{kj}=\frac{8}{9\varepsilon}\left(\frac{1}{\nu^2}-r^2\right)^{\frac{2}{3}} \end{equation*} \item if $\nu|r|>1$, $\varepsilon|b|>1$, then $a_{k,j}=0$ \end{itemize} \item If $a\neq0$ \begin{align*} I_1=\int_{c_{1}}^{c_2}{\frac{2}{\varepsilon}\left[\frac{\sqrt{1-u^2}}{3}(2u^2+1)\right]\,\frac{du}{\varepsilon a}}-\int_{c_1}^{c_2}{\frac{2}{\varepsilon}u^2\text{acosh}\left(\frac{1}{|u|}\right)\,\frac{du}{\varepsilon a}}, \end{align*} where $u=\varepsilon(as+b)$ and \begin{align*} &c_1=\max{\left(-1,b\varepsilon-\varepsilon |a|\sqrt{\frac{1}{\nu^2}-r^2}\right)}, &c_2=\min{\left(1,b\varepsilon+\varepsilon |a|\sqrt{\frac{1}{\nu^2}-r^2}\right)} \end{align*} thus \begin{align*} I_1&=\frac{1}{6\varepsilon^2 a}\left[3\arcsin{u}+u\sqrt{1-u^2}(2u^2+1)\right]_{c_1}^{c_2}+\\ &-\frac{2}{\varepsilon^2 a}\left[\frac{1}{6}\arcsin{u}+\frac{u^3}{3}\text{acosh}\left(\frac{1}{|u|}\right)-\frac{u}{6}\sqrt{1-u^2}\right]_{c_1}^{c_2}=\\ &=\frac{1}{3\varepsilon^2 a}\left[\frac{1}{2}\arcsin{u}+u\sqrt{1-u^2}(u^2+\frac{3}{2})-2u^3\text{acosh}\left(\frac{1}{|u|}\right)\right]_{c_1}^{c_2} \end{align*} and the second integral becomes \begin{align*} I_2&=\int_{c_{1}}^{c_2}{\frac{2}{\varepsilon}\left[\frac{\sqrt{1-u^2}}{3}(2u^2+1)\left(\frac{u-\varepsilon b}{\varepsilon a}\right)^2\right]\,\frac{du}{\varepsilon a}}+\\ &-\int_{c_1}^{c_2}{\frac{2}{\varepsilon}u^2\text{acosh}\left(\frac{1}{|u|}\right)\left(\frac{u-\varepsilon b}{\varepsilon a}\right)^2\,\frac{du}{\varepsilon a}}=\\ &=\frac{2}{3\varepsilon^4 a^3}\left[\frac{1}{12}\left(b^2\varepsilon^2+\frac{1}{10}\right)\arcsin{u}+\frac{2}{9}b\varepsilon 
(1-u^2)^{\frac{3}{2}}+\right.\\ &-\frac{u^3}{30}(6u^2-15b\varepsilon u+10b^2\varepsilon^2)\text{acosh}\left(\frac{1}{|u|}\right)+\\ &+\frac{\sqrt{1-u^2}}{60}\left(\frac{20}{3}u^5-16b\varepsilon u^4+(10b^2\varepsilon^2+\frac{19}{3})u^3+\right.\\ &\left.\left.-\frac{14}{3}b\varepsilon u^2+(15b^2\varepsilon^2-\frac{1}{2})u-\frac{28}{3}b\varepsilon\right) \right]_{c_1}^{c_2}. \end{align*} Finally, if $\nu|r|\leq1$, we have: \begin{align} a_{k,j}&=\frac{1}{3\varepsilon^2 a }\left[ \frac{1}{2}\arcsin{u}\left( 1-\nu^2r^2-\frac{\nu^2}{\varepsilon^2 a^2}(b^2\varepsilon^2+\frac{1}{10})\right)+\right.\\ &+\sqrt{1-u^2}\left( u(u^2+\frac{3}{2})(\-\nu^2r^2)-\frac{\nu^2q_2(u)}{10\varepsilon^2 a^2}-\frac{4\nu^2b\varepsilon}{3\varepsilon^2 a^2}(1-u^2)\right)+\\ &\left.+u^3\text{acosh}\left(\frac{1}{|u|}\right)\left( \frac{\nu^2q_1(u)}{5\varepsilon^2 a^2}-2(1-\nu^2r^2)\right) \right]_{c_1}^{c_2} \label{eq: matrix_cs} \end{align} where \begin{align*} &q_{1}(u)=6u^2-15b\varepsilon u+10b^2\varepsilon^2\\ &q_{2}(u)=\frac{20}{3}u^5-16b\varepsilon u^4+(10b^2\varepsilon^2+\frac{19}{3})u^3+\\ &-\frac{14}{3}b\varepsilon u^2+(15b^2\varepsilon^2-\frac{1}{2})u-\frac{28}{3}b\varepsilon. \end{align*} \end{itemize} We observe that if $\varepsilon|b|\leq1$, then $c_{1}<c_{2}$ always holds. While for $\nu|r|>1$, $a_{k,j}=0$. At last we observe that because of the term $\text{acosh}(|u|^{-1})$ in \eqref{eq: matrix_cs}, we have to consider apart the cases $c_{1}=0,\ c_{2}=0$. In these cases, it is easy to see that, because of continuity, it is sufficient to consider the limit of \eqref{eq: matrix_cs} for $u\rightarrow0$, so that $u^3\text{acosh}(|u|^{-1})\rightarrow0$. \cleardoublepage \addcontentsline{toc}{chapter}{Bibliography} \end{document}
\begin{document} \title{Compactness of Schur A-multipliers and Haagerup Tensor Products} \begin{abstract} In this paper we study the connection between Haagerup tensor product and compactness of Schur $A$-multiplier. In particular, we give a new characterization of elementary $C^{\ast}$-algebra in terms of completely compact Schur $A$-multiplier. \end{abstract} \section{Introduction} Schur multipliers, a class of maps generalising the operators of entry-wise (Schur) multiplication on finite matrices, were first abstractly studied by Grothendieck in \cite{M025}. Since then they have played an important role in operator theory. In the simplest situation they arise in the following manner: to a (discrete) set $X$ and a function $\phi: X \times X \to \mathbb{C}$, one associates an operator $S_{\phi}$ on the space of compact operators on the Hilbert space $\ell^2(X)$; if the resulting map is (completely) bounded, we call $S_{\phi}$ a Schur multiplier with symbol $\phi$. In \cite{MR1766604}, Hladnik studied an important class of Schur multipliers: compact Schur multipliers, i.e the map $S_{\phi}$ is a compact operator. Hladnik identified the space of Schur multipliers with Haagerup tensor product $c_0 \otimes_h c_0$. Recently, in \cite{MTT16}, McKee, Todorov and Turowska generalised the notion of Schur multipliers to new setting, on which we will inverstigate the generalization of Hladnik's thorem. This paper is organised as following. In section 2, we give some basic definitions which we used in this paper, including the definition of Schur $A$-multiplier. In section 3, we prove that if either $X$ or $Y$ is not discrete measure space, then there is no non-zero compact Schur $A$-multiplier. By this result, we could restrict our attention to the case $X=Y=\mathbb{N}$. In section 4, we study some properties of Haagerup tensor product. 
We prove a theorem which is based on the work of Smith
If $k \in \mathfrak{L}_2(Y \times X, \mathcal{O}(H))$ and $\xi \in \mathfrak{L}_2(X,H)$ then for almost all $y \in Y$, the function $x \to k(y,x) \xi(x)$ is weakly measurable; moreover \begin{equation*} \int_X \|k(y,x)\xi(x)\| d \mu(x) \leq \|\xi\|_2 \text{\Huge{(}}\int_X \|k(y,x)\|^2 d \mu(x)\text{\Huge{)}}^{\frac{1}{2}}. \end{equation*} Such functions $k$ will often be referred to as $kernels$. It follows that the formula \begin{equation*} (T_k\xi)(y)=\int_X k(y,x) \xi(x) d\mu(x) \ \ \ \ (y \in Y), \end{equation*} defines a (weakly measurable) function $T_k\xi: Y \to H$, and a bounded operator $T_k: \mathfrak{L}_2(Y, H) \to \mathfrak{L}_2(X, H)$. Moreover, by \cite{MTT16} we have $\| T_k \| \leq \| k \|_2$ and $ T_k$=0 if and only if $k=0$ almost everywhere. If $\mathcal{X}$ and $\mathcal{Y}$ are operator space, we denote the space of all completely bounded linear maps from $\mathcal{X}$ into $\mathcal{Y}$ by $CB(\mathcal{X},\mathcal{Y})$ and write $CB(\mathcal{X})=CB(\mathcal{X},\mathcal{X})$. For the background of operator spaces and completely bounded maps, we refer the reader to Section 1.2. In this thesis, if $f$ is a linear map from an operator space $\mathcal{X}$ into an operator space $\mathcal{Y}$, we use $f_n$ to denote the corresponding map from $M_n(\mathcal{X})$ into $M_n(\mathcal{Y})$. Now we define \begin{equation*} \mathcal{S}_2(X \times Y, A)=\{T_k: k \in \mathfrak{L}_2(Y \times X, A)\} \end{equation*} and note that $\mathcal{S}_2(Y \times X, A)$ is a dense subspace of the minimal tensor product $\mathcal{O}_c \otimes A$, thus in particular it is an operator space. A function $\varphi: X \times Y \to CB(A, \mathcal{O}(H))$ will be called $pointwise \ measurable$ if, for every $a \in A$, the function $(x,y) \to \varphi(x,y)(a)$ from $X \times Y$ into $\mathcal{O}(H)$ is weakly measurable (\cite{MTT 16}). Let $\varphi: X \times Y \to CB(A, \mathcal{O}(H))$ be a bounded pointwise measurable function. 
For $k \in \mathfrak{L}_2(Y \times X, A)$, let $\varphi \cdot k: Y \times X \to \mathcal{O}(H)$ be the function given by \begin{equation*} (\varphi \cdot k)(y,x)=\varphi(x,y)(k(y,x)) \ \ \ \ ((y,x) \in Y \times X). \end{equation*} It is easy to show that $\varphi \cdot k$ is weakly measurable and $\| \varphi \cdot k \|_2 \leq \| \varphi \|_{\infty} \| k\|_2$ (\cite[Section 2]{MTT 16}). Let \begin{equation*} S_{\varphi}: \mathcal{S}_2(Y \times X, A) \to \mathcal{S}_2(Y \times X, \mathcal{O}(H)) \end{equation*} be the linear map given by \begin{equation*} S_{\varphi}(T_k)=T_{\varphi \cdot k} \ \ \ ( k \in \mathfrak{L}_2(Y \times X, A)). \end{equation*} \begin{definition} A bounded poinwise measurable map \begin{equation*} \varphi: X \times Y \to CB(A, \mathcal{O}(H)) \end{equation*} will be called a Schur $A$-multiplier if the map $S_{\varphi}$ is completely bounded. \end{definition} Equivalently, a bounded pointwise measurable function $\varphi: X \times Y \to CB(A,\mathcal{O}(H))$ is a Schur $A$-multiplier if and only if the map $S_{\varphi}$ possesses a completely bounded extension to a map from $\mathcal{O}_c \otimes A$ into $\mathcal{O}_c \otimes \mathcal{O}(H)$ (which we will still denote by $S_{\varphi}$). For the sake of convenience, we will not distinguish Schur $A$-multiplier $\varphi: X \times Y \to CB(A, \mathcal{O}(H))$ and the corresponding linear map \begin{equation*} S_{\varphi}: \mathcal{O}_c(\mathfrak{L}_2(X), \mathfrak{L}_2(Y)) \otimes A \to \mathcal{O}_c(\mathfrak{L}_2(X), \mathfrak{L}_2(Y)) \otimes \mathcal{O}(H), \end{equation*} when we use the terminology `Schur $A$-multiplier'. 
Another important notion is complete compactness which is defined as follows (see \cite{completelycompact}) \begin{definition} If $\mathcal{X}$ and $\mathcal{Y}$ are operator spaces, a completely bounded map $\Phi: \mathcal{X} \to \mathcal{Y}$ is called completely compact if for each $\epsilon >0$ there exists a finite dimensional subspace $F \subset \mathcal{Y}$ such that \begin{equation*} {\rm{dist}} (\Phi^{(m)}(x), M_m(F)) < \epsilon, \end{equation*} for every $x \in M_m(\mathcal{X})$ with $\|x\| \leq 1$ for every $m \in \mathbb{N}$. \end{definition} Let us recall that a completely bounded linear map which is approximated by a net of linear maps with finite rank in the complete bounded norm is completely compact \cite[Proposition 3.2]{compactness}. We will use this fact without reference frequently. If $\mathcal{X}$ and $\mathcal{Y}$ are operator spaces, we denote the set of compact (resp. completely compact) operators from $\mathcal{X}$ into $\mathcal{Y}$ by $\mathfrak{CO}(\mathcal{X,Y})$ (resp. $\mathfrak{CCO}(\mathcal{X,Y})$). \begin{remark}\label{hfsadfjklhvuivrioreiu} Let $\mathcal{X}$, $\mathcal{Y}$ be operator spaces, $\varphi: \mathcal{X} \to \mathcal{Y}$ be completely bounded linear map. If $\mathcal{Z} \subset \mathcal{Y}$ is operator space such that $\varphi(\mathcal{X}) \subset \mathcal{Z}$ and there is completely bounded map $h: \mathcal{Y} \to \mathcal{Z}$ with $h(d)=d$ for all $d \in \mathcal{Z}$, we define $\psi: \mathcal{X} \to \mathcal{Z}$ by $\psi(a)=\varphi(a)$ for all $a \in \mathcal{X}$, then since the composition of completely compact map and completely bounded map is completely compact, we conclude that $\varphi$ is completely compact if and only if $\psi$ is completely compact $($the proof is easy consequence of \cite[Proposition 3.2]{compactness}$)$. We will use this fact without reference in this paper. 
\end{remark} \begin{lemma}\label{negativecriteria} Let $\mathcal{X}$ and $\mathcal{Y}$ be operator spaces, $\varphi: \mathcal{X} \to \mathcal{Y}$ a completely bounded linear map which is not completely compact. If $\mathcal{Z}$ is an operator space containing $\mathcal{X}$ as a subspace, and $f: \mathcal{Z} \to \mathcal{X}$ is a completely bounded surjective linear map such that $\|f\|_{cb}=1$ and $f|\mathcal{X}=I_{\mathcal{X}}$ (here $I_{\mathcal{X}}: \mathcal{X} \to \mathcal{X}$ is the identity operator defined on $\mathcal{X}$), then $\varphi \circ f$ is not completely compact. \end{lemma} \begin{proof} we have \begin{equation*}\begin{split} &\{y \in M_n(\mathcal{X}): \|y\| \leq 1\} =f_n(\{x \in M_n(\mathcal{Z}): \|x\| \leq 1\}), \\& \varphi_n(\{y \in M_n(\mathcal{X}): \|y\| \leq 1\}) = (\varphi \circ f)_n(\{x \in M_n(\mathcal{Z}): \|x\| \leq 1\}), \end{split}\end{equation*} by the definition of complete compactness $\varphi \circ f$ is not completely compact. \end{proof} \section{Some properties of compact \\ Schur A-multiplier} In this section, we will prove that if $(X, \mu)$ and $(Y, \upsilon)$ are standard measure spaces, then there is no non-trivial compact Schur A-multiplier if either $(X, \mu)$ or $(Y, \upsilon)$ is non-atomic. In the following $A$ $\subseteq \mathcal{O}(H)$ is $C^{\ast}$-algebra, and we fix admissible topologies on $X$ and $Y$ respectively. \begin{lemma} \label{lemma 1} Let D be any compact subset of $X \times Y$ with $(\mu \times \upsilon) (D) >0$. Then for arbitrary positive number $\epsilon>0$, there are $\mu$-measurable subset $D_X$ of $X$ and $\upsilon$-measurable subset $D_Y$ of $Y$ such that \begin{equation*}\begin{split} & (\mu \times \upsilon )((D_X \times D_Y) \setminus D) < \epsilon \cdot (\mu \times \upsilon)(D_X \times D_Y)<{\infty}. \end{split}\end{equation*} Furthermore, if $(X, \mu) ( resp. 
(Y, \upsilon))$ is non-atomic, there are infinitely many mutually disjoint $\mu$ -measurable subsets $\{D_n\}_{n \in \mathbb{N} }$ of $D_X$(resp. there are infinitely many mutually disjoint $\upsilon$ -measurable subsets $\{C_n\}_{n \in \mathbb{N} }$ of $D_Y$ ), such that \begin{equation*}\begin{split} & (\mu \times \upsilon )((D_n \times D_Y) \setminus D) < \epsilon \cdot (\mu \times \upsilon)(D_n \times D_Y) <{\infty}, \\& (resp. \ (\mu \times \upsilon )((D_X \times C_n) \setminus D) < \epsilon \cdot (\mu \times \upsilon)(D_X \times C_n) <{\infty}) \end{split}\end{equation*} for all $n \in \mathbb{N}$. \end{lemma} \begin{proof} Let $0< \epsilon <1$ be a given positive number, we choose a number $\delta$ such that $0< \delta < \epsilon \cdot (\mu \times \upsilon )(D)$. By the construction of the product measures (see ~\cite{MR1681462}), there exists a set $\{ V_n \times W_n: n \in \mathbb{N} \}$ of disjoint rectangles such that $D \subset \cup_{n=1}^{\infty} V_n \times W_n $ and \begin{equation*}\begin{split} \sum _{n=1}^{\infty} \ (\mu \times \upsilon )(V_n \times W_n)< (\mu \times \upsilon )(D)+ \delta. \end{split}\end{equation*} It is easy to see that there is at least one $n \in \mathbb{N}$ such that \begin{equation*} (\mu \times \upsilon )((V_n \times W_n) \setminus D)< \epsilon \cdot \mu \times \upsilon(V_n \times W_n). \end{equation*} Now suppose $(X, \mu)$ is non-atomic. Let $n \in \mathbb{N} $ be such that \begin{equation}\begin{split}\label{definitionofn} &\quad \frac{1}{n} \cdot (\mu \times \upsilon )(D_X \times D_Y) \\& < \epsilon \cdot (\mu \times \upsilon )(D_X \times D_Y)-(\mu \times \upsilon)((D_X \times D_Y) \setminus D) , \end{split}\end{equation} and let $\{E_k\}_{k=1}^{n}$ be disjoint $\mu$-measurable subsets of $D_X$ such that $\mu(E_k)=\frac{1}{n} \mu(D_X)$ for each $k$ (for the existence of these sets, see ~\cite[I.4]{probability}). Thus we have \begin{equation*} \mu(D_X)= \mu(\cup_{k=1}^n E_k). 
\end{equation*} We claim that there are at least two distinct numbers $r, \ m \in \mathbb{N}$ such that \begin{equation*} (\mu \times \upsilon )((E_k \times D_Y)\setminus D) \le \epsilon \cdot (\mu \times \upsilon )(E_k \times D_Y), k=r, m. \end{equation*} because if this was not true we would have \begin{equation*}\begin{split} \epsilon \cdot (\mu \times \upsilon )(D_X \times D_Y)-(\mu \times \upsilon )((D_X \times D_Y) \setminus D) & < (\mu \times \upsilon )(E_n \times D_Y) \\&=\frac{1}{n} \cdot (\mu \times \upsilon )(D_X \times D_Y), \end{split}\end{equation*} this contradicts (\ref{definitionofn}). This contradiction proved the existence of the two distinct numbers $r$ and $m \in \mathbb{N}$. Now let $D_1=E_r$. We replace $D_X$ by $E_m$ and repeated the same argument, our proof is completed. \end{proof} We list the following two lemmas for reference, their proofs are routine. \begin{lemma} \label{lemma2} Let $\varphi: X \times Y \to CB(A, \mathcal{O}(H))$ be a Schur-A multiplier such that $\varphi(x,y)(a)=0$ for $(\mu \times \upsilon)$-almost $(x,y) \in X \times Y$ for all $a \in A$, then $S_{\varphi}=0$. \end{lemma} \begin{lemma}\label{decompsition of measure} If $(Z, \lambda)$ is a standard measure space, $C=\{e \in Z: \lambda(\{e\})>0 \}$, then $C$ is countable and $(Z \setminus C, \lambda)$ is non-atomic. \end{lemma} \begin{proposition}\label{X is non-atomic} Let $(X, \mu)$ or $(Y, \nu)$ be non-atomic standard measure space, and $\varphi: X \times Y \to CB(A, \mathcal{O}(H))$ be a compact Schur A-multiplier, then $\varphi=0$ for $( (\mu \times \nu) )$-alomost all $(x,y) \in X \times Y$. \end{proposition} \begin{proof} We prove that if $(X,\mu)$ is non-atomic, then $S_{\varphi}=0$ if $\varphi$ is compact Schur $A$-multiplier. The other part is proved by the same argument. 
By Lemma \ref{lemma2}, there is $a \in A$ with $\|a\|=1$ such that for some positive number $c >0$, the measure of the set \begin{equation}\label{(3)} D=\{(x,y) \in X \times Y: \|\varphi(x,y)(a)\| >c \} \end{equation} is positive. By the Vector-Valued Lusin's Theorem \cite[Corollary B.28]{MR2288954}, there exists a compact subset $E \subset D$ such that $(\mu \times \upsilon) (E)>0$ and the map from $E$ into $\mathcal{O}(H)$ defined by \begin{equation*} (x,y) \mapsto \varphi(x,y)(a), \end{equation*} is continuous. Therefore $\{\varphi(x,y)(a): (x,y) \in E\}$ is compact subset in $\mathcal{O}(H)$. Let $\epsilon$ be a fixed positive number. By \cite[ Lemma B.23]{MR2288954}, there is a function $f$ of the form $f=\sum_{i=1}^n \chi_{A_i} \otimes a_i$, where $a_i \in \mathcal{O}(H)$ and $A_i \subset E$ is measurable, such that $\|f\|_{\infty} \le \| \varphi \|$ and \begin{equation*} \|\varphi(x,y)(a)-f(x,y)\| < \epsilon \ \ \ \ ( (x,y) \in E). \end{equation*} By (\ref{(3)}) it is easy to see that there is at least one $A_k$ such that $(\mu \times \upsilon)(A_k) >0$ and we can assume that $A_k$ is compact. By Lemma \ref{lemma 1}, there are $\mu$-measurable subset $D_X$ and $\upsilon$-measurable subset $D_Y$, such that \begin{equation}\begin{split}\label{equation (5)} & (\mu \times \upsilon)((D_X \times D_Y) \setminus A_k) < \epsilon \cdot (\mu \times \upsilon)(D_X \times D_Y) < {\infty}. \end{split}\end{equation} Now we define the function $\vartheta: X \times Y \to \mathcal{O}(H)$ in $\mathfrak{L}_2(X \times Y, \mathcal{O}(H))$ by \begin{equation*}\begin{split} \vartheta (x,y)=\chi_{D_X \times D_Y}(x,y) a_k \ \ \ \ ( (x,y) \in X \times Y). \end{split}\end{equation*} Let $h \in H$ be such that $\|h\|=1$ and \begin{equation}\label{equation 9} \|\vartheta(x,y)(h)\|=\|a_k(h)\| \ge c- \epsilon \ \ \ \ ( (x,y) \in D_X \times D_Y). 
\end{equation} Since $(X, \mu)$ is non-atomic, by Lemma \ref{lemma 1} there are infinitely many mutually disjoint $\mu$-measurable subsets $\{D_n\}_{n \in \mathbb{N} }$ of $D_X$ such that \begin{equation*} (\mu \times \upsilon)((D_n \times D_Y) \setminus A_k) < \epsilon \cdot \mu \times \upsilon((D_n \times D_Y)) <{\infty}. \end{equation*} Define \begin{equation}\label{euqation 8.1} k_n(x,y):=\frac{a}{\mu (D_n)^{\frac{1}{2}} \cdot \upsilon (D_Y)^{\frac{1}{2}}} \chi_{D_n \times D_Y} (x,y), \xi_n(x):= \frac{h}{\mu (D_n)^{\frac{1}{2}}} \chi_{D_n} (x) \end{equation} Then $\{k_n \}_{n \in \mathbb{N} } \in \mathfrak{L}_2(X \times Y, \mathcal{O}(H))$ and $\|k_n\|_2=\|a\|=1$ ($n \in \mathbb{N}$); $\{ \xi_n \}_{n \in \mathbb{N}} \subset \mathfrak{L}_2(X, H)$ and $\|\xi_n\|=1$ ($n \in \mathbb{N}$). So $\{ \|T_{k_n} \| \}_{n \in \mathbb{N}}$ is bounded. We can complete our proof by showing that $\{T_{\varphi \cdot k_n}\}_{n \in \mathbb{N} }$ has no Cauchy subsequence if the given $\epsilon$ is small enough. By (\ref{equation (5)}), (\ref{equation 9}), Cauchy-Schwarz inequality and that $\|(T_{\varphi \cdot k_n}-T_{\varphi \cdot k_m})(\xi_n)\| \leq \|T_{\varphi \cdot k_n}-T_{\varphi \cdot k_m}\|$, it is rountine to verify that if $\epsilon < (1/100) \cdot c$ we have \begin{equation*}\begin{split} & \text{\Large{$\|$}}(T_{\varphi \cdot k_n}-T_{\varphi \cdot k_m})\text{\Large{$\|$}} \ge \text{\Huge{(}}\int_{D_Y}\text{\Large{$\|$}}\int_{D_n} \frac{1}{\mu (D_n) \cdot \upsilon (D_Y)^{\frac{1}{2}}} \vartheta(x,y)(h) d \mu x\text{\Large{$\|$}}^2 d \upsilon\text{\Huge{)}}^{\frac{1}{2}} \\& \quad - \text{\Huge{(}}\int_{D_Y}\text{\Large{$\|$}}\int_{D_n} \frac{1}{\mu (D_n) \cdot \upsilon (D_Y)^{\frac{1}{2}}} (\varphi(x,y)(a)- \vartheta (x,y))(h) d \mu x\text{\Large{$\|$}}^2d \upsilon y \text{\Huge{)}}^{\frac{1}{2}} \\& \ \ \ \ \ \ \ \ \ \ \ \ > \frac{1}{2} \cdot c, \end{split}\end{equation*} our proof is complete. 
\end{proof} \section{Haagerup tensor products and completely compact maps} Let $K$ be a fixed Hilbert space. We will study the connection between Haagerup tensor product and completely compact maps. If $\{A_i\}_{i \in I}$ is a collection of $C^{\ast}$-algebras, we denote their $C_0$ (or it is called $C^{\ast}$)- direct sum by $\sum_{i \in I}^{\oplus0} A_i$ (see ~\cite{MR936628}). For a $C^{\ast}$-algebra $A$, we follow Fell and Doran ~\cite{MR936628} to call $A$ elementary $C^{\ast}$-algebra if $A$ is $\ast$-isomorphic to $\mathcal{O}_c(H)$ for some Hilbert space $H$; on the other hand, we call $A$ compact type $C^{\ast}$-algebra if $A$ is $\ast$-isomorphic to a subalgebra of $\mathcal{O}_c(H)$ for some Hilbert space $H$. If $A$ is compact type $C^{\ast}$-algebra, we shall identify $A=\sum_{i \in I} ^{\oplus 0} \mathcal{O}_c(H_i)$ for some Hilbert spaces $H_i$ ($i \in I$) (~\cite[Theorem VI.23.3]{MR936628}). \begin{lemma}\label{inversecompact} If $A$ is a compact-type $C^{\ast}$-algebra then the following are equivalent: (i) A is elementary. (ii) For any $\digamma \in \mathfrak{CCO}(A)$, there are families $\{a_i\}_{i \in J}$ and $\{b_j\}_{j \in J}$ of elements of $A$ such that $\sum_{j \in J} a_i ^{\ast} a_i$ and $\sum_{j \in J} b_j b_j^{\ast}$ are convergent and \begin{equation*} \digamma(r)= \sum_{j \in J} b_j \ r \ a_j \ \ \ \ (r \in A). \end{equation*} If these conditions hold, we have $\mathfrak{CCO}(A)=A \otimes_h A$. \end{lemma} \begin{proof} The implication from (i) to (ii) is \cite[Corollary 3.6]{compactness}. (ii) implies (i) :Let $A= \sum_{i \in I}^{\oplus0} \mathcal{O}_c(H_i)$ for some collection $\{H_i\}_{i \in I}$ of Hilbert spaces, we claim that $I$ is single point. Let $H=\sum_{i \in I}^{\oplus} H_i$, if we represent any element $r$ of $\mathcal{O}(H)$ by a matrix $(r_{i,j})_{i,j \in I}$, where $r_{i,j} \in \mathcal{O}(H_j, H_i)$, then $s \in A$ is a diagonal matrix such that $s_{i,i} \in \mathcal{O}_c (H_i)$. 
Furthermore, for any $i,j \in I$ we define a map $E_{i,j}: \mathcal{O}_c(H_j, H_i) \to \mathcal{O}_c(H)$ by the following way: for any $a \in \mathcal{O}_c(H_j, H_i)$, $E_{i,j}(a)$ is the matrix in $\mathcal{O}_c(H)$ whose all entries are 0 except for the $i,j$-th entry, which is $a$. Now we take two distinct points $i_1, i_2$ in $I$, let $a \in \mathcal{O}_c(H_{i_1})$, $b \in \mathcal{O}_c(H_{i_2}, H_{i_1})$, and $c \in \mathcal{O}_c(H_{i_1}, H_{i_2})$ be all non-zero. We define $\Lambda: \mathcal{O}_c(H) \to \mathcal{O}_c(H)$ by \begin{equation*} \Lambda(r)=(E_{i_1, i_1}(a) + E_{i_1, i_2}(b)) \ r \ (E_{i_1, i_1}(a)+ E_{i_2,i_1}(c)) \ \ \ \ (r \in \mathcal{O}_c(H)), \end{equation*} then $\Lambda$ is completely compact by \cite[Corollary 3.6]{compactness}, and it is easy to verify that $\Lambda(A) \subset A$. Let $\digamma: A \to A$ be defined by $\digamma(r)=\Lambda(r)$ $(r \in A)$, by Remark \ref{hfsadfjklhvuivrioreiu} $\digamma$ is completely compact. But $\digamma \neq \phi_v$ for any $v \in A \otimes_h A$ because $\phi_v (\mathcal{O}_c(H_i)) \subset \mathcal{O}_c(H_i)$ for any $v \in A \otimes A$ and $i \in I$. This contradiction proved that $I$ is single point, $A$ is elementary. \end{proof} \begin{theorem}\label{new} If $A$ is a $C^{\ast}$-algebra, then the following are equivalent: (i) A is elementary, (ii) For any $\digamma \in \mathfrak{CCO}(A)$, there are families $\{a_i\}_{J}$ and $\{b_j\}_{j \in J}$ of elements of $A$ such that $\sum_{j \in J} a_i ^{\ast} a_i$ and $\sum_{j \in J} b_j b_j^{\ast}$ are convergent and \begin{equation*} \digamma(r)= \sum_{j \in J} b_j \ r \ a_j. \end{equation*} If these conditions hold, we have $\mathfrak{CCO}(A)=A \otimes_h A$. \end{theorem} \begin{proof} The implication from (i) to (ii) is ~\cite[Corollary 3.6]{compactness}. 
(ii) implies (i): In particular, for any $u \in A$, the map $x \mapsto uxu$ is compact, by ~\cite{MR0296716} there is a faithful $\ast$-representation $\pi$ of $A$ on Hilbert space $X$ such that $\pi(A) \subset \mathcal{O}_c(X)$, thus if we identify $A$ with its image in $\mathcal{O}(X)$, we can consider that $A$ is a norm-closed $\ast$-subalgebra of $\mathcal{O}_c(X)$. By Lemma \ref{inversecompact} (i) holds. \end{proof} \section{Compactness and Haagerup tensor products} By the results of Section 3, the only interesting compact Schur $A$-multipliers are defined on $X, \ Y=\mathbb{N}$, equipped with the counting measure. We assume that $A \subset \mathcal{O}(H)$ for some Hilbert space $H$, and we can drop the assumption that $A$ is separable. We identify each $T \in \mathcal{O}(H^{\infty})$ with a matrix $(T_{m,n})_{m,n \in \mathbb{N}}$, where $T_{m,n} \in \mathcal{O}(H)$. Furthermore, we define the conditional expectation $\mathcal{E}: \mathcal{O}(\ell^2) \to \ell^{\infty}$ by \begin{equation*} \mathcal{E}(S)(n)=S_{n,n},\rm{ \ for \ all \ matrix \ S \in \mathcal{O}(\ell^2} ) . \end{equation*} For each $n \in \mathbb{N}$ we define $\mathcal{E}_n: \mathcal{O}(\ell^2) \to \ell^{\infty}$ by \begin{equation*}\begin{split} &\mathcal{E}_n(S)(k)=S_{k,k}\ \ \ \ (k \leq n); \\& \mathcal{E}_n(S)(k)=0 \ \ \ \ \ (k > n) \end{split}\end{equation*} ($\rm{ for \ all \ matrix \ S \in \mathcal{O}(\ell^2} )$). Since $\ell^2$ is commutative $C^{\ast}$-algebra, $\mathcal{E}$ is completely bounded. Therefore the action of Schur $A$- multiplier $\varphi$ on $\mathcal{O}_c(\ell^2(\mathbb{N})) \otimes A$ can be regarded with \begin{equation*}\begin{split} S_{\varphi} & : \mathcal{O}_c(\ell^2(\mathbb{N})) \otimes A \rightarrow \mathcal{O}_c(\ell^2(\mathbb{N})) \otimes \mathcal{O}(H) \\& : (T_{m,n})_{m,n \in \mathbb{N}} \mapsto (\varphi (n,m) (T_{m,n}))_{m,n \in \mathbb{N}}. \end{split}\end{equation*} \begin{lemma}\label{matrix product} Let $S_{\varphi}$ be a $Schur \ A $-multiplier. 
If there exist an index set $J$ and families $\{R_i\}_{i \in J}$ and $\{S_i\}_{i \in J}$ $\subset \mathcal{O}(H^{\infty})$ such that $\sum_{i \in J} R_i R_i^{\ast}$ and $\sum_{i \in J} S_i^{\ast}S_i$ are convergent, and \begin{equation}\label{1841702} S_{\varphi}(T)= \sum_{i \in J} R_i T S_i, \ T \in \mathcal{O}_c(\ell^2({\mathbb{N}})) \otimes A, \end{equation} then $\sum_{i \in J} \mathcal{E}(R_i) \mathcal{E}(R_i)^{\ast}$ and $\sum_{i \in J}\mathcal{E}(S_i)^{\ast}\mathcal{E}(S_i)$ are convergent and \begin{equation}\label{1841701} S_{\varphi} (T) = \sum_{i \in J} \mathcal{E}(R_i) T \mathcal{E}(S_i), \ T \in \mathcal{O}_c(\ell^2({\mathbb{N}})) \otimes A. \end{equation} \end{lemma} \begin{proof} The convergence of $\sum_{i \in J} \mathcal{E}(R_i)\mathcal{E}(R_i)^{\ast}$ and $\sum_{i \in J} \mathcal{E}(S_i)^{\ast}\mathcal{E}(S_i)$ is an easy consequence of the convergence of $\sum_{i \in J} R_i R_i^{\ast}$ and $\sum_{i \in J} S_i^{\ast}S_i$. Let $R_i=(a^{(i)}_{m,n})_{m,n \in \mathbb{N}}$, $S_i=(b^{(i)}_{m,n})_{m,n \in \mathbb{N}}$, where $a^{(i)}_{m,n}, b^{(i)}_{m,n} \in \mathcal{O}(H)$. Since $S_{\varphi}$ is linear and continuous, and the linear span of $\{E_{p,q}(a): a \in A; p,q \in \mathbb{N}\}$ is norm-dense in $\mathcal{O}_c(\ell^2({\mathbb{N}})) \otimes A$ (here we recall that $E_{p,q}(a)$ is the matrix whose entries are all 0 except that the $p,q$-th entry is $a$), in order to verify (\ref{1841701}) it is sufficient to verify that it holds for $E_{p,q}(a)$ for any $a \in A$ and $p,q \in \mathbb{N}$. By (\ref{1841702}) we have (where $\cdot$ denotes the multiplication of matrices) \begin{equation}\begin{split}\label{19011701} &e_{p,q} \otimes 1_{\mathcal{O}(H)} \cdot S_{\varphi}(E_{p,q}(a)) \cdot e_{p,q} \otimes 1_{\mathcal{O}(H)}= E_{p,q}(\sum_{i \in J} a^{(i)}_{p,p} a b^{(i)}_{p,q}).
\end{split}\end{equation} On the other hand we have \begin{equation}\label{19011702} e_{p'',q''} \otimes 1_{\mathcal{O}(H)} \cdot S_{\varphi}(E_{p,q}(a)) \cdot e_{p',q'} \otimes 1_{\mathcal{O}(H)}=0 \end{equation} whenever $p''$ or $q''\neq p$, or $p'$ or $q' \neq q$. Now (\ref{19011701}) and (\ref{19011702}) imply that \begin{equation*}\begin{split} S_{\varphi}(E_{p,q}(a))=\sum_{i \in J} \mathcal{E}(R_i) \ E_{p,q}(a) \ \mathcal{E}(S_i). \end{split}\end{equation*} Our proof is complete. \end{proof} In the sequel, we use the symbol $\mathfrak{CS} (A, \mathcal{O}(H))$ (resp.\ $\mathfrak{CCS}(A,\mathcal{O}(H))$) to denote the set of compact (resp.\ completely compact) Schur $A$-multipliers. Furthermore, we define $\mathfrak{CS}(A)=\mathfrak{CS}(A,A)$ (resp.\ $\mathfrak{CCS}(A)=\mathfrak{CCS}(A,A)$). \begin{remark}\label{usefulkey} Recalling the discussion in Section 1.2, we have \begin{equation*} \mathfrak{CO} (A, \mathcal{O}(H)) \subset \mathfrak{CS} (A, \mathcal{O}(H)) \end{equation*} and \begin{equation*} \mathfrak{CCO}(A,\mathcal{O}(H)) \subset \mathfrak{CCS}(A,\mathcal{O}(H)). \end{equation*} \end{remark} \begin{proposition}\label{maintheorem} $\mathfrak{CCS}(\mathcal{O}_c(H))=c_0(\mathbb{N}, \mathcal{O}_c(H)) \otimes_h c_0(\mathbb{N}, \mathcal{O}_c(H))$. \end{proposition} \begin{proof} Suppose $\varphi$ is a Schur $\mathcal{O}_c(H)$-multiplier. Each $T \in \mathcal{O}_c(\ell^2(\mathbb{N})) \otimes \mathcal{O}_c(H)$ may be identified with a matrix $(T_{m,n})_{m,n \in \mathbb{N}}$, where $T_{m,n} \in \mathcal{O}_c(H)$ for all $m,n \in \mathbb{N}$. By Lemma \ref{inversecompact} there exist an index set $J$ and $\{S_i \}_{i \in J} \subset \mathcal{O}_c(H^{\infty})$, $\{R_i \}_{i \in J} \subset \mathcal{O}_c(H^{\infty})$ such that $\sum_{ i \in J} R_i R^{\ast}_{i}$ and $\sum_{i \in J}S_i^{\ast}S_i$ converge uniformly and \begin{equation*} S_{\varphi}(T)=\sum_{i \in J}R_i T S_i, \ T \in \mathcal{O}_c(H^{\infty}).
\end{equation*} By Lemma \ref{matrix product} we have \begin{equation}\label{diagoanl form of multiplier} S_{\varphi}(T)= \sum_{i \in J} \mathcal{E}(R_i) T \mathcal{E}(S_i), \ T \in \mathcal{O}_c(\ell^2(\mathbb{N})) \otimes A. \end{equation} Now it is easy to verify that $\{\mathcal{E}(R_i)\}_{i \in J}$, $\{\mathcal{E}(S_i)\}_{i \in J}$ are collections of compact operators, and that $\sum_{i \in J} \mathcal{E}(R_i) \mathcal{E}(R_i)^{\ast}$ and $\sum_{i \in J} \mathcal{E}(S_i)^{\ast} \mathcal{E}(S_i)$ converge in norm. Letting $v=\sum_{i \in J}\mathcal{E}(R_i) \otimes \mathcal{E}(S_i) \in c_0(\mathbb{N}, \mathcal{O}_c(H)) \otimes_h c_0(\mathbb{N}, \mathcal{O}_c(H))$, by \cite{MR1138841} we have $\|S_{\varphi}\|_{cb}=\|v\|_h$. Conversely, for any $v \in c_0(\mathbb{N},\mathcal{O}_c(H)) \otimes_h c_0(\mathbb{N},\mathcal{O}_c(H))$, there are $\{R'_k\}_{k \in \mathbb{N}},\{S'_k\}_{k \in \mathbb{N}}$ in $\mathcal{O}_c(H)$ such that $v=\sum_{k=1}^{\infty}R'_k \otimes S'_k$ and $\text{\Large{$\|$}}\sum_{k=1}^{\infty} R'_k {R'_k}^{\ast}\text{\Large{$\|$}} < + \infty$, $\text{\Large{$\|$}}\sum_{k=1}^{\infty}{S'_k}^{\ast}S'_k\text{\Large{$\|$}}< + \infty$; then \begin{equation*} (\phi_v)|\mathcal{O}_c(H^{\infty}): T \mapsto \sum_{k=1}^{\infty} R'_k T S'_k \end{equation*} is a completely compact map on $\mathcal{O}_c(H^{\infty})$, and it is easy to see that there is a Schur $\mathcal{O}_c(H)$-multiplier $\varphi$ such that $S_{\varphi}=\phi_v$. Therefore the map \begin{equation*} c_0(\mathbb{N},\mathcal{O}_c(H)) \otimes_h c_0(\mathbb{N},\mathcal{O}_c(H)) \to \mathfrak{CCS}(\mathcal{O}_c(H)), \ v \mapsto (\phi_v)|\mathcal{O}_c(H^{\infty}) \end{equation*} is a linear isometry with range $\mathfrak{CCS}(\mathcal{O}_c(H))$. Our proof is complete. \end{proof} \begin{theorem}\label{final1} If $A$ is a $C^{\ast}$-algebra, then the following two conditions are equivalent: (I) $A$ is elementary; (II) $\mathfrak{CCS}(A)=c_0(\mathbb{N}, A) \otimes_h c_0(\mathbb{N}, A)$.
If these conditions hold, a Schur $A$-multiplier $\varphi$ is completely compact if and only if there are an index set $J$ and families $\{a^i_k\}_{i \in J, k \in \mathbb{N}}$ and $\{b^i_k\}_{i \in J, k \in \mathbb{N}}$ $\subset A$ such that: (i) \, $\sum _i a^i_k (a^i_k)^{\ast}$ and $\sum_i (b^i_k)^{\ast} b^i_k $ are convergent in the norm of $A$ for each $k \in \mathbb{N}$, and \begin{equation*} \sum_{i \in J} a_k^i(a_k^i)^{\ast}, \sum_{i \in J}(b_k^i)^{\ast}b_k^i \to 0 \ \ {\rm{as}} \ \ k \to \infty; \end{equation*} (ii) for any $m,n \in \mathbb{N}$, \begin{equation}\label{equation of multiplier} \varphi(m,n)( x)= \sum_i a^i_n \, x \, b^i_m \ \ \ \ ( x \in A). \end{equation} \end{theorem} \begin{proof} The implication from $(I)$ to $(II)$ was proved in the previous proposition. $(II)$ implies $(I)$: This is the combination of Remark \ref{usefulkey} and Theorem \ref{new}. Therefore, if $(I)$ or $(II)$ holds, we may identify $A=\mathcal{O}_c(H)$ for some Hilbert space $H$. Let $\varphi: \mathbb{N} \times \mathbb{N} \to CB(A)$ be a given Schur $A$-multiplier. We prove that $\varphi$ is completely compact if and only if $(i)$ and $(ii)$ hold. If $(i)$ and $(ii)$ hold, then the second part of $(i)$ implies that $\|a_k^i \|^2=\| a_k^i (a_k^i)^{\ast}\| \to 0$ (resp.\ $\|b_k^i \|^2=\|(b^i_k)^{\ast} b^i_k\| \to 0$) for each fixed $i$ as $k \to \infty$. Thus we may define $R_i$ (resp.\ $S_i$) $\in c_0(\mathbb{N},\mathcal{O}_c(H))$ by $(R_i)_{k,k}=a^i_k$ (resp.\ $(S_i)_{k,k}=b^i_k$). Now $(i)$ implies that $\sum_{i \in J} R_i R_i^{\ast}$ and $\sum_{i \in J} S_i^{\ast}S_i$ are convergent in $c_0(\mathbb{N},\mathcal{O}_c(H))$, so we conclude that $\sum_i R_i \otimes S_i$ is in $c_0(\mathbb{N},\mathcal{O}_c(H)) \otimes_h c_0(\mathbb{N},\mathcal{O}_c(H))$, and $(ii)$ implies that \begin{equation*} S_{\varphi}(T)= \sum_i R_i \, T \, S_i \ \ \ \ (T \in \mathcal{O}_c(\ell^2) \otimes \mathcal{O}_c(H)), \end{equation*} so by Proposition \ref{maintheorem} $S_{\varphi}$ is a completely compact map.
Now suppose that $\varphi$ is a completely compact Schur $\mathcal{O}_c(H)$-multiplier; then there is $v=\sum_{k \in \mathbb{N}} R_k \otimes S_k$ in $c_0(\mathbb{N}, \mathcal{O}_c(H)) \otimes_h c_0(\mathbb{N}, \mathcal{O}_c(H))$ such that $S_{\varphi}=(\phi_v)|(\mathcal{O}_c(H^{\infty}))$. Furthermore, $\sum_{i \in \mathbb{N}}R_i R_i^{\ast}$ and $\sum_{i \in \mathbb{N}}S_i^{\ast}S_i$ are convergent in the norm of $\mathcal{O}(H^{\infty})$, so we conclude that $\sum_{i \in \mathbb{N}}R_i R_i^{\ast}$, $\sum_{i \in \mathbb{N}}S_i^{\ast}S_i \in c_0(\mathbb{N}, \mathcal{O}_c(H))$. We define $a^i_k=(R_i)_{k,k}$ and $b^i_k=(S_i)_{k,k}$ for each $i, \, k \in \mathbb{N}$; then it is easy to verify that $\{a_k^i\}_{i,k \in \mathbb{N}}$ and $\{b_k^i\}_{i,k \in \mathbb{N}}$ satisfy (i) and (ii). \end{proof} \section{Applications to compact-type $C^{\ast}$-algebras} In this section, we will use the results of the last section to study the compactness of Schur $A$-multipliers where $A$ is a compact-type $C^{\ast}$-algebra. We identify $A=\sum_{i \in I}^{0 \oplus} \mathcal{O}_c(H_i)$ for some collection $\{H_i\}_{i \in I}$ of Hilbert spaces, take $H=\sum_{i \in I}^{\oplus} H_i$, and let $f: \mathcal{O}_c(H) \to A$ be the canonical projection; then $f$ is completely positive. We shall say that the pair $(H,f)$ is \emph{associated} to $A$. \begin{lemma}\label{extension} Let $A$ be a compact-type $C^{\ast}$-algebra and $(H,f)$ its associated pair. If $\varphi: \mathbb{N} \times \mathbb{N} \to CB(A)$ is a Schur $A$-multiplier, then there is a Schur $\mathcal{O}_c(H)$-multiplier $\psi: \mathbb{N} \times \mathbb{N} \to CB(\mathcal{O}_c(H))$ such that $S_{\psi}|(\mathcal{O}_c(\ell^2(\mathbb{N})) \otimes A)=S_{\varphi}$. Moreover, $S_{\varphi}$ is (completely) compact if and only if $S_{\psi}$ may be chosen to be a (completely) compact map.
\end{lemma} \begin{proof} Since $f$ is completely bounded, by \cite[Theorem 2.6]{MTT16} the map $\rho: \mathbb{N} \times \mathbb{N} \to CB(\mathcal{O}_c(H), A)$ defined by \begin{equation*} \rho(n,m)=f \end{equation*} is a Schur $\mathcal{O}_c(H)$-multiplier. For each $m,n$ let us define $\psi(n,m): \mathcal{O}_c(H) \to \mathcal{O}_c(H)$ by \begin{equation*} \psi(n,m)(a)= (\varphi(n,m) \circ \rho(n,m)) \ (a) \ \ \ \ (a \in \mathcal{O}_c(H)). \end{equation*} Then $S_{\psi}(T)=S_{\varphi} \circ S_{\rho}(T)$ for all $T \in \mathcal{O}_c(\ell^2)\otimes \mathcal{O}_c(H)$, so $S_{\psi}: \mathcal{O}_c(\ell^2)\otimes \mathcal{O}_c(H) \to \mathcal{O}_c(\ell^2)\otimes \mathcal{O}_c(H)$ is a Schur $\mathcal{O}_c(H)$-multiplier whose range is contained in $\mathcal{O}_c(\ell^2) \otimes A$ and $S_{\psi}(T)=S_{\varphi}(T)$ for all $T \in \mathcal{O}_c(\ell^2)\otimes A$. Now since $S_{\rho}$ is completely bounded, we conclude that if $S_{\varphi}$ is (completely) compact then $S_{\psi}$ is (completely) compact. Conversely, if $S_{\psi}$ is (completely) compact, then since $S_{\psi}(T)=S_{\varphi}(T)$ for all $T \in \mathcal{O}_c(\ell^2) \otimes A$ and there is a completely positive map $id \otimes f : \mathcal{O}_c(\ell^2) \otimes \mathcal{O}_c(H) \to \mathcal{O}_c(\ell^2) \otimes A$ which is the identity map on $\mathcal{O}_c(\ell^2) \otimes A$, we conclude that $S_{\varphi}$ is (completely) compact.
\end{proof} By Lemma \ref{extension} and Theorem \ref{final1} together we have: \begin{theorem}\label{compacttypecase} If $A$ is a compact-type $C^{\ast}$-algebra, then there is a Hilbert space $H$ such that the following are equivalent: \\ (i) $\varphi$ is in $\mathfrak{CCS}(A)$; \\ (ii) there are an index set $J$ and families $\{a^i_k\}_{i \in J, k \in \mathbb{N}}$ and $\{b^i_k\}_{i \in J, k \in \mathbb{N}}$ $\subset \mathcal{O}_c(H)$ such that: \ \ \ \ \ (1) \, $\sum _i a^i_k (a^i_k)^{\ast}$ and $\sum_i (b^i_k)^{\ast} b^i_k $ are convergent in the norm of $\mathcal{O}(H)$ for each $k \in \mathbb{N}$ and $\sum_{i \in J} a_k^i(a_k^i)^{\ast}, \sum_{i \in J}(b_k^i)^{\ast}b_k^i \to 0 $ as $k \to \infty$; \ \ \ \ \ (2) for any $m,n \in \mathbb{N}$, \begin{equation*} \varphi(m,n) ( x) =\sum_i a^i_n \, x \, b^i_m, \ x \in A. \end{equation*} \end{theorem} \begin{remark} If we compare the previous theorem with Theorem \ref{final1}, we notice that in condition (ii) the families $\{a^i_k\}_{i \in J, k \in \mathbb{N}}$ and $\{b^i_k\}_{i \in J, k \in \mathbb{N}}$ can be chosen from $A$ if and only if $A$ is $\ast$-isomorphic to $\mathcal{O}_c(K)$ for some Hilbert space $K$. \end{remark} \section{Compactness and complete compactness} In this section, we prove some results by the aid of which we can identify compactness and complete compactness in some cases. We fix $\Omega$ to be a compact Hausdorff space and $C(\Omega)$ to be the space of all continuous complex-valued functions on $\Omega$. The proofs of the following two lemmas are standard and we omit them: \begin{lemma}\label{power} Let $\mathcal{X}$ and $\mathcal{Y}$ be operator spaces and $\Psi \in \mathcal{O}_c(\mathcal{X}, \mathcal{Y})$. If $(\Phi_{\alpha})_{\alpha \in \mathbb{A}} \subset B(\mathcal{Y})$ is a net with $\sup_{\alpha \in \mathbb{A}} \text{\Large{$\|$}} \Phi_{\alpha}\text{\Large{$\|$}} < \infty$ and $\Phi \in B(\mathcal{Y})$ such that $\Phi_{\alpha}(x) \to \Phi(x)$ for all $x \in \mathcal{X}$.
Then $\text{\Large{$\|$}}\Phi_{\alpha} \circ \Psi-\Phi \circ \Psi\text{\Large{$\|$}} \to _{\alpha \in \mathbb{A}} 0$. \end{lemma} \begin{lemma}\label{1842401} For any $(f_{i,j})_{i,j=1}^{\infty} \in \mathcal{O}(\ell^2) \otimes_{min} C(\Omega)$, we have \begin{equation*} \|(f_{i,j})_{i,j=1}^{\infty}\|=\sup\{\|(f_{i,j}(\omega))_{i,j=1}^{\infty}\|: \omega \in \Omega\}, \end{equation*} where the norm on the right-hand side is taken in $\mathcal{O}(\ell^2)$. In particular, $\|(f_{i,j}(\omega))_{i,j=1}^{\infty}\| \leq\|(f_{i,j})_{i,j=1}^{\infty}\|$. \end{lemma} \begin{lemma}\label{18051601} If $A$ is a $C^{\ast}$-algebra which is $\ast$-isomorphic to a subalgebra of $M_n \otimes C(\Omega)$ for some $n \in \mathbb{N}$ and compact space $\Omega$, then every compact linear map from $A$ into $A$ is completely compact. \end{lemma} \begin{proof} Let $\varphi: A \to A$ be a compact linear map. We identify $A$ as a $C^{\ast}$-subalgebra of $ M_n \otimes C(\Omega)$. Since $M_n \otimes C(\Omega)$ is nuclear, let $\{\psi_m: M_n \otimes C(\Omega) \to M_n \otimes C(\Omega)\}_{m \in I}$ be a net of finite-rank completely positive maps such that $\psi_m(a) \to a$ for all $a \in M_n \otimes C(\Omega)$ as $m \to \infty$. Then we have $\psi_{m} \circ \varphi(a) \to \varphi(a)$ for all $a \in A$. Since $\{\|\psi_m \circ \varphi\|\}$ is bounded, by Lemma \ref{power} we have $\|\psi_m \circ \varphi -\varphi\| \to 0$. Therefore $\{\psi_m \circ \varphi\}$ is a Cauchy net in $\mathcal{O}(A, M_n \otimes C(\Omega))$. But $\mathcal{CB}(A, M_n \otimes C(\Omega))=\mathcal{O}(A,M_n \otimes C(\Omega))$, so by the Open Mapping Theorem we conclude that $\{\psi_m \circ \varphi\}$ is a Cauchy net in $\mathcal{CB}(A,M_n \otimes C(\Omega))$ as well, and it is easy to verify that $\psi_m \circ \varphi \to \varphi$ completely. Since each $\psi_m \circ \varphi$ is of finite rank, $\varphi$ is completely compact. \end{proof} \begin{proposition} Let $V$ be an operator space, and $n \in \mathbb{N}$ be a fixed number.
Let $\varphi_{i,j}: V \to C(\Omega)$ be completely bounded maps ($i,j=1, \ldots ,n$), we define $S_{\varphi}: M_n(V) \to M_n(C(\Omega))$ by \begin{equation*} S_{\varphi}((v_{i,j})_{i,j=1}^n)=(\varphi_{i,j}(v_{i,j}))_{i,j=1}^n. \end{equation*} Then $\|S_{\varphi}\|_{cb}=\|S_{\varphi}\|$. \end{proposition} \begin{proof} Let $\mathcal{D}_n$ be the subalgebra of all the diagonal matrices in $M_n$. We will use the idea of the proof of \cite[proposition 8.6]{Paulsen} . For any $C \in \mathcal{D}_n$, we have \begin{equation*}\begin{split} & S_{\varphi}(C(v_{i,j})_{i,j=1}^n)=C(\varphi_{i,j}(v_{i,j}))_{i,j=1}^n, \\& S_{\varphi}((v_{i,j})_{i,j=1}^n C)=(\varphi_{i,j}(v_{i,j}))_{i,j=1}^n C \end{split}\end{equation*} where we define the multiplication between $C$ and elements of $M_n(V)$ or $M_n({C(\Omega)})$ by the multiplication of matrices. Since $S_{\varphi}$ is $\mathcal{D}_n$-bimodule map, by the similar argument of \cite[Proposition 8.6]{Paulsen} we can prove that $S_{\varphi}$ is completely bounded. \end{proof} Now we may get the following proposition immediately: \begin{proposition}\label{18050701} Let $V$ be an operator space, $A$ a $C^{\ast}$-algebra which is $\ast$-isomorphic to a subalgebra of $M_n \otimes C(\Omega)$ for some $n \in \mathbb{N}$. Let $\varphi_{i,j}: V \to A$ be a bounded linear map (so it is completely bounded automatically) for each $i,j \in \mathbb{N}$, if $S_{\varphi}: \mathcal{O}_c(\ell^2) \otimes V \to \mathcal{O}_c(\ell^2) \otimes A$ is a bounded linear map which satisfies \begin{equation*} S_{\varphi}((v_{i,j})_{i,j=1}^n)=(\varphi_{i,j}(v_{i,j}))_{i,j=1}^n, \end{equation*} then $S_{\varphi}$ is completely bounded and $\|S_{\varphi}\|_{cb} = \|S_{\varphi}\|$. 
\end{proposition} Combining Lemma \ref{18051601} and Proposition \ref{18050701}, we get: \begin{proposition}\label{18051602} If $A$ is a $C^{\ast}$-algebra which is $\ast$-isomorphic to a subalgebra of $M_n \otimes C(\Omega)$ for some $n \in \mathbb{N}$ and compact space $\Omega$, then the compact Schur $A$-multipliers are completely compact. \end{proposition} By modifying the proof of \cite{completelycompact}, it is not hard to prove: \begin{lemma}\label{counterexample} Let $H$ be a Hilbert space of infinite dimension and $I$ an index set which has the same cardinality as $H$ when we regard $H$ as a set. Let us select an arbitrary pairwise orthogonal family $\{H_i\}_{i \in I}$ of finite-dimensional subspaces of $H$ such that $\sum_{i \in I} H_i=H$ and that there is a subset $\{i_k\}_{k \in \mathbb{N}} \subset I$ such that $\dim(H_{i_k})>k$; then there exists a linear map $\varphi: \mathcal{O}_c(H) \to \mathcal{O}_c(H)$ such that $\varphi(\mathcal{O}(H_k)) \subset \mathcal{O}(H_k)$ and that $\varphi$ is completely bounded and compact, but not completely compact. \end{lemma} The following lemma is similar to the previous one: \begin{lemma}\label{counterexample2} Let $H$ be a Hilbert space of infinite dimension and $I$ an index set which has the same cardinality as $H$ when we regard $H$ as a set. Let us select an arbitrary pairwise orthogonal family $\{H_i\}_{i \in I}$ of finite-dimensional subspaces of $H$ such that $\sum_{i \in I} H_i=H$ and that there is a subset $\{i_k\}_{k \in \mathbb{N}} \subset I$ such that $\dim(H_{i_k})>k$; then there exists a linear map $\theta: \sum_{i \in I}^{\oplus 0} \mathcal{O}(H_i) \to \sum_{i \in I}^{\oplus0} \mathcal{O}(H_i)$ such that $\theta(\mathcal{O}(H_i)) \subset \mathcal{O}(H_i)$ and that $\theta$ is completely bounded and compact, but not completely compact.
\end{lemma} \begin{proposition}\label{1} Let $A$ be a compact-type $C^{\ast}$-algebra such that every compact completely bounded linear map $\varphi: A \to A$ is completely compact; then $A$ is $\ast$-isomorphic to $\sum_{k \in I} ^{\oplus0} M_{n_k}$ with $n_k \leq N$ for some $N \in \mathbb{N}$, where $I$ is an index set. \end{proposition} \begin{proof} We assume $A= \sum_{k \in I} ^{\oplus0} \mathcal{O}_c(H_k)$, $H=\sum^{\oplus}_{k \in I}H_k$. There are three cases: $(1)$ $H_k$ is of infinite dimension for some $k$; $(2)$ each $H_k$ is of finite dimension but $\{{\rm{dim}}(H_k)\}_{k \in I}$ is unbounded, so it is convenient to assume that $ A=\sum_{k \in I} ^{\oplus0} M_{n_k} $ and that there is a subset $ \{n_{k_i}\}$ of $\{n_k: k \in I\}$ such that $i \leq n_{k_i}$ for all $i \in \mathbb{N}$; $(3)$ $A= \sum_{k \in I}^{\oplus0} M_{n_k}$, $n_k \leq N$ for some $N \in \mathbb{N}$. We need to prove that (1) and (2) cannot occur. (1) fails: Suppose $H_k$ is of infinite dimension. By Lemma \ref{counterexample}, there is a map $\varphi: \mathcal{O}_c(H_k) \to \mathcal{O}_c(H_k)$ which is completely bounded and compact but not completely compact. Let $g: \sum_{i \in I} ^{\oplus0} \mathcal{O}_c(H_i) \to \mathcal{O}_c(H_k)$ be the canonical extension of $\varphi$, that is, $g|\mathcal{O}_c(H_k)=\varphi$ and $g| \sum_{i \neq k}^{\oplus0} \mathcal{O}_c(H_i)=0$; then $g$ is compact and completely bounded, but by Lemma \ref{negativecriteria} it is easy to see that $g$ is not completely compact. Thus $g$ can be regarded as a compact, completely bounded but not completely compact linear map from $\sum_{i \in \mathbb{N}}^{\oplus0}\mathcal{O}_c(H_i)$ into $\sum_{i \in \mathbb{N}}^{\oplus0}\mathcal{O}_c(H_i)$, so (1) fails. (2) fails: In this case, let $B= \sum_{i \in \mathbb{N}}^{\oplus0} M_i$. By Lemma \ref{counterexample2}, there is a completely bounded linear map $\varphi: B \to B$ satisfying $\varphi(M_i) \subset M_i$ which is compact but not completely compact.
But $B=\sum_{i \in \mathbb{N}}^{\oplus0} M_i$ is a norm-closed ${\ast}$-subalgebra of $A=\sum_{k \in I} ^{\oplus0} M_{n_{k}}$ (since $i \leq n_{k_i}$, $M_i \subset M_{n_{k_i}}$), and there is a conditional expectation $E$ from $A$ to $B$. So by Lemma \ref{negativecriteria} it is easy to see that $\varphi \circ E: \sum_{k \in I} ^{\oplus0} M_{n_k} \to B$ is compact and completely bounded, but it is not completely compact. Furthermore, $\varphi \circ E$ can be regarded as a linear map from $A$ into $A$ which is compact and completely bounded but not completely compact, so (2) fails. \end{proof} \begin{proposition}\label{identification of c and cc} Let $A= \sum_{k \in I} ^{\oplus0} M_{n_k}$ be such that $n_k \leq N$ for some $N \in \mathbb{N}$; then for any $C^{\ast}$-algebra $B$, a linear map $\varphi: B \to A$ is compact if and only if $\varphi$ is completely compact. \end{proposition} \begin{proof} Since $A$ is $\ast$-isomorphic to a $C^{\ast}$-subalgebra of $M_N \otimes C(I \cup \{\infty\})$, and $I\cup \{\infty\}$ is a compact space, the statement is an easy consequence of Proposition \ref{18051602}. \end{proof} Now we can summarize the above in the following theorem: \begin{theorem}\label{18051001} If $A$ is a compact-type $C^{\ast}$-algebra, then the following are equivalent: (i) any compact completely bounded linear map $\varphi: A \to A$ is completely compact; (ii) $A= \sum_{k \in I} ^{\oplus0} M_{n_k}$, $n_k \leq N$ for some $N \in \mathbb{N}$. If these conditions hold, then for any $C^{\ast}$-algebra $B$, if a linear map $\varphi: B \to \sum_{k \in I} ^{\oplus0} M_{n_k}$ is compact, then it is completely compact. \end{theorem} Now let us go back to the study of Schur $A$-multipliers defined on $\mathbb{N} \times \mathbb{N}$. \begin{theorem}\label{akey} If $A \subset \mathcal{O}_c(H)$ is a $C^{\ast}$-algebra, then $\mathfrak{CS}(A)=\mathfrak{CCS}(A)$ if and only if $A=\sum_{k \in I} ^{\oplus0} M_{n_k}$ with $n_k \leq N$ for some $N \in \mathbb{N}$.
\end{theorem} \begin{proof} This is the combination of Proposition \ref{18050701} and Theorem \ref{18051001}, because for $A \subset \mathcal{O}_c(H)$, $A=\sum_{k \in I} ^{\oplus0} M_{n_k} $ implies that $A$ is $\ast$-isomorphic to a subalgebra of $M_n \otimes C(\Omega)$ for some $\Omega$ and $n$. \end{proof} \begin{corollary} If $A$ is a finite-dimensional $C^{\ast}$-algebra, then $\mathfrak{CS}(A)=\mathfrak{CCS}(A)$. \end{corollary} Combining Theorem \ref{akey} and Proposition \ref{maintheorem}, we get \cite[Proposition 5]{MR1766604}: \begin{corollary} $\mathfrak{CS}(\mathbb{C})=c_0(\mathbb{N}, \mathbb{C}) \otimes_h c_0(\mathbb{N}, \mathbb{C})$. \end{corollary} \begin{theorem}\label{mainthe} If $A$ is a $C^{\ast}$-algebra, the following two conditions are equivalent: (i) $A$ is $\ast$-isomorphic to $\mathcal{O}(H)$ for some finite-dimensional Hilbert space $H$; (ii) $\mathfrak{CS}(A)=c_0(\mathbb{N}, A) \otimes_h c_0(\mathbb{N}, A)$. \end{theorem} \begin{proof} The implication from (i) to (ii) is the combination of Theorem \ref{final1} and Theorem \ref{akey}. (ii) implies (i): By Remark \ref{usefulkey}, for any $u \in A$, the map from $A$ into $A$ defined by $x \mapsto uxu$ is compact, so by Ylinen~\cite{MR0296716} $A$ is of compact type; thus $A$ has the following form: \begin{equation*} A=\sum_{i \in I} ^{\oplus0} \mathcal{O}_c(H_i), \end{equation*} where each $H_i$ is a Hilbert space. By Theorem \ref{compacttypecase}, for any $v \in c_0(\mathbb{N}, A) \otimes_h c_0(\mathbb{N}, A)$, $\phi_v$ is a completely compact Schur $A$-multiplier, so condition (ii) implies that \begin{equation*} \mathfrak{CS}(A) \subset \mathfrak{CCS}(A), \end{equation*} and of course this implies that \begin{equation*} c_0(\mathbb{N}, A) \otimes_h c_0(\mathbb{N}, A)=\mathfrak{CS}(A) = \mathfrak{CCS}(A). \end{equation*} Therefore, by Theorem \ref{akey}, $A=\sum_{k \in I} ^{\oplus0} M_{n_k}$, $n_k \leq N$ for some $N \in \mathbb{N}$.
On the other hand, by Theorem \ref{final1}, $A$ is an elementary $C^{\ast}$-algebra, hence at most one $n_k$ is non-zero, and (i) is proved. \end{proof} \end{document}
\begin{document} \title{A Leray regularized ensemble-proper orthogonal decomposition method for parameterized convection-dominated flows} \author{Max Gunzburger, Traian Iliescu, and Michael Schneier} \maketitle \begin{abstract} Partial differential equations (PDEs) are often dependent on input quantities which are inherently uncertain. To quantify this uncertainty, these PDEs must be solved over a large ensemble of parameters. Even for a single realization this can be a computationally intensive process. In the case of flows governed by the Navier-Stokes equations, an efficient method has been devised for computing an ensemble of solutions. To further reduce the computational cost of this method, an ensemble proper orthogonal decomposition (POD) method was recently proposed. The main contribution of this work is the introduction of POD spatial filtering for ensemble-POD methods. The POD spatial filter makes possible the construction of the Leray ensemble-POD model, which is a regularized reduced order model for the numerical simulation of convection-dominated flows. The Leray ensemble-POD model employs the POD spatial filter to smooth (regularize) the convection term in the Navier-Stokes equations and greatly diminishes the numerical inaccuracies produced by the ensemble-POD method in the numerical simulation of convection-dominated flows. Specifically, for the numerical simulation of a convection-dominated two-dimensional flow between two offset cylinders, we show that the Leray ensemble-POD method yields accurate results, whereas the ensemble-POD is highly inaccurate. The second contribution of this work is a new numerical discretization of the variable viscosity ensemble algorithm in which the average viscosity is replaced with the maximum viscosity. It is shown that this new numerical discretization is significantly more stable than those in current use.
Furthermore, error estimates for the novel Leray ensemble-POD algorithm with this new numerical discretization are also proven. \end{abstract} \begin{keywords} Navier-Stokes equations, ensemble computation, proper orthogonal decomposition, Leray regularization, POD differential filter \end{keywords} \section{Introduction} The mathematical models used in realistic applications oftentimes rely on input quantities which are subject to a degree of uncertainty. Some of these quantities include the initial conditions, forcing functions, model coefficients, and the boundary conditions. In order to develop robust models, the impact of this uncertainty must be quantified. Common approaches for recovering accurate solutions of these models are the Monte Carlo and stochastic collocation methods \cite{gunzburger_webster_zhang_2014}. These algorithms all require the underlying model to be solved over an ensemble of parameters. Depending upon the problem, the spatial resolution required for accurate realizations of the model can render these approaches computationally intractable. In particular, realizations for flow models such as the incompressible Navier-Stokes equations (NSE) can take on the order of weeks. In this work, we are interested in computing ensembles of solutions for the NSE with uncertainty present in the initial conditions, viscosities, and body forces. Specifically, for $j=1,\ldots,J$, we have \begin{equation}\label{eq:NSE} \left\{\begin{aligned} u_{t}^j+u^{j}\cdot\nabla u^{j}-\nu_j\Delta u^{j}+\nabla p^{j} & =f^{j}(x,t)&\quad\forall x\in\Omega\times(0,T]\\ \nabla\cdot u^{j} & =0&\quad\forall x\in\Omega\times(0,T]\\ u^{j} & =0&\quad\forall x\in\partial\Omega\times(0,T]\\ u^{j}(x,0) & =u^{j,0}(x)&\quad\forall x\in\Omega, \end{aligned}\right. \end{equation} where $\Omega \subset \mathbb{R}^{d}$, $d= 2,3$, is an open regular domain.
Historically, the solution of the NSE for each parameter has been treated as a separate problem. Recently new algorithms have been developed \cite{J15, J17, JL14,LW17,AMJ16,MohebujjamanR17} that allow for simultaneous calculations at each time step. Specifically the focus of these algorithms has been to use the same linear system for each right hand side. Taking advantage of this problem structure, efficient block solvers, such as block CG \cite{FOP95}, block QMR \cite{FM97}, and block GMRES \cite{GS96} can then be utilized. To further improve the efficiency of these ensemble algorithms, reduced order models (ROMs)~\cite{hesthaven2015certified,quarteroni2015reduced} were recently utilized~\cite{GJS17,GJS16}. Specifically, the proper orthogonal decomposition (POD) method was used to extract the dominant (most energetic) modes from a high-resolution numerical simulation, and the NSE were projected onto these POD modes to obtain an ensemble-POD model. In~\cite{GJS17,GJS16}, it was shown that the ensemble-POD model significantly decreased the computational cost of the standard ensemble methods, without compromising their numerical accuracy. We note, however, that the numerical investigation of the ensemble-POD model in~\cite{GJS17,GJS16} was restricted to low Reynolds numbers. It is well known that, for convection-dominated flows, standard ROMs generally yield inaccurate results, usually in the form of spurious numerical oscillations (see, e.g.,~\cite{giere2015supg,xie2017approximate,xie2017data}). To mitigate these ROM inaccuracies, several numerical stabilization techniques have been proposed over the years (see, e.g.,~\cite{balajewicz2013low,balajewicz2016minimal,benosman2016robust,carlberg2013gnat,kalashnikova2010stability,osth2014need,wang20162d,2017arXiv170900243C,2017arXiv171003569F, weller2009numerical,weller2009robust}). 
\emph{Regularized ROMs (Reg-ROMs)} are recently proposed stabilized ROMs for the numerical simulation of convection-dominated flows, both deterministic~\cite{sabetghadam2012alpha,wells2017evolve,XIE201812} and stochastic~\cite{iliescu2017regularized}. These Reg-ROMs use \emph{explicit ROM spatial filtering} to regularize (smooth) various ROM terms and thus increase the numerical stability of the resulting ROM. This idea was first used by the great Jean Leray~\cite{leray1934sur} in the mathematical study of the NSE and later on to develop regularized models for the numerical simulation of turbulent flows, e.g., in~\cite{geurts2003regularization,layton2012approximate}. In the ROM arena, Reg-ROMs were also successfully used in the numerical simulation of convection-dominated flows. For example, the Reg-ROMs used in the numerical simulation of a 3D flow past a circular cylinder at a Reynolds number $Re=1000$ produced accurate results in which the spurious numerical oscillations of standard ROMs were significantly decreased~\cite{wells2017evolve}. In this paper, we put forth ROM spatial filtering and Reg-ROMs as a means to mitigate the numerical inaccuracies that are generally produced by the ensemble-POD method when it is applied to convection-dominated flows. Specifically, we propose and investigate the Leray ensemble-POD method, which replaces the convective field in the nonlinearity of the standard ensemble-POD method with its spatially filtered version. For the spatial filter in the Leray ensemble-POD method, we use the POD differential filter~\cite{wells2017evolve,xie2017approximate}. In Section~\ref{POD_sec}, we also propose a new numerical discretization of the variable viscosity ensemble algorithm in which the average viscosity is replaced with the maximum viscosity. We show that this new numerical discretization is significantly more stable than those in current use.
Furthermore, we prove error estimates for the new Leray ensemble-POD algorithm with this new numerical discretization. Finally, in Section~\ref{numex}, we test the new Leray ensemble-POD method in the numerical simulation of the two-dimensional flow between offset circles used in \cite{GJS16,GJS17}. To this end, we compare the new Leray ensemble-POD method with the standard ensemble-POD method and a fine resolution numerical simulation, which is used as a benchmark. \section{Notation and preliminaries} We denote by $\|\cdot\|$ and $(\cdot,\cdot)$ the $L^{2}(\Omega)$ norm and inner product, respectively, and by $\|\cdot\|_{L^{p}}$ and $\|\cdot\|_{W_{p}^{k}}$ the $L^{p}(\Omega)$ and Sobolev $W^{k}_{p}(\Omega)$ norms, respectively. $H^{k}(\Omega)=W_{2}^{k}(\Omega)$ with norm $\|\cdot\|_{k}$. For a function $v(x,t)$ that is well defined on $\Omega \times [0,T]$, we define the norms $$ |||v|||_{2,s} := \Big(\int_{0}^{T}\|v(\cdot,t)\|_{s}^{2}dt\Big)^{\frac{1}{2}} \qquad \text{and} \qquad |||v|||_{\infty,s} := \text{ess\,sup}_{[0,T]}\|v(\cdot,t)\|_{s} . $$ The space $H^{-1}(\Omega)$ denotes the dual space of bounded linear functionals defined on $H^{1}_{0}(\Omega)=\{v\in H^{1}(\Omega)\,:\,v=0 \mbox{ on } \partial\Omega\}$; this space is equipped with the norm $$ \|f\|_{-1}=\sup_{0\neq v\in X}\frac{(f,v)}{\| \nabla v\| } \quad\forall f\in H^{-1}(\Omega). $$ The solution spaces $X$ for the velocity and $Q$ for the pressure are respectively defined as $$ \begin{aligned} X :=& [H^{1}_{0}(\Omega)]^{d} = \{ v \in [L^{2}(\Omega)]^{d} \,:\, \nabla v \in [L^{2}(\Omega)]^{d \times d} \ \text{and} \ v = 0 \ \text{on} \ \partial \Omega \} \\ Q :=& L^{2}_{0}(\Omega) = \Big\{ q \in L^{2}(\Omega) \,:\, \int_{\Omega} q dx = 0 \Big\}.
\end{aligned} $$ A weak formulation of (\ref{eq:NSE}) is given as follows: for $j=1, \ldots, J$, find $u^j:(0,T]\rightarrow X$ and $p^j:(0,T]\rightarrow Q$ such that, for almost all $t\in(0,T]$, satisfy \begin{equation}gin{equation}\label{wfwf} \left\{\begin{equation}gin{aligned} (u_{t}^j,v)+(u^{j}\cdot\nabla u^{j},v)+\nu_j(\nabla u^{j},\nabla v)-(p^{j} ,\nabla\cdot v) & =(f^{j},v)&\quad\forall v\in X\\ (\nabla\cdot u^{j},q) & =0&\quad\forall q\in Q\\ u^{j}(x,0)&=u^{j,0}(x).& \end{aligned}\right. \end{equation} The subspace of $X$ consisting of weakly divergence-free functions is defined as $$ V :=\{v\in X \,:\,(\nabla\cdot v,q)=0\,\,\forall q\in Q\} \subset X. $$ We denote conforming velocity and pressure finite element spaces based on a regular triangulation of $\Omega$ having maximum triangle diameter $h$ by $ X_{h}\subset X$ {and} $ Q_{h}\subset Q. $ We assume that the pair of spaces $(X_h,Q_h)$ satisfy the discrete inf-sup (or $LBB_h$) condition required for stability of finite element approximations; we also assume that the finite element spaces satisfy the approximation properties $$ \begin{equation}gin{aligned} \inf_{v_h\in X_h}\| v- v_h \|&\leq C h^{s+1}&\forall v\in [H^{s+1}(\Omega)]^d\\ \inf_{v_h\in X_h}\| \nabla ( v- v_h )\|&\leq C h^s&\forall v\in [H^{s+1}(\Omega)]^d\\ \inf_{q_h\in Q_h}\| q- q_h \|&\leq C h^s&\forall q\in H^{s}(\Omega), \end{aligned} $$ where $C$ is a positive constant that is independent of $h$. The Taylor-Hood element pairs ($P^s$-$P^{s-1}$), $s\geq 2$, are one common choice for which the $LBB_h$ stability condition and the approximation estimates hold \cite{GR79, Max89}. 
To ensure the uniqueness of the NSE solution and to ensure that standard finite element error estimates hold, we make the following regularity assumptions on the data and true solution: { \begin{assumption}\label{assumption:reg} In \eqref{eq:NSE} we assume that $u^0 \in V$, $f^{j} \in L^{2}(0,T;L^{2}(\Omega))$, $u^{j} \in L^{\infty}(0,T;H^{s+1}(\Omega))\cap H^{1}(0,T;H^{s+1}(\Omega))\cap H^{2}(0,T;L^{2}(\Omega))$, and $p \in L^{\infty}(0,T; Q \cap H^k(\Omega))$. \end{assumption} } Using the regularity assumptions above and assuming a sufficiently small $\Delta t$, the following error estimate can be proven for the full discretization of~\eqref{wfwf} with Taylor-Hood elements and the Crank-Nicolson time-discretization~\cite{john2016divergence,layton2008numerical}: \begin{equation} \| u(t^{N}) - u_{h}^{N} \|^2 + \nu \, \Delta t \, \sum_{n=1}^{N} \| \nabla (u(t^{n}) - u_{h}^{n}) \|^2 \leq C \, \left( h^{2m} + \Delta t^4 \right), \label{eqn:error-estimate-fe} \end{equation} where $C$ is independent of $h$ and $\Delta t$. We define the trilinear form $$ b(w,u,v) = (w\cdot\nabla u,v) \qquad\forall u,v,w\in [H^1(\Omega)]^d $$ and the explicitly skew-symmetric trilinear form given by $$ b^{\ast}(w,u,v):=\frac{1}{2}(w\cdot\nabla u,v)-\frac{1}{2}(w\cdot\nabla v,u) \qquad\forall u,v,w\in [H^1(\Omega)]^d \, , $$ which satisfies the bounds \cite{Layton08} \begin{gather} b^{\ast}(w,u,v)\leq C_{b^*} \| \nabla w\| \| \nabla u\| (\| v \| \| \nabla v \| )^{1/2}\qquad\forall u, v, w \in X \label{In1}\\ b^{\ast}(w,u,v)\leq C_{b^*} (\| w \| \| \nabla w\| )^{1/2} \| \nabla u\| \| \nabla v \| \qquad\forall u, v, w \in X .\label{In2} \end{gather} We also define the discretely divergence-free space $V_h$ as $$ V_{h} :=\{v_{h}\in X_{h}\,:\,(\nabla\cdot v_{h},q_{h})=0\,\,\forall q_{h}\in Q_{h}\} \subset X.
$$ In most cases, and for the Taylor-Hood element pair in particular, $V_{h} \not\subset V$, i.e., discretely divergence-free functions are not weakly divergence-free. \begin{definition}\label{def21} Let $t^{n}=n\Delta t$, $n=0,1,2,\ldots,N$, where $N:=T/\Delta t$, denote a partition of the interval $[0,T]$. For $j=1, \ldots, J$ and $n=0,1,2,\ldots,N$, let $u^{j,n}(x):=u^{j}(x,t^{n})$. Then, the \text{\bf ensemble mean} is defined, for $n=0,1,2,\ldots,N$, by $$ <u>^n :=\frac{1}{J}\sum_{j=1}^{J}u^{j,n}.\label{Enmean} $$ \end{definition} The full space and time discretization on which we base our method is similar to the one used in \cite{GJW17,2017arXiv170604060G}. For $j = 1, \ldots,J$, given $u^{j,0}_h \in X_h$ and $u^{j,1}_h\in X_h$, for $n=0,1,2,\ldots,N-1$ find $u^{j,n+1}_h\in X_h$ and $p_h^{j,n+1}\in Q_h$ satisfying \begin{equation*} \begin{aligned} &\Big(\frac{u^{j,n+1}_h - u^{j,n}_h}{\Delta t}, v_h \Big) + b^{\ast}(<u_h>^{n} , u^{j,n+1}_h ,v_h)+ b^{\ast}(u^{j,n}_h - <u_h>^{n} ,u^{j,n}_h, v_h)\\ & + \nu_{max} (\nabla u^{j,n+1}_h, \nabla v_h) + (\nu_j - \nu_{max}) (\nabla u^{j,n}_h, \nabla v_h) \\ &- (p^{j,n+1}_h , \nabla \cdot v_h) =( f^{j,n+1}, v_h) \quad \forall v_h\in X_h\\ &\qquad\quad (\nabla \cdot u_h^{j,n+1}, q_h )= 0 \qquad \forall q_h\in Q_h. \end{aligned} \end{equation*} The major difference between the two algorithms is the use of the maximum value of the viscosities $\nu_{max}$ rather than the average $<\nu>$, resulting in a superior stability condition. \section{Proper Orthogonal Decomposition Ensemble Based Models} \label{POD_sec} \subsection{Proper Orthogonal Decomposition} In this subsection we briefly describe the POD method and apply it to the previously stated ensemble algorithm. A more detailed description of this method can be found in \cite{KLV01}. Given a positive integer $N_S$, let $0=t_0<t_1< \cdots < t_{N_S} = T$ denote a uniform partition of the time interval $[0,T]$.
For $j=1,\ldots,J_S$, we select $J_S$ different initial conditions $u^{j,0}(x)$, viscosities $\nu^j$, and forcing functions $f^j$, and denote by $u_{h,S}^{j,m}(x)\in X_h$, $j=1,\ldots,J_S$, $m=1,\ldots,N_S$, the finite element approximation to \eqref{eq:NSE} evaluated at $t=t_m$. We then define the space spanned by the $J_S(N_S+1)$ discrete snapshots as \begin{equation*} X_{h,S}:=\text{span} \{ u_{h,S}^{j,m}(x) \}_{j=1,m=0}^{J_S,N_S} \subset V_h \subset X_h. \end{equation*} Denoting by $\vec{u}_S^{j,m}$ the vector of coefficients corresponding to the finite element function $u_{h,S}^{j,m}(x)$, where $K=\dim X_h$, we define the $K\times J_S(N_S+1)$ {\em snapshot matrix} $\mathbb{A}$ as $$ \mathbb{A} = \big(\vec{u}_S^{1,0},\vec{u}_S^{1,1}, \ldots , \vec{u}_S^{1,N_S}, \vec{u}_S^{2,0},\vec{u}_S^{2,1}, \ldots , \vec{u}_S^{2,N_S}, \ldots , \vec{u}_S^{J_S,0},\vec{u}_S^{J_S,1}, \ldots , \vec{u}_S^{J_S,N_S}\big), $$ i.e., the columns of $\mathbb{A}$ are the finite element coefficient vectors corresponding to the discrete snapshots. The POD method then seeks a low dimensional basis $$ X_R :=\text{span}\{{\varphi}_i\}_{i=1}^R \subset X_{h,S} \subset V_h\subset X_h $$ which can approximate the snapshot data. This basis can be determined by solving the constrained minimization problem \begin{equation}\label{Min} \begin{aligned} \min \sum_{k=1}^{J_S} \sum_{l=0}^{N_S}\Big \| u_{h,S}^{k,l}-\sum_{j=1}^R (u_{h,S}^{k,l}, \varphi_j)\varphi_j\Big \| ^2 \\ \text{subject to } (\varphi_i, \varphi_j)= \delta_{ij}\quad\mbox{for $i,j=1,\ldots,R$}, \end{aligned} \end{equation} where $\delta_{ij}$ denotes the Kronecker delta. Defining the correlation matrix $\mathbb{C} = \mathbb{A}^{T}\mathbb{M}\mathbb{A}$ where $\mathbb{M}$ denotes the finite element mass matrix, this problem can then be solved by considering the eigenvalue problem \begin{equation*} \mathbb{C}\vec{a}_{i} = \lambda_{i}\vec{a}_{i}.
\end{equation*} It can then be shown the POD basis functions will be given by \begin{equation}gin{equation*} \vec\varphi_i = \frac{1}{\sqrt{\lambda_i}}\mathbb{A}\vec{a}_{i}, \ \ \ i = 1, \ldots, R. \end{equation*} We now define the POD $L^{2}$ projection we will need for the ensuing stability and error analysis. \begin{equation}gin{definition}[POD $L^{2}$ projection] Let $P_{r}: L^{2}(\Omega) \rightarrow X_{R}$ such that \begin{equation}gin{equation} (u - P_{r}u,\varphi) = 0 \qquad\forall \varphi\in X_{R}. \end{equation} \end{definition} Next we give a POD inverse estimate. Let $\mathbb{S}_{R} = (\nabla \varphi_{i}, \nabla \varphi_{j})_{L^{2}}$ be the POD stiffness matrix and let $\||\cdot|\|_{2}$ denote the matrix $2$-norm \begin{equation}gin{lemma}[POD inverse estimate] \begin{equation}gin{equation}\label{POD:inveq} \|\nabla \varphi \| \leq \||\mathbb{S}_{R}|\|_{2}^{\frac{1}{2}}\|\varphi\| \ \ \ \forall \varphi \in X_{R}. \end{equation} \end{lemma} \subsection{Ensemble-POD Algorithm} Using this POD basis we can now construct the ensemble-POD algorithm. The construction is similar to the full finite element approximation except we seek a solution in the POD space $X_{R}$ using the basis $\{ \varphi_{i}\}_{i=1}^{R}$. The fully discrete algorithm can be written as: \begin{equation}gin{equation}\label{En-POD-Weak} \begin{equation}gin{aligned} &\big(\frac{u_{R}^{j,n+1}-u_{R}^{j,n}}{\Delta t}, \varphi\big)+b^{\ast}({<u_{R} >^{n}},u_{R}^{j,n+1},\varphi)+b^{\ast}({u_{R}^{j, n}-<u_{R} >^{n}},u_{R}^{j,n} ,\varphi)\\& +\nu_{max}(\nabla u_{R}^{j,n+1},\nabla \varphi) + (\nu_{j} - \nu_{max})(\nabla u_{R}^{j,n},\nabla \varphi) =(f^{j,n+1},\varphi)\qquad\forall \varphi\in X_{R}. \end{aligned} \end{equation} We note that because $X_{R} \subset V_{h}$ the POD basis is discretely divergence-free by construction. Therefore, there is no pressure term present in (3.2). In recent works constructing a basis for the pressure space in addition to the velocity space has been investigated. 
The interested reader should consult \cite{NME:NME4772}. \subsection{Leray Ensemble-POD Algorithm} To construct the Leray ensemble-POD algorithm we use the ROM differential filter. \begin{definition}[ROM differential filter] $\forall v \in X$ let $\overline{v}^{R}$ be the unique element of $X_{R}$ such that \begin{equation} \delta^{2}(\nabla \overline{v}^{R},\nabla \varphi) + (\overline{v}^{R},\varphi) = (v,\varphi) \qquad\forall \varphi\in X_{R}. \end{equation} \end{definition} Here $\delta$ is known as the filtering radius. The differential filter was first developed by Germano~\cite{germano1986differential} for large eddy simulations. It was introduced in the ROM setting in \cite{sabetghadam2012alpha} and expanded further in \cite{wells2017evolve,XIE201812}. Incorporating this into the ensemble framework, the fully discrete Leray ensemble-POD algorithm can be written as: \begin{equation}\label{En-Leray-POD-Weak} \begin{aligned} &\big(\frac{u_{R}^{j,n+1}-u_{R}^{j,n}}{\Delta t}, \varphi\big)+b^{\ast}(\overline{<u_{R} >^{n}},u_{R}^{j,n+1},\varphi)+b^{\ast}(\overline{u_{R}^{j, n}-<u_{R} >^{n}},u_{R}^{j,n} ,\varphi)\\& +\nu_{max}(\nabla u_{R}^{j,n+1},\nabla \varphi) + (\nu_{j} - \nu_{max})(\nabla u_{R}^{j,n},\nabla \varphi) =(f^{j,n+1},\varphi)\qquad\forall \varphi\in X_{R}. \end{aligned} \end{equation} \section{Stability Analysis} In this section we present a result pertaining to the stability of the Leray ensemble-POD algorithm. A stability bound for the ensemble-POD algorithm for a fixed viscosity was proven in Theorem 4.2 in~\cite{GJS17}, while a stability bound for an ensemble-FE algorithm with variable viscosity was proven in Theorem 2.1 in~\cite{GJW17}. The stability bound proven in this section is less restrictive than the bound proven in~\cite{GJW17} due to the use of $\nu_{max}$ in the algorithm as opposed to $<\nu>$.
\begin{theorem} \label{stab:theorem} Consider algorithm \eqref{En-Leray-POD-Weak}; define $0 \leq \epsilon \leq 1$ such that \begin{equation} \max\limits_{1 \leq j \leq J} \frac{|\nu_{j} - \nu_{max}|}{ \nu_{max}} = 1 - \epsilon \label{eqn:epsilon} \end{equation} and assume the following condition holds for $j = 1, \ldots, J$: \begin{equation}\label{stab:assumption} \frac{C_{b^*}^2 \, \Delta t}{\nu_{max}} \||\mathbb{S}_{R}|\|_{2}^{\frac{1}{2}}\|\nabla(\overline{u_{R}^{j,n}- <u_{R}>^{n}})\|^{2} \leq \epsilon. \end{equation} Then, for any $N \geq 1$, \begin{equation} \begin{aligned} &\frac{1}{2} \|u_{R}^{j,N}\|^{2} + \frac{\nu_{max} \Delta t}{2}\|\nabla u_{R}^{j,N}\|^{2} + \frac{\epsilon \, \nu_{max} \, \Delta t}{4} \sum_{n=0}^{N-1} \|u_{R}^{j,n+1}\|^{2} \\ &\leq \sum_{n=0}^{N-1} \frac{\Delta t}{\nu_{max} \epsilon}\|f_{j}^{n+1}\|^{2}_{-1} + \frac{1}{2}\|u_{R}^{0}\|^{2} + \frac{\nu_{max} \Delta t}{2}\|\nabla u_{R}^{j,0}\|^{2} \stackrel{notation}{=} C_{stab} \, .
\end{aligned} \label{eqn:theorem-stability-1} \end{equation} \begin{proof} Setting $\varphi = u_{R}^{j,n+1}$ and using the skew-symmetry of the trilinear term we have \begin{equation}\label{Theor1:eq1} \begin{aligned} &\frac{1}{2}\|u_{R}^{j,n+1}\|^{2} - \frac{1}{2}\|u_{R}^{j,n}\|^{2} + \frac{1}{2}\|u_{R}^{j,n+1} - u_{R}^{j,n}\|^{2} + \nu_{max} \Delta t \|\nabla u_{R}^{j,n+1}\|^{2} \\ &+ \Delta t b^{\ast}(\overline{u_{R}^{j, n}-<u_{R}>^{n}},u_{R}^{j,n}, u_{R}^{j,n+1} - u_{R}^{j,n} ) = \\&\Delta t (f_{j}^{n+1},u_{R}^{j,n+1}) - \Delta t (\nu_{j} - \nu_{max}) (\nabla u_{R}^{j,n},\nabla u_{R}^{j,n+1} ) \, . \end{aligned} \end{equation} Now applying Young's inequality on the right hand side we have \begin{equation}\label{Theor1:eq2} \begin{aligned} &\frac{1}{2}\|u_{R}^{j,n+1}\|^{2} - \frac{1}{2}\|u_{R}^{j,n}\|^{2} + \frac{1}{2}\|u_{R}^{j,n+1} - u_{R}^{j,n}\|^{2} + \nu_{max} \Delta t \|\nabla u_{R}^{j,n+1}\|^{2} \\ &+ \Delta t b^{\ast}(\overline{u_{R}^{j, n}-<u_{R}>^{n}},u_{R}^{j,n}, u_{R}^{j,n+1} - u_{R}^{j,n} ) \leq \frac{\alpha \Delta t \nu_{max} }{4}\|\nabla u_{R}^{j,n+1}\|^{2} \\ &+ \frac{\Delta t}{\alpha \nu_{max}} \|f_{j}^{n+1}\|^{2}_{-1} + \frac{\beta \Delta t \nu_{max} }{4}\|\nabla u_{R}^{j,n+1}\|^{2} + \frac{\Delta t(\nu_{j} - \nu_{max})^{2} }{\beta \nu_{max}} \|\nabla u_{R}^{j,n}\|^{2}. \end{aligned} \end{equation} Since both $\frac{\beta \Delta t \nu_{max} }{4}\|\nabla u_{R}^{j,n+1}\|^{2}$ and $\frac{\Delta t(\nu_{j} - \nu_{max})^{2} }{\beta \nu_{max}} \|\nabla u_{R}^{j,n}\|^{2} $ need to be absorbed into $\nu_{max}\Delta t \|\nabla u^{j,n+1}_{R}\|^{2}$ we minimize the quantity $\frac{\beta \Delta t \nu_{max} }{4} + \frac{\Delta t(\nu_{j} - \nu_{max})^{2} }{\beta \nu_{max}}$ by selecting $\beta = \frac{2|\nu_{j} - \nu_{max}|}{\nu_{max}} $.
It then follows that \begin{equation}gin{equation}\label{Theor1:eq3} \begin{equation}gin{aligned} &\frac{1}{2}\|u_{R}^{j,n+1}\|^{2} - \frac{1}{2}\|u_{R}^{j,n}\|^{2} + \frac{1}{2}\|u_{R}^{j,n+1} - u_{R}^{j,n}\|^{2} + \nu_{max} \Delta t \|\nabla u_{R}^{j,n+1}\|^{2} \\ &+ \Delta tb^{\ast}(\overline{u_{R}^{j, n}-<u_{R}>^{n}},u_{R}^{j,n}, u_{R}^{j,n+1} - u_{R}^{j,n} ) \leq \frac{\alpha \Delta t \nu_{max} }{4}\|\nabla u_{R}^{j,n+1}\|^{2} \\ & + \frac{\Delta t}{\alpha \nu_{max}} \|f_{j}^{n+1}\|^{2}_{-1} + \frac{\Delta t|\nu_{j} - \nu_{max}| }{2}\|\nabla u_{R}^{j,n+1}\|^{2} + \frac{\Delta t|\nu_{j} - \nu_{max}| }{2} \|\nabla u_{R}^{j,n}\|^{2}. \end{aligned} \end{equation} Next we bound the trilinear term using \eqref{In1} and \eqref{POD:inveq}, obtaining \begin{equation}gin{equation}\label{Theor1:eq4} \begin{equation}gin{aligned} -\Delta t b^{\ast}&(\overline{u_{R}^{j, n}-<u_{R} >^{n}},u_{R}^{j,n}, u_{R}^{j,n+1} - u_{R}^{j,n} ) \\ &\leq C_{b^\ast} \Delta t \|\overline{u_{R}^{j, n}-<u_{R}>^{n}}\| \| \nabla u_{R}^{j,n}\|\left(\|\nabla (u_{R}^{j,n+1} - u_{R}^{j,n}) \| \|u_{R}^{j,n+1} - u_{R}^{j,n}\| \right)^{\frac{1}{2}} \\ &\leq C_{b^\ast} \Delta t \||\mathbb{S}_{R}|\|^{\frac{1}{4}}_{2} \|\overline{u_{R}^{j, n}-<u_{R} >^{n}}\| \| \nabla u_{R}^{j,n}\| \|u_{R}^{j,n+1} - u_{R}^{j,n}\| \, . \end{aligned} \end{equation} Then using Young's inequality we obtain \begin{equation}gin{equation}\label{Theor1:eq5} \begin{equation}gin{aligned} -\Delta t b^{\ast}&(\overline{u_{R}^{j, n}-<u_{R}>^{n}},u_{R}^{j,n}, u_{R}^{j,n+1} - u_{R}^{j,n} ) \\ &\leq \frac{C_{b^*}^2 \, \Delta t^{2}}{2} \||\mathbb{S}_{R}|\|^{\frac{1}{2}}_{2} \|\overline{u_{R}^{j, n}-<u_{R} >^{n}}\|^{2} \| \nabla u_{R}^{j,n}\|^{2} + \frac{1}{2}\|u_{R}^{j,n+1} - u_{R}^{j,n}\|^{2} \, . 
\end{aligned} \end{equation} Combining like terms we then have \begin{equation}gin{equation}\label{Theor1:eq6} \begin{equation}gin{aligned} &\frac{1}{2}\|u_{R}^{j,n+1}\|^{2} - \frac{1}{2}\|u_{R}^{j,n}\|^{2} + \nu_{max} \Delta t\left(1 - \frac{\alpha}{4} - \frac{|\nu_{j} - \nu_{max}|}{2 \nu_{max}}\right)\|\nabla u_{R}^{j,n+1}\|^{2} \\ &\leq \frac{\Delta t}{\alpha \nu_{max}} \|f_{j}^{n+1}\|^{2}_{-1} + \frac{C_{b^*}^2 \, \Delta t^{2}}{2} \||\mathbb{S}_{R}|\|^{\frac{1}{2}}_{2} \|\overline{u_{R}^{j, n}-<u_{R}>^{n}}\|^{2} \| \nabla u_{R}^{j,n}\|^{2} \\ &+ \frac{\Delta t|\nu_{j} - \nu_{max}| }{2} \|\nabla u_{R}^{j,n}\|^{2} . \end{aligned} \end{equation} Rearranging terms it follows that \begin{equation}gin{equation}\label{Theor1:eq7} \begin{equation}gin{aligned} &\frac{1}{2}\|u_{R}^{j,n+1}\|^{2} - \frac{1}{2}\|u_{R}^{j,n}\|^{2} + \nu_{max} \Delta t \biggr( \big(1 - \frac{\alpha}{4} - \frac{|\nu_{j} - \nu_{max}|}{2 \nu_{max}}\big)\|\nabla u_{R}^{j,n+1}\|^{2} \\ &- (\frac{|\nu_{j} - \nu_{max}|}{2 \nu_{max}} + \frac{C_{b^*}^2 \, \Delta t}{2 \, \nu_{max}} \||\mathbb{S}_{R}|\|^{\frac{1}{2}}_{2} \|\overline{u_{R}^{j, n}-<u_{R} >^{n}}\|^{2}) \| \nabla u_{R}^{j,n}\|^{2} \biggr) \leq \frac{\Delta t}{\alpha \nu_{max}} \|f_{j}^{n+1}\|^{2}_{-1}. 
\end{aligned} \end{equation} Using the fact that $\max\limits_{1 \leq j \leq J} \frac{|\nu_{j} - \nu_{max}|}{ \nu_{max}} = 1 - \epsilon$ for $0 \leq \epsilon \leq 1$ and taking $\alpha = \epsilon$ we have \begin{equation}\label{Theor1:eq8} \begin{aligned} &\frac{1}{2}\|u_{R}^{j,n+1}\|^{2} - \frac{1}{2}\|u_{R}^{j,n}\|^{2} + \nu_{max} \Delta t \biggl( \left( \frac{1}{2} + \frac{\epsilon}{4} \right) \|\nabla u_{R}^{j,n+1}\|^{2} \\ &- (\frac{1}{2} - \frac{\epsilon}{2} + \frac{C_{b^*}^2 \, \Delta t}{2 \, \nu_{max}} \||\mathbb{S}_{R}|\|^{\frac{1}{2}}_{2} \|\overline{u_{R}^{j, n}-<u_{R} >^{n}}\|^{2}) \| \nabla u_{R}^{j,n}\|^{2} \biggr) \leq \frac{ \Delta t}{\nu_{max} \epsilon} \|f_{j}^{n+1}\|^{2}_{-1}. \end{aligned} \end{equation} Now using assumption \eqref{stab:assumption} in \eqref{Theor1:eq8}, we have \begin{equation}\label{Theor1:eq9} \begin{aligned} &\frac{1}{2}\|u_{R}^{j,n+1}\|^{2} - \frac{1}{2}\|u_{R}^{j,n}\|^{2} + \nu_{max} \Delta t \biggl( \frac{1}{2}\|\nabla u_{R}^{j,n+1}\|^{2} - \frac{1}{2} \| \nabla u_{R}^{j,n}\|^{2} \biggr) \\ & + \nu_{max} \Delta t \frac{\epsilon}{4} \, \|u_{R}^{j,n+1}\|^{2} \leq \frac{ \Delta t}{\nu_{max} \epsilon} \|f_{j}^{n+1}\|^{2}_{-1}. \end{aligned} \end{equation} Summing up~\eqref{Theor1:eq9} from $0$ to $N-1$ yields~\eqref{eqn:theorem-stability-1}. \end{proof} \end{theorem} \begin{remark} The term $\epsilon$ in the above theorem measures the relative uncertainty present in the viscosities. In practice, the amount of uncertainty present in the viscosities can be one or two orders of magnitude. In this case $\epsilon \approx \mathcal{O}(10^{-1})$ or $\mathcal{O}(10^{-2})$. \end{remark} \section{Error analysis} \label{err_analysis} We next provide an error analysis for Leray ensemble-POD solutions.
First, we present several results obtained in \cite{GJS17}, which we use in the analysis. We also use the following notation: \begin{equation}gin{definition}[Generic Constant $C$] Let $C$ be a generic constant that can depend on $f,u^{j}$, but not on $h, \Delta t, R, \lambda_i, \ensuremath{\epsilon}ilon, \nu_{max}, \ensuremath{\Delta t}a, C_{stab}, C_{b^{*}}$. \end{definition} The following lemma is similar to Lemma 5.1 in~\cite{GJS17}. \begin{equation}gin{lemma} \label{lm:L2err} {\rm[$L^2(\Omega)$ norm of the error between snapshots and their projections onto the POD space]} We have \begin{equation}gin{equation*} \frac{1}{J_S(N_S+1)} \sum_{j=1}^{J_S} \sum_{m=0}^{N_S}\Big \| u_{h,S}^{j,m}-\sum_{i=1}^R (u_{h,S}^{j,m}, \varphi_i)\varphi_i\Big \| ^2 = \sum_{i=R+1}^{J_S(N_S+1)} \lambda_i \end{equation*} and thus for $j=1,\ldots,J_S$, \begin{equation}gin{equation*} \frac{1}{N_S+1} \sum_{m=0}^{N_S}\Big \| u_{h,S}^{j,m}-\sum_{i=1}^R (u_{h,S}^{j,m}, \varphi_i)\varphi_i\Big \| ^2 \leq J_S\sum_{i=R+1}^{J_S(N_S+1)} \lambda_i . \end{equation*} \end{lemma} The following lemma is similar to Lemma 5.2 in~\cite{GJS17}. \begin{equation}gin{lemma}\label{lm:H1err}{\rm [$H^1(\Omega)$ norm of the error between snapshots and their projections in the POD space.]} We have \begin{equation}gin{equation*} \frac{1}{J_S(N_S+1)} \sum_{j=1}^{J_S} \sum_{m=0}^{N_S}\Big \| \nabla \Big( u_{h,S}^{j,m}-\sum_{i=1}^R (u_{h,S}^{j,m}, \varphi_i)\varphi_i\Big)\Big\| ^2 =\sum_{i=R+1}^{J_S(N_S+1)} \lambda_i \| \nabla \varphi_i\|^2 \end{equation*} and thus, for $j=1,\ldots,J_S$, \begin{equation}gin{equation*} \frac{1}{N_S+1} \sum_{m=0}^{N_S} \Big\| \nabla\Big( u_{h,S}^{j,m}-\sum_{i=1}^R (u_{h,S}^{j,m}, \varphi_i)\varphi_i \Big)\Big\| ^2 \leq J_S\sum_{i=R+1}^{J_S(N_S+1)} \lambda_i \| \nabla \varphi_i\|^2. \end{equation*} \end{lemma} The following lemma is similar to Lemma 5.3 in~\cite{GJS17} (see also Lemma 3.3 in~\cite{IW14}). 
\begin{equation}gin{lemma}\label{lm:Projerr}{\rm [Error in the projection onto the POD space]} Consider the partition $0=t_0<t_1< \cdots < t_{N_S} = T$ used in Section \ref{POD_sec}. For any $u \in H^{1}(0,T;[H^{s+1}(\Omega)]^d)$, let $u^{m}=u(\cdot, t_m)$. Then, the error in the projection onto the POD space $X_R$ satisfies the estimates \begin{equation}gin{equation*} \begin{equation}gin{aligned} \frac{1}{N_S+1}& \sum_{m=0}^{N_S} \| u^{j,m}-P_R u^{j,m}\| ^2 \leq C\left( h^{2s+2} + \Delta t^4 \right) + J_S\sum_{i=R+1}^{J_S(N_S+1)} \lambda_i \\\\ \frac{1}{N_S+1} &\sum_{m=0}^{N_S} \| \nabla \left(u^{j,m}-P_R u^{j,m}\right)\| ^2 \\& \leq (C+h^2 \|\hspace{-1pt}| {\mathbb S}_R \|\hspace{-1pt}| _2 ) h^{2s} + (C+\|\hspace{-1pt}| {\mathbb S}_R \|\hspace{-1pt}| _2 )\Delta t^4 + J_S\sum_{i=R+1}^{J_S(N_S+1)} \| \nabla \varphi_i\| ^2\lambda_i . \end{aligned} \end{equation*} \end{lemma} We assume the following estimates are also valid, as done in \cite{IW14}. \begin{equation}gin{assumption}\label{assumption1} Consider the partition $0=t_0<t_1< \cdots < t_{N_S} = T$ used in Section \ref{POD_sec}. For any $u \in H^{1}(0,T;[H^{s+1}(\Omega)]^d)$, let $u^{m}=u(\cdot, t_m)$. Then, the error in the projection onto the POD space $X_R$ satisfies the estimates \begin{equation}gin{equation*} \begin{equation}gin{aligned} &\| u^{j,m}-P_R u^{j,m} \| ^2 \leq C\left( h^{2s+2} + \Delta t^4 \right) + J_S\sum_{i=R+1}^{J_S(N_S+1)} \lambda_i \\\\ &\|\nabla \left(u^{j,m}-P_R u^{j,m} \right)\|^2 \\&\leq (C+h^2 \|\hspace{-1pt}| {\mathbb S}_R \|\hspace{-1pt}| _2 ) h^{2s} + (C+\|\hspace{-1pt}| {\mathbb S}_R \|\hspace{-1pt}| _2 )\Delta t^4 + J_S\sum_{i=R+1}^{J_S(N_S+1)} \| \nabla \varphi_i\| ^2\lambda_i . \end{aligned} \end{equation*} \end{assumption} { Next we need to make an assumption on the regularity of $u^{j,m}_{R}$ in order to establish an estimate for the ROM filtering error. We note that this assumption is consistent with our regularity assumption \ref{assumption:reg}. 
\\ \begin{equation}gin{assumption}\label{assumption:leray} We assume that $\Delta u^{j,m}_{R} \in L^{2}$ \end{assumption} } \\\\ We now state an estimate for the ROM filtering error which is a simple extension of Lemma 4.3 in \cite{XIE201812}. \begin{equation}gin{lemma} \label{lm:ROMfiltering}{\rm [ROM filtering error estimates]} If $\Delta u_{R}^{j,m} \in L^{2}$, then the following estimate holds: \begin{equation}gin{equation} \begin{equation}gin{aligned} & \ensuremath{\Delta t}a^{2}\|\nabla(u_{R}^{j,m} - \overline{u_{R}^{j,m}})\|^{2} + \|u_{R}^{j,m} - \overline{u_{R}^{j,m}}\|^{2} \\& \leq C\biggr( C\left( h^{2s+2} + \Delta t^4 \right) + J_S\sum_{i=R+1}^{J_S(N_S+1)} \lambda_i \biggr) \\& + C\ensuremath{\Delta t}a^{2} \biggr( (C+h^2 \|\hspace{-1pt}| {\mathbb S}_R \|\hspace{-1pt}| _2 ) h^{2s} + (C+\|\hspace{-1pt}| {\mathbb S}_R \|\hspace{-1pt}| _2 )\Delta t^4 + J_S\sum_{i=R+1}^{J_S(N_S+1)} \| \nabla \varphi_i\| ^2\lambda_i \biggr) \\& + C\ensuremath{\Delta t}a^{4}\|\Delta u_{R}^{j,m} \|^{2} . \end{aligned} \end{equation} \end{lemma} Lastly we state a result for the stability of the ROM filtered variables proven in Lemma 4.4 in~\cite{XIE201812}. \\ \begin{equation}gin{lemma}\label{lm:ROMstability}{\rm [ROM stability estimates]} For $u \in X$, we have \begin{equation}gin{equation} \begin{equation}gin{aligned} &\|\overline{u}\| \leq \|u\| \\ & \| \nabla \overline{u}\| \leq |\|S_{R}\||_{2}^{\frac{1}{2}}\|u\|. \\ \end{aligned} \end{equation} For $u \in X_{R}$, we have \begin{equation}gin{equation} \| \nabla \overline{u}\| \leq \|\nabla u \|. \end{equation} \end{lemma} Let $e^{j,n}=u^{j,n}-u_{R}^{j,n}$ denote the error between the true solution and the POD approximation; then, we have the following error estimates. \begin{equation}gin{theorem} Consider the Leray ensemble-POD algorithm and the partition $0 = t_{0} < t_{1} < \cdots < t_{N_{S}}$ used in Section \ref{POD_sec}. 
Suppose for any $0 \leq n \leq N_{S}$, the stability conditions from Theorem \ref{stab:theorem} and all previously stated regularity assumptions hold. Then for any $1 \leq N \leq N_{S}$, there is a positive constant $C$, such that the following bound holds: \begin{equation}gin{equation} \begin{equation}gin{aligned} &\frac{1}{2}\|e^{j,N}\|^{2} + \frac{\nu_{max}}{2}\| \nabla e^{j,N}\|^{2} + \frac{\ensuremath{\epsilon}ilon}{4}\nu_{max} \| \nabla e^{j,N}\|^{2} + C\nu_{max}\Delta t \sum_{n=0}^{N-1} \| e^{j,n+1} \|^{2} \\ &\leq \exp\left(\frac{C_{b^{*}}^{4} \, C^{4} \,T} {\ensuremath{\epsilon}ilon^{3}\,\nu_{max}^{3}}\right) \biggr[\biggr(\frac{ C \, \nu_{max} \Delta t}{\ensuremath{\epsilon}ilon} + \frac{C \Delta t}{\ensuremath{\epsilon}ilon} \frac{|\nu_j - \nu_{max}|^{2}}{\nu_{max}} + 2C \||\mathbb{S}_{R}|\|_{2}^{-\frac{1}{2}} + \frac{C \, C_{b^*}^2 \, \Delta t}{\ensuremath{\epsilon}ilon \, \nu_{max}} \\ &+ \frac{C \, C_{b^*}^2 \, C_{stab}}{2 \, \ensuremath{\epsilon}ilon \, \nu_{max}} + \frac{2 \,C \, C_{b^*}^2 \, C^{2}_{stab}}{\ensuremath{\epsilon}ilon^{2} \, \nu^{2}_{max}} + \frac{C \, C_{b^*}^2 \, \ensuremath{\Delta t}a}{ \ensuremath{\epsilon}ilon \, \nu_{max}} \biggr) \times \\& \biggr((C+h^2 \|\hspace{-1pt}| {\mathbb S}_R \|\hspace{-1pt}| _2 ) h^{2s} + (C+\|\hspace{-1pt}| {\mathbb S}_R \|\hspace{-1pt}| _2 )\Delta t^4 + J_S\sum_{i=R+1}^{J_S(N_S+1)} \| \nabla \varphi_i\| ^2\lambda_i \biggr) \\& + \frac{C\,C_{b^*}^2}{\ensuremath{\Delta t}a \, \ensuremath{\epsilon}ilon \, \nu_{max}} \biggr(C\left( h^{2s+2} + \Delta t^4 \right)+ J_S\sum_{i=R+1}^{J_S(N_S+1)} \lambda_i \biggr) \\& + \frac{C \, \Delta t^{2}}{\ensuremath{\epsilon}ilon}\frac{|\nu_j - \nu_{max}|^{2}}{\nu_{max}} + C \Delta t \||\mathbb{S}_{R}|\|_{2}^{-\frac{1}{2}} + \frac{C h^{2s}} {d \, \ensuremath{\epsilon}ilon \, \nu_{max}}\| |p^{j} | \|^{2}_{2,s} + \frac{C \Delta t^{2}}{\ensuremath{\epsilon}ilon \, \nu_{max}} \\& + \frac{C\,C_{b^*}^2 \, \Delta t^{2}}{\ensuremath{\epsilon}ilon \, \nu_{max}} + 
\frac{C\,C_{b^*}^2 \, \Delta t \, \ensuremath{\Delta t}a^3}{\ensuremath{\epsilon}ilon \, \nu_{max} \, }\biggr] \\& + \left(1 + CN\nu_{max}\Delta t \right) \times \left(C\left( h^{2s+2} + \Delta t^4 \right) + J_S\sum_{i=R+1}^{J_S(N_S+1)} \lambda_i \right) \\& +(\nu_{max} + \frac{\ensuremath{\epsilon}ilon}{2}\nu_{max}) \times \\& \left((C+h^2 \|\hspace{-1pt}| {\mathbb S}_R \|\hspace{-1pt}| _2 ) h^{2s} + (C+\|\hspace{-1pt}| {\mathbb S}_R \|\hspace{-1pt}| _2 )\Delta t^4 + J_S\sum_{i=R+1}^{J_S(N_S+1)} \| \nabla \varphi_i\| ^2\lambda_i \right). \end{aligned} \end{equation} \begin{equation}gin{proof} The weak solution of the NSE $u^{j}$ satisfies \begin{equation}gin{equation} \label{NSE:weak} \begin{equation}gin{aligned} \left(\frac{u^{j,n+1} - u^{j,n}}{\Delta t},\varphi\right)& + b^{*}(u^{j,n+1},u^{j,n+1},\varphi) + \nu_{j} (\nabla u^{j,n+1}, \nabla \varphi) - (p^{j,n+1},\nabla \cdot \varphi) \\ &= (f^{j,n+1},\varphi) + Intp(u^{j,n+1};\varphi) \end{aligned} \end{equation} where \begin{equation}gin{equation} Intp(u^{j,n+1};\varphi) = (\frac{u^{j,n+1} - u^{j,n}}{\Delta t} - u^{j}_{t}(t^{n+1}),\varphi). \end{equation} We split the error \begin{equation}gin{equation} e^{j,n} = u^{j,n} - u_{R}^{j,n} = (u^{j,n} - P_{R} u^{j,n}) + (P_{R}u^{j,n} - u_{R}^{j,n}) = \eta^{j,n} + \xi_{R}^{j,n}, \qquad j = 1,\hdots, J. 
\end{equation} Subtracting \eqref{En-Leray-POD-Weak} from \eqref{NSE:weak} as well as adding and subtracting the terms \newline $\nu_{max}(\nabla u_{j}^{n+1},\nabla \varphi)$ and $\nu_{j} - \nu_{max} ( \nabla u_{j}^{n+1},\nabla \varphi)$ we have \begin{equation}gin{equation} \begin{equation}gin{aligned} (\frac{\xi_{R}^{j,n+1} - \xi_{R}^{j,n}}{\Delta t} &, \varphi) + \nu_{max}(\nabla \xi_{R}^{j,n+1},\nabla \varphi) + (\nu_{j} - \nu_{max})(\nabla(u^{j,n+1}-u^{j,n}),\nabla \varphi)\\ &+ (\nu_j - \nu_{max})(\nabla \xi_{R}^{j,n},\nabla \varphi) + b^{*}(u^{j,n+1},u^{j,n+1},\varphi) \\ &- b^{*}(\overline{<u_{R} >^{n}},u_{R}^{j,n+1},\varphi) - b^{*}(\overline{u_{R}^{j, n}-<u_{R} >^{n}},u_{R}^{j,n},\varphi) \\ &- (p^{j,n+1},\nabla \cdot \varphi) \\ & = -(\frac{\eta^{j,n+1} - \eta^{j,n}}{\Delta t}, \varphi) - \nu_{max}(\nabla \eta^{j,n+1},\nabla \varphi) \\ &- (\nu_j - \nu_{max})(\nabla \eta^{j,n},\nabla \varphi) + Intp(u^{j,n+1};\varphi). \end{aligned} \end{equation} Setting $\varphi = \xi_{R}^{j,n+1}$ rearranging the nonlinear terms by adding and subtracting \newline $b^{*}(\overline{u_{R}^{j, n}-<u_{R} >^{n}},u_{R}^{j,n+1} ,\xi_{R}^{j,n+1})$ , and using the fact that $(\eta^{j,n+1} - \eta^{j,n}, \xi_{R}^{j,n+1}) = 0$ by the definition of the $L^{2}$ projection we have \begin{equation}gin{equation} \label{eq:err1} \begin{equation}gin{aligned} \frac{1}{\Delta t}&\left(\frac{1}{2}\|\xi_{R}^{j,n+1}\|^{2} - \frac{1}{2}\|\xi_{R}^{j,n}\|^{2} + \frac{1}{2}\|\xi_{R}^{j,n+1} - \xi_{R}^{j,n} \|^{2} \right) + \nu_{max} \|\nabla \xi_{R}^{j,n+1}\|^{2} \\ &= - (\nu_{j} - \nu_{max})(\nabla(u^{j,n+1}-u^{j,n}),\nabla \xi_{R}^{j,n+1}) - (\nu_j - \nu_{max})(\nabla \xi_{R}^{j,n},\nabla \xi_{R}^{j,n+1}) \\ & - \nu_{max}(\nabla \eta^{j,n+1},\nabla \xi_{R}^{j,n+1}) - (\nu_j - \nu_{max})(\nabla \eta^{j,n},\nabla \xi_{R}^{j,n+1})\\ & + b^{*}(\overline{u_{R} ^{j,n}},u_{R}^{j,n+1},\xi_{R}^{j,n+1}) - b^{*}(\overline{u_{R}^{j, n}-<u_{R} >^{n}},u_{R}^{j,n+1} - u_{R}^{j,n},\xi_{R}^{j,n+1}) \\ &- 
b^{*}(u^{j,n+1},u^{j,n+1},\xi_{R}^{j,n+1}) + (p^{j,n+1},\nabla \cdot \xi_{R}^{j,n+1}) + Intp(u^{j,n+1};\xi_{R}^{j,n+1}). \end{aligned} \end{equation} We bound the viscous terms in a similar manner to Theorem 3.1 of \cite{GJW17} \begin{equation}gin{equation} \label{err1eq} \begin{equation}gin{aligned} -(\nu_{j} - \nu_{max})&(\nabla(u^{j,n+1} - u^{j,n}),\nabla \xi^{j,n+1}_{R}) \leq \\ &\frac{\Delta t}{4\tilde{\epsilon}}\frac{|\nu_{j} - \nu_{max}|^{2}}{\nu_{max}}\left(\int_{t^{n}}^{t^{n}+1}\|\nabla u_{j,t}\|^{2}dt\right) + \tilde{\epsilon}\nu_{max}\|\nabla \xi_{R}^{j,n+1}\|^{2}, \end{aligned} \end{equation} \begin{equation}gin{equation} -\nu_{max}(\nabla \eta^{j,n+1},\nabla \xi_{R}^{j,n+1}) \leq \frac{ \nu_{max}}{4\tilde{\epsilon}}\|\nabla \eta^{j,n+1}\|^{2} + \tilde{\epsilon}\nu_{max}\|\nabla \xi_{R}^{j,n+1}\|^{2}, \end{equation} \begin{equation}gin{equation} \begin{equation}gin{aligned} -(\nu_{j} - \nu_{max})&(\nabla \eta^{j,n},\nabla \xi_{R}^{j,n+1}) \leq \\ &\frac{1}{4\tilde{\epsilon}}\frac{|\nu_{j} - \nu_{max}|^{2}}{\nu_{max}}\|\nabla \eta^{j,n}\|^{2} + \tilde{\epsilon}\nu_{max}\|\nabla \xi_{R}^{j,n+1}\|^{2}, \end{aligned} \end{equation} \begin{equation}gin{equation} -(\nu_{j} - \nu_{max}) (\nabla \xi_{R}^{j,n},\nabla \xi_{R}^{j,n+1}) \leq \frac{|\nu_{j} - \nu_{max}|}{2}\| \nabla \xi_{R}^{j,n}\|^{2} + \frac{|\nu_{j} - \nu_{max}|}{2}\| \nabla \xi_{R}^{j,n+1}\|^{2}. \end{equation} We next rewrite the second nonlinear term on the right hand side of \eqref{eq:err1}. 
\begin{equation}gin{equation} \begin{equation}gin{aligned} b^{*}&(\overline{u_{R}^{j, n}-<u_R>^n},u_{R}^{j,n+1}-u_{R}^{j,n},\xi_{R}^{j,n+1})\\ &=-b^{*}(\overline{u_{R}^{j, n}-<u_{R} >^{n}},e^{j,n+1}-e^{j,n},\xi_{R}^{j,n+1})\\ &\quad+b^{*}(\overline{u_{R}^{j, n}-<u_{R} >^{n}} ,u^{j,n+1}-u^{j,n},\xi_{R}^{j,n+1})\\ &=-b^{*}(\overline{u_{R}^{j, n}-<u_{R} >^{n}},\eta^{j,n+1},\xi_{R}^{j,n+1})\\ &\quad+b^{*}(\overline{u_{R}^{j, n}-<u_{R} >^{n}},\eta ^{j,n},\xi_{R}^{j,n+1})\\ &\quad+b^{*}(\overline{u_{R}^{j, n}-<u_{R} >^{n}},\xi_{R}^{j,n},\xi_{R}^{j,n+1})\\ &\quad+b^{*}(\overline{u_{R}^{j, n}-<u_{R} >^{n}},u ^{j,n+1}-u^{j,n},\xi_{R}^{j,n+1})\text{ .} \end{aligned} \end{equation} As done in Theorem 3.1 of \cite{GJW17} using Young's inequality, \eqref{In1}, \eqref{In2}, and \eqref{POD:inveq} we derive the estimates \begin{equation}gin{equation} \begin{equation}gin{aligned} -b^{*}(&\overline{u_{R}^{j, n}-<u_{R} >^{n}},\eta^{j,n+1},\xi_{R}^{j,n+1}) \leq \\ &\frac{C_{b^*}^{2}\nu_{max}^{-1}}{4\tilde{\epsilon}}\|\nabla(\overline{u_{R}^{j, n}-<u_{R} >^{n}})\|^{2}\|\nabla \eta^{j,n+1}\|^{2} + \tilde{\epsilon}\nu_{max}\|\nabla \xi_{R}^{j,n+1}\|^{2}, \end{aligned} \end{equation} \begin{equation}gin{equation} \begin{equation}gin{aligned} b^{*}(&\overline{u_{R}^{j, n}-<u_{R} >^{n}},\eta^{j,n},\xi_{R}^{j,n+1}) \leq \\ &\frac{C_{b^*}^{2}\nu_{max}^{-1}}{4\tilde{\epsilon}}\|\nabla(\overline{u_{R}^{j, n}-<u_{R} >^{n}})\|^{2}\|\nabla \eta^{j,n}\|^{2} + \tilde{\epsilon}\nu_{max}\|\nabla \xi_{R}^{j,n+1}\|^{2}, \end{aligned} \end{equation} \begin{equation}gin{equation} \begin{equation}gin{aligned} &b^{*}(\overline{u_{R}^{j, n}-<u_{R} >^{n}},u^{j,n+1} - u^{j,n},\xi_{R}^{j,n+1}) \leq \\ &\frac{C \,C_{b^*}^{2}\nu_{max}^{-1}}{4\tilde{\epsilon}} \Delta t \|\nabla(\overline{u_{R}^{j, n}-<u_{R} >^{n}})\|^{2} + \tilde{\epsilon}\nu_{max}\|\nabla \xi_{R}^{j,n+1}\|^{2}. 
\end{aligned} \end{equation} \noindent By skew-symmetry, inequality \eqref{In2} and the inverse inequality \eqref{POD:inveq}, we have \begin{equation}gin{equation} \begin{equation}gin{aligned} &b^{\ast}(\overline{u_{R}^{j, n}-<u_{R} >^{n}},\xi_{R}^{j,n},\xi_{R}^{j,n+1})\\ &\leq C_{b^*} \, \| \nabla (\overline{u_{R}^{j, n}-<u_{R} >^{n}})\| \| \nabla\xi_{R}^{j,n}\| \sqrt{\| \xi_{R}^{j,n+1}-\xi_{R} ^{j,n}\| \| \nabla (\xi_{R}^{j,n+1}-\xi_{R} ^{j,n})\| }\\ &\leq C_{b^*} \, \|\hspace{-1pt}| {\mathbb S}_R \|\hspace{-1pt}| _2^{1/4}| \nabla (\overline{u_{R}^{j, n}-<u_{R} >^{n}})\| \| \nabla\xi_{R}^{j,n}\|\| \xi_{R}^{j,n+1}-\xi_{R} ^{j,n}\| \\ & \leq \frac{1}{2\Delta t}\| \xi_{R}^{j,n+1}-\xi_{R}^{j,n}\| ^{2}+\left( \frac{C_{b^*}^2 \Delta t}{2} \|\hspace{-1pt}| {\mathbb S}_R \|\hspace{-1pt}| _2^{\frac{1}{2}}\| \nabla (\overline{u_{R}^{j, n}-<u_{R} >^{n}})\| ^{2}\right) \| \nabla \xi_{R}^{j,n}\| ^{2}. \end{aligned} \label{eqn:5.18} \end{equation} Bounding the other two nonlinear terms we add and subtract the terms \newline $b^{*}(u^{j,n},u^{j,n+1},\xi_{R}^{j,n+1})$ and $b^{*}(\overline{u^{j,n}_R},u^{j,n+1},\xi_{R}^{j,n+1})$. It then follows from \eqref{In1} \begin{equation}gin{equation} \begin{equation}gin{aligned} &- b^{*}(u^{j,n+1},u^{j,n+1},\xi_{R}^{j,n+1}) +b^{*}(\overline{u_{R} ^{j,n}},u_{R}^{j,n+1},\xi_{R}^{j,n+1}) \\ & = -b^{*}(u^{j,n} - \overline{u_{R} ^{j,n}} , u^{j,n+1}, \xi_{R}^{j,n+1}) -b^{*}(\overline{u_{R} ^{j,n}},\eta^{j,n+1},\xi_{R}^{j,n+1}) \\ &-b^{*}(u^{j,n+1} - u^{j,n}, u^{j,n+1}, \xi_{R}^{j,n+1}). \end{aligned} \end{equation} Now by Young's inequality, \eqref{In2}, the stability analysis, i.e. 
$\|\overline{u^{j,n}_{R}}\|^{2} \leq C_{stab}$, and the assumption $u^{j} \in L^{\infty}(0,T,H^{1}(\Omega))$ we have \begin{equation}gin{equation} \begin{equation}gin{aligned} b^{*}(\overline{u_{R}^{j,n}},\eta^{j,n+1},\xi_{R}^{j,n+1}) &\leq C_{b^*}\|\nabla \overline{u^{j,n}_{R}}\|^{\frac{1}{2}}\| \overline{u^{j,n}_{R}}\|^{\frac{1}{2}}\|\nabla \eta^{j,n+1}\|\|\nabla \xi_{R}^{j,n+1}\| \\ &\leq \frac{C_{stab}C_{b^*}^{2}}{4\tilde{\epsilon}}\nu_{max}^{-1} \|\nabla \overline{u_{R}^{j,n}}\| \|\nabla \eta^{j,n+1}\|^{2} + \tilde{\epsilon}\nu_{max}\|\nabla \xi_{R}^{j,n+1}\|^{2}, \end{aligned} \end{equation} as well as \begin{equation}gin{equation} \begin{equation}gin{aligned} b^{*}(u^{j,n+1} - u^{j,n}, u^{j,n+1}, &\xi_{R}^{j,n+1}) \leq \frac{C \, C_{b^*}^{2}\Delta t}{4\tilde{\epsilon}}\nu_{max}^{-1} + \tilde{\epsilon}\nu_{max}\|\nabla \xi_{R}^{j,n+1}\|^{2}. \end{aligned} \end{equation} We can then rewrite the term \begin{equation}gin{equation} \begin{equation}gin{aligned} -b^{*}(u^{j,n} - \overline{u_{R} ^{j,n}} , &u^{j,n+1}, \xi_{R}^{j,n+1}) =\\ &-b^{*}(e^{j,n} , u^{j,n+1}, \xi_{R}^{j,n+1}) - b^{*}(u_{R}^{j,n} - \overline{u_{R}^{j,n}}, u^{j,n+1}, \xi_{R}^{j,n+1}). \end{aligned} \end{equation} Bounding the second term \begin{equation}gin{equation} \begin{equation}gin{aligned} -b^{*}(u_{R}^{j,n} - &\overline{u_{R} ^{j,n}} , u^{j,n+1}, \xi_{R}^{j,n+1}) \\ &\leq C_{b^*}\|u_{R}^{j,n} - \overline{u_{R}^{j,n}}\|^{\frac{1}{2}} \|\nabla (u_{R}^{j,n} - \overline{u_{R}^{j,n}})\|^{\frac{1}{2}}\|\nabla u^{j,n+1}\|\| \nabla \xi^{j,n+1}_{R}\| \\ & \leq \frac{C_{b^*}^{2}}{4\tilde{\epsilon}}\nu_{max}^{-1}\|u_{R}^{j,n} - \overline{u_{R}^{j,n}}\| \|\nabla (u_{R}^{j,n} - \overline{u_{R}^{j,n}})\|\|\nabla u^{j,n+1}\|^{2} + \tilde{\epsilon}\nu_{max}\|\nabla \xi^{j,n+1}_{R}\|^{2}. 
\end{aligned} \end{equation} Then after decomposing $e^{j,n} = \eta^{j,n} + \xi_{R}^{j,n}$ again using Young's inequality and the assumption $u^{j} \in L^{\infty}(0,T,H^{1}(\Omega))$ \begin{equation}gin{equation} -b^{*}(\eta^{j,n} , u^{j,n+1}, \xi_{R}^{j,n+1}) \leq \frac{C C_{b^*}^2}{4 \, \tilde{\epsilon}} \nu_{max}^{-1} \, \| \nabla \eta^{j,n}\|^{2} + \tilde{\epsilon}\nu_{max}\|\nabla \xi^{j,n+1}_{R}\|^{2} \end{equation} and \begin{equation}gin{equation} \begin{equation}gin{aligned} -b^{*}(\xi_{R}^{j,n} , u^{j,n+1}, \xi_{R}^{j,n+1}) &\leq C_{b^*} \|\nabla \xi_{R}^{j,n}\|^{\frac{1}{2}}\|\xi_{R}^{j,n}\|^{\frac{1}{2}}\|\nabla u^{j,n+1}\|\|\nabla \xi_{R}^{j,n+1}\| \\ &\leq C_{b^*} C \left( \alpha\|\nabla \xi_{R}^{j,n+1}\|^{2} + \frac{1}{4 \alpha} \|\nabla \xi_{R}^{j,n}\|\|\xi_{R}^{j,n}\| \right). \\ &\leq C_{b^*} C \left( \alpha\|\nabla \xi_{R}^{j,n+1}\|^{2} + \frac{1}{4 \alpha} \left( \begin{equation}ta \| \nabla \xi_{R}^{j,n} \|^{2} + \frac{1}{\begin{equation}ta}\| \xi_{R}^{j,n} \|^{2} \right) \right) \\ & {=} \, \tilde{\epsilon}\nu_{max} \|\nabla \xi_{R}^{j,n+1}\|^{2} + \frac{{13} \, \tilde{\epsilon}}{4}\nu_{max}\| \nabla \xi_{R}^{j,n} \|^{2} + \frac{{ C_{b^*}^4 \, C^4} }{52 \, \nu_{max}^{3}\tilde{\epsilon}^{3}}\| \xi_{R}^{j,n} \|^{2}. \end{aligned} \end{equation} For the pressure term since $\xi_{j,r}^{n+1} \in X^{R} \subset V^{h}$ it follows for $q_{h} \in Q^{h}$ \begin{equation}gin{equation} \begin{equation}gin{aligned} (p_{j}^{n+1}, \nabla \cdot \xi^{j,n+1}_{R}) =& (p^{j,n+1}- q_{h}^{n+1}, \nabla \cdot \xi_{R}^{j,n+1})\\ &\leq \tilde{\epsilon}\nu_{max}\|\nabla \xi_{R}^{n+1}\|^{2} + \frac{\nu_{max}^{-1}}{4 d\tilde{\epsilon}}\|p^{j,n+1} - q_{h}^{j,n+1} \|^{2}. 
\end{aligned} \end{equation} For the last term we have \begin{equation}gin{equation} \label{errlast} \begin{equation}gin{aligned} Intp(u^{j,n+1},\xi_{R}^{j,n+1}) &\leq \|\frac{u^{j,n+1}-u^{j,n}}{\Delta t} - u^{j}_{t}(t^{n+1})\|\|\nabla \xi_{R}^{j,n+1}\| \\ & \leq \frac{C \Delta t}{4\tilde{\epsilon}} \nu_{max}^{-1} t + \tilde{\epsilon}\nu_{max}\|\nabla \xi_{R}^{j,n+1}\|^{2}. \end{aligned} \end{equation} Now combining \eqref{err1eq} - \eqref{errlast}, \eqref{eq:err1} becomes \begin{equation}gin{equation}\label{comb1} \begin{equation}gin{aligned} & \frac{1}{\Delta t}\left(\frac{1}{2}\|\xi_{R}^{j,n+1}\|^{2} - \frac{1}{2}\|\xi_{R}^{j,n}\|^{2} \right) \\ & + \nu_{max}\|\nabla \xi_{R}^{j,n+1}\|^{2} - \frac{C_{b^*}^2 \Delta t}{2} \|\hspace{-1pt}| {\mathbb S}_R \|| _2^{\frac{1}{2}}\| \nabla (\overline{u_{R}^{j, n}-<u_{R}>^{n}})\| ^{2}\ \| \nabla \xi_{R}^{j,n}\| ^{2} \\ & - \frac{13 \tilde{\ensuremath{\epsilon}ilon}}{4}\nu_{max}\|\nabla \xi_{R}^{j,n+1}\|^{2} - \frac{13 \tilde{\ensuremath{\epsilon}ilon}}{4} \, \nu_{max}\|\nabla \xi_{R}^{j,n}\|^{2} \\ & - \frac{|\nu_{j} - \nu_{max}|}{2}\|\nabla \xi_{R}^{j,n+1}\|^{2} - \frac{|\nu_{j} - \nu_{max}|}{2}\|\nabla \xi_{R}^{j,n}\|^{2} \\ & \leq { \frac{C_{b^*}^4 \, C^4}{52 \, \nu_{max}^{3} \, \tilde{\epsilon}^{3}}\| \xi_{R}^{j,n} \|^{2} } + \frac{C\Delta t}{4 \, \tilde{\epsilon}} \frac{|\nu_{j} - \nu_{max}|^{2}}{\nu_{max}} + \frac{\nu_{max}}{4 \, \tilde{\epsilon}} \, \|\nabla \eta^{j,n+1}\|^{2} \\ & + \frac{C C_{b^*}^2 \, \Delta t}{4 \, \tilde{\epsilon} \, \nu_{max}} \, \|\nabla(\overline{u_{R}^{j, n}-<u_{R}>^{n}})\|^{2} \\ & + \frac{1}{4 \, d \, \tilde{\epsilon} \, \nu_{max}} \, \|p^{j,n+1} - q_{h}^{n+1} \|^{2} + \frac{C \Delta t}{4 \, \tilde{\epsilon} \, \nu_{max}} \, \\ & + \frac{C C_{b^*}^2 \, \Delta t}{4 \, \tilde{\epsilon} \nu_{max}} \, + \frac{C_{b^*}^2}{4 \, \tilde{\epsilon} \nu_{max}} \, \|u_{R}^{j,n} - \overline{u_{R}^{j,n}}\| \|\nabla (u_{R}^{j,n} - \overline{u_{R}^{j,n}})\|\|\nabla u^{j,n+1}\|^{2} \\ & + \frac{1}{4 \, 
\tilde{\epsilon}} \, \frac{|\nu_{j} - \nu_{max}|^{2}}{\nu_{max}} \, \|\nabla \eta^{j,n}\|^{2} + \frac{C_{b^*}^{2}}{4 \, \tilde{\epsilon} \, \nu_{max}} \, \|\nabla(\overline{u_{R}^{j, n}-<u_{R}>^{n}})\|^{2} \, \|\nabla \eta^{j,n}\|^{2} \\ & + \frac{C_{b^*}^{2}}{4 \, \tilde{\epsilon} \, \nu_{max}} \, \|\nabla(\overline{u_{R}^{j, n}-<u_{R}>^{n}})\|^{2} \, \|\nabla \eta^{j,n+1}\|^{2} + \frac{C_{b^*}^{2} \, {C_{stab}} }{4 \, \tilde{\epsilon} \, \nu_{max}} \, \|\nabla \overline{u_{R}^{j,n}}\| \|\nabla \eta^{j,n+1}\|^{2} \\ & + \frac{C_{b^*}^{2} \, C }{4 \, \tilde{\epsilon} \, \nu_{max}} \, \|\nabla \eta^{j,n}\|^{2} \, . \end{aligned} \end{equation} The terms on the LHS of~\eqref{comb1} (except first) can be rearranged as follows: \begin{equation}gin{equation}\label{eqn:traian-1} \begin{equation}gin{aligned} \left( \nu_{max} - \frac{13 \tilde{\ensuremath{\epsilon}ilon}}{4} \nu_{max} - \frac{|\nu_{j} - \nu_{max}|}{2} \right) &\|\nabla \xi_{R}^{j,n+1}\|^{2} \\ - \left( \frac{13 \tilde{\epsilon}}{4} \, \nu_{max} + \frac{|\nu_{j} - \nu_{max}|}{2} + \frac{C_{b^*}^2 \Delta t}{2} \|\hspace{-1pt}| {\mathbb S}_R \|| _2^{\frac{1}{2}}\| \nabla (\overline{u_{R}^{j, n}-<u_{R}>^{n}})\| ^{2} \right) &\|\nabla \xi_{R}^{j,n}\|^{2} \, . 
\end{aligned} \end{equation} Choosing $\tilde{\ensuremath{\epsilon}ilon} = \frac{\ensuremath{\epsilon}ilon}{13}$ and using~\eqref{eqn:epsilon} in~\eqref{eqn:traian-1}, \eqref{comb1} yields \begin{equation}gin{equation} \label{comb2} \begin{equation}gin{aligned} &\frac{1}{\Delta t}\left(\frac{1}{2}\|\xi_{R}^{j,n+1}\|^{2} - \frac{1}{2}\|\xi_{R}^{j,n}\|^{2} \right) \\ & + \left( \frac{\nu_{max}}{2} + \frac{\ensuremath{\epsilon}ilon \, \nu_{max}}{4} \right) \left( \| \nabla \xi_{R}^{j,n+1} \|^{2} - \| \nabla \xi_{R}^{j,n} \|^{2} \right) \\ & + {\nu_{max}}\biggl(\frac{\ensuremath{\epsilon}ilon}{2} - \frac{C_{b^*}^2 \, \Delta t}{2 \nu_{max}} \||\mathbb{S}_{R}|\|^{\frac{1}{2}}_{2} \|\nabla(\overline{u_{R}^{j, n}-<u_{R} >^{n}})\|^{2} \biggr)\| \nabla \xi_{R}^{j,n} \|^{2} \\ & \leq { \frac{13^3 \, C_{b^*}^4 \, C^4}{52 \, \nu_{max}^{3} \, \ensuremath{\epsilon}ilon^{3}}\| \xi_{R}^{j,n} \|^{2} } + \frac{13 \, C\, \Delta t}{4 \, \ensuremath{\epsilon}ilon} \frac{|\nu_{j} - \nu_{max}|^{2}}{\nu_{max}} \, + \frac{13 \, \nu_{max}}{4 \, \ensuremath{\epsilon}ilon} \, \|\nabla \eta^{j,n+1}\|^{2} \\ & + \frac{13 \, C \, C_{b^*}^2 \, \Delta t}{4 \, \ensuremath{\epsilon}ilon \, \nu_{max}} \, \|\nabla(\overline{u_{R}^{j, n}-<u_{R}>^{n}})\|^{2} \, \\ & + \frac{13}{4 \, d \, \ensuremath{\epsilon}ilon \, \nu_{max}} \, \|p^{j,n+1} - q_{h}^{n+1} \|^{2} + \frac{13\, C \, \Delta t}{4 \, \ensuremath{\epsilon}ilon \, \nu_{max}} \, \\ & + \frac{13 \, C \, C_{b^*}^2 \, \Delta t}{4 \, \ensuremath{\epsilon}ilon \nu_{max}} \, + \frac{13 \, C_{b^*}^2}{4 \, \ensuremath{\epsilon}ilon \nu_{max}} \, \|u_{R}^{j,n} - \overline{u_{R}^{j,n}}\| \|\nabla (u_{R}^{j,n} - \overline{u_{R}^{j,n}})\|\|\nabla u^{j,n+1}\|^{2} \\ & + \frac{13}{4 \, \ensuremath{\epsilon}ilon} \, \frac{|\nu_{j} - \nu_{max}|^{2}}{\nu_{max}} \, \|\nabla \eta^{j,n}\|^{2} + \frac{13 \, C_{b^*}^{2}}{4 \, \ensuremath{\epsilon}ilon \, \nu_{max}} \, \|\nabla(\overline{u_{R}^{j, n}-<u_{R}>^{n}})\|^{2} \, \|\nabla \eta^{j,n}\|^{2} \\ & + \frac{13 \, 
C_{b^*}^{2}}{4 \, \epsilon \, \nu_{max}} \, \|\nabla(\overline{u_{R}^{j, n}-<u_{R}>^{n}})\|^{2} \, \|\nabla \eta^{j,n+1}\|^{2} + \frac{13 \, C_{b^*}^{2} \, {C_{stab}} }{4 \, \epsilon \, \nu_{max}} \, \|\nabla \overline{u_{R}^{j,n}}\| \|\nabla \eta^{j,n+1}\|^{2} \\ & + \frac{13 \, C_{b^*}^{2} \, {C}}{4 \, \epsilon \, \nu_{max}} \, \|\nabla \eta^{j,n}\|^{2} \, . \end{aligned} \end{equation} It follows from the stability condition \eqref{stab:assumption} that \begin{equation} \label{stab:cond} {\nu_{max}} \left(\frac{\epsilon}{2} - \frac{{C_{b^*}^2} \, \Delta t}{2 \nu_{max}} \||\mathbb{S}_{R}|\|^{\frac{1}{2}}_{2} \|\nabla(\overline{u_{R}^{j, n}-<u_{R} >^{n}})\|^{2}\right) \geq {C \nu_{max}} \geq 0\,. \end{equation} Now we use \eqref{stab:cond}, sum \eqref{comb2} from $n=0$ to $N-1$, multiply both sides by $\Delta t$, and absorb constants. Since $U_{R}^{j,0} = \sum_{i=1}^{R}(u^{j,0},\varphi_i)\varphi_i$, we have $\|\xi_{R}^{j,0}\|^{2} = 0$ and $\|\nabla \xi_{R}^{j,0}\|^{2} = 0$.
It then follows from \eqref{comb2} that we have \begin{equation}gin{equation}\label{comb3} \begin{equation}gin{aligned} &\frac{1}{2}\|\xi_{R}^{j,N}\|^{2} + \frac{\nu_{max}}{2}\| \nabla \xi_{R}^{j,N}\|^{2} + {\frac{\ensuremath{\epsilon}ilon}{4}\nu_{max}\| \nabla \xi_{R}^{j,N}\|^{2}} + C\nu_{max}\Delta t \sum_{n=0}^{N-1} \| \nabla \xi_{R}^{j,n+1} \|^{2} \\ & \leq \Delta t \sum_{n=0}^{N-1 }\bigg \{ {\frac{C_{b^*}^4 \, C^4}{\ensuremath{\epsilon}ilon^{3} \, \nu_{max}^{3}}\| \xi_{R}^{j,n} \|^{2}} + \frac{C\Delta t}{{\ensuremath{\epsilon}ilon}}\frac{|\nu_{j} - \nu_{max}|^{2}}{\nu_{max}} + \frac{C \nu_{max}}{{\ensuremath{\epsilon}ilon}}\|\nabla \eta^{j,n+1}\|^{2} \\ &+ \frac{C}{{\ensuremath{\epsilon}ilon}} \frac{|\nu_{j} - \nu_{max}|^{2}}{\nu_{max}}\|\nabla \eta^{j,n}\|^{2} + {\frac{C\,C_{b^*}^2}{\ensuremath{\epsilon}ilon \, \nu_{max}}} \|\nabla(\overline{u_{R}^{j, n}-<u_{R} >^{n}})\|^{2}\|\nabla \eta^{j,n+1}\|^{2} \\ &+ {\frac{C\,C_{b^*}^2}{\ensuremath{\epsilon}ilon \, \nu_{max}}}\|\nabla(\overline{u_{R}^{j, n}-<u_{R}>^{n}})\|^{2}\|\nabla \eta^{j,n}\|^{2} + {\frac{C \, C_{b^*}^2 \, C_{stab}}{\ensuremath{\epsilon}ilon \, \nu_{max}}}\|\nabla \overline{u_{R}^{j,n}}\| \|\nabla \eta^{j,n+1}\|^{2} \\ & {\frac{C \, C_{b^*}^2}{\ensuremath{\epsilon}ilon \nu_{max}}\|\nabla \eta^{j,n}\|^{2}}+ {\frac{C\,C_{b^*}^2 \, \Delta t }{\ensuremath{\epsilon}ilon \, \nu_{max}}} \|\nabla(\overline{u_{R}^{j, n}-<u_{R} >^{n}})\|^{2} \\ &+ {\frac{C}{d \, \ensuremath{\epsilon}ilon \, \nu_{max}}} \|p^{j,n+1} - q_{h}^{n+1} \|^{2} + {\frac{C \, \Delta t} {\ensuremath{\epsilon}ilon \, \nu_{max} } } \\ &+ {\frac{C\,C_{b^*}^2 \, \Delta t }{\ensuremath{\epsilon}ilon \, \nu_{max}}} + {\frac{C\,C_{b^*}^2 \,}{\ensuremath{\epsilon}ilon \, \nu_{max}}} \|u_{R}^{j,n} - \overline{u_{R}^{j,n}}\| \|\nabla (u_{R}^{j,n} - \overline{u_{R}^{j,n}})\|\|\nabla u^{j,n+1}\|^{2} \bigg \}. 
\end{aligned} \end{equation} Now using assumption \ref{assumption1}, lemma \ref{lm:ROMstability}, and the stability result from theorem \ref{stab:theorem}, i.e. {$\frac{\ensuremath{\epsilon}ilon \, \nu_{max} \Delta t}{4} \sum_{n=0}^{N-1} \|\nabla u_{R}^{j,n} \|^{2} \leq C_{stab}$}, we have \begin{equation}gin{equation} \begin{equation}gin{aligned} & {\frac{\Delta t\, C \, C_{b^*}^2 \, C_{stab}}{\ensuremath{\epsilon}ilon \, \nu_{max}}} \sum_{n=0}^{N-1}\|\overline{\nabla u_{R}^{j,n}}\|\| \nabla \eta^{j,n+1}\|^{2} \\ & \leq {\frac{\Delta t\, C \, C_{b^*}^2 \, C_{stab}}{\ensuremath{\epsilon}ilon \, \nu_{max}}} \left(\sum_{n=0}^{N-1} \frac{1}{2} + \sum_{n=0}^{N-1}\frac{\|\nabla u_{R}^{j,n}\|^{2}}{2} \right) \times \\ &\biggr((C+h^2 \|\hspace{-1pt}| {\mathbb S}_R \|\hspace{-1pt}| _2 ) h^{2s} + (C+\|\hspace{-1pt}| {\mathbb S}_R \|\hspace{-1pt}| _2 )\Delta t^4 + J_S\sum_{i=R+1}^{J_S(N_S+1)} \| \nabla \varphi_i\| ^2\lambda_i \biggr). \end{aligned} \end{equation} Rearranging the first term \begin{equation}gin{equation} \begin{equation}gin{aligned} &{{\frac{\Delta t\, C \, C_{b^*}^2 \, C_{stab}}{\ensuremath{\epsilon}ilon \, \nu_{max}}} \left(\sum_{n=0}^{N-1} \frac{1}{2} + \sum_{n=0}^{N-1}\frac{\|\nabla u_{R}^{j,n}\|^{2}}{2} \right)} \\ &= {{\frac{C \, C_{b^*}^2 \, C_{stab}}{2 \, \ensuremath{\epsilon}ilon \, \nu_{max}}} + {\frac{2 \,C \, C_{b^*}^2 \, C_{stab}}{\ensuremath{\epsilon}ilon^{2} \, \nu^{2}_{max}}} \frac{\ensuremath{\epsilon}ilon \nu_{max} \Delta t}{4} \left(\sum_{n=0}^{N-1}\|\nabla u_{R}^{j,n}\|^{2} \right)}. 
\end{aligned} \end{equation} It then follows that \begin{equation}gin{equation} \begin{equation}gin{aligned} &{\frac{\Delta t\, C \, C_{b^*}^2 \, C_{stab}}{\ensuremath{\epsilon}ilon \, \nu_{max}} \sum_{n=0}^{N-1}\|\overline{\nabla u_{R}^{j,n}}\|\| \nabla \eta^{j,n+1}\|^{2}} \\ & {\leq \left(\frac{C \, C_{b^*}^2 \, C_{stab}}{2 \, \ensuremath{\epsilon}ilon \, \nu_{max}} + \frac{2 \,C \, C_{b^*}^2 \, C^{2}_{stab}}{\ensuremath{\epsilon}ilon^{2} \, \nu^{2}_{max}} \right) \times} \\ &{\biggr((C+h^2 \|\hspace{-1pt}| {\mathbb S}_R \|\hspace{-1pt}| _2 ) h^{2s} + (C+\|\hspace{-1pt}| {\mathbb S}_R \|\hspace{-1pt}| _2 )\Delta t^4 + J_S\sum_{i=R+1}^{J_S(N_S+1)} \| \nabla \varphi_i\| ^2\lambda_i \biggr)}. \end{aligned} \end{equation} {Next using lemma \ref{lm:ROMfiltering} and Assumptions \ref{assumption:reg} and \ref{assumption:leray}} \begin{equation}gin{equation} \label{eqn:theorem-convergence-1} \begin{equation}gin{aligned} & {\frac{C\,C_{b^*}^2 \, \Delta t}{\ensuremath{\epsilon}ilon \, \nu_{max}}} \sum_{n=0}^{N-1} \|u_{R}^{j,n} - \overline{u_{R}^{j,n}}\| \|\nabla (u_{R}^{j,n} - \overline{u_{R}^{j,n}})\|\|\nabla u^{j,n+1}\|^{2} \\ &\leq {\frac{C\,C_{b^*}^2 \, \Delta t}{\ensuremath{\epsilon}ilon \, \nu_{max}}} \sum_{n=0}^{N-1} \|\nabla u^{j,n+1}\|^{2} \frac{1}{\ensuremath{\Delta t}a} \biggr[C\biggr(C\left( h^{2s+2} + \Delta t^4 \right)+ J_S\sum_{i=R+1}^{J_S(N_S+1)} \lambda_i \biggr) \\& + C\ensuremath{\Delta t}a^{2} \biggr((C+h^2 \|\hspace{-1pt}| {\mathbb S}_R \|\hspace{-1pt}| _2 ) h^{2s} + (C+\|\hspace{-1pt}| {\mathbb S}_R \|\hspace{-1pt}| _2 )\Delta t^4 + J_S\sum_{i=R+1}^{J_S(N_S+1)} \| \nabla \varphi_i\| ^2\lambda_i \biggr) \\& + C\ensuremath{\Delta t}a^{4}\|\Delta u_{R}^{j,n} \|^{2}\biggr] \\ &\leq {\frac{C\,C_{b^*}^2}{\ensuremath{\epsilon}ilon \, \nu_{max} \, \ensuremath{\Delta t}a}} \biggr[C\biggr(C\left( h^{2s+2} + \Delta t^4 \right)+ J_S\sum_{i=R+1}^{J_S(N_S+1)} \lambda_i \biggr) \\& + C\ensuremath{\Delta t}a^{2} \biggr((C+h^2 \|\hspace{-1pt}| {\mathbb S}_R 
\|\hspace{-1pt}| _2 ) h^{2s} + (C+\|\hspace{-1pt}| {\mathbb S}_R \|\hspace{-1pt}| _2 )\Delta t^4 + J_S\sum_{i=R+1}^{J_S(N_S+1)} \| \nabla \varphi_i\| ^2\lambda_i \biggr) \\& + C\ensuremath{\Delta t}a^{4}\|\Delta u_{R}^{j,n} \|^{2}\biggr]. \end{aligned} \end{equation} Next using theorem \ref{stab:theorem} we have \begin{equation}gin{equation} \begin{equation}gin{aligned} \frac{\Delta t \, C\, C_{b^{*}}^{2}}{\ensuremath{\epsilon}ilon \, \nu_{max}} \|\nabla(\overline{u_{R}^{j, n}-<u_{R} >^{n}})\|^{2} &= \frac{C \||\mathbb{S}_{R}|\|_{2}^{-\frac{1}{2}} }{\ensuremath{\epsilon}ilon}\frac{C_{b^{*}}^{2} \Delta t }{\nu_{max}}\||\mathbb{S}_{R}|\|_{2}^{\frac{1}{2}} \|\nabla(\overline{u_{R}^{j, n}-<u_{R} >^{n}})\|^{2} \\ &\leq C \||\mathbb{S}_{R}|\|_{2}^{-\frac{1}{2}}. \end{aligned} \end{equation} Therefore we can bound the quantities \begin{equation}gin{equation} \begin{equation}gin{aligned} &\frac{\Delta t \, C\, C_{b^{*}}^{2}}{\ensuremath{\epsilon}ilon \, \nu_{max}} \|\nabla(\overline{u_{R}^{j, n}-<u_{R} >^{n}})\|^{2} \|\eta_{j}^{n+1}\|^{2} \leq C \||\mathbb{S}_{R}|\|_{2}^{-\frac{1}{2}} \|\eta_{j}^{n+1}\|^{2} \\ &\frac{\Delta t \, C\, C_{b^{*}}^{2}}{\ensuremath{\epsilon}ilon \, \nu_{max}} \|\nabla(\overline{u_{R}^{j, n}-<u_{R} >^{n}})\|^{2} \|\eta_{j}^{n}\|^{2} \leq C \||\mathbb{S}_{R}|\|_{2}^{-\frac{1}{2}} \|\eta_{j}^{n}\|^{2} \\ &\frac{\Delta t^{2} \, C\, C_{b^{*}}^{2}}{\ensuremath{\epsilon}ilon \, \nu_{max}} \|\nabla(\overline{u_{R}^{j, n}-<u_{R} >^{n}})\|^{2} \leq C \Delta t \||\mathbb{S}_{R}|\|_{2}^{-\frac{1}{2}}. 
\end{aligned} \end{equation} Now combining everything, absorbing constants, invoking the discrete Gronwall's inequality, using Assumption \ref{assumption1} and the stability estimate \eqref{stab:assumption} \eqref{comb3} becomes \begin{equation}gin{equation} \begin{equation}gin{aligned} \label{ineq:final} &\frac{1}{2}\|\xi_{R}^{j,N}\|^{2} + \frac{\nu_{max}}{2}\| \nabla \xi_{R}^{j,N}\|^{2} + {\frac{\ensuremath{\epsilon}ilon}{4} \nu_{max} \| \nabla \xi_{R}^{j,N}\|^{2}} + C\nu_{max}\Delta t \sum_{n=0}^{N-1} \| \nabla \xi_{R}^{j,n+1} \|^{2} \\ &\leq \exp\left(\frac{C_{b^{*}}^{4} \, C^{4} \,T} {\ensuremath{\epsilon}ilon^{3}\,\nu_{max}^{3}}\right) \biggr[\biggr(\frac{ C \, \nu_{max} \Delta t}{\ensuremath{\epsilon}ilon} + \frac{C \Delta t}{\ensuremath{\epsilon}ilon} \frac{|\nu_j - \nu_{max}|^{2}}{\nu_{max}} + 2C \||\mathbb{S}_{R}|\|_{2}^{-\frac{1}{2}} + \frac{C \, C_{b^*}^2 \, \Delta t}{\ensuremath{\epsilon}ilon \, \nu_{max}} \\ &+ \frac{C \, C_{b^*}^2 \, C_{stab}}{2 \, \ensuremath{\epsilon}ilon \, \nu_{max}} + \frac{2 \,C \, C_{b^*}^2 \, C^{2}_{stab}}{\ensuremath{\epsilon}ilon^{2} \, \nu^{2}_{max}} + \frac{C \, C_{b^*}^2 \, \ensuremath{\Delta t}a}{ \ensuremath{\epsilon}ilon \, \nu_{max}} \biggr) \times \\& \biggr((C+h^2 \|\hspace{-1pt}| {\mathbb S}_R \|\hspace{-1pt}| _2 ) h^{2s} + (C+\|\hspace{-1pt}| {\mathbb S}_R \|\hspace{-1pt}| _2 )\Delta t^4 + J_S\sum_{i=R+1}^{J_S(N_S+1)} \| \nabla \varphi_i\| ^2\lambda_i \biggr) \\& + \frac{C\,C_{b^*}^2}{\ensuremath{\Delta t}a \,\ensuremath{\epsilon}ilon \, \nu_{max}} \biggr(C\left( h^{2s+2} + \Delta t^4 \right)+ J_S\sum_{i=R+1}^{J_S(N_S+1)} \lambda_i \biggr) \\& + \frac{C \, \Delta t^{2}}{\ensuremath{\epsilon}ilon}\frac{|\nu_j - \nu_{max}|^{2}}{\nu_{max}} + C \Delta t \||\mathbb{S}_{R}|\|_{2}^{-\frac{1}{2}} + \frac{C h^{2s}} {d \, \ensuremath{\epsilon}ilon \, \nu_{max}}\| |p^{j} | \|^{2}_{2,s} + \frac{C \Delta t^{2}}{\ensuremath{\epsilon}ilon \, \nu_{max}} \\& + \frac{C\,C_{b^*}^2 \, \Delta t^{2}}{\ensuremath{\epsilon}ilon \, 
\nu_{max}} + \frac{C\,C_{b^*}^2 \, \delta^3}{\epsilon \, \nu_{max}} \biggr]. \end{aligned} \end{equation} By the triangle inequality we have $\|e^{j,n}\|^{2} \leq 2(\|\xi_{R}^{j,n} \|^{2} + \| \eta^{j,n}\|^{2})$ from which it follows \begin{equation} \begin{aligned} &\frac{1}{2}\|e^{j,N}\|^{2} + \frac{\nu_{max}}{2}\| \nabla e^{j,N}\|^{2} + {\frac{\epsilon}{4}\nu_{max} \| \nabla e^{j,N}\|^{2}} + C\nu_{max}\Delta t \sum_{n=0}^{N-1} \| e^{j,n+1} \|^{2} \\ & \leq \|\eta^{j,N}\|^{2} + \nu_{max}\| \nabla \eta^{j,N}\|^{2} + {\frac{\epsilon}{2} \nu_{max} \| \nabla \eta^{j,N}\|^{2}} + C\nu_{max}\Delta t \sum_{n=0}^{N-1} \| \eta^{j,n+1} \|^{2} \\ & + \|\xi_{R}^{j,N}\|^{2} + \nu_{max}\| \nabla \xi_{R}^{j,N}\|^{2} + {\frac{\epsilon}{2} \nu_{max} \| \nabla \xi_{R}^{j,N}\|^{2}} + C\nu_{max}\Delta t \sum_{n=0}^{N-1} \| \xi_{R}^{j,n+1} \|^{2}. \end{aligned} \end{equation} Now applying inequality \eqref{ineq:final} and Assumption \ref{assumption1} the result follows. \end{proof} \end{theorem} \section{Numerical Experiments}\label{numex} In this section we provide numerical experiments for the Leray ensemble-POD algorithm \eqref{En-Leray-POD-Weak} demonstrating the efficacy of this approach. All computations will be done using the FEniCS software suite \cite{LNW12} and all meshes generated via the built-in meshing package \textbf{mshr}. \subsection{Problem Setting} For the numerical experiments we consider the two-dimensional flow between offset cylinders used in \cite{GJS17}. The domain is a disk with a smaller off-center disc inside. Let $r_{1}=1$, $r_{2}=0.1$, $c_{1}=1/2$, and $c_{2}=0$; then, the domain is given by \[ \Omega=\{(x,y):x^{2}+y^{2}\leq r_{1}^{2} \text{ and } (x-c_{1})^{2} +(y-c_{2})^{2}\geq r_{2}^{2}\}. \] The mesh utilized contains 14,590 degrees of freedom and is given in Figure~\ref{meshCinC}.
We discretize in space via the $P^2$-$P^1$ Taylor-Hood element pair. The no-slip, no-penetration boundary conditions are imposed on both cylinders. In our test problems the flow will be driven by the counterclockwise rotational body force \[ f(x,y,t)=\big(-4y(1-x^{2}-y^{2})\,,\,4x(1-x^{2}-y^{2})\big)^{T}. \] \begin{figure}[h!] \centering \includegraphics[width = 12cm]{figures/mesh.png} \caption{Mesh for flow between offset circles resulting in 14,590 total degrees of freedom for the Taylor-Hood element pair.} \label{meshCinC} \end{figure} This flow displays interesting structures which interact with the inner circle. Specifically, the flow rotates about the origin and interacts with the immersed cylinder forming a von K\'arm\'an vortex street. \subsection{Numerical Results} In this experiment we demonstrate the improved accuracy and stability of the Leray ensemble-POD algorithm. In order to generate the POD basis we use two different viscosities $\nu_1 = .0016$ and $\nu_2 = .002$. The initial conditions will be generated by solving a steady Stokes problem using the previously defined counterclockwise rotational body force. We run a finite element code utilizing a linearly implicit backwards Euler method for each viscosity from $t_0 = 0$ to $T= 6$ with fixed time step $\Delta t = .01$. At time $T=3.0$ we begin taking snapshots every $.04$ seconds. In Figure~\ref{eigvals_ex1} we show the decay of the singular values for the snapshot matrix. \begin{figure}[h!] \centering \includegraphics[width=10cm]{figures/eigvaldecay_ex1.pdf} \caption{The 40 largest eigenvalues for the snapshot matrix.} \label{eigvals_ex1} \end{figure} To illustrate the accuracy of the Leray ensemble-POD algorithm we compare it against the ensemble-POD algorithm using the same viscosities from the offline stage. The computations are carried out over the time interval $t_0 = 3$ to $T= 6$ with fixed time step $\Delta t = .01$ and $r = 10$ reduced basis functions.
The initial condition at $T = 3$ is the $L^{2}$ projection of the FE solution at $T = 3.0$ into the POD space. The filtering length for the Leray ensemble-POD algorithm is taken to be $\delta = .025$. The filtering length is selected as the value of $\delta$ which allows the average kinetic energy of Leray ensemble-POD to most closely match the average kinetic energy of the benchmark solution. We purposefully utilize a small number of basis functions to demonstrate the situation where the ROM does not allow for all spatial scales to be resolved. To determine the accuracy of our methods the average of the solutions from the implicit backwards Euler method for $\nu_1$ and $\nu_2$ will be used as a benchmark. In Figure~\ref{KE_fig} we compare the average kinetic energy evolution of the Leray ensemble-POD and ensemble-POD against our benchmark solution. It can be seen that the ensemble-POD fails to match the kinetic energy of the benchmark solution, while the Leray ensemble-POD approximates it reasonably well. In Figure~\ref{error_fig} we compare the evolution of the error in the $L^{2}$ norm of Leray ensemble-POD and ensemble-POD algorithms. The Leray ensemble-POD has a significantly smaller error than the ensemble-POD algorithm. In Figure~\ref{mode_evolution} we plot the average POD mode evolution for ensemble-POD versus Leray ensemble-POD. We see that the oscillations in the POD modes are damped for Leray ensemble-POD. \begin{figure}[h!] \centering \includegraphics[width=12.cm]{figures/energy_evolution_ex1.pdf} \caption{For $3 \leq t \leq 6$, the average energy of the Leray ensemble-POD, ensemble-POD, and Implicit Euler method.} \label{KE_fig} \end{figure} \begin{figure}[h!]
\centering \includegraphics[width=12.cm]{figures/error_evolution.pdf} \caption{For $3 \leq t \leq 6$, the $L^{2}$ error evolution of the Leray ensemble-POD and ensemble-POD algorithms with $r=10$ basis functions.} \label{error_fig} \end{figure} \begin{figure}[h!] \centering \includegraphics[width=6.cm]{figures/pod_mode_evolution_a0.pdf} \includegraphics[width=6.cm]{figures/pod_mode_evolution_a1.pdf} \includegraphics[width=6.cm]{figures/pod_mode_evolution_a2.pdf} \includegraphics[width=6.cm]{figures/pod_mode_evolution_a9.pdf} \caption{The time evolution of the average POD modes for ensemble-POD versus Leray ensemble-POD.} \label{mode_evolution} \end{figure} \section{Conclusions} In this work, a Leray regularized ensemble-POD method is developed for the incompressible Navier-Stokes equations with perturbations in the forcing function, initial conditions, and viscosities. The proposed algorithm is the first ensemble-POD approach designed to work for higher Reynolds number flows. The stability and convergence of the finite element discretization of the Leray ensemble-POD model are proven. In the numerical simulation of two-dimensional flow past two offset cylinders, it is shown that the Leray ensemble-POD model is significantly more accurate than the standard ensemble-POD model. \end{document}
\begin{document} \begin{abstract} We study the dynamics of the interface between two incompressible fluids in a two-dimensional porous medium whose flow is modeled by the Muskat equations. For the two-phase Muskat problem, we establish global well-posedness and \emph{decay to equilibrium} for small $H^2$ perturbations of the rest state. For the one-phase Muskat problem, we prove local well-posedness for $H^2$ initial data of arbitrary size. Finally, we show that solutions to the Muskat equations instantaneously become infinitely smooth. \end{abstract} \maketitle {\small \tableofcontents} \section{Introduction} We consider the two-phase Muskat moving free-boundary problem: \begin{subequations}\label{laplacian} \begin{alignat}{2} \Delta P^{\pm} &= 0 \qquad&&\text{in}\quad\Omega^{\pm}(t)\,,\\ \jump{P} &= \Gamma(t)\cdot e_2 &&\text{on }\Gamma(t),\\ \jump{\nabla P\cdot n} &= 0 &&\text{on }\Gamma(t),\\ \partial\Omega^+(t)\cap \partial\Omega^-(t) &= \Gamma(t) && \forall\,t\geq0\,,\\ {\mathcal V}(\Gamma(t)) &= -\nabla P^{\pm} \cdot n \qquad&&\text{on}\quad\Gamma(t)\,, \end{alignat} \end{subequations} where $\Omega^+(t)$ and $\Omega^-(t)$ denote the time-dependent fluid domains associated with the two phases, $\Gamma(t)$ denotes the free boundary, $\Gamma(t)\cdot e_2$ is the second component of its parametrization, and ${\mathcal V}(\Gamma(t))$ is its normal velocity. We use the notation $\jump{f} = f^+ - f^-$ to denote the jump of a function $f$ across $\Gamma(t)$. The problem (\ref{laplacian}) arises in the literature as the Hele-Shaw cell (with gravity) or the Muskat problem.
Many recent results on the Muskat problem rely on the fact that equations ({\rm e}f{laplacian}a-e) can be rewritten as a system of equations for the interface $$ {\mathcal G}amma(t) = ({\partial\hspace{1pt}}si_1(t,x_1), {\partial\hspace{1pt}}si_2(t,x_1)), \qquad x_1 \in \mathbb{R} \,, \ \ t \in [0,T]\,, $$ taking the form $$ {\partial\hspace{1pt}}artial_t {\partial\hspace{1pt}}si=T[{\partial\hspace{1pt}}si], $$ where $T[{\partial\hspace{1pt}}si]$ is a highly nonlinear singular integral operator, whose linearization (about a flat interface) behaves like $\sqrt{-{\mathcal D}elta}$. In order to establish existence theorems for the system ({\rm e}f{laplacian}), this singular-integral-operator approach makes extensive use of the explicit integral kernel representations for the operator $T$ for the following fluid domains (or geometries): \begin{alignat*}{2} &\text{\bf(a)} \ \ \overline{{\mathcal O}mega^+(t)}\cup {\mathcal O}mega^-(t) && = {\mathbb R}^2 \,, \\ &\text{\bf(b)} \ \ \overline{{\mathcal O}mega^+(t)}\cup {\mathcal O}mega^-(t) && = {\mathbb T}\times{\mathbb R}\,,\\ &\text{\bf(c)} \ \ \overline{{\mathcal O}mega^+(t)}\cup \overline{{\mathcal O}mega^-(t)} && = {\mathbb R}\times[-l,l] \,. \end{alignat*} In the case of general domain geometries, we are not aware of any existence and regularity theories. 
The classical problem ({\rm e}f{laplacian}a-e) is related to both the (two-phase) Stefan problem \begin{subequations}\label{stefan} \begin{alignat}{2} {\partial\hspace{1pt}}artial_tP^{\partial\hspace{1pt}}m-{\mathcal D}elta P^{\partial\hspace{1pt}}m &= 0 \qquad&&\text{in}\quad{\mathcal O}mega^{\partial\hspace{1pt}}m(t)\,,\\ P^{\partial\hspace{1pt}}m&= 0 &&\text{on }{\mathcal G}amma(t),\\ {\partial\hspace{1pt}}artial{\mathcal O}mega^+(t)\cap {\partial\hspace{1pt}}artial{\mathcal O}mega^-(t) &= {\mathcal G}amma(t) && \forall\,t\geq0\,,\\ {\mathcal V}({\mathcal G}amma(t)) &= \jump{{\rm n}abla P\cdot n} \qquad&&\text{on}\quad{\mathcal G}amma(t)\,, \end{alignat} \end{subequations} and also with the Muskat problem with variable permeability $\beta(x)$, \begin{subequations}\label{laplacianpermeability} \begin{alignat}{2} \text{div}\,\left(\beta(x){\rm n}abla P^{\partial\hspace{1pt}}m{\rm i}ght) &= 0 \qquad&&\text{in}\quad{\mathcal O}mega^{\partial\hspace{1pt}}m(t)\,,\\ \jump{P} &= {\mathcal G}amma(t)\cdot e_2 &&\text{on }{\mathcal G}amma(t),\\ \jump{{\rm n}abla P\cdot n} &= 0 &&\text{on }{\mathcal G}amma(t),\\ {\partial\hspace{1pt}}artial{\mathcal O}mega^+(t)\cap {\partial\hspace{1pt}}artial{\mathcal O}mega^-(t) &= {\mathcal G}amma(t) && \forall\,t\geq0\,,\\ {\mathcal V}({\mathcal G}amma(t)) &= -{\rm n}abla P^{\partial\hspace{1pt}}m \cdot n \qquad&&\text{on}\quad{\mathcal G}amma(t)\,. \end{alignat} \end{subequations} Herein, we introduce a new method to analyze the system ({\rm e}f{laplacian}a-e), which is based on the analysis of the partial differential equations rather than any associated integral kernel. Our methodology can treat the two-phase Muskat problem with two different viscosities or with a non-constant permeability. Our method can also be applied to the Stefan problem \cite{HaSh2014}, to the free-boundary problem for the incompressible Euler equations \cite{CoSh2007,CoSh2014}, as well as to the compressible Euler equations \cite{CoSh2012, CoHoSh2013} . 
One of the main interests of this new method is that it can be adapted to several space dimensions and arbitrary domain geometries $ {\mathcal O}mega^+(t)\cup {\mathcal O}mega^-(t). $ \subsection{Darcy's law} The Muskat problem, introduced in \cite{Muskat}, models the evolution of two fluids of varying density in a two-dimensional porous medium. The presence of the solid matrix inside the porous medium has an important consequence: the usual fluid equations for the conservation of momentum are replaced with the {\it empirical} Darcy's Law (see \cite{bear, NB}) given by \begin{equation}\label{basic-model} \frac{\mu}{\beta}u=-{\rm n}abla p-(0,g{\rm h}o)^T, \end{equation} where $\mu,{\rm h}o$ are the viscosity and the density of the fluid, respectively, $\beta$ is the permeability of the medium, $p$ is the pressure, and $g$ is the acceleration due to gravity. As ({\rm e}f{basic-model}) is a model of aquifers, oil wells or geothermal reservoirs, this problem is of practical importance in geoscience (see, for example, \cite{CF,Parseval-Pillai-Advani:model-variation-permeability} and the references therein); moreover, it has also been considered as a model for the velocity of cells in tumor growth (see \cite{F, P}). The movement of a fluid trapped between two parallel vertical plates, which are separated by a very narrow distance, is known as the Hele-Shaw cell problem (see \cite{HeleShaw:motion-viscous-fluid-parallel-plates}). The equations of motion in a Hele-Shaw cell are $$ \frac{12\mu}{d^2}u=-{\rm n}abla p-(0,g{\rm h}o)^T, $$ where $d$ is the distance between the plates. The similarity of both problems is obvious and, in fact, the Muskat problem is equivalent to the two-phase Hele-Shaw problem with gravity. \subsection{The Muskat problem set in various geometries} We shall consider various domain geometries in this paper, and we begin with the case of a domain with infinite depth. 
\subsubsection{The infinitely-deep case} Let $(u^{\partial\hspace{1pt}}m,p^{\partial\hspace{1pt}}m)$ denote the velocity and the pressure in the fluid domains ${\mathcal O}mega^{\partial\hspace{1pt}}m(t)$, and let ${\mathcal G}amma(t)$ denote the material interface between ${\mathcal O}mega^+(t)$ and ${\mathcal O}mega^-(t)$; that is, ${\mathcal G}amma(t) = \cls{{\mathcal O}mega^+(t)}\cap \cls{{\mathcal O}mega^-(t)}$. Setting, the permeability $\beta\equiv1$, the two-phase Muskat problem has the following Eulerian description: \begin{subequations}\label{HS_Eulerian} \begin{alignat}{2} \mu^{\partial\hspace{1pt}}m u^{\partial\hspace{1pt}}m + {\rm n}abla p^{\partial\hspace{1pt}}m &= - {\rm h}o^{\partial\hspace{1pt}}m e_2 \qquad&&\text{in}\quad{\mathcal O}mega^{\partial\hspace{1pt}}m(t)\,,\\ \operatorname{div} u &= 0 &&\text{in}\quad{\mathcal O}mega^{\partial\hspace{1pt}}m(t)\,,\\ {\mathcal V}({\mathcal G}amma(t)) &= u^{\partial\hspace{1pt}}m \cdot n \qquad&&\text{on}\quad{\mathcal G}amma(t)\,,\\ {\mathcal O}mega^{\partial\hspace{1pt}}m(0) &= {\mathcal O}mega^{\partial\hspace{1pt}}m &&\text{on}\quad\{t=0\}\,,\\ \overline{{\mathcal O}mega^+(t)}\cup{\mathcal O}mega^-(t) &= {\mathbb R}^2 &&\text{for every }\quad t\geq 0\,, \end{alignat} \end{subequations} where $e_2 = (0,1)$, $n(\cdot ,t)$ is the outward pointing unit normal on ${\partial\hspace{1pt}}artial{\mathcal O}mega^-(t)$. In particular, we consider the case that $${\mathcal G}amma(t) = (x_1, h(x_1,t))$$ is the graph of the height function $h(x_1,t)$, and we assume that either $x_1 \in \mathbb{T} ^1$, or that $x_1 \in \mathbb{R} ^1$ and that $h(x_1,t)$ vanishes at infinity. It follows that the two time-dependent fluid domains ${\mathcal O}mega^{\partial\hspace{1pt}}m(t)$ are given by $$ {\mathcal O}mega^+(t) = \big\{(x_1,x_2)\,\big|\, x_2 > h(x_1,t)\big\}\,,\quad {\mathcal O}mega^-(t) = \big\{(x_1,x_2)\,\big|\, x_2 < h(x_1,t)\big\}\,. 
$$ Since $ \operatorname{div} u^\pm=0$, we must have that $\jump{u\cdot n}=0$ on $\Gamma(t)$; furthermore, as we assume that the effect of surface tension is negligible\footnote{Our methodology can treat the Muskat problem with surface tension in the same way.}, we set $$\jump{p}= 0 \text{ on } \Gamma(t)\,.$$ \subsubsection{The finitely-deep case with general geometry} We shall additionally consider geometries which generalize the \emph{infinitely-deep} case that $\Omega^+(t)\cup\Omega^-(t)={\mathbb R}^2$ or the \emph{confined} case that $\overline{\Omega^+(t)}\cup\overline{\Omega^-(t)}={\mathbb R}\times[-l,l]$ (and $\|h\|_{L^\infty}<l$). Let $\tilde{t}(x_1)$ and $\tilde{b}(x_1)$ be two smooth functions. Given two constants $c_t>0$, $c_b<0$, we write $$ b(x_1)=c_b+\tilde{b}(x_1),\;t(x_1)=c_t+\tilde{t}(x_1). $$ We assume that the two fluids flow in bounded domains of the type $$ \Omega^+(t)\cup\Omega^-(t)=\{(x_1,x_2),\,b(x_1)<x_2<t(x_1)\},\text{ for every } t\geq0; \eqno{\rm(\ref{HS_Eulerian}e')} $$ thus, each phase is given by $$ \Omega^+(t)=\{(x_1,x_2),\, x_1\in{\mathbb R},\,h(x_1,t)<x_2<t(x_1)\}, $$ and $$ \Omega^-(t)=\{(x_1,x_2),\, x_1\in{\mathbb R},\,b(x_1)<x_2<h(x_1,t)\}. $$ Note that additional impervious boundary conditions must be added to the system \eqref{HS_Eulerian} on the {\it fixed} bottom and top boundaries. These are given by $$ u\cdot n=0\text{ at }\partial(\overline{\Omega^+(t)}\cup\overline{\Omega^-(t)}). \eqno{\rm(\ref{HS_Eulerian}f)} $$ Finally, we assume that the initial height function $h_0$ satisfies $$ b(x_1)<h_0(x_1)<t(x_1). $$ \subsubsection{The one-phase Muskat problem} We shall also consider the one-phase Muskat problem, corresponding to the case that $(\mu^+,\rho^+)=(0,0)$.
In other words, only one fluid flows through the porous medium, and the ``top'' phase corresponds to vacuum. Furthermore, we consider the case that the interface is periodic (so $x_1\in{\mathbb T}$). Then, our time-dependent domain is given by $$ {\mathcal O}mega(t) = {\mathbb T}\times\big\{c_b<x_2 < h(x_1,t)\big\}\text{ for every }\quad t\geq 0\,, $$ with moving boundary $$ {\mathcal G}amma(t) = {\mathbb T}\times\big\{x_2 = h(x_1,t)\big\}\text{ for every }\quad t\geq 0\,. $$ To simplify notation for the one-phase problem, we set $(\mu^-,{\rm h}o^-)=(1,1)$. We again use $(u,p)$ to denote the velocity and the pressure of this fluid in the fluid domain ${\mathcal O}mega(t)$ with free boundary ${\mathcal G}amma(t)$. The one-phase Muskat problem is written as \begin{subequations}\label{HS_Eulerian_Onephase} \begin{alignat}{2} u+ {\rm n}abla p &= - e_2 \qquad&&\text{in}\quad{\mathcal O}mega(t)\,,\\ {\operatorname{div}} u &= 0 &&\text{in}\quad{\mathcal O}mega(t)\,,\\ {\mathcal V}({\mathcal G}amma(t)) &= u \cdot n \qquad&&\text{on}\quad{\mathcal G}amma(t)\,,\\ u \cdot e_2 &= 0\qquad&&\text{on}\quad\{x_2=c_b\}\,,\\ p &= 0 \qquad&&\text{on}\quad{\mathcal G}amma(t)\,, \end{alignat} \end{subequations} where $e_2 = (0,1)$, $n(\cdot ,t)$ is the outward pointing unit normal on ${\mathcal G}amma(t)$, and ${\mathcal V}({\mathcal G}amma(t))$ is the normal velocity of ${\mathcal G}amma(t)$. As we only have one phase, ({\rm e}f{HS_Eulerian_Onephase}e) expresses the continuity of the pressure on ${\mathcal G}amma(t)$. Note, also, that we have added the impermeable boundary condition on the fixed bottom boundary in({\rm e}f{HS_Eulerian_Onephase}d). \subsection{The Rayleigh-Taylor stability condition} The Rayleigh-Taylor stability (or sign) condition is defined as \begin{equation*} RT(t)={\mathcal B}igjump{\frac{{\partial\hspace{1pt}}artial p}{{\partial\hspace{1pt}}artial n}}=-({\rm n}abla p^-({\mathcal G}amma(t))-{\rm n}abla p^+({\mathcal G}amma(t)))\cdot n>0. 
\end{equation*} Due to the incompressibility of the fluids, and using Darcy's law together with the fact that the curve can be parametrized as a graph, the Rayleigh-Taylor stability condition reduces to the following expression: \begin{equation}\label{RTstable} RT(t)=(\mu^--\mu^+)u\cdot n+\frac{{\rm h}o^--{\rm h}o^+}{\sqrt{1+(h'(x))^2}}=-\jump{\mu}u\cdot n-\frac{\jump{{\rm h}o}}{{\sqrt{1+(h'(x))^2}}} >0. \end{equation} In particular, for the case of two equal viscosities $\mu^-=\mu^+$, the fluids are in the stable regime if the lighter fluid is above the heavier fluid. Our research focuses on the stable case, so, henceforth, we shall assume that $\jump{{\rm h}o}<0$. Note that in the one-phase Muskat problem, the Rayleigh-Taylor stability condition reduces to \begin{equation}\label{RTonephase} RT(t)=-{\rm n}abla p^-({\mathcal G}amma(t))\cdot n>0. \end{equation} This stability condition is ubiquitous in free boundary problems; it also appears in the Stefan problem, the water waves problem, the incompressible Euler equations, the compressible Euler equations with physical-vacuum boundary, and the MHD equations. When the initial data does not verify the Rayleigh-Taylor stability condition, then the Muskat problem is ill-posed (see, for instance, \cite{c-g07,CGO}). It has also been shown for the Muskat problem that there exists initial data such that the Rayleigh-Taylor stability condition can break-down in finite time \cite{ccfgl,CGO, GG}. We note that if the height function $h( \cdot , t) $ (which represents the moving interface ${\mathcal G}amma(t)$) is small in certain norms, and if we assume that $\jump{{\rm h}o}<0$, then the Rayleigh-Taylor stability condition is achieved without any other hypothesis on the initial data. 
In the case of the unbounded, one-phase Muskat problem, it is known that the Rayleigh-Taylor stability condition is automatically satisfied due to Hopf's Lemma and Darcy's Law (see \cite{ccfgonephase}); however, in the one-phase case with a flat, bounded domain, it is not clear that the Rayleigh-Taylor stability condition is automatically satisfied, because of a non-zero Neumann boundary condition on the fixed bottom boundary. \subsection{Prior results on the Muskat problem and related models} Free-boundary problems for incompressible fluids in a porous medium have been extensively studied in recent years. For the Muskat problem with fluids having the same viscosities ($\jump{\mu}=0$), the qualitative behavior for arbitrarily large initial data is well understood. In particular, for the infinitely-deep case, C\'ordoba \& Gancedo proved the local existence of solutions for $H^3({\mathbb R})$ initial data in the stable Rayleigh-Taylor regime and the ill-posed character of the Muskat problem in the unstable Rayleigh-Taylor regime in \cite{c-g07}, a maximum principle for $\|h(t)\|_{L^\infty}$ in \cite{c-g09}, and local existence in the case with more than two phases in \cite{c-g10}. In a remarkable paper, Castro, C\'ordoba, Fefferman, Gancedo \& L\'opez-Fern\'andez \cite{ccfgl} proved the existence of turning waves, \emph{i.e.} interfaces such that there exists $T_{1}$ such that $$ \limsup_{t\rightarrow T_{1}} \|h'(t)\|_{L^\infty}=\infty. $$ Later, Castro, C\'ordoba, Fefferman \& Gancedo obtained in \cite{castro2012breakdown} the existence of curves showing finite-time singularities. These curves correspond to analytic initial data in the Rayleigh-Taylor stable regime such that there exist $T_{1}$ and $T_2$ such that, at $t=T_1$, the solution enters the Rayleigh-Taylor unstable regime and later, at $t=T_2$, is no longer $C^4$.
The confined case when the two viscosities are the same ($\jump{\mu}=0$) has been treated by C\'ordoba, Granero-Belinch\'on \& Orive \cite{CGO}. When the porous medium is inhomogeneous, the evolution of the interface has been studied by Berselli, C\'ordoba \& Granero-Belinch\'on \cite{BCG} and G\'omez-Serrano \& Granero-Belinch\'on \cite{GG}. Ambrose \cite{AmbroseST} studied the limit of zero surface tension for initial data which satisfies ({\rm e}f{RTstable}). For further results, see also the review by Castro, C\'ordoba \& Gancedo \cite{Castro-Cordoba-Gancedo:recent-results-muskat}. For the related Hele-Shaw cell problem, Constantin \& Pugh \cite{Peter}, using complex analysis tools, proved the stability and exponential decay of solution. Chen \cite{chen1993hele} studied the two-phase Hele-Shaw problem with surface tension and proved global well-posedness for small enough initial interfaces. Elliot \& Ockendon \cite{elliott1982weak} proved the existence of weak solutions, while Escher \& Simonett \cite{ES} obtained local, classical solutions in multiple space dimensions. Escher \& Simonett \cite{escher1998center} proved global existence and stability near spherical shapes using center manifold theory. The global existence and decay for solutions of the one-phase Hele-Shaw problem with various fluid injection-rates was studied by Cheng, Coutand \& Shkoller \cite{cheng2012global}. Returning to the Muskat problem, when the initial data is assumed to be small in certain lower-order norms and the two fluid viscosities are equal, there are several available results for global-in-time solutions. In \cite{ccgs-10}, C\'ordoba, Constantin, Gancedo \& Strain proved the global existence of $H^3$ Sobolev class solutions for initial data with \emph{small} derivative in the Wiener algebra $A({\mathbb R})$, and global existence of Lipschitz (weak) solutions for initial data with \begin{equation}\label{sizecon} \|h'_0\|_{L^\infty}<1. 
\end{equation} Therein, the authors also proved an $L^2$ energy balance. The global weak solution of \cite{ccgs-10} was later extended to the confined case by Granero-Belinch\'on in \cite{G}. It is worth noting that, due to the effect of the impervious boundaries, the size restrictions on the data are not as clear as \eqref{sizecon} and for the confined setting, involve $\|h_0\|_{L^\infty}, \|h'_0\|_{L^\infty},$ and the depth. Very recently, in \cite{ccgs-13}, C\'ordoba, Constantin, Gancedo, Rodr\'iguez-Piazza \& Strain obtained global existence for small data in the case of a two-dimensional interface; furthermore, among other results, they proved the existence of a global solution in $H^2$ for data with \emph{small} derivative in the Wiener algebra $A({\mathbb R})$, and the existence of a global solution in $H^{1.5}$ if the initial data is also in the Wiener algebra $A({\mathbb R})$ and satisfies a smallness assumption. We remark that these global-in-time existence results are for initial data of {\it medium}-size, in the sense that initial data must be bounded by constants of $O(1)$. In the case of two fluids with different viscosities, there are fewer results. The local existence for arbitrary $\mu^{\partial\hspace{1pt}}m$, ${\rm h}o^{\partial\hspace{1pt}}m$ and $H^3$ data was proven by C\'ordoba, C\'ordoba \& Gancedo in \cite{c-c-g10}. In the case of surface tension, Escher \& Matioc \cite{e-m10} and Escher, Matioc \& Matioc \cite{escher2011generalized} established local and global existence, and stability, in the little H\"{o}lder spaces. The singularity formation for the one-phase case (when $\mu^+={\rm h}o^+=0$) has been studied by Castro, C\'ordoba, Fefferman \& Gancedo in \cite{ccfgonephase} where they proved the existence of the so-called interface ``splash'' singularity wherein a locally smooth interface self-intersects at a point. 
C\'ordoba \& Pern\'as-Casta\~no in \cite{cponephase} proved the non-existence of a ``splat'' singularity, in which a locally smooth interface self-intersects on a curve. Gancedo \& Strain \cite{gancedo2013splasnqgmsukat} proved that the Muskat problem with three different fluids cannot develop a ``splash'' singularity in finite time. In related work, Fefferman, Ionescu \& Lie \cite{FeIoLi2013} and Coutand \& Shkoller \cite{CoSh20142} have shown that a finite-time splash singularity cannot occur for the two-fluid Euler equations. Very closely related to the Hele-Shaw and Muskat models, the Stefan problem (\ref{stefan}a-d) is a model of phase transition, and serves as yet another example of a classical free-boundary problem. One fundamental difference, however, with the Muskat problem is that there does not exist a contour dynamics description of the free-boundary evolution; on the other hand, it has been widely studied using a variety of parabolic PDE methods. For instance, the existence of classical solutions \emph{with derivative loss} was obtained by Meirmanov \cite{meirmanov1992stefan}, while the regularity of the free boundary was treated by Kinderlehrer \& Nirenberg in a series of papers \cite{kinderlehrer1977regularity, kinderlehrer1978smoothness}, wherein they showed that if the free boundary is $C^1$ and the temperature $P$ satisfies certain conditions, the interface is analytic in space and of Gevrey class in time. More recently, Had\v{z}i\'{c} \& Shkoller \cite{hadzic5817well, HaSh2014} proved the local and global existence \emph{without derivative loss}, as well as the decay of solutions to equilibrium states. \subsection{Well-posedness for $H^s$ data with $s\leq 2.5$} Mathematically, an $H^s$ well-posedness result, with $s\leq 2.5$, for \eqref{contour} and \eqref{contour2} is challenging because the usual energy estimates indicate that $\|h\|_{C^{2+\delta}}$ is the quantity in the \emph{available} continuation criterion (see \cite{c-g07, CGO}).
As we have already noted, most prior existence theorems have relied upon the contour equations for the interface, which, in the case of the infinitely-deep, unconfined Muskat problem is given as \begin{equation}\label{contour} {\partial\hspace{1pt}}artial_t h=\text{p.v.}\int_{\mathbb R}\frac{(h'(x_1)-h'(x_1-y))y}{y^2+(h(x_1)-h(x_1-y))^2}dy, \end{equation} and for the finitely-deep medium, confined Muskat problem (with domain ${\mathbb R}\times[-l,l]$) as \begin{align}\label{contour2} {\partial\hspace{1pt}}artial_t h& =\text{p.v.}\int_{\mathbb R}\frac{(h'(x_1)-h'(x_1-y))\sinh(y)}{\cosh(y)-\cos(h(x_1)-h(x_1-y))}dy +\text{p.v.}\int_{\mathbb R}\frac{(h'(x_1)+h'(x_1-y))\sinh(y)}{\cosh(y)+\cos(h(x_1)+h(x_1-y))}dy \,. \end{align} These contour equations are obtained from the Birkhoff-Rott integral together with the following expression for the vorticity: $$ \omega(x_1, x_2,t)=\varpi(x_1,t)\delta_{{\mathcal G}amma(t)}, $$ where $x_1 \in \mathbb{R} $ parametrizes ${\mathcal G}amma(t)$, $\varpi(x_1,t)$ is the amplitude of vorticity, and $\delta_{{\mathcal G}amma(t)}$ is the Dirac delta-distribution which is a function of $(x_1,x_2)$ on the moving interface ${\mathcal G}amma(t) \subset \mathbb{R}^2 $. In particular, as the contour equations use the kernel for the operator ${\rm n}abla^{\partial\hspace{1pt}}erp {\mathcal D}elta^{-1}$, there have been no prior existence theorems for arbitrary domain geometries. In the case that $\jump{\mu}=0$, the contour equations have a significant simplification with respect to the case of two different viscosities. 
This is due to the fact that, if $\jump{\mu}=0$, the amplitude of the vorticity is $\varpi=\jump{{\rm h}o} h'$; however, in the case with two different viscosities, the amplitude for the vorticity $\varpi$ verifies the integral equation $$ -({\rm h}o^2-{\rm h}o^1)h'(x_1)=\left(\mu^2-\mu^1{\rm i}ght)\text{p.v.}\int_{\mathbb R} \varpi(\beta) \mathcal{B} (x_1,h(x_1),\beta,h(\beta))d\beta\cdot (1,h'(x_1))+\left(\frac{\mu^2+\mu^1}{2}{\rm i}ght)\varpi, $$ where $\mathcal{B} $ denotes the kernel of ${\rm n}abla^{\partial\hspace{1pt}}erp{\mathcal D}elta^{-1}$ (which depends on the domain). For instance, if the union of the two fluid domains is $ \mathbb{R}^2 $, then $$ \mathcal{B} (x_1,x_2,y_1,y_2)=\left(-\frac{x_2-y_2}{(x_2-y_2)^2+(x_1-y_1)^2}, \frac{x_1-y_1}{(x_2-y_2)^2+(x_1-y_1)^2}{\rm i}ght). $$ Thus, to write the amplitude of the vorticity in terms of the interface, one needs to invert an operator as in C{\'o}rdoba, C{\'o}rdoba, \& Gancedo \cite{c-c-g10}. This is a difficult issue, and with our method, we are able to avoid it entirely. \section{Statement of the main theorems} Our first result is \begin{theorem}[$H^2$ local well-posedness for the two-phase problem]\label{localsmall} Let $h_0\in H^2({\mathbb R})$ be the initial height function and let $\mu^{\partial\hspace{1pt}}m,\,{\rm h}o^{\partial\hspace{1pt}}m>0,$ be fixed constants. 
Then for every arbitrarily small $s>0$ there exist small enough constants $\sigma_s$, $\tilde{\sigma}$, $T(h_0)>0$, such that if either \begin{enumerate} \item (for the infinitely-deep Muskat problem ({\rm e}f{HS_Eulerian}a-e)) if \begin{equation}\label{cgs1} \|h_0\|_{H^{1.5+s}({\mathbb R})}< \sigma_s \end{equation} or \item (for the confined Muskat problem ({\rm e}f{HS_Eulerian}a-d,e',f)) if $$ \|h_0\|_{H^{1.5+s}({\mathbb R})}< \sigma_s $$ $$ \max\{|\tilde{t}|_2,|\tilde{b}|_2\}\leq\tilde{\sigma}, $$ \end{enumerate} then there exists a unique local-in-time solution $$ h\in C([0,T(h_0)];H^2(\mathbb{R}))\cap L^2(0,T(h_0);H^{2.5}(\mathbb{R})). $$ Moreover, this solution verifies $$ \|h(t)\|_{L^2({\mathbb R})}^2+\int_0^t\|\sqrt{\mu^+} u^+(\mathfrak{t})\|_{L^2({\mathcal O}mega^+(\mathfrak{t}))}^2d \mathfrak{t} +\int_0^t\|\sqrt{\mu^-} u^-(\mathfrak{t} )\|_{L^2({\mathcal O}mega^-(\mathfrak{t} ))}^2d\mathfrak{t} =\|h_0\|_{L^2({\mathbb R})}^2, $$ and $$ \max_{0\leq s\leq T(h_0)}\{\|h(s)\|_{H^2({\mathbb R})}^2\}+\int_0^{T(h_0)}\|h(s)\|_{H^{2.5}({\mathbb R})}^2ds\leq C_1\|h_0\|_{H^2({\mathbb R})}^2, $$ for a fixed constant $C_1$. \end{theorem} We remark that the constants appearing in this theorem depend on the physical parameters $\mu^{\partial\hspace{1pt}}m,{\rm h}o^{\partial\hspace{1pt}}m>0$. The proof of this result in the infinitely-deep case has been split into several steps in Section {\rm e}f{sec2}. For the sake of simplicity, the proof is given for the case that $s=0.25$ in ({\rm e}f{cgs1}), but the general case is obtained in a straightforward manner. This proof also covers the confined problem with flat {\it top} and {\it bottom} boundaries. Observe that the solution gains an extra half-derivative in space, when integrated in time. As we shall explain, this {\it parabolic-regularity} property is obtained by using the jump condition related to the expression for the amplitude of the vorticity. 
In Section {\rm e}f{sec3}, we provide the proof for the case of general domain geometries. Next, we address the question of global existence and decay to equilibrium of classical solutions for small data. Indeed, if the initial data is periodic, Theorem {\rm e}f{localsmall} can be strengthened, and we obtain \begin{theorem}[$H^2$ global well-posedness and decay to equilibrium]\label{globalsmall} Let $h_0\in H^2({\mathbb T})$ be the periodic, zero-mean initial height function for the infinitely-deep Muskat problem ({\rm e}f{HS_Eulerian}a-d) with $\mu^{\partial\hspace{1pt}}m,{\rm h}o^{\partial\hspace{1pt}}m>0$. Then there exists a small enough constant $\sigma_2=\sigma_2(\mu^{\partial\hspace{1pt}}m,{\rm h}o^{\partial\hspace{1pt}}m)$, such that if $\|h_0\|_{H^2({\mathbb T})}\leq \sigma_2$, there exists a unique global-in-time solution $$ h\in C([0,\infty];H^2(\mathbb{T}))\cap L^2(0,\infty;H^{2.5}(\mathbb{T})). $$ Moreover, this solution verifies $$ \max_{0\leq s\leq \infty}\{\|h(s)\|_{H^2({\mathbb T})}^2\}+\int_0^\infty\|h(s)\|_{H^{2.5}({\mathbb T})}^2ds\leq C\|h_0\|_{H^2({\mathbb T})}^2, $$ together with the decay estimate $$ \|h(t)\|_{L^2({\mathbb T})}^2\leq c(h_0)e^{-\alpha t},\text{ and, more generally, } \|h(t)\|_{H^r({\mathbb T})}^2\leq c(h_0,r)e^{-\left(1-\frac{r}{2}{\rm i}ght)\alpha t} $$ for every $0\leq \alpha<2,$ $0\leq r<2$. \end{theorem} The proof of this result is given in Section {\rm e}f{sec4}. Notice that the decay of the linear problem ($\alpha=2$) is not reached and appears to be critical. \begin{remark} We can compare the global existence result given by our Theorem {\rm e}f{globalsmall} with the global existence results in \cite{ccgs-10,ccgs-13} for the case that $\jump{\mu}=0$. 
On the one hand, because of the embedding inequality $$ \| u\|_{A({\mathbb R})} \le C \| u\|_{H^{0.5+s}({\mathbb R}) } \,,\,s>0 $$ we see that we must impose more severe size constraints on our initial data than the results of \cite{ccgs-10,ccgs-13}; on the other hand, our result can also handle the case that $\mu^+ \neq \mu^-$, and we find the exponential decay rate back to the equilibrium configuration. \end{remark} For the one-phase Muskat problem (the case where $\mu^+=\rho^+=0$), our previous result is improved: \begin{theorem}[Local well-posedness for the one-phase problem]\label{localonephase} Fix $\mu^+=\rho^+=0$, $\mu^-,\rho^->0$, $\tilde{b}(x_1)=0$. Let $h_0\in H^{2}({\mathbb T})$, with $\min_{x_1}h_0(x_1)>c_b$, be the initial height function for the confined, one-phase Muskat problem (\ref{HS_Eulerian_Onephase}a-e) satisfying the Rayleigh-Taylor stability condition \eqref{RTonephase}. Then there exist $T(h_0)$ and a unique local-in-time solution $$ h\in C([0,T(h_0)];H^2(\mathbb{T}))\cap L^2(0,T(h_0);H^{2.5}(\mathbb{T})) $$ for the confined Muskat problem (\ref{HS_Eulerian_Onephase}a-e). Moreover, this solution verifies $$ \|h(t)\|_{L^2({\mathbb T})}^2+\int_0^t\|\sqrt{\mu^-} u(\mathfrak{t})\|_{L^2(\Omega^-(\mathfrak{t}))}^2d\mathfrak{t}=\|h_0\|_{L^2({\mathbb T})}^2, $$ and $$ \max_{0\leq s\leq T(h_0)}\{\|h(s)\|_{H^2({\mathbb T})}^2\}+\int_0^{T(h_0)}\|h(s)\|_{H^{2.5}({\mathbb T})}^2ds\leq C_1\|h_0\|_{H^2({\mathbb T})}^2, $$ for a fixed constant $C_1$. \end{theorem} \begin{remark} Note that in Theorem \ref{localonephase}, the initial data can be arbitrarily large; in particular, we place no smallness condition on the data. \end{remark} The proof of Theorem \ref{localonephase} is given in Section \ref{sec5}.
Finally, as a consequence of our half-derivative gain in space, $L^2$-in-time, we have the following \begin{theorem}[Instantaneous parabolic smoothing]\label{Cinftyonephase} Given ${\mathcal G}amma$ and a solution $h$ to the Muskat problem satisfying $$ h\in C([0,T(h_0)];H^2({\mathcal G}amma))\cap L^2(0,T(h_0);H^{2.5}({\mathcal G}amma)) $$ and either \begin{enumerate} \item ${\mathcal G}amma={\mathbb R}$ and $h$ is the solution to for the infinitely-deep Muskat problem ({\rm e}f{HS_Eulerian}a-e) obtained under the hypotheses of Theorem {\rm e}f{localsmall}, \item ${\mathcal G}amma={\mathbb R}$ and $h$ is the solution to for the confined Muskat problem ({\rm e}f{HS_Eulerian}a-d,e',f) obtained under the hypotheses of Theorem {\rm e}f{localsmall}, \item ${\mathcal G}amma={\mathbb T}$ and $h$ is the solution to for the one-phase Muskat problem ({\rm e}f{HS_Eulerian_Onephase}a-e) obtained under the hypotheses of Theorem {\rm e}f{localonephase}, \end{enumerate} then, in fact, $$ h(\cdot ,t)\in C^\infty({\mathcal G}amma)\text{ if }\delta\leq t\leq T(h_0),\,\,\forall\,\delta>0. $$ \end{theorem} The proof of this result is given in Section {\rm e}f{sec6}. \subsection{Notation} \subsubsection{Matrix notation} Let $A$ be a matrix, and $b$ be a column vector. Then, we write $A^i_j$ for the component of $A$, located on row $i$ and column $j$; consequently, using the Einstein summation convention, we write $$ (Ab)^k=A^k_ib^i\text{ and }(A^Tb)^k=A^i_k b^i. $$ \subsubsection{Sobolev norms} For $s\ge 0$, we let $$ \|u\|_{s,+} = \|u^+\|_{H^s({\mathcal O}mega^+)}\,,\ \|u\|_{s,-} = \|u^-\|_{H^s({\mathcal O}mega^-)}\,,\ \|u\|_{s,{\partial\hspace{1pt}}m} = \|u^+\|_{s,+} + \|u^-\|_{s,-} $$ and $$ |h|_s = \|h\|_{H^s({\mathcal G}amma)}\,. $$ Let ${\mathbb R}^2_+$ and ${\mathbb R}^2_-$ denote the upper and lower half plane, respectively. 
Then, abusing notation, we write $$ \|v\|_{s,+} = \|v^+\|_{H^s({\mathbb R}^2_+)}\,,\ \|v\|_{s,-} = \|v^-\|_{H^s({\mathbb R}^2_-)}\,,\ \|v\|_{s,{\partial\hspace{1pt}}m} = \|v^+\|_{s,+} + \|v^-\|_{s,-} $$ and $$ |h|_s = \|h\|_{H^s({\mathbb R})}\,. $$ \subsubsection{The derivatives} We let $f'$ denote the (tangential) derivative of $f$ with respect to $x_1$; that is, $$ f' = \frac{{\partial\hspace{1pt}} f}{{\partial\hspace{1pt}} x_1}. $$ For $k=1,2$, we write $$ f_{,k}=\frac{{\partial\hspace{1pt}}artial f}{{\partial\hspace{1pt}}artial x_k}. $$ For a diffeomorphism ${\partial\hspace{1pt}}si$, we let $\text{curl}_{\partial\hspace{1pt}}si u=\text{curl} u\circ{\partial\hspace{1pt}}si$ and $\text{div}_{\partial\hspace{1pt}}si u=\text{div} u\circ{\partial\hspace{1pt}}si$. \subsubsection{Mollifiers} We consider $\mathcal{J}$ a symmetric, positive mollifier with total integral equal to 1. For $\kappa>0$, we define \begin{equation*} \mathcal{J}_\kappa(x_1)=\frac{1}{\kappa}\mathcal{J}\left(\frac{x_1}{\kappa}{\rm i}ght) \end{equation*} and we denote $$ f^\kappa=\mathcal{J}_\kappa f=\mathcal{J}_\kappa*f \ \text{ and } \ f^{\kappa\kappa}=\mathcal{J}_\kappa\mathcal{J}_\kappa f\,. $$ \subsubsection{Dependence on space and time} For a function $f(x,t)$, we shall often write $f(t)$ to denote $f(\cdot ,t)$. We associate to the pair of functions $u^{\partial\hspace{1pt}}m:{\mathcal O}mega^{\partial\hspace{1pt}}m(t) \to \mathbb{R} $, the function $u: \mathbb{R}^2 \to \mathbb{R} $ as follows: $$ u=u^+\textbf{1}_{{\mathcal O}mega^+(t)}+u^-\textbf{1}_{{\mathcal O}mega^-(t)}. $$ When we write $\int_ {{\mathcal O}mega^+(t)} u ( \cdot ,t) dx$, this is understood to mean $\int_ {{\mathcal O}mega^+(t)} u^+ ( \cdot ,t) dx$. 
\section{The ALE and semi-ALE formulations of the Muskat problem} \subsection{The ALE and semi-ALE formulation}\label{localsmall1} \subsubsection{The ALE formulation} We let $\delta {\partial\hspace{1pt}}si^+$ denote the harmonic extension of $h$ to the upper half plane: \begin{subequations}\label{deltapsi} \begin{alignat}{2} {\mathcal D}elta \delta {\partial\hspace{1pt}}si^{+} &= 0 &&\text{in}\quad {\mathbb R}^2_+ \,,\\ \delta {\partial\hspace{1pt}}si^{+} &= h \qquad&&\text{on}\quad \{x_2 = 0\}\,. \end{alignat} \end{subequations} We define $\delta {\partial\hspace{1pt}}si^{-}(x_1,x_2)=\delta {\partial\hspace{1pt}}si^{+}(x_1,-x_2)$. We write $e$ for the identity map given by $e(x) = x$ and define ${\partial\hspace{1pt}}si^{\partial\hspace{1pt}}m=e + \delta{\partial\hspace{1pt}}si^{\partial\hspace{1pt}}m e_2.$ Then, ${\partial\hspace{1pt}}si^{\partial\hspace{1pt}}m( \cdot , t) :{\mathbb R}^2_{\partial\hspace{1pt}}m\mapsto {\mathcal O}mega^{\partial\hspace{1pt}}m(t)$ is a solution to \begin{subequations}\label{psi_eq} \begin{alignat}{2} {\mathcal D}elta {\partial\hspace{1pt}}si^{\partial\hspace{1pt}}m &= 0 &&\text{in}\quad {\mathbb R}^2_{\partial\hspace{1pt}}m\,,\\ {\partial\hspace{1pt}}si^{\partial\hspace{1pt}}m &= e + h e_2 \qquad&&\text{on}\quad \{x_2 = 0\}\,, \end{alignat} \end{subequations} We note that ({\rm e}f{psi_eq}b) is the same as $$ {\partial\hspace{1pt}}si(x_1,0,t) = \big(x_1, h(x_1,t)\big)\,. 
\eqno{\rm(\ref{psi_eq}b')} $$ Setting $J^\pm= \det(\nabla\psi^\pm)$, we see that $$ A^\pm = (\nabla \psi^\pm)^{-1} = (J^\pm)^{-1} \left[\begin{array}{cc} (\psi^\pm)^2,_2 & - (\psi^\pm)^1,_2 \\ -(\psi^\pm)^2,_1 & (\psi^\pm)^1,_1 \end{array} \right]=\frac{1}{1+\delta\psi^\pm_{,2}} \left[\begin{array}{cc} 1+\delta\psi^\pm_{,2} & 0 \\ -\delta\psi^\pm_{,1} & 1 \end{array} \right]\,. $$ For a fixed $s>0$, using classical elliptic theory, we have $\|\nabla\delta\psi^\pm\|_{1+s,\pm}\leq C|h|_{1.5+s}$, and \begin{equation}\label{diffeo} J^\pm=1+\delta\psi^\pm_{,2}>1-\|\delta\psi^\pm_{,2}\|_{L^\infty({\mathbb R}^2)}>1-C\|\nabla\delta\psi^\pm\|_{1+s,\pm}>1-C|h|_{1.5+s}. \end{equation} Consequently, if $|h( \cdot , t)|_{1.5+s}$ is sufficiently small, then $\psi(t)$ is a diffeomorphism. For example, $|h( \cdot , t)|_{1.5+s}$ is small whenever the initial data $h_0 \in H^{1.5+s}({\mathbb R})$ and $t$ are sufficiently small. 
Letting $$v^{\partial\hspace{1pt}}m = u^{\partial\hspace{1pt}}m \circ {\partial\hspace{1pt}}si \ \text{ and } \ q^{\partial\hspace{1pt}}m = p^{\partial\hspace{1pt}}m\circ{\partial\hspace{1pt}}si\,,$$ the chain-rule shows that ({\rm e}f{HS_Eulerian}) can be written on the fixed domains as \begin{subequations}\label{HS_ALE} \begin{alignat}{2} \mu^{\partial\hspace{1pt}}m v^{\partial\hspace{1pt}}m+ (A^{\partial\hspace{1pt}}m)^T {\rm n}abla q^{\partial\hspace{1pt}}m &= - {\rm h}o^{\partial\hspace{1pt}}m \delta^i_2 \qquad&&\text{in}\quad {\mathbb R}^2_{\partial\hspace{1pt}}m\,,\\ (A^{\partial\hspace{1pt}}m)^j_i (v^{\partial\hspace{1pt}}m)^i,_j &= 0 &&\text{in}\quad {\mathbb R}^2_{\partial\hspace{1pt}}m\,, \end{alignat} \end{subequations} where $\delta^j_i$ is the Kronecker delta. \subsubsection{The evolution equation for $h$} We derive the evolution equation for $h$ to complete the system ({\rm e}f{HS_ALE}). We first note that $$ J^{\partial\hspace{1pt}}m (A^{\partial\hspace{1pt}}m)^{\rm T} e_2 = (-({\partial\hspace{1pt}}si^{\partial\hspace{1pt}}m)^2,_1, ({\partial\hspace{1pt}}si^{\partial\hspace{1pt}}m)^1,_1) = ({\partial\hspace{1pt}}si^{{\partial\hspace{1pt}}m{\partial\hspace{1pt}}rime})^{\partial\hspace{1pt}}erp\,, $$ where $f^{\partial\hspace{1pt}}erp = (-f_2,f_1)$. Since ${\partial\hspace{1pt}}si^{{\partial\hspace{1pt}}m{\partial\hspace{1pt}}rime}( \cdot ,t)$ is tangent to ${\mathcal G}amma(t)$, we must have ${\partial\hspace{1pt}}si^{{\partial\hspace{1pt}}m{\partial\hspace{1pt}}rime}( \cdot ,t)^{\partial\hspace{1pt}}erp$ is a normal vector field to ${\mathcal G}amma(t)$; moreover, by ({\rm e}f{psi_eq}b) we must have \begin{equation}\label{JAtN_id} J A^{\rm T} e_2 = (-h',1)\quad\text{on}\quad\{x_2 = 0\}\,. \end{equation} The identity above also suggests that \begin{equation}\label{defn:n} n\circ {\partial\hspace{1pt}}si = \frac{(-h',1)}{\sqrt{1+h^{{\partial\hspace{1pt}}rime 2}}} \quad\text{on}\quad\{x_2 = 0\}\,. 
\end{equation} On the other hand, differentiating ({\rm e}f{psi_eq}b') in $t$, we find that \begin{equation} {\partial\hspace{1pt}}si_t \cdot (n\circ {\partial\hspace{1pt}}si) = h_t \big(e_2 \cdot (n\circ {\partial\hspace{1pt}}si)\big)\qquad\text{on}\quad \{x_2 = 0\}\,. \label{h_eq_temp} \end{equation} By ({\rm e}f{HS_Eulerian}c) (or the interface moves along with the fluid velocity), ${\partial\hspace{1pt}}si_t \cdot (n\circ {\partial\hspace{1pt}}si) = (u\cdot n)\circ {\partial\hspace{1pt}}si$; thus ({\rm e}f{JAtN_id}, ({\rm e}f{defn:n}) and ({\rm e}f{h_eq_temp}) imply that $$ v \cdot (n\circ {\partial\hspace{1pt}}si) = \frac{h_t}{\sqrt{1+h^{{\partial\hspace{1pt}}rime 2}}} $$ or equivalently, $$ h_t = v \cdot (-h',1) = v \cdot (J A^{\rm T} e_2) = (JAv) \cdot e_2\,\text{ on } \{x_2=0\}. \eqno{\rm({\rm e}f{HS_ALE}c)} $$ The coupled equations ({\rm e}f{psi_eq}a,b) and ({\rm e}f{HS_ALE}a,b,c), together with the initial condition $$ h = h_0 \qquad\text{on}\quad\{t=0\} \eqno{\rm({\rm e}f{HS_ALE}d)} $$ is the ALE formulation of ({\rm e}f{HS_Eulerian}). \subsection{The semi-ALE formulation}\label{sec2.2} For the purposes of reinstating a linear divergence-free constraint on the velocity field, we let \begin{equation}\label{w-vel} w^{\partial\hspace{1pt}}m = J^{\partial\hspace{1pt}}m A^{\partial\hspace{1pt}}m v^{\partial\hspace{1pt}}m \end{equation} or componentwise, $w^{\partial\hspace{1pt}}m\cdot e_k = J^{\partial\hspace{1pt}}m (A^{\partial\hspace{1pt}}m)^k_i (v^{\partial\hspace{1pt}}m)\cdot e_i$. Then, by the Piola identity, $(J^{\partial\hspace{1pt}}m(A^{\partial\hspace{1pt}}m)^i_{j})_{,i}=0,$ and ({\rm e}f{HS_ALE}b) implies that $$ {\operatorname{div}} w^{\partial\hspace{1pt}}m = 0 \qquad\text{in}\quad{\mathbb R}^2_{\partial\hspace{1pt}}m\,. 
$$ Therefore, $(w^{\partial\hspace{1pt}}m,q^{\partial\hspace{1pt}}m,h)$ satisfies \begin{subequations}\label{HS_semiALE} \begin{alignat}{2} \mu w^{\partial\hspace{1pt}}m\cdot e_k + J^{\partial\hspace{1pt}}m (A^{\partial\hspace{1pt}}m)^k_i (A^{\partial\hspace{1pt}}m)^j_i q^{\partial\hspace{1pt}}m,_j &= - {\rm h}o^{\partial\hspace{1pt}}m J^{\partial\hspace{1pt}}m (A^{\partial\hspace{1pt}}m)^k_2 \qquad&&\text{in}\quad {\mathbb R}^2_{\partial\hspace{1pt}}m\,,\\ {\operatorname{div}}\, w^{\partial\hspace{1pt}}m &= 0 &&\text{in}\quad {\mathbb R}^2_{\partial\hspace{1pt}}m\,,\\ \jump{w\cdot e_2} = \jump{q} &= 0 &&\text{on}\quad \{x_2 = 0\}\,,\\ {\mathcal D}elta {\partial\hspace{1pt}}si^{\partial\hspace{1pt}}m &= 0 &&\text{in}\quad {\mathbb R}^2_{\partial\hspace{1pt}}m\,,\\ {\partial\hspace{1pt}}si &= e + h e_2 \qquad&&\text{on}\quad \{x_2 = 0\}\,,\\ h_t &= w \cdot e_2 \qquad &&\text{on}\quad \{x_2 = 0\}\,,\\ h &= h_0 &&\text{on}\quad {\mathbb R} \times \{t=0\}\,. \end{alignat} \end{subequations} Equation ({\rm e}f{HS_semiALE}) is the semi-ALE formulation of ({\rm e}f{HS_Eulerian}). Since $A\, {\rm n}abla{\partial\hspace{1pt}}si=\text{Id}$ we have $$ Ae_2=A\left(A^T\left({\rm n}abla{\partial\hspace{1pt}}si^T\cdot e_2{\rm i}ght){\rm i}ght), $$ and ({\rm e}f{HS_semiALE}a) can also be written as $$ \mu^{\partial\hspace{1pt}}m w^{\partial\hspace{1pt}}m\cdot e_k + J^{\partial\hspace{1pt}}m (A^{\partial\hspace{1pt}}m)^k_i (A^{\partial\hspace{1pt}}m)^j_i (q^{\partial\hspace{1pt}}m + {\rm h}o^{\partial\hspace{1pt}}m {\partial\hspace{1pt}}si^{\partial\hspace{1pt}}m\cdot e_2),_j = 0 \qquad\text{in}\quad {\mathbb R}^2_{\partial\hspace{1pt}}m\,. \eqno{\rm({\rm e}f{HS_semiALE}a')} $$ Let $Q^{\partial\hspace{1pt}}m = q^{\partial\hspace{1pt}}m + {\rm h}o^{\partial\hspace{1pt}}m x_2$. 
Since $A^{\partial\hspace{1pt}}m= [ {\rm n}abla {\partial\hspace{1pt}}si^{\partial\hspace{1pt}}m] ^{-1} $, it follows that $$ \mu^{\partial\hspace{1pt}}m \frac{({\rm n}abla{\partial\hspace{1pt}}si^{\partial\hspace{1pt}}m)^T{\rm n}abla{\partial\hspace{1pt}}si^{\partial\hspace{1pt}}m w^{\partial\hspace{1pt}}m}{J^{\partial\hspace{1pt}}m}+{\rm n}abla(Q^{\partial\hspace{1pt}}m+{\rm h}o^{\partial\hspace{1pt}}m\delta{\partial\hspace{1pt}}si^{\partial\hspace{1pt}}m)=0 \,. $$ Using $Q$ rather than $q$, we write the system ({\rm e}f{HS_semiALE}) as \begin{subequations}\label{HS_semiALE1} \begin{alignat}{2} \mu^{\partial\hspace{1pt}}m w^{\partial\hspace{1pt}}m + {\rm n}abla (Q^{\partial\hspace{1pt}}m + {\rm h}o^{\partial\hspace{1pt}}m \delta{\partial\hspace{1pt}}si^{\partial\hspace{1pt}}m) &= \left(\text{Id}-\frac{({\rm n}abla{\partial\hspace{1pt}}si^{\partial\hspace{1pt}}m)^T{\rm n}abla{\partial\hspace{1pt}}si^{\partial\hspace{1pt}}m }{J^{\partial\hspace{1pt}}m}{\rm i}ght)\mu^{\partial\hspace{1pt}}m w^{\partial\hspace{1pt}}m \qquad&&\text{in}\quad {\mathbb R}^2_{\partial\hspace{1pt}}m\,,\\ {\operatorname{div}}\, w^{\partial\hspace{1pt}}m &= 0 &&\text{in}\quad {\mathbb R}^2_{\partial\hspace{1pt}}m\,,\\ \jump{w\cdot e_2} = \jump{Q} &= 0 &&\text{on}\quad \{x_2 = 0\}\,,\\ {\mathcal D}elta \delta {\partial\hspace{1pt}}si^{\partial\hspace{1pt}}m &= 0 &&\text{in}\quad {\mathbb R}^2_{\partial\hspace{1pt}}m\,,\\ \delta{\partial\hspace{1pt}}si^{\partial\hspace{1pt}}m &= h \qquad&&\text{on}\quad \{x_2 = 0\}\,,\\ h_t &= w \cdot e_2 \qquad &&\text{on}\quad \{x_2 = 0\}\,,\\ h &= h_0 &&\text{on}\quad {\mathbb R} \times \{t=0\}\,. \end{alignat} \end{subequations} The advantage of the formulation \eqref{HS_semiALE1} is that the nonlinear terms are on the right-hand side, keeping the left-hand side linear. 
Indeed, using $$ {\rm n}abla{\partial\hspace{1pt}}si^{\partial\hspace{1pt}}m={\rm n}abla(x+\delta{\partial\hspace{1pt}}si^{\partial\hspace{1pt}}m e_2)=\text{Id}+{\rm n}abla\delta{\partial\hspace{1pt}}si^{\partial\hspace{1pt}}m e_2\,, $$ we have that \begin{equation}\label{eq1} \left(\text{Id}-\frac{({\rm n}abla{\partial\hspace{1pt}}si)^T{\rm n}abla{\partial\hspace{1pt}}si^{\partial\hspace{1pt}}m }{J^{\partial\hspace{1pt}}m}{\rm i}ght)\mu^{\partial\hspace{1pt}}m w^{\partial\hspace{1pt}}m=\left(\begin{array}{cc}\delta{\partial\hspace{1pt}}si^{\partial\hspace{1pt}}m_{,2}-(\delta{\partial\hspace{1pt}}si^{\partial\hspace{1pt}}m_{,1})^2 & -\delta{\partial\hspace{1pt}}si^{\partial\hspace{1pt}}m_{,1}(1+\delta{\partial\hspace{1pt}}si^{\partial\hspace{1pt}}m_{,2})\\ -\delta{\partial\hspace{1pt}}si^{\partial\hspace{1pt}}m_{,1}(1+\delta{\partial\hspace{1pt}}si^{\partial\hspace{1pt}}m_{,2}) & -\delta{\partial\hspace{1pt}}si^{\partial\hspace{1pt}}m_{,2}(1+\delta{\partial\hspace{1pt}}si^{\partial\hspace{1pt}}m_{,2})\end{array}{\rm i}ght)\frac{\mu^{\partial\hspace{1pt}}m w^{\partial\hspace{1pt}}m}{J^{\partial\hspace{1pt}}m} \end{equation} \section{The approximate $ \kappa $-problem} \subsection{An approximation of the semi-ALE formulation: the $\kappa $-problem} Letting $A^{\partial\hspace{1pt}}m_ \kappa = ( {\rm n}abla {\partial\hspace{1pt}}si^{\partial\hspace{1pt}}m_ \kappa ) ^{-1} $, we define the following approximation of ({\rm e}f{HS_semiALE1}) which we term the $ \kappa $-problem: \begin{subequations}\label{HS_semiALE_reg} \begin{alignat}{2} \mu^{\partial\hspace{1pt}}m w^{\partial\hspace{1pt}}m+J^{\partial\hspace{1pt}}m_\kappa A^{\partial\hspace{1pt}}m_\kappa (A^{\partial\hspace{1pt}}m_\kappa)^T{\rm n}abla(Q^{\partial\hspace{1pt}}m+{\rm h}o^{\partial\hspace{1pt}}m\delta{\partial\hspace{1pt}}si^{\partial\hspace{1pt}}m_\kappa)&=0 && \text{in}\quad {\mathbb R}^2_{\partial\hspace{1pt}}m\times [0,T_\kappa ]\,,\\ \jump{w\cdot e_2}=\jump{Q}&= 0 && \text{on}\quad {\mathcal 
G}amma\times [0,T_\kappa ]\,,\\ {\operatorname{div}}\, w^{\partial\hspace{1pt}}m &= 0 &&\text{in}\quad {\mathbb R}^2_{\partial\hspace{1pt}}m\times [0,T_\kappa ]\,,\\ {\mathcal D}elta \delta{\partial\hspace{1pt}}si^{+}_\kappa &= 0 &&\text{in}\quad {\mathbb R}^2_+\times [0,T_\kappa ]\,,\\ \delta{\partial\hspace{1pt}}si^{+}_\kappa &= \mathcal{J}_\kappa\mathcal{J}_\kappa h_\kappa \qquad&&\text{on}\quad {\mathcal G}amma\times [0,T_\kappa ]\,,\\ \delta{\partial\hspace{1pt}}si^{-}_\kappa(x_1,x_2) &= \delta{\partial\hspace{1pt}}si^{+}_\kappa(x_1,-x_2) \qquad&&\text{on}\quad {\mathbb R}^2_{-}\times [0,T_\kappa ]\,,\\ {\partial\hspace{1pt}}si^{{\partial\hspace{1pt}}m}_\kappa(x_1,x_2) &= (x_1,x_2+\delta{\partial\hspace{1pt}}si^{{\partial\hspace{1pt}}m}_\kappa(x_1,x_2)) \qquad&&\text{in}\quad {\mathbb R}^2_{-}\times [0,T_\kappa ]\,,\\ h_{\kappa t} &= w \cdot e_2 \qquad &&\text{on}\quad {\mathcal G}amma\times(0,T_\kappa ]\,,\\ h_\kappa &= \mathcal{J}_\kappa h_0 &&\text{on}\quad {\mathbb R} \times \{t=0\}\,. \end{alignat} \end{subequations} This approximation relies on the following two operations: \begin{enumerate} \item the initial data $h_0$ is regularized in ({\rm e}f{HS_semiALE_reg}i), and \item in order to have smooth ALE maps ${\partial\hspace{1pt}}si^{\partial\hspace{1pt}}m$ via elliptic extension, we (symmetrically) mollify the height function on ${\mathcal G}amma$ in ({\rm e}f{HS_semiALE_reg}e), thus producing a smooth evolving interface. \end{enumerate} Note that $w$ and $Q$ depend implicitly on $\kappa$. \subsection{The ALE formulation of the $\kappa$-problem} The $ \kappa $-approximation becomes very clear when we return to the original ALE formulation given in ({\rm e}f{HS_ALE}). 
Indeed, we use $A_ \kappa ^{\partial\hspace{1pt}}m $ in place of $A^{\partial\hspace{1pt}}m$ and ${\partial\hspace{1pt}}si_ \kappa ^{\partial\hspace{1pt}}m$ in place of ${\partial\hspace{1pt}}si^{\partial\hspace{1pt}}m$ and write ({\rm e}f{HS_semiALE_reg}a-b) equivalently as \begin{subequations}\label{HS_ALEk} \begin{alignat}{2} \mu^{\partial\hspace{1pt}}m \mathcal{V} ^{\partial\hspace{1pt}}m+ (A_ \kappa ^{\partial\hspace{1pt}}m)^T {\rm n}abla \mathcal{Q} ^{\partial\hspace{1pt}}m &= - {\rm h}o^{\partial\hspace{1pt}}m \delta^i_2 \qquad&&\text{in}\quad {\mathbb R}^2_{\partial\hspace{1pt}}m\times [0,T_\kappa ]\,,\\ (A_\kappa ^{\partial\hspace{1pt}}m)^j_i (\mathcal{V} ^{\partial\hspace{1pt}}m)^i,_j &= 0 &&\text{in}\quad {\mathbb R}^2_{\partial\hspace{1pt}}m\times [0,T_\kappa ]\,, \end{alignat} \end{subequations} where $$ \frac{{\rm n}abla{\partial\hspace{1pt}}si_\kappa^{\partial\hspace{1pt}}m}{J^{\partial\hspace{1pt}}m_\kappa} w ^{\partial\hspace{1pt}}m = \mathcal{V} ^{\partial\hspace{1pt}}m \text{ and } \mathcal{Q} ^{\partial\hspace{1pt}}m=Q^{\partial\hspace{1pt}}m-{\rm h}o^{\partial\hspace{1pt}}m x_2 $$ \subsection{The Eulerian formulation of the $\kappa$-problem} Pulling back ({\rm e}f{HS_semiALE_reg}) using the diffeomorphisms $({\partial\hspace{1pt}}si_ \kappa ^{\partial\hspace{1pt}}m)^{-1}$ defined in ({\rm e}f{HS_semiALE_reg}d-g), we obtain the Eulerian form of the $ \kappa $-problem: \begin{subequations}\label{HS_Eulerian_kappa} \begin{alignat}{2} \mu^{\partial\hspace{1pt}}m \mathcal{U} ^{\partial\hspace{1pt}}m + {\rm n}abla \mathcal{P} ^{\partial\hspace{1pt}}m &= - {\rm h}o^{\partial\hspace{1pt}}m e_2 \qquad&&\text{in}\quad{\mathcal O}mega_\kappa^{\partial\hspace{1pt}}m(t)\times [0,T_\kappa ]\,,\\ \operatorname{div} \mathcal{U} &= 0 &&\text{in}\quad{\mathcal O}mega_\kappa^{\partial\hspace{1pt}}m(t)\times [0,T_\kappa ]\,,\\ {\mathcal V}({\mathcal G}amma_\kappa(t)) &= \mathcal{U} ^{\partial\hspace{1pt}}m \cdot n_\kappa \qquad&&\text{on}\quad{\mathcal 
G}amma_\kappa(t)\times [0,T_\kappa ]\,,\\ \overline{{\mathcal O}mega_\kappa^+(t)}\cup{\mathcal O}mega_\kappa^-(t) &= {\mathbb R}^2 &&\text{for every }\quad t\in [0,T_\kappa ]\,, \end{alignat} \end{subequations} where \begin{align*} \mathcal{U}^{\partial\hspace{1pt}}m & = \mathcal{V}^{\partial\hspace{1pt}}m \circ ({\partial\hspace{1pt}}si_ \kappa ^{\partial\hspace{1pt}}m)^{-1} \,,\\ \mathcal{P}^{\partial\hspace{1pt}}m & = \mathcal{Q}^{\partial\hspace{1pt}}m \circ ({\partial\hspace{1pt}}si_ \kappa ^{\partial\hspace{1pt}}m)^{-1} \,, \end{align*} and \begin{align*} {\mathcal G}amma _ \kappa (t)& =\{(x_1,h^{\kappa\kappa}(x_1,t)),\,x_1\in{\mathbb R}\} \,, \\ n_\kappa (x_1,t) & =(-h^{\kappa\kappa{\partial\hspace{1pt}}rime}(x_1,t),1) \,, \\ {\mathcal O}mega^+_ \kappa (t)& =\{(x_1,x_2),x_2>h^{\kappa\kappa}(x_1,t)),\,x_1\in{\mathbb R}\} \,, \\ {\mathcal O}mega^-_ \kappa (t)&=\{(x_1,x_2),x_2<h^{\kappa\kappa}(x_1,t)),\,x_1\in{\mathbb R}\} \,. \end{align*} we obtain a solution to ({\rm e}f{HS_semiALE_reg}). \subsection{An alternative semi-ALE formulation of the $\kappa$-problem} In order to construct solutions to the $\kappa $-problem for initial height functions in $H^2({\mathcal G}amma)$ of arbitrary size, we use a different family of diffeomorphisms which have the property that the Jacobian determinant is equal to one. For this purpose, we introduce the diffeomorphisms $$ {{\mathcal P}si_ \kappa }^{{\partial\hspace{1pt}}m} = (x_1, x_2 + {h}^{\kappa\kappa}) \,.\\ $$ Because of the mollifiers present in the definition of $ {h}^{\kappa\kappa}$, we see that the maps ${\mathcal P}si_ \kappa^{\partial\hspace{1pt}}m ( \cdot ,t): {\mathcal G}amma \to {\mathcal O}mega^{\partial\hspace{1pt}}m_\kappa (t)$ are $C^ \infty $ diffeomorphisms, and that $ \det {\rm n}abla {\mathcal P}si_\kappa ^{\partial\hspace{1pt}}m =1$. 
Letting \begin{align*} \mathscr{V}^\pm& = \mathcal{U} \circ \Psi_ \kappa ^\pm \,,\\ \mathscr{Q}^\pm& = \mathcal{P} \circ \Psi_ \kappa ^\pm + \rho^\pm x_2 \,, \end{align*} and defining \begin{align*} \mathcal{A}_ \kappa ^\pm& = [ \nabla \Psi_\kappa ^\pm] ^{-1} \,, \\ \mathscr{W} ^\pm &= \mathcal{A}_ \kappa ^\pm \mathscr{V} ^\pm \,, \end{align*} we have our alternative semi-ALE description of the $ \kappa $-problem: \begin{subequations}\label{HS_semiALE_reg2} \begin{alignat}{2} \mu^\pm \mathscr{W} ^\pm+ \mathcal{A} ^\pm_\kappa (\mathcal{A} ^\pm_\kappa)^T\nabla(\mathscr{Q}^\pm+\rho^\pm h^{\kappa\kappa})&=0 && \text{in}\quad {\mathbb R}^2_\pm \times [0,T_\kappa ]\,,\\ \jump{\mathscr{W} \cdot e_2}=\jump{\mathscr{Q}}&= 0 && \text{on}\quad \Gamma \times [0,T_ \kappa ]\,,\\ {\operatorname{div}}\, \mathscr{W} ^\pm&= 0 &&\text{in}\quad {\mathbb R}^2_\pm\times [0,T_ \kappa ]\,,\\ h_t &= \mathscr{W} \cdot e_2 \qquad &&\text{on}\quad \Gamma\times (0,T_ \kappa ] \,,\\ h &= \mathcal{J}_\kappa h_0 &&\text{on}\quad\Gamma \times \{t=0\}\,. \end{alignat} \end{subequations} Note well that a solution to (\ref{HS_semiALE_reg2}) gives a solution to (\ref{HS_Eulerian_kappa}) and hence a solution to the original semi-ALE formulation (\ref{HS_semiALE_reg}). 
\subsection{The construction of solutions to the $ \kappa $-problem ({\rm e}f{HS_semiALE_reg})} In this section we prove the following result: \begin{proposition}\label{approxsol} For $h_0\in H^2$, there exist a time $T_\kappa$ and a unique solution $h\in C([0,T_\kappa],H^2({\mathbb R}))$ to the approximate $ \kappa $-problem ({\rm e}f{HS_semiALE_reg}a-i). \end{proposition} Given $$\bar{h}\in C([0,T];H^2({\mathbb R})) \ \text{ and } \ \bar{h}_t\in L^2(0,T;L^2({\mathbb R}))\,,$$ we consider the following linear problem: \begin{subequations}\label{HS_semiALE_reg_fix} \begin{alignat}{2} \mu^{\partial\hspace{1pt}}m w^{\partial\hspace{1pt}}m+\bar{J}^{\partial\hspace{1pt}}m \bar{A}^{\partial\hspace{1pt}}m (\bar{A}^{\partial\hspace{1pt}}m)^T{\rm n}abla(Q^{\partial\hspace{1pt}}m+{\rm h}o^{\partial\hspace{1pt}}m\bar h^{ \kappa \kappa })&=0 && \text{in}\quad \{{\mathbb R}^2_{{\partial\hspace{1pt}}m}\}\,,\\ \jump{w\cdot e_2}=\jump{Q}&= 0 && \text{in}\quad \{x_2 {\rm n}e 0\}\,,\\ {\operatorname{div}}\, w^{\partial\hspace{1pt}}m &= 0 &&\text{in}\quad {\mathbb R}^2_{\partial\hspace{1pt}}m\,,\\ \overline{{\mathcal P}si}^{{\partial\hspace{1pt}}m } &= (x_1, x_2 + \bar{h}^{\kappa\kappa}) \ \ &&\text{in}\quad {\mathbb R}^2_{\partial\hspace{1pt}}m\,,\\ h_{ t} &= w \cdot e_2 \qquad &&\text{on}\quad \{x_2 = 0\}\,,\\ h &= \mathcal{J}_\kappa h_0 &&\text{on}\quad {\mathbb R} \times \{t=0\}\,. \end{alignat} \end{subequations} To simplify notation, we have dropped the $\kappa$-subscript used to indicate implicit dependence on $ \kappa $, but we have kept the $ \kappa $-superscript to indicate an explicit mollification operation; in particular, $$\bar{h}^{\kappa}=\mathcal{J}_\kappa \bar{h} \ \text{ and } \ \bar{h}^{\kappa\kappa}=\mathcal{J}_\kappa\mathcal{J}_\kappa \bar{h} \,.$$ Note that $$ \|{\rm n}abla\overline{{\mathcal P}si}^{\partial\hspace{1pt}}m - \operatorname{Id} \|_{s-1,{\partial\hspace{1pt}}m}\leq C|\bar{h}^{\kappa\kappa}|_{s} \leq C(\kappa,s)|\bar{h}^\kappa|_0. 
$$ We shall also (temporarily) drop the $(\cdot)^{\partial\hspace{1pt}}m$ notation on $A$, ${\partial\hspace{1pt}}si$, ${\rm h}o$, and $\mu$, as it will be clear from the context which phase we are analyzing. \subsubsection{The existence of ${\rm n}abla Q^{\partial\hspace{1pt}}m$} Taking the divergence of ({\rm e}f{HS_semiALE_reg_fix}a) we obtain the elliptic equation for $Q^{\partial\hspace{1pt}}m$ \begin{equation}\label{eqQapprox} -\text{div}\left(\frac{1}{\mu} \overline{A}\, \overline{A}^T{\rm n}abla Q^{\partial\hspace{1pt}}m {\rm i}ght)=\text{div}\left( \frac{{\rm h}o}{\mu} \overline{A}\, \overline{A}^T {\rm n}abla \bar{h}^{ \kappa \kappa } {\rm i}ght) \text{ in } \mathbb{R}^2 _{{\partial\hspace{1pt}}m}\,, \end{equation} where ${\rm n}abla \bar{h}^{ \kappa \kappa }= (\bar{h}_{,1}^{\kappa \kappa } , 0)$. Due to the fact that the domain is unbounded, we consider a constant $ \gamma$ satisfying $0<\gamma<{\frac{1}{2}} $, and define the following elliptic equation in $ \mathbb{R}^2_{\partial\hspace{1pt}}m$ for the modified pressure functions $Q_\gamma^{\partial\hspace{1pt}}m $: \begin{equation}\label{cgsQ} \gamma Q_\gamma^{\partial\hspace{1pt}}m -\text{div}\left(\frac{1}{\mu} \overline{A}\, \overline{A}^T{\rm n}abla Q^{\partial\hspace{1pt}}m {\rm i}ght)=\text{div}\left( \frac{{\rm h}o}{\mu} \overline{A}\, \overline{A}^T {\rm n}abla \bar{h}^{ \kappa \kappa } {\rm i}ght) \text{ in } \mathbb{R}^2 _{{\partial\hspace{1pt}}m}\,. \end{equation} Using ({\rm e}f{HS_semiALE_reg_fix}a) and ({\rm e}f{HS_semiALE_reg_fix}b), we supplement ({\rm e}f{cgsQ}) with the following jump conditions across $\{x_2=0\}$: \begin{equation}\label{jump0} \jump{Q_\gamma }=0 \end{equation} and \begin{equation}\label{jump1} \bigjump{\left((1/\mu) \overline{A}\, \overline{A}^T{\rm n}abla Q_\gamma {\rm i}ght)\cdot e_2}=-\jump{({\rm h}o/\mu))\left(\overline{A}\,\overline{A}^T{\rm n}abla \bar{h}^{ \kappa \kappa }{\rm i}ght)\cdot e_2}. 
\end{equation} Recall that a function $Q_\gamma = {Q_\gamma}^+ {\bf 1}_{{\mathbb R}^2_+} + {Q_\gamma}^- {\bf 1}_{{\mathbb R}^2_-} \in H^1 ( \mathbb{R}^2 )$ is said to be a weak solution of ({\rm e}f{cgsQ})--({\rm e}f{jump1}) if \begin{equation}\label{cgs_weak} \gamma\int_{{\mathbb R}^2_+ \cup {\mathbb R}^2_-} Q_\gamma P\, dx+\int_{{\mathbb R}^2_+ \cup {\mathbb R}^2_-} {\frac{1}{\mu}} \overline{A}\, \overline{A}^T \, {\rm n}abla Q_\gamma \, {\rm n}abla P dx = \int_{\{x_2=0\}}g\, Pdx_1 +\int_{{\mathbb R}^2_+ \cup {\mathbb R}^2_-} f \, P dx \end{equation} for all $P\in H^1({\mathbb R}^2)$, where $g = \jump{({\rm h}o/\mu)) \overline{A}\,\overline{A}^T{\rm n}abla \bar{h}^{\kappa\kappa}} \cdot e_2$ and $f =\text{div}\left(({\rm h}o/\mu) \overline{A}\,\overline{A}^T{\rm n}abla \bar{h}^{\kappa\kappa}{\rm i}ght)$. This problem can be written as $$ B(Q_\gamma,P)=\mathcal{L}_1(P)+\mathcal{L}_2(P)\text{ for all }P\in H^1({\mathbb R}^2)\,, $$ where \begin{align*} B(Q_\gamma,P) &=\gamma\int_{{\mathbb R}^2_+ \cup {\mathbb R}^2_-} Q_\gamma Pdx+\int_{{\mathbb R}^2_+ \cup {\mathbb R}^2_-}{\rm n}abla P\left((1/\mu) \overline{A}\, \overline{A}^T{\rm n}abla Q_\gamma{\rm i}ght) dx, \\ \mathcal{L}_1(P)&=\int_{\{x_2=0\}}\jump{({\rm h}o/\mu)\left( \overline{A}\, \overline{A}^T{\rm n}abla \bar{h}^{ \kappa \kappa } {\rm i}ght)\cdot e_2} Pdx_1, \\ \mathcal{L}_2(P)&=\int_{{\mathbb R}^2_+ \cup {\mathbb R}^2_-}P\text{div}\left(({\rm h}o/\mu) \overline{A}\,\overline{A}^T{\rm n}abla \bar{h}^{ \kappa \kappa }{\rm i}ght)dx. \end{align*} The existence of $Q_\gamma \in H^1( {{\mathbb R}^2})$ will follow from the Lax-Milgram theorem, once we verify the necessary hypotheses. From the fundamental theorem of calculus, we have that $$ \|\overline{A}_0\overline{A}_0^T-\overline{A}( \cdot , t) \overline{A}^T(\cdot ,t)\|_{L^\infty}\leq C_ \kappa \sqrt{t} \int_0^t|\bar h_t(s)|^2_0ds \le C_ \kappa \sqrt{t} \,, $$ where $C_ \kappa $ is a constant which depends on $ \kappa $. 
Since $[\overline{A}_0 \overline{A}_0^T]^i_j \xi _i\xi _j \ge \lambda | \xi |^2$, we see that for $t$ sufficiently small, $$ {\frac{\lambda }{2}} |\xi|^2\leq [\overline{A} (\cdot, t ) \overline{A}^T(\cdot , t)]^i_j\xi^i\xi^j\leq 2 \lambda |\xi|^2, $$ The bilinear form is bounded, as $$ |B(Q_\gamma,P)|\leq C(\bar{h}^{\kappa\kappa})\|Q_\gamma\|_{1,{\partial\hspace{1pt}}m}\|P\|_{1,{\partial\hspace{1pt}}m}, $$ and it is also coercive, since $$ |B(Q_\gamma,Q_\gamma)| \geq c(\gamma,\lambda) \|Q_\gamma\|^2_{1,{\partial\hspace{1pt}}m}. $$ Thus, we need to prove that $\mathcal{L}_i(P)$ are continuous functionals on $H^1({\mathbb R}^2)$. We have that $$ |\mathcal{L}_1(P)|\leq C(\bar{h}^{\kappa\kappa})|P|_0\leq C(\bar{h}^{\kappa\kappa})\|P\|_{1,{\partial\hspace{1pt}}m}, $$ and using the divergence theorem, \begin{align*} |\mathcal{L}_2(P)| &\leq\left|\int_{{\mathbb R}^2_+ \cup {\mathbb R}^2_-}{\rm n}abla P({\rm h}o/\mu) \overline{A}\,\overline{A}^T{\rm n}abla \overline{A} dx{\rm i}ght|+\left|\int_{\{x_2=0\}} P\jump{({\rm h}o/\mu) \overline{J}\, \overline{A}\, \overline{A}^T{\rm n}abla \bar{h}^{\kappa\kappa}}e_2dx_1{\rm i}ght|\\ & \leq C(\bar{h}^{\kappa\kappa})\left(\|{\rm n}abla P\|_{0,{\partial\hspace{1pt}}m}+|P|_0{\rm i}ght)\leq C(\bar{h}^{\kappa\kappa})\|P\|_{1,{\partial\hspace{1pt}}m}. \end{align*} We have thus verified the hypotheses of the Lax-Milgram theorem. To obtain estimates which are uniform in $\gamma$, we test ({\rm e}f{cgsQ}) with $Q_\gamma$, and integrate by parts. 
Since $Q_\gamma \in H^1( \mathbb{R}^2 )$, ${Q_\gamma}^+ = {Q_\gamma}^-$ on $\{x_2=0\}$ (in the sense of trace); hence, we have that \begin{align*} {\frac{1}{2}} \|{\rm n}abla Q_\gamma\|_{0,{\partial\hspace{1pt}}m}^2 & \leq -\int_{\{x_2=0\}} Q_\gamma \jump{\left((\overline{J}/\mu) \overline{A} \,\overline{A}^T{\rm n}abla Q_\gamma{\rm i}ght)\cdot e_2} dx_1 +\int_{{\mathbb R}^2_+ \cup {\mathbb R}^2_-}{\rm n}abla Q_\gamma({\rm h}o/\mu) \overline{A}\, \overline{A}^T{\rm n}abla \bar{h}^{\kappa\kappa} dx \\ & \qquad -\int_{\{x_2=0\}} Q_\gamma\jump{\left(({\rm h}o/\mu) \overline{A}\, \overline{A}^T{\rm n}abla \bar{h}^{ \kappa \kappa } {\rm i}ght)\cdot e_2}dx_1. \end{align*} In particular, using the jump condition \eqref{jump1}, we find that \begin{equation}\label{cgs1001} \|{\rm n}abla Q_\gamma\|_{0,{\partial\hspace{1pt}}m}\leq C|\bar{h}^{\kappa\kappa}|_{0.5}, \end{equation} where the constant in the right-hand side is independent of $\gamma$. As such, we obtain the existence of a weak limit $ {\rm n}abla Q_ \gamma {\rm i}ghtharpoonup F\in L^2({\mathbb R}^2)$; moreover, the weak limit is a gradient: $F={\rm n}abla Q$. Indeed, if $U\subset {\mathbb R}^2$, by means of the Poincar\'e inequality, we have that $$ \|Q_\gamma-\text{mean}(Q_\gamma)\|_{L^2(U)}\leq C(U)\|{\rm n}abla Q_\gamma\|_{0,{\partial\hspace{1pt}}m}. $$ In particular, we obtain that $Q_\gamma$ converges weakly in $L^2(U)$. We write $Q$ for this limit and note that $ {\rm n}abla Q$ also satisfies ({\rm e}f{cgs1001}). Thus, considering a test function ${\partial\hspace{1pt}}hi$ with compact support within $U$, as $ \gamma \to 0$, we have that \begin{align*} \int_U {\partial\hspace{1pt}}hi{\rm n}abla Q_\gamma dx=-\int_U \text{div}{\partial\hspace{1pt}}hi Q_\gamma dx & {\rm i}ghtharpoonup -\int_U \text{div}{\partial\hspace{1pt}}hi Q dx=\int_U {\partial\hspace{1pt}}hi {\rm n}abla Q dx, \\ \int_U {\partial\hspace{1pt}}hi{\rm n}abla Q_\gamma dx & {\rm i}ghtharpoonup \int_U {\partial\hspace{1pt}}hi F dx. 
\end{align*} Using the uniqueness of the weak limit, we conclude the claim. We then easily obtain that $Q\in L^2_{loc}({\mathbb R}^2)\cap \dot{H}^1({\mathbb R}^2_\pm)$ is a distributional solution to $$ -\text{div}\left((1/\mu) \overline{A}\, \overline{A}^T\nabla Q\right)=\text{div}\left((\rho/\mu) \overline{A}\, \overline{A}^T\nabla \bar{h}^{ \kappa \kappa } \right). $$ \subsubsection{The existence of $w$ and $h$} We consider the Banach space $$ X=\{(f,f_t),\,f\in C(0,T;H^2),\,f_t\in L^2(0,T;L^2)\}, $$ with norm $$ \|(f,f_t)\|_X=\max_{0\leq s\leq t}\|f(s)\|_{H^2}+\left(\int_0^t\|f_t(s)\|_{L^2}^2ds\right)^{0.5}. $$ We define the operator $S[\bar{h},\bar{h}_t]$ by $$ S[\bar{h},\bar{h}_t] =\left(h(t),w_2(\cdot,0,t)\right)=\left(h(x_1,0) +\int_0^t w(x_1,0,s)\cdot e_2ds,w(x_1,0,t)\cdot e_2\right). $$ As $\bar{h}^{\kappa\kappa}$ is $C^\infty$, the same is true for $\bar{\Psi},\bar{A},\bar{J}$. The usual elliptic estimates for \eqref{eqQapprox} provide the regularity $$ \nabla Q\in L^\infty(0,T;C^\infty({\mathbb R}^2_\pm)). $$ Using (\ref{HS_semiALE_reg_fix}a), we have that $$ w \in L^\infty(0,T;C^\infty({\mathbb R}^2_\pm)). $$ Consequently, the operator $S$ verifies $$ S:X\rightarrow X. $$ For two pairs ($\bar{h}_1,\bar{h}_{1t}$) and ($\bar{h}_2,\bar{h}_{2t}$), we estimate the Lipschitz norm: \begin{eqnarray*} \|S[\bar{h}_1,\bar{h}_{1t}]-S[\bar{h}_2,\bar{h}_{2t}]\|_X&\leq& T\max_{0\leq t\leq T}|(w_{1}(t)-w_{2}(t))\cdot e_2|_2 \\ &&+\left(\int_0^T |(w_1(s)-w_2(s))\cdot e_2|_0^2 ds\right)^{0.5}\\ &\leq& \sqrt{T}C_\kappa\|\left(\bar{h}_1,\bar{h}_{1t}\right)-\left(\bar{h}_2,\bar{h}_{2t}\right)\|_X \end{eqnarray*} Now, if $T=T_\kappa$ is chosen small enough, then the mapping $S$ is a contraction and then there exists a unique fixed-point, which is a local solution of our approximate $\kappa $-problem. 
\section{Proof of Theorem \ref{localsmall}: Local well-posedness for the infinitely-deep case}\label{sec2} \subsection{$\kappa$-independent estimates}\label{localsmall3} In this section, we prove that there is a time of existence $T^*$, independent of $\kappa$, and a priori estimates on $[0,T^*]$ also independent of $ \kappa $; we will thus be able to pass to the limit as $ \kappa \to 0$ and conclude the existence of a limiting function $h$. To do so, we define the higher-order energy function (or norm) that will be shown to be bounded independent of $\kappa $: \begin{equation}\label{energy} E(t)=\max_{0\leq s\leq t}\{|h^{\kappa}(s)|_2^2\}+\int_0^t\|w(s)\|_{2,\pm}^2ds. \end{equation} Then, using Proposition \ref{approxsol}, there exists an approximate solution up to time $T_\kappa$ for every $\kappa>0$. We can take $T_\kappa$ as small as needed to ensure that $$ \sup_{0\leq t\leq T_\kappa}E(t)\leq z^*,\;\forall \kappa>0, $$ for a constant $z^*$ that will be specified below. A priori, these times $T_\kappa$ may tend to zero as $\kappa \to 0$. In the following sections, we are going to obtain uniform bounds for $E(t)$ up to a uniform time $T^*$, preventing the shrinking of the lifespan of the solution as $ \kappa \to 0$. For the sake of clarity, we take $s=0.25$ in the statement of Theorem \ref{localsmall} (the proof for general $s$ is analogous) and consider $\sigma\ll1$ a universal constant (that will be specified below). Furthermore, we take $T_\kappa$ small enough so we can ensure that \begin{equation}\label{smallnessbootstrap} \sup_{0\leq t\leq T_\kappa}|h^\kappa(t)|_{1.75}< \sigma. 
\end{equation} \subsubsection{The estimates of $\delta\psi,J$} Using classical elliptic theory for the equations (\ref{HS_semiALE_reg}d-f), we get \begin{equation}\label{ellipticdeltapsi} \|\nabla \delta\psi\|_{s,\pm}\leq C|h^{\kappa\kappa}|_{s+0.5}, \end{equation} thus, $$ \|\nabla\psi- \operatorname{Id} \|_{s,\pm}\leq C|h^{\kappa\kappa}|_{s+0.5}, \text{ and }\|J- 1\|_{s,\pm}=\|\delta\psi_{,2}\|_{s,\pm}\leq C|h^{\kappa\kappa}|_{s+0.5}. $$ \subsubsection{Estimates for $h\in L^\infty(0,T_\kappa;L^2({\mathbb R}))$, $v^\pm\in L^2(0,T_\kappa;L^2({\mathbb R}^2_\pm))$}\label{secmaxprinL2} We let $a = J\, A$ denote the cofactor matrix of $\nabla \psi$; using the fact that $\nabla \psi^2 = e_2$, we write (\ref{HS_ALE}a) as \begin{equation}\label{cgs1000} \mu J v^i + a^k_i \left(q + \rho \psi^2\right)_{,k} = 0 \text{ in } \mathbb{R}^2_\pm \times (0,T_\kappa ] \,. \end{equation} Since on $\Gamma:= \mathbb{R} $, $\psi^2 = h$ and $v \cdot \tilde n = h_t$, taking the $L^2( \mathbb{R}^2_\pm)$ inner-product of (\ref{cgs1000}) with $v^i$, and using the fact that $a^k_i,_k =0$ by the Piola identity and that $ a^k_i N_k = \tilde n$, we obtain the basic $L^2$ energy law: $$ \frac{1}{2}\frac{d}{dt}\|\mathcal{J}_\kappa h(t)\|^2_{L^2({\mathbb R})}+\frac{1}{-\jump{\rho}}\|\sqrt{\mu^\pm J} v^\pm\|_{L^2({\mathbb R}^2_\pm)}^2=0. 
$$ Integrating in time, we find that $$ |h^{\kappa\kappa}(t)|_0\leq |\mathcal{J}_\kappa h(t)|_{0}\leq |\mathcal{J}_\kappa\mathcal{J}_\kappa h_0|_{0}\leq |h_0|_{0}, $$ and $$ \mu^\pm\int_0^t\|\sqrt{J} v^\pm\|_{L^2({\mathbb R}^2_\pm)}^2 ds\leq -\jump{\rho}|h_0|_{0}. $$ From (\ref{cgs1001}) and the smallness bound \eqref{smallnessbootstrap}, we see that $$ \|\nabla Q\|_{0,\pm}\leq C \sigma ,\,\text{and}\, \|w\|_{0,\pm}\leq C \sigma \,. $$ \subsubsection{Verifying the smallness condition for $|h^{\kappa}|_{1.75},\|w\|_{1.5,\pm}$}\label{sec4.3} Using (\ref{HS_semiALE_reg_fix}g) together with the Cauchy-Schwarz and trace inequalities, we have that \begin{equation}\label{H1.5} |h(t)-\mathcal{J}_\kappa h_0|_{1.5}=\left|\int_0^t w\cdot e_2 ds\right|_{1.5} \leq \sqrt{t}\sqrt{C\int_0^t\|w(s)\|_{2,\pm}^2 ds} \leq \sqrt{tCE(t)} \,, \end{equation} and that $$ |h^{\kappa }(t)-\mathcal{J}_\kappa^2 h_0|_{1.5}=\left|\mathcal{J}_\kappa\int_0^t w\cdot e_2 ds\right|_{1.5} \leq \sqrt{tCE(t)}. $$ We can ensure that $|h^{\kappa}(t)|_2\leq \sqrt{z^*}$, so that $$ |h^{\kappa}(t)-\mathcal{J}_\kappa^2 h_0|^2_{1.75}\leq C|h^{\kappa}(t)-\mathcal{J}_\kappa^2 h_0|_{1.5}|h^{\kappa}(t)-\mathcal{J}_\kappa^2 h_0|_2\leq C\sqrt{t}z^*, $$ and, by choosing \begin{equation}\label{T1} T_\kappa\leq T^*_1=\left(\frac{(\sigma-|h_0|_{1.75})^2}{4Cz^*}\right)^2, \end{equation} we have that \begin{equation}\label{H1.75} |h^{\kappa}(t)|_{1.75}\leq |h_0|_{1.75}+\sqrt{C\sqrt{t}z^*}<\sigma,\,\,\forall\,0\leq t\leq T^\kappa. 
\end{equation} Using ({\rm e}f{HS_semiALE_reg}a,c) and the fact that $\delta {\partial\hspace{1pt}}si$ is the harmonic extension of $h$, it follows that $Q$ satisfies \begin{alignat*}{2} \text{div}\left[(J/\mu) A A^T{\rm n}abla Q{\rm i}ght]&=\frac{{\rm h}o}{\mu}\text{div}\left[\left(\text{Id}-J A A^T{\rm i}ght){\rm n}abla \delta{\partial\hspace{1pt}}si{\rm i}ght]\qquad&&\text{in}\quad {\mathbb R}^2_{\partial\hspace{1pt}}m\,, \end{alignat*} with jump conditions given by ({\rm e}f{HS_semiALE_reg}b) and \eqref{jump1}. It follows that we have the following elliptic equation for $Q$: \begin{subequations}\label{Qe_eq} \begin{alignat*}{2} \mu^{-1}{\mathcal D}elta Q &= {\operatorname{div}} \big[ \mu^{-1}({\text{Id}} - J A A^T) {\rm n}abla (Q + {\rm h}o \delta{\partial\hspace{1pt}}si) \big] &&\text{in}\quad {\mathbb R}^2_{\partial\hspace{1pt}}m, \\ \jump{Q} &= 0 &&\text{on}\quad\{x_2 = 0\},\\ \bigjump{\mu^{-1}\smallexp{$\displaystyle{} \frac{{\partial\hspace{1pt}} Q}{{\partial\hspace{1pt}} {\rm N}}$}} &= \jump{\mu^{-1}({\text{Id}} - J A A^T) ({\rm n}abla Q) e_2} - \jump{\mu^{-1}{\rm h}o} J A^2_i A^j_i \delta {\partial\hspace{1pt}}si_{,j} \quad\ &&\text{on}\quad\{x_2 = 0\}. 
\end{alignat*} \end{subequations} From standard elliptic estimates, \begin{eqnarray*} \|{\rm n}abla Q\|_{1.25,{\partial\hspace{1pt}}m} &\leq& C {\mathcal B}ig[\big\|({\text{Id}} - J A A^T) {\rm n}abla (Q + {\rm h}o \delta{\partial\hspace{1pt}}si)\big\|_{1.25,{\partial\hspace{1pt}}m} \\ && + \big|\jump{\mu^{-1}({\text{Id}} - J A A^T) {\rm n}abla Q e_2}\big|_{0.75} + \big|\jump{\mu^{-1}{\rm h}o A^2_i A^j_i \delta {\partial\hspace{1pt}}si_{,j}}\big|_{0.75}{\mathcal B}ig] \\ &\leq& C {\mathcal B}ig[\big\|{\text{Id}} - J A A^T\big\|_{L^\infty({\mathbb R}^2)} \big(\|{\rm n}abla Q\|_{1.25,{\partial\hspace{1pt}}m} + {\rm h}o \|{\rm n}abla\delta{\partial\hspace{1pt}}si\big\|_{1.25,{\partial\hspace{1pt}}m}\big) \\ &&+ \big\|{\text{Id}} - J A A^T\big\|_{1.25,{\partial\hspace{1pt}}m} \big(\|{\rm n}abla Q\|_{L^\infty({\mathbb R}^2)} + {\rm h}o \|{\rm n}abla\delta{\partial\hspace{1pt}}si\|_{L^{\infty}({\mathbb R}^2)}\big) \\ &&+ \big|\jump{\mu^{-1}{\rm h}o JA^2_i A^j_i \delta {\partial\hspace{1pt}}si_{,j}}\big|_{0.75}{\mathcal B}ig], \end{eqnarray*} where the constant $C$ depends on $\mu^{\partial\hspace{1pt}}m$. Using the smallness condition \eqref{H1.75}, we have $$ \big|J A^2_i A^j_i \delta {\partial\hspace{1pt}}si_{,j}\big|_{0.75} \leq C \left(|h^{\kappa\kappa}|_{1.75},\, \big\|{\text{Id}} - J A A^T\big\|_{1.25,{\partial\hspace{1pt}}m}+|h^{\kappa\kappa}|_{1.75}{\rm i}ght)\leq C |h^{\kappa\kappa}|_{1.75}. $$ As a consequence, we have \begin{equation}\label{Q1.25} \|{\rm n}abla Q\|_{1.25,{\partial\hspace{1pt}}m} \leq C|h^{\kappa\kappa}|_{1.75}. 
\end{equation} For the higher norm, we have \begin{align*} \|{\rm n}abla Q\|_{1.5,{\partial\hspace{1pt}}m} &\le C {\mathcal B}ig[\big\|({\text{Id}} - J A A^T) {\rm n}abla (Q + {\rm h}o \delta{\partial\hspace{1pt}}si)\big\|_{1.5,{\partial\hspace{1pt}}m} \\ &\qquad + \big|\jump{\mu^{-1}({\text{Id}} - J A A^T) {\rm n}abla Q e_2}\big|_1 + \big|\jump{\mu^{-1}{\rm h}o A^2_i A^j_i \delta {\partial\hspace{1pt}}si_{,j}}\big|_1{\mathcal B}ig] \\ &\le C {\mathcal B}ig[\big\|{\text{Id}} - J A A^T\big\|_{L^\infty({\mathbb R}^2)} \big(\|{\rm n}abla Q\|_{1.5,{\partial\hspace{1pt}}m} + {\rm h}o \|{\rm n}abla\delta{\partial\hspace{1pt}}si\big\|_{1.5,{\partial\hspace{1pt}}m}\big) \\ &\qquad + \big\|{\text{Id}} - J A A^T\big\|_{1.5,{\partial\hspace{1pt}}m} \big(\|{\rm n}abla Q\|_{L^\infty({\mathbb R}^2)} + {\rm h}o \|{\rm n}abla\delta{\partial\hspace{1pt}}si\|_{L^{\infty}({\mathbb R}^2)}\big) \\ &\qquad + \big|\jump{\mu^{-1}{\rm h}o J A^2_i A^j_i \delta {\partial\hspace{1pt}}si_{,j}}\big|_1{\mathcal B}ig]\\ &\le C {\mathcal B}ig[\big\|{\text{Id}} - J A A^T\big\|_{L^\infty({\mathbb R}^2)} \big(\|{\rm n}abla Q\|_{1.5,{\partial\hspace{1pt}}m} + {\rm h}o \|{\rm n}abla\delta{\partial\hspace{1pt}}si\big\|_{1.5,{\partial\hspace{1pt}}m}\big) \\ &\qquad + \big\|{\text{Id}} - J A A^T\big\|_{1.5,{\partial\hspace{1pt}}m} \big(\|{\rm n}abla Q\|_{1.25,{\partial\hspace{1pt}}m} + |h^{\kappa\kappa}|_{1.75} \big) \\ &\qquad + \big|\jump{\mu^{-1}{\rm h}o J A^2_i A^j_i \delta {\partial\hspace{1pt}}si_{,j}}\big|_1{\mathcal B}ig]\,. \end{align*} Using \eqref{JAtN_id} and \eqref{jump1}, $$ \big|J A^2_i A^j_i \delta {\partial\hspace{1pt}}si_{,j}\big|_1 \leq C |h^{\kappa\kappa}|_{1.75}(1+|h^{\kappa\kappa}|_{1.75}) |h^{\kappa\kappa}|_2, $$ and $$ \big\|{\text{Id}} - J A A^T\big\|_{1.5,{\partial\hspace{1pt}}m}\leq C |h^{\kappa\kappa}|_{2}(1+|h^{\kappa\kappa}|_{1.75})\leq C |h^{\kappa\kappa}|_{2}. 
$$ Using $1+\sigma<2$, $$ \|\nabla Q\|_{1.5,\pm} \leq (C|h^{\kappa\kappa}|_{1.75} \big(1 + |h^{\kappa\kappa}|_{1.75}\big))\|\nabla Q\|_{1.5,\pm} +C|h^{\kappa\kappa}|_{2}|h^{\kappa\kappa}|_{1.75}+|h^{\kappa\kappa}|_{2}|h^{\kappa\kappa}|_{1.75}, $$ and, using the smallness condition \eqref{H1.75}, \begin{equation}\label{Q1.5} \|\nabla Q\|_{1.5,\pm} \leq C|h^{\kappa\kappa}|_{1.75}|h^{\kappa\kappa}|_{2}. \end{equation} Using (\ref{HS_semiALE1}a) and \eqref{eq1}, we obtain \begin{align} \|w\|_{1.5,\pm} & \leq C|h^{\kappa\kappa}|_{1.75}|h^{\kappa\kappa}|_{2} \,,\\ \|w\|_{1.25,\pm} & \leq C|h^{\kappa\kappa}|_{1.75} \,. \label{w1.25} \end{align} \subsubsection{The Rayleigh-Taylor stability condition revisited}\label{sec2.5.4} Once we have the smallness condition $$ \sup_{0\leq t\leq T^\kappa}\|w(t)\|_{1.25,\pm} \leq C|h^{\kappa\kappa}(t)|_{1.75}\leq \sigma, $$ we find that \begin{equation}\label{smallv} \sup_{0\leq t\leq T^\kappa}\|v(t)\|_{1.25,\pm}\leq C\sup_{0\leq t\leq T^\kappa}\left\|\nabla \psi\cdot w\right\|_{1.25,\pm}\leq C\sigma. \end{equation} The Rayleigh-Taylor stability condition is controlled as follows: $$ RT(t)>-\frac{\jump{\rho}}{2} -|\jump{\mu}|2\|v\|_{1.25,\pm}\geq -\frac{\jump{\rho}}{2} -|\jump{\mu}|C\sigma. $$ Consequently, if we impose $$ \sigma\leq \frac{-\jump{\rho}}{4C|\jump{\mu}|}, $$ the Rayleigh-Taylor stability condition is satisfied for every time $0\leq t\leq T^\kappa$. Furthermore, we have \begin{equation}\label{RTalltime} -\jump{\rho}-\jump{\mu}v\cdot n\sqrt{1+ h^{\kappa\kappa\prime 2}}\geq-\jump{\rho}-|\jump{\mu}|2\|v\|_{1.25,\pm}\geq \frac{-\jump{\rho}}{2},\,\,\forall\,0\leq t\leq T^\kappa. 
\end{equation} \subsubsection{Estimates for $h\in L^2(0,T;H^{2.5}({\mathbb R}))$}\label{sectionH2.5} Taking the inner-product of the equation ({\rm e}f{HS_ALE}a) with the tangent vector ${\partial\hspace{1pt}}si'$, we find that $$ \mu^{\partial\hspace{1pt}}m v^{\partial\hspace{1pt}}m \cdot {\partial\hspace{1pt}}si' + Q^{\partial\hspace{1pt}}m_{,1} + {\rm h}o^{\partial\hspace{1pt}}m \delta {\partial\hspace{1pt}}si,_1 = 0 \qquad\text{on}\quad\{x_2=0\}\,. $$ Taking the difference of the equations above, by ({\rm e}f{HS_semiALE_reg}e), we obtain that $$ \jump{\mu v \cdot {\partial\hspace{1pt}}si'} + \jump{{\rm h}o} h^{\kappa\kappa}_{,1} = 0\,. $$ Then the equation above implies that $$ {\mathcal B}igjump{\mu v \cdot \frac{(1,h^{\kappa\kappa{\partial\hspace{1pt}}rime})}{\sqrt{1+ h^{\kappa\kappa{\partial\hspace{1pt}}rime 2}}}} + \jump{{\rm h}o} \frac{h^{\kappa\kappa{\partial\hspace{1pt}}rime}}{\sqrt{1+ h^{\kappa\kappa{\partial\hspace{1pt}}rime 2}}} = 0\,. $$ Differentiating the equation above with respect to $x_1$ and using that the normal velocity is continuous, we conclude that $h^{\kappa \kappa}$ satisfies that \begin{equation}\label{h_reg_eq} -\jump{{\rm h}o}h^{\kappa\kappa{\partial\hspace{1pt}}rime{\partial\hspace{1pt}}rime} = \jump{\mu}(v\cdot n)\sqrt{1+h^{\kappa\kappa{\partial\hspace{1pt}}rime 2}}h^{\kappa\kappa{\partial\hspace{1pt}}rime{\partial\hspace{1pt}}rime}+(1+h^{\kappa\kappa{\partial\hspace{1pt}}rime 2}) \jump{\mu v' \cdot (1,h^{\kappa\kappa{\partial\hspace{1pt}}rime})}\,. 
\end{equation} By Proposition {\rm e}f{H0.5_fg} and the trace theorem, the inequality above further implies that \begin{eqnarray}\label{h2.50} |h^{\kappa\kappa{\partial\hspace{1pt}}rime{\partial\hspace{1pt}}rime}|_{0.5} &\leq& C (1+\left|h^{\kappa\kappa}{\rm i}ght|^3_{1.75}) \left(\left|v^+{\rm i}ght|_{1.5}+\left|v^-{\rm i}ght|_{1.5}{\rm i}ght)+C |h^{\kappa\kappa{\partial\hspace{1pt}}rime{\partial\hspace{1pt}}rime}|_{0.5} |v\cdot (-h^{\kappa\kappa{\partial\hspace{1pt}}rime},1)|_{0.75} \,. \end{eqnarray} Since $ v = \frac{{\rm n}abla {\partial\hspace{1pt}}si w}{J}$, \begin{align}\label{h2.51} |v^{\partial\hspace{1pt}}m|_{1.5} &\leq C\left\|\frac{(\text{Id}+{\rm n}abla(\delta{\partial\hspace{1pt}}si^{\partial\hspace{1pt}}m e_2)) w^{\partial\hspace{1pt}}m}{J}{\rm i}ght\|_{2,{\partial\hspace{1pt}}m}{\rm n}onumber\\ &\leq C\left\|\frac{w^{\partial\hspace{1pt}}m}{J}{\rm i}ght\|_{2,{\partial\hspace{1pt}}m}+C\left\|\frac{{\rm n}abla (\delta{\partial\hspace{1pt}}si^{\partial\hspace{1pt}}m e_2) w^{\partial\hspace{1pt}}m}{J}{\rm i}ght\|_{2,{\partial\hspace{1pt}}m} \end{align} with \begin{eqnarray}\label{h2.52} \left\|\frac{w^{\partial\hspace{1pt}}m}{J}{\rm i}ght\|_{2,{\partial\hspace{1pt}}m}&\leq& C\left\|w^{\partial\hspace{1pt}}m{\rm i}ght\|_{2,{\partial\hspace{1pt}}m}+C\|w^{\partial\hspace{1pt}}m\|_{L^\infty({\mathbb R}^2)}\|{\rm n}abla\delta{\partial\hspace{1pt}}si\|_{2,{\partial\hspace{1pt}}m}+C\|{\rm n}abla w\|_{L^4}\|{\rm n}abla \delta{\partial\hspace{1pt}}si_{,2}\|_{L^4}{\rm n}onumber\\ &\leq& C\left\|w^{\partial\hspace{1pt}}m{\rm i}ght\|_{2,{\partial\hspace{1pt}}m}+C\|w^{\partial\hspace{1pt}}m\|_{1.25,{\partial\hspace{1pt}}m}\|{\rm n}abla\delta{\partial\hspace{1pt}}si\|_{2,{\partial\hspace{1pt}}m}+C\|w\|_{1.5,{\partial\hspace{1pt}}m}\|{\rm n}abla\delta{\partial\hspace{1pt}}si\|_{1.5,{\partial\hspace{1pt}}m}, \end{eqnarray} \begin{eqnarray}\label{h2.53} \left\|\frac{{\rm n}abla (\delta{\partial\hspace{1pt}}si^{\partial\hspace{1pt}}m e_2) 
w^{\partial\hspace{1pt}}m}{J}{\rm i}ght\|_{2,{\partial\hspace{1pt}}m}&\leq& C\left\|w^{\partial\hspace{1pt}}m{\rm i}ght\|_{L^\infty({\mathbb R}^2)}\left[\left\|{\rm n}abla\delta{\partial\hspace{1pt}}si{\rm i}ght\|_{2,{\partial\hspace{1pt}}m}\left(1+\left\|{\rm n}abla\delta {\partial\hspace{1pt}}si{\rm i}ght\|_{L^\infty({\mathbb R}^2)}{\rm i}ght){\rm i}ght.{\rm n}onumber\\ &&\left.+\left\|D^2\delta{\partial\hspace{1pt}}si^{\partial\hspace{1pt}}m{\rm i}ght\|_{L^4}^2{\rm i}ght] +C\left\|{\rm n}abla \delta{\partial\hspace{1pt}}si J^{-1}{\rm i}ght\|_{L^\infty({\mathbb R}^2)} \left\|w{\rm i}ght\|_{2,{\partial\hspace{1pt}}m}{\rm n}onumber\\ &\leq& C\left\|w{\rm i}ght\|_{1.25,{\partial\hspace{1pt}}m}\left[\left\|{\rm n}abla\delta{\partial\hspace{1pt}}si{\rm i}ght\|_{2,{\partial\hspace{1pt}}m}\left(1+\left\|{\rm n}abla\delta {\partial\hspace{1pt}}si{\rm i}ght\|_{1.25,{\partial\hspace{1pt}}m}{\rm i}ght){\rm i}ght.{\rm n}onumber\\ &&\left.+\left\|{\rm n}abla\delta{\partial\hspace{1pt}}si{\rm i}ght\|_{1.5,{\partial\hspace{1pt}}m}^2{\rm i}ght]+C\left\|{\rm n}abla \delta{\partial\hspace{1pt}}si{\rm i}ght\|_{1.25,{\partial\hspace{1pt}}m} \left\|w{\rm i}ght\|_{2,{\partial\hspace{1pt}}m}. \end{eqnarray} Collecting the estimates \eqref{h2.50}-\eqref{h2.53}, we get \begin{align*} |h^{\kappa\kappa}|_{2.5} & \leq C\|w\|_{2,{\partial\hspace{1pt}}m}+C (1+\left|h^{\kappa\kappa}{\rm i}ght|_{1.75})^4\left(\left|h^{\kappa \kappa}{\rm i}ght|_{2.5} \left\|w{\rm i}ght\|_{1.25,{\partial\hspace{1pt}}m}{\rm i}ght.\\ & \qquad +\left|h^{\kappa\kappa}{\rm i}ght|_{1.75} \left\|w{\rm i}ght\|_{2,{\partial\hspace{1pt}}m} \left.+\left|h^{\kappa \kappa}{\rm i}ght|_{2}^2+\left|h^{\kappa \kappa}{\rm i}ght|_{2}\|w\|_{1.5,{\partial\hspace{1pt}}m}{\rm i}ght)+C\|v\|_{1.25,{\partial\hspace{1pt}}m}|h^{\kappa\kappa}|_{2.5}. 
\end{align*} Using \eqref{w1.25}, \eqref{smallv} and the smallness condition \eqref{H1.75}, we have that \begin{eqnarray*} |h^{\kappa\kappa}|_{2.5} &\leq& C\|w\|_{2,{\partial\hspace{1pt}}m}+ C \left(\left|h^{\kappa\kappa}{\rm i}ght|_{1.75} \left\|w{\rm i}ght\|_{2,{\partial\hspace{1pt}}m}+\left|h^{\kappa \kappa}{\rm i}ght|_{2}^2+\left|h^{\kappa \kappa}{\rm i}ght|_{2}\|w\|_{1.5,{\partial\hspace{1pt}}m}{\rm i}ght). \end{eqnarray*} Consequently, \begin{eqnarray}\label{h2.5} \int_0^t|h^{\kappa\kappa}|^2_{2.5} &\leq& CE(t)+t(E(t))^2+ C\sigma^2 E(t). \end{eqnarray} \subsubsection{The energy estimates}\label{sec4.5} Writing ({\rm e}f{HS_semiALE_reg}a) as $$ \mu w + {\rm n}abla (Q + {\rm h}o \delta{\partial\hspace{1pt}}si) = \left(\text{Id}-\frac{({\rm n}abla{\partial\hspace{1pt}}si)^T{\rm n}abla{\partial\hspace{1pt}}si }{J}{\rm i}ght)\mu w, $$ differentiating with respect to $x_1$ twice, testing the resulting equation against $w''$, using integration-by-parts on the gradient term, and using ({\rm e}f{HS_semiALE1}b), we find that $$ \|\sqrt{\mu}w''\|_{0,{\partial\hspace{1pt}}m}^2 -\jump{{\rm h}o}\int_{\mathbb R} \jump{(Q + {\rm h}o \delta{\partial\hspace{1pt}}si)''w''\cdot N}dx_1 = \int_{{\mathbb R}^2}\left(\left(\text{Id}-\frac{({\rm n}abla{\partial\hspace{1pt}}si)^T{\rm n}abla{\partial\hspace{1pt}}si}{J}{\rm i}ght)\mu w{\rm i}ght)''w''dx. 
$$ Using ({\rm e}f{HS_semiALE_reg}h), we see that $$ -\jump{{\rm h}o}\int_{\mathbb R} \jump{(Q + {\rm h}o \delta{\partial\hspace{1pt}}si)''w''\cdot N}dx_1 = \frac{\jump{{\rm h}o}}{2}\frac{d}{dt}|h^{\kappa{\partial\hspace{1pt}}rime{\partial\hspace{1pt}}rime}|_0^2 \,, $$ and defining $$RHS=\int_{{\mathbb R}^2}\left(\left(\text{Id}-\frac{({\rm n}abla{\partial\hspace{1pt}}si)^T{\rm n}abla{\partial\hspace{1pt}}si }{J}{\rm i}ght)\mu w{\rm i}ght)''w''dx\,,$$ we have that $$ \|\sqrt{\mu}w''\|_{0,{\partial\hspace{1pt}}m}^2 + \frac{\jump{{\rm h}o}}{2}\frac{d}{dt}|h^{\kappa{\partial\hspace{1pt}}rime{\partial\hspace{1pt}}rime}|_0^2 = RHS \,, $$ and we proceed to estimate $RHS$. Using the H\"{o}lder inequality together with \eqref{eq1}, we get that \begin{align*} RHS & =\int_{{\mathbb R}^2}\left(\left(\text{Id}-\frac{({\rm n}abla{\partial\hspace{1pt}}si)^T{\rm n}abla{\partial\hspace{1pt}}si }{J}{\rm i}ght)\mu w{\rm i}ght)''w''dx\\ & \leq C\|w''\|_{0,{\partial\hspace{1pt}}m}^2\left(\|{\rm n}abla\delta{\partial\hspace{1pt}}si\|_{L^\infty({\mathbb R}^2)}^2+\|{\rm n}abla\delta{\partial\hspace{1pt}}si\|_{L^\infty({\mathbb R}^2)}{\rm i}ght)\\ & \qquad +C\|w''\|_{0,{\partial\hspace{1pt}}m}\left[\|{\rm n}abla\delta{\partial\hspace{1pt}}si''\|_{0,{\partial\hspace{1pt}}m}\|w\|_{L^\infty({\mathbb R}^2)}{\rm i}ght.\\ & \qquad \left.+\|w'\|_{L^4({\mathbb R}^2)}\|{\rm n}abla\delta{\partial\hspace{1pt}}si'\|_{L^4({\mathbb R}^2)}{\rm i}ght]\left(\|{\rm n}abla\delta{\partial\hspace{1pt}}si\|_{L^\infty({\mathbb R}^2)}+1{\rm i}ght). 
\end{align*} Using the Sobolev inequality, we obtain that \begin{align*} RHS& \leq C\|w''\|_{0,{\partial\hspace{1pt}}m}^2\left(\|{\rm n}abla\delta{\partial\hspace{1pt}}si\|_{1.25,{\partial\hspace{1pt}}m}^2+\|{\rm n}abla\delta{\partial\hspace{1pt}}si\|_{1.25,{\partial\hspace{1pt}}m}{\rm i}ght) \\ & \qquad +C\|w''\|_{0,{\partial\hspace{1pt}}m}\left[\|{\rm n}abla\delta{\partial\hspace{1pt}}si\|_{2,{\partial\hspace{1pt}}m}\|w\|_{L^\infty({\mathbb R}^2)}+\|w\|_{1.5,{\partial\hspace{1pt}}m}^2 +\|{\rm n}abla\delta{\partial\hspace{1pt}}si\|_{1.5,{\partial\hspace{1pt}}m}^2{\rm i}ght] \left(\|{\rm n}abla\delta{\partial\hspace{1pt}}si\|_{1.25,{\partial\hspace{1pt}}m}+1{\rm i}ght). \end{align*} Using the elliptic estimate \eqref{ellipticdeltapsi}, we find that \begin{align*} RHS & \leq C\|w\|_{2,{\partial\hspace{1pt}}m}^2\left(|h^{\kappa\kappa}|_{1.75}^2+|h^{\kappa\kappa}|_{1.75}{\rm i}ght)\\ & \qquad +C\|w\|_{2,{\partial\hspace{1pt}}m}\left[|h^{\kappa\kappa}|_{2.5}\|w\|_{1.25,{\partial\hspace{1pt}}m}+\|w\|_{1,{\partial\hspace{1pt}}m}\|w\|_{2,{\partial\hspace{1pt}}m}{\rm i}ght.\\ & \qquad \left.+|h^{\kappa\kappa}|_{1.5}|h^{\kappa\kappa}|_{2.5}{\rm i}ght]\left(|h^{\kappa\kappa}|_{1.75}+1{\rm i}ght). \end{align*} Recalling \eqref{w1.25}, we get that $$ RHS\leq C\|w\|_{2,{\partial\hspace{1pt}}m}^2|h^{\kappa\kappa}|_{1.75} +C\|w\|_{2,{\partial\hspace{1pt}}m}\left[|h^{\kappa\kappa}|_{1.75}\left(|h^{\kappa\kappa}|_{2.5}+\|w\|_{2,{\partial\hspace{1pt}}m}{\rm i}ght)+|h^{\kappa\kappa}|_{1.5}|h^{\kappa\kappa}|_{2.5}{\rm i}ght] $$ Integrating in time and using \eqref{energy}, \eqref{H1.5}, \eqref{H1.75} and \eqref{h2.5}, we obtain that $$ \int_0^tRHS\leq C\sigma\left(E(t)+t(E(t))^2{\rm i}ght) $$ thus, we conclude that \begin{equation}\label{w''} \frac{-\jump{{\rm h}o}}{2}|h^{\kappa}(t)|_2^2+\min\{\mu^+,\mu^-\}\int_0^t\|w''\|_{0,{\partial\hspace{1pt}}m}^2\leq \frac{-\jump{{\rm h}o}}{2}|h_0|_2^2+C\sigma E(t)+tC(E(t))^2. 
\end{equation} \subsubsection{The Hodge decomposition elliptic estimates}\label{sec4.6} Using Proposition {\rm e}f{normaltrace}, we have that $$ |w^2|_{1.5}\leq |w''\cdot e_2|_{-0.5}\leq C\left(\|w''\|_{0,{\partial\hspace{1pt}}m}+\|{\operatorname{div}} w''\|_{0,{\partial\hspace{1pt}}m}{\rm i}ght)\leq C\|w''\|_{0,{\partial\hspace{1pt}}m}. $$ Consequently, we can bound $\int_0^t |w^2|^2_{1.5}ds$ using \eqref{w''}. Using that $u$ is irrotational in each phase, we obtain $u^2_{,1}-u^1_{,2}=A^j_1v^2_{,j}-A^j_2v^1_{,j}=0$. Recalling $$ v=J^{-1}{\rm n}abla{\partial\hspace{1pt}}si\cdot w,\text {i.e. } v^j=J^{-1}{\partial\hspace{1pt}}si^{j}_{,i}w^i, $$ and we get \begin{eqnarray*} w^2_{,1}-w^1_{,2}&=&w^2_{,1}-w^1_{,2}-A^j_1(J^{-1}{\partial\hspace{1pt}}si^{2}_{,i}w^i)_{,j}+A^j_2(J^{-1}{\partial\hspace{1pt}}si^{1}_{,i}w^i)_{,j}\\ &=& w^2_{,1}(1-A^1_1J^{-1}{\partial\hspace{1pt}}si^{2}_{,2})+w^1_{,2}(1-A^2_2J^{-1}{\partial\hspace{1pt}}si^{1}_{,1})\\ &&+\sum_{(i,j){\rm n}eq (1,2)}A^j_2J^{-1}{\partial\hspace{1pt}}si^{1}_{,i}w^i_{,j}-\sum_{(i,j){\rm n}eq (2,1)}A^j_1J^{-1}{\partial\hspace{1pt}}si^{2}_{,i}w^i_{,j}\\ &&-A^j_1(J^{-1}{\partial\hspace{1pt}}si^{2}_{,i})_{,j}w^i+A^j_2(J^{-1}{\partial\hspace{1pt}}si^{1}_{,i})_{,j}w^i. 
\end{eqnarray*} Using $1-A^1_1J^{-1}{\partial\hspace{1pt}}si^{2}_{,2}=0,$ $1-A^2_2J^{-1}{\partial\hspace{1pt}}si^{1}_{,1}=\delta{\partial\hspace{1pt}}si_{,2}(2+\delta{\partial\hspace{1pt}}si_{,2})/(1+\delta{\partial\hspace{1pt}}si_{,2})^2,$ $A^1_2=-{\partial\hspace{1pt}}si^{1}_{,2}=0$ we further simplify \begin{eqnarray*} w^2_{,1}-w^1_{,2}&=&w^1_{,2}\frac{\delta{\partial\hspace{1pt}}si_{,2}(2+\delta{\partial\hspace{1pt}}si_{,2})}{(1+\delta{\partial\hspace{1pt}}si_{,2})^2}-\frac{\delta{\partial\hspace{1pt}}si_{,1}}{1+\delta{\partial\hspace{1pt}}si_{,2}}w^1_{,1}+\frac{\delta{\partial\hspace{1pt}}si_{,1}}{1+\delta{\partial\hspace{1pt}}si_{,2}}w^2_{,2}-\left(\frac{\delta{\partial\hspace{1pt}}si_{,1}}{1+\delta{\partial\hspace{1pt}}si_{,2}}{\rm i}ght)^2w^1_{,2}\\ &&-A^j_1J^{-1}_{,j}{\partial\hspace{1pt}}si^{2}_{,i}w^i-A^j_1J^{-1}\delta{\partial\hspace{1pt}}si_{,ij}w^i-\frac{\delta{\partial\hspace{1pt}}si_{,22}}{(1+\delta{\partial\hspace{1pt}}si_{,2})^3}w^1\\ &=&w^1_{,2}\frac{\delta{\partial\hspace{1pt}}si_{,2}(2+\delta{\partial\hspace{1pt}}si_{,2})}{(1+\delta{\partial\hspace{1pt}}si_{,2})^2}-\frac{2\delta{\partial\hspace{1pt}}si_{,1}}{1+\delta{\partial\hspace{1pt}}si_{,2}}w^1_{,1}-\left(\frac{\delta{\partial\hspace{1pt}}si_{,1}}{1+\delta{\partial\hspace{1pt}}si_{,2}}{\rm i}ght)^2w^1_{,2}\\ &&+2\frac{\delta{\partial\hspace{1pt}}si_{,1}\delta{\partial\hspace{1pt}}si_{,12}w^1}{(1+\delta{\partial\hspace{1pt}}si_{,2})^2}-\frac{\delta{\partial\hspace{1pt}}si_{,11}w^1}{1+\delta{\partial\hspace{1pt}}si_{,2}}-\frac{\delta{\partial\hspace{1pt}}si_{,22}(1+(\delta{\partial\hspace{1pt}}si_{,1})^2)}{(1+\delta{\partial\hspace{1pt}}si_{,2})^3}w^1. 
\end{eqnarray*} Due to Proposition {\rm e}f{H0.5_fg}, we find that \begin{eqnarray*} \|J^3 {\operatorname{curl}} w\|_{1,{\partial\hspace{1pt}}m}&\leq& C\|w\|_{2,{\partial\hspace{1pt}}m}\|{\rm n}abla\delta{\partial\hspace{1pt}}si\|_{1.25,{\partial\hspace{1pt}}m}(1+\|{\rm n}abla\delta{\partial\hspace{1pt}}si\|_{1.25,{\partial\hspace{1pt}}m})^2\\ &&+C\|w\|_{2,{\partial\hspace{1pt}}m}\|{\rm n}abla\delta{\partial\hspace{1pt}}si\|_{1.25,{\partial\hspace{1pt}}m}^2(1+\|{\rm n}abla\delta{\partial\hspace{1pt}}si\|_{1.25,{\partial\hspace{1pt}}m})\\ &&+C\|w\|_{1.25,{\partial\hspace{1pt}}m}\|{\rm n}abla\delta{\partial\hspace{1pt}}si\|_{2,{\partial\hspace{1pt}}m}\|{\rm n}abla\delta{\partial\hspace{1pt}}si\|_{1.25,{\partial\hspace{1pt}}m}(1+\|{\rm n}abla\delta{\partial\hspace{1pt}}si\|_{1.25,{\partial\hspace{1pt}}m})\\ &&+C\|w\|_{1.25,{\partial\hspace{1pt}}m}\|{\rm n}abla\delta{\partial\hspace{1pt}}si\|_{2,{\partial\hspace{1pt}}m}(1+\|{\rm n}abla\delta{\partial\hspace{1pt}}si\|_{1.25,{\partial\hspace{1pt}}m})^2\\ &\leq&C\|w\|_{2,{\partial\hspace{1pt}}m}\|{\rm n}abla\delta{\partial\hspace{1pt}}si\|_{1.25,{\partial\hspace{1pt}}m}(1+\|{\rm n}abla\delta{\partial\hspace{1pt}}si\|_{1.25,{\partial\hspace{1pt}}m})^2\\ &&+C\|w\|_{1.25,{\partial\hspace{1pt}}m}\|{\rm n}abla\delta{\partial\hspace{1pt}}si\|_{2,{\partial\hspace{1pt}}m}(1+\|{\rm n}abla\delta{\partial\hspace{1pt}}si\|_{1.25,{\partial\hspace{1pt}}m})^2. 
\end{eqnarray*} From the smallness condition \eqref{H1.75}, we have that $$ {\frac{1}{2}}\|{\operatorname{curl}} w\|_{0,\pm}\leq \|J^3{\operatorname{curl}} w\|_{0,\pm},$$ $$ {\frac{1}{2}}\|\nabla {\operatorname{curl}} w\|_{0,\pm}\leq \|J^3 \nabla {\operatorname{curl}} w\|_{0,\pm}\leq\|\nabla(J^3{\operatorname{curl}} w)\|_{0,\pm}+\|\nabla J^3 {\operatorname{curl}} w\|_{0,\pm}\,, $$ and from the Sobolev embedding theorem, \begin{eqnarray*} \|\nabla J^3 {\operatorname{curl}} w\|_{0,\pm}^2&\leq& C(1+\|\nabla\delta\psi\|_{1.25,\pm})^4\int_{\mathbb R} (\nabla\delta\psi_{,2})^2({\operatorname{curl}} w)^2dx\\ &\leq& C(1+\|\nabla\delta\psi\|_{1.25,\pm})^4\|\nabla\nabla\delta\psi\|_{L^4}^2\|{\operatorname{curl}} w\|_{L^4}^2\\ &\leq& C(1+\|\nabla\delta\psi\|_{1.25,\pm})^4\|\nabla\delta\psi\|_{1.5,\pm}^2\|w\|_{1.5,\pm}^2. 
\end{eqnarray*} We conclude that \begin{eqnarray*} \|{\operatorname{curl}} w\|_{1,{\partial\hspace{1pt}}m}&\leq& C\|w\|_{2,{\partial\hspace{1pt}}m}\|{\rm n}abla\delta{\partial\hspace{1pt}}si\|_{1.25,{\partial\hspace{1pt}}m}(1+\|{\rm n}abla\delta{\partial\hspace{1pt}}si\|_{1.25,{\partial\hspace{1pt}}m})^2\\ &&+C\|w\|_{1.25,{\partial\hspace{1pt}}m}\|{\rm n}abla\delta{\partial\hspace{1pt}}si\|_{2,{\partial\hspace{1pt}}m}(1+\|{\rm n}abla\delta{\partial\hspace{1pt}}si\|_{1.25,{\partial\hspace{1pt}}m})^2\\ &&+C(1+\|{\rm n}abla\delta{\partial\hspace{1pt}}si\|_{1.25,{\partial\hspace{1pt}}m})^4\|{\rm n}abla\delta{\partial\hspace{1pt}}si\|_{1.5,{\partial\hspace{1pt}}m}^2\|w\|_{1.5,{\partial\hspace{1pt}}m}^2\\ &\leq& C\|w\|_{2,{\partial\hspace{1pt}}m}|h^{\kappa\kappa}|_{1.75}+C|h^{\kappa\kappa}|_{2.5}|h^{\kappa\kappa}|_{1.75}+C|h^{\kappa\kappa}|_{2}\|w\|_{1.5,{\partial\hspace{1pt}}m}^2. \end{eqnarray*} Using Proposition {\rm e}f{Hodge}, we get $$ \|w\|_{2,{\partial\hspace{1pt}}m} \leq C {\mathcal B}ig[\|w\|_{0,{\partial\hspace{1pt}}m} + \|{\operatorname{curl}} w\|_{1,{\partial\hspace{1pt}}m} + \|{\operatorname{div}} w\|_{1,{\partial\hspace{1pt}}m} + |w\cdot e_2|_{1.5}{\mathcal B}ig], $$ and, using \eqref{w''}, we get \begin{eqnarray}\label{winL2H2} \int_0^t\|w\|_{2,{\partial\hspace{1pt}}m}^2&\leq & C\left( \frac{-\jump{{\rm h}o}}{2}|h_0|_2^2+C\sigma E(t)+tC(1+E(t)+(E(t))^2)E(t){\rm i}ght). \end{eqnarray} \subsubsection{A polynomial-type inequality for the energy function $E(t)$} Notice that $$ |h^{\kappa\kappa}(t)|_{2}\leq |h^{\kappa}(t)|_{2}. $$ Furthermore, as $h^{\kappa\kappa}\in L^2(0,T_\kappa;H^{2.5}({\mathbb R}))$ and $$ |h^{\kappa\kappa}_t|_{1.5}\leq |h_{\kappa t}|_{1.5}= |w^2|_{1.5}\leq C\|w\|_{2,{\partial\hspace{1pt}}m}, $$ we have $h^{\kappa\kappa}_t\in L^2(0,T_\kappa;H^{1.5}({\mathbb R}))$. Consequently $h^{\kappa\kappa}\in C(0,T_\kappa;H^2({\mathbb R}))$ and $E(t)$ is a continuous function. 
Collecting the previous estimates \eqref{w''} and \eqref{winL2H2} yields \begin{eqnarray}\label{polinomine} E(t)&\leq & \mathcal{C}\left( \frac{-\jump{\rho}}{2}|h_0|_2^2+\sigma E(t)+t(1+E(t)+(E(t))^2)E(t)\right). \end{eqnarray} \subsubsection{The uniform-in-$\kappa$ time} Recall that we assume that $T_\kappa$ is small enough to guarantee that $E(t)\leq z^*$ for $z^*>0$ a constant (depending on the size of the initial data) that will be chosen below. We set $$ \sigma = \frac{1}{2\mathcal{C}}, $$ where $\mathcal{C}$ is the constant appearing in \eqref{polinomine}. We note that $\mathcal{C}$ is a constant depending only on the constants from the Sobolev embedding theorem and the elliptic estimate (\ref{Hodge}). We can simplify \eqref{polinomine} to find that $$ E(t)\leq 2C|h_0|_2^2+t\mathcal{P}(E(t)). $$ This inequality implies that there exists a uniform-in-$\kappa$ time, $T^*_2(z^*,|h_0|_2)$, such that $$ E(t)\leq z^*\;\forall t\leq \bar{T}_\kappa=\min\{T^*_1(|h_0|_{1.75}),T^*_2(z^*,|h_0|_2),T_\kappa\} \,; $$ see Section 9 of \cite{CoSh2006} for a proof. We set $z^*=4C|h_0|_2^2$, and recalling \eqref{T1}, we define $$ T^*=\min\{T_1^*,T_2^*\},\,\tilde{T}_\kappa=\min\{T^*(|h_0|_2,|h_0|_{1.75}),T_\kappa\}. $$ As a consequence, we have the bounds $$ E(t)\leq 4C|h_0|_2^2,\,|h^{\kappa\kappa}(t)|_{1.75}< \sigma,\,\forall t\leq \tilde{T}_\kappa. $$ Our goal now is to show that we can reach $t=T^*$. To do so, we argue by contradiction. First, we assume that $\tilde{T}_\kappa=T^*$. Then we have a uniform-in-$\kappa$ lifespan, and a bound for every approximate solution. As a consequence, we can pass to the limit in $\kappa$. On the other hand, if $\tilde{T}_\kappa=T_\kappa$, we can extend the solution up to $\tilde{T}_\kappa+\delta$, for a small enough $\delta=\delta(z^*)$. Moreover, this extended solution verifies $$ E(t)\leq 4C|h_0|_2^2,\,|h^{\kappa\kappa}(t)|_{1.75}< \sigma,\,\forall 0\leq t\leq T_\kappa+\delta,\;\;\forall\kappa. 
$$ By induction, we can reach $T^*$. This concludes the existence portion of Theorem {\rm e}f{localsmall}. \subsection{Passing to the limit as $ \kappa \to 0$} Once we have the uniform bound $$ \max_{0\leq s\leq t}\{|h^{\kappa}(s)|_2^2\}+\int_0^t|h^{\kappa\kappa}(s)|_{2.5}^2+\|w(s)\|_{2,{\partial\hspace{1pt}}m}^2ds\leq C, $$ we obtain the existence of weak limits $$ h\in L^\infty(0,T^*;H^2({\mathbb R}))\cap L^2(0,T^*;H^{2.5}({\mathbb R})), $$ $$ h_t\in L^\infty(0,T^*;H^1({\mathbb R}))\cap L^2(0,T^*;H^{1.5}({\mathbb R})), $$ $$ w\in L^\infty(0,T^*;H^{1.5}({\mathbb R}^2_{\partial\hspace{1pt}}m))\cap L^2(0,T^*;H^{2}({\mathbb R}^2_{\partial\hspace{1pt}}m)), $$ $$ {\rm n}abla Q\in L^\infty(0,T^*;H^{1.5}({\mathbb R}^2_{\partial\hspace{1pt}}m)). $$ Using the Rellich-Kondrachov compactness theorem, we can prove that $(h,w,Q)$ is a distributional solution to \eqref{HS_semiALE1}. \subsection{The uniqueness of the solution} To prove uniqueness of solutions, we use the energy method. We assume that there exists two solutions, $h_1$ and $h_2$, corresponding to the same initial data $h_0$. Furthermore, we have that the corresponding higher-order energy functions $E_1(t)$ and $E_2(t)$, defined in \eqref{energy}, are uniformly bounded: $$ E_1(t)+E_2(t)\leq 2z^*,\;\;\forall 0\leq t\leq T^*. $$ We consider the new higher-order energy function $$ \overline{E}(t)=\max_{0\leq s\leq t}\{|\overline{h}(s)|_2^2\}+\int_0^t\|\overline{w}(s)\|_{2,{\partial\hspace{1pt}}m}^2ds, $$ where we denote the difference of both solutions using a bar: $$ \overline{h}=h_1-h_2,\,\overline{\delta{\partial\hspace{1pt}}si}=\delta{\partial\hspace{1pt}}si_1-\delta{\partial\hspace{1pt}}si_2 \text{ and }\overline{w}=w_1-w_2. $$ We have that $$ \overline{E}(t)\leq E_1(t)+E_2(t)\leq 2z^*,\;\;\forall 0\leq t\leq T^*. 
$$ The difference verifies the following system \begin{subequations}\label{HS_semiALE1diff} \begin{alignat}{2} \mu\bar{w} + {\rm n}abla (\bar{Q} + {\rm h}o \bar{\delta{\partial\hspace{1pt}}si}) &= \left(\text{Id}-\frac{({\rm n}abla{\partial\hspace{1pt}}si_1)^T{\rm n}abla{\partial\hspace{1pt}}si_1 }{J_1}{\rm i}ght)\mu w_1{\rm n}onumber\\ &\quad-\left(\text{Id}-\frac{({\rm n}abla{\partial\hspace{1pt}}si_2)^T{\rm n}abla{\partial\hspace{1pt}}si_2 }{J_2}{\rm i}ght)\mu w_2 \qquad&&\text{in}\quad \{x_2{\rm n}eq 0\}\,,\\ {\operatorname{div}} \bar{w} &= 0 &&\text{in}\quad \{x_2{\rm n}eq 0\}\,,\\ \jump{\bar{w}^2} = \jump{\bar{Q}} &= 0 &&\text{on}\quad \{x_2 = 0\}\,,\\ {\mathcal D}elta \bar{\delta {\partial\hspace{1pt}}si}^{\partial\hspace{1pt}}m &= 0 &&\text{in}\quad {\mathbb R}^2_{\partial\hspace{1pt}}m\,,\\ \bar{\delta{\partial\hspace{1pt}}si}^{\partial\hspace{1pt}}m &= \bar{h} \qquad&&\text{on}\quad \{x_2 = 0\}\,,\\ \bar{h}_t &= \bar{w} \cdot e_2 \qquad &&\text{on}\quad \{x_2 = 0\}\,,\\ \bar{h} &= 0 &&\text{on}\quad {\mathbb R} \times \{t=0\}\,. 
\end{alignat} \end{subequations} Recalling the equation for the evolution of the interface, we have that \begin{equation}\label{energyuniqueness6} |\bar{h}(t)|_{1.5}\leq \sqrt{t}C\sqrt{\bar{E}(t)}\leq \sqrt{t}C\sqrt{2z^*},\;|\bar{h}(t)|_{1.75}\leq C\sqrt[4]{t}\sqrt{\bar{E}(t)}\leq C\sqrt[4]{t}\sqrt{2z^*}, \end{equation} $$ \mu^{-1}\Delta \bar{Q} = \mu^{-1}{\operatorname{div}} \big[ ({\text{Id}} - J_1 A_1 A^{\rm T}_1) \nabla (Q_1 + \rho \delta\psi_1) -({\text{Id}} - J_2 A_2 A^{\rm T}_2) \nabla (Q_2 + \rho \delta\psi_2) \big] $$ with jump conditions $\jump{\bar{Q}} = 0$ and \begin{eqnarray*} \bigjump{\mu^{-1}\smallexp{$\displaystyle{} \frac{\partial \bar{Q}}{\partial {\rm N}}$}} &=& \jump{\mu^{-1}({\text{Id}} - J_1 A_1 A^{\rm T}_1) (\nabla Q_1) e_2} - \jump{\mu^{-1}\rho J_1 (A_1)^2_i (A_1)^j_i \delta \psi_{1,_j}}\\ &&-\jump{\mu^{-1}({\text{Id}} - J_2 A_2 A^{\rm T}_2) (\nabla Q_2) e_2} + \jump{\mu^{-1}\rho J_2 (A_2)^2_i (A_2)^j_i \delta \psi_{2,_j}}.
\end{eqnarray*} Using that $$ \text{Id}-JAA^T = \left[\begin{array}{cc} \delta\psi_{,2} & - \delta\psi_{,1} \\ - \delta\psi_{,1} & \frac{\delta\psi_{,1}^2}{1+\delta\psi_{,2}}+\delta\psi_{,2} \end{array} \right]\,, $$ elliptic estimates show that \begin{eqnarray*} \|\nabla \bar{Q}\|_{1.25,\pm}&\leq& C\left[\right.\|\text{Id}-J_1A_1A_1^T\|_{1.25,\pm}\left(\|\nabla \bar{Q}\|_{1.25,\pm}+\|\nabla \bar{\delta\psi}\|_{1.25,\pm}\right)\\ &&+\|\nabla(Q_2+\rho\delta\psi_2)\|_{1.25,\pm}\|\nabla \bar{\delta\psi}\|_{1.25,\pm}\\ &&+\|\bar{JA}\|_{1.25,\pm}\|\nabla \delta\psi_1\|_{1.25,\pm}(\|A_1-\text{Id}\|_{1.25,\pm}+1)\\ &&+(\|J_2A_2-\text{Id}\|_{1.25,\pm}+1)\|\nabla\bar{\delta\psi}\|_{1.25,\pm}(\|A_1-\text{Id}\|_{1.25,\pm}+1)\\ &&+(\|J_2A_2-\text{Id}\|_{1.25,\pm}+1)\|\nabla \delta\psi_2\|_{1.25,\pm}\|\bar{A}\|_{1.25,\pm}\left.\right]\\ &\leq&C|h_1|_{1.75}\left(\|\nabla \bar{Q}\|_{1.25,\pm}+|\bar{h}|_{1.75}\right)+C|h_2|_{1.75}|\bar{h}|_{1.75}\\ &&+C|\bar{h}|_{1.75}|h_1|_{1.75}(|h_1|_{1.75}+1)+C(|h_2|_{1.75}+1)|\bar{h}|_{1.75}(|h_1|_{1.75}+1)\\ &&+C(|h_2|_{1.75}+1)|h_2|_{1.75}|\bar{h}|_{1.75}, \end{eqnarray*} and, using the smallness condition \eqref{H1.75}, $$ \|\nabla \bar{Q}\|_{1.25,\pm}\leq C|\bar{h}|_{1.75}.
$$ Similarly, we find that \begin{eqnarray*} \|\nabla \bar{Q}\|_{1.5,\pm}&\leq& C\left[\|\text{Id}-J_1A_1A_1^T\|_{1.5,\pm}\left(\|\nabla \bar{Q}\|_{1.25,\pm}+\|\nabla \bar{\delta\psi}\|_{1.25,\pm}\right)\right.\\ &&+\|\nabla(Q_2+\rho\delta\psi_2)\|_{1.25,\pm}\|\nabla \bar{\delta\psi}\|_{1.5,\pm}\\ &&+\|\nabla(Q_2+\rho\delta\psi_2)\|_{1.5,\pm}\|\nabla \bar{\delta\psi}\|_{1.25,\pm}\\ &&+\|\bar{JA}\|_{1.5,\pm}\|\nabla \delta\psi_1\|_{1.25,\pm}(\|A_1-\text{Id}\|_{1.25,\pm}+1)\\ &&+(\|J_2A_2-\text{Id}\|_{1.25,\pm}+1)\|\nabla\bar{\delta\psi}\|_{1.5,\pm}(\|A_1-\text{Id}\|_{1.25,\pm}+1)\\ &&+(\|J_2A_2-\text{Id}\|_{1.25,\pm}+1)\|\nabla \delta\psi_2\|_{1.25,\pm}\|\bar{A}\|_{1.5,\pm}\left.\right]\\ &\leq&C(|h_1|_{2}+|h_2|_{2})|\bar{h}|_{1.75}+C(|h_1|_{1.75}+|h_2|_{1.75}+1)|\bar{h}|_{2}\,, \end{eqnarray*} so we conclude that $$ \|\nabla \bar{Q}\|_{1.25,\pm}+\|\bar{w}\|_{1.25,\pm}\leq C\sqrt[4]{t} $$ and \begin{equation}\label{energyuniqueness5} \|\nabla \bar{Q}\|_{1.5,\pm}+\|\bar{w}\|_{1.5,\pm}\leq C\sqrt[4]{t}+C|\bar{h}|_2.
\end{equation} Next, as we have that $$ \bar{v}=\frac{\nabla\bar{\delta\psi}w_1}{J_1}+\frac{\nabla\psi_2\bar{w}}{J_2}+\nabla\psi_2w_1\frac{-\bar{J}}{J_1J_2} $$ and $$ |\bar{v}|_{1.5}\leq c\sigma\|\nabla\bar{\delta\psi}\|_{2,\pm}+C\|\bar{w}\|_{2,\pm}\,, $$ using \eqref{h_reg_eq}, we compute that \begin{equation}\label{energyuniqueness4} \int_0^t|\bar{h}(s)|^2_{2.5}ds\leq \mathcal{P}(\bar{E}(t)). \end{equation} Recalling \eqref{eq1} and, for $i=1$ or $2$, denoting the matrix $B_i$ by $$ B_i=\left(\begin{array}{cc}\delta\psi_{i,2}-\delta\psi_{i,1}^2 & -\delta\psi_{i,1}(1+\delta\psi_{i,2})\\ -\delta\psi_{i,1}(1+\delta\psi_{i,2}) & \delta\psi_{i,2}(1+\delta\psi_{i,2})\end{array}\right), $$ we write the right-hand side in (\ref{HS_semiALE1diff}a) as $$ RHS=B_1\frac{\mu\bar{w}}{J_1}+B_1\frac{-\mu w_2\bar{J}}{J_2J_1}+(B_1-B_2)\frac{\mu w_2}{J_2}. $$ Testing against $\bar{w}$ and integrating by parts in (\ref{HS_semiALE1diff}a), we get that \begin{multline}\label{energyuniqueness3} |\bar{h}(t)|_0^2+\min\{\mu^+,\mu^-\}\int_0^t\|\bar{w}(s)\|^2_{0,\pm}ds\\ \leq Cz^*\left[\int_0^t\|\bar{w}(s)\|_{0,\pm}^2ds+\int_0^t\|\bar{w}(s)\|_{0,\pm}\|\nabla\bar{\delta\psi}(s)\|_{0,\pm}ds\right]\leq Cz^*\mathcal{P}(\bar{E}(t))t.
\end{multline} The energy estimates show that \begin{equation}\label{energyuniqueness} \min\{\mu^+,\mu^-\}\int_0^t\|\bar{w}''(s)\|_{0,\pm}^2-\frac{\jump{\rho}}{2}\frac{d}{dt}|\bar{h}(s)|_{2}^2 ds=\int_0^t\int_{{\mathbb R}^2}RHS''\bar{w}''dxds\leq C(\sqrt{t}+\sqrt[4]{t})\mathcal{P}(\bar{E}(t))\,, \end{equation} and once again using the Hodge decomposition, we find that \begin{equation}\label{energyuniqueness2} \int_0^t\|\bar{w}(s)\|_{2,\pm}^2ds\leq C(t+\sqrt{t}+\sqrt[4]{t})\mathcal{P}(\bar{E}(t)). \end{equation} Collecting the previous estimates \eqref{energyuniqueness6}-\eqref{energyuniqueness2} and using the smallness of $\sigma$, we get the following polynomial inequality $$ \bar{E}(t)\leq (t+\sqrt{t}+\sqrt[4]{t})\mathcal{P}(\bar{E}(t)), $$ which implies the uniqueness. This concludes the proof of Theorem \ref{localsmall} for an infinitely-deep domain. \section{Proof of Theorem \ref{localsmall}: Local well-posedness for the confined case}\label{sec3} We define our reference domains $$ \Omega^+=\{(x_1,x_2),\, x_1\in{\mathbb R}\, (\text{or }x_1\in{\mathbb T}),0<x_2<c_t\}, $$ $$ \Omega^-=\{(x_1,x_2),\, x_1\in{\mathbb R}\, (\text{or }x_1\in{\mathbb T}),c_b<x_2<0\}, $$ and the reference interface $$ \Gamma=\{(x_1,x_2),\, x_1\in{\mathbb R}\, (\text{or }x_1\in{\mathbb T}),x_2=0\}. $$ We denote by $$ \Gamma_{bot}=\{(x_1,x_2),\,x_2=c_b\},\, \Gamma_{top}=\{(x_1,x_2),\,x_2=c_t\}, $$ the fixed \emph{bottom} and \emph{top} boundaries. We consider $\delta\psi^\pm$ as the solution of $$ \Delta\delta\psi^+=0,\, \delta\psi^+=h\text{ if } x_2\in\Gamma,\, \delta\psi^+=\tilde{t}(x)\text{ if } x_2\in \Gamma_{top}
$$ and $$ \Delta\delta\psi^-=0,\, \delta\psi^-=h\text{ if } x_2\in\Gamma,\, \delta\psi^-=\tilde{b}(x)\text{ if } x_2\in \Gamma_{bot}. $$ We define the mapping $\psi^\pm=e+(0,\delta\psi^\pm)$. In particular, $$ \psi^\pm(\Gamma,t)=(x_1,h(x_1,t)),\,\psi^+(\Gamma_{top},t)=(x_1,t(x_1)),\,\psi^-(\Gamma_{bot},t)=(x_1,b(x_1)), $$ so $$ \psi:\Omega^\pm\mapsto\Omega^\pm(t). $$ Using estimates similar to those in \eqref{diffeo}, $\psi$ is a diffeomorphism if $h,\tilde{t},\tilde{b}$ are small in the $H^{1.75}$ norm. We define $v=u\circ \psi$, $q=p\circ \psi$, $A=(\nabla\psi)^{-1}$, $J= \text{det}(\nabla\psi)$, $w^k=JA^k_iv^i$ and $Q=q+\rho x_2$. We write $$ n_b=\frac{(\tilde{b}'(x_1),-1)}{\sqrt{1+(\tilde{b}'(x))^2}}\text{ and }n_t=\frac{(\tilde{t}'(x_1),1)}{\sqrt{1+(\tilde{t}'(x))^2}} $$ for the normal vectors at $b(x)$ and $t(x)$, respectively. Then, we have the boundary conditions $$ u\cdot n_b=0\text{ at }(x_1,x_2)\in \{(x_1,b(x_1))\},\quad u\cdot n_t=0\text{ at }(x_1,x_2)\in \{(x_1,t(x_1))\}, $$ which translate to $$ v\cdot n_b=0\text{ at }\Gamma_{bot},\quad v\cdot n_t=0\text{ at }\Gamma_{top}. $$ Since $JA^Te_2=(-\psi^2_{,1},\psi^1_{,1})$, then $$ JA^Te_2=(-\tilde{b}'(x_1),1)\text{ at }\Gamma_{bot},\quad JA^Te_2=(-\tilde{t}'(x_1),1)\text{ at }\Gamma_{top}.
$$ Using this, we can write the following boundary conditions for the semi-ALE velocity $$ v\cdot (-n_b)=v\cdot (JA^Te_2)=(JAv)\cdot e_2=w\cdot e_2=0\text{ at }\Gamma_{bot}, $$ $$ v\cdot n_t=v\cdot (JA^Te_2)=(JAv)\cdot e_2=w\cdot e_2=0\text{ at }\Gamma_{top}. $$ As in Section \ref{sec2.2}, we obtain \begin{subequations}\label{HS_semiALE1general} \begin{alignat}{2} \mu w + \nabla (Q + \rho \delta\psi) &= \left(\text{Id}-\frac{(\nabla\psi)^T\nabla\psi }{J}\right)\mu w \qquad&&\text{in}\quad \{x_2\neq 0\}\,,\\ {\operatorname{div}} w &= 0 &&\text{in}\quad \{x_2\neq 0\}\,,\\ \jump{w^2} = \jump{Q} &= 0 &&\text{on}\quad \{x_2 = 0\}\,,\\ w^2 &= 0 &&\text{on}\quad \{x_2 = c_b,c_t\}\,,\\ \Delta \delta \psi^\pm &= 0 &&\text{in}\quad \Omega^{\pm}_\pm\,,\\ \delta\psi^\pm &= h \qquad&&\text{on}\quad \{x_2 = 0\}\,,\\ \delta\psi^+ &= \tilde{t} \qquad&&\text{on}\quad \{x_2 = c_t\}\,,\\ \delta\psi^- &= \tilde{b} \qquad&&\text{on}\quad \{x_2 = c_b\}\,,\\ h_t &= w \cdot e_2 \qquad &&\text{on}\quad \{x_2 = 0\}\,,\\ h &= h_0 &&\text{on}\quad {\mathbb R} \times \{t=0\}\,. \end{alignat} \end{subequations} Multiplying (\ref{HS_semiALE1general}a) with $e_2$ and evaluating at $\Gamma_{top}$, we obtain that $$ Q^2_{,2}=-\rho^+\delta\psi_{,2}-\mu^+ w_1\tilde{t}'\,, $$ and similarly at $\Gamma_{bot}$, $$ Q^2_{,2}=-\rho^-\delta\psi_{,2}-\mu^- w_1\tilde{b}'. $$ Given $|h_0|_{1.75}< \sigma\ll1$ and $|\tilde{t}|_{2},|\tilde{b}|_{2}\leq \tilde{\sigma}\ll1$, we can regularize the problem as in (\ref{HS_semiALE_reg_fix}a-h) and we get an approximate solution $(w_\kappa,Q_\kappa,h_\kappa)$ that exists up to time $T_\kappa>0$.
This solution has a finite energy, $E(t)$, as defined in \eqref{energy}. We take $T_\kappa$ small enough so $E(t)\leq z^*$ (for a constant that will be chosen later). With the boundary conditions for $Q$, we can form the associated elliptic problem as in Section \ref{sec4.3} and we get the following bounds (analogous to \eqref{Q1.25}, \eqref{Q1.5}): $$ \|\nabla Q\|_{1.25,\pm}\leq c|h|_{1.75}+c\|w\|_{1.25,\pm}(|\tilde{t}|_{1.75}+|\tilde{b}|_{1.75}) $$ and $$ \|\nabla Q\|_{1.5,\pm}\leq c|h|_{1.75}|h|_2+c\|w\|_{1.5,\pm}(|\tilde{t}|_2+|\tilde{b}|_2)+c|h|_2. $$ In particular, using the elliptic estimates for the pressure $Q$, we have that $$ \|w\|_{1.5,\pm}\leq C|h|_2,\, \|w\|_{1.25,\pm}\leq C|h|_{1.75}, $$ where we have used the smallness of $\tilde{\sigma}$ to obtain the desired polynomial bounds. The bound $h\in L^2 ( 0,T_ \kappa ; H^{2.5} (\Gamma))$ is obtained in the same way as in the proof of Theorem \ref{localsmall}. Using the boundary condition $w_2^\pm=0$ at $x_2=c_t,c_b$, the new terms coming from the boundaries in the energy estimates vanish and we obtain the inequality $$ E(t)\leq C|h_0|_2^2+C\sigma_1 E(t) +t\mathcal{P}(E(t)), $$ which, since $\sigma_1\ll1$, implies the existence of a uniform-in-$\kappa$ $T^*$ such that $$ E(t)\leq z^*, |h(t)|_{1.75}<\sigma_1\,\forall\,0\leq t\leq \min\{T_\kappa,T^*\}. $$ We reach $T^*$ by induction. The uniqueness is obtained in the same way. This proves Theorem \ref{localsmall}. \section{Proof of Theorem \ref{globalsmall}: Global existence and decay to equilibrium}\label{sec4} Recall that in this case we have $\Omega^+(t)\cup\Omega^-(t)={\mathbb T}\times {\mathbb R}$. \subsection{A linearization of (\ref{HS_semiALE})}\label{sec4.1.1} We denote by $\hat{f}$ the Fourier series of $f$.
We write $\Lambda$ for the square root of the Laplacian: $$ \Lambda f=\sqrt{-\partial_x^2}f,\qquad \widehat{\Lambda f}(\xi)=|\xi|\hat{f}(\xi). $$ It is well-known that the previous operator has a kernel representation \begin{equation*} \Lambda f(x_1) = \frac{1}{2\pi}\, \text{p.v.} \int_{-\pi}^{\pi} \frac{f(x_1)-f(x_1-s)}{\sin^2\left(\frac{s}{2}\right)}\, d s. \end{equation*} From \eqref{deltapsi} and $\delta \psi^{-}(x_1,x_2)=\delta \psi^{+}(x_1,-x_2)$, we have that \begin{equation*} \delta \psi^{\pm}_{,2} = \mp\Lambda h \qquad \text{on}\quad \{x_2 = 0\}\,, \end{equation*} so that the Dirichlet-to-Neumann map is the Zygmund operator. We define the Neumann-to-Dirichlet map $\Lambda^{-1}$ by $$ \widehat{\Lambda^{-1} f}(\xi)=|\xi|^{-1}\hat{f}(\xi). $$ Notice that if $f$ has zero mean, the previous operator is well-defined. Equation (\ref{HS_semiALE1}a) may be written as $$ \mu w + \nabla (Q + \rho \delta\psi) = F $$ with $$ F=(F_1,F_2)=\left(\text{Id}-\frac{(\nabla\psi)^T\nabla\psi }{J}\right)\mu w. $$ By taking the inner product of this equation with $e_2$, and then evaluating on $\{x_2 = 0\}$, we find that \begin{equation}\label{eqlinear} \mu^\pm h_t + Q^{\pm}_{,2}+\rho^\pm\delta\psi^{\pm}_{,2}=F_2^\pm, \end{equation} where $$ F_2^\pm=-(\mu^\pm w_1^\pm h'+\mu^\pm w_2(\mp\Lambda h))\,. $$ Summing over the two phases, $$ F_2^++F_2^-=-(\mu^+ w_1^++\mu^-w^-_1) h'-\jump{\mu}\Lambda h w_2.
$$ On the other hand, taking the divergence of equation (\ref{HS_semiALE1}a), we get $$ \Delta Q=\text{div}F. $$ The continuity of $q$ gives us the jump condition $\jump{Q}=0$. Using equation (\ref{HS_semiALE1}a) and \eqref{eq1}, $$ \jump{Q_{,2}}=\left(\rho^++\rho^-\right)\Lambda h-\jump{\mu}h_t+\jump{F_2}, \text{ with } \jump{F_2}=-(\jump{\mu w_1} h'-(\mu^++\mu^-)w_2 \Lambda h). $$ We define $\bar{Q}^\pm$ such that \begin{eqnarray*} \Delta \bar{Q}^\pm &=& 0 \text{ in }\quad {\mathbb R}^2_\pm \,,\\ \bar{Q}^\pm &=& -\frac{\rho^++\rho^-}{2}h+\frac{\jump{\mu}}{2}\Lambda^{-1}h_t \qquad\text{ on }\quad \{x_2 = 0\}\,. \end{eqnarray*} Then, $$ \bar{Q}^\pm_{,2}=\pm\frac{\rho^++\rho^-}{2}\Lambda h\mp\frac{\jump{\mu}}{2}h_t,\text{ on } \{x_2=0\}. $$ Consequently, $\jump{\bar{Q}}=0$ and $\jump{\bar{Q}^\pm_{,2}}=(\rho^++\rho^-)\Lambda h-\jump{\mu}h_t$. Setting $\tilde{Q}=Q-\bar{Q}$, then $\tilde{Q}$ is a solution of \begin{equation}\label{Qtilda} \Delta \tilde{Q}=\text{div}F, \end{equation} with the jump conditions \begin{equation}\label{Qtilda2} \jump{\tilde{Q}}=0 \text{ and }\jump{\tilde{Q}_{,2}}=\jump{F_2}. \end{equation} As a consequence, equation \eqref{eqlinear} becomes \begin{equation*} \mu^\pm h_t \mp\frac{\jump{\mu}}{2}h_t \mp\rho^\pm\Lambda h\pm\frac{\rho^++\rho^-}{2}\Lambda h=F_2^\pm-\tilde{Q}^\pm_{,2}. \end{equation*} Summing the equations for both phases, we obtain \begin{equation}\label{eqlineal2b} \frac{\mu^++\mu^-}{2}h_t =\frac{\jump{\rho}}{2}\Lambda h+\frac{F^+_2+F^-_2-\tilde{Q}_{,2}^+-\tilde{Q}_{,2}^-}{2}.
\end{equation} \subsection{Energy estimates for the total norm} For notational simplicity, we set $\jump{\rho}=-2$ and $\mu^++\mu^-=2$, but in what follows, any finite values are permissible. Using the Duhamel principle on \eqref{eqlineal2b}, we write the so-called mild solution as \begin{equation}\label{mild} h(t) = h_0e^{-\Lambda t}+\int_0^t\left(\frac{F^+_2(s)+F^-_2(s)-\tilde{Q}_{,2}^+(s)-\tilde{Q}_{,2}^-(s)}{2}\right)e^{-\Lambda (t-s)}ds\,. \end{equation} Note that in this analysis, we are restricting our attention to zero-mean, periodic functions. As to the linear semigroup, it is well-known that \begin{equation}\label{linear-decay} \|e^{-\Lambda t}\|_{L^2\mapsto L^2}\leq e^{-t}\,, \end{equation} since the first eigenvalue of $\Lambda$ agrees with the first eigenvalue of $- \Delta $. Let $\sigma_2$ denote a constant that will be fixed later. We choose $h_0\in H^2$ such that $|h_0|_2\leq \sigma_2\ll1$. Using Theorem \ref{localsmall}, there exists a local-in-time solution up to time $T=T(h_0)$. Moreover, this solution remains in the Rayleigh-Taylor stable regime and satisfies $$ \max_{0\leq t\leq T}|h(t)|^2_{2}+\int_0^t |h(s)|_{2.5}^2ds\leq C_1|h_0|_2^2, $$ and \begin{equation}\label{smallglobal} \max_{0\leq t\leq T}|h(t)|_{1.75}<\sigma_{0.25}\ll1, \end{equation} where $C_1$ and $\sigma_{0.25}$ are the constants appearing in Theorem \ref{localsmall}. We define the new \emph{total norm} as \begin{equation}\label{totalenergy} \vertiii{(w,h)}^2_T=\max_{0\leq t\leq T}\left\{|h(t)|_2^2+e^{\alpha t}|h(t)|_0^2+\int_0^t\|w(s)\|_{2,\pm}^2ds\right\}, \end{equation} for a given $0<\alpha<2$. Hence, a uniform bound for $\vertiii{(w,h)}_T$ for every $t>0$ implies the $e^{-\alpha t/2}$ decay rate for $|h(t)|_0$.
Just as we obtained the $H^{2.5}$ estimate for $h^{ \kappa \kappa }$ in (\ref{h2.50}), we have the following estimate: \begin{eqnarray}\label{h6.2} |h^{\prime\prime}|_{0.5} &\leq& C (1+\left|h\right|^3_{1.75}) \left(\left|v^+\right|_{1.5}+\left|v^-\right|_{1.5}\right)+C |h^{\prime\prime}|_{0.5} |v\cdot (-h^{\prime},1)|_{0.75} \,. \end{eqnarray} Using the estimates \eqref{w1.25}, \eqref{smallv}, \eqref{h2.51}-\eqref{h2.53} together with \eqref{smallglobal}, we obtain that \begin{align*} \int_0^t|h(s)|_{2.5}^2ds & \leq C\left(\int_0^t\|w(s)\|^2_{2,\pm}ds+\int_0^t|h|_{2}^4ds+\int_0^t\left|h(s)\right|_{2}^2\|w(s)\|^2_{1.5,\pm}ds\right). \end{align*} Using the interpolation inequality $|h|^2_2\leq C|h|_{1.5}|h|_{2.5}$, together with \eqref{smallglobal}, we find that \begin{align*} \int_0^t|h(s)|_{2.5}^2ds & \leq C\vertiii{(w,h)}^2\left(1+\vertiii{(w,h)}^2\right). \end{align*} Our goal is to show that $e^{\alpha t}|h(t)|_0^2$ remains small for all time. To do so, we take the $L^2(\Gamma)$-norm of equation \eqref{mild}, and find that \begin{equation}\label{decay-inequality} |h(t)|_0\leq e^{-t}|h_0|_0+\frac{1}{2}\int_0^t|F^+_2(s)+F^-_2(s)-\tilde{Q}_{,2}^+(s)-\tilde{Q}_{,2}^-(s)|_0e^{-(t-s)}ds. \end{equation} We define \begin{equation}\label{eqI1} I_1=\frac{1}{2}\int_0^t|F^+_2(s)+F^-_2(s)|_0e^{-(t-s)}ds \end{equation} and \begin{equation}\label{eqI2} I_2=\frac{1}{2}\int_0^t|\tilde{Q}_{,2}^+(s)+\tilde{Q}_{,2}^-(s)|_0e^{-(t-s)}ds. \end{equation} We are going to use the linear decay rate (\ref{linear-decay}) to establish the nonlinear decay rate for small solutions. This will amount to establishing certain integrability properties of the nonlinear term in (\ref{decay-inequality}).
Notice now that, using \eqref{Qtilda} and \eqref{Qtilda2}, we have the bound $$ \|\nabla\tilde{Q}\|_{0,\pm}\leq C\|F\|_{0,\pm}. $$ Given $\phi\in H^1({\mathbb R}^2)$, we compute $$ \int_{\{x_2=0\}} \tilde{Q}_{,2}\phi dx_1=\int_{\Omega^\pm}\nabla \tilde{Q}\nabla \phi dx-\int_{\Omega^\pm} F\nabla\phi+\int_{\{x_2=0\}}F\cdot N\phi dx_1, $$ so $$ |\tilde{Q}_{,2}|_{-0.5}\leq C(\|F\|_{0,\pm}+|F_2|_{-0.5}). $$ By elliptic estimates and the trace theorem, $$ |\tilde{Q}_{,2}|_{0.5}\leq C(\|F\|_{1,\pm}+|F_2|_{0.5}). $$ Thus, using interpolation, $$ |\tilde{Q}_{,2}|_{0}\leq C(\|F\|_{0.5,\pm}+|F_2|_{0}). $$ Using the H\"{o}lder inequality and the boundedness of the Hilbert transform in $L^p$ for $1<p<\infty$, we have that $$ |F^+_2(s)+F^-_2(s)|_0\leq C|w|_{L^4}|h'|_{L^4}. $$ Due to the Sobolev embedding theorem, the trace theorem and elliptic estimates, we have that $$ |w|_{L^4}|h'|_{L^4}\leq C|w|_{0.25}|h|_{1.25}\leq C\|w\|_{0.75,\pm}|h|_{1.25}\leq C|h|^2_{1.25}. $$ In particular, $$ |F^+_2(s)+F^-_2(s)|_0\leq C|h|_{0}|h|_{2.5}. $$ Using \eqref{eqI1}, we find that \begin{eqnarray}\label{decay1} I_1&\leq& C\vertiii{(w,h)}_T^{0.5}\int_0^t(e^{\alpha s})^{-0.5}|h(s)|_{2.5}e^{-(t-s)}ds\nonumber\\ &\leq& C(1+\vertiii{(w,h)}_T^2)^{0.5}\vertiii{(w,h)}_T^{1.5}e^{-t}\left(\int_0^te^{(2-\alpha)s}ds\right)^{0.5}\nonumber\\ &\leq& \frac{C}{\sqrt{2-\alpha}}(1+\vertiii{(w,h)}_T^2)^{0.5}\vertiii{(w,h)}_T^{1.5}e^{-t}\left(e^{(2-\alpha)t}-1\right)^{0.5}.
\end{eqnarray} The remaining terms \eqref{eqI2} are written as $$ I_2\leq\frac{1}{2}\int_0^t(\|F\|_{0.5,\pm}+|F^+_2(s)|_0+|F^-_2(s)|_0)e^{-(t-s)}ds. $$ The terms with $|F^+_2(s)|_0+|F^-_2(s)|_0$ are similar to those with $|F^+_2(s)+F^-_2(s)|_0$. Using \eqref{eq1} and elliptic estimates, we have that \begin{align} \|F\|_{0,\pm} & \leq C\|w\|_{L^4}\|\nabla\delta\psi\|_{L^4}\nonumber\\ & \leq C\|w\|_{0.5,\pm}\|\nabla\delta\psi\|_{0.5,\pm}\nonumber\\ & \leq C|h|_{1}|h|_{1},\label{interF} \end{align} and, using \eqref{w1.25}, \begin{align} \|\nabla F\|_{0,\pm} & \leq C\left(\|\nabla w\|_{L^4}\|\nabla\delta\psi\|_{L^4}+\|w\|_{L^4}\|\nabla^2\delta\psi\|_{L^4}\right)\nonumber\\ & \leq C\left(\|w\|_{1.5,\pm}\|\nabla \delta\psi\|_{0.5,\pm}+\|w\|_{0.5,\pm}\|\nabla\delta\psi\|_{1.5,\pm}\right)\nonumber\\ & \leq C|h|_1|h|_2.\label{interF2} \end{align} Due to linear interpolation between \eqref{interF} and \eqref{interF2}, we have \begin{equation} \|F\|_{0.5,\pm} \leq C|h|_{1}|h|_{1.5} \leq C|h|_{0}|h|_{2.5}.\label{decay2} \end{equation} Collecting the estimates \eqref{decay1} and \eqref{decay2}, $$ \frac{1}{2}\int_0^t|F^+_2(s)+F^-_2(s)-\tilde{Q}_{,2}^+(s)-\tilde{Q}_{,2}^-(s)|_0e^{-(t-s)}ds \leq (1+\vertiii{(w,h)}_T^2)^{0.5}\vertiii{(w,h)}_T^{1.5}e^{-\alpha t/2}, $$ and \begin{align*} e^{\alpha t}|h(t)|^2_0 & \leq 2\left(e^{(\alpha-2)t}|h_0|^2_0 +C\left(1+\vertiii{(w,h)}_T^2\right)\vertiii{(w,h)}_T^{3}\right)\\ & \leq 2|h_0|_0^2+\left(1+\vertiii{(w,h)}_T^2\right)\vertiii{(w,h)}_T^{3}.
\end{align*} Now we have to estimate the terms $$ \max_{0\leq t\leq T} |h(t)|_2^2+\int_0^t\|w(s)\|^2_{2,\pm}ds. $$ Using the same type of estimates as in Sections \ref{sec4.3}, \ref{sec4.5} and \ref{sec4.6}, we get the inequality $$ \vertiii{(w,h)}_T\leq C_2|h_0|_2+\mathcal{P}(\vertiii{(w,h)}_T), $$ where the polynomial $\mathcal{P}$ has order $m$ with $m > 1$. Now, by choosing the initial data to be sufficiently small, we have a global bound $$ \vertiii{(w,h)}_T\leq 2C_2|h_0|_2\leq 2C_2\sigma_2. $$ Furthermore, using interpolation between Sobolev spaces, we have $$ \sup_{0\leq t\leq T}|h(t)|_{1.75}^2\leq 2C_2\sigma_2e^{-\frac{\alpha t}{8}}. $$ We take $\sigma_2$ small enough so that $$ 2C_2\sigma_2<\sigma_{0.25}, $$ and we obtain that the smallness of $|h|_{1.75}$ propagates. Consequently, at time $t=T$, the solution remains in the stable regime (see Section \ref{sec2.5.4}), and the condition \eqref{smallglobal} is, in fact, improved. Due to this fact, we can apply Theorem \ref{localsmall} to continue the solution up to $t=2T$. As the same estimates hold in the time interval $nT\leq t\leq (n+1)T$ for $n\in{\mathbb Z}^+$, we conclude the proof of Theorem \ref{globalsmall} by means of a classical continuation argument. \section{Proof of Theorem \ref{localonephase}: Local well-posedness for the one-phase problem}\label{sec5} We now focus our attention on the one-phase Muskat problem (\ref{HS_Eulerian_Onephase}a-e). \subsection{Constructing the family of diffeomorphisms $\psi( \cdot ,t)$}\label{subsection_psi} We define our reference domain, fixed \emph{bottom} boundary, and reference interface, respectively, as follows: \begin{equation}\label{ref_domain} \Omega=\mathbb{T}\times[c_b,0]\,, \quad \Gamma_{bot}=\{(x_1,c_b),x_1\in{\mathbb T}\}\,, \text{ and }\Gamma=\{(x_1,0),x_1\in{\mathbb T}\}.
\end{equation} In particular, our reference domain is $C^\infty$. We let $N=e_2$ denote the unit normal vector on $\Gamma$. Given a function $h\in C(0,T;H^2)$ with initial data $h(0)=h_0$, we fix $0<\delta\ll1$ and define \begin{equation}\label{Omegadelta} \Omega^\delta(0)=\{(x_1,x_2),\, x_1\in{\mathbb T},\,c_b<x_2<\mathcal{J}_\delta h_0(x_1)\}, \end{equation} \begin{equation}\label{Gammadelta} \Gamma^\delta(0)=\{(x_1,\mathcal{J}_\delta h_0(x_1)),\, x_1\in{\mathbb T}\}, \end{equation} and \begin{equation}\label{phi1} \phi_1(x_1,x_2)=\left(x_1,x_2+\mathcal{J}_\delta h_0(x_1)\left(1-\frac{x_2}{c_b}\right)\right). \end{equation} This function $\phi_1:\Omega\rightarrow\Omega^\delta(0)$ is a $C^\infty$ diffeomorphism. Next, we define the function $\phi_2:\Omega^{\delta}(0)\rightarrow\Omega(0)$ as the solution to the following elliptic problem: \begin{subequations}\label{phi2} \begin{alignat}{2} \Delta \phi_2&= 0 \qquad&&\text{in}\quad \Omega^\delta(0)\times[0,T]\,,\\ \phi_2 &= e+[h_0(x_1)-\mathcal{J}_\delta h_0(x_1)]e_2 \qquad &&\text{on}\quad \Gamma^\delta(0)\times[0,T]\,,\\ \phi_2 &= e &&\text{on}\quad \Gamma_{bot}\times[0,T] \,. \end{alignat} \end{subequations} Since $\Omega^\delta(0)$ is a $C^ \infty $ domain, standard elliptic regularity theory shows that $\phi_2 \in H^{2.5}(\Omega^\delta(0))$, and since for $ \delta >0$ taken sufficiently small, $|h_0-\mathcal{J}_\delta h_0|_{2}\ll 1$, $\|\nabla \phi_2 - \text{Id} \|_{C^0} \ll 1$; hence, from the inverse function theorem, $\phi_2:\Omega^{\delta}(0)\rightarrow\Omega(0)$ is an $H^{2.5}$-class diffeomorphism.
We define \begin{equation}\label{psi0a} \psi(0)=\phi_2\circ\phi_1:\Omega\rightarrow \Omega(0). \end{equation} This mapping is also a diffeomorphism that maps $$ \psi(0):\Gamma\rightarrow \Gamma(0). $$ Furthermore, using the chain rule, we have that $$ \|\psi(0)\|_{2,-}\leq c(\delta)|h_0|_{1.5},\,\, \|\psi(0)\|_{3,-}\leq c(\delta)|h_0|_{2.5}. $$ Using interpolation, we obtain \begin{equation}\label{psi02.5} \|\psi(0)\|_{2.5,-}\leq c(\delta)|h_0|_{2}. \end{equation} (We note that $\delta>0$ is a fixed number, so the dependence of the constant in (\ref{psi02.5}) on $\delta$ is harmless.) We have thus defined our initial diffeomorphism $\psi(0)$; we next define our time-dependent family of diffeomorphisms $\psi(t) = \psi( \cdot, t)$ as follows: \begin{subequations}\label{psita} \begin{alignat}{2} \Delta \psi(t)&= \Delta \psi(0) \qquad&&\text{in}\quad \Omega\times[0,T]\,,\\ \psi(t)&= e+h(x_1,t)e_2 \qquad &&\text{on}\quad \Gamma\times[0,T]\,,\\ \psi(t)&= e \qquad &&\text{on}\quad \Gamma_{bot}\times[0,T]\,. \end{alignat} \end{subequations} Writing $J(t)=\text{det}(\nabla\psi(t))$, we have the bounds \begin{equation}\label{J1.25} \|J(t)-J(0)\|_{1.25,-}\leq C\|\psi(t)-\psi(0)\|^2_{2.25,-}\leq C|h(t)- h_0|_{1.75}^2. \end{equation} Consequently, using $h\in C(0,T;H^2)$, for sufficiently small time $t$, we have $$ \min_{x\in\Omega^-}\frac{J(0)}{2}< J(t)< 2\max_{x\in\Omega^-} J(0), $$ and we once again see that $\psi(t): \Omega \to \Omega(t)$ is a diffeomorphism.
Furthermore, $\psi(t)$ is an $H^{2.5}$-class diffeomorphism thanks to the elliptic estimate $$ \|\psi(t)\|_{2.5,-}\leq c(|h(t)|_{2}+1). $$ \subsection{The ALE formulation} With $\psi(t)= \psi( \cdot ,t)$ defined in Section \ref{subsection_psi} (see \eqref{psi0a} and \eqref{psita}), we set $A=(\nabla\psi)^{-1}$ and $J =\det \nabla \psi$. As we noted above, $\psi(t,\Gamma)=\Gamma(t)$. We define our ALE variables: $v=u\circ\psi,q=p\circ\psi$. We let $$ \tilde{\tau}=(1,h'(x_1,t)),\,\tilde{n}=(-h'(x_1,t),1), $$ denote the (non-unitary) tangent and normal vectors, respectively, to $\Gamma(t)$. We let $g= |\psi'|^2$ denote the induced metric, and define the unit tangent vector $\tau=\tilde{\tau}/\sqrt{g}$ and the unit normal vector $n=\tilde{n}/\sqrt{g}$. Since the interface $\Gamma(t)$ moves with the fluid, $$ v\cdot \tilde{n}=\psi_t\cdot \tilde{n}=h_tN\cdot \tilde{n}=h_t. $$ Hence, the ALE representation of the one-phase Muskat problem is given as \begin{subequations}\label{HS_ALE_Onephase} \begin{alignat}{2} v^i+ A^k_i(q+\psi^2)_{,k}&=0 \qquad&&\text{in}\quad\Omega\times[0,T]\,,\\ A^i_jv^j_{,i} &= 0 &&\text{in}\quad\Omega\times[0,T]\,,\\ h(t)&=h_0+\int_0^t v^i \tilde{n}_ids\qquad&&\text{on}\quad\Gamma\times[0,T]\,,\\ q&=0 &&\text{on}\quad\Gamma\times[0,T]\,,\\ v\cdot e_2&=0 &&\text{on}\quad\Gamma_{bot}\times[0,T]\,.
\end{alignat} \end{subequations} \subsubsection{The matrix $A$} From the identity $A\nabla\psi=\text{Id}$, we see that \begin{equation}\label{propA} A_t=-A\nabla \psi_t A,\qquad A_{,k}=-A\nabla \psi_{,k} A,\qquad A''=-2 A'\nabla\psi' A-A\nabla\psi'' A. \end{equation} These identities will be used often. \subsection{A smooth approximation of the ALE formulation} Given initial data $h_0\in H^2$ and two regularization parameters $\varepsilon,\kappa>0$, we define a smooth approximation of the initial height function $\mathcal{J}_\varepsilon h_0$. We write $h_{\varepsilon,\kappa}(x_1,t)$ for the free boundary corresponding to the initial data $\mathcal{J}_\varepsilon h_0$. We define $$ \Omega^{\delta,\varepsilon}(0)=\{(x_1,x_2),\, x_1\in{\mathbb T},\,c_b<x_2<\mathcal{J}_\delta\mathcal{J}_\varepsilon h_0(x_1)\}, $$ $$ \Gamma^{\delta,\varepsilon}(0)=\{(x_1,\mathcal{J}_\delta\mathcal{J}_\varepsilon h_0(x_1)),\, x_1\in{\mathbb T}\}, $$ and \begin{equation}\label{phi1epsilonkappa} \phi^{\varepsilon,\kappa}_1(x_1,x_2)=\left(x_1,x_2+\mathcal{J}_\delta\mathcal{J}_\varepsilon h_0(x_1)\left(1-\frac{x_2}{c_b}\right)\right). 
\end{equation} We construct $\phi^{\varepsilon,\kappa}_2$ by solving \begin{subequations}\label{phi2epsilonkappa} \begin{alignat}{2} \Delta \phi^{\varepsilon,\kappa}_2&=0\qquad &&\text{on}\quad \Omega^{\delta,\varepsilon}(0)\times[0,T_{\varepsilon,\kappa}]\,,\\ \phi^{\varepsilon,\kappa}_2(t)&= e+[\mathcal{J}_\kappa\mathcal{J}_\kappa\mathcal{J}_\varepsilon h_0(x_1)-\mathcal{J}_\delta\mathcal{J}_\varepsilon h_0(x_1)]e_2 \qquad &&\text{on}\quad \Gamma\times[0,T_{\varepsilon,\kappa}]\,,\\ \phi^{\varepsilon,\kappa}_2 &= e &&\text{on}\quad \Gamma_{bot}\times[0,T_{\varepsilon,\kappa}] \,. \end{alignat} \end{subequations} We can use Proposition \ref{approxsol} together with \eqref{phi1epsilonkappa} and \eqref{phi2epsilonkappa} to construct solutions to the approximate $\varepsilon\kappa$-problem on a time interval $[0,T_{\varepsilon,\kappa}]$: \begin{subequations}\label{HS_ALE_Onephase_reg} \begin{alignat}{2} v_{\varepsilon,\kappa}^i+ (A_{\varepsilon,\kappa})^k_i(q_{\varepsilon,\kappa}+\psi_{\varepsilon,\kappa}^2)_{,k}&= 0 \qquad&&\text{in}\quad\Omega\times[0,T_{\varepsilon,\kappa}]\,,\\ (A_{\varepsilon,\kappa})^i_j(v_{\varepsilon,\kappa})^j_{,i} &= 0 &&\text{in}\quad\Omega\times[0,T_{\varepsilon,\kappa}]\,,\\ h_{\varepsilon,\kappa}(t)&=\mathcal{J}_\varepsilon h_0+\int_0^t v_{\varepsilon,\kappa}^i J_{\varepsilon,\kappa}(A_{\varepsilon,\kappa})^k_iN^k ds\qquad&&\text{on}\quad\Gamma\times[0,T_{\varepsilon,\kappa}]\,,\\ q_{\varepsilon,\kappa}&=0 &&\text{on}\quad\Gamma\times[0,T_{\varepsilon,\kappa}]\,,\\ v_{\varepsilon,\kappa}\cdot e_2&=0 &&\text{on}\quad\Gamma_{bot}\times[0,T_{\varepsilon,\kappa}]\,,\\ 
\psi_{\varepsilon,\kappa}&=\phi^{\varepsilon,\kappa}_2\circ\phi^{\varepsilon,\kappa}_1&&\text{in}\quad\Omega\times\{t=0\}\,,\\ \Delta \psi_{\varepsilon,\kappa}(t)&=\Delta \psi_{\varepsilon,\kappa}(0)&&\text{in}\quad\Omega\times[0,T_{\varepsilon,\kappa}]\,,\\ \psi_{\varepsilon,\kappa}(t)&=e+\mathcal{J}_\kappa\mathcal{J}_\kappa h_{\varepsilon,\kappa}(t)N&&\text{on}\quad\Gamma\times[0,T_{\varepsilon,\kappa}]\,,\\ \psi_{\varepsilon,\kappa}(t)&=e&&\text{on}\quad\Gamma_{bot}\times[0,T_{\varepsilon,\kappa}] \,, \end{alignat} \end{subequations} where $$ A_{\varepsilon,\kappa} = [\nabla \psi_{\varepsilon,\kappa}] ^{-1} \text{ and } J_{\varepsilon,\kappa} = \det \nabla \psi_{\varepsilon,\kappa} \,. $$ Having solutions to (\ref{HS_ALE_Onephase_reg}), we focus on obtaining a lifespan that is uniform in $\varepsilon$ and $\kappa$. We perform the estimates in a two-step procedure. First, we focus on $\kappa$-independent estimates (that may depend on $\varepsilon$), and then we focus on $\varepsilon$-independent estimates. To simplify notation, we drop the $\varepsilon$ and $\kappa$ subscripts except when they are computationally relevant, but note that our dependent variables implicitly depend upon $\varepsilon$ and $\kappa$. \subsection{$\kappa$-independent estimates} Abusing notation, we redefine $$ \tilde{\tau}=(1,\mathcal{J}_\kappa\mathcal{J}_\kappa h'(x_1,t)),\,\tilde{n}=(-\mathcal{J}_\kappa\mathcal{J}_\kappa h'(x_1,t),1). $$ We define the higher-order energy function to be $$ E(t)=\max_{0\leq s\leq t}|h^{\kappa}(s)|^2_{2}+\int_0^t\|v(s)\|_{2,-}^2ds. 
$$ The solutions to (\ref{phi2epsilonkappa}) have sufficient regularity to ensure that our higher-order energy function $E(t)$ is continuous. We take $T_{\varepsilon,\kappa}$ small enough to ensure that the following four conditions hold: \begin{enumerate} \item for a fixed constant $\delta_1>0$ that only depends on $h_0$, \begin{equation}\label{bootstrap1} \|A(t)-A(0)\|_{L^\infty}\leq \delta_1\ll 1 \,; \end{equation} \item $E(t)\leq z^*$ for a fixed constant $z^*$ (that will be chosen below)\,; \item $\min_{0\leq t\leq T_{\kappa}}-q_{,2}(t)>-\frac{q_{,2}(0)}{2}$\,; \item with $c_b$ given in (\ref{ref_domain}), \begin{equation}\label{bootstrap4} \min_{x_1} h(x_1,t)>c_b \,. \end{equation} \end{enumerate} Again, we let $C$ denote a constant that may change from line to line. This constant may depend on $h_0$ and $\varepsilon$, but not on $\kappa$. We let $\mathcal{P}(x)$ denote a polynomial with coefficients that may depend on $h_0$ and $\varepsilon$, but, again, they do not depend on $\kappa$. This polynomial may change from line to line. Our goal is to prove the following polynomial estimate for the energy: $$ E(t)\leq \mathcal{M}_0+\sqrt[12]{t}\mathcal{Q}(E(t)), $$ for a certain constant $\mathcal{M}_0$ and polynomial $\mathcal{Q}$. We choose $T_{\varepsilon, \kappa}\leq \min\{1,T^*_1\}$ with $T^*_1$ such that $$ \mathcal{Q}(z^*)\left(T^*_1\right)^{1/12}\leq \delta_2\ll 1, $$ for $\delta_2$ a fixed constant satisfying $0<\delta_2<\delta_1\ll 1$. \subsubsection{Estimates for some lower-order norms of $h^{\kappa}$} From (\ref{HS_ALE_Onephase_reg}c), \begin{equation}\label{ht1.5} \int_0^t |h_{t}|^2_{1.5}ds\leq C \, E(t). 
\end{equation} Using \eqref{ht1.5} together with the fundamental theorem of calculus, we have that \begin{equation}\label{lowf} |h(t)-\mathcal{J}_\varepsilon h_0|_{1.5}\leq \sqrt{t}\left(\int_0^t|h_{t}|_{1.5}^2ds\right)^{1/2}\leq C \sqrt{t}\sqrt{E(t)}. \end{equation} Now, \begin{equation}\label{lowf2} |h^{\kappa}(t)-\mathcal{J}_\varepsilon h^{\kappa}_0|_{1.75}\leq C|h(t)-\mathcal{J}_\varepsilon h_0|_{1.5}^{1/2}|h^{\kappa}(t)-\mathcal{J}_\varepsilon h^{\kappa}_0|_{2}^{1/2}\leq C \sqrt{E(t)}t^{1/4}, \end{equation} and $$ |h^{\kappa}(t)|_{1.75}\leq C|h_0|_{1.75}. $$ Notice that, by taking a small enough time and using \eqref{lowf}, we recover our \emph{bootstrap} assumption \eqref{bootstrap4}. \subsubsection{Some estimates for the mapping $\psi$} We consider here the regularity properties of the mapping $\psi$ given in (\ref{HS_ALE_Onephase_reg}e-h). We have the following estimates $$ \|\psi(0)\|_{2,-}\leq C(\delta)|h_0|_{1.5},\,\, \|\psi(0)\|_{2.5,-}\leq C(\delta)|h_0|_{2},\,\,\|\psi(0)\|_{3,-}\leq C(\delta)|h_0|_{2.5}, $$ and, using elliptic estimates, \eqref{J1.25}, and \eqref{lowf2}, \begin{equation}\label{psi2.25b} \|\psi(t)-\psi(0)\|_{2.25,-}\leq C|\psi(t)-\psi(0)|_{1.75}\leq C|h(t)-h(0)|_{1.75}\leq \sqrt[4]{t}C\sqrt{E(t)}, \end{equation} \begin{equation}\label{J1.25b} \|J(t)-J(0)\|_{1.25,-}\leq \sqrt[4]{t}C\sqrt{E(t)}. \end{equation} By taking a small enough time, we can obtain the uniform bounds \begin{equation}\label{Jmin} \max_{0\leq t\leq T_{\varepsilon, \kappa}} \|J(t)\|_{1.25,-}+\|\psi(t)\|_{2.25,-}\leq C,\qquad \min_{0\leq t\leq T_{\varepsilon, \kappa}}\min_{x\in\Omega} J(t)\geq C. 
\end{equation} Using elliptic estimates as in Section \ref{subsection_psi}, we have \begin{equation}\label{boundpsi} \|\psi(t)\|_{2.5,-}\leq C(|h(t)|_{2}+1),\qquad \|\psi(t)\|_{3,-}\leq C(|h(t)|_{2.5}+1). \end{equation} Furthermore, \begin{equation}\label{boundA} \|A(t)-A(0)\|^2_{1,-}\leq tE(t) \,, \end{equation} and using interpolation once again, we have that \begin{align} \|A(t)-A(0)\|_{1.25,-}^2 & \leq C\|A(t)-A(0)\|_{1,-}\|A(t)-A(0)\|_{1.5,-}\leq\sqrt{t}C\sqrt{E(t)} \,, \label{boundA2}\\ \|A(t)-A(0)\|_{1.375,-}^2& \leq \sqrt[4]{t}C\sqrt{E(t)}\,. \label{boundA3} \end{align} In particular, by taking a small enough time, our previous \emph{bootstrap} assumption \eqref{bootstrap1} is strengthened. Furthermore, using \eqref{boundA3}, $$ \|A(t)\|_{1.375,-}\leq C. $$ \subsubsection{Some estimates for lower-order norms of $v$} Just as in Section \ref{secmaxprinL2}, we have the following $L^2$ energy law: $$ |\mathcal{J}_\kappa h (t)|^2_0+2\int_0^t\|v(s)\|_0^2ds=|\mathcal{J}_\kappa \mathcal{J}_\varepsilon h_0|_0^2\,, $$ from which it follows that \begin{equation}\label{lowv} 2\int_0^t\|v(s)\|_{0,-}^2ds\leq |h_0|_0^2. \end{equation} \subsubsection{The estimates for the pressure} The elliptic problem for $q$ is \begin{alignat*}{2} -(A^i_jA^k_jq_{,k})_{,i} & =0 && \text{ in }\Omega \,,\\ q& =0 && \text{ on }\Gamma\,, \\ q_{,k} A^k_j A^i_j N_i &=\psi^2_{,2} && \text{ on } \Gamma_{bot} \,, \end{alignat*} where we recall that on $\Gamma$, $N= e_2$, while on $\Gamma_{bot}$, $N=-e_2$. 
We have that $A_0 A_0^T$ is symmetric and positive semi-definite: $[A_0 A_0^T]^i_j \xi _i\xi _j \ge \mathcal{L} | \xi |^2$; consequently, due to \eqref{boundA3}, $$ \|A_0A_0^T-A(t) A^T(t)\|_{L^\infty}\leq C \sqrt{t}\sqrt{E(t)} \,, $$ and we see that for $t$ sufficiently small, $$ {\frac{\mathcal{L} }{2}} |\xi|^2\leq [A (\cdot, t ) A^T(\cdot , t)]^i_j\xi^i\xi^j\leq 2 \mathcal{L} |\xi|^2. $$ We have that $$ C\|\nabla q\|_{0,-}^2\leq \int_\Omega A^i_jA^k_jq_{,k}q_{,i} dx=\int_{\Gamma_{bot}}\psi^2_{,2}q\, ds. $$ In particular, due to the Poincar\'e inequality, there exists a universal constant such that $$ \|q\|_{1,-}\leq C. $$ Elliptic estimates (see Lemma \ref{lemaa6}) together with \eqref{boundA2} show that $$ \|q\|_{2.25,-}\leq C\|\nabla q\|_{L^\infty(\Omega^-)}\leq C\|q\|_{2.125}, $$ and then, using interpolation and Young's inequality, we find the bound \begin{equation}\label{Q1.25onephase} \|q\|_{2.25,-}\leq C. \end{equation} Thus, once again, elliptic estimates show that \begin{equation}\label{Q1.5onephase} \|q\|_{2.5,-}\leq C\left((1+\|A(t)\|_{1.5,-})\|\nabla q\|_{L^\infty(\Omega^-)}\right)\leq C(1+\|A(t)\|_{1.5,-}) \,, \end{equation} and consequently, \begin{equation}\label{v1.5} \sup_{0\leq t\leq T_{\varepsilon,\kappa}} \|v\|_{1.5,-}\leq C(|h^{\kappa\kappa}|_2+1), \quad \sup_{0\leq t\leq T_{\varepsilon,\kappa}} |h_{t}|_{1}\leq CE(t). \end{equation} \subsubsection{The Rayleigh--Taylor stability condition revisited} By the assumption \eqref{RTonephase} in Theorem \ref{localonephase}, for $0<\varepsilon,\kappa\ll1$ taken sufficiently small, $$ -\nabla p(0)\cdot \tilde{n}(0)>0\text{ at }\Gamma(t), $$ so $$ -A^2_i(0)q_{,2}(0)\tilde{n}^i(0)=-JA^2_i(0)A^2_i(0)q_{,2}(0)>0\text{ at }\Gamma. $$ In particular, \begin{equation}\label{lambdaRT} \lambda=\min_{x_1}-q_{,2}(0)>0\text{ at }\Gamma. 
\end{equation} To simplify notation, we write $$ B^{ik}(t)=A^i_j(t)A^k_j(t), $$ and we study the elliptic problem for $$ \bar{q}=q(t)-q(0): $$ \begin{align*} -(B^{ik}(t)\bar{q}_{,k})_{,i}& =-([B^{ik}(0)-B^{ik}(t)]q_{,k}(0))_{,i}&& \qquad\text{in}\,\Omega\times[0,T_{\varepsilon,\kappa}]\\ \bar{q}& =0 && \qquad\text{on}\,\Gamma\times[0,T_{\varepsilon,\kappa}]\\ \bar{q}_{,k} B^{ik}(t) N_i& =[B^{ik}(0)-B^{ik}(t)]q_{,k}(0)N_i+ \psi^2_{,2}(t)-\psi^2_{,2}(0) && \qquad \text{on}\,\Gamma_{bot}\times[0,T_{\varepsilon,\kappa}]. \end{align*} Using elliptic estimates together with the estimates \eqref{psi2.25b}, \eqref{boundA2}, \eqref{boundA3} and the smallness condition on the time, we obtain \begin{eqnarray*} \|\bar{q}\|_{2,-}&\leq& C\left(\|([B^{ik}(0)-B^{ik}(t)]q_{,k}(0))_{,i}\|_{0,-}+|[B^{ik}(0)-B^{ik}(t)]q_{,k}(0)N_i+ \psi^2_{,2}(t)-\psi^2_{,2}(0)|_{0.5}\right)\\ &\leq& C\left(\|B(0)-B(t)\|_{1.25,-}\|q(0)\|_{2,-}+\|\nabla[B(0)-B(t)]\|_{0,-}\|q(0)\|_{2.25,-}\right.\\ &&\left.+|[B^{2k}(0)-B^{2k}(t)]|_{0.5}|q_{,k}(0)|_{0.75}+|\psi^2_{,2}(t)-\psi^2_{,2}(0)|_{0.5}\right)\\ &\leq& \sqrt{t}\mathcal{P}(E(t))\\ &\leq& \delta_2. \end{eqnarray*} We use the inequality $$ \|fg\|_{r,-}\leq C\|f\|_{r,-}\|g\|_{s,-},\,\,0\leq r\leq s,\,s>1+r, $$ to find that $$ \|[B^{ik}(0)-B^{ik}(t)]q_{,ki}(0)\|_{0.25,-}\leq C\|q(0)\|_{2.25,-}\|[B^{ik}(0)-B^{ik}(t)]\|_{1.375,-}. $$ We apply \eqref{boundA3} to find that $$ \|[B^{ik}(0)-B^{ik}(t)]q_{,ki}(0)\|_{0.25,-}\leq \sqrt[8]{t}\mathcal{P}(E(t)). $$ This is the only place where the bound \eqref{boundA3} plays an essential role. For any other smallness estimate concerning $A(t)-A(0)$, the bound \eqref{boundA2} suffices. We want a bound showing the smallness of $\bar{q}_{,2}$ pointwise on $\Gamma$. As a result, we need an estimate stronger than just $H^2$. 
We therefore focus our attention on $H^{2.25}$. Elliptic regularity then shows that \begin{align} \|\bar{q}\|_{2.25,-}&\leq C\left(\|([B^{ik}(0)-B^{ik}(t)]q_{,k}(0))_{,i}\|_{0.25,-}+|[B^{ik}(0)-B^{ik}(t)]q_{,k}(0)N_i+ \psi^2_{,2}(t)-\psi^2_{,2}(0)|_{0.75}\right.\nonumber\\ & \qquad \left.+(1+\|B(t)\|_{1.25,-})\|\nabla \bar{q}\|_{L^\infty(\Omega^-)}\right)\nonumber\\ &\leq \sqrt[8]{t}\mathcal{P}(E(t))\nonumber\\ &\leq \delta_2\label{qbar}. \end{align} Consequently, on $\Gamma$, we have that $$ -q_{,2}(x_1,t)=-q_{,2}(x_1,t)+q_{,2}(x_1,0)-q_{,2}(x_1,0)\geq -q_{,2}(x_1,0)-C\delta_2, $$ and our \emph{bootstrap} assumption \eqref{bootstrap4} is satisfied: $$ -\min_{x_1}q_{,2}(x_1,t)\geq -\min_{x_1}q_{,2}(x_1,0)-C\delta_2\geq -\frac{\min_{x_1}q_{\kappa,2}(x_1,0)}{2}. $$ \subsubsection{The estimate for $h\in L^2 ( 0,T_ \kappa ; H^{2.5} (\Gamma))$} From equation (\ref{HS_ALE_Onephase_reg}a), we see that $$ v\cdot \tau=-\tau\cdot e_2\; \text{ at }\Gamma. $$ It follows that $$ -\frac{v'\cdot \tau}{\tilde{n}\cdot e_2+v\cdot \tilde{n}}=-\frac{v'\cdot \tau}{1+h_{ t}}=\frac{h^{\kappa\kappa\prime\prime}}{g^{3/2}}. $$ Thus, \begin{eqnarray*} h^{\kappa\kappa\prime\prime}&=&-\frac{v_{1}'+h^{\kappa\kappa\prime}v_{2}'}{1+h_t}(1+(h^{\kappa\kappa\prime})^2)\\ &=&-(v_{ 1}'+h^{\kappa\kappa\prime}v_{ 2}')(1+(h^{\kappa\kappa\prime})^2)+\frac{(v_{ 1}'+h^{\kappa\kappa\prime}v_{ 2}')h_{ t}}{1+h_{ t}}(1+(h^{\kappa\kappa\prime})^2), \end{eqnarray*} and, using \eqref{v1.5}, $$ \int_0^t|h^{\kappa\kappa}|_{2.5}^2ds\leq CE(t). $$ \subsubsection{The energy estimates} We write (\ref{HS_ALE_Onephase}a) as $$ v^i+ A^k_i(q+\psi^2)_{,k}=0 \text{ in }\Omega. 
$$ We take two horizontal derivatives of this expression, test against $v''$ and integrate by parts to find that $$ \int_0^t\int_{\Omega^-}|v''|^2 dxdy+\mathfrak{I}_1+\mathfrak{I}_2+\mathfrak{I}_3=0. $$ The higher-order terms are \begin{align*} \mathfrak{I}_1& =\int_0^t\int_{\Omega^-}A^k_i (q+\psi\cdot e_2)''_{,k}(v^i)''dxdy, \\ \mathfrak{I}_2&=\int_0^t\int_{\Omega^-}(A^k_i)'' (q+\psi\cdot e_2)_{,k}(v^i)''dx dy,\end{align*} while $$ \mathfrak{I}_3=2\int_0^t\int_{\Omega^-}(A^k_i)' (q+\psi\cdot e_2)'_{,k}(v^i)''dx dy $$ is the lower-order term. Integrating by parts in the term $\mathfrak{I}_1$ and using $JA^k_iN^k=\sqrt{g}n_i$, we obtain $$ \mathfrak{I}_1=\mathfrak{J}_1+\mathfrak{J}_2, $$ with \begin{align*} \mathfrak{J}_1&=-\int_0^t\int_{\Omega^-}(q+\psi\cdot e_2)''(A^k_i (v^i)'')_{,k}dx dy, \\ \mathfrak{J}_2&=\int_0^t\int_\Gamma \psi''\cdot e_2 J^{-1}(v''\cdot \tilde{n}) dsdy=\int_0^t\int_\Gamma J^{-1} \mathcal{J}_\kappa\mathcal{J}_\kappa h'' (v''\cdot \tilde{n}) dsdy. 
\end{align*} Using the Piola identity $(JA^k_i)_{,k} =0$ and the divergence-free condition $v^i_{,k} A^k_i=0$, we see that $$ (A^k_i (v'')^i)_{,k}=(A^k_i)_{,k} (v'')^i+A^k_i (v'')^i_{,k}=-J_{,k} A^k_i J^{-1}(v'')^i-(A^k_i)'' v^i_{,k}-2(A^k_i)'(v^i)'_{,k}, $$ and $\mathfrak{J}_1=\mathfrak{K}_1+\mathfrak{K}_2+\mathfrak{K}_3$, where \begin{align*} \mathfrak{K}_1&=\int_0^t\int_{\Omega^-}(q+\psi^2)''(A^k_i)'' v^i_{,k}dx dy, \\ \mathfrak{K}_2&=\int_0^t\int_{\Omega^-}(q+\psi^2)''2 (A^k_i)' (v^i)'_{,k}dx dy,\\ \mathfrak{K}_3&=\int_0^t\int_{\Omega^-}(q+\psi^2)''J_{,k}J^{-1}A^k_i(v^i)''dx dy. \end{align*} The term $\mathfrak{K}_2$ can be easily bounded using \eqref{propA}, \eqref{boundpsi} and \eqref{Q1.5onephase} together with the Sobolev embedding theorem: $$ |\mathfrak{K}_2|\leq C\int_0^t\|v\|_{2,-}\|A'\|_{L^4}\left(\|q\|_{2.5,-}+\|\psi\cdot e_2\|_{2.5,-}\right)dy\leq \sqrt{t}\mathcal{P}(E(t)). $$ To bound the term $\mathfrak{K}_3$, using H\"{o}lder's inequality with an $L^2$--$L^4$--$L^4$--$L^\infty$ bound, we have that $$ \mathfrak{K}_3\leq \sqrt{t}\mathcal{P}(E(t)). $$ The term $\mathfrak{K}_1$ can be simplified using \eqref{propA}; we write $\mathfrak{K}_1=\mathfrak{L}_1+\mathfrak{L}_2$, with \begin{align*} \mathfrak{L}_1&=-\int_0^t\int_{\Omega^-}(q+\psi^2)''(2A'\nabla\psi' A)^k_i v^i_{,k}dx dy, \\ \mathfrak{L}_2&=-\int_0^t\int_{\Omega^-}(q+\psi^2)''A^k_j\psi^j_{,11r} A^r_i v^i_{,k}dx dy, \end{align*} where we recall that $\psi_{,11}=\psi''$. 
$\mathfrak{L}_1$ is estimated using H\"{o}lder's inequality and the Sobolev embedding theorem: \begin{eqnarray*} |\mathfrak{L}_1|&\leq& \int_0^t (\|q\|_{2.5,-}+\|\psi\|_{2.5,-})\|A\|_{L^\infty}\|v\|_{1.5,-}\|A\|_{1.5,-}\|\nabla \psi\|_{1.5,-}dy\\ &\leq & C\sqrt{t}\mathcal{P}(E(t)). \end{eqnarray*} Similarly, \begin{eqnarray*} |\mathfrak{L}_2|&\leq& C(\| q\|_{2.5,-}+\|\psi^2\|_{2.5,-})\|A\|_{L^\infty}^2\sqrt{t}\left(\int_0^t\|\psi(y)\|_{3,-}^2dy\right)^{0.5}\|v\|_{1.5,-}\\ &\leq&\sqrt{t}\mathcal{P}(E(t)). \end{eqnarray*} Next, using \eqref{propA}, we write $ \mathfrak{I}_2=\mathfrak{K}_4+\mathfrak{K}_5$, where \begin{align*} \mathfrak{K}_4&=-\int_0^t\int_{\Omega^-}A^k_j\psi^j_{,11r} A^r_i (q+\psi^2)_{,k}(v^i)''dx dy,\\ \mathfrak{K}_5&=-\int_0^t\int_{\Omega^-}2(A')^k_j\psi^j_{,1r} A^r_i (q+\psi^2)_{,k}(v^i)''dx dy. \end{align*} We have that $$ |\mathfrak{K}_5|\leq \int_0^t C\|A\|_{1.5,-}\|\nabla\psi\|_{1.5,-}\|A\|_{L^\infty}\| \nabla (q+\psi^2)\|_{L^\infty}\|v\|_{2,-}dy\leq \sqrt{t}\mathcal{P}(E(t)). $$ For $\mathfrak{K}_4$, we integrate by parts and write $\mathfrak{K}_4=\mathfrak{L}_3+\mathfrak{L}_4$, where \begin{align*} \mathfrak{L}_3&=\int_0^t\int_{\Omega^-}\psi^j_{,11} (A^k_jA^r_i (q+\psi^2)_{,k}(v^i)'')_{,r}dx dy, \\ \mathfrak{L}_4&=-\int_0^t\int_{\Gamma}\psi^j_{,11} A^k_jA^r_i (q+\psi^2)_{,k}(v^i)''N^rds. 
\end{align*} We further decompose $\mathfrak{L}_3$ as $\mathfrak{L}_3=\mathfrak{M}_1+\mathfrak{M}_2+\mathfrak{M}_3+\mathfrak{M}_4$, where \begin{align*} \mathfrak{M}_1&=\int_0^t\int_{\Omega^-}\psi^j_{,11} A^k_{j,r}A^r_i (q+\psi^2)_{,k}(v^i)''dx dy, \\ \mathfrak{M}_2&=\int_0^t\int_{\Omega^-}\psi^j_{,11} A^k_{j}A^r_{i,r} (q+\psi^2)_{,k}(v^i)''dx dy, \\ \mathfrak{M}_3&=\int_0^t\int_{\Omega^-}\psi^j_{,11} A^k_{j}A^r_i (q+\psi^2)_{,rk}(v^i)''dx dy, \\ \mathfrak{M}_4&=\int_0^t\int_{\Omega^-}\psi^j_{,11} A^k_{j}A^r_i (q+\psi^2)_{,k}(v^i)''_{,r}dx dy. \end{align*} For the first three terms, \begin{eqnarray*} |\mathfrak{M}_1|+|\mathfrak{M}_2|+|\mathfrak{M}_3|&\leq& \int_0^t\|\nabla\psi\|_{1.5,-}\|v\|_{2,-}\|A\|_{L^\infty}\left[\|A\|_{1.5,-}(\|\nabla q\|_{1.25,-}\right.\\ &&+\left.\|\nabla\psi\|_{1.25,-})+\|A\|_{L^\infty}(\|\nabla q\|_{1.5,-}+\|\nabla\psi\|_{1.5,-})\right]dy\\ &\leq& \sqrt{t}\mathcal{P}(E(t)). \end{eqnarray*} In the term $\mathfrak{M}_4$, we use $v^i_{,k} A^k_i =0$ and write $\mathfrak{M}_4=\mathfrak{N}_1+\mathfrak{N}_2$, where \begin{align*} \mathfrak{N}_1&=-\int_0^t\int_{\Omega^-}\psi^j_{,11} A^k_{j}(A^r_i)'' (q+\psi^2)_{,k} v^i_{,r}dx, \\ \mathfrak{N}_2&=-2\int_0^t\int_{\Omega^-}\psi^j_{,11} A^k_{j}(A^r_i)' (q+\psi^2)_{,k} v^i_{,1r}dx. \end{align*} These terms can be estimated in the same fashion as the term $\mathfrak{K}_1$ above. 
Also, \begin{eqnarray*} |\mathfrak{N}_1|&\leq& \sqrt{t}C\left(\int_0^t\|\psi(y)\|_{3,-}^2dy\right)^{0.5}\|v\|_{1.5,-}\|\nabla\psi\|_{1.5,-}(\|\nabla q\|_{1.25,-}+\|\nabla \psi\|_{1.25,-})\\ &\leq& \sqrt{t}\mathcal{P}(E(t)), \end{eqnarray*} and \begin{eqnarray*} |\mathfrak{N}_2|&\leq& \sqrt{t}C\left(\int_0^t\|v(y)\|_{2,-}^2dy\right)^{0.5}\|\nabla \psi \|_{1.5,-}^2(\|\nabla q\|_{1.25,-}+\|\nabla \psi\|_{1.25,-})\\ &\leq& \sqrt{t}\mathcal{P}(E(t)). \end{eqnarray*} The term $\mathfrak{I}_3$ can be bounded using H\"{o}lder's inequality and the Sobolev embedding theorem: $$ |\mathfrak{I}_3|\leq \sqrt{t}\mathcal{P}(E(t)). $$ We next analyze the boundary integrals. We have that $$ BI=\mathfrak{J}_2+\mathfrak{L}_4=\int_0^t\int_{\Gamma}(\psi''\cdot (v+e_2))((v^i)'' \tilde{n}_iJ^{-1})ds. $$ To estimate these terms we will extensively use the lower bound for $J$. We write $BI=\mathfrak{O}_1+\mathfrak{O}_2+\mathfrak{O}_3$, where \begin{align*} \mathfrak{O}_1&=\int_0^t\int_{\Gamma}(\psi''\cdot (v+e_2))h_t''J^{-1}dsdy, \\ \mathfrak{O}_2&=-\int_0^t\int_{\Gamma}(\psi''\cdot (v+e_2))(v \cdot \tilde{n}'' J^{-1})dsdy, \\ \mathfrak{O}_3&=-2\int_0^t\int_{\Gamma}(\psi''\cdot (v+e_2))(v' \cdot \tilde{n}'J^{-1})dsdy. \end{align*} The inequality $|v|_{1}\leq C\|v\|_{1.5,-}$ together with the embedding $H^{0.25}(\Gamma)\subset L^4(\Gamma)$ shows that $$ |\mathfrak{O}_3|\leq C(|v|_1^2+1)\int_0^t |h^{\kappa\kappa}|^2_{2.25}dy\leq \sqrt{t}\mathcal{P}(E(t)). $$ The term $\mathfrak{O}_2$ reads $$ \mathfrak{O}_2=\int_0^t\int_{\Gamma}h^{\kappa\kappa\prime\prime}(v_2+1)(v_1 h^{\kappa\kappa\prime\prime\prime})J^{-1}dsdy. 
$$ By forming an exact derivative, integrating by parts and using \eqref{boundpsi}, we see that $$ |\mathfrak{O}_2|\leq C\int_0^t |h^{\kappa\kappa\prime\prime}|_{L^3}^2|\nabla\psi'|_{L^3}dy\leq C\int_0^t |h^{\kappa\kappa}|_{2+1/6}^2\|\psi\|_{2+2/3,-}dy\leq C\int_0^t |h^{\kappa\kappa}|_{2+1/6}^3dy. $$ Consequently, due to the interpolation inequality $$ |h^{\kappa\kappa}|_{2+1/6}^3\leq C|h^{\kappa\kappa}|_{2}^2|h^{\kappa\kappa}|_{2.5}, $$ we find that $$ |\mathfrak{O}_2|\leq \sqrt{t}\mathcal{P}(E(t)). $$ Using $[(v+e_2)\cdot \tau]=0$ and $\sqrt{g}n_i=JA^k_iN^k$, the term $\mathfrak{O}_1$ can be written as \begin{align*} \mathfrak{O}_1&=\int_0^t\int_{\Gamma}(\psi''\cdot [(v+e_2)\cdot n] n)h_t''J^{-1}dsdy\\ &=\int_0^t\int_{\Gamma}(\psi''\cdot [-A^2_iq_{,2} (\sqrt{g})^{-1}A^2_i] n)h_t''dsdy\\ &=\int_0^t\int_{\Gamma}\psi''\cdot [-q_{,2} ] \tilde{n}h_t'' J^{-2}dsdy\\ &=\int_0^t\int_{\Gamma}h^{\kappa\kappa\prime\prime}[-q_{,2}]h_t''J^{-2}dsdy\\ &=\int_0^t\int_{\Gamma}h^{\kappa\kappa\prime\prime}\left[\frac{-q_{,2}(t)}{J^{-2}(t)}+\frac{q_{,2}(0)}{{J^{-2}(0)}}-\frac{q_{,2}(0)}{J^{-2}(0)}\right]h_t''dsdy\\ &=\mathfrak{P}_1+\mathfrak{P}_2+\mathfrak{P}_3. \end{align*} Using \eqref{qbar}, \begin{align*} |\mathfrak{P}_1| & =\left|\int_0^t\int_{\Gamma}h^{\kappa\kappa\prime\prime}J^{-2}(t)[q_{,2}(t)-q_{,2}(0)]h_t''dsdy\right|\\ & \leq C\int_0^t|h^{\kappa\kappa}|_{2.5}\|J^{-2}\|_{L^\infty}\|q_{,2}(t)-q_{,2}(0)\|_{1.25,-}|h_t|_{1.5}dy\\ & \leq \delta_2CE(t). 
\end{align*} The second error term can be bounded in the same way using \eqref{J1.25b}: \begin{align*} |\mathfrak{P}_2| & =\left|\int_0^t\int_{\Gamma}h^{\kappa\kappa\prime\prime}q_{,2}(0)[J^{-2}(t)-J^{-2}(0)]h_t''dsdy\right|\\ & \leq C\int_0^t|h^{\kappa\kappa}|_{2.5}\|J(t)-J(0)\|_{1.25,-}|h_t|_{1.5}dy\\ & \leq \delta_2CE(t). \end{align*} Finally, $\mathfrak{P}_3=\mathfrak{Q}_1+\mathfrak{Q}_2$ with \begin{align} \mathfrak{Q}_1&=\int_0^t\int_{\Gamma}h^{\kappa\prime\prime}[\mathcal{J}_\kappa(-q_{,2}(0)J^{-2}(0)h_t'')-[-q_{,2}(0)J^{-2}(0)]\mathcal{J}_\kappa h_t'']dsdy, \label{Qone}\\ \mathfrak{Q}_2&=\int_0^t\int_{\Gamma}h^{\kappa\prime\prime}[-q_{,2}(0)J^{-2}(0)] h_t^{\kappa\prime\prime}dsdy. \nonumber \end{align} The term $\mathfrak{Q}_1$ can be bounded using Proposition \ref{commutator}: $$ |\mathfrak{Q}_1|\leq \int_0^t|h^{\kappa\kappa}|_2|q_{,12}(0)J^{-2}(0)+q_{,2}(0)J^{-3}(0)J_{,1}(0)|_{L^{\infty}}|h^{\kappa}_t|_1 dy. $$ The term $|q_{,12}(0)|_{L^{\infty}}$ can be bounded (using standard elliptic estimates) in terms of the initial data as long as the initial data satisfies $|\mathcal{J}_\varepsilon h_0|_{2.5+s}<\infty$, $s>0$. The same situation arises when dealing with $J_{,1}(0)$. 
Consequently, this term $\mathfrak{Q}_1$ requires $\varepsilon>0$, and, in this latter case, we have $$ |\mathfrak{Q}_1|\leq\sqrt{t}\mathcal{P}(E(t)).$$ Recalling \eqref{Jmin} and \eqref{lambdaRT}, the term $\mathfrak{Q}_2$ gives us an energy term $$ C\frac{\lambda}{2}\left[|h^{\kappa\prime\prime}|_0^2-|h_0^{\kappa\varepsilon\prime\prime}|_0^2\right]\leq \frac{1}{2}\int_{\Gamma}-q_{,2}(0)J^{-2}(0)[(h^{\kappa\prime\prime})^2-(h_0^{\kappa\varepsilon\prime\prime})^2]ds \,; $$ hence, \begin{equation}\label{almostdone} \int_0^t\|v(y)\|_{0,-}^2+\|v''(y)\|_{0,-}^2dy+|h^{\kappa}(t)|_2^2\leq \mathcal{M}_0+\sqrt[12]{t}\mathcal{P}(E(t)), \end{equation} where $\mathcal{M}_0$ is a number depending only on the initial data, $h_0$, and the value of the regularizing parameter $\varepsilon>0$. \subsubsection{The Hodge decomposition elliptic estimates} Since in each phase $ \operatorname{curl} u=0$, it follows that $ v^2_{,j} A^j_1 - v^1_{,j}A^j_2 =0$. Therefore, $$ (A^j_1(t)-A^j_1(0))v^2_{,j}-(A^j_2(t)-A^j_2(0))v^1_{,j} =-A^j_1(0)v^2_{,j}+A^j_2(0)v^1_{,j} \,, $$ so that $$ \|A^j_1(0)v^2_{,j}-A^j_2(0)v^1_{,j}\|_{1,-}\leq C\|A(t)-A(0)\|_{L^\infty}\|v\|_{2,-}+\|A(t)-A(0)\|_{1.5,-}\|v\|_{1.5,-}, $$ and $$ \int_0^t\|A^j_1(0)v^2_{,j}(y)-A^j_2(0)v^1_{,j}(y)\|_{1,-}^2dy\leq\sqrt{t}\mathcal{P}(E(t)). $$ Similarly, since in each phase $v^j_{,i} A^i_j =0$, $$ [A^i_j(t)-A^i_j(0)]v^j_{,i}=-A^i_j(0)v^j_{,i}, $$ and $$ \int_0^t\|A^i_j(0)v^j_{,i}(y)\|_{1,-}^2dy\leq\sqrt{t}\mathcal{P}(E(t)). $$ Finally, $$ |v_2|_{1.5}\leq |v''\cdot N|_{-0.5}\leq C\|v''\|_{0,-}\leq \mathcal{M}_0+\sqrt[12]{t}\mathcal{P}(E(t)). $$ Applying Proposition \ref{Hodge2}, we obtain \begin{equation}\label{vH2} \int_0^t\|v(y)\|_{2,-}^2dy\leq \mathcal{M}_0+\sqrt[12]{t}\mathcal{P}(E(t)). 
\end{equation} \eqref{vH2} together with \eqref{almostdone} and the properties of the mollifiers gives us the bound $$ E(t)\leq \mathcal{M}_0+\sqrt[12]{t}\mathcal{Q}(E(t)), $$ with $E(t)$ being a continuous function. Thus, we infer the existence of $T^*_\varepsilon$ such that $$ E(t)\leq 2\mathcal{M}_0\quad\forall\, 0\leq t\leq T^*_\varepsilon. $$ Notice that $T^*_\varepsilon$ depends only on $\varepsilon$ and $h_0$. \subsubsection{Passing to the limit and uniqueness} Once the uniform bounds are obtained, we can pass to the limit $\kappa\rightarrow0$ in the standard way using the Rellich--Kondrachov theorem. \subsection{$\varepsilon$-independent estimates} In the above analysis, only the integral $\mathfrak{Q}_1$ in (\ref{Qone}) depends on our smoothing parameter $\varepsilon>0$; nevertheless, upon passing to the limit $\kappa\rightarrow0$, the integral $\mathfrak{Q}_1$ no longer appears. The main point is that the regularizing effect due to $\varepsilon>0$ was only necessary because of $\kappa>0$. Once $\kappa=0$, we can close the estimates and let $\varepsilon$ tend to zero. 
After taking the limit in $\kappa$, we have a solution to the following system \begin{align*} v_{\varepsilon}^i+ (A_{\varepsilon})^k_i(q_{\varepsilon}+\psi_{\varepsilon}^2)_{,k}&= 0 \qquad&&\text{in}\quad\Omega\times[0,T_{\varepsilon}]\,,\\ (A_{\varepsilon})^i_j(v_{\varepsilon})^j_{,i} &= 0 &&\text{in}\quad\Omega\times[0,T_{\varepsilon}]\,,\\ h_{\varepsilon}(t)&=\mathcal{J}_\varepsilon h_0+\int_0^t v_{\varepsilon}^i \tilde{n}_i ds\qquad&&\text{on}\quad\Gamma\times[0,T_{\varepsilon}]\,,\\ q_{\varepsilon}&=0 &&\text{on}\quad\Gamma\times[0,T_{\varepsilon}]\,,\\ v_{\varepsilon}\cdot e_2&=0 &&\text{on}\quad\Gamma_{bot}\times[0,T_{\varepsilon}]\,,\\ \psi_{\varepsilon}&=\phi^{\varepsilon}_2\circ\phi^{\varepsilon}_1&&\text{in}\quad\Omega\times\{t=0\}\,,\\ \Delta \psi_{\varepsilon}(t)&=\Delta \psi_{\varepsilon}(0)&&\text{in}\quad\Omega\times[0,T_{\varepsilon}]\,,\\ \psi_{\varepsilon}(t)&=e+h_\varepsilon(t)N&&\text{on}\quad\Gamma\times[0,T_{\varepsilon}]\,,\\ \psi_{\varepsilon}(t)&=e&&\text{on}\quad\Gamma_{bot}\times[0,T_{\varepsilon}] \,, \end{align*} where $\phi^{\varepsilon}_1$ and $\phi^{\varepsilon}_2$ are given by \begin{equation*} \phi^{\varepsilon}_1(x_1,x_2)=\left(x_1,x_2+\mathcal{J}_\delta\mathcal{J}_\varepsilon h_0(x_1)\left(1-\frac{x_2}{c_b}\right)\right), \end{equation*} and \begin{align*} \Delta \phi^{\varepsilon}_2&=0\qquad &&\text{on}\quad \Omega^{\delta,\varepsilon}(0)\times[0,T_{\varepsilon}]\,,\\ \phi^{\varepsilon}_2(t)&= e+[\mathcal{J}_\varepsilon
h_0(x_1)-\mathcal{J}_\delta\mathcal{J}_\varepsilon h_0(x_1)]e_2 \qquad &&\text{on}\quad \Gamma\times[0,T_{\varepsilon}]\,,\\ \phi^{\varepsilon}_2 &= e &&\text{on}\quad \Gamma_{bot}\times[0,T_{\varepsilon}] \,. \end{align*} Now we define the energy $$ E(t)=\max_{0\leq s\leq t}|h(s)|_{2}+\int_0^t\|v(s)\|_{2,-}^2ds. $$ We repeat the energy estimates. The only modification affects the term $\mathfrak{O}_1$, which now reads \begin{align*} \mathfrak{O}_1&=\int_0^t\int_{\Gamma}(\psi''\cdot [(v+e_2)\cdot n] n)h_t''J^{-1}dsdy\\ &=\int_0^t\int_{\Gamma}(\psi''\cdot [-A^2_iq_{,2} (\sqrt{g})^{-1}A^2_i] n)h_t''dsdy\\ &=\int_0^t\int_{\Gamma}\psi''\cdot [-q_{,2} ] \tilde{n}h_t'' J^{-2}dsdy\\ &=\int_0^t\int_{\Gamma}h''[-q_{,2}]h_t''J^{-2}dsdy\\ &=\int_0^t\int_{\Gamma}h''\left[\frac{-q_{,2}(t)}{J^{-2}(t)}+\frac{q_{,2}(0)}{{J^{-2}(0)}}-\frac{q_{,2}(0)}{J^{-2}(0)}\right]h_t''dsdy\\ &=\mathfrak{P}_1+\mathfrak{P}_2+\mathfrak{P}_3. \end{align*} These terms can be bounded in a straightforward way. We get the polynomial estimate $$ E(t)\leq \mathcal{M}_0+\sqrt[12]{t}\mathcal{Q}(E(t)), $$ and the existence of $T^*$ such that $$ E(t)\leq 2\mathcal{M}_0\quad\forall\, 0\leq t\leq T^*. $$ This $T^*$ only depends on the initial data $h_0$. Now, we can pass to the limit $\varepsilon\rightarrow0$ using the Rellich--Kondrachov theorem. The uniqueness is obtained using the energy method as in Section \ref{sec4}. This concludes the proof of Theorem \ref{localonephase}. \section{Proof of Theorem \ref{Cinftyonephase}: Instantaneous parabolic smoothing}\label{sec6} The proof of this result is a two-step procedure. First, we show that we can always gain an extra half derivative almost everywhere in time. The second step of the argument is a classical bootstrapping procedure. 
\subsection{Two-phase Muskat problem} We begin with the two-phase case, and consider initial data $h_{\delta0}\in H^3$ for the infinitely-deep Muskat problem (\ref{HS_Eulerian}a--e) or the confined Muskat problem (\ref{HS_Eulerian}a--d,e',f) satisfying the smallness criterion \eqref{cgs1} in Theorem \ref{localsmall}. We define the higher-order energy function \begin{equation}\label{energyH3} E(t)=\max_{0\leq s\leq t}\{|h(s)|_3^2\}+\int_0^t\|w(s)\|_{3,\pm}^2ds. \end{equation} Repeating our energy estimates using three tangential derivatives rather than two, we obtain the polynomial inequality $$ E(t)\leq C|h_{\delta0}|_3^2+\sqrt{t}\mathcal{P}(E(t)). $$ As a consequence, there exists a time $T^*$ such that we have the bound $$ \max_{0\leq s\leq T^*}\{|h(s)|_3^2\}+\int_0^{T^*}|h(s)|_{3.5}^2ds\leq C|h_{\delta0}|_3^2. $$ Interpolating with the bound obtained in Theorem \ref{localsmall}, we have that \begin{equation}\label{cinfty2} \max_{0\leq s\leq T^*}\{|h(s)|_{2.5}^2\}+\int_0^{T^*}|h(s)|_{3}^2ds\leq C|h_{\delta0}|_{2.5}^2. \end{equation} Now, given $h_0\in H^2$ satisfying the smallness condition \eqref{cgs1}, due to Theorem \ref{localsmall}, we have a solution $h\in C([0,T^*],H^2)\cap L^2(0,T^*;H^{2.5}(\Gamma))$. In particular, we can choose $0<\delta\leq T^*$ arbitrarily small so that $h(\delta)=h_{\delta0}\in H^{2.5}(\Gamma)$ verifies the smallness criterion \eqref{cgs1}. We are going to use $h_{\delta0}$ as the new initial data for the problem. Applying \eqref{cinfty2}, we have thus that the initial data $h_{\delta0}$ provides us with a solution $$ h_\delta\in C([\delta,T^*],H^{2.5}(\Gamma))\cap L^2(\delta ,T^*;H^{3}(\Gamma)).
$$ Due to the uniqueness of the solution proved in Theorem \ref{localsmall}, we conclude that the original initial data $h_0$ gives us a solution $$ h\in C([0,T^*],H^2(\Gamma))\cap C([\delta,T^*],H^{2.5}(\Gamma))\cap L^2(\delta,T^*; H^{3}(\Gamma)) $$ for an arbitrarily small $\delta>0$. Now we proceed by bootstrapping. We can repeat the argument and show that for every positive time, the unique solution in Theorem \ref{localsmall} satisfies $$ h(\cdot,t)\in C^\infty(\Gamma)\,\,\text{ if }\delta\leq t\leq T^*,\,\,\forall\delta>0. $$ \subsection{One-phase Muskat problem} For the one-phase Muskat problem (\ref{HS_Eulerian_Onephase}a--e), we consider $h_{\delta0}\in H^3$ satisfying the Rayleigh--Taylor stability condition \eqref{RTstable}. Once again redoing the energy estimates with three tangential derivatives, there exists a time $T^*$ for which we have the bound $$ \max_{0\leq s\leq T^*}\{|h(s)|_3^2\}+\int_0^{T^*}|h(s)|_{3.5}^2ds\leq C|h_{\delta0}|_3^2. $$ Interpolating with the bound obtained in Theorem \ref{localonephase}, we obtain the bound \eqref{cinfty2}. Now, given $h_0\in H^2(\Gamma)$ satisfying the Rayleigh--Taylor stability condition \eqref{RTstable}, due to Theorem \ref{localonephase}, we have a solution $h\in C([0,T^*],H^2(\Gamma))\cap C([\delta,T^*],H^{2.5}(\Gamma))\cap L^2(\delta,T^*; H^{3}(\Gamma))$. By bootstrapping, we see that $h(\cdot , t)\in C^\infty(\Gamma)$ if $t\geq\delta>0$. \section{Auxiliary results} \subsection{The $H^{d/2}$-norm of products} We need the following \begin{proposition}\label{H0.5_fg} For all $\delta > 0$, there exists $C_\delta > 0$ such that \begin{equation*} |fg|_{0.5} \le C_\delta |f|_{0.5+\delta} |g|_{0.5}\,, \end{equation*} and, in two dimensions, \begin{equation*} \|fg\|_{1,\pm} \le C_\delta \|f\|_{1+\delta,\pm} \|g\|_{1,\pm}\,.
\end{equation*} \end{proposition} \begin{proof} The $L^2$ part can be bounded as follows: \begin{equation}\label{app1} |fg|^2_0\leq \|f\|_{L^\infty({\mathbb R})}^2|g|_0^2\leq C_\delta|f|_{0.5+\delta}^2|g|_{0.5}^2, \end{equation} where we have used the Sobolev embedding $$ H^{0.5+\delta}({\mathbb R})\hookrightarrow L^\infty({\mathbb R}). $$ The seminorm term can be bounded using the Kato--Ponce inequality for $\Lambda=\sqrt{-\partial_x^2}$: $$ |\Lambda^{0.5}(fg)|_0\leq C_\delta\left(\|g\|_{L^{\frac{1}{\delta}}({\mathbb R})}\|\Lambda^{0.5} f\|_{L^{\frac{2}{1-2\delta}}({\mathbb R})}+\|f\|_{L^{\infty}({\mathbb R})}\|\Lambda^{0.5} g\|_{L^{2}({\mathbb R})}\right). $$ The Sobolev embeddings $$ H^{\delta}({\mathbb R})\hookrightarrow L^q({\mathbb R}),\,q\in\left[2,\frac{2}{1-2\delta}\right],\quad H^{0.5}({\mathbb R})\hookrightarrow L^q({\mathbb R}),\,q\in[2,\infty), $$ give us \begin{equation}\label{app2} |\Lambda^{0.5}(fg)|_0\leq C_\delta\|g\|_{0.5}\|f\|_{0.5+\delta}. \end{equation} Collecting the estimates \eqref{app1} and \eqref{app2}, we conclude the first statement. With the same ideas and the embeddings $$ H^{\delta}({\mathbb R}^2)\hookrightarrow L^q({\mathbb R}^2),\,q\in\left[2,\frac{2}{1-\delta}\right],\quad H^{1}({\mathbb R}^2)\hookrightarrow L^q({\mathbb R}^2),\,q\in[2,\infty), $$ we conclude the result. \end{proof} \subsection{The Hodge decomposition elliptic estimates} \begin{proposition}\label{Hodge}Let $\Omega$ be a domain with boundary $\partial\Omega$ of Sobolev class $H^{k+0.5}$.
Then for $v \in H^k(\Omega)$, \begin{equation*} \|v\|_{H^k(\Omega)} \le C \Big[\|v\|_{L^2(\Omega)} + \|{\operatorname{curl}} v\|_{H^{k-1}(\Omega)} + \|{\operatorname{div}} v\|_{H^{k-1}(\Omega)} + \|v\cdot N\|_{H^{k-0.5}(\bdy\Omega)}\Big]\,, \end{equation*} where $N$ denotes the outward unit normal to $\bdy \Omega$. \end{proposition} \begin{proposition}\label{Hodge2}Let $\Omega$ be a domain with boundary $\partial\Omega$ of Sobolev class $H^{k+0.5}$. Let $\psi_0$ be a given smooth mapping and define $$ {\operatorname{curl}}_{\psi_0} v={\operatorname{curl}}(v\circ\psi_0)=(A_0)^j_1(v\circ\psi_0)^2_{,j}-(A_0)^j_2(v\circ\psi_0)^1_{,j}, $$ $$ {\operatorname{div}}_{\psi_0} v={\operatorname{div}}(v\circ\psi_0)=(A_0)^i_j(v\circ\psi_0)^j_{,i}, $$ where $A_0=(\nabla\psi_0)^{-1}$. Then for $v \in H^k(\Omega)$, \begin{equation*} \|v\|_{H^k(\Omega)} \le C \Big[\|v\|_{L^2(\Omega)} + \|{\operatorname{curl}}_{\psi_0} v\|_{H^{k-1}(\Omega)} + \|{\operatorname{div}}_{\psi_0} v\|_{H^{k-1}(\Omega)} + \|v\cdot N\|_{H^{k-0.5}(\bdy\Omega)}\Big]\,, \end{equation*} where $N$ denotes the outward unit normal to $\bdy \Omega$. \end{proposition} The proofs of Propositions \ref{Hodge} and \ref{Hodge2} are given in Cheng \& Shkoller \cite{ChSh2014}. \begin{proposition}\label{normaltrace} Suppose that $v'\in L^2(\Omega)$ with $\operatorname{div} v'\in L^2(\Omega)$.
Then $v'\cdot N \in H^{-\frac{1}{2}}(\bdy\Omega)$ and \begin{equation*} \|v' \cdot N\|_{H^{-1/2}(\bdy\Omega)}\leq C\left(\|v' \|_{L^2(\Omega)}+\| \operatorname{div} v' \|_ { L^2(\Omega) } \right). \end{equation*} \end{proposition} \subsection{A commutator estimate} The following is Lemma 5.1 in Coutand \& Shkoller \cite{CoSh2010}: \begin{proposition}\label{commutator}Let $\Omega$ be a domain and assume that its boundary, $\partial\Omega$, is smooth. Then \begin{equation*} |\mathcal{J}_\kappa(fg')-f\mathcal{J}_\kappa g'|_0\leq C\|f\|_{W^{1,\infty}}|g|_0. \end{equation*} \end{proposition} \subsection{An elliptic estimate} Let us consider $$ \Omega={\mathbb T}\times[-1,0], $$ and the elliptic problem \begin{subequations}\label{elliptic} \begin{alignat}{2} - \operatorname{div} (A \nabla u)&= f \qquad&&\text{in}\quad\Omega\,,\\ u &= 0 &&\text{on}\quad \partial\Omega\,. \end{alignat} \end{subequations} Then, we have the following elliptic estimate \begin{lemma}\label{lemaa6} Suppose that the matrix $A\in H^{1.5}(\Omega)$ with $A > 0$, and that $f\in H^{0.5}(\Omega)$. Then the solution to (\ref{elliptic}a--b) verifies $$ \|\Lambda^{1.25}\nabla u\|_{L^2(\Omega) }\leq C\left(\|\Lambda^{0.25}f\|_{L^2(\Omega)}+\|\Lambda^{1.25}A\|_{L^2(\Omega)}\|\nabla u\|_{L^\infty(\Omega)}\right.\left. +\|\Lambda^{0.5}\nabla u\|_{L^2(\Omega)}\|\Lambda^{0.25}\nabla A\|_{L^2(\Omega)}\right) \,, $$ and $$ \|\Lambda^{1.5}\nabla u\|_{L^2(\Omega) }\leq C\left(\|\Lambda^{0.5}f\|_{L^2(\Omega)}+\|\Lambda^{1.5}A\|_{L^2(\Omega)}\|\nabla u\|_{L^\infty(\Omega)}\right.\left.
+\|\Lambda^{0.75}\nabla u\|_{L^2(\Omega)}\|\Lambda^{0.25}\nabla A\|_{L^2(\Omega)}\right) \,. $$ \end{lemma} \begin{proof} We prove only the first estimate, as the second one is straightforward. We consider the approximate problem \begin{subequations}\label{ellipticreg} \begin{alignat}{2} - (\tilde{A}^i_j \tilde{u}_{,j})_{,i}&= f \qquad&&\text{in}\quad\Omega\,,\\ \tilde{u} &= 0 &&\text{on}\quad \partial\Omega\,, \end{alignat} \end{subequations} where $\tilde{A}$ is a $C^\infty$ regularization of $A$. For a given $\phi\in H^1(\Omega)$, we consider the weak formulation of the problem (\ref{ellipticreg}a--b): $$ \int_\Omega \tilde{A}^i_j \tilde{u}_{,j}\phi_{,i}dx=\int_\Omega f \phi dx. $$ These problems have solutions $\tilde{u}$ which are smooth. We focus on the high-norm uniform estimate. To do that, we pick $\phi= \Lambda^3 \tilde{u}$, where $\widehat{\Lambda u} =|k|\hat{u}(k)$. Then, using the self-adjointness of the operator $\Lambda$, the weak formulation reads $$ \int_\Omega \Lambda^{1.5}\left(\tilde{A}^i_j \tilde{u}_{,j}\right)\Lambda^{1.5}\tilde{u}_{,i}dx=\int_\Omega \Lambda^{0.5}f \Lambda^{2.5}\tilde{u} dx. $$ We write \begin{eqnarray*} I&=&\int_\Omega \Lambda^{1.5}\left(\tilde{A}^i_j \tilde{u}_{,j}\right)\Lambda^{1.5}\tilde{u}_{,i}dx\\ &=&\int_\Omega [\Lambda^{1.5},\tilde{A}^i_j]\tilde{u}_{,j}\Lambda^{1.5}\tilde{u}_{,i}dx+\int_\Omega \tilde{A}^i_j\Lambda^{1.5}\tilde{u}_{,j}\Lambda^{1.5}\tilde{u}_{,i}dx.
\end{eqnarray*} Notice that the first term can be estimated by layers (\emph{i.e.}, fixing $x_2\in[-1,0]$) using the Kenig--Ponce--Vega estimate (see \cite{kato1988commutator} and \cite{KenigPonceVega}) along the $x_1$ coordinate: \begin{eqnarray*} \|[\Lambda^{1.5},\tilde{A}^i_j]\tilde{u}_{,j}\|_{L^2({\mathbb T})}&\leq& C\left(\|\Lambda^{1.5}\tilde{A}^i_j\|_{L^2({\mathbb T})}\|\nabla\tilde{u}\|_{L^\infty({\mathbb T})}+\|\Lambda^{0.5}\nabla\tilde{u}\|_{L^4({\mathbb T})}\|\nabla A\|_{L^{4}({\mathbb T})}\right)\\ &\leq& C\left(\|\Lambda^{1.5}\tilde{A}^i_j\|_{L^2({\mathbb T})}\|\nabla\tilde{u}\|_{L^\infty({\mathbb T})}+\|\Lambda^{0.75}\nabla\tilde{u}\|_{L^2({\mathbb T})}\|\Lambda^{0.25}\nabla \tilde{A}\|_{L^{2}({\mathbb T})}\right). \end{eqnarray*} Using Tonelli's theorem, together with $\|\cdot\|_{L^2(\Omega)}^2=\int_{-1}^0\|\cdot\|^2_{L^2({\mathbb T})}dx_2$, we have \begin{eqnarray*} \|[\Lambda^{1.5},\tilde{A}^i_j]\tilde{u}_{,j}\|_{L^2(\Omega)}&\leq& C\left(\|\Lambda^{1.5}\tilde{A}^i_j\|_{L^2(\Omega)}\|\nabla\tilde{u}\|_{L^\infty(\Omega)}+\|\Lambda^{0.75}\nabla\tilde{u}\|_{L^2(\Omega)}\|\Lambda^{0.25}\nabla \tilde{A}\|_{L^2(\Omega)}\right). \end{eqnarray*} The second integral provides us with the estimate $$ \|\Lambda^{1.5}\nabla \tilde{u}\|_{L^2}\leq C\left(\|\Lambda^{0.5}f\|_{L^2(\Omega)}+\|\Lambda^{1.5}\tilde{A}^i_j\|_{L^2(\Omega)}\|\nabla\tilde{u}\|_{L^\infty(\Omega)}\right.\left. +\|\Lambda^{0.75}\nabla\tilde{u}\|_{L^2(\Omega)}\|\Lambda^{0.25}\nabla \tilde{A}\|_{L^2(\Omega)}\right). $$ Passing to the limit $\tilde{A}\rightarrow A$, we conclude the desired uniform estimate for $\tilde{u}$. \end{proof} \end{document}
\begin{document} \pagenumbering{arabic} \addtocounter{secnumdepth}{1} \title{The divided cell algorithm and the inhomogeneous Lagrange and Markoff spectra } \begin{abstract} The divided cell algorithm was introduced by Delone in 1947 to calculate the inhomogeneous minima of binary quadratic forms and developed further by E. S. Barnes and H. P. F. Swinnerton-Dyer in the 1950s. We show how advances of the past fifty years in both symbolic computation and our understanding of homogeneous spectra can be combined to make divided cells more useful for organizing information about inhomogeneous approximation problems. A crucial part of our analysis relies on work of Jane Pitman, who related the divided cell algorithm to the regular continued fraction algorithm. In particular, the relation to continued fractions allows two divided cells for the same problem to be compared without stepping through the chain of divided cells connecting them. \end{abstract} \section{Preliminaries} Notational conventions and a basic framework for working with approximation problems are collected here for the convenience of the reader. Diophantine Approximation problems deal with finding where the restriction of a function to a special subset is small. In this paper, the function will be defined on the plane $\Reals^2$ and the subset will be the integer lattice $\Integers^2$. The function will take nonnegative values, so ``small'' will mean ``close to zero''. It is customary to work with the reciprocal of the original function, and to freely treat $\infty=1/0$ as a number, since it will be a possible value of the supremum of a set of nonnegative numbers. By working with the name denoting a function instead of the traditional convention of denoting a sequence with subscripts, we are allowed the uncluttered notation $\limsup f$ to denote the infimum over all cofinite subsets $S$ of $\Naturals$ of the supremum of the values of $f$ restricted to $S$. 
The computation of the infimum of a function can often be organized by endowing the domain with a \emph{partial order} for which the given function is order preserving. If the partial order has the property that descending sequences are finite, then the infimum of the values over the whole set is equal to the infimum over the set of \emph{minimal points} for the partial order. This is valuable when the set of minimal points has special properties. In particular, it is usually possible to index the minimal points by the set of all integers $\Integers$ so that a pair of adjacent minimal points has some special property. Such a function defined on the set of all integers $\Integers$ will be called a \emph{chain}. The integer lattice in $\Reals^2$ is identified with $\Integers^2$ by giving it a basis. Certain bases aid in the identification of the minimal points. These depend on the expression and have been called \emph{reduced}. Dually, the expression giving the function in terms of a reduced basis has also been called reduced. Families of related problems lead to a space of reduced bases, and the study of all reduced bases for a single problem can be expressed in terms of a dynamical system on this space. This study will lead to strong results when the underlying space is \emph{compact}. Our emphasis here will be \emph{visual}, with pictures of the plane $\Reals^2$ including the lattice $\Integers^2$. However, while a basis for the lattice is used in the algebraic description of the objects in the figure, other considerations may be used in the choice of \emph{viewing coordinates}.
The quantity $M(F)$ is given as a \emph{normalized inverted minimum}: \emph{normalized} to allow natural comparison between values of different forms $F$; \emph{inverted} to allow simpler expressions for interesting values in the spectrum. Those $F$ with $F(x,y)=0$ for integers $x$ and $y$ (not both zero), as well as those taking arbitrarily small values, have $M(F)=\infty$. The interesting cases are those for which $M(F)$ is finite. Since $F$ is indefinite, it can be factored over $\Reals$. We write \begin{equation} F(x,y)=(a_0x+b_0y)(a_1x+b_1y), \label{eq:factform} \end{equation} and introduce new variables $\xi=a_0x+b_0y$, $\eta=a_1x+b_1y$ to get $F=\xi\eta$. Then, $\sqrt{D(F)}=\absval{a_0b_1-a_1b_0}$. In particular, the expression $F(x,y)$ is encoded by the matrix \begin{equation} A=\mattwoc{a_0&b_0\\a_1&b_1\\}. \label{eq:hommat} \end{equation} Left multiplication by this matrix takes the column with components $(x,y)$ to one with components $(\xi,\eta)$. Thus, it gives a change of variables between the arithmetic and geometric aspects of the study of the values of $F$ on the integer lattice. The rows of the matrix are the coefficients in the factors of $F(x,y)$. As a change-of-variables matrix, its columns give the $(\xi,\eta)$ coordinates of the generators of the lattice. A change of basis in the lattice multiplies the matrix in (\ref{eq:hommat}) on the right by an integer matrix of determinant $\pm1$; scaling the factors of $F(x,y)$ multiplies on the left by a real diagonal matrix. The value of $M(F)$ is not changed by these actions. A \emph{visual} approach to the Markoff Spectrum must show the integer lattice and the lines $a_ix+b_iy=0\ (i=0,1)$. However, it is more convenient to use $(\xi,\eta)$ as \emph{viewing coordinates} since a \emph{fixed} $F$ will be studied using \emph{different bases} for the integer lattice. In practice, this may be modified by a change of scale $(\xi,\eta)\to(a\xi,\eta/a)$ in order to bring different lattice points into focus. 
Because of this choice of viewing coordinates, the lines where $F(x,y)=0$ will be called the \emph{axes} of $F$. For example, a picture of $F(x,y)=x^2-3y^2$ on the integer lattice uses viewing coordinates $(\xi,\eta)$ with $\xi=x+y\sqrt3,\eta=x-y\sqrt3$. Figure~\ref{fig:hom} shows this view of $F=0$ (now just the coordinate axes), the lattice generated by $(x,y)=(1,0)$ and $(x,y)=(0,1)$, and the lattice cell whose $(x,y)$ coordinates are $(0,0),(1,0),(1,1),(0,1)$. \begin{figure} \centering \caption{The form $F(x,y)=x^2-3y^2$ in the viewing coordinates $(\xi,\eta)$: the axes of $F$, the integer lattice, and the lattice cell with vertices $(0,0),(1,0),(1,1),(0,1)$.} \label{fig:hom} \end{figure} In computing $M(F)$, if a lattice point $P_0$ is closer to both axes than the lattice point $P_1$ is, then $\absval{F}$ is smaller at $P_0$ than at $P_1$. This relation between $P_0$ and $P_1$ is a partial order of the type mentioned in the Preliminaries. Thus, only the \emph{minimal points} for this partial order need be considered when finding $M(F)$. Arranging the minimal points in order of their distance to a specified axis of $F$ gives a \emph{chain} of minimal points (except when an axis contains a nonzero lattice point). Since these results are well known, and have been given in detail in \cite{bumby1991}, features of this chain are only sketched here. The corresponding results for inhomogeneous problems will be described later in more detail. Figure~\ref{fig:hom} shows that $(x,y)=(1,0)$ and $(x,y)=(1,1)$ are minimal points, but $(x,y)=(0,1)$ is not since $(1,0)$ is closer to both axes. A full description shows that two successive minimal points always generate the lattice. These are the \emph{reduced bases} of the lattice. In a precise definition of a reduced basis, it is convenient to fix the order of the axes, the order of the generators of the lattice, and to choose between a generating vector and its negative. With one set of choices, if $x$ and $y$ are the coordinates with respect to a reduced basis, one has $a_0\geq a_1\geq0$ and $b_1\geq -b_0\geq0$ in (\ref{eq:factform}) and (\ref{eq:hommat}).
Since the matrix (\ref{eq:hommat}) determines the reduced basis, it is appropriate to also speak of a \emph{reduced matrix} when these conditions hold. The inhomogeneous case will also require matrices with $b_0\geq0\geq a_0$, but these are avoided in the traditional treatment of the homogeneous case. For \emph{three} consecutive minimal points, a matrix whose columns are the basis consisting of the second and third points is the product of the corresponding matrix for the first and second points with the matrix \begin{equation} \mattwor{0&1\\1&-a\\} \label{eq:stepmat} \end{equation} with $a=\left\lfloor{a_0/b_0}\right\rfloor$. To restore orientation and obtain the required signs of the matrix elements, this must be multiplied on the left by a diagonal matrix whose first diagonal entry is positive and whose second entry is negative. In the example, the matrix for the reduced basis $(1,1),(1,0)$, multiplied by the matrix in (\ref{eq:stepmat}) and rescaled, leads to the equation \begin{equation} \mattwoc{1+\sqrt3&1\\1-\sqrt3&1\\}\mattwor{0&1\\1&-2\\}= \mattwoc{-1+\sqrt3&0\\0&-1-\sqrt3\\} \mattwoc{\frac{1+\sqrt3}{2}&1\\\frac{1-\sqrt3}{2}&1\\}. \label{eq:exstep} \end{equation} The space of all reduced matrices with fixed determinant forms a compact set. Note that compactness requires that all inequalities be \emph{inclusive}. Classical work often aimed for unique representations and required some inequalities to be strict, but sacrificing uniqueness to have a compact space of reduced matrices allows the Spectrum to be characterized in terms of \emph{attained} extrema. The chain of the matrices given by (\ref{eq:stepmat}) is one description of the steps in the continued fraction algorithm. It produces a \emph{symbolic dynamics} that is useful for describing the relation between the reduced bases of a given form and the computation of $M(F)$.
In particular, a consistent choice of a vector from each reduced basis leads to a chain of minimal points $(x_n,y_n)$, and $M(F)=\sup M_n(F)$ where \begin{equation} M_n(F)=\frac{\sqrt{D(F)}}{\absval{F(x_n,y_n)}}. \label{eq:local} \end{equation} Each index $n$ should be associated with both the minimal point $(x_n,y_n)$ and the reduced basis with this point as first element. A sequence of indices can be found for which $M_n(F)\to M(F)$ and also the corresponding reductions converge (see Lemma~6 of Chapter~1 of \cite{cusickandflahive89}). This shows that every value in the Markoff Spectrum is an \emph{attained} supremum. This result is known as the \emph{Compactness Theorem} for the Markoff Spectrum. A novel variation on this approach, allowing generalization to higher dimensions, can be found in \cite{sensual}. The \emph{Divided Cell Algorithm} transfers these properties of the continued fraction to inhomogeneous problems. \section{The Lagrange Spectrum} If the form $F(x,y)$ in (\ref{eq:factform}) is $x(y-x\alpha)$, then $F(0,1)=0$, giving $M(F)=\infty$. However, if $\alpha$ is irrational, no other \emph{minimal points} $(x,y)$ have $F(x,y)=0$. If $0<\alpha<1$, we set $(x_{-1},y_{-1})=(1,0)$ and $(x_0,y_0)=(0,1)$, giving a reduced basis; and then index the other minimal points by positive integers. Properties of rational approximations to $\alpha$ are determined by $L(\alpha)=\limsup M_n(F)$ for $n\in\Naturals$. The set of such values is called the \emph{Lagrange Spectrum}. Theorem~1 of chapter~3 of \cite{cusickandflahive89} says that the Lagrange Spectrum is a subset of the Markoff Spectrum. This follows from the proof of the Compactness Theorem for the Markoff Spectrum. When $L(\alpha)$ is finite, the forms appearing in the construction are all equivalent to $F$, but the limiting form $F^*$ is nonzero on all lattice points other than the origin and $L(\alpha)=M(F^*)$. 
In this paper, we will concentrate on the inhomogeneous Markoff Spectrum, but applications to the inhomogeneous Lagrange Spectrum will follow by constructing a convergent sequence of reductions of the inhomogeneous expression $x(y-x\alpha-\beta)$. \section{The inhomogeneous Markoff Spectrum} For \emph{inhomogeneous} problems, the form $F$ defined in (\ref{eq:factform}) is replaced by \begin{equation} F_I(x,y)=(a_0x+b_0y+c_0)(a_1x+b_1y+c_1), \label{eq:ifactform} \end{equation} while we continue to require $(x,y)\in\Integers^2$. Figures illustrating such problems will continue to be drawn in \emph{viewing coordinates} for which $F_I(x,y)=0$ on the axes of the coordinate system. The origin of this coordinate system is no longer required to be a lattice point. For example, Figure~\ref{fig:inhom} modifies the example of Figure~\ref{fig:hom} by using factors $\xi_I=x+y\sqrt3-1-0.5\sqrt3$ and $\eta_I=x-y\sqrt3-1+0.5\sqrt3$ to study the expression $F_I(x,y)=\xi_I\eta_I$. Note that the origin is now at $(x,y)=(1,0.5)$. The parallelogram in the figure has vertices whose $(x,y)$ coordinates are $(0,0),(1,1),(2,1)$, and $(0,1)$ with edges that form a reduced basis. Earlier work has required that $(c_0,c_1)$ not be in the lattice generated by $(a_0,a_1)$ and $(b_0,b_1)$ to explicitly exclude the homogeneous case. We propose to allow this case to \emph{exclude itself} because it necessarily has a lattice point where $F_I(x,y)=0$ and interest will be centered on those $F_I(x,y)$ that are bounded away from zero on the lattice. \begin{figure} \centering \caption{An inhomogeneous modification of the example of Figure~\ref{fig:hom}; the origin of the viewing coordinates is at $(x,y)=(1,0.5)$, and the parallelogram shown is a divided cell.} \label{fig:inhom} \end{figure} Notice that the parallelogram in Figure~\ref{fig:inhom} has one vertex in each quadrant bounded by axes of $F_I$. This property defines a \emph{divided cell}: the word ``cell'' refers to a fundamental parallelogram of the lattice, and it is ``divided'' by having its vertices separated by the axes.
The definition of the inhomogeneous Markoff value is \begin{equation} M_I(F_I)=\sup\SET{\absval{\frac{a_0b_1-a_1b_0}{F_I(x,y)}}} {x,y\in\Integers} \label{eq:defMI} \end{equation} using the notation of (\ref{eq:ifactform}). Note that the origin of the lattice is not excluded here, since it has no special role in $F_I$. To describe an inhomogeneous problem, the matrix of (\ref{eq:hommat}) must be replaced by \begin{equation} B=\matthreec{a_0&b_0&c_0\\a_1&b_1&c_1\\}. \label{eq:inhommat} \end{equation} Left multiplication by this matrix takes the column with components $(x,y,1)$ to one with components $(\xi_I,\eta_I)$. Again, the rows of the matrix are the coefficients in the factors of $F_I(x,y)$. The interpretation of columns is a little different from the homogeneous case: the third column is the image of the origin, and the first two columns give generators of the lattice. If the matrix is augmented with a third row $[0\ 0\ 1]$, one gets the \emph{affine} change-of-variables matrix relating the column with components $(x,y,1)$ to one with components $(\xi_I,\eta_I,1)$. In this matrix, a column with $0$ in the third position represents a direction; one with $1$ in the third position represents a point. The vertices of the cell are found by adding the sum of a subset of the first two columns to the third column. All such columns have $1$ in the third position, so they can be expected to represent points. Left multiplication by a two by two diagonal matrix changes the scale on the axes and right multiplication by an integer matrix with a third row $[0\ 0\ 1]$ and determinant $\pm1$ gives an affine change of basis in the lattice. \section{Divided cells as reduced objects} We continue the convention of using the $(x,y)$ for coordinates in the integer lattice, but describing geometric properties using $(\xi_I,\eta_I)$ as viewing coordinates. Divided cells will be the reduced objects for the study of $F_I(x,y)$ on the integer lattice. 
Several proofs of the existence of divided cells have been given, beginning with Delone \cite{del47} in 1947. Our proof uses work of Pitman \cite{pitman58}, and will be given after discussing the role of divided cells in Diophantine Approximation. We choose the line $a_0x+b_0y+c_0=0$ to be the vertical axis in Figure~\ref{fig:inhom} with the positive halfspace on the right. Treating the vertices of the cell as the basic fundamental parallelogram of $\Integers^2$ with $(0,0)$ in the lower left quadrant of the figure, gives $c_0\leq0, b_0+c_0\leq0, a_0+c_0\geq0, a_0+b_0+c_0\geq0$. These inequalities imply $a_0\geq\absval{b_0}$. A similar analysis of $a_1x+b_1y+c_1=0$ as the horizontal line leads to $b_1\geq\absval{a_1}$. Conversely, these conditions on $a_0,a_1,b_0,b_1$ give a nonempty set of possible solutions for $c_0,c_1$. Since $M_I(F_I)$ is invariant under scaling of the linear factors of $F_I(x,y)$, one may introduce a convenient scaling, like $a_0=b_1=1$. Then $a_1$ and $b_0$ are each chosen from the interval $[-1,1]$, and then each of $c_0$ and $c_1$ is chosen from an appropriate closed interval. In this way, the space of divided cells can be represented by the fourth power of a closed interval. This scaling will not be used in this paper, but we will insist that $a_0>0$ and $b_1>0$, forcing the \emph{base vertex} to be in the third quadrant. This construction shows that the specification of a divided cell can be done in two steps: first choose generators of the lattice giving the directions of the sides of the cell; then locate the origin. Barnes \cite{BarIV} introduced the term ``I-reduced'' for the lattice bases arising in this way. We keep the name, but take it to mean that $a_0\geq\absval{a_1}$ and $b_1\geq\absval{b_0}$. If $a_1b_0\leq0$, the cells are essentially the reduced cells of the homogeneous case. Such cells will be called \emph{Gaussian}, or G-cells, indicating that they are reduced in the sense of Gauss.
Note that, in contrast to the homogeneous case, no attempt is made to fix the sign of $a_1$. The cells that are \emph{not} Gaussian will be called N-cells. Since definitions should use \emph{inclusive} inequalities, the correct characterization of an N-cell is $a_1b_0\geq0$. This allows a cell to be both a G-cell and an N-cell, but only when one of its sides is parallel to an axis. If a parallelogram is an I-reduced cell, then the possible locations of the origin in the cell form a rectangle inside the cell. This rectangle is called the \emph{inner box} (which we will sometimes call simply a ``box'') of the I-reduced cell. Figure~\ref{fig:cellbox} shows two typical examples. In the figure, a G-cell is on the left and an N-cell is on the right. The width of the box is $a_0-\absval{a_1}$, so that it degenerates to a vertical line segment if $a_0=\absval{a_1}$. Similarly, the inner box degenerates to a horizontal line segment if $b_1=\absval{b_0}$. When both $a_0=\absval{a_1}$ and $b_1=\absval{b_0}$, the box is only a single point. This illustrates that the first row, which gives the coefficients in the equation of the axis shown in the vertical position and describes the first coordinates of the cell, governs the divided cell step. \begin{figure} \caption{A G-cell (left) and an N-cell (right), each with its inner box.} \label{fig:cellbox} \end{figure} As in the homogeneous case, the partial order defining minimal points considers distances to both axes. However, this time it is necessary to treat each quadrant separately. Within a fixed quadrant, points that are closer to both axes will be smaller points in the partial order. We call this the \emph{basic partial order}. It will need to be modified, but this is a good tentative definition. \section{The divided cell algorithm} Once one divided cell is available, it is possible to construct a \emph{chain} of divided cells containing that cell. This construction is the \emph{Divided Cell Algorithm}. 
It must be shown that $M_I(F_I)$ can be computed using only the vertices of the cells obtained by this algorithm. This is essentially the content of Theorem~5 of \cite{BarS-DIII}. Another approach to using divided cells to compute $M_I(F_I)$ is given by the theorem on page 530 of \cite{del47}. Our proof will distinguish \emph{six} related chains arising from the divided cell algorithm: a chain of cells, a chain of boxes, and four chains of minimal points. The relations among these chains is not as direct as it is in the homogeneous case, so it is useful to keep them separate while showing how they are related. The chains of minimal points --- one chain in each quadrant --- play a key role in showing that all divided cells lie in a single chain and are used to characterize the quantity $M_I(F_I)$ defined in (\ref{eq:defMI}). Since the chains in different quadrants are independent, distances in different quadrants may be weighted differently. We don't explore that here, but some consequences can be found in Section~3 of \cite{BarS-DIII}. Finally, the chain of boxes shows the simplest progression from one axis to the other. All of these chains terminate if there is a lattice direction parallel to an axis, but our statements will make no effort to distinguish that case. The construction of the Divided Cell Algorithm is used in two settings: given expression $F_I$, it produces its chain of divided cells; given only an I-reduced basis for a lattice, it describes all possible successor I-reduced bases. \begin{thm}\label{thm:chain} If $F_I$ admits one divided cell, then there is a chain of divided cells containing that cell. Given an I-reduced basis, there is one shape of an N-cell arising as a successor and one possible shape of a G-cell arising as a successor. The N-cell always occurs, but the G-cell may not: the number of positions of the N-cell is always one more than the number of positions of the G-cell. 
\end{thm} \begin{proof} Suppose that we are given a divided cell defined by a matrix as in (\ref{eq:inhommat}). Since this is a divided cell, $a_0\geq\absval{a_1}$ and $b_1\geq\absval{b_0}$, with additional bounds on each $c_i$ in terms of $a_i$ and $b_i$. The details of the divided cell step depend on the sign of $b_0$. The algorithm terminates if $b_0=0$, and there are only minor differences between the other cases, so only the case of $b_0>0$ will be illustrated. The definition of a divided cell then gives that $c_0<c_0+b_0\leq0\leq c_0+a_0$, so that the line segment from $(c_0,c_1)$ to $(c_0+b_0,c_1+b_1)$ forms the left side of the cell and crosses the horizontal axis. This side can be extended until it crosses the vertical axis, giving an integer $h>0$ with $c_0+hb_0\leq0\leq c_0+(h+1)b_0$. The segment from $T_-\colon(c_0+hb_0,c_1+hb_1)$ to $T_+\colon(c_0+hb_0+b_0,c_1+hb_1+b_1)$ will form the \emph{top} of the next cell. Similarly, the bottom of the next cell is found by extending the right side to get a segment from $B_-\colon(c_0+a_0-kb_0,c_1+a_1-kb_1)$ to $B_+\colon(c_0+a_0+b_0-kb_0,c_1+a_1+b_1-kb_1)$ for some $k>0$. Since segments $T_-T_+$ and $B_-B_+$ both cross the vertical axis, it follows that $(h+k-1)b_0\leq a_0\leq(h+k+1)b_0$. This analysis shows that right multiplication by \begin{equation} S=\matthreec{0&-1&1\\1&h+k&-k\\0&0&1\\}(b_0>0) \textbox{ or }S=\matthreec{0&1&0\\-1&h+k&1-h\\0&0&1\\}(b_0<0) \label{eq:transmat} \end{equation} gives the matrix representing the next cell. In each case, $h$ and $k$ are positive integers with $(h+k-1)\absval{b_0}\leq a_0\leq(h+k+1)\absval{b_0}$. If $a_0/b_0$ is not an integer, $h+k$ must be one of the two integers nearest to $\absval{a_0/b_0}$. The \emph{shape} of the successor cell is determined by $h+k$; and the \emph{position} by $h$. Those with different $h$ and the same $h+k$ are translates of one another. The leftmost possible cell is an N-cell with $h=1$ and $k$ as large as possible. 
If $\absval{a_0/b_0}<2$, this is the only successor. Otherwise, the rules for determining the inner box show that decreasing $k$ by $1$ and keeping $h$ fixed gives a G-cell whose inner box abuts the box of this leftmost cell. Then, keeping this $k$ and increasing $h$ by $1$ gives a \emph{translate} of the leftmost N-cell whose inner box abuts the box of this G-cell. The rightmost cell will be an N-cell, and the union of the inner boxes of these possible successors covers the inner box of the original cell. \end{proof} Note that the first row of (\ref{eq:inhommat}), which gives the coefficients in the equation of the axis shown in the vertical position and describes the first coordinates of the cell, governs the divided cell step. The first part of Theorem~\ref{thm:chain} is illustrated in Figure~\ref{fig:cellsboxes} showing a divided cell with its box and, in two separate graphs, two successor cells with their boxes. In this picture, the original cell is a G-cell, and both types of successor are shown with the G-cell on the left (note that this figure contains G-cells with different signs of $a_1$). Several lattice points are also included. \begin{figure} \caption{A divided cell with its inner box and, in two separate graphs, two successor cells with their boxes.} \label{fig:cellsboxes} \end{figure} Figure~\ref{fig:cellsboxes} may also be used to analyze the chain of divided cell vertices in each quadrant. In the pictures, the common lattice direction of a cell and its successor gives a line joining the vertices of those cells in the second quadrant, and also in the fourth quadrant. Moreover, these two lines are adjacent lattice lines in that direction, so that there are no lattice points interior to the strip bounded by those lines. However, in the fourth quadrant of the second picture there is a point on one of these lines that is not a vertex of a divided cell although it meets our preliminary requirement for being a minimal point. We will now resolve this difficulty. 
We state the theorem for the first quadrant in order to have names for the edges that we use, and concentrate on points with small first coordinate but the proof is readily applied to both coordinates in all quadrants. \begin{thm}\label{thm:linord}Given a divided cell $C$, let $I$ be the projection of the open top edge of $C$ on the horizontal axis. For each lattice line $L$ parallel to this edge, let $I_L$ be the points on $L$ whose projection on the horizontal axis lies in $I$. Then each $I_L$ contains at most one lattice point, and only those above the top edge of $C$ contain such a lattice point in the first quadrant. Furthermore, the projections onto the vertical axis of the $I_L$ are disjoint, so the ordering of these points by their second coordinate is the same as the order on the line $L$ containing the point. \end{thm} \begin{proof} For a lattice line $L$, the distance between consecutive lattice points on $L$ is fixed, and the top edge of $C$ gives one example of such a pair of consecutive lattice points. Again, since the lines are parallel, the difference of first coordinates is also fixed and equal to the width of $I$ in this case. Hence, except when the endpoints of $I_L$ are lattice points, there is a unique lattice point in each $I_L$. Similarly, the relation between projections on the vertical axis of two consecutive $I_L$ is also fixed, so it will be the same as the relation between the projections of the top and bottom edges of $C$. However, $C$ is a divided cell, so all points on the top edge have positive second coordinate and all points on the bottom edge have negative second coordinate, so the projections of these edges are disjoint. For $L$ below the top edge of $C$, all points of $I_L$ have negative second coordinate, so $I_L$ contains no point in the first quadrant. 
\end{proof} Any point in the first quadrant that is closer to the vertical axis than the vertex $P$ of $C$ in that quadrant must project into $I$, but Theorem~\ref{thm:linord} shows that all lattice points with that property have larger second coordinate than $P$. Hence $P$ is a minimal point. When a side of $C$ is extended to meet the positive vertical axis, one obtains a lattice line with one lattice point on each line parallel to the top edge of $C$. When the left side of the $C$ is used in this construction, the first description of the divided cell step shows that the first lattice point in the first quadrant is a vertex of the successor divided cell. If it is the extension of the right side of $C$ that meets the positive vertical axis, the first several lattice points will be in the first quadrant, but only the first and last of these are vertices of divided cells. This bypassing of minimal points in the divided cell algorithm is easily accommodated by augmenting the basic partial order. \begin{thm}\label{thm:convex} If a line meets a quadrant in a bounded interval, the product of the distances to the axes is zero at the endpoints of the interval and has a unique interior maximum. The distance decreases as one moves from the location of the maximum towards either axis. \end{thm} \begin{proof} A calculus exercise! When expressed in terms of one of the coordinates the distance is a quadratic polynomial with negative coefficient of the second degree term. \end{proof} When the line in Theorem~\ref{thm:convex} is a lattice line, this says that we may modify the basic partial order to also say that a lattice point is greater than another lattice point on the line that is on the same side of the point of maximum value of $F_I$ and farther from that point. With this modification, the only minimal points on the line in this quadrant are the vertices of the original divided cell and its successor. 
Augmenting the basic partial order in this way on \emph{every} lattice line and forming the transitive closure gives a new partial order called the \emph{extended partial order} with fewer minimal points. We will say that $P$ is \emph{nearer} than $Q$ if $P\leq Q$ in the extended partial order. \begin{thm}\label{thm:minpt} If divided cells exist, every minimal lattice point for the extended partial order is a vertex of a divided cell. \end{thm} \begin{proof} This is now little more than using a known divided cell as the basis and using previous results of this section for an induction step. By symmetry, it suffices to show the result for a minimal lattice point $P$ in the first quadrant that is closer to the vertical axis. By Theorem~\ref{thm:linord} there are finitely many minimal lattice points whose second coordinate lies between that of $P$ and the original divided cell vertex in this quadrant. By the discussion following Theorem~\ref{thm:convex}, the first of these is the vertex of a divided cell. There are fewer minimal lattice points in the first quadrant between this cell and the selected point, allowing induction to work. \end{proof} We illustrate the second part of Theorem~\ref{thm:chain} with Figure~\ref{fig:allboxes} showing the inner boxes of the possible successors. To draw both Figure~\ref{fig:cellsboxes} and Figure~\ref{fig:allboxes}, we used $a_0/a_1=2+\sqrt5\approx4.236$. The two parts of Figure~\ref{fig:allboxes} show the number of each type of cell predicted by Theorem~\ref{thm:chain}. To avoid clutter, the cells are not shown in Figure~\ref{fig:allboxes}, but the vertices are. Note that lattice points appear as vertices of the inner box of an N-cell, but the inner box of a G-cell is strictly interior to the cell and contains no lattice point. 
The cells that are collected in each of the pictures in Figure~\ref{fig:allboxes} are translates of one another in agreement with the expressions for their vertices appearing in the proof of Theorem~\ref{thm:chain}. \begin{figure} \caption{The inner boxes of the possible successor cells.} \label{fig:allboxes} \end{figure} What Figure~\ref{fig:allboxes} shows is that the inner boxes of two successor cells intersect in at most a vertical segment and that the union of all of these boxes covers the inner box of the original cell. Combining this with the proof of Theorem~\ref{thm:minpt} we find that the chain of boxes shows a systematic increase of height and decrease of width as we step through the chain. \section{Pitman's Theorem}Jane Pitman \cite{pitman58} related divided cells, which are the reduced cells of an inhomogeneous approximation problem, to the reduced bases of the corresponding homogeneous problem given by the continued fraction algorithm. Consequences of her work are an easy proof of the existence of divided cells (given here as the Corollary to Theorem~\ref{thm:Pitman}) and tools for recognizing minimal points. \begin{thm}\label{thm:Pitman}The cell of a Gaussian reduced form gives rise to two I-reduced N-cells. If the Gaussian cell has $a_1\geq0$, then the matrices corresponding to the other cells are obtained by multiplying its matrix by \begin{equation} \matthreer{1&0&0\\1&1&0\\0&0&1\\} \textbox{ or } \matthreer{1&-1&1\\0&1&0\\0&0&1\\}. \label{eq:neighborplus} \end{equation} The union of the inner boxes of these three cells is (apart from duplication on the boundary) a fundamental domain for the lattice. \end{thm} \begin{proof} Figure~\ref{fig:threebox} gives a ``proof without words''. It illustrates how the fundamental domain of the given Gaussian cell may be cut into pieces that may be translated and reassembled to form the union of the three boxes described in the statement of the theorem. The parallelogram whose sides are not horizontal or vertical is the given cell. 
The box in the center of the figure is the inner box of this cell. The other boxes are the inner boxes of N-cells described in the statement of the theorem. The dashed line divides the part of the original cell outside the boxes into pieces congruent to the portions of the boxes outside the cell. \end{proof} \begin{figure} \caption{The inner boxes of a Gaussian cell and its two neighboring N-cells.} \label{fig:threebox} \end{figure} \begin{cor}Every linear inhomogeneous problem in $\Reals^2$ has divided cells. \end{cor} \begin{proof} Employ the homogeneous theory to reduce the linear part $(a_0x+b_0y)(a_1x+b_1y)$. Then locate the intersection of the axes in Figure~\ref{fig:threebox}. The cell corresponding to that box is a desired divided cell. \end{proof} The N-cells described by Theorem~\ref{thm:Pitman} are called the \emph{neighbors} of the G-cell in that theorem. One of these neighbors is characterized by $\absval{a_0/b_0}\geq2$; the other by $\absval{b_1/a_1}\geq2$. Conversely, each of these inequalities allows the construction of a neighboring G-cell of a given N-cell. If both inequalities hold, then the N-cell serves as an immediate link between consecutive reductions of the linear part of $F_I(x,y)$. However, it is also possible to find N-cells for which neither of these will hold, so that they are not neighbors of a G-cell. Such cells will be considered in the next section. The boxes shown in Figure~\ref{fig:threebox} give the matrices shown in (\ref{eq:neighborplus}). The third column affects only the location of a cell and not its shape, and is significant only for describing cells having some particular relation to the original G-cell. When $a_1\leq0$, the transition matrices of (\ref{eq:neighborplus}) are replaced by \begin{equation} \matthreer{1&1&0\\0&1&0\\0&0&1\\} \textbox{ or } \matthreer{1&0&0\\-1&1&0\\0&0&1\\}. 
\label{eq:neighborminus} \end{equation} (If $a_1=0$, its sign should be chosen opposite to the sign of $b_0$; if $a_1=b_0=0$ the different constructions only involve cells with degenerate boxes.) \section{Superfluous Cells} This section investigates the role of the I-reduced cells that are neither G-cells nor neighbors of G-cells. We refer to such cells as \emph{superfluous cells} for a reason that is given in Theorem~\ref{thm:superfluous}. Figure~\ref{fig:superfluous} shows the portion of a chain of divided cells starting with a cell $C_-$ for which $-2<a_0/b_0<-1$ and $b_1/a_1<-2$ (the values used when drawing the figure were $-(3+\sqrt5)/4\approx-1.309016994$ and $-(3+\sqrt5)\approx-5.236067977$). The figure also includes the inner box of the first cell that is seen to also be the inner box of \emph{all} cells shown. All but the last of these has a unique successor, and the figure shows this chain of unique successors. The last cell shown, $C_+$, has $a_0/b_0<-2$, so there will be a choice of possible successors, none of which are shown. For all the cells $C$ shown in Figure~\ref{fig:superfluous}, the cell $C_+$ will be called the \emph{forward anchor} of $C$ and $C_-$ will be called the \emph{backward anchor} of $C$. \begin{figure} \caption{A chain of superfluous cells between its anchors.} \label{fig:superfluous} \end{figure} \begin{thm}\label{thm:superfluous}For a superfluous cell, the anchors are uniquely determined. For every vertex of a superfluous cell, a vertex of one of the anchors is nearer in the extended partial order. \end{thm} \begin{proof} Since $1\leq\absval{a_0/b_0}\leq2$, the proof of Theorem~\ref{thm:chain} shows that the divided cell algorithm involves a unique successor that is also an N-cell. As long as that cell is superfluous, the algorithm generates a unique forward chain. A closer examination shows that the function giving $a_0/b_0$ for the successor in terms of the corresponding quantity in the original cell is an expansive mapping with $\pm1$ as fixed points. 
From this, it follows that, apart from degenerate cases, the chain starting from any superfluous cell will reach a neighbor of a G-cell in a finite number of steps (this is not difficult to show, but the details are awkward to express, so they will be omitted). The process stops at the \emph{forward anchor} of the superfluous cell. The process of stepping backwards through the chain of divided cells is governed by the ratio $b_1/a_1$ in the same way, leading to the \emph{backward anchor} of the original cell. Thus, Figure~\ref{fig:superfluous} describes the \emph{only} way that superfluous cells can occur and relates these cells to the anchors. A study of the explicit matrix relating a superfluous cell to its successor shows that the anchors are attached to consecutive reduced bases of the lattice. Two of the vertices of a superfluous cell are also vertices of its inner box. These will be called the \emph{inner vertices} of the cell. The inner vertices are shared with all cells shown in Figure~\ref{fig:superfluous} including the anchors, so they have now been found in a non-superfluous cell. The remaining vertices of the cells in Figure~\ref{fig:superfluous} (the \emph{outer vertices}) lie on a lattice line parallel to and adjacent to the line joining the inner vertices, and one of the vertices of an anchor will be nearer in the extended partial order than a given outer vertex of a superfluous cell. \end{proof} \section{A rigorous Framework}The emphasis here has been visual. Figures were used to illustrate the constructions and proofs. These figures were drawn using the \emph{Maple} Symbolic Computation System. In order to tell the system what to draw, the cells and boxes were represented by matrices like $B$ of (\ref{eq:inhommat}). The visual approach was present in \cite{del47}, but was not used much by subsequent authors. 
Computers have facilitated the re-introduction of graphics into exposition, including the use of color where appropriate (the figures in this paper were presented in color at the conference). At the same time, increasing fluency in the language of Linear Algebra has encouraged the use of matrices to represent the objects met in the study. Our intent here was to use these developments to present old results in a way that will encourage new research. Some weaknesses of the Divided Cell \emph{Algorithm} have appeared in our exposition, but we have also shown that its application to Inhomogeneous Diophantine Approximation can rely on methods like the ordinary continued fraction that are associated to the Homogeneous Markoff Spectrum. Divided Cells become a tool for organizing the subject rather than a device for computing properties of individual problems. \section{Acknowledgments} We thank Takao Komatsu for all he did to make this conference a success. We acknowledge the funding that he obtained for our participation in the conference. The inspiration to study divided cells came from Bill Moran. We also acknowledge the funding that brought us (separately) to Adelaide for that work and apologize for the long delay in producing the fruits of that work. \end{document}
\begin{document} \title[Dimension of the Poset of Regions]{The Order Dimension of the Poset of Regions in a Hyperplane Arrangement} \author{Nathan Reading} \address{ Mathematics Department\\ University of Michigan\\ Ann Arbor, MI 48109-1109\\ USA} \thanks{The author was partially supported by NSF grant DMS-0202430.} \email{[email protected]} \urladdr{http://www.math.lsa.umich.edu/$\sim$nreading/} \subjclass[2000]{Primary 52C35; Secondary 20F55, 06A07} \begin{abstract} We show that the order dimension of the weak order on a Coxeter group of type A, B or D is equal to the rank of the Coxeter group, and give bounds on the order dimensions for the other finite types. This result arises from a unified approach which, in particular, leads to a simpler treatment of the previously known cases, types A and B~\cite{Flath,hyperplane}. The result for weak orders follows from an upper bound on the dimension of the poset of regions of an arbitrary hyperplane arrangement. In some cases, including the weak orders, the upper bound is the chromatic number of a certain graph. For the weak orders, this graph has the positive roots as its vertex set, and the edges are related to the pairwise inner products of the roots. \end{abstract} \maketitle \section{Introduction} \label{main results} For a finite Coxeter group $W$, let $\dim(W)$ be the order dimension of the weak order on $W$, or in other words the order dimension of the poset of regions of the corresponding Coxeter arrangement. The order dimension of a finite poset $P$ is the smallest $n$ so that $P$ can be embedded as an induced subposet of the componentwise order on $\mathbb R^n$. 
\begin{theorem} \label{dimensions} The order dimension of the weak order on an irreducible finite Coxeter group has the following value or bounds: \[\begin{array}{rcccl} && \dim(A_n) & = & n\\ && \dim(B_n) & = & n\\ && \dim(D_n) & = & n\\ 6 &\le & \dim(E_6) & \le & 9\\ 7 &\le & \dim(E_7) & \le & 11\\ 8 &\le & \dim(E_8) & \le & 19\\ 4 &\le & \dim(F_4) & \le & 5\\ && \dim(H_3) & = & 3\\ 4 &\le & \dim(H_4) & \le & 6\\ && \dim(I_2(m)) & =& 2 \end{array}\] \end{theorem} The order dimension of the weak order on a reducible finite Coxeter group is the sum of the dimensions of the irreducible components. The result for $A_n$ was proven previously by Flath~\cite{Flath} using the combinatorial interpretation of $A_n$, while the results for $A_n$ and $B_n$ were obtained previously by an argument using supersolvability~\cite{hyperplane}. Theorem \ref{dimensions} gives values or bounds for all types of finite Coxeter groups, including new results on type D and the exceptional groups. The theorem is based on a unified approach which, in particular, provides a significantly simpler proof of the results for types A and B. The lower bounds of Theorem~\ref{dimensions} are easily proven by considering the atoms and coatoms of the posets (Proposition~\ref{lower}). The upper bounds are proven by way of a more general theorem giving an upper bound on the order dimension of the poset of regions of any hyperplane arrangement. Specifically, for a hyperplane arrangement~${\mathcal A}$ and a fixed region~$B$, let ${\mathcal P}({\mathcal A},B)$ be the poset of regions, that is, the adjacency graph of the regions of~${\mathcal A}$, directed away from~$B$. 
Then there is a directed graph ${\mathcal D}({\mathcal A},B)$ whose vertex set is~${\mathcal A}$ such that the following holds: \begin{theorem} \label{acyclic} For a central hyperplane arrangement~${\mathcal A}$ with base region~$B$, the order dimension of ${\mathcal P}({\mathcal A},B)$ is bounded above by the size of any covering of ${\mathcal D}({\mathcal A},B)$ by acyclic induced sub-digraphs. \end{theorem} By a covering of ${\mathcal D}({\mathcal A},B)$ by acyclic induced sub-digraphs we mean a partition ${\mathcal A}=I_1\cup I_2\cup\cdots\cup I_k$ such that each $I_j$ induces an acyclic sub-digraph of ${\mathcal D}({\mathcal A},B)$. The size of such a covering is $k$. It is well-known that, in general, order dimension can be characterized as a problem of covering a directed graph by acyclic induced sub-digraphs (see for example~\cite{hyperplane}). However, if one does this for ${\mathcal P}({\mathcal A},B)$ one generally gets a directed graph with many more vertices than ${\mathcal D}({\mathcal A},B)$. For a large class of arrangements, the minimal cycles in ${\mathcal D}({\mathcal A},B)$ have cardinality two. Thus the order dimension of ${\mathcal P}({\mathcal A},B)$ is bounded above by the chromatic number of the graph $G({\mathcal A},B)$ whose vertex set is ${\mathcal A}$ and whose edges are the two-cycles of ${\mathcal D}({\mathcal A},B)$. Other connections between graph coloring and order dimension have been made, for example in~\cite{Fels-Trot,FHRT,Yan}. When~${\mathcal A}$ is a Coxeter arrangement, the edges of $G({\mathcal A},B)$ can be determined by considering inner products of pairs of roots in the corresponding root system. This leads to straightforward colorings of the graphs for Coxeter arrangements of types A, B and D. The dimension results in types G and I are trivial using Theorem~\ref{acyclic} or by much simpler considerations. The value and bounds for types E, F and H come from computer computations of $\chi(G({\mathcal A},B))$. 
The programs used for these computations were written by John Stembridge, and are available on the author's website. The proof of Theorem~\ref{acyclic} uses a new formulation of order dimension, similar in spirit to the formulation in terms of critical pairs~\cite{Rab-Riv}. A well-known theorem of Dushnik and Miller~\cite{Du-Mil} says that the order dimension of a poset $P$ is the smallest $d$ so that $P$ can be embedded as an induced subposet of $\mathbb R^d$. The components of the embedding need not be linear extensions of $P$, but rather are order-preserving maps of $P$ into linear orders. Proposition~\ref{box} uses subcritical pairs (see~\cite{hyperplane}) to give conditions on a set of order-preserving maps from $P$ into linear orders, which are necessary and sufficient for the maps to be the components of an embedding. The subcritical pairs of ${\mathcal P}({\mathcal A},B)$ are identified with the shards of $({\mathcal A},B)$. Introduced in~\cite{hyperplane}, the shards are the components of hyperplanes in~${\mathcal A}$ which result from ``cutting'' the hyperplanes in a certain way. This geometric information about the subcritical pairs leads to the proof of Theorem~\ref{acyclic}. Hyperplane arrangements are dual to zonotopes, and the Hasse diagram of ${\mathcal P}({\mathcal A},B)$ is the same as the 1-skeleton of the corresponding zonotope. Thus, given~${\mathcal A}$ and~$B$, one might hope to give an embedding of ${\mathcal P}({\mathcal A},B)$ by mapping each region to the corresponding vertex of an equivalent zonotope. We show that this can be done when~${\mathcal A}$ is a supersolvable arrangement. The body of the paper is organized as follows. In Section~\ref{arr} we give definitions and preliminary results about hyperplane arrangements and posets of regions. Section~\ref{dim} contains background information about order dimension, and states and proves the reformulation mentioned above (Proposition~\ref{box}). 
Theorem~\ref{acyclic} is proven in Section~\ref{dim po}, while Section~\ref{coxeter} contains the details of the coloring problem in the case of Coxeter arrangements, leading to the proof of Theorem~\ref{dimensions}. Section~\ref{zonotopal} is a discussion of zonotopal embeddings, and Section~\ref{supersolvable} is an application of Sections \ref{dim po} and \ref{zonotopal} to the case of supersolvable arrangements. \section{Hyperplane Arrangements} \label{arr} In this section we give definitions related to hyperplane arrangements, and prove some basic facts about join-irreducible and meet-irreducible elements of the poset of regions of an arrangement. An {\em arrangement}~${\mathcal A}$ is a finite, nonempty collection of {\em hyperplanes} (codimension~1 linear subspaces) in $\mathbb R^n$. In general, one might consider arrangements of affine hyperplanes, but in this paper all arrangements will consist of hyperplanes containing the origin. Such arrangements are called {\em central}. The complement of the union of the hyperplanes is disconnected, and the closures of its connected components are called {\em regions}. The {\em span} of~${\mathcal A}$, written $\mbox{{\rm Span}}({\mathcal A})$, is understood to mean the linear span of the normal vectors of~${\mathcal A}$, and the {\em rank} of~${\mathcal A}$ is the dimension of $\mbox{{\rm Span}}({\mathcal A})$. The {\em poset ${\mathcal P}({\mathcal A},B)$ of regions} of~${\mathcal A}$ with respect to a fixed region~$B$ is a partial order on the regions defined as follows. Define $S(R_1,R_2)$ to be the set of hyperplanes separating $R_1$ from $R_2$. For any region $R$, the set $S(R):=S(R,B)$ is called the {\em separating set} of $R$. The poset of regions is a partial order on the regions with $R_1\le R_2$ if and only if $S(R_1)\subseteq S(R_2)$. The fixed region~$B$, called the {\em base region}, is the unique minimal element of ${\mathcal P}({\mathcal A},B)$. 
The definition of ${\mathcal P}({\mathcal A},B)$ is an embedding into a product of $|{\mathcal A}|$ chains, so the dimension of ${\mathcal P}({\mathcal A},B)$ is at most $|{\mathcal A}|$. For more details on this poset, see~\cite{BEZ,Edelman}. When~${\mathcal A}$ is central, the antipodal anti-automorphism of ${\mathcal P}({\mathcal A},B)$, denoted by $R\mapsto -R$, corresponds to complementation of separating sets. In particular there is a unique maximal element $-B$. A central arrangement is {\em simplicial} if every region is a simplicial cone. Figure~\ref{ex} shows ${\mathcal P}({\mathcal A},B)$ for a non-simplicial arrangement~${\mathcal A}$ in $\mathbb R^3$ with base region~$B$. The hyperplane arrangement is represented as an arrangement of great circles on a 2-sphere. The northern hemisphere is pictured and the sphere is opaque so that the southern hemisphere is not visible. The equator is shown as a dotted line to indicate that the equatorial plane is not in~${\mathcal A}$. The anti-automorphism $R\mapsto -R$ corresponds to a half-turn of the Hasse diagram of ${\mathcal P}({\mathcal A},B)$. \begin{figure} \caption{A hyperplane arrangement~${\mathcal A}$ with base region~$B$, and the poset of regions ${\mathcal P}({\mathcal A},B)$.} \label{ex} \end{figure} A subset ${\mathcal A}'\subseteq{\mathcal A}$ is a {\em rank-two subarrangement} if $|{\mathcal A}'|>1$ and there is some codimension-two subspace $L$ of $\mathbb R^n$ such that ${\mathcal A}'$ consists of all the hyperplanes containing $L$. There is a unique region $B'$ of ${\mathcal A}'$ containing~$B$, and the hyperplanes in ${\mathcal A}'$ bounding $B'$ are called {\em basic} hyperplanes in ${\mathcal A}'$. Rank-two subarrangements and basic hyperplanes are used to define several combinatorial structures which are central to the results in this paper. 
The {\em basic digraph} ${\mathcal D}({\mathcal A},B)$ is the directed graph whose vertex set is~${\mathcal A}$, with directed edges $H_1\rightarrow H_2$ whenever $H_1$ is basic in the rank-two subarrangement determined by $H_1\cap H_2$. If $H_1$ and $H_2$ are basic in ${\mathcal A}'$ but $H\in{\mathcal A}'$ is not, then $(H\cap B') = (H_1\cap H_2\cap B')$. Intersecting both sides of the equality with~$B$, we obtain the following, which we name as a lemma for easy reference later. \begin{lemma} \label{basic containment} If $H_1$ and $H_2$ are basic in ${\mathcal A}'$ but $H\in{\mathcal A}'$ is not, then $(H\cap B) = (H_1\cap H_2\cap B)$. \qed \end{lemma} The bound of Theorem~\ref{acyclic} is not sharp. For example, an arrangement~${\mathcal A}$ is {\em 3-generic} if every rank-two subarrangement contains exactly two hyperplanes~\cite{Ziegler}. For a 3-generic arrangement, ${\mathcal D}({\mathcal A},B)$ is complete, in the sense that every pair of vertices is connected by one directed edge in each direction. Thus Theorem~\ref{acyclic} gives the upper bound $|{\mathcal A}|$ on the order dimension of ${\mathcal P}({\mathcal A},B)$. There is a unique (up to combinatorial isomorphism) 3-generic arrangement in $\mathbb R^3$ with $|{\mathcal A}|=4$. The intersection of this arrangement with the unit sphere cuts the sphere into 8 triangles and 6 quadrilaterals, so as to be combinatorially isomorphic to the boundary of the cuboctahedron. If $B$ is chosen to be one of the triangular regions, then ${\mathcal P}({\mathcal A},B)$ has order dimension 3, as can be seen by modifying the usual embedding of the Boolean algebra. In light of Proposition~\ref{lower} which will be proved in Section~\ref{dim}, this example also illustrates the fact that the order dimension depends on the choice of base region. In the example of Figure~\ref{ex}, the rank-two subarrangements are the following subsets of~${\mathcal A}$: 12, 13, 23, 15, 26, 34, 146, 245 and 356. 
Figure~\ref{ex2} shows the basic digraph for this example. Note the three-cycle $4\rightarrow 5\rightarrow 6\rightarrow 4$. \begin{figure} \caption{The basic digraph ${\mathcal D}({\mathcal A},B)$ for the example of Figure~\ref{ex}.} \label{ex2} \end{figure} The {\em shards} of an arrangement are pieces of the hyperplanes which arise as follows. For each $H\in{\mathcal A}$, and for each rank-two subarrangement ${\mathcal A}'$ containing $H$, if $H$ is not basic in ${\mathcal A}'$, cut $H$ by removing $L$ from $H$, where $L$ is the codimension-two subspace defining ${\mathcal A}'$. Each hyperplane may be cut several times, and the resulting connected components of the hyperplanes in~${\mathcal A}$ are called the {\em shards} of~${\mathcal A}$ with respect to~$B$. Shards were introduced in~\cite{hyperplane} in connection with certain lattice properties of ${\mathcal P}({\mathcal A},B)$ for a simplicial arrangement~${\mathcal A}$. Figure~\ref{ex3} shows the decomposition into shards of the example $({\mathcal A},B)$ of Figures~\ref{ex} and~\ref{ex2}. Once again, the drawing shows the northern hemisphere. The southern-hemisphere picture is similar, and in this example all of the shards intersect both hemispheres. \begin{figure} \caption{The decomposition of~${\mathcal A}$ into shards.} \label{ex3} \end{figure} Let $P$ be a poset. The join $\vee X$ of a set $X\subseteq P$ is the unique minimal upper bound for $X$ in $P$, if such exists. An element $j$ of a poset $P$ is {\em join-irreducible} if there is no set $X\subseteq P$ with $j\not\in X$ and $j=\vee X$. If $P$ has a unique minimal element ${\hat{0}}$, then ${\hat{0}}$ is $\vee\emptyset$ and thus is not join-irreducible. Meet-irreducible elements are defined dually. In a lattice, $j$ is join-irreducible if and only if it covers exactly one element, but this need not be the case in a non-lattice. 
However, a region $J$ in ${\mathcal P}({\mathcal A},B)$ is join-irreducible if and only if it covers exactly one region $J_*$, because cover relations in ${\mathcal P}({\mathcal A},B)$ correspond to deleting one element from the separating set. If~${\mathcal A}$ is a central arrangement, a region $M$ is meet-irreducible if and only if it is covered by exactly one element, denoted $M^*$. The shards of a finite central arrangement are related to the join- and meet-irreducibles of the poset of regions, as explained below. Given a shard $\Sigma$, let $H_\Sigma$ be the hyperplane of~${\mathcal A}$ containing $\Sigma$. Let $U(\Sigma)$ be the set of {\em upper regions} of $\Sigma$, that is, the set of regions $R$ of~${\mathcal A}$ which intersect $\Sigma$ in codimension one and which have $H_\Sigma\in S(R)$. The set $L(\Sigma)$ of {\em lower regions} of $\Sigma$ is the set of regions $R$ of~${\mathcal A}$ which intersect $\Sigma$ in codimension one and which have $H_\Sigma\not\in S(R)$. In the following propositions, $U(\Sigma)$ and $L(\Sigma)$ are considered to be subposets of ${\mathcal P}({\mathcal A},B)$. \begin{prop} \label{j sigma} A region $J$ is join-irreducible in ${\mathcal P}({\mathcal A},B)$ if and only if $J$ is minimal in $U(\Sigma^J)$ for some shard $\Sigma^J$, in which case $S(J_*)=S(J)-\set{H_{\Sigma^J}}$. \end{prop} \begin{proof} Suppose $J$ is join-irreducible. Then $J$ and $J_*$ are separated by some shard $\Sigma$ and $S(J_*)=S(J)-\set{H_\Sigma}$. Since $J$ covers only $J_*$ and $H_\Sigma\not\in S(J_*)$, any region $R<J$ has $H_\Sigma\not\in S(R)$. In particular, $R$ is not in $U(\Sigma)$, so the region $J$ is minimal in $U(\Sigma)$. Conversely, suppose $J$ is minimal in $U(\Sigma)$ for some shard $\Sigma$, and suppose that $J$ covers more than one region. Let $J_*$ be the region whose separating set is $S(J)-\set{H_\Sigma}$. 
If $b$ is some vector in $B$, then the facets of $J$ which one would cross to go down by a cover in ${\mathcal P}({\mathcal A},B)$ are the facets of $J$ whose outward-directed normals have positive inner product with $b$. In particular, this set of facets is a ball, and therefore we can find a region $R$ covered by $J$ so that $R\cap J\cap J_*$ has codimension two. Let $S(J)-S(R)=\set{H}$ and let ${\mathcal A}'$ be the rank-two subarrangement containing $H$ and $H_\Sigma$. The subarrangement ${\mathcal A}'$ and the regions adjacent to $\cap{\mathcal A}'$ are depicted in Figure~\ref{prooffig}. \begin{figure}\label{prooffig} \end{figure} Since $J$ covers both $J_*$ and $R$ by respectively crossing $H_\Sigma$ and $H$, the hyperplanes $H_\Sigma$ and $H$ are basic in ${\mathcal A}'$. Because $J$ intersects $\cap{\mathcal A}'$ in codimension two, there is a region $R'$ whose separating set is $(S(J)-{\mathcal A}')\cup H_\Sigma$. This region is in $U(\Sigma)$, contradicting the minimality of~$J$. \end{proof} The following proposition is dual to Proposition~\ref{j sigma}. \begin{prop} \label{m sigma} A region $M$ is meet-irreducible in ${\mathcal P}({\mathcal A},B)$ if and only if $M$ is maximal in $L(\Sigma_M)$ for some shard $\Sigma_M$, in which case $S(M^*)=S(M)\cup\set{H_{\Sigma_M}}$. \qed \end{prop} We will write $H^J$ for $H_{\Sigma^J}$ and $H_M$ for $H_{\Sigma_M}$. We conclude the section with a technical observation which is used in the proof of Theorem~\ref{acyclic}. \begin{lemma} \label{shards inherit} Let~${\mathcal A}$ be a central hyperplane arrangement with base region~$B$ and let $I\subseteq {\mathcal A}$. Let $H\in I$ be a sink in the sub-digraph of ${\mathcal D}({\mathcal A},B)$ induced by $I$, let ${\mathcal A}^-:={\mathcal A}-\set{H}$ and let $B^-$ be the region of ${\mathcal A}^-$ containing~$B$. 
Then the shards of $({\mathcal A},B)$ contained in hyperplanes in $I-\set{H}$ are exactly the shards of $({\mathcal A}^-,B^-)$ contained in hyperplanes $I-\set{H}$. \end{lemma} \begin{proof} Since $H$ is a sink in the sub-digraph of ${\mathcal D}({\mathcal A},B)$ induced by $I$, for any $H'\in I-\set{H}$, the hyperplane $H$ is not basic in the rank-two subarrangement determined by $H\cap H'$. In particular, removing $H$ has no effect on the process of ``cutting'' $H'$ into shards. \end{proof} \section{Order dimension and subcritical pairs} \label{dim} In this section we give background information on order dimension and a new formulation of order dimension in terms of subcritical pairs. A poset $E$ on the same ground set as $P$ is called an {\em extension} of $P$ if $a\le_Pb$ implies $a\le_Eb$. An extension is called {\em linear} if it is a total order. The {\em order dimension} $\dim(P)$ of a finite poset $P$ is the smallest $d$ so that $P$ can be written as the intersection---as relations---of $d$ linear extensions of $P$. Say $Q$ is a(n) {\em (induced) subposet} of $P$ if there is a one-to-one map $i:Q\rightarrow P$ such that $x\le_Qy$ if and only if $i(x)\le_Pi(y)$. If $Q$ is an induced subposet of $P$, then $\dim(Q)\le\dim(P)$. The ``standard example'' of a poset of dimension $n$ is the collection of subsets of $[n]$ having cardinality 1 or $n-1$. In an arbitrary finite central arrangement~${\mathcal A}$ with base region~$B$, the collection of regions covering~$B$ or covered by $-B$ form a subposet of ${\mathcal P}({\mathcal A},B)$ which is isomorphic to a standard example. Each facet (maximal face) of~$B$ corresponds to a region covering~$B$, and thus we have the following lower bound on $\dim({\mathcal P}({\mathcal A},B))$. \begin{prop} \label{lower} The order dimension of ${\mathcal P}({\mathcal A},B)$ is at least the number of facets of~$B$, which is at least the rank of ${\mathcal A}$. 
\qed \end{prop} A pair $(j,m)$ in a poset $P$ is called {\em subcritical} if: \begin{enumerate} \item[(i) ] $j\not\le m$, \item[(ii) ] For all $x\in P$, if $x<j$ then $x\le m$, \item[(iii) ] For all $x\in P$, if $x>m$ then $x\ge j$. \end{enumerate} The set of subcritical pairs of $P$ is denoted $\mbox{{\rm Subcrit}}(P)$. The more commonly used {\em critical pairs} are defined by replacing condition (i) with \begin{enumerate} \item[(i') ] $j$ is incomparable to $m$. \end{enumerate} Thus critical pairs are in particular subcritical, and a subcritical pair $(j,m)$ that is not critical has the property that $j$ covers $m$ but covers nothing else, and $m$ is covered by $j$ and by nothing else. The following proposition was proven in~\cite{Rab-Riv} for critical pairs in a lattice, and the proof for subcritical pairs in a poset is essentially the same. \begin{prop} \label{irr} If $(j,m)$ is a subcritical pair in a poset $P$, then $j$ is join-irreducible and $m$ is meet-irreducible. \qed \end{prop} An extension $E$ of a poset $P$ is said to {\em reverse} a critical or subcritical pair $(j,m)$ if $m<j$ in $E$. The following formulation of order-dimension is due to Rabinovitch and Rival. \begin{prop}\cite{Rab-Riv} \label{critical} The order dimension of a finite poset $P$ is equal to the smallest $d$ such that there exist linear extensions $L_1,\ldots,L_d$ such that for each critical pair $(j,m)$ of $P$ there is some $L_i$ which reverses $(j,m)$. \qed \end{prop} Since critical pairs are in particular subcritical, one can substitute ``subcritical'' for ``critical'' in Proposition~\ref{critical}. Subcritical pairs also occur in~\cite{hyperplane}. A well-known theorem of Dushnik and Miller~\cite{Du-Mil} says that the order dimension of a poset $P$ is the smallest $d$ so that $P$ can be embedded as an induced subposet of $\mathbb R^d$. For a poset $P$ with $|P|=n$ and $\dim(P)=d$, we can use $d$ linear extensions whose intersection is $P$ to embed $P$ as a subposet of $[n]^d$. 
The theorem of Dushnik and Miller suggests that we can embed $P$ into a smaller $d$-dimensional ``box.'' Subcritical pairs are the key to embedding a poset into a small box. Let $\eta:P\rightarrow Q$ be an order-preserving map from $P$ to $Q$. That is, whenever $x\le y$ in $P$, then $\eta(x)\le\eta(y)$ in $Q$. Say $\eta$ {\em reverses} a subcritical pair $(j,m)$ if $\eta(m)<\eta(j)$. The strict inequality is essential here. \begin{prop} \label{box} The order dimension of a finite poset $P$ is equal to the smallest $d$ such that there exist order-preserving maps $\eta_1,\ldots,\eta_d:P\to\mathbb N$ such that for each subcritical pair $(j,m)$ of $P$ there is some $\eta_i$ which reverses $(j,m)$. \end{prop} \begin{proof} Suppose ${\mathbf \eta}=(\eta_1,\eta_2,\ldots,\eta_d)$ is an embedding of $P$ into $\mathbb N^d$, and let $(j,m)$ be a subcritical pair. Since $j\not\le m$, there must be some $\eta_i$ which reverses $(j,m)$. Conversely, suppose that there exist order-preserving maps $\eta_1,\ldots,\eta_d:P\to\mathbb N$ such that for each subcritical pair $(j,m)$ of $P$ there is some $\eta_i$ which reverses $(j,m)$. Let ${\mathbf \eta}:=(\eta_1,\eta_2,\ldots,\eta_d)$. To show that ${\mathbf \eta}$ is an embedding, we must show that for any pair $(a,b)$ in $P$ with $a\not\le b$, there is some $i\in[d]$ such that $\eta_i(b)<\eta_i(a)$. The simple proof of this fact follows the proof of Proposition~\ref{critical}. Suppose $(a,b)$ is an exception, or in other words, $\eta_i(b)\ge\eta_i(a)$ for all $i\in[d]$. If there exists $a'< a$ such that $a'\not\le b$, replace $a$ by $a'$ to obtain a new pair $(a',b)$, which is also an exception. (If $\eta_i(b)<\eta_i(a')$ for some $i$, then because $\eta_i$ is order-preserving we have $\eta_i(b)<\eta_i(a')\le\eta_i(a)$, contradicting the fact that $(a,b)$ was an exception.) Similarly, if there exists $b'>b$ with $a\not\le b'$, the pair $(a,b')$ is an exception. 
Continue making these replacements, and since $a$ always moves down in the poset and $b$ always moves up, the process will eventually terminate by finding an exception which is also a subcritical pair. This contradiction shows that ${\mathbf \eta}$ is indeed an embedding. \end{proof} Some modifications of Proposition~\ref{box} are worth mentioning, although they will not be used in this paper. Similar modifications of Proposition~\ref{critical} are given in~\cite[Section 1.12]{Trotter}. \begin{prop} \label{flubby box} The order dimension of a finite poset $P$ is the smallest $d$ such that there exist posets $Q_i$ and order-preserving maps $\eta_i:P\rightarrow Q_i$ for $i\in[d]$, such that for each subcritical pair $(j,m)$ of $P$ there is some $\eta_i$ which reverses $(j,m)$. \qed \end{prop} \begin{comment} \begin{proof} If $d$ is the dimension of $P$ then, as noted above, there exist $d$ order-preserving maps from $P$ onto linear extensions of $P$ with the desired property. Conversely, if there exist $D$ such maps $\eta_i$, we can take the $Q_i$ to be linear orders, because there is some order preserving map $\gamma$ from each $Q_i$ to a linear extension of itself, and $\gamma\circ\eta$ still has the desired property with respect to subcritical pairs. Each linear order is a subposet of $\mathbb N$, and thus we have an order-preserving map ${\mathbf \eta}:P\rightarrow\mathbb N$. By Proposition~\ref{box}, this is an embedding and thus $D\ge d$. \end{proof} \end{comment} \begin{prop} \label{flubbier box} The order dimension of a finite poset $P$ is the smallest $d$ such that there exist posets $Q_i$, subposets $P_i$ of $P$ and order-preserving maps $\eta_i:P_i\rightarrow Q_i$ for $i\in[d]$, such that for each subcritical pair $(j,m)$ of $P$ there is some $i$ with $j,m\in P_i$ and $(j,m)$ reversed by $\eta_i$. \qed \end{prop} Proposition \ref{flubby box} follows from Proposition \ref{box} by considering linear extensions of the $Q_i$. 
Proposition \ref{flubbier box} follows from Proposition \ref{flubby box} via the following observation: If $P'$ is an induced subposet of a finite poset $P$, then any order-preserving map $\eta':P'\rightarrow Q$ can be extended to an order preserving map $\eta:P\rightarrow E$, where $E$ is some extension of $Q$. \begin{comment} \begin{proof} Take $E$ to be any linear extension of $Q$. We can think of $\eta'$ as an order-preserving map from $P'$ to $E$, and thus we need to prove that $\eta'$ can be extended to an order-preserving map from $P$ to $E$. This can be done by induction on $|P|-|P'|$. The case $P=P'$ is vacuous. Suppose $x\in(P-P')$, and let $\eta'$ be an order-preserving map $\eta':P'\rightarrow E$. By induction, $\eta'$ can be extended to an order-preserving map $\eta^*$ from $P-\set{x}$ to $E$. Now $\max(\eta^*(\set{y\in P:y<x}))\ge \min(\eta^*(\set{z\in P:z>x}))$. Otherwise there exists some elements $y<x<z$ such that $\eta^*(z)<\eta^*(y)$, contradicting the fact that $\eta^*$ is order-preserving. \end{proof} \end{comment} \section{Order dimension of the poset of regions} \label{dim po} In this section we relate the shards of $({\mathcal A},B)$ to the subcritical pairs in ${\mathcal P}({\mathcal A},B)$. This relationship, along with Proposition~\ref{box}, is then used to prove Theorem~\ref{acyclic} via an explicit embedding. \begin{prop} \label{sc sigma} Let~${\mathcal A}$ be a central arrangement. A pair $(J,M)$ in ${\mathcal P}({\mathcal A},B)$ is subcritical if and only if there is a shard $\Sigma$ such that $J$ is minimal in $U(\Sigma)$, $M$ is maximal in $L(\Sigma)$ and $J_*\le M$. \end{prop} \begin{proof} Suppose $(J,M)$ is subcritical. Then by Proposition~\ref{irr}, $J$ is join-irreducible and $M$ is meet-irreducible, so $J_*$ and $M^*$ are defined. By condition (ii), $J_*\le M$, and in light of Propositions~\ref{j sigma} and~\ref{m sigma} it remains to show that $\Sigma^J=\Sigma_M$. By condition (iii), $J\le M^*$ as well. 
Thus we have $S(J_*)\subseteq S(M)$ and $S(J)\subseteq S(M^*)$. Therefore $H^J\in S(M^*)$. If we also have $H^J\in S(M)$ then $S(J)\subseteq S(M)$, contradicting the fact that $(J,M)$ is a subcritical pair. So $H^J=H_M$, or in other words $\Sigma^J$ and $\Sigma_M$ are contained in the same hyperplane. Suppose for the sake of contradiction that $\Sigma^J\neq\Sigma_M$. Then there is a codimension-two subspace $L$ between $\Sigma^J$ and $\Sigma_M$ in $H^J$ such that $H^J$ is not basic in the associated rank-two subarrangement ${\mathcal A}'$. Then necessarily, one of the two basic hyperplanes is in $S(J_*)\cap{\mathcal A}'$ but not in $S(M)\cap{\mathcal A}'$. This contradiction to $J_*\le M$ shows that $\Sigma^J=\Sigma_M$. Conversely, suppose that there is a shard $\Sigma$ such that $J$ is minimal in $U(\Sigma)$, $M$ is maximal in $L(\Sigma)$ and $J_*\le M$. Then by Propositions~\ref{j sigma} and~\ref{m sigma}, $J$ is join-irreducible and $M$ is meet-irreducible and because $J_*\le M$ we have $J\le M^*$ as well. Thus conditions (ii) and (iii) are satisfied. Since $M\in L(\Sigma)$ and $J\in U(\Sigma)$, we have $J\not\le M$. \end{proof} \begin{lemma} \label{shard arrow} Let $(J,M)$ be a subcritical pair in ${\mathcal P}({\mathcal A},B)$ for a central arrangement~${\mathcal A}$ and let $H\in{\mathcal A}$. If $H\not\in S(J)$ and $H\in S(M)$, then $H^J$ is basic in the rank-two subarrangement determined by $H\cap H^J$. \end{lemma} \begin{proof} Suppose $H\not\in S(J)$ and $H\in S(M)$ for some subcritical pair $(J,M)$ and let ${\mathcal A}'$ be the rank-two subarrangement determined by $H\cap H^J$. By Proposition~\ref{sc sigma}, the codimension-one faces $J\cap J_*$ and $M\cap M^*$ are in the same shard $\Sigma^J$, and thus in particular $H^J$ is basic in ${\mathcal A}'$. \end{proof} Let $I\subseteq{\mathcal A}$ induce an acyclic sub-digraph on ${\mathcal D}({\mathcal A},B)$. 
Let $F_I$ be the set of subcritical pairs $(J,M)$ in ${\mathcal P}({\mathcal A},B)$ such that $H^J\in I$. Let $H_1,H_2,\ldots,H_{|I|}$ be an ordering of the hyperplanes in $I$ such that whenever $H_i\rightarrow H_j$ in ${\mathcal D}({\mathcal A},B)$, we have $i<j$. For any region $R$ of~${\mathcal A}$, let $\eta_I(R)$ be the word of length $|I|$ in 0's and 1's whose $i^{\mbox{{\small th}}}$ letter is 0 if $H_i\not\in S(R)$ and 1 if $H_i\in S(R)$. Thinking of this word as a binary number, we have constructed a map $\eta_I$ from ${\mathcal P}({\mathcal A},B)$ to the interval $[0,2^{|I|}-1]$. The map is order-preserving because the order on ${\mathcal P}({\mathcal A},B)$ is containment of separating sets. \begin{lemma} \label{binary} The map $\eta_I$ reverses all of the subcritical pairs in $F_I$. \end{lemma} \begin{proof} The proof is by induction on $k:=|I|$. If $k=1$, the result is trivial, so suppose $k>1$ and consider the arrangement ${\mathcal A}^-:={\mathcal A}-\set{H_k}$, with base region $B^-$ as in Lemma \ref{shards inherit}. The hyperplane $H_k$ is a sink in the sub-digraph of ${\mathcal D}({\mathcal A},B)$ induced by $I$, so $I^-:=I-\set{H_k}$ induces an acyclic sub-digraph of ${\mathcal D}({\mathcal A},B)$. By Lemma~\ref{shards inherit}, the shards of $({\mathcal A},B)$ contained in hyperplanes of $I-\set{H_k}$ are exactly the shards of $({\mathcal A}^-,B^-)$ contained in hyperplanes of $I-\set{H_k}$. The notation $\eta_{I^-}$ could be interpreted either as a map on ${\mathcal P}({\mathcal A},B)$ or on ${\mathcal P}({\mathcal A}^-,B^-)$. However, for a region $R$ of~${\mathcal A}$, if $R^-$ is the region of ${\mathcal A}^-$ containing $R$, then $S(R,B)\cap I^-=S(R^-,B^-)\cap I^-$, so the distinction is meaningless. 
If $(J,M)$ is a subcritical pair in $F_I$ not associated with the hyperplane $H_k$, then by Lemma~\ref{shards inherit} and Proposition~\ref{sc sigma}, $(J^-,M^-)$ is a subcritical pair in ${\mathcal P}({\mathcal A}^-,B^-)$ associated to some hyperplane in $I^-$. Thus by induction, $\eta_{I^-}(M^-)<\eta_{I^-}(J^-)$. This is a strict inequality in the lexicographic order, and since $\eta_I$ is obtained from $\eta_{I^-}$ by appending an additional digit on the right, the strict inequality is preserved regardless of what the new digits are. Thus we have $\eta_I(M)<\eta_I(J)$. If, on the other hand, $(J,M)$ is a subcritical pair associated with $H_k$, the last digit of $\eta_I(J)$ is 1 and the last digit of $\eta_I(M)$ is 0. Thus if we can show that $S(M)\cap I\subseteq S(J)\cap I$, we will have $\eta_I(M)<\eta_I(J)$. Suppose to the contrary that there is some $H\in I$ with $H\in S(M)$ but $H\not\in S(J)$. Then Lemma~\ref{shard arrow} says that $H_k$ is basic in the rank two subarrangement ${\mathcal A}'$ determined by $H$ and $H_k$. However, this means that $H_k\rightarrow H$ in ${\mathcal D}({\mathcal A},B)$, and thus $H$ should have occurred after $H_k$ in the ordering on $I$. \end{proof} Recall that Theorem~\ref{acyclic} states that the order dimension of ${\mathcal P}({\mathcal A},B)$ is bounded above by the smallest $k$ such that ${\mathcal A}=I_1\cup I_2\cup\cdots\cup I_k$ and $I_j$ induces an acyclic sub-digraph of ${\mathcal D}({\mathcal A},B)$ for each $j$. \begin{proof}[Proof of Theorem~\ref{acyclic}] Lemma~\ref{binary} can be used for each sub-digraph to obtain the components of an order-preserving map ${\mathcal P}({\mathcal A},B)\rightarrow\mathbb N^k$ which satisfies the hypotheses of Proposition~\ref{box}. \end{proof} The directed graph in Figure~\ref{ex2} can be partitioned into three acyclic sub-digraphs, but not fewer. The partition is $I_1:=\set{1\rightarrow 4}$, $I_2:=\set{2\rightarrow 5}$, $I_3:=\set{3\rightarrow 6}$. 
Let $\eta_1:=\eta_{I_1}$ as in Lemma~\ref{binary}, and similarly $\eta_2$ and $\eta_3$. The image of the map ${\mathbf \eta}=(\eta_1,\eta_2,\eta_3)$ is illustrated in Figure~\ref{ex4}. In this figure, the first coordinate of ${\mathbf \eta}$ is the horizontal axis, the third coordinate is the vertical axis, and the positive direction of the 2nd coordinate points down into the page. It may also aid the reader's visualization to know that in this example, all of the regions of~${\mathcal A}$ map to the boundary of the cube. \begin{figure} \caption{An embedding of the poset of regions ${\mathcal P} \label{ex4} \end{figure} The {\em basic graph} $G({\mathcal A},B)$ is the graph whose vertex set is~${\mathcal A}$, with edges $\set{H_1,H_2}$ whenever $H_1$ and $H_2$ are the basic hyperplanes in some rank-two subarrangement. The directed graph ${\mathcal Q}({\mathcal A},B)$ has vertex-set~${\mathcal A}$, with $H\rightarrow H'$ whenever $H$ is a basic hyperplane in some rank-two subarrangement and $H'$ is a non-basic hyperplane in the same subarrangement. The edges in $G({\mathcal A},B)$ are exactly the directed two-cycles in ${\mathcal D}({\mathcal A},B)$. The directed graph ${\mathcal Q}({\mathcal A},B)$ is obtained from ${\mathcal D}({\mathcal A},B)$ by deleting the directed edges which are contained in two-cycles. The following is an immediate corollary of Theorem~\ref{acyclic}. \begin{cor} \label{graph} If ${\mathcal Q}({\mathcal A},B)$ is acyclic, then $\dim({\mathcal P}({\mathcal A},B))\le\chi(G({\mathcal A},B))$. \end{cor} Here $\chi(G)$ is the chromatic number of the graph $G$. The acyclicity of ${\mathcal Q}({\mathcal A},B)$ also has important consequences for order-theoretic and lattice-theoretic properties of ${\mathcal P}({\mathcal A},B)$ \cite{hyperplane}. In the example of Figures~\ref{ex} through~\ref{ex4}, ${\mathcal Q}({\mathcal A},B)$ is not acyclic, and thus Corollary~\ref{graph} does not apply. 
\section{Colorings of root systems} \label{coxeter} In this section, we use Corollary~\ref{graph} to relate the dimension of the weak order on a finite Coxeter group to a coloring problem on the corresponding root system. Colorings are given which prove Theorem~\ref{dimensions} for types A, B, D and I. For types E, F, and H, the bounds were determined using computer programs written by John Stembridge and available on the author's website. Given a non-zero vector $v$ in $\mathbb R^n$, let $H_v$ be the hyperplane normal to $v$, and let $r_v$ be the Euclidean reflection fixing $H_v$. A {\em (finite) root system} is a finite collection $\Phi$ of vectors in $\mathbb R^n$, satisfying the following properties: \begin{enumerate} \item[(i) ] For any $\beta\in\Phi$, we have $r_\beta\Phi=\Phi$. \item[(ii) ] For any $\beta\in\Phi$, we have $\beta\mathbb R\cap\Phi=\set{\pm\beta}$. \end{enumerate} The group $W$ generated by the reflections $r_\beta$ for $\beta\in\Phi$ is a finite {\em Coxeter group}, and the arrangement of hyperplanes ${\mathcal A}_\Phi:=\set{H_\beta:\beta\in\Phi}$ is a {\em Coxeter arrangement}. Each hyperplane corresponds to two roots. The rank of a root system $\Phi$ is the dimension of its linear span or equivalently, it is the rank of ${\mathcal A}_\Phi$. Coxeter arrangements are simplicial, and $W$ acts transitively on the regions of ${\mathcal A}_\Phi$. Choose some base region~$B$, and for each hyperplane $H$ in ${\mathcal A}_\Phi$, choose the normal root $\beta^+_H$ so that for each region $R$, the separating set $S(R)$ is exactly the set of hyperplanes $H$ with $\langle x,\beta^+_H\rangle>0$ for every $x$ in the interior of $R$. The set $\Phi^+:=\set{\beta^+_H:H\in{\mathcal A}_\Phi}$ is the set of {\em positive roots} of $\Phi$. Sometimes it is convenient to blur the distinction between the set of hyperplanes and the set of positive roots. 
So, for example, we will talk about rank-two subarrangements of root systems, and basic roots in a rank-two subarrangement. Consider the set of hyperplanes defining facets of~$B$, and call the corresponding set of positive roots the {\em simple roots} $\Delta$. Since ${\mathcal A}_\Phi$ is simplicial, $\Delta$ is a set of linearly independent vectors. The set $\set{r_\alpha:\alpha\in\Delta}$ is a set of {\em simple reflections} which generate $W$. For more details on root systems and Coxeter groups, the reader is referred to~\cite{Bourbaki,Humphreys}. Root systems have been classified, and we will name Coxeter arrangements according to their corresponding root systems. There are infinite families $A_n$, $B_n$, $C_n$ and $D_n$, and exceptional root systems $E_6$, $E_7$, $E_8$, $F_4$, $G_2$, $H_3$, $H_4$ and $I_2(m)$. The root systems $B_n$ and $C_n$ correspond to the same Coxeter arrangement, so we will only consider $B_n$. Since $G_2$ is the same as $I_2(6)$, we will not consider it separately. In what follows, we will present specific examples of each type of root system by specifying a set of positive roots. That set of positive roots determines the associated Coxeter arrangement~${\mathcal A}$ and the choice of base region~$B$, and for convenience we will substitute the name of the root system for the notation $({\mathcal A},B)$. For example, we will refer to ${\mathcal P}(A_n)$, ${\mathcal D}(A_n)$ and $G(A_n)$ with the obvious meanings. The poset ${\mathcal P}({\mathcal A}_\Phi,B)$ is isomorphic to the {\em weak order} on $W$. We wish to use root systems to apply Corollary~\ref{graph} to posets of regions of Coxeter arrangements, or equivalently, to the weak orders on the corresponding Coxeter groups. Caspard, Le Conte de Poly-Barbut and Morvan showed that ${\mathcal Q}({\mathcal A},B)$ is acyclic whenever~${\mathcal A}$ is a Coxeter arrangement~\cite{boundedref}. 
This was done, using different notation, in the course of establishing a lattice-theoretic result about the weak order on a finite Coxeter group. Theorem 28 of~\cite{hyperplane} is a different, more geometric proof of the acyclicity of ${\mathcal Q}({\mathcal A},B)$ in the case of a Coxeter arrangement. The acyclicity of ${\mathcal Q}({\mathcal A},B)$ allows us to use the more straightforward bound of Corollary~\ref{graph}. The key to applying Corollary~\ref{graph} is to relate $G({\mathcal A},B)$ to the inner products of roots. If the roots in $\Phi$ consist of more than one $W$ orbit, one can rescale the roots without altering properties (i) and (ii) as long as the rescaling is uniform on each $W$-orbit. For a suitable scaling, the root system has the property that in any rank-two subarrangement, the basic roots are the unique pair of distinct roots which minimize the pairwise inner products of distinct positive roots in that rank-two subarrangement. All of the root systems presented here are scaled so as to have that property. \subsection*{Type A} The Coxeter arrangement $A_{n-1}$ corresponds to the root system whose positive roots are $\set{\epsilon_i-\epsilon_j:1\le j<i\le n}$. This root system has rank $n-1$. Rank-two subarrangements of the root system $A_{n-1}$ come in two different forms: A pair of positive roots whose inner product is zero, or a set of three positive roots whose pairwise inner products are 1, 1 and -1. The hyperplanes corresponding to a pair of orthogonal roots are joined by an edge in $G(A_{n-1})$, and the basic roots in a rank-two subarrangement of cardinality three are the pair whose inner product is -1. Thus independent sets in $G(A_{n-1})$ are sets of roots in which all pairwise inner products are 1. It is easy to identify the maximal independent sets as having the form $J_i:=\set{\epsilon_i-\epsilon_j:1\le j<i}$ for some fixed $i$ or the form $\set{\epsilon_i-\epsilon_j:j<i\le n}$ for some fixed $j$. 
One $(n-1)$-coloring of $G(A_{n-1})$ uses the sets $J_i$ for $i=2,3,\ldots,n$. It is also easy to specify the basic digraph ${\mathcal D}(A_{n-1})$. Besides the 2-cycles, the directed edges are of the form $\epsilon_i-\epsilon_j\rightarrow\epsilon_i-\epsilon_k$ and $\epsilon_j-\epsilon_k\rightarrow\epsilon_i-\epsilon_k$ whenever $k<j<i$. This is because $\set{\epsilon_i-\epsilon_j,\epsilon_i-\epsilon_k,\epsilon_j-\epsilon_k}$ is a rank-two subarrangement whose basic roots are $\epsilon_i-\epsilon_j$ and $\epsilon_j-\epsilon_k$. The regions defined by $A_{n-1}$ are in bijection with permutations $\pi_1\pi_2\cdots\pi_n$ of $[n]$. This notation means that $\pi:i\mapsto \pi_i$. The separating set of a region corresponds to the {\em inversion set} $I(\pi):=\set{(i,j):1\le i<j\le n:\pi_i>\pi_j}$, and containment of inversion sets is called the {\em weak order} on the symmetric group $S_n$. Thus the coloring of $G(A_{n-1})$ described above and the maps defined in Lemma~\ref{binary} give an embedding of the weak order on $S_n$ into $\mathbb R^{n-1}$. Specifically, for $i=2,3,\ldots,n$, let $\eta_i(\pi):=\set{j:j<i,\pi_j>\pi_i}$, and interpret this set as a binary number by letting $j$ correspond to the $j^{\mbox{{\small th}}}$ digit. This is an embedding of the weak order on $S_n$ into the product $[0,1]\times[0,3]\times[0,7]\times\cdots\times[0,2^{n-1}-1]$. \begin{comment} CAN I get a nicer zonotopal embedding (like maybe that one with the factorials?) by being smarter than Proposition~\ref{sufficient}? Test this by computer. How about type B? \end{comment} \subsection*{Type B} The root system $B_n$ has positive roots \[\set{\epsilon_i\pm\epsilon_j:1\le j<i\le n}\cup\set{\epsilon_i:i\in[n]}.\] Rank-two subarrangements of $B_n$ can consist of two or three positive roots with the same pairwise inner products as in type A, or they can be a set of four positive roots whose pairwise inner products are -1, 0, 0, 1, 1, and 1. 
The edges in $G(B_n)$ are pairs of roots with inner product -1 and some pairs of roots which have inner product zero. The rank-two subarrangements of cardinality four have the form $\set{\epsilon_i,\epsilon_j,\epsilon_i\pm\epsilon_j}$, with basic roots $\epsilon_j$ and $\epsilon_i-\epsilon_j$. Thus pairs of the form $\set{\epsilon_i,\epsilon_j}$ and $\set{\epsilon_i+\epsilon_j,\epsilon_i-\epsilon_j}$ are non-edges in $G(B_n)$ even though these pairs have inner product zero. Noting that $B_1\subseteq B_2\subseteq\cdots\subseteq B_n$, we obtain an $n$-coloring by setting $I_j=B_j-B_{j-1}$ for $j=1,2,\ldots,n$. Another particularly nice coloring decomposes the positive roots into colors of size $n$ so that any pair of roots in the same color has inner product 1. The $i^{\mbox{{\small th}}}$ color in this coloring is the set \[\set{\epsilon_i-\epsilon_j:1\le j<i}\cup\set{\epsilon_i}\cup\set{\epsilon_i+\epsilon_k:1\le i<k}.\] Using these two colorings, one constructs maps, as in Lemma~\ref{binary}, to embed ${\mathcal P}(B_n)$ into $[0,1]\times[0,7]\times\cdots\times[0,2^{2n-1}-1]$ or into $[0,2^n-1]^n$. Figure~\ref{exB} shows these two colorings of $G(B_3)$, the basic graph of the Coxeter arrangement $B_3$. In this figure, the vector $\epsilon_1$ points to the right, $\epsilon_2$ points towards the top of the page, and $\epsilon_3$ points down into the page. The hyperplanes are colored in three colors: black, gray and dotted. \begin{figure} \caption{Two colorings of $G(B_3)$.} \label{exB} \end{figure} \subsection*{Type D} The positive roots of $D_n$ are $\set{\epsilon_i\pm\epsilon_j:1\le j<i\le n}$. Rank-two subarrangements of $D_n$ consist of two or three positive roots with the same pairwise inner products as in type A, so the edges in $G(D_n)$ are pairs of roots with inner product -1. One can color the positive roots by restricting the second coloring given above for $B_n$. 
Specifically, the $i^{\mbox{{\small th}}}$ color is the set \[\set{\epsilon_i-\epsilon_j:1\le j<i}\cup\set{\epsilon_i+\epsilon_k:1\le i<k}.\] This gives an embedding of ${\mathcal P}(D_n)$ into $[0,2^{n-1}-1]^n$. \subsection*{Type I} The graph $G(I_2(m))$ has only a single edge, and thus is two-colorable. It is also readily apparent by inspection that the dimension of ${\mathcal P}(I_2(m))$ is two. \subsection*{Other types} In each of the infinite families of Coxeter arrangements, the upper bound from Corollary~\ref{graph} agrees with the lower bound of Proposition~\ref{lower}, and thus the order dimension equals the rank of the arrangement. Intriguingly, the situation is different for most of the exceptional groups. The computational results are: \begin{eqnarray*} & \chi(G(E_6)) & = 9\\ & \chi(G(E_7)) & = 11\\ 16 \le & \chi(G(E_8)) & \le 19\\ & \chi(G(F_4)) & = 5\\ & \chi(G(H_3)) & =3 \\ & \chi(G(H_4)) & = 6\\ \end{eqnarray*} Of the six Coxeter arrangements of types E, F and H, only $H_3$ has the property that the chromatic number of $G$ is equal to the rank of the arrangement. \begin{comment} \subsection*{Type E} \subsection*{Type F} The root system $F_4$ has positive roots $\set{\epsilon_i\pm\epsilon_j:1\le j<i\le 4}\cup\set{\frac{1}{2}(\pm\epsilon_1\pm\epsilon_2\pm\epsilon_3+\epsilon_4)}$. Rank-two subarrangements of $F_4$ look like rank-two subarrangements of $B_n$ except that some of the rank-two subarrangements of cardinality three have pairwise inner products $\pm\frac{1}{2}$. The edges in $G(F_4)$ are pairs of roots with inner product -1 or $\frac{1}{2}$, and certain pairs of roots which have inner product zero. One 5-coloring of $G(F_4)$ is the following. 
\begin{eqnarray*} \mbox{Color 1} & = & \set{\epsilon_4\pm\epsilon_i:1\le i<4}\cup\set{\epsilon_4}\\ \mbox{Color 2} & = & \set{\epsilon_3\pm\epsilon_i:1\le i<3}\cup\set{\epsilon_3}\\ \mbox{Color 3} & = & \set{\epsilon_2-\epsilon_1,\epsilon_2,\frac{1}{2}(-\epsilon_1+\epsilon_2\pm\epsilon_3+\epsilon_4)}\\ \mbox{Color 4} & = & \set{\epsilon_2+\epsilon_1,\epsilon_1,\frac{1}{2}(\epsilon_1+\epsilon_2\pm\epsilon_3+\epsilon_4)}\\ \mbox{Color 5} & = &\set{\frac{1}{2}(\pm\epsilon_1-\epsilon_2\pm\epsilon_3+\epsilon_4)} \end{eqnarray*} To see that these five sets are indeed independent in $G({\mathcal A},B)$, notice first that all inner products within the colors are non-negative. Thus we need to consider the pairs of roots with inner product zero. The only pairs of roots in Colors 1 and 2 with zero inner product have the form $\epsilon_i\pm\epsilon_j$, but these pairs are non-edges in $G(F_4)$ just as they were non-edges in $G(B_4)$. All pairwise inner products in Colors 3 and 4 are positive. Color 5 contains two pairs of roots with inner product zero, but is still independent. One of the pairs is $\frac{1}{2}(+\epsilon_1-\epsilon_2+\epsilon_3+\epsilon_4)$ and $\frac{1}{2}(-\epsilon_1-\epsilon_2-\epsilon_3+\epsilon_4)$, which is contained in a rank-two subarrangement with $\epsilon_4-\epsilon_2$ and $\epsilon_3+\epsilon_1$. The basic roots in this rank-two subarrangement are $\epsilon_3+\epsilon_1$ and $\frac{1}{2}(-\epsilon_1-\epsilon_2-\epsilon_3+\epsilon_4)$. The other pair of roots in Color 5 with inner product zero is a non-edge in $G(F_4)$ for a similar reason. HOW TO show there is no 4-coloring?? \subsection*{Type H} \subsection*{Type I} The root systems $I_2(m)$ is the rank-two root systems related to an $m$-gon as explained above. In particular $G(I_2(m))$ has only a single edge and $\dim({\mathcal P}(I_2(m)))=2$, as is apparent for other reasons. It is also easy to construct zonotopal embeddings of ${\mathcal P}(I_2(m))$. 
\end{comment} \section{Zonotopal embeddings} \label{zonotopal} In this section, we define zonotopal embeddings of the poset of regions, and prove a proposition which gives sufficient conditions for constructing such embeddings. In Section~\ref{supersolvable}, we apply these conditions to supersolvable arrangements. Given an arrangement~${\mathcal A}$ and base region~$B$, one can choose a set of normal vectors $\set{n_H:H\in{\mathcal A}}$ such that for each region $R$, the separating set $S(R)$ is exactly the set of hyperplanes $H$ with $\langle x,n_H\rangle>0$ for every $x$ in the interior of $R$. One associates a zonotope to $({\mathcal A},B)$ by taking the Minkowski sum of the line segments connecting the origin to each $n_H$. The 1-skeleton of this zonotope, directed away from the origin, defines a poset isomorphic to ${\mathcal P}({\mathcal A},B)$. The isomorphism is $Z~:~R\mapsto \sum_{H\in S(R)}n_H$. The combinatorial type of the zonotope (and thus the partial order) is not changed when the normal vectors are scaled by positive constants. One might hope that, with some suitable scaling of the normals, and some choice of basis for $\mathbb R^n$, the map $Z$ is an embedding (in the sense of order-dimension) of ${\mathcal P}({\mathcal A},B)$ into $\mathbb R^n$. Specifically, choose a basis $b_1,b_2,\ldots,b_n$ for $\mathbb R^n$, and for any vector $v\in\mathbb R^n$, let $v_i$ be the coefficient of $b_i$ when $v$ is expanded in terms of the basis $b_1,b_2,\ldots,b_n$. Let $Z_i$ be the map $R\mapsto(Z(R))_i$, the $i^{\mbox{{\small th}}}$ component of the vector $Z(R)$. Call $Z$ a {\em zonotopal embedding} of ${\mathcal P}({\mathcal A},B)$ if for every pair of regions of~${\mathcal A}$, we have $R_1\le R_2$ if and only if $Z_i(R_1)\le Z_i(R_2)$ for all $i\in [n]$. 
As an example, consider the hyperplane arrangement in $\mathbb R^2$ whose normal vectors are $n_{H_1}=(1,0)$, $n_{H_2}=(0,1)$ and $n_{H_3}=(1,1)$, choose~$B$ to be the region containing the vector $(-1,-1)$, and let the $b_i$ be the standard basis. In this case $Z$ is not an embedding in the sense of order dimension. Consider the regions $R_1$ and $R_2$ with $S(R_1)=\set{H_1,H_3}$ and $S(R_2)=\set{H_2}$. We have $R_1\not\ge R_2$, but $Z(R_1)=(2,1)>(0,1)=Z(R_2)$. However, we can obtain the same arrangement by choosing $n_{H_2}=(r,r)$ for any $r>0$, and when $r<1$, the map $Z$ is a zonotopal embedding. We now prove a proposition which will help us, in some cases, to find a scaling of the normals so that $Z$ is an embedding. For each $H\in{\mathcal A}$, define $\nu(H):=\set{H'\in{\mathcal A}:H\rightarrow H' \mbox{ in }{\mathcal D}({\mathcal A},B)}$. Recall that in ${\mathcal D}({\mathcal A},B)$, we have $H\rightarrow H'$ whenever $H$ is basic in the rank-two subarrangement ${\mathcal A}'$ determined by $H\cap H'$. \begin{prop} \label{sufficient} Suppose for some $H\in{\mathcal A}$ that \[(n_H)_i>\sum_{H'\in \nu(H)}(n_{H'})_i.\] Then the map $Z_i$ reverses all subcritical pairs whose associated hyperplane is $H$. \end{prop} \begin{proof} Let $(J,M)$ be a subcritical pair associated to $H$. We need to show that $Z_i(M)<Z_i(J)$. By canceling terms occurring on both sides of the comparison, we see that this is equivalent to proving that \[\sum_{H'\in S(M)-S(J)}(n_{H'})_i<\sum_{H'\in S(J)-S(M)}(n_{H'})_i.\] But $H$ is the unique hyperplane in $S(J)-S(M)$, so the right hand sum is $(n_H)_i$. Any hyperplane $H'$ in $S(M)-S(J)$ intersects the shard associated to $(J,M)$. If we had $H\not\rightarrow H'$ in ${\mathcal D}({\mathcal A},B)$, the intersection $H\cap H'$ would coincide with a cutting of $H$ into shards, and $H'$ would not intersect any shard in $H$. Thus $S(M)-S(J)\subseteq\nu(H)$. 
Now we have \[(n_H)_i>\sum_{H'\in \nu(H)}(n_{H'})_i\ge\sum_{H'\in S(M)-S(J)}(n_{H'})_i.\] \end{proof} \section{Supersolvable arrangements} \label{supersolvable} In this section we apply Theorem~\ref{acyclic} and Proposition~\ref{sufficient} to supersolvable arrangements. The result is a tidier proof of a theorem of~\cite{hyperplane} on the order dimension of the poset of regions of a supersolvable arrangement, and a proof that these posets admit zonotopal embeddings. A Coxeter arrangement is supersolvable if and only if it is of type~A or~B~\cite{Bar-Ihr}, so in particular, weak orders on $A_n$ and $B_n$ admit zonotopal embeddings. An arrangement~${\mathcal A}$ is supersolvable if its lattice of intersections $L({\mathcal A})$ is supersolvable. The reader unfamiliar with $L({\mathcal A})$ and/or supersolvability can take the following theorem to be the definition of a supersolvable arrangement, or see~\cite{BEZ,Or-Ter} for definitions. \begin{thm}\cite[Theorem~4.3]{BEZ} \label{characterization} Every hyperplane arrangement of rank 1 or 2 is supersolvable. A hyperplane arrangement~${\mathcal A}$ of rank $d\ge 2$ is supersolvable if and only if it can be written as ${\mathcal A}={\mathcal A}_0\uplus{\mathcal A}_1$, where \begin{enumerate} \item[(i) ]${\mathcal A}_0$ is a supersolvable arrangement of rank $d-1$. \item[(ii) ]For any $H',H''\in{\mathcal A}_1$, there is a unique $H\in{\mathcal A}_0$ such that $H'\cap H''\subseteq H$. \end{enumerate} \qed \end{thm} Here ``$\uplus$'' refers to disjoint union. Since ${\mathcal A}_0$ has rank one less than~${\mathcal A}$, the intersection of $\cap{\mathcal A}_0$ with $\mbox{{\rm Span}}({\mathcal A})$ has dimension 1. Call this subspace $D$. \begin{lemma} \label{no H} If $H\in{\mathcal A}_1$ then $D\not\subseteq H$. \end{lemma} \begin{proof} Suppose that $D\subseteq H'$ for some $H'\in{\mathcal A}_1$. 
Since the rank of~${\mathcal A}$ is strictly greater than the rank of ${\mathcal A}_0$, there is some $H''\in{\mathcal A}_1$ not containing $D$. Then $H'\cap H''$ is contained in some unique hyperplane $H$ of ${\mathcal A}_0$. But then $H=H'$, because both contain the span of $D$ and $H'\cap H''$. This contradicts the fact that~${\mathcal A}$ is the disjoint union of ${\mathcal A}_0$ and ${\mathcal A}_1$. \end{proof} Let $R$ be a region of ${\mathcal A}_0$, let $v$ be any vector in the interior of $R$. By Lemma~\ref{no H}, no hyperplane in ${\mathcal A}_1$ contains $D$, so the affine line $v+D$ intersects every hyperplane in ${\mathcal A}_1$. By Theorem~\ref{characterization}(ii), we can linearly order the hyperplanes of ${\mathcal A}_1$ according to where they intersect $v+D$, and this ordering does not depend on the choice of $v\in\mbox{int}(R)$, but only on a choice of direction on $D$. In particular, consider the set of regions of~${\mathcal A}$ contained in $R$: the graph of adjacency on these regions is a path. As in~\cite{BEZ}, define a {\em canonical base region} inductively: Any region of an arrangement of rank 2 is a canonical base region. For a supersolvable arrangement ${\mathcal A}={\mathcal A}_0\uplus{\mathcal A}_1$, and a region $R$ of~${\mathcal A}$, let $R_0$ be the region of ${\mathcal A}_0$ containing $R$. Then~$B$ is a canonical base region if $B_0$ is a canonical base region of ${\mathcal A}_0$ and if the regions of~${\mathcal A}$ contained in $B_0$ are linearly ordered in ${\mathcal P}({\mathcal A},B)$. The linear order on the regions of~${\mathcal A}$ contained in $B_0$ also gives a linear order $H_1,H_2,\ldots,H_k$ on the hyperplanes in ${\mathcal A}_1$. \begin{prop} \label{superindependent} If ${\mathcal A}={\mathcal A}_0\cup{\mathcal A}_1$ is a supersolvable arrangement and~$B$ is a canonical base region, then ${\mathcal A}_1$ induces an acyclic sub-digraph of ${\mathcal D}({\mathcal A},B)$. 
\end{prop} \begin{proof} First we show that there are no 2-cycles in the sub-digraph of ${\mathcal D}({\mathcal A},B)$ induced by ${\mathcal A}_1$. Suppose to the contrary that $H'$ and $H''$ in ${\mathcal A}_1$ are both basic in the rank-two subarrangement ${\mathcal A}'$ they determine. By Theorem~\ref{characterization}, there is a unique $H\in{\mathcal A}_0\cap{\mathcal A}'$, and Lemma~\ref{basic containment} says that $(H\cap B) = (H'\cap H''\cap B)$. But $H\cap B$ intersects $D$ in dimension one, and thus so does $H'\cap H''\cap B$. In particular, $H'\cap H''$ contains $D$, contradicting Lemma~\ref{no H}. This contradiction proves that there are no 2-cycles in the sub-digraph of ${\mathcal D}({\mathcal A},B)$ induced by ${\mathcal A}_1$. Next, we claim that whenever $H_i\rightarrow H_j$ in ${\mathcal D}({\mathcal A},B)$, for $H_i,H_j\in {\mathcal A}_1$, we must have $i<j$. To see this, consider starting at some vector $v$ in the interior of~$B$ and moving along $v+D$ in such a direction as to meet the hyperplanes in ${\mathcal A}_1$. Since $H_i\rightarrow H_j$, the hyperplane $H_i$ is basic in the rank-two subarrangement ${\mathcal A}'$ determined by $H_i\cap H_j$, and by the previous paragraph, no other hyperplane in ${\mathcal A}_1$ is basic in ${\mathcal A}'$. As we move along $v+D$, we must cross a basic hyperplane in ${\mathcal A}'$ before we meet $H_j$. But we are moving parallel to every hyperplane in ${\mathcal A}_0$, so the basic hyperplane we must cross is $H_i$. Thus $H_j$ follows $H_i$ in the ordering on ${\mathcal A}_1$, or in other words, $i<j$. Since moving along arrows in ${\mathcal D}({\mathcal A},B)$ always moves us further in the ordering on ${\mathcal A}_1$, we can in particular never close a cycle. \end{proof} By induction, when ${\mathcal A}$ is supersolvable and $B$ is a canonical base region, we can cover ${\mathcal D}({\mathcal A},B)$ with $k$ acyclic induced sub-digraphs, where $k$ is the rank of~${\mathcal A}$. 
Since this is exactly the lower bound of Proposition~\ref{lower}, we have given a tidier proof of the following theorem which was first proven in~\cite{hyperplane}. \begin{theorem} \label{dimension} The order dimension of the poset of regions (with respect to a canonical base region) of a supersolvable hyperplane arrangement is equal to the rank of the arrangement. \qed \end{theorem} The proof of Proposition~\ref{superindependent} shows that if we order ${\mathcal A}_1$ as $H_1,H_2,\ldots,H_k$, we can construct the map $\eta_{{\mathcal A}_1}$ of Lemma~\ref{binary}. By induction, we obtain an explicit embedding in connection with Theorem \ref{dimension}. A map very similar to $\eta_{{\mathcal A}_1}$ was considered in~\cite{hyperplane}, but an explicit embedding was not given there because of the lack of Proposition~\ref{box}. It is also possible to give a zonotopal embedding of the poset of regions (with respect to a canonical base region) of a supersolvable hyperplane arrangement. \begin{theorem} \label{super zone} Let~${\mathcal A}$ be a supersolvable hyperplane arrangement of rank $d$, and let~$B$ be a canonical base region. Then ${\mathcal P}({\mathcal A},B)$ has a zonotopal embedding in $\mathbb R^d$. \end{theorem} \begin{proof} Think of~${\mathcal A}$ as a sequence ${\mathcal A}_1\subset{\mathcal A}_2\subset\cdots\subset {\mathcal A}_d={\mathcal A}$ of supersolvable arrangements with ${\mbox{rank}}({\mathcal A}_i)=i$ and such that for each $i\in[d-1]$, Theorem~\ref{characterization} gives the partition ${\mathcal A}_i={\mathcal A}_{i-1}\uplus({\mathcal A}_i-{\mathcal A}_{i-1})$. Since the canonical base region~$B$ was chosen according to an inductive definition, we have a canonical base region $B_i$ for each ${\mathcal A}_i$. 
Choose $b_i$ to be a vector in $\mbox{{\rm Span}}({\mathcal A}_i)\cap(\cap{\mathcal A}_{i-1})$ and choose the direction of $b_i$ so that, starting in $B_i$ and traveling in the direction of $b_i$, one would reach the other ${\mathcal A}_i$-regions contained in $B_{i-1}$. The vectors $b_i$ are used to define the components of the map $Z$, as defined in Section~\ref{zonotopal}. Choose the directions of the normal vectors to~${\mathcal A}$ as in Section~\ref{zonotopal}. We will prove by induction on $d$ that the normal vectors can be scaled so that for every $i\in[d]$ and every $H\in{\mathcal A}_i$, we have \begin{equation} \label{goal} (n_H)_i>\sum_{H'\in \nu(H)}(n_{H'})_i. \end{equation} Then in particular, by Proposition~\ref{sufficient}, the map $Z$ defined in section~\ref{zonotopal} is a zonotopal embedding of ${\mathcal P}({\mathcal A},B)$. The case $d=1$ is trivial, so suppose $d\ge 2$, and consider first the case $i=d$ and then the case $i<d$. For every $H'\in {\mathcal A}_{d-1}$, we have $(n_{H'})_d=0$ because $b_d\in H'$. By Proposition~\ref{superindependent}, ${\mathcal A}_d$ induces an acyclic digraph of ${\mathcal D}({\mathcal A},B)$, so we can satisfy Inequality (\ref{goal}) with $i=d$ for every $H\in{\mathcal A}_d$. In the case $i<d$, by induction we have for each $H\in{\mathcal A}_{d-1}$, \[(n_H)_i>\sum_{H'\in \nu(H)\cap{\mathcal A}_{d-1}}(n_{H'})_i.\] To satisfy Inequality (\ref{goal}) for each $i<d$ and each $H$, we need to be able to add into the right sides some terms arising from hyperplanes in ${\mathcal A}_d$. Since the inequality is strict, this can be done as long as all the new terms are small enough. To this end, we uniformly scale the normals to hyperplanes, preserving their relative proportions, and thus preserving Inequality (\ref{goal}) in the case $i=d$ as well. 
\end{proof} \section{Comments and questions} \subsection*{The exceptional types} The most immediate problem left unsolved is to determine the order dimension of the groups $E_6$, $E_7$, $E_8$, $F_4$, and $H_4$. Absent further theoretical advances, this promises to be a computationally intense problem. If any of the dimensions exceeds the rank of the arrangement, it would be the first example known to the author of a simplicial arrangement in which the dimension of the poset of regions exceeds the rank. If each dimension is equal to the rank, is there a uniform proof of that fact (i.e.\ not relying on the classification of finite Coxeter groups)? \subsection*{Quotients} As noted in the introduction, Flath~\cite{Flath} determined the order dimension of the weak order on type A. More generally, she determined the order dimension of arbitrary (one-sided) quotients (with respect to parabolic subgroups) of the weak order on type A. What are the dimensions of the quotients in other types? \subsection*{Computation} To embed the poset of regions by the method of Theorem~\ref{acyclic}, one needs to know the separating set of each element. However, Theorem~\ref{acyclic} does lead to an improvement in computation. Suppose that one wishes to answer the question ``Is $R_1\le R_2$ in ${\mathcal P}({\mathcal A},B)$?'' Suppose also that the basic unit of computation is to compute the answers to the questions ``Is $H$ in $S(R_1)$?'' and ``Is $H$ in $S(R_2)$?'' for a single $H\in{\mathcal A}$. If at any point in the computation we get the answers ``yes'' and ``no'' to the two questions, we can conclude that $R_1\not\le R_2$. 
If we begin with a covering of ${\mathcal D}({\mathcal A},B)$ by acyclic sub-digraphs $I_1,\ldots,I_d$ and test the hyperplanes within each sub-digraph in the order specified by Lemma~\ref{binary}, we obtain a further reduction: Whenever we get the answers ``no'' and ``yes'' for a hyperplane $H\in I_k$, we can conclude that $\eta_{I_k}(R_1)<\eta_{I_k}(R_2)$, and it is not necessary to test the remaining hyperplanes in $I_k$. This computational savings derives from ordering the hyperplanes in $I_k$ in a way that is compatible with ${\mathcal D}({\mathcal A},B)$, and possibly there is a more general computational scheme which is directly based on ${\mathcal D}({\mathcal A},B)$ or some variant. \begin{comment} General theorems about dimensions of congruence normal lattices?? Give a graph whose vertices are the labels in the CN-labeling, with edges whenever two labels are on edges sharing a common bottom element, or sharing a common top element. \end{comment} \section{Acknowledgments} The author wishes to thank Vic Reiner for helpful conversations and John Stembridge for helpful conversations and for writing computer programs to handle the exceptional types, as well as an anonymous referee for pointing out an error in a previous version of the proof of Proposition~\ref{j sigma}. \newcommand{\journalname}[1]{\textrm{#1}} \newcommand{\booktitle}[1]{\textrm{#1}} \end{document}
\begin{document} \title{On the Hilbert Method in the Kinetic Theory of Multicellular Systems: Hyperbolic Limits and Convergence Proof\protect\thanks{Dedicated to Abdelghani Bellouquid, who prematurely passed away in August 2015.}} \author{Mohamed Khaladi, Nisrine Outada\thanks{Universit\'e Cadi Ayyad, Facult\'e des Sciences Semlalia, LMDP, UMMISCO (IRD- UPMC), Marrakech 40000, B.P. 2390, Maroc} and Nicolas Vauchelet\thanks{Universit\'e Paris 13, Sorbonne Paris Cit\'e, Laboratoire Analyse G\'eom\'etrie et Applications, CNRS UMR 7539, 93430 Villetaneuse, France} } \maketitle \begin{abstract} We consider a system of two kinetic equations modelling a multicellular system: The first equation governs the dynamics of cells, whereas the second kinetic equation governs the dynamics of the chemoattractant. For this system, we first prove the existence of a global-in-time solution. The proof of existence relies on a fixed point procedure after establishing some a priori estimates. Then, we investigate the hyperbolic limit after rescaling of the kinetic system. It leads to a macroscopic system of Cattaneo type. The rigorous derivation is established thanks to a compactness method. \end{abstract} {\bf Keywords} Kinetic systems; Hyperbolic limit; Averaging lemma; Convergence proof. \section{Introduction} Our paper deals with the derivation of models suitable to describe the behavior of multicellular systems from their description at the microscopic scale delivered by models derived by suitable generalizations of the kinetic theory. This problem can be viewed as a possible generalization of the celebrated sixth Hilbert problem~\cite{[H902]}, which has been the object of several interesting contributions in the classical kinetic theory. The literature in the field is documented in the review papers by Perthame~\cite{[PE04]} and Saint Raymond~\cite{[S09]}. 
As it is known, the time-space scaling can be referred to the so called parabolic and hyperbolic limits or equivalently low and high field limits. The parabolic limit leads to a drift--diffusion type system (or reaction--diffusion system) in which the diffusion processes dominate the behavior of the solutions. The hyperbolic limit leads to models where the influence of the diffusion terms is of lower (or equal) order of magnitude in comparison with other convective or interaction terms. Accordingly, different macroscopic models are obtained corresponding to different scaling assumptions. The derivation of macroscopic equations from the kinetic theory description was introduced for dispersed biological entities in the pioneer paper~\cite{[ODA88]} and subsequently developed by various authors as witnessed in the bibliography of the survey~\cite{[BBNS12]}. An interesting application has been the derivation of Keller-Segel type models. A broad bibliography has been produced on this challenging topic as reviewed in Sections 5 and 6 of the survey~\cite{[BBTW15]}. The rationale of the approach proposed in~\cite{[ODA88]} consists in deriving a kinetic type model corresponding to the transport equation where the collision operator, namely the right hand side term of the kinetic equation, is perturbed by a small stochastic term modeling a Poisson velocity-jump process. The small parameter corresponds to the entity of the perturbation, while an expansion of the dependent variable is developed in terms of powers of the said parameter. Very recent applications have been delivered in~\cite{[BBC16],[BC17],[OVAK16]}. This approach is useful even when it is developed at a formal level, as it leads to interesting models at the macroscopic scale based on models of the dynamics at the microscopic scale rather than on artificial assumptions to close mass and momentum conservation equations. 
However, as it is known, most of the literature is developed at a formal level, where \textit{ad hoc} assumptions are needed to prove convergence of the aforementioned power expansions. The derivation of hyperbolic models involves additional problems on the convergence of Hilbert type expansions technically related to loss of regularity. Indeed, this is the main challenge of our paper which is tackled in four sections. In more detail, Section 2 presents a kinetic theory model of cross diffusion phenomena, where a hyperbolic scaling is used to include propagation phenomena with finite speed; a binary mixture is accounted for and the statement of the initial value problem is delivered. Section 3 develops a qualitative analysis of the initial value problem and ends up with a local, in time, existence result and with the extension to arbitrarily large times. Finally, a convergence proof of a Hilbert type expansion is delivered in Section 4; however, due to technical difficulties, we restrict ourselves to one dimension. \section{A kinetic model of chemotaxis} In this section we recall briefly the kinetic model presented in \cite{[OVAK16]}. For this aim, let $f(t,x,v)$ and $g(t,x,v)$ denote, respectively, the density of cells and of the chemoattractant, depending on time $t$, position $x \in \mathbb{R}^d$ and velocity $v \in V \subseteq \mathbb{R}^d$. 
Then our kinetic model of chemotaxis reads: \begin{equation}\label{SYST} \begin{cases} \partial_t f + v \cdot \nabla_{x}f= L(g,f), \\ \partial_t g + v \cdot \nabla_{x}g= l(g)+G(f,g), \end{cases} \end{equation} where the perturbation turning operators $L$ and $l$ model the dynamics of biological organisms by a velocity-jump process, and are integral operators defined by \begin{equation} L(f)=\int_{V}\left(T(v,v')f(t,x,v')-T(v',v)f(t,x,v)\right)dv', \end{equation} \begin{equation}\label{operatorl} l(f)=\int_{V}\left(\mathcal{K}(v,v')f(t,x,v')-\mathcal{K}(v',v)f(t,x,v)\right)dv', \end{equation} while the operator $G(f,g)$, which describes proliferation/destruction interactions, is given by \begin{equation} G(f,g) = a \left< f \right> - b \left< g \right>, \end{equation} where $a$, $b$ are real positive constants, and $\left<\cdot\right>$ stands for the $(v)$-mean of a function, i.e.\ $\displaystyle{\left<h\right>:=\int_{V} h(t,x,v)dv}$ for $h \in L^1(V)$. The turning kernels $T(v,v')$ and $\mathcal{K}(v,v')$ describe the reorientation of cells, i.e.\ the random velocity changes from the previous velocity $v'$ to the new $v$. Moreover, it is assumed that the set of admissible velocities $V$ is a spherically symmetric bounded domain of $\mathbb{R}^d$ with $V\subset B_\nu$ (the ball of radius $\nu>0$). This corresponds to the assumption that any individual of the population chooses a direction with bounded velocity. As it is mentioned in the introduction, our contribution in this paper will be the rigorous derivation of a diffusive type model for movement of chemotaxis, obtained as a hydrodynamic limit of the kinetic model \eqref{SYST}. In detail, let us assume a hyperbolic scaling for the first population: \begin{equation} x \longrightarrow \varepsilon x, \quad t \longrightarrow \varepsilon t, \end{equation} where $\varepsilon > 0$ is a small parameter which will be allowed to tend to zero. 
In this way we obtain from \eqref{SYST} the following scaled kinetic equation \begin{equation}\label{csks} \begin{cases} \partial_t f_\varepsilon + v \cdot \nabla_{x}f_\varepsilon= \frac{1}{\varepsilon} L(g_\varepsilon,f_\varepsilon), \\ \partial_t g_\varepsilon + v \cdot \nabla_{x}g_\varepsilon= l(g_\varepsilon)+G(f_\varepsilon,g_\varepsilon). \end{cases} \end{equation} In addition we assume that the operator $L$ admits the following decomposition: \begin{equation}\label{decom} L(g_\varepsilon,f_\varepsilon)=L^0(f_\varepsilon)+\varepsilon L^1(g_\varepsilon,f_\varepsilon), \end{equation} where the perturbation turning operators $L^0$ and $L^1$ are linear integral operators with respect to $f_\varepsilon$, and read: \begin{equation}\label{n0} L^{0}(f_\varepsilon)=\int_{V}\left(T^{0}(v,v')f_\varepsilon(t,x,v')-T^{0}(v',v)f_\varepsilon(t,x,v)\right)dv', \end{equation} \begin{equation}\label{n1} L^{1}[g_\varepsilon](f_\varepsilon)=\int_{V}\left(T^{1}[g_\varepsilon](v,v')f_\varepsilon(t,x,v')-T^{1}[g_\varepsilon](v',v)f_\varepsilon(t,x,v)\right)dv', \end{equation} while the operator $l$ is still defined by Eq. \eqref{operatorl}. In this work we consider the following turning kernels $T^0$, $T^1$, and $\mathcal{K}$ given by \begin{equation}\label{T0gamma} T^{0}(v,v')=\frac{\mu_0}{|V|}(1+ \gamma^2 v \cdot v'), \qquad \gamma^2 \int_V v\otimes v\,dv = |V| I_d, \end{equation} \begin{equation} T^{1}[g_\varepsilon](v,v')=\frac{\mu_1}{|V|}-\frac{\mu_{2} \gamma^2}{|V|}v'\cdot \alpha(\left< g_\varepsilon \right>), \end{equation} \begin{equation} \mathcal{K}(v,v')=\frac{\sigma}{|V|}, \end{equation} where $\mu_0$, $\mu_1$, $\mu_2$, $\sigma$ are real positive constants, $\alpha$ is a mapping $ \mathbb{R} \longrightarrow \mathbb{R}^d$, and $\vert V \vert$ denotes the volume of $V$. Notice that since $V$ is assumed to be spherically symmetric, the constant $\gamma$ in \eqref{T0gamma} is well-defined. 
With these considerations and after a straightforward calculation we obtain the following kinetic system, we refer to the paper \cite{[OVAK16]} for more details, \begin{equation}\label{cks} \begin{cases} \displaystyle \partial_t f_\varepsilon+ v\cdot \nabla_x f_\varepsilon = \dfrac{\mu_0}{\varepsilon} \left( F_{J_\varepsilon} - f_\varepsilon \right) + \mu_1 \left( \frac{n_\varepsilon}{\vert V \vert} - f_\varepsilon \right)\\ \hspace{2.8cm} \displaystyle - \mu_2 \gamma^2 \left( \frac{J_\varepsilon}{\vert V \vert} -v f_\varepsilon \right) \cdot \alpha(S_\varepsilon) , \\ \displaystyle \partial_t g_\varepsilon + v \cdot \nabla_x g_\varepsilon = \sigma \left( \frac{S_\varepsilon}{\vert V \vert} -g_\varepsilon \right) + a n_\varepsilon - bS_\varepsilon, \end{cases} \end{equation} where: \begin{itemize} \item The local densities $n_\varepsilon(t,x)$ and $S_\varepsilon(t,x)$ are defined by \begin{equation*} n_\varepsilon(t,x)=\int_V f_\varepsilon(t,x,v)dv, \quad \text{and} \quad S_\varepsilon(t,x) = \int_V g_\varepsilon(t,x,v) dv, \end{equation*} while the flux function $J_\varepsilon(t,x)$ fulfills \begin{equation*} J_\varepsilon(t,x) = \int_V v f_\varepsilon(t,x,v) dv. \end{equation*} \item The equilibrium function $F_{J_\varepsilon}(t,x,v)$ is assumed to be a linear combination of $1$, $v_1, \ldots , v_d$: \begin{equation}\label{defFJ} F_{J_\varepsilon}(t,x,v) = \dfrac{1}{\vert V \vert} \left( n_\varepsilon(t,x) + \gamma^2 J_\varepsilon(t,x) \cdot v \right). \end{equation} This equilibrium function is such that (see \eqref{T0gamma} for the definition of $\gamma$) $$ \int_V F_{J_\varepsilon}(t,x,v)\,dv = n_\varepsilon(t,x), \qquad \int_V v F_{J_\varepsilon}(t,x,v)\,dv = J_\varepsilon(t,x). $$ \end{itemize} This system is completed with initial condition \begin{equation}\label{ic} f_\varepsilon(0,x,v) = f^0_\varepsilon(x,v), \quad \text{and} \quad g_\varepsilon(0,x,v) = g^0_\varepsilon(x,v). 
\end{equation} \section{Existence result} The existence of solutions to kinetic models of chemotaxis coupled to parabolic or elliptic systems for the chemoattractant concentration has been studied in several papers (see for instance \cite{[CMPS04],[EH06],[HKS],[V10]}). However, the study of coupled kinetic systems like Eq. \eqref{cks} is less common. The aim of this section is to study the Cauchy problem \eqref{cks}-\eqref{ic} for fixed $\varepsilon>0$. More in detail we will state and prove an existence and uniqueness result for the kinetic model \eqref{cks}-\eqref{ic} in Theorem \ref{th:exist}. The proof is based on a fixed point procedure, after establishing some a priori estimates. We now introduce some notations which will be used throughout this section: $X_T:=L^\infty((0,T) \times \mathbb{R}^d \times V)$ stands for the Lebesgue space of essentially bounded measurable functions, with norm given by \begin{equation*} \left \| f \right \|_{L^\infty_{t,x,v}} = \inf \left\lbrace C \geq 0; \; \vert f(t,x,v) \vert \leq C \; \text{for almost every} \; (t,x,v) \in (0,T) \times \mathbb{R}^d \times V \right\rbrace, \end{equation*} and we have analogous definitions for $L^\infty_{x}$, $L^\infty_{t,x}$ and $L^\infty_{x,v}$. Moreover, we define $X_T^+$ as the subspace of $X_T$ with nonnegative functions. We assume that $\alpha$ is a bounded and globally Lipschitz continuous function on $\mathbb{R}$: there exist $\alpha_\infty>0$, $L_\alpha>0$ such that \begin{equation}\label{(H)} \forall\, S_1,S_2\in\mathbb{R}, \qquad \| \alpha(S_1)\|\leq \alpha_\infty, \quad \| \alpha \left( S_1 \right) - \alpha \left( S_2 \right) \| \leq L_\alpha |S_1 - S_2|. 
\end{equation} \begin{definition} We say that $(f,g)$ is a weak solution of \eqref{cks}--\eqref{ic} on $X_T$ for $T>0$, if $(f,g) \in X_T \times X_T$ and satisfies \begin{equation*} \begin{cases} &\displaystyle\int_{(0,T) \times \mathbb{R}^d \times V}\left(\partial_t \varphi + v \cdot \nabla_x \varphi \right) f \,dxdvdt = -\dfrac{\mu_0}{\varepsilon} \int_{(0,T) \times \mathbb{R}^d \times V} \left(F_J -f\right) \varphi \,dxdvdt \\[2mm] & \qquad\quad \displaystyle - \mu_1 \int_{(0,T) \times \mathbb{R}^d \times V} \left( \dfrac{n}{\vert V \vert}-f \right) \varphi \,dxdvdt - \int_{\mathbb{R}^d \times V} f^0(x,v) \; \varphi(0,x,v) \,dxdv \\ & \qquad\quad \displaystyle + \mu_2 \gamma^2 \int_{(0,T) \times \mathbb{R}^d \times V} \left( \dfrac{J}{\vert V \vert} -vf \right) \cdot \alpha(S) \, \varphi \,dxdvdt, \\ &\displaystyle\int_{(0,T) \times \mathbb{R}^d \times V} \displaystyle\left(\partial_t \varphi + v \cdot \nabla_x \varphi \right) g \,dxdvdt = -\sigma \int_{(0,T) \times \mathbb{R}^d \times V} \left( \dfrac{S}{\vert V \vert} -g \right) \varphi \,dxdvdt \\ & \qquad\quad \displaystyle + \int_{(0,T) \times \mathbb{R}^d \times V} \left( an - bS \right) \varphi \,dxdvdt - \int_{\mathbb{R}^d \times V} g^0(x,v) \; \varphi(0,x,v)\,dxdv, \end{cases} \end{equation*} for any test function $\varphi \in \mathcal{D}([0,T) \times \mathbb{R}^d \times V)$. \end{definition} We now state the main result of this section. \begin{theorem}[Existence of weak solutions] \label{theorem1}\label{th:exist} Let $(f^0,g^0)\in L^\infty_{x,v}\times L^\infty_{x,v}$ be nonnegative and assume that $\alpha$ satisfies assumption \eqref{(H)}. Then the Cauchy problem \eqref{cks}-\eqref{ic} has a unique global weak solution $(f,g)$, with $(f,g)\in X_T^+\times X_T^+$. 
Moreover, if $(f^0,g^0)\in L^1_{x,v}\times L^1_{x,v}$, then for any $t\in [0,T]$, $\|f(t,\cdot,\cdot)\|_{L^1_{x,v}} = \|f^0\|_{L^1_{x,v}}$ and $\|g(t,\cdot,\cdot)\|_{L^1_{x,v}} = \frac ab \|f^0\|_{L^1_{x,v}}(1-e^{-b|V|t}) + \|g^0\|_{L^1_{x,v}} e^{-b|V|t}$. \end{theorem} The proof of Theorem \ref{theorem1} is divided into several steps. We first establish some a priori estimates thanks to a characteristics method. Then, applying a fixed point procedure, we establish the existence of a local in time solution. This solution can be extended for arbitrary time $T>0$ and therefore we get a global existence result. \subsection{A priori estimates}\label{sub1} We start with the following a priori estimates. \begin{lemma}[A priori estimates] \label{lemma1} Let $T>0$ and suppose that $\alpha$ satisfies assumption \eqref{(H)}. Let $(f^0,g^0)$ be given in $L^\infty_{x,v}\times L^\infty_{x,v}$. Let $(f,g)$ be a weak solution of \eqref{cks}-\eqref{ic} such that $(f,g)\in X_T^+ \times X_T^+$ and $(\nabla_x f, \nabla_x g)\in {X_T}^d \times {X_T}^d$. Then $(f,g)$ satisfies the following estimates: \begin{equation}\label{lem 2.1} \|n \|_{L^\infty _{t,x }}+ \|f\|_{X_T} \leq C_1 \|f^0\|_{L^\infty _{x,v }}, \end{equation} \begin{equation}\label{lem 2.2} \|S \|_{L^\infty _{t,x }}+\|g\|_{X_T} \leq C_2 \big(\|f^0\|_{L^\infty _{x,v }}+\|g^0\|_{L^\infty _{x,v }}\big). \end{equation} Furthermore, if the initial data $(f^0,g^0)\in L^1_{x,v}\times L^1_{x,v}$ then we have, $\forall\,t\in [0,T]$, $\|f(t,\cdot,\cdot)\|_{L^1_{x,v}} = \|f^0\|_{L^1_{x,v}}$, and $$ \|g(t,\cdot,\cdot)\|_{L^1_{x,v}} = \frac ab \|f^0\|_{L^1_{x,v}}(1-e^{-b |V| t}) + \|g^0\|_{L^1_{x,v}} e^{-b|V|t}. 
$$ Moreover, if the initial data are given in $W^{1,\infty}_{x,v}\times W^{1,\infty}_{x,v}$ and assuming that $\alpha\in C^1(\mathbb{R})$, then \begin{equation}\label{lem 2.3} \|\nabla _x n \|_{(L^\infty _{x,v})^d}+\|\nabla_x f\|_{(X_T)^d} \leq C_3 \big(\|\nabla_x f^0\|_{(L^\infty _{x,v })^d}+\|\nabla_x g^0\|_{(L^\infty _{x,v })^d}\big), \end{equation} \begin{equation}\label{lem 2.4} \|\nabla _x S \|_{(L^\infty _{x,v})^d}+\|\nabla_x g\|_{(X_T)^d} \leq C_4 \big(\|\nabla_x f^0\|_{(L^\infty _{x,v })^d}+\|\nabla_x g^0\|_{(L^\infty _{x,v })^d}\big), \end{equation} where the constants $C_i,\; i=1,2,3,4,$ are independent of time $T>0$. \end{lemma} \begin{proof} {1.} First we begin with the proof of Eq. (\ref{lem 2.1}). For this purpose we write the first equation of system \eqref{cks} in the following way \begin{equation}\label{2.5} \partial_t f + v \cdot \nabla_x f + Kf = R_1, \end{equation} where the functions $K$ and $R_1$ are given by \begin{equation} K=\dfrac{\mu_0}{\varepsilon} +\mu_1 -\mu_2 \gamma^2 v \cdot \alpha(S), \;\; \text{and} \;\; R_1=\dfrac{\mu_0}{\varepsilon}F_{J}+ \dfrac{\mu_{1} n}{|V|} - \mu_{2}\gamma^2 J \cdot \alpha(S), \end{equation} where the expression of $F_J$ is given in \eqref{defFJ}. Integrating \eqref{2.5} along the characteristics, we get \begin{equation}\label{2.7} \begin{split} f(t,x,v) = \exp\left(\int_t^0 K(\tau,\widetilde{x}_\tau, v )d\tau \right) f^0(x-tv,v)\hspace*{1.1cm} \\ + \int_0^t \exp\left( \int_t^s K(\tau,\widetilde{x}_\tau, v )d\tau \right) R_1(s,\widetilde{x}_s,v)ds, \end{split} \end{equation} where we set $\widetilde{x}_\tau=x+(\tau - t)v$ (this notation will be used throughout this section). Moreover, using assumption \eqref{(H)}, for each $0\leq s \leq \tau \leq t\leq T $ we have \begin{equation} \left|K(\tau , \widetilde{x}_\tau, v )\right| \leq \dfrac{\mu_0}{\varepsilon}+\mu_1+\mu_2\alpha_\infty\gamma^2\nu. 
\end{equation} It follows \begin{equation}\label{2.8} \exp\left( \int_t^s K(\tau,\widetilde{x}_\tau, v )d\tau \right)\leq e^{C_1 T} \leq C_2. \end{equation} According to Eqs. (\ref{2.7}) and (\ref{2.8}) we write \begin{equation}\label{2.9} f(t,x,v)\leq C_2 f^0(x-tv,v)+C_2 \int_0^t |R_1(s,\widetilde{x}_s, v)|ds. \end{equation} We estimate the last term of the right hand side of the later inequality as follows: \begin{equation*} \begin{split} \int_0^t |R_1(s,\widetilde{x}_s,v)|ds \leq &\left(\dfrac{\mu_0}{\varepsilon|V|}+\dfrac{\mu_1}{|V|}\right)\int_0^tn(s,\widetilde{x}_s)ds \\ &+\left(\dfrac{\mu_0\gamma^2\nu}{\varepsilon |V|} + \mu_2 \alpha_\infty\gamma^2 \right)\int_0^t|J(s,\widetilde{x}_s)|ds\\ \leq & \left[\dfrac{\mu_0}{\varepsilon|V|}+\dfrac{\mu_1}{|V|}+\dfrac{\mu_0\gamma^2\nu^2}{\varepsilon |V|}+\mu_2 \alpha_\infty\gamma^2\nu\right] \int_0^t n(s,\widetilde{x}_s)\,ds\\ \leq & C_3 \int_0^t \|n(s,.)\|_{L^\infty_x}\,ds. \end{split} \end{equation*} Injecting this last estimate in (\ref{2.9}), we obtain \begin{equation}\label{2.10} f(t,x,v)\leq C_2 \|f^0\|_{L^\infty _{x,v}}+C_4\int_0^t \| n(s, \cdot) \|_{L^\infty _x}\,ds. \end{equation} An integration with respect to $v$ provides \begin{equation} \|n(t,\cdot)\|_{L^\infty _x}\leq C_2|V|\; \|f^0\|_{L^\infty _{x,v}}+C_4 |V| \int_0^t \| n(s,\cdot) \|_{L^\infty _x}\,ds. \end{equation} Therefore, applying Gronwall's inequality we get \begin{equation}\label{2.11} \| n(t, \cdot) \|_{L^\infty _x}\leq C \|f^0\|_{L^\infty _{x,v}}. \end{equation} Using Eq. (\ref{2.10}) together with (\ref{2.11}), we obtain a similar bound on $f$ in $L^\infty_{x,v}$. This completes the proof of the first assertion (\ref{lem 2.1}). {2.} The proof of (\ref{lem 2.2}) is straightforward and follows the same ideas as of estimate \eqref{lem 2.1}. Indeed, we have \begin{equation}\label{2.12} \partial_t g + v \cdot \nabla_x g + \sigma g = R_2, \;\; \text{where} \;\; R_2= \left(\dfrac{\sigma}{|V|}-b\right)S+a\,n. 
\end{equation} Integrating along the characteristics, we get \begin{equation} g(t,x,v)=e^{-\sigma t}g^0(x-tv,v)+\int_0^te^{(s-t)\sigma}R_2(s,\widetilde{x}_s)ds, \end{equation} and easy computation yields \begin{eqnarray}\label{12.5} g(t,x,v) & \leq & g^0(x-tv,v)+\int_0^t|R_2(s,\widetilde{x}_s)|ds\nonumber\\ & \leq & \|g^0\|_{L^\infty _{x,v}}+\left| \dfrac{\sigma}{|V|}-b\right|\int_0^t|S(s,\widetilde{x}_s)|ds+ a \int_0^t|n(s,\widetilde{x}_s)|ds \nonumber\\ & \leq & \|g^0\|_{L^\infty _{x,v}}+\left| \dfrac{\sigma}{|V|}-b\right|\int_0^t\|S(s, \cdot)\|_{L^\infty _x}\,ds + a \int_0^t \| n(s,\cdot) \|_{L^\infty _x}\,ds. \qquad \end{eqnarray} According to (\ref{lem 2.1}) we can write $$\|n(s,.)\|_{L^\infty _x}\leq C_1\|f^0\|_{L^\infty _{x,v}},$$ hence, from \eqref{12.5} it follows that \begin{equation}\label{2.13} g(t,x,v)\leq \|g^0\|_{L^\infty _{x,v}}+C_1\|f^0\|_{L^\infty _{x,v}}+C_2 \int_0^t\|S(s, \cdot)\|_{L^\infty_x}\,ds. \end{equation} Integrating over $V$, we obtain \begin{equation} S(t,x)\leq |V|\, \|g^0\|_{L^\infty _{x,v}} + C_1 |V| \,\|f^0\|_{L^\infty _{x,v}}+C_2|V| \int_0^t\|S(s,\cdot)\|_{L^\infty _x}\,ds, \end{equation} and we estimate $S$ thanks to Gronwall's inequality and we conclude the proof of (\ref{lem 2.2}) with (\ref{2.13}). {3.} Assuming the initial data in $L^1_{x,v}$, we have by integration of the first equation in \eqref{cks}: $\|f(t,\cdot,\cdot)\|_{L^1_{x,v}} = \|f^0\|_{L^1_{x,v}}.$ Integrating the second equation in \eqref{cks}, we get $$ \frac{d}{dt}\|g(t,\cdot,\cdot)\|_{L^1_{x,v}} = a |V| \|f^0\|_{L^1_{x,v}} - b |V| \|g(t,\cdot,\cdot)\|_{L^1_{x,v}}. $$ We obtain the desired estimate by integrating in time this later identity. {4.} We now prove (\ref{lem 2.3}) and (\ref{lem 2.4}). 
To begin with, we rewrite \eqref{cks} in the following way \begin{equation} \begin{cases} \partial_t f + v \cdot \nabla_x f + \widetilde{K}f = \widetilde{R}_1,\\ \partial_t g + v \cdot \nabla_x g + \sigma g = R_2, \end{cases} \end{equation} where the functions $\widetilde{K}$ and $\widetilde{R}_1$ are defined by \begin{equation} \widetilde{K} = \dfrac{\mu_0}{\varepsilon}+\mu_1, \;\; \text{and}\;\; \widetilde{R}_1 = \dfrac{\mu_0}{\varepsilon}F_{J}+\dfrac{\mu_1\,n}{|V|} - \frac{\mu_2\gamma^2}{|V|} J \cdot \alpha(S)+\mu_2\gamma^2 v \cdot \alpha(S)f, \end{equation} while $R_2$ is still given in (\ref{2.12}). Therefore, we obtain \begin{equation} \label{2.14} f(t,x,v)=e^{-t\widetilde{K}}f^0(x-tv,v)+\int_0^te^{(s-t)\widetilde{K}}\widetilde{R}_1(s,\widetilde{x}_s,v)ds, \end{equation} and \begin{equation}\label{2.15} g(t,x,v)=e^{-t\sigma}g^0(x-tv,v)+\int_0^te^{(s-t)\sigma}R_2(s,\widetilde{x}_s)ds. \end{equation} Let $i\in\{1,\ldots,d\}$ be an arbitrary but fixed index, and for a generic function $h$ we denote by $h_i$ the partial derivative $\partial_{x_i}h$. Hence, from (\ref{2.14}) and (\ref{2.15}) we get \begin{equation} \label{2.16} f_i(t,x,v)=e^{-t\widetilde{K}}f_i^0(x-tv,v)+\int_0^te^{(s-t)\widetilde{K}}\partial_{x_i}\left(\widetilde{R}_1(s,\widetilde{x}_s,v)\right) ds, \end{equation} and \begin{equation}\label{2.17} g_i(t,x,v)=e^{-t\sigma}g_i^0(x-tv,v)+\int_0^te^{(s-t)\sigma}\partial_{x_i}\left(R_2(s,\widetilde{x}_s)\right) ds. \end{equation} We now estimate separately $f_i$ and $g_i$. From (\ref{2.16}) it follows that \begin{equation} \label{2.18} |f_i(t,x,v)| \leq \|f^0_i\|_{L^\infty_{x,v}}+\int_0^t \left|\partial_{x_i}\left(\widetilde{R}_1(s,\widetilde{x}_s,v)\right)\right| ds. 
\end{equation} We have \begin{equation}\label{2.19} \begin{split} \partial_{x_i} & \big( \widetilde{R}_1(s, \widetilde{x}_s,v) \big) = \dfrac{\mu_0}{\varepsilon |V|}\left(n_i(s,\widetilde{x}_s)+\gamma^2 J_i(s,\widetilde{x}_s) \cdot v\right)+\dfrac{\mu_1\,n_i(s,\widetilde{x}_s)}{|V|}\\ &-\frac{\mu_2\gamma^2}{|V|} J_i(s,\widetilde{x}_s) \cdot \alpha\left(S(s,\widetilde{x}_s)\right) - \frac{\mu_2\gamma^2}{|V|} S_i(s,\widetilde{x}_s)J(s,\widetilde{x}_s) \cdot \alpha ' \left(S(s,\widetilde{x}_s)\right)\\ &+\mu_2\gamma^2 v \cdot \alpha\left(S(s,\widetilde{x}_s)\right)\,f_i(s,\widetilde{x}_s,v) +\mu_2\gamma^2 S_i(s,\widetilde{x}_s)\,v \cdot \alpha ' \left(S(s,\widetilde{x}_s)\right)\,f(s,\widetilde{x}_s,v) \\ = & \left( \dfrac{\mu_0}{\varepsilon |V|}+ \dfrac{\mu_1}{|V|}\right)n_i(s,\widetilde{x}_s)+\left( \dfrac{\mu_0\gamma^2\,v}{\varepsilon |V|}- \frac{\mu_2\gamma^2}{|V|} \alpha\left(S(s,\widetilde{x}_s)\right)\right) \cdot J_i(s,\widetilde{x}_s)\\ & -\frac{\mu_2\gamma^2}{|V|} S_i(s,\widetilde{x}_s)J(s,\widetilde{x}_s) \cdot \alpha ' \left(S(s,\widetilde{x}_s)\right) + \mu_2\gamma^2 v \cdot \alpha\left(S(s,\widetilde{x}_s)\right)\,f_i(s,\widetilde{x}_s,v) \\ & + \mu_2\gamma^2 S_i(s,\widetilde{x}_s)\,v \cdot \alpha' \left(S(s,\widetilde{x}_s)\right)\,f(s,\widetilde{x}_s,v). \end{split} \end{equation} We introduce the following notations \begin{equation} \widetilde{n}_i(s)=\int_V \|f_i(s,\cdot,v)\|_{L^\infty_x} dv, \text{\quad and \quad} \widetilde{S}_i(s)=\int_V \|g_i(s,\cdot,v)\|_{L^\infty_x} dv. \end{equation} In this way we have \begin{equation} |n_i(s,\widetilde{x}_s)| \leq \widetilde{n}_i(s), \quad |J_i(s,\widetilde{x}_s)|\leq \nu \widetilde{n}_i(s), \quad \text{and} \quad |S_i(s,\widetilde{x}_s)|\leq \widetilde{S}_i(s). 
\end{equation} Then from (\ref{2.19}) we immediately obtain \begin{equation}\label{2.20} \begin{split} \Big | \partial_{x_i}\big(& \widetilde{R}_1(s,\widetilde{x}_s,v)\big) \Big| \leq C_1 \widetilde{n}_i(s) +C_2 \widetilde{S}_i(s)\|n(s,\cdot)\|_{L^\infty_x}\\ & +C_3 \|f_i(s,\cdot,v)\|_{L^\infty_{x}} +C_4\widetilde{S}_i(s)\|f(s,\cdot,\cdot)\|_{L^\infty_{x,v}}. \end{split} \end{equation} According to (\ref{lem 2.1}) we have \begin{equation} \|n(s,\cdot)\|_{L^\infty_x} \leq C_1 \|f^0\|_{L^\infty_{x,v}}, \text{\quad and \quad} \|f(s,\cdot,\cdot)\|_{L^\infty_{x,v}}\leq C_2\|f^0\|_{L^\infty_{x,v}}. \end{equation} Therefore, using (\ref{2.20}) we deduce that \begin{equation} \Big| \partial_{x_i}\big( \widetilde{R}_1(s,\widetilde{x}_s,v) \big) \Big| \leq C_1 \widetilde{n}_i(s) +C_2 \widetilde{S}_i(s)+C_3 \|f_i(s,\cdot,v)\|_{L^\infty_{x}}. \end{equation} This last estimate together with (\ref{2.18}) allow us to write \begin{equation}\label{2.21} \|f_i(t,\cdot,v)\|_{L^\infty_x} \leq \|f_i^0 \|_{L^\infty_{x,v}} + C_1 \int_0^t \left(\widetilde{n}_i(s) + \widetilde{S}_i(s)\right) ds + C_3 \int_0^t \|f_i(s,\cdot,v)\|_{L^\infty_x} ds. \end{equation} The estimate on $g_i$ can be done similarly to assertion (\ref{2.21}). Indeed from Eq. (\ref{2.17}) it follows that \begin{equation}\label{2.22} \|g_i(t,\cdot,v)\|_{L^\infty_x}\leq \|g_i^0\|_{L^\infty_{x, v}} + \int_0^t \left|\partial_{x_i}\left(R_2(s,\widetilde{x}_s)\right)\right| ds, \end{equation} and we compute the first partial derivative of $R_2$ as follows \begin{equation} \partial_{x_i}\left(R_2(s,\widetilde{x}_s)\right)=\left( \dfrac{\sigma}{|V|}-b \right)S_i+a\,n_i. \end{equation} Hence \begin{equation}\label{2.23} \partial_{x_i}\left(R_2(s,\widetilde{x}_s)\right)\leq \left| \dfrac{\sigma}{|V|}-b \right|\widetilde{S}_i(s)+a\,\widetilde{n}_i(s). \end{equation} Taking Eqs. 
(\ref{2.22}) and (\ref{2.23}) into account we deduce that \begin{equation}\label{2.24} \|g_i(t,\cdot,v)\|_{L^\infty_x}\leq \|g_i^0\|_{L^\infty_{x,v}} + C \int_0^t \left( \widetilde{n}_i(s)+\widetilde{S}_i(s) \right) ds. \end{equation} Next, integrating Eqs. (\ref{2.21}) and (\ref{2.24}) with respect to $v$ and adding the resulting inequalities, we can write \begin{equation}\label{24.5} \widetilde{n}_i(t)+\widetilde{S}_i(t)\leq C_1 \left( \|f_i^0\|_{L^\infty_{x,v}}+\|g_i^0\|_{L^\infty_{x,v}}\right)+ C_2 \int_0^t \left( \widetilde{n}_i(s)+\widetilde{S}_i(s) \right) ds. \end{equation} Therefore, in view of Gronwall's inequality, equation \eqref{24.5} yields \begin{equation}\label{2.25} |n_i(s,x)|+|S_i(s,x)|\leq \widetilde{n}_i(s)+\widetilde{S}_i(s)\leq C_1 \left( \|f_i^0\|_{L^\infty_{x,v}}+\|g_i^0\|_{L^\infty_{x,v}}\right), \end{equation} and a similar estimate is obtained for $f_i$ and $g_i$ using (\ref{2.21}), (\ref{2.24}) and (\ref{2.25}). This completes the a priori estimates. \end{proof} \subsection{Proof of Theorem \ref{theorem1}.} We are now in position to prove the existence result. 
The idea of the proof follows standard techniques consisting in, first, proving local in time existence by a fixed point procedure, second, iterating this process to obtain global in time existence.\\ For the local in time existence, let $T>0$, we introduce the map $$\mathcal{F}:X_T \longrightarrow X_T,\qquad f \longmapsto \mathcal{F}(f):= \mathcal{F}_2(\mathcal{F}_1(f))$$ where $G=\mathcal{F}_1(f)$ is a weak solution of the following problem: \begin{equation*} \begin{cases} \displaystyle \partial_tG+v \cdot \nabla_x G=\left(\dfrac{\sigma}{|V|}-b\right) \int_V Gdv+ an -\sigma G,\\ G(0,x,v)=g^0(x,v)\in L^\infty_{x,v}, \end{cases} \end{equation*} with the notation $n(t,x)=\int_Vf(t,x,v)dv$, while the functional $\mathcal{F}_2$ is defined by: $F=\mathcal{F}_2(g)$ is a weak solution of \begin{equation*} \begin{cases} \displaystyle \partial_t F+v\cdot \nabla_x F=\dfrac{\mu_0}{\varepsilon}\left[\dfrac{1}{|V|}\left(\int_V F dv +\gamma^2\int_V vFdv \cdot v \right)-F\right] \\ \hspace*{2.6cm}\displaystyle +\mu_1\left( \dfrac{1}{|V|}\int_V F dv - F \right) - \mu_2 \gamma^2 \left(\dfrac{1}{|V|} \int_V vFdv - vF\right) \cdot \alpha(S),\\ F(0,x,v)=f^0(x,v)\in L^\infty_{x,v}, \end{cases} \end{equation*} with $S(t,x)=\int_Vg(t,x,v)dv$. Existence of solutions for these two linear systems is now standard. It is clear, adapting the techniques of Lemma \ref{lemma1} that $\mathcal{F}_1$ and $\mathcal{F}_2$ map $X_T$ into itself. Our objective is to show that $\mathcal{F}$ defines a contraction on $X_T$ for $T$ small enough. Let $f_1$ and $f_2$ be given in $X_T$, then we have the following result: \begin{lemma}\label{lemma2} For $T>0$ small enough, there exists a constant $C_1(T)<1$ such that \begin{equation}\label{2.34} \left\|\mathcal{F}_1(f_1)-\mathcal{F}_1(f_2)\right\|_{X_T}\leq C_1(T)\|f_1-f_2\|_{X_T}. 
\end{equation} \end{lemma} \begin{proof} We set $G_{12}=\mathcal{F}_1(f_1)-\mathcal{F}_1(f_2)$, then we have \begin{equation}\label{2.35} \partial_tG_{12}+v \cdot \nabla_x G_{12}=\left(\dfrac{\sigma}{|V|}-b\right) \int_V G_{12}dv-\sigma G_{12}+a(n_1-n_2), \end{equation} with the notations $n_i(t,x)=\int_Vf_i(t,x,v)dv,\; i=1,2$. Analogously to the proof of Lemma \ref{lemma1}, we write identity (\ref{2.35}) in the following way \begin{equation}\label{2.36} \partial_tG_{12}+v \cdot \nabla_x G_{12}+\sigma G_{12}=R_1, \end{equation} where \begin{equation} R_1=\left(\dfrac{\sigma}{|V|}-b\right)\int_V G_{12}dv+a(n_1-n_2). \end{equation} Moreover, from equation \begin{equation}\label{2.37} \dfrac{d}{ds}\left(e^{(s-t)\sigma}G_{12}(s,\widetilde{x}_s,v)\right)=e^{(s-t)\sigma}R_1(s,\widetilde{x}_s), \end{equation} it follows that \begin{equation}\label{2.38} G_{12}(t,x,v)=\int_0^t e^{(s-t)\sigma}R_1(s,\widetilde{x}_s) ds. \end{equation} $\big($We recall the notation $\widetilde{x}_s=x+(s-t)v$ $\big)$. Since $e^{(s-t)\sigma}<1$, for all $0\leq s\leq t\leq T$ we deduce from (\ref{2.38}) the following estimate \begin{equation}\label{2.39} |G_{12}(t,x,v)|\leq \int_0^t |R_1(s,\widetilde{x}_s)| ds. \end{equation} However, we have \begin{equation} \begin{split} \left|R_1(s,\widetilde{x}_s)\right| & \leq \left|\dfrac{\sigma}{|V|}-b \right|\,|V|\,\|G_{12}(s,\cdot,\cdot)\|_{L^\infty_{x,v}}+ a\,|V|\,\|f_1-f_2\|_{X_T}\\ & = C_1 \,\|G_{12}(s,\cdot,\cdot)\|_{L^\infty_{x,v}} + C_2 \,\|f_1-f_2\|_{X_T}. \end{split} \end{equation} Using this last inequality in Eq. (\ref{2.39}) we get \begin{equation} |G_{12}(t,x,v)|\leq \int_0^t C_1 \,\|G_{12}(s,\cdot,\cdot)\|_{L^\infty_{x,v}} ds + C_2 \,\|f_1-f_2\|_{X_T}, \end{equation} and the Gronwall lemma gives the desired estimate (\ref{2.34}), which finished the proof of Lemma \ref{lemma2}. \end{proof} Now, let us introduce $g_1=\mathcal{F}_1(f_1)$ and $g_2=\mathcal{F}_1(f_2)$. 
Then we claim that: \begin{lemma}\label{lemma3} For $T>0$ small enough, there exists a constant $C_2(T)<1$ such that \begin{equation}\label{2.40} \left\|\mathcal{F}_2(g_1)-\mathcal{F}_2(g_2)\right\|_{X_T}\leq C_2(T)\|g_1-g_2\|_{X_T}. \end{equation} \end{lemma} \begin{proof} The proof of Lemma \ref{lemma3} follows the same techniques as in the proof of Lemma \ref{lemma2}, but with more technical difficulties. To begin with we set $F_{12}=\mathcal{F}_2(g_1)-\mathcal{F}_2(g_2)$, then we have \begin{eqnarray}\label{2.41} && \partial_tF_{12}+v \cdot \nabla_x F_{12} = \dfrac{\mu_0+\varepsilon\mu_1}{\varepsilon |V|}\int_V F_{12}dv+\dfrac{\mu_0 \gamma^2}{\varepsilon |V|}\int_V vF_{12}dv \cdot v \nonumber\\ & & -\dfrac{\mu_2 \gamma^2}{|V|}\int_V vF_{12}dv \cdot \alpha(S_1) -\dfrac{\mu_2 \gamma^2}{|V|}\int_V \mathcal{F}_2(g_2)dv \cdot \left(\alpha(S_1)-\alpha(S_2)\right)\\ & &-\left(\dfrac{\mu_0}{\varepsilon}+\dfrac{\mu_1}{|V|}\right)F_{12} + \mu_2 \gamma^2 vF_{12} \cdot \alpha(S_1)+ \mu_2 \gamma^2 v\mathcal{F}_2(g_2) \cdot \left(\alpha(S_1)-\alpha(S_2)\right),\nonumber \end{eqnarray} with $S_i(t,x)=\int_Vg_i(t,x,v)dv,\; i=1,2$. We introduce the following notations \begin{equation*} K=\dfrac{\mu_0}{\varepsilon}+\dfrac{\mu_1}{|V|}-\mu_2 \gamma^2 v \cdot \alpha(S_1), \end{equation*} and \begin{eqnarray*} R_1 & = & \dfrac{\mu_0+\varepsilon \mu_1}{\varepsilon |V|}\int_V F_{12} dv +\dfrac{\mu_0 \gamma^2}{\varepsilon|V|} \int_V vF_{12} dv \cdot v -\dfrac{\mu_2 \gamma^2}{|V|} \int_V vF_{12} dv \cdot \alpha(S_1)\\ & & -\dfrac{\mu_2 \gamma^2}{|V|}\int_V \mathcal{F}_2(g_2)dv \cdot \left(\alpha(S_1) -\alpha(S_2)\right)+ \mu_2 \gamma^2 v \mathcal{F}_2(g_2) \cdot \left(\alpha(S_1)-\alpha(S_2)\right). \end{eqnarray*} In this way we can write identity (\ref{2.41}) as \begin{equation}\label{2.42} \partial_tF_{12}+v \cdot \nabla_x F_{12}+ K F_{12} = R_1. 
\end{equation} A simple calculation shows that \begin{equation}\label{2.43} F_{12}(t,x,v)=\int_0^t \left[\exp\left(\int_t^s K(\tau,\widetilde{x}_\tau, v)d\tau \right)R_1(s,\widetilde{x}_s,v) \right]ds, \end{equation} and in view of estimate $\exp\left(\int_t^s K(\tau,\widetilde{x}_\tau, v)d\tau\right) \leq e^{C_1 T}$, we deduce from (\ref{2.43}) that \begin{equation}\label{2.44} |F_{12}(t,x,v)|\leq e^{C_1 T}\int_0^t|R_1(s,\widetilde{x}_s,v)| ds. \end{equation} Moreover, it is easy to see that \begin{equation}\label{2.45} \begin{split} |R_1(s,\widetilde{x}_s,v)| \leq & C_2 n_{12}(s,\widetilde{x}_s)+C_3 n_2(s,\widetilde{x}_s) \left| \alpha(S_1(s,\widetilde{x}_s))-\alpha(S_2(s,\widetilde{x}_s)) \right| \nonumber\\ & +C_4 \mathcal{F}_2(g_2)(s,\widetilde{x}_s,v)\left| \alpha(S_1(s,\widetilde{x}_s))-\alpha(S_2(s,\widetilde{x}_s)) \right|, \end{split} \end{equation} with the notation $n_{12}(t,x)=\int_V F_{12}(t,x,v) dv$ and $n_2(t,x)=\int_V\mathcal{F}_2(g_2)(t,x,v) dv$. Using Lemma \ref{lemma1} together with the assumption \eqref{(H)}, we get \begin{eqnarray}\label{2.46} |R_1(s,\widetilde{x}_s,v)| & \leq & C_2 |n_{12}(s,\widetilde{x}_x)|+C_5 \|f^0\|_{L^\infty_{x,v}}\, L_\alpha \| S_1-S_2\|_{L^\infty_{t,x}} \end{eqnarray} We remark that \begin{equation}\label{2.47} |n_{12}(s,\widetilde{x}_x)|\leq |V| \|F_{12}(s,\cdot,\cdot)\|_{L^\infty_{x,v}}, \end{equation} and \begin{equation}\label{2.48} \| S_1 - S_2 \|_{L^\infty_{t,x}} \leq |V| \|g_1-g_2\|_{X_T}. \end{equation} Then from (\ref{2.44}), (\ref{2.46}), (\ref{2.47}) and (\ref{2.48}) it follows that \begin{equation}\label{2.49} \|F_{12}(t,\cdot,\cdot)\|_{L^\infty_{x,v}}\leq e^{C_1 T}\int_0^t \|F_{12}(s,\cdot,\cdot)\|_{L^\infty_{x,v}} ds+TC_3 e^{C_1 T}\|g_1-g_2\|_{X_T}, \end{equation} and we conclude the proof of Lemma \ref{lemma3} using Gronwall inequality. 
\end{proof} The local existence in Theorem \ref{theorem1} follows from a direct application of the Banach fixed point theorem since $\mathcal{F}$ is a contraction on $X_T$ for $T$ small enough. This gives existence of a unique solution on $[0,T]$ for small enough $T$. Thanks to a priori estimates in Lemma \ref{lemma1} we may iterate this process to extend the solution on $[T,2T]$, then on $[2T,3T]$, ... This concludes the proof of Theorem \ref{theorem1}. \mbox{} {\small \fbox{}}\\ \section{Hyperbolic limit}\label{sec:hyp} Derivation of macroscopic models from the underlying description at the microscopic scale, provided by the kinetic theory of active particles, is the subject of a growing literature. In \cite{[CMPS04],Hwang05,Hwang06,[BBNS12],Si14,Liao15} it has been proved that the Keller-Segel \cite{[BBTW15]} model can be derived as the limit of a kinetic model by using a moment method. The hyperbolic limit is considered in \cite{[FLP05],[BBNS07],NoDEA} leading to the same kind of macroscopic model with small diffusion. More recently these results have been extended in \cite{[OVAK16]} dealing with the coupled kinetic system \eqref{cks}. As a consequence a formal derivation of a class of hyperbolic equations of Cattaneo type is obtained. The aim of this section is to propose a rigorous proof of the formal derivation of the hyperbolic limit performed in \cite{[OVAK16]}. However, due to technical difficulties, we restrict ourselves to the one dimensional case, $d=1$. The main result can be stated as follows. \begin{theorem}\label{HLRC} Let $T>0$, $d=1$, and $V$ a symmetric bounded domain of $\mathbb{R}$ with $\gamma^2 = |V|\left(\int_V v^2\,dv\right)^{-1}$. Let $(f^0,g^0)\in (L^1_{x,v}\cap L^\infty_{x,v})^2$ be nonnegative and assume that $\alpha\in C^1(\mathbb{R})$ satisfies \eqref{(H)}. Let $(f_\varepsilon,g_\varepsilon) $ be the unique nonnegative weak solution of the scaled Cauchy problem \eqref{cks} on $[0,T]$. 
Then there exists a subsequence, denoted in the same way, and a couple $(f,g)$ such that \begin{equation} f_\varepsilon \rightharpoonup f, \quad g_\varepsilon \rightharpoonup g \quad \text{in}\ L^2_{t, x,v}. \end{equation} In addition, the moments \begin{equation} n=\int_V f(v) \,dv, \quad S=\int_V g(v) \,dv, \quad J=\int_V vf(v) \,dv, \end{equation} satisfy the following macroscopic system \begin{equation}\label{systlim} \begin{cases} \partial_t n + \partial_x J =0 \\ \partial_t J + \frac{1}{\gamma^2} \partial_x n = -\mu_1 J + \mu_2 n \, \alpha(S) \\ \partial_t g + v\partial_x g = \sigma \left(\frac{S}{|V|}-g\right) + a n - b S. \end{cases} \end{equation} Moreover, the asymptotic limit $f$ satisfies \begin{equation} f=\frac{1}{|V|} \left(n+ \gamma^2 J v\right). \end{equation} \end{theorem} The first two equations in system \eqref{systlim} form the so-called Cattaneo system for chemosensitive movement \cite{Dolak,Hillen}. Hence a direct consequence of this Theorem (and Theorem \ref{theorem1}) is the existence of a solution for the one dimensional Cattaneo system. Since the last equation has not been rescaled, it cannot be rewritten as a closed system with macroscopic variable. However, we deduce from the last equation in \eqref{systlim} that the moments $S=\langle g \rangle$ and $q=\langle v g \rangle$ verify the (non-closed) system \begin{align*} \partial_t S + \partial_x q =an-b S, \qquad\ \partial_t q + \partial_x Q(g) = -\sigma q, \end{align*} where the second order moment $Q$ is defined by $Q(g) = \int_V v^2 g(v) dv.$ \subsection{Uniform a priori estimates} We start with the following a priori estimates uniform with respect to $\varepsilon>0$: \begin{lemma}[A priori estimate in $L^2_{x,v}$]\label{L2-a-priori estimate} We suppose that we are in the conditions of theorem \ref{HLRC}. 
Then the following estimate \begin{equation} \| f_\varepsilon(t)\|^2_{L^2_{x,v}}+ \|g_\varepsilon(t)\|^2_{L^2_{x,v}} \leq C(T) \left( \|f^0\|^2_{L^2_{x,v}}+ \|g^0\|^2_{L^2_{x,v}} \right), \end{equation} holds true for a.e $t \in (0,T)$, where the constant $C(T)$ is independent of $\varepsilon$. \end{lemma} \begin{proof} We multiply the first equation of system \eqref{cks} by $f_\varepsilon$ \begin{eqnarray*} \dfrac{1}{2}\left(\partial_t f^2_\varepsilon + v\partial_x f^2_\varepsilon \right) &=& \dfrac{\mu_0}{\varepsilon}\left[ \dfrac{1}{|V|}\left(n_\varepsilon f_\varepsilon + J_\varepsilon\gamma^2 v f_\varepsilon \right)-f^2_\varepsilon \right]+\mu_1 \left(\dfrac{n_\varepsilon}{|V|}f_\varepsilon-f^2_\varepsilon\right)\\ &&- \mu_2 \gamma^2\left(\dfrac{J_\varepsilon f_\varepsilon}{|V|}-vf^2_\varepsilon \right)\alpha(S_\varepsilon), \end{eqnarray*} and integrate over $V$ to obtain \begin{eqnarray}\label{Lemma16-Eq1} \displaystyle \dfrac{1}{2}\left(\partial_t \int_V f^2_\varepsilon dv + \partial_x \int_V v f^2_\varepsilon dv\right) = \dfrac{\mu_0}{\varepsilon}\left[ \dfrac{1}{|V|}\left(n^2_\varepsilon + J^2_\varepsilon \gamma^2 \right)-\int_V f^2_\varepsilon dv \right] &&\nonumber\\ \displaystyle +\mu_1 \left(\dfrac{n^2_\varepsilon}{|V|}-\int_V f^2_\varepsilon dv \right)- \mu_2 \gamma^2\left(\dfrac{J_\varepsilon n_\varepsilon}{|V|}-\int_V vf^2_\varepsilon dv \right) \alpha(S_\varepsilon).&& \end{eqnarray} Let us introduce the symmetric and the anti-symmetric part of $f_\varepsilon$ as follows \begin{eqnarray*} f_\varepsilon^S(v)& = & \dfrac{1}{2}\left(f_\varepsilon(v) + f_\varepsilon(-v)\right), \quad v\in V, \\ f_\varepsilon^A(v)& = & \dfrac{1}{2}\left(f_\varepsilon(v) - f_\varepsilon(-v)\right), \quad v\in V. 
\end{eqnarray*} Since $V$ is symmetric, it follows that \begin{equation}\label{Lamma16-Eq2} f_\varepsilon = f_\varepsilon^S + f_\varepsilon^A, \quad n_\varepsilon=\int_V f^S_\varepsilon dv, \quad J_\varepsilon = \int_V v f^A_\varepsilon dv, \end{equation} and \begin{equation}\label{Lemma16-Eq3} \int_V f_\varepsilon^2 dv = \int_V \left(f^S_\varepsilon\right)^2 dv+ \int_V \left(f^A_\varepsilon\right)^2 dv. \end{equation} Using \eqref{Lemma16-Eq1}-\eqref{Lemma16-Eq3}, we have \begin{equation}\label{Lemma16-Eq4} \begin{split} \dfrac{1}{2}\Bigg(\partial_t \int_V f^2_\varepsilon dv + \partial_x & \int_V v f^2_\varepsilon dv\Bigg) = \dfrac{\mu_0}{\varepsilon}\Bigg[ \dfrac{1}{|V|}\left( \int_V f^S_\varepsilon dv \right)^2 -\int_V \left(f^S_\varepsilon\right)^2 dv\\ &+\dfrac{\gamma^2}{|V|}\left( \int_V v f^A_\varepsilon dv \right)^2 -\int_V \left(f^A_\varepsilon\right)^2 dv \Bigg]\\ &+\mu_1 \left[\dfrac{n^2_\varepsilon}{|V|}-\int_V f^2_\varepsilon dv \right] - \mu_2 \gamma^2\left(\dfrac{J_\varepsilon n_\varepsilon}{|V|}-\int_V vf^2_\varepsilon dv \right)\alpha(S_\varepsilon), \end{split} \end{equation} and according to Cauchy-Schwarz inequality we have \begin{equation}\label{Lemma16-Eq5} \left( \int_V f^S_\varepsilon dv \right)^2 \leq |V| \int_V \left(f^S_\varepsilon\right)^2 dv, \quad \left( \int_V v f^A_\varepsilon dv \right)^2 \leq \frac{|V|}{\gamma^2} \int_V \left(f^A_\varepsilon\right)^2 dv. \end{equation} By combining equations \eqref{Lemma16-Eq4} and \eqref{Lemma16-Eq5} we get \begin{equation}\label{Lemma16-Eq6} \dfrac{1}{2}\left( \partial_t \int_V f^2_\varepsilon dv + \partial_x \int_V v f^2_\varepsilon dv \right) \leq - \mu_2 \gamma^2 \left( \dfrac{J_\varepsilon n_\varepsilon}{|V|}-\int_V v f^2_\varepsilon dv \right)\alpha (S_\varepsilon). 
\end{equation} Moreover, we have \begin{eqnarray*} - \mu_2 \gamma^2 \left( \dfrac{J_\varepsilon n_\varepsilon}{|V|}-\int_V v f^2_\varepsilon dv \right)\alpha (S_\varepsilon) & =& \mu_2\gamma^2 \alpha (S_\varepsilon) \left( \int_V v f^2_\varepsilon dv - \dfrac{J_\varepsilon n_\varepsilon}{|V|} \right)\\ & \leq & \mu_2 \gamma^2 \nu \alpha_\infty \left( \int_V f^2_\varepsilon dv + \dfrac{ n^2_\varepsilon}{|V|} \right), \end{eqnarray*} and using \eqref{Lemma16-Eq5}, we obtain \begin{eqnarray}\label{Lemma17-Eq7} - \mu_2 \gamma^2 \left( \dfrac{J_\varepsilon n_\varepsilon}{|V|}-\int_V v f^2_\varepsilon dv \right)\alpha (S_\varepsilon)& \leq & 2 \mu_2 \gamma^2 \nu \alpha_\infty \int_V f^2_\varepsilon dv. \end{eqnarray} Hence, from \eqref{Lemma16-Eq6} and \eqref{Lemma17-Eq7} we get \begin{equation*} \partial_t \int_V f^2_\varepsilon dv + \partial_x \int_V v f^2_\varepsilon dv \leq C \int_V f^2_\varepsilon dv, \end{equation*} and integration over $x\in \mathbb{R}^d$ yields \begin{equation}\label{Lemma16-Eq8} \frac{d}{dt} \|f_\varepsilon(t)\|^2_{L^2_{x,v}}\leq C \|f_\varepsilon(t)\|^2_{L^2_{x,v}}. \end{equation} To derive a similar estimate for $g_\varepsilon$ we multiply the second equation of system \eqref{cks} by $g_\varepsilon$ and we integrate over $V$ to obtain \begin{equation*} \dfrac{1}{2}\left(\partial_t \int_V g^2_\varepsilon dv + \partial_x \int_V v g^2_\varepsilon dv \right) = \sigma\left( \dfrac{S^2_\varepsilon}{|V|}-\int_V g^2_\varepsilon dv \right)+ a n_\varepsilon S_\varepsilon - b S^2_\varepsilon. 
\end{equation*} Using the Cauchy-Schwarz inequality we can write \begin{equation*} \dfrac{1}{2}\left(\partial_t \int_V g^2_\varepsilon dv + \partial_x \int_V v g^2_\varepsilon dv \right) \leq \dfrac{a |V|}{2}\int_V f^2_\varepsilon dv + \left(\dfrac{a}{2}+b \right)|V|\int_V g^2_\varepsilon dv, \end{equation*} and integration over the space variable $x \in \mathbb{R}$ gives \begin{equation}\label{Lemma16-Eq9} \frac{d}{dt} \|g_\varepsilon(t)\|^2_{L^2_{x,v}} \leq a|V| \|f_\varepsilon(t)\|^2_{L^2_{x,v}} + (a+2b)|V|\|g_\varepsilon(t)\|^2_{L^2_{x,v}}. \end{equation} Let us now combine equations \eqref{Lemma16-Eq8} and \eqref{Lemma16-Eq9} to get $$ \frac{d}{dt} \left[ \|f_\varepsilon (t)\|^2_{L^2_{x,v}} + \|g_\varepsilon (t)\|^2_{L^2_{x,v}} \right] \leq C \left[ \|f_\varepsilon (t)\|^2_{L^2_{x,v}} + \|g_\varepsilon (t)\|^2_{L^2_{x,v}} \right]. $$ We conclude the proof thanks to Gronwall's inequality. \end{proof} \subsection{Convergence by compactness}\label{sec:conv} According to Lemma \ref{L2-a-priori estimate}, the sequences $f_\varepsilon$, $g_\varepsilon$ are bounded in $L^\infty\left(0,T;L^2_{x,v}\right)$, hence they are bounded in $L^2_{t,x,v}$. Accordingly, it follows that there exist two subsequences, denoted in the same way, and $f$, $g \in L^2_{t,x,v}$ such that \begin{equation} f_\varepsilon \rightharpoonup f, \quad g_\varepsilon \rightharpoonup g \quad \text{in} \;\; L^2_{t,x,v}. \end{equation} Moreover, we have \begin{equation}\label{eq.4.22} \partial_t g_\varepsilon + v \partial_x g_\varepsilon = \sigma \left( \frac{S_\varepsilon}{|V|} -g_\varepsilon \right) + a n_\varepsilon - b S_\varepsilon \in L^2_{x,v}. \end{equation} Hence, according to an averaging lemma, see for instance \cite{[S09]} Proposition 3.3.1, we have \begin{equation} \int_V g_\varepsilon(v) \, dv = S_\varepsilon \mbox{ is uniformly bounded in } L^2\left(0,T; H^{\frac{1}{2}}(\mathbb{R})\right). 
\end{equation} Integrating equation \eqref{eq.4.22} with respect to $v$, we deduce clearly that $\partial_t S_\varepsilon \in L^2\left(0,T;W^{-1,1}(\mathbb{R})\right)$. Moreover, for each compact $K\subset \mathbb{R}$, we have the embeddings (see e.g. \cite{[BCD11]}) \begin{equation} H^{\frac{1}{2}}(K) \xhookrightarrow[compact]{} L^2(K) \xhookrightarrow[{\color{white}{---}}]{} W^{-1,1}(K). \end{equation} From Aubin-Lions compactness Lemma (see \cite{[S85]}), we deduce that the sequence $(S_\varepsilon)_\varepsilon$ is relatively compact in $L^2\left(0,T;L^2(K)\right)$. Hence we can extract a subsequence, still denoted $(S_\varepsilon)_\varepsilon$, which converges strongly towards $S$ in $L^2 \left((0,T)\times K\right)$. By uniqueness of the weak limit, we have that $S = \int_V g(v) dv$. However the convergence is global: \begin{equation} S_\varepsilon \to S \quad \text{in} \;\; L^2_{t,x}. \end{equation} Indeed, for any compact $[-R,R] \subset\mathbb{R}$ we may extract a subsequence $(S_\varepsilon)_\varepsilon$ such that $S_\varepsilon \to S$ strongly in $L^2([0,T]\times [-R,R])$, and we know that $S_\varepsilon = \int_V g_\varepsilon(v)\,dv$ where $$ \partial_t \int_V (f_\varepsilon^2 + g_\varepsilon^2) dv + \partial_x \int_V v (f_\varepsilon^2+g_\varepsilon^2) dv \leq C \int_V (f_\varepsilon^2+g_\varepsilon^2) dv. $$ Multiplying by a function $x\mapsto\phi(x)\in C^1(\mathbb{R})$ with bounded derivative and integrating, we deduce \begin{equation}\label{estimphi1} \frac{d}{dt} \int_{\mathbb{R}}\int_V (f_\varepsilon^2+g_\varepsilon^2) \phi \,dxdv \leq C \int_{\mathbb{R}}\int_V (f_\varepsilon^2+g_\varepsilon^2) \phi \,dxdv + \int_{\mathbb{R}} \int_V v(f_\varepsilon^2+g_\varepsilon^2) \phi' \,dxdv. \end{equation} In order to pass from local to global convergence, we need to prove that we have a bound on the tail at infinity. Let us show that $(S_\varepsilon)_\varepsilon$ is a Cauchy sequence in $L^2_{t,x}$. 
We compute $$ \int_0^T \int_{\mathbb{R}} |S_\varepsilon - S_{\varepsilon'}|^2\,dxdt = \int_0^T \int_{-R}^R |S_\varepsilon - S_{\varepsilon'}|^2\,dxdt + \int_0^T \int_{\mathbb{R}\setminus [-R,R]} |S_\varepsilon - S_{\varepsilon'}|^2\,dxdt. $$ From the above result, we know that the first term of the right hand side goes to $0$ as $\varepsilon, \varepsilon' \to 0$. For the second term, let us consider $\phi\in C^\infty(\mathbb{R}^d)$ such that $0\leq \phi\leq 1$, $\phi(x)=0$ for $|x|\leq 1/2$ and $\phi(x)=1$ for $|x|\geq 1$. We define $\phi_R(x) = \phi(x/R)$. Then, we have \begin{align*} \int_0^T \int_{\mathbb{R}\setminus [-R,R]} |S_\varepsilon - S_{\varepsilon'}|^2\,dxdt & \leq \int_0^T \int_{\mathbb{R}\setminus [-R,R]} |S_\varepsilon - S_{\varepsilon'}|^2 \phi_R\,dxdt \\ & \leq 2 \int_0^T \int_{\mathbb{R}} (|S_\varepsilon|^2 + |S_{\varepsilon'}|^2) \phi_R\,dxdt. \end{align*} Let us now use estimate \eqref{estimphi1} with $\phi_R$, since $\phi'_R(x) = \frac 1R \phi'(x/R)$, we have \begin{equation} \begin{split} \frac{d}{dt} \int_{\mathbb{R}}\int_V (f_\varepsilon^2+g_\varepsilon^2) \phi_R \,dxdv &\leq C \int_{\mathbb{R}}\int_V (f_\varepsilon^2+g_\varepsilon^2) \phi_R \,dxdv\\ & + \frac{1}{R}\int_{\mathbb{R}} \int_V v(f_\varepsilon^2+g_\varepsilon^2) \phi'(x/R) \,dxdv. \end{split} \end{equation} Applying a Gronwall Lemma, we deduce that $$ \int_{\mathbb{R}}\int_V (f_\varepsilon^2+g_\varepsilon^2) \phi_R \,dxdv \leq e^{CT} \left(\int_{\mathbb{R}}\int_V ((f^0)^2+(g^0)^2) \phi_R \,dxdv+ \frac{C\|\phi'\|}{R}\right). $$ Since the initial data $f^0$ and $g^0$ are given in $L^2_{x,v}$ and $\phi_R(x)=0$ on $B_{R/2}$, we deduce that the left hand side goes to $0$ as $R\to +\infty$, uniformly with respect to $\varepsilon$. Thus, $$ \int_0^T \int_{\mathbb{R}} (|S_\varepsilon|^2 + |S_{\varepsilon'}|^2) \phi_R\,dxdt \leq |V| \int_0^T\int_{\mathbb{R}}\int_V (f_\varepsilon^2+f_{\varepsilon'}^2) \phi_R \,dxdvdt $$ goes uniformly to $0$ as $R\to +\infty$. 
We conclude that the sequence $(S_\varepsilon)_\varepsilon$ is a Cauchy sequence in $L^2([0,T]\times\mathbb{R})$. \mbox{} {\small \fbox{}}\\ \subsection{Proof of Theorem \ref{HLRC}} Multiply the first and second equations of system \eqref{cks} by 1 and $v$ respectively, and integrate over $V$ to obtain the following system \begin{equation}\label{theorem15-Eq1} \begin{cases} \displaystyle \partial_t n_\varepsilon + \partial_x J_\varepsilon =0 \\ \displaystyle \partial_t J_\varepsilon + \partial_x \int_V v^2 f_\varepsilon \, dv = -\mu_1 J_\varepsilon + \mu_2 \gamma^2 \int_V v^2 f_\varepsilon \,dv\, \alpha(S_\varepsilon) \\ \partial_t g_\varepsilon + v\cdot \partial_x g_\varepsilon = \sigma\left(\frac{S_\varepsilon}{|V|}-g_\varepsilon\right)+an_\varepsilon-b S_\varepsilon. \end{cases} \end{equation} We have \begin{equation}\label{limfg} f_\varepsilon(t,x,v) \rightharpoonup f(t,x,v) \quad \mbox{ and } \quad g_\varepsilon(t,x,v) \rightharpoonup g(t,x,v) \quad \text{in} \;\; L^2_{t,x,v}. \end{equation} Therefore, since the set of velocities $V$ is bounded, we deduce \begin{equation}\label{lim1} n_\varepsilon(t,x) \rightharpoonup n(t,x), \quad J_\varepsilon(t,x) \rightharpoonup J(t,x) \quad \text{in} \;\; L^2_{t,x}, \end{equation} \begin{equation}\label{lim2} S_\varepsilon(t,x) \rightarrow S(t,x), \quad q_\varepsilon(t,x) \rightharpoonup q(t,x) \quad \text{in} \;\; L^2_{t,x}, \end{equation} \begin{equation}\label{lim3} \int_V v^2f_\varepsilon(t,x,v) dv \rightharpoonup \int_V v^2 f(t,x,v) dv \quad \text{in} \;\; L^2_{t,x}, \end{equation} \begin{equation}\label{lim4} \int_V v^2 g_\varepsilon(t,x,v) dv \rightharpoonup \int_V v^2 g(t,x,v) dv \quad \text{in} \;\; L^2_{t,x}, \end{equation} when $\varepsilon$ tends to zero. However, according to Section \ref{sec:conv} we have \begin{equation}\label{weak-lim5} \alpha(S_\varepsilon(t,x)) \, \int_V v^2 f_\varepsilon(t,x,v) \,dv \rightharpoonup \alpha(S(t,x)) \, \int_V v^2 f(t,x,v) \,dv \quad \text{in} \;\; L^2_{t,x}. 
\end{equation} Hence, by passing to the limit in \eqref{theorem15-Eq1}, in the sense of distributions, and taking into account Eqs. \eqref{lim1}-\eqref{weak-lim5}, it follows that \begin{equation}\label{macro-2-Q} \begin{cases} \displaystyle \partial_t n + \partial_x J =0\\ \displaystyle \partial_t J + \partial_x \int_V v^2 f dv = -\mu_1 J + \mu_2 \gamma^2 \int_V v^2 f dv \, \alpha(S) \\ \displaystyle \partial_t g + v\cdot \partial_x g = \sigma\left(\frac{S}{|V|}-g\right)+an-b S. \end{cases} \end{equation} To identify the term $\int_V v^2 f(t,x,v) dv$, we multiply the first equation of system \eqref{cks} by $\varepsilon$ to get \begin{equation} \begin{split} \varepsilon \partial_t f_\varepsilon (t,x,v) + \varepsilon v & \cdot \partial_x f_\varepsilon (t,x,v)\\ = \mu_0 ( F_{n_\varepsilon, J_\varepsilon}(t,x,v) & -f_\varepsilon(t,x,v) ) + \varepsilon \mu_1 \left( \dfrac{n_\varepsilon (t,x)}{|V|}- f_\varepsilon (t,x,v) \right)\\ - \varepsilon & \mu_2 \gamma^2 \left( \dfrac{J_\varepsilon (t,x)}{|V|} -v f_\varepsilon (t,x,v) \right) \alpha\left(S_\varepsilon (t,x)\right). \end{split} \end{equation} Then, letting $\varepsilon$ go to zero yields \begin{equation} f=F_{n,J}(t,x,v)=\frac{1}{|V|} \left(n+ \gamma^2 J v\right) \end{equation} and a simple calculation shows that \begin{equation} \int_V v^2 f(t,x,v) dv =\int_V v^2 F_{n,J}(t,x,v) dv =\frac{1}{\gamma^2} n(t,x). \end{equation} Using this last equation in system \eqref{macro-2-Q} finishes the proof. \mbox{} {\small \fbox{}}\\ \end{document}
\begin{document} \title{A Multilevel Correction Scheme for Nonsymmetric Eigenvalue Problems by Finite Element Methods} \begin{abstract} A multilevel correction scheme is proposed to solve {\color{black}defective and nondefective eigenvalue problems of nonsymmetric partial differential operators} by the finite element method. The method includes multiple correction steps in a sequence of finite element spaces. In each correction step, we only need to solve two source problems on a finer finite element space and two eigenvalue problems on the coarsest finite element space. The accuracy of the eigenpair approximation is improved after each correction step. This correction scheme improves overall efficiency of the finite element method in solving nonsymmetric eigenvalue problems. \end{abstract} \begin{keywords} Nonsymmetric eigenvalue problem, multilevel correction, finite element method, high-efficiency \end{keywords} \begin{AMS} 65N30, 65N25, 65L15, 65B99 \end{AMS} \pagestyle{myheadings} \thispagestyle{plain} \markboth{Hehu Xie and Zhimin Zhang}{Multilevel correction for nonsymmetric eigenvalue} \section{Introduction} As we know, the numerical approximation of eigenvalue problems plays a central role in the analysis of the stability for nonlinear partial differential equations. For example in fluid mechanics, the analysis of the hydrodynamic stability always leads to a nonsymmetric eigenvalue problem (see \cite{CliffeHallHouston,HeuvelineRannacher_2001,HeuvelineRannacher_2006}). The stability of the underlying flow depends on the real part of the eigenvalue which has the smallest real part (see \cite{CliffeHallHouston,HeuvelineRannacher_2006}). For more details, please refer to \cite{CliffeHallHouston,HeuvelineRannacher_2006,SchmidHenningson}. The aim of understanding the stability of nonlinear partial differential equations naturally leads to the computation of the eigenvalue problems with some numerical methods. 
The main content of this paper is to design an efficient finite element method to compute nonsymmetric eigenvalue problems. Recently, a multigrid method is designed to solve the self-adjoint eigenvalue problem based on a type of multilevel correction method \cite{LinLuoXie,LinXie,LinXie_Multigrid,Xie_IMA}. But as we know, the analysis of the stability for nonlinear partial differential equations always leads to nonsymmetric eigenvalue problems \cite{CliffeHallHouston,HeuvelineRannacher_2006} and the extensions of the multilevel method for self-adjoint eigenvalue problems to the nonsymmetric ones is not direct \cite{Kolman,XuZhou,YangFan} and needs more analysis. So the purpose of this paper is to propose a multilevel correction scheme to solve nonsymmetric eigenvalue problems based on the finite element method. In the past, a two-grid finite element method was proposed and analyzed by Xu and Zhou in \cite{XuZhou} for symmetric eigenvalue problems. Latter, Kolman used this idea to design a two-level method for nonsymmetric eigenvalue problems in \cite{Kolman}. Yang and Fan \cite{YangFan} also studied a two-grid method for nonsymmetric eigenvalue problems. As an alternative approach, in \cite{NagaZhang,NagaZhangZhou,WuZhang}, the authors used a recovery technique PPR to improve the convergence rate for both symmetric and nonsymmetric eigenvalue problems. {\color{black}All these methods are designed for the nonsymmetric eigenvalue problems under the assumption that the ascent of the concerned eigenvalues is only one which means the algebraic eigenspace is the same as the geometric eigenspace.} Along the line of multilevel correction method, here we present a multilevel correction scheme to solve nonsymmetric eigenvalue problems {\color{black}without the ascent assumption}. With the proposed method, solving nonsymmetric eigenvalue problems will not be much more expensive than solving corresponding source problems. 
The correction method for eigenvalue problems in this paper is based on a series of finite element spaces with different levels of accuracy which are related to the multilevel method (cf. \cite{Xu}). The standard Galerkin finite element method for nonsymmetric eigenvalue problems has been extensively investigated, e.g. Babu\v{s}ka and Osborn \cite{Babuska2,BabuskaOsborn}, Chatelin \cite{Chatelin} and references cited therein. Here we adopt some basic results in these papers to { carry on error estimates for our multilevel correction scheme}. It will be shown that the convergence rate of the eigenpair approximations can be improved after each correction step. Our multilevel correction procedure can be described as follows: (1)\ solve an eigenvalue problem in the coarsest finite element space; (2)\ solve a source problem in an augmented space with the associated eigenfunction from (1) as the load vector; (3)\ solve the eigenvalue problem again on a finite element space constructed by enhancing the coarsest finite element space with the eigenfunction obtained in step (2). Then go to step (2) for the next loop. In this method, we replace solving the eigenvalue problem in finer finite element spaces by solving a series of boundary value problems in a series of nested finite element spaces (with the finest space as the last one) and a series of eigenvalue problems in the coarsest finite element space; and yet, we achieve the same accuracy as solving the eigenvalue problem in the finest space. It is well known that there exist multigrid methods that solve boundary value problems with the optimal computational work (cf. \cite{Xu_Two_Grid}). Therefore, combined with the multigrid method, our correction method improves overall efficiency in solving nonsymmetric eigenvalue problems (cf. \cite{Xie_IMA,Xie_JCP}). An outline of the paper goes as follows. In Section 2, we introduce the finite element method for nonsymmetric eigenvalue problems. 
An one level correction scheme is described and analyzed in Section 3. In Section 4, we propose and analyze a multilevel correction algorithm to solve nonsymmetric eigenvalue problems by the finite element method. Some numerical examples are presented in Section 5 to validate our theoretical analysis and some concluding remarks are given in the last section. \section{Discretization by finite element method} In this section, we introduce some notation and error estimates of the finite element approximation for nonsymmetric eigenvalue problems. Throughout this paper, the letter $C$ (with or without subscripts) denotes a generic positive constant which may be different at different occurrences. For convenience, we use symbols $\lesssim$, $\gtrsim$, and $\approx$, such that $x_1\lesssim y_1, x_2\gtrsim y_2$, and $x_3\approx y_3$ have meanings: $x_1\leq C_1y_1$, $x_2 \geq c_2y_2$, and $c_3x_3\leq y_3\leq C_3x_3$, for some constants $C_1, c_2, c_3$, and $C_3$ that are independent of mesh sizes (cf. \cite{Xu}). We consider the following eigenvalue problem: Find $\lambda\in \mathcal{C}$ and $u$ such that \begin{equation}\label{Eigenvalue_Problem} \left\{ \begin{array}{rcl} -\nabla\cdot(A\nabla u)+\mathbf b\cdot\nabla u +\phi u&=&\lambda\varphi u,\ \ \ {\rm in}\ \Omega,\\ u&=&0,\ \ \ \ \ \ \ {\rm on}\ \partial\Omega,\\ \int_{\Omega}\varphi |u|^2d\Omega&=&1, {\epsilon}nd{array} \right. {\epsilon}nd{equation} where $\Omega\subset \mathcal{R}^d$ is a bounded polygonal domain, $A\in\mathcal{C}^{d\times d}$, $\mathbf b\in \mathcal{C}^d$, $\phi$ is a function defined on $\Omega$ and $\varphi$ is a real positive function with $\varphi\geq c_0>0$. We define $V:=H_0^1(\Omega)$ with the usual norm $\|\cdot\|_1$. 
The corresponding variational form of (\ref{Eigenvalue_Problem}) can be stated as follows: Find $(\lambda,u)\in \mathcal{C}\times V$ such that $b(u,u)=1$ and \begin{eqnarray}\label{Eigenvalue_Problem_Weak} a(u,v)&=&\lambda b(u,v),\ \ \ \ \ \forall v\in V, {\epsilon}nd{eqnarray} where \begin{eqnarray*} a(u,v)&=&(A\nabla u,\overline{\nabla v})+(\mathbf b\cdot \nabla u, \bar{v})+(\phi u,\bar{v}),\\ b(u,v)&=&(\varphi u,\bar{v}) {\epsilon}nd{eqnarray*} with $(\cdot,\cdot)$ denoting the inner product in the space $L^2(\Omega)$. The corresponding adjoint eigenvalue problem is: Find $(\lambda,u^*)\in \mathcal{C}\times V$ such that $b(u^*,u^*)=1$ and \begin{eqnarray}\label{Eigenvalue_Problem_Weak_Adjoint} a(v,u^*)&=&\lambda b(v,u^*),\ \ \ \ \ \forall v\in V. {\epsilon}nd{eqnarray} In the sequel, we also use the norm $\|v\|_b=\sqrt{b(v,v)}$ which is equivalent to the $L^2(\Omega)$ norm $\|\cdot\|_0$. Here the bilinear form $a(\cdot,\cdot)$ is assumed to satisfy \begin{eqnarray} \|w\|_1 \lesssim \sup_{{ v}\in V}\frac{a(w,v)}{\|v\|_1}\ \ {\rm and}\ \ \|w\|_1\lesssim \sup_{v\in V}\frac{a(v,w)}{\|v\|_1},\ \ \forall w\in V. {\epsilon}nd{eqnarray} We further assume { that $a(\cdot,\cdot)$ is} $V$-elliptic, i.e., \begin{eqnarray}\label{Ellipticity_Of_a} \|u\|_1^2&\lesssim & {\rm Re}\ a(u,u),\ \ \ \ \forall u\in V. {\epsilon}nd{eqnarray} \subsection{Operator reformulation} We introduce the operators $T,\ T_*\in \mathcal{L}(V)$ defined by the equation \begin{eqnarray} a(Tu,v)=b(u,v)={a}(u,T_*v),\ \ \ \ \ \forall u, v\in V. {\epsilon}nd{eqnarray} The eigenvalue problem (\ref{Eigenvalue_Problem_Weak}) can be written as an operator form for $\lambda\neq 0$ (denoting $\mu:=\lambda^{-1}$): \begin{eqnarray} Tu=\mu u, {\epsilon}nd{eqnarray} { with} \begin{eqnarray} T_*u^*=\bar{\mu}u^* {\epsilon}nd{eqnarray} for the adjoint eigenvalue problem (\ref{Eigenvalue_Problem_Weak_Adjoint}). 
Note that ellipticity condition (\ref{Ellipticity_Of_a}) guarantees that every eigenvalue $\lambda$ is nonzero. It is well known that the operators $T$ and $T_*$ are compact. Thus the spectral theory for compact operators gives us a complete characterization of the eigenvalue problem (\ref{Eigenvalue_Problem_Weak}). There is a countable set of eigenvalues of (\ref{Eigenvalue_Problem_Weak}). Let $\lambda$ be { an eigenvalue} of problem (\ref{Eigenvalue_Problem_Weak}). There exists a smallest integer $\alpha$ {\color{black}which are called the ascent} such that \begin{eqnarray} N((T-\mu)^{\alpha})=N((T-\mu)^{\alpha+1}), {\epsilon}nd{eqnarray} where $N$ denotes the null space and we use the notation $\mu=\lambda^{-1}$. Let $M(\lambda)=M_{\lambda,\mu}=N((T-\mu)^{\alpha})$ and $Q(\lambda)=Q_{\lambda,\mu}=N(T-\mu)$ denote the algebraic and geometric eigenspaces, respectively. The subspaces $Q(\lambda)\subset M(\lambda)$ are finite dimensional. The numbers $m={\rm dim}M(\lambda)$ and $q={\rm dim}Q(\lambda)$ are called the algebraic and the geometric multiplicities of $\mu$ (and $\lambda$). The vectors in $M(\lambda)$ are generalized eigenvectors. The order of a generalized eigenvector is the smallest integer $j$ such that $(T-\mu)^ju=0$ (vectors in $Q(\lambda)$ being generalized eigenvectors of order $1$). Let us point out that a generalized eigenvector $u^j$ of order $j$ satisfies \begin{eqnarray}\label{Iteration_Scheme} a(u^j,v)&=&\lambda b(u^j,v)+\lambda a(u^{j-1},v),\ \ \ \ \forall v\in V, {\epsilon}nd{eqnarray} where $u^{j-1}$ is a generalized eigenvector of order $j-1$. Similarly we define the spaces of (generalized) eigenvectors for the adjoint problem \begin{eqnarray*} M^*(\lambda)=M_{\lambda,\mu}^*=N((T_*-\bar{\mu})^{\alpha})\ \ \ {\rm and}\ \ \ Q^*(\lambda)=Q^*_{\lambda,\mu}=N(T_*-\bar{\mu}). 
{\epsilon}nd{eqnarray*} Note that $\mu$ is an eigenvalue of $T$ ($\lambda$ is an eigenvalue of problem (\ref{Eigenvalue_Problem_Weak})) if and only if $\bar{\mu}$ is an eigenvalue of $T_*$ ($\lambda$ is an eigenvalue of adjoint problem (\ref{Eigenvalue_Problem_Weak_Adjoint})) with the ascent $\alpha$ and the algebraic multiplicity $m$ for both eigenvalues being the same. \subsection{Galerkin discretization} Now, let us define the finite element approximations for the problem (\ref{Eigenvalue_Problem_Weak}). First we generate a shape-regular decomposition of the computing domain $\Omega\subset \mathcal{R}^d\ (d=2,3)$ into triangles or rectangles for $d=2$ (tetrahedrons or hexahedrons for $d=3$). The diameter of a cell $K\in\mathcal{T}_h$ is denoted by $h_K$. The mesh diameter $h$ describes the maximum diameter of all cells $K\in\mathcal{T}_h$. Based on the mesh $\mathcal{T}_h$, { we construct} a finite element space denoted by $V_h\subset V$. In order to { define our} multilevel correction method, we start the process on { an initial mesh $\mathcal{T}_H$ with mesh size $H$ and the initial finite element space $V_H$ defined on} $\mathcal{T}_H$. In this paper, the finite element space $V_h$ is assumed to satisfy \begin{eqnarray}\label{Inf_Sup_Discrete} \|w_h\|_1 \lesssim \sup_{v_h\in V_h}\frac{a(w_h,v_h)}{\|v_h\|_1}\ \ {\rm and} \ \ \|w_h\|_1\lesssim \sup_{v_h\in V_h}\frac{a(v_h,w_h)}{\|v_h\|_1},\ \ \forall w_h\in V_h. {\epsilon}nd{eqnarray} The standard Galerkin discretization of the problem (\ref{Eigenvalue_Problem_Weak}) is the following: Find $(\lambda_h,u_h)\in \mathcal{C}\times V_h$ such that $b(u_h,u_h)=1$ and \begin{eqnarray}\label{Eigenvalue_Problem_Weak_Discrete} a(u_h,v_h)&=&\lambda_h b(u_h,v_h),\ \ \ \ \ \forall v_h\in V_h. 
{\epsilon}nd{eqnarray} Similarly, the discretization of the adjoint problem (\ref{Eigenvalue_Problem_Weak_Adjoint}) can be defined as: Find $(\lambda_h,u_h^*)\in \mathcal{C}\times V_h$ such that $b(u_h^*,u_h^*)=1$ and \begin{eqnarray}\label{Eigenvalue_Problem_Weak_Discrete_Adjoint} a(v_h,{u_h^*})&=&\lambda_h b(v_h,{u_h^*}),\ \ \ \ \ \forall v_h\in V_h. {\epsilon}nd{eqnarray} By introducing Galerkin projections $P_h,\ P_h^*\in \mathcal{L}(V,V_h)$ with the following equations \begin{eqnarray*} a(P_hu,v_h)&=&a(u,v_h),\ \ \ \ \ \quad \forall u\in V,\ \forall v_h\in V_h,\\ a(v_h,u)&=&a(v_h,P_h^*u),\ \ \ \ \forall u\in V,\ \forall v_h\in V_h, {\epsilon}nd{eqnarray*} the equation (\ref{Eigenvalue_Problem_Weak_Discrete}) can be rewritten as an operator form with $\mu_h:=\lambda_h^{-1}$ (Note that $P_h$ is a bounded operator), \begin{eqnarray} P_hTu_h=\mu_hu_h. {\epsilon}nd{eqnarray} Similarly for the adjoint problem (\ref{Eigenvalue_Problem_Weak_Discrete_Adjoint}), we have \begin{eqnarray} P_h^*T_*u_h^*=\bar{\mu}_hu_h^*. {\epsilon}nd{eqnarray} \subsection{Spectral approximation of compact operators} Let $\mu$ be an eigenvalue (with algebraic multiplicity $m$) of the compact operator $T$. If $T$ is approximated by a sequence of compact operators $T_h$ converging to $T$ in norm, i.e., $\lim\limits_{h\rightarrow 0+}{ \|T-T_h\|_1}=0$, then for $h$ sufficiently small $\mu$ is approximated by $m$ numerical eigenvalues $\{\mu_{j,h}\}_{j=1,\cdots,m}$ (counted according to their algebraic multiplicities) of $T_h$, i.e., \begin{eqnarray*} \lim_{h\rightarrow 0+}\mu_{j,h}=\mu\ \ \ \ \ {\rm for}\ j=1,\cdots,m. {\epsilon}nd{eqnarray*} The space of generalized eigenvectors of $T$ is approximated by the subspace \begin{eqnarray} M_h(\lambda)=M_h^{\lambda,\mu}=\sum_{j=1}^mN((T_h-\mu_{j,h})^{\alpha_{\mu_{j,h}}}), {\epsilon}nd{eqnarray} where $\alpha_{\mu_{j,h}}$ is the smallest integer such that $N((T_h-\mu_{j,h})^{\alpha_{\mu_{j,h}}})=N((T_h-\mu_{j,h})^{\alpha_{\mu_{j,h}}+1})$. 
We similarly define the space $Q_h(\lambda)=Q_h^{\lambda,\mu}=\sum_{j=1}^mN(T_h-\mu_{j,h})$ and counterparts $M_h^*(\lambda)$, $Q_h^*(\lambda)$ for the adjoint problem . Now, we describe a computational scheme to produce the algebraic eigenspace { $M_h(\lambda)$} from the geometric eigenspace $Q_h(\lambda)=\{u_{1,h},\cdots,u_{q,h}\}$ corresponding to eigenvalues $\{\lambda_{1,h},\cdots,\lambda_{q,h}\}$, which converge to the same eigenvalue $\lambda$. Starting from all eigenfunctions in the geometric eigenspace $Q_h(\lambda)$ (of order $1$), we use the following recursive process to compute algebraic eigenspaces (cf. \cite{Shaidurov}) \begin{equation} \left\{ \begin{array}{rcl} a(u_{j,h}^{{\epsilon}ll},v_h)-\lambda_{j,h}b(u_{j,h}^{{\epsilon}ll},v_h)&=&\lambda_{j,h}a(u_{j,h}^{{\epsilon}ll-1},v_h), \ \ \forall v_h\in V_h,\\ b(u_{j,h}^{{\epsilon}ll}, v_h)&=&0,\ \ \ \quad\quad\quad\quad\quad\ \ \forall v_h\in Q_h({\lambda}), {\epsilon}nd{array} \right. {\epsilon}nd{equation} where ${\epsilon}ll\geq 2$, $u_{j,h}^{{\epsilon}ll}$ is the general eigenfunction of order ${\epsilon}ll$ and $u_{j,h}^1=u_{j,h}\in Q_h({\lambda})$ for $j=1,\cdots,q$. With the above process, we { generate the algebraic eigenspace} $$M_h(\lambda)=\{u_{1,h},\cdots, u_{q,h},\cdots, u_{m,h}\}$$ corresponding { to} eigenvalues $\{\lambda_{1,h},\cdots,\lambda_{q,h},\cdots, \lambda_{m,h}\}$, which converge to the same eigenvalue $\lambda$. Similarly, we can produce the adjoint algebraic eigenspace $M_h^*(\lambda)$ { from} the geometric eigenspace $Q_h^*(\lambda)$. 
For two linear spaces $A$ and $B$, we denote \begin{eqnarray*} \widehat{\Theta}(A,B) = \sup_{w\in A,\|w\|_1=1}\inf_{v\in B}\|w-v\|_1,\ \ \widehat{\Phi}(A,B) = \sup_{w\in A,\|w\|_b=1}\inf_{v\in B}\|w-v\|_{b}, \end{eqnarray*} { and} define gaps between $A$ and $B$ in $\|\cdot\|_1$ as \begin{eqnarray} \Theta(A,B)=\max\big\{\widehat{\Theta}(A,B), \widehat{\Theta}(B,A)\big\}, \end{eqnarray} and in $\|\cdot\|_b$ as \begin{eqnarray} \Phi(A,B)=\max\big\{\widehat{\Phi}(A,B), \widehat{\Phi}(B,A)\big\}. \end{eqnarray} Before introducing the convergence results of the finite element approximation for nonsymmetric eigenvalue problems, we define the following notation \begin{eqnarray} &&\delta_h(\lambda)=\sup_{{u}\in M(\lambda),\|u\|_1=1}\inf_{v_h\in V_h}\|u-v_h\|_1,\\ &&\delta_h^*(\lambda)=\sup_{u^*\in M^*(\lambda),\|u^*\|_1=1}\inf_{v_h\in V_h}\|u^*-v_h\|_1,\\ &&\rho_h(\lambda)=\sup_{{u}\in M(\lambda),\|u\|_b=1}\inf_{v_h\in V_h}\|u-v_h\|_b,\\ &&\rho_h^*(\lambda)=\sup_{u^*\in M^*(\lambda),\|u^*\|_b=1}\inf_{v_h\in V_h}\|u^*-v_h\|_b,\\ &&\eta_a(h)=\sup_{f\in V,\|f\|_b=1}\inf_{v\in V_h}\|T f-v\|_1,\\ &&\eta_a^*(h)=\sup_{f\in V,\|f\|_b=1}\inf_{v\in V_h}\|T_* f-v\|_1. \end{eqnarray} In order to derive { error bounds for} eigenpair approximations in the weak norm $\|\cdot\|_b$, we need the following error estimates in the weak norm $\|\cdot\|_b$ of the finite element approximation. \begin{lemma}\label{Negative_norm_estimate_Lemma} (\cite[Lemma 3.3 and Lemma 3.4]{BabuskaOsborn}) \begin{eqnarray} \eta_a(h)=o(1),\ \ \ \eta_a^*(h)=o(1)\ \ \ {\rm as}\ h\rightarrow 0, \end{eqnarray} and \begin{eqnarray}\label{Negative_norm_Error} \rho_h(\lambda)&\lesssim& \eta_a^*(h)\delta_h(\lambda),\\ \rho_h^*(\lambda)&\lesssim& \eta_a(h)\delta_h^*(\lambda). \end{eqnarray} \end{lemma} The following theorem is a basic tool for our error estimates. 
\begin{theorem}\label{Error_Estimate_Theorem}(\cite[Section 8]{BabuskaOsborn}) When the mesh size $h$ is small enough, we have \begin{eqnarray} &&\Theta(M(\lambda),M_h(\lambda))\lesssim \delta_h(\lambda),\ \ \ \Theta(M^*(\lambda),M^*_h(\lambda))\lesssim \delta^*_h(\lambda),\\ &&\Phi(M(\lambda),M_h(\lambda))\lesssim \rho_h(\lambda),\ \ \ \Phi(M^*(\lambda),M^*_h(\lambda))\lesssim \rho^*_h(\lambda),\\ &&|\lambda-\widehat{\lambda}_{h}|\lesssim \delta_h(\lambda)\delta_h^*(\lambda), \end{eqnarray} where $\widehat{\lambda}_h={ \frac{1}{m}}\sum_{j=1}^m\lambda_{j,h}$ with $\lambda_{1,h},\cdots,\lambda_{m,h}$ converging to $\lambda$. \end{theorem} \section{One correction step} In this section, we present a one-step correction procedure to improve the accuracy of the current eigenvalue and eigenfunction approximations. This correction method contains solving some auxiliary source problems in a finer finite element space and two eigenvalue problems on a coarse finite element space. Assume that we have obtained the algebraic eigenpair approximations $(\lambda_{j,h_k},u_{j,h_k})\in\mathcal{R}\times V_{h_k}$ and the corresponding adjoint ones $(\lambda_{j,h_k},u^*_{j,h_k})\in\mathcal{R}\times V_{h_k}$ for $j=i,\cdots,i+m-1$, where eigenvalues $\{\lambda_{j,h_k}\}_{j=i}^{i+m-1}$ converge to the desired eigenvalue $\lambda_i$ of (\ref{Eigenvalue_Problem_Weak}). Now we introduce a correction step to improve the accuracy of the current eigenpair approximations. Let $V_{h_{k+1}}\subset V$ be the conforming finite element space based on a finer mesh $\mathcal{T}_{h_{k+1}}$ which is produced by refining $\mathcal{T}_{h_k}$ in the regular way. We start from a conforming linear finite element space $V_H$ on the coarsest mesh $\mathcal{T}_H$ to design the following one correction step. 
\begin{algorithm}\label{Correction_Step} One Correction Step \begin{enumerate} \item For $j=i,\cdots,i+m-1$ Do Solve the following two boundary value problems: Find $\widetilde{u}_{j,h_{k+1}}\in V_{h_{k+1}}$ such that \begin{eqnarray}\label{aux_problem} a(\widetilde{u}_{j,h_{k+1}},v_{h_{k+1}})&=&b(u_{j,h_k},v_{h_{k+1}}),\ \ \ \forall v_{h_{k+1}}\in V_{h_{k+1}}. {\epsilon}nd{eqnarray} Find $\widetilde{u}^*_{j,h_{k+1}}\in V_{h_{k+1}}$ such that \begin{eqnarray}\label{aux_problem_Adjoint} a(v_{h_{k+1}},\widetilde{u}^*_{j,h_{k+1}})&=&b(v_{h_{k+1}},u^*_{j,h_k}),\ \ \ \forall v_{h_{k+1}}\in V_{h_{k+1}}. {\epsilon}nd{eqnarray} End Do \item Define two new finite element spaces \begin{eqnarray*} V_{H,h_{k+1}}=V_H\oplus{\rm span}\{\widetilde{u}_{i,h_{k+1}},\cdots,\widetilde{u}_{i+m-1,h_{k+1}}\} {\epsilon}nd{eqnarray*} and \begin{eqnarray*} V^*_{H,h_{k+1}}=V_H\oplus{\rm span}\{\widetilde{u}^*_{i,h_{k+1}},\cdots,\widetilde{u}^*_{i+m-1,h_{k+1}}\}. {\epsilon}nd{eqnarray*} Solve the following two eigenvalue problems: Find $(\lambda_{j,h_{k+1}},u_{j,h_{k+1}})\in\mathcal{R}\times V_{H,h_{k+1}}$ such that $b(u_{j,h_{k+1}},u_{j,h_{k+1}})=1$ and \begin{eqnarray}\label{Eigen_Augment_Problem} \hskip-0.4cm a(u_{j,h_{k+1}},v_{H,h_{k+1}})=\lambda_{j,h_{k+1}} b(u_{j,h_{k+1}},v_{H,h_{k+1}}),\ \forall v_{H,h_{k+1}}\in V^*_{H,h_{k+1}}. {\epsilon}nd{eqnarray} Find $(\lambda_{j,h_{k+1}},u^*_{j,h_{k+1}})\in\mathcal{R}\times V^*_{H,h_{k+1}}$ such that $b(u^*_{j,h_{k+1}},u^*_{j,h_{k+1}})=1$ and \begin{eqnarray}\label{Eigen_Augment_Problem_Adjoint} \hskip -0.4cm a(v_{H,h_{k+1}},u^*_{j,h_{k+1}})={\lambda}_{j,h_{k+1}} b(v_{H,h_{k+1}},u^*_{j,h_{k+1}}),\ \forall v_{H,h_{k+1}}\in V_{H,h_{k+1}}. 
{\epsilon}nd{eqnarray} { \item Choose $2q$ eigenpairs $\{\lambda_{j,h_{k+1}}, u_{j,h_{k+1}}\}_{j=i}^{i+q-1}$ and $\{\lambda_{j,h_{k+1}}, u^*_{j,h_{k+1}}\}_{j=i}^{i+q-1}$ to define two new geometric eigenspaces} \begin{eqnarray*} Q_{h_{k+1}}(\lambda_i)={\rm span}\big\{u_{i,h_{k+1}},\cdots, u_{i+q-1,h_{k+1}}\big\} {\epsilon}nd{eqnarray*} and \begin{eqnarray*} Q^*_{h_{k+1}}(\lambda_i)={\rm span}\big\{u^*_{i,h_{k+1}},\cdots, u^*_{i+q-1,h_{k+1}}\big\}. {\epsilon}nd{eqnarray*} Based on these two geometric eigencpases, compute the corresponding algebraic eigenspaces \begin{eqnarray} M_{h_{k+1}}(\lambda_i)={\rm span}\big\{u_{i,h_{k+1}},\cdots, u_{i+m-1,h_{k+1}}\big\} {\epsilon}nd{eqnarray} and \begin{eqnarray} M^*_{h_{k+1}}(\lambda_i)={\rm span}\big\{u_{i,h_{k+1}},\cdots, u_{i+m-1,h_{k+1}}\big\}. {\epsilon}nd{eqnarray} {\epsilon}nd{enumerate} { The final output is:} \begin{eqnarray*} &&\big(\{\lambda_{j,h_{k+1}}\}_{j=i}^{i+m-1},M_{h_{k+1}}(\lambda_i),M^*_{h_{k+1}}(\lambda_i)\big)=\nonumber\\ &&\ \ \ \ \quad\quad {\it Correction}\big(V_H,\{\lambda_{j,h_k}\}_{j=i}^{i+m-1}{,}M_{h_k}(\lambda_i),M^*_{h_k}(\lambda_i),V_{h_{k+1}}\big). {\epsilon}nd{eqnarray*} {\epsilon}nd{algorithm} \begin{remark} Since in Step 1 of Algorithm \ref{Correction_Step}, the solving process for the boundary value problems is independent of each other for different $j$, we can implement them in parallel. {\color{black}Furthermore, the designing for this algorithm does not need the ascent assumption as in \cite{Kolman,YangFan}}. 
\end{remark} \begin{theorem}\label{Error_Estimate_One_Correction_Theorem} Assume there exist real numbers $\varepsilon_{h_k}(\lambda_i)$ and $\varepsilon_{h_k}^*(\lambda_i)$ such that the given eigenpairs $\big(\{\lambda_{j,h_k}\}_{j=i}^{i+m-1}, M_{h_k}(\lambda_i),M^*_{h_k}(\lambda_i)\big)$ in {\it One Correction Step \ref{Correction_Step}} have the following error estimates \begin{eqnarray} \Theta(M(\lambda_i),M_{h_k}(\lambda_i)) &\lesssim& \varepsilon_{h_k}(\lambda_i),\\ \Theta(M^*(\lambda_i),M^*_{h_k}(\lambda_i)) &\lesssim& \varepsilon^*_{h_k}(\lambda_i),\label{Error_u_h_1}\\ \Phi(M(\lambda_i),M_{h_k}(\lambda_i)) &\lesssim&\eta_a^*(H)\varepsilon_{h_k}(\lambda_i),\\ \Phi(M^*(\lambda_i),M^*_{h_k}(\lambda_i))&\lesssim& \eta_a(H)\varepsilon^*_{h_k}(\lambda_i).\label{Error_u_h_1_nagative} \end{eqnarray} Then after one correction step, the resultant eigenpair approximation\\ $(\{\lambda_{j,h_{k+1}}\}_{j=i}^{i+m-1},M_{h_{k+1}}(\lambda_i),M^*_{h_{k+1}}(\lambda_i))$ has the following error estimates \begin{eqnarray} \Theta(M(\lambda_i),M_{h_{k+1}}(\lambda_i)) &\lesssim& \varepsilon_{h_{k+1}}(\lambda_i),\label{Estimate_u_u_h_2}\\ \Theta(M^*(\lambda_i),M^*_{h_{k+1}}(\lambda_i)) &\lesssim& \varepsilon^*_{h_{k+1}}(\lambda_i),\label{Estimate_u_u_h_2_adjoint}\\ \Phi(M(\lambda_i),M_{h_{k+1}}(\lambda_i)) &\lesssim& \eta_a^*(H) \varepsilon_{h_{k+1}}(\lambda_i),\label{Estimate_u_h_2_Nagative}\\ \Phi(M^*(\lambda_i),M^*_{h_{k+1}}(\lambda_i)) &\lesssim& \eta_a(H) \varepsilon^*_{h_{k+1}}(\lambda_i),\label{Estimate_u_h_2_Nagative_Adjoint} \end{eqnarray} where $\varepsilon_{h_{k+1}}(\lambda_i):=\eta_a^*(H)\varepsilon_{h_k}(\lambda_i)+\delta_{h_{k+1}}(\lambda_i)$ and $\varepsilon^*_{h_{k+1}}(\lambda_i):=\eta_a(H)\varepsilon^*_{h_k}(\lambda_i)+\delta^*_{h_{k+1}}(\lambda_i)$.
\end{theorem} \begin{proof} {\color{black} From (\ref{Iteration_Scheme}), there exist the basis functions $\big\{u_j\big\}_{j=i}^{i+m-1}$ of $M(\lambda_i)$ such that \begin{eqnarray} a(u_j,v)&=&b\left(\sum_{k=i}^{i+m-1}p_{jk}(\lambda_i)u_k, v\right),\ \ \ \forall v\in V, \end{eqnarray} where $p_{jk}(\cdot)$ denotes a polynomial of degree no more than $\alpha$ for $k=i,\cdots,j$ with $p_{jj}(\lambda_i)=\lambda_i$ and $p_{jk}(\lambda_i)=0$ for $j<k\leq i+m-1$. We can define a matrix $\mathcal P:=(p_{j+1-i,k+1-i})_{i\leq j,k\leq i+m-1}\in \mathcal C^{m\times m}$ such that \begin{eqnarray} a(U,v)=b(\mathcal P U, v),\ \ \ \ \forall v\in V, \end{eqnarray} where $U:=(u_i,\cdots, u_{i+m-1})^T$. It is easy to see that the matrix $\mathcal P$ is nonsingular provided $\lambda_i\neq 0$. For each $\widetilde u_{j,h_{k+1}}$, from the definitions of $\Theta(M(\lambda_i),M_{h_k}(\lambda_i))$ and $\Phi(M(\lambda_i),M_{h_k}(\lambda_i))$, there exists a vector $\mathcal R_j := (c_1,\cdots,c_m)^T\in \mathcal C^{m\times 1}$ such that \begin{eqnarray} \|u_{j,h_k}-\mathcal R_j^T U\|_1 &\lesssim & \varepsilon_{h_k}(\lambda_i), \ \ \quad \ \ \ \ \ \ \ {\rm for}\ j=i,\cdots,i+m-1,\\ \|u_{j,h_k}-\mathcal R_j^TU\|_0 &\lesssim& \eta_a^*(H)\varepsilon_{h_k}(\lambda_i), \ \ \ \ {\rm for}\ j=i,\cdots,i+m-1. \end{eqnarray} For any $v_{h_{k+1}}\in V_{h_{k+1}}$, we have \begin{eqnarray}\label{Error_Estimate_1} &&{|}a(\widetilde u_{j,h_{k+1}}-P_{h_{k+1}}\mathcal R_j^T\mathcal P^{-1}U,v_{h_{k+1}}){|} ={|}a(\widetilde u_{j,h_{k+1}}-\mathcal R_j^T\mathcal P^{-1}U,v_{h_{k+1}}){|}\nonumber\\ &=&{|}b(u_{j,h_k}-\mathcal R_j^T\mathcal{P}^{-1}\mathcal PU,v_{h_{k+1}}){|} = {|}b(u_{j,h_k}-\mathcal R_j^TU,v_{h_{k+1}}){|}\nonumber\\ &\lesssim& \eta_a^*(H)\varepsilon_{h_k}(\lambda_i)\|v_{h_{k+1}}\|_1, \ \ \ \ {\rm for}\ j=i,\cdots,i+m-1.
\end{eqnarray} From (\ref{Inf_Sup_Discrete}) and (\ref{Error_Estimate_1}), the following estimate holds \begin{eqnarray} \|\widetilde u_{j,h_{k+1}}-P_{h_{k+1}}\mathcal R_j^T\mathcal P^{-1}U\|_1 &\lesssim& \eta_a^*(H)\varepsilon_{h_k}(\lambda_i),\nonumber\\ && \ \ {\rm for}\ j=i,\cdots,i+m-1. \end{eqnarray} Combining with the error estimate \begin{eqnarray} \|\mathcal R_j^T\mathcal P^{-1}U-P_{h_{k+1}}\mathcal R_j^T\mathcal P^{-1}U\|_1 &\lesssim& \delta_{h_{k+1}}(\lambda_i),\nonumber\\ &&\ \ {\rm for}\ j=i,\cdots,i+m-1, \end{eqnarray} we have \begin{eqnarray}\label{Error_Estimates_Tilde_u_J} \|\widetilde u_{j,h_{k+1}}-\mathcal R_j^T\mathcal P^{-1}U\|_1 &\lesssim& \eta_a^*(H)\varepsilon_{h_k}(\lambda_i)+\delta_{h_{k+1}}(\lambda_i),\nonumber\\ &&\ \ \ \ \ \ \ {\rm for}\ j=i,\cdots,i+m-1. \end{eqnarray} } After Step 3, from the definition of $V_{H,h_{k+1}}$ and (\ref{Error_Estimates_Tilde_u_J}), we derive \begin{eqnarray}\label{Definition_Varepsilon_k+1} &&\sup_{u \in M(\lambda_i),\|u\|_1=1} \inf_{v_{H,h_{k+1}}\in V_{H,h_{k+1}}}\| u-v_{H,h_{k+1}}\|_1\nonumber\\ &\leq& \sup_{u\in M(\lambda_i),\|u\|_1=1} \inf_{v_{h_{k+1}}\in W_{h_{k+1}}}\|u-v_{h_{k+1}}\|_1\nonumber\\ &\lesssim& \sup_{v_{h_{k+1}}\in W_{h_{k+1}}, \|v_{h_{k+1}}\|_1=1} \inf_{u\in M(\lambda_i)}\|v_{h_{k+1}}-u\|_1\nonumber\\ &\lesssim& \max_{j=i,\cdots,i+m-1}\|\widetilde u_{j,h_{k+1}}-\mathcal R_j^T\mathcal P^{-1} U\|_1\nonumber\\ &\lesssim& \eta_a^*(H)\varepsilon_{h_k}(\lambda_i)+\delta_{h_{k+1}}(\lambda_i), \end{eqnarray} where $W_{h_{k+1}}:={\rm span}\{\widetilde u_{i,h_{k+1}},\cdots,\widetilde u_{i+m-1,h_{k+1}}\}$. { Similarly}, \begin{eqnarray}\label{Definition_Varepsilon_k+1_Adjoint} &&\sup_{u^*\in M^*(\lambda_i),\|u^*\|_1=1}\inf_{v_{H,h_{k+1}}\in V^*_{H,h_{k+1}}}\|u^*-v_{H,h_{k+1}}\|_1\nonumber\\ &\lesssim& \eta_a(H)\varepsilon^*_{h_k}(\lambda_i)+\delta^*_{h_{k+1}}(\lambda_i).
{\epsilon}nd{eqnarray} Then from the error estimate results stated in Theorem \ref{Error_Estimate_Theorem} for the eigenvalue problem (see, e.g., \cite[Section 8]{BabuskaOsborn}) and (\ref{Definition_Varepsilon_k+1})-(\ref{Definition_Varepsilon_k+1_Adjoint}), the following error estimates hold \begin{eqnarray} \Theta(M(\lambda_i),M_{h_{k+1}}(\lambda_i)) &\lesssim& {\epsilon}ta_a^*(H)\varepsilon_{h_k}(\lambda_i)+\deltaelta_{h_{k+1}}(\lambda_i),\\ \Theta(M^*(\lambda_i),M^*_{h_{k+1}}(\lambda_i)) &\lesssim& {\epsilon}ta_a(H)\varepsilon^*_{h_k}(\lambda_i)+\deltaelta^*_{h_{k+1}}(\lambda_i). {\epsilon}nd{eqnarray} These are the desired estimates (\ref{Estimate_u_u_h_2}) and (\ref{Estimate_u_u_h_2_adjoint}). Furthermore, \begin{eqnarray} \Phi(M(\lambda_i),M_{h_{k+1}}(\lambda_i))&\lesssim & \widetilde{{\epsilon}ta}_a^*(H)\sup_{u\in M(\lambda),\|u\|_1=1} \inf_{v_{H,h_{k+1}}\in V_{H,h_{k+1}}}\|u-v_{H,h_{k+1}}\|_1\nonumber\\ &\leq & {\epsilon}ta_a^*(H)\varepsilon_{h_{k+1}}(\lambda_i), {\epsilon}nd{eqnarray} where \begin{eqnarray} \widetilde{{\epsilon}ta}_a^*(H):=\sup_{f\in V,\|f\|_b=1}\inf_{v_{H,h_{k+1}}\in V_{H,h_{k+1}}} \|T_*f-v_{H,h_{k+1}}\|_1\leq {\epsilon}ta_a^*(H). {\epsilon}nd{eqnarray} Then we obtain (\ref{Estimate_u_h_2_Nagative}). A similar argument leads to (\ref{Estimate_u_h_2_Nagative_Adjoint}). {\epsilon}nd{proof} \section{Multilevel correction scheme} In this section, we introduce a multilevel correction scheme based on the {\it One Correction Step} \ref{Correction_Step}. The method improves accuracy after each correction step, which is different from the two-grid methods in \cite{Kolman,XuZhou,YangFan}. 
\begin{algorithm}\label{Multi_Correction} Multilevel Correction Scheme \begin{enumerate} \item Construct a coarse conforming finite element space $V_{h_1}$ on $\mathcal{T}_{h_1}$ such that $V_H\subset V_{h_1}$ and solve the following two eigenvalue problems: Find $(\lambda_{h_1},u_{h_1})\in \mathcal{R}\times V_{h_1}$ such that $b(u_{h_1},u_{h_1})=1$ and \begin{eqnarray}\label{Initial_Eigen_Problem} a(u_{h_1},v_{h_1})&=&\lambda_{h_1}b(u_{h_1},v_{h_1}),\ \ \ \ \forall v_{h_1}\in V_{h_1}. \end{eqnarray} Find $(\lambda_{h_1},u^*_{h_1})\in \mathcal{R}\times V_{h_1}$ such that $b(u^*_{h_1},u^*_{h_1})=1$ and \begin{eqnarray}\label{Initial_Eigen_Problem_Adjoint} a(v_{h_1}, u^*_{h_1})&=&\lambda_{h_1}b(v_{h_1}, u_{h_1}^*),\ \ \ \ \forall v_{h_1}\in V_{h_1}. \end{eqnarray} Choose $2q$ eigenpairs $\{\lambda_{j,h_1},u_{j,h_1}\}_{j=i}^{i+q-1}$ and $\{\lambda_{j,h_1},u^*_{j,h_1}\}_{j=i}^{i+q-1}$ which approximate the desired eigenvalue $\lambda_i$ and its geometric eigenspaces of the eigenvalue problem (\ref{Initial_Eigen_Problem}) and its adjoint one (\ref{Initial_Eigen_Problem_Adjoint}). Based on these two geometric eigenspaces, we compute the corresponding algebraic eigenspaces $M_{h_1}(\lambda_i):={\rm span}\big\{u_{i,h_1},\cdots,u_{i+m-1,h_1}\big\}$ and $M^*_{h_1}(\lambda_i):={\rm span}\big\{u^*_{i,h_1},\cdots,u_{i+m-1,h_1}^*\big\}$. Then do the following correction steps. \item Construct a series of finer finite element spaces $V_{h_2},\cdots,V_{h_n}$ on the sequence of nested meshes $\mathcal{T}_{h_2},\cdots,\mathcal{T}_{h_n}$ (cf. \cite{BrennerScott,Ciarlet}).
\item Do $k=1,\cdots,n-1$\\ Obtain new eigenpair approximations $(\{\lambda_{j,h_{k+1}}\}_{j=i}^{i+m-1},M_{h_{k+1}}(\lambda_i),M^*_{h_{k+1}}(\lambda_i))$ by Algorithm \ref{Correction_Step} \begin{eqnarray*} &&\big(\{\lambda_{j,h_{k+1}}\}_{j=i}^{i+m-1},M_{h_{k+1}}(\lambda_i),M^*_{h_{k+1}}(\lambda_i)\big)=\nonumber\\ &&\ \ \ \ \quad\quad {\it Correction}\big(V_H,\{\lambda_{j,h_k}\}_{j=i}^{i+m-1},M_{h_k}(\lambda_i),M^*_{h_k}(\lambda_i),V_{h_{k+1}}\big). \end{eqnarray*} End Do \end{enumerate} Finally, we obtain eigenpair approximations $\big(\{\lambda_{j,h_n}\}_{j=i}^{i+m-1},M_{h_n}(\lambda_i),M^*_{h_n}(\lambda_i)\big)$. \end{algorithm} \begin{theorem} After implementing Algorithm \ref{Multi_Correction}, the resultant eigenpair approximation $(\{\lambda_{j,h_n}\}_{j=i}^{i+m-1},M_{h_n}(\lambda_i),M^*_{h_n}(\lambda_i))$ has the following error estimates \begin{eqnarray} \Theta(M(\lambda_i),M_{h_n}(\lambda_i)) &\lesssim& \varepsilon_{h_n}(\lambda_i),\label{Multi_Correction_Err_fun}\\ \Phi(M(\lambda_i),M_{h_n}(\lambda_i)) &\lesssim&\eta_a^*(H) \varepsilon_{h_n}(\lambda_i),\label{Multi_Correction_Err_fun_Weak}\\ \Theta(M^*(\lambda_i),M^*_{h_n}(\lambda_i)) &\lesssim& \varepsilon^*_{h_n}(\lambda_i),\label{Multi_Correction_Err_fun_Adjoint}\\ \Phi(M^*(\lambda_i),M^*_{h_n}(\lambda_i)) &\lesssim&\eta_a(H) \varepsilon^*_{h_n}(\lambda_i),\label{Multi_Correction_Err_fun_Weak_Adjoint}\\ |\widehat{\lambda}_{i,h_n}-\lambda_i|&\lesssim&\varepsilon_{h_n}(\lambda_i)\varepsilon^*_{h_n}(\lambda_i), \label{Multi_Correction_Err_eigen} \end{eqnarray} where $\widehat{\lambda}_{i,h_n}=\frac{1}{m}\sum_{j=i}^{i+m-1}\lambda_{j,h_n}$, $\varepsilon_{h_n}(\lambda_i)=\sum_{k=1}^{n}\eta_a^*(H)^{n-k}\delta_{h_k}(\lambda_i)$ and\\ $\varepsilon^*_{h_n}(\lambda_i)=\sum_{k=1}^{n}\eta_a(H)^{n-k}\delta^*_{h_k}(\lambda_i)$.
{\epsilon}nd{theorem} \begin{proof} First, the following estimates hold \begin{eqnarray} \Theta(M(\lambda_i),M_{h_1}(\lambda_i)) &\lesssim& \varepsilon_{h_1}(\lambda_i),\label{Multi_Correction_Err_fun_h_1}\\ \Phi(M(\lambda_i),M_{h_1}(\lambda_i)) &\lesssim&{\epsilon}ta_a^*(h_1) \varepsilon_{h_1}(\lambda_i)\leq {\epsilon}ta_a^*(H) \varepsilon_{h_1}(\lambda_i),\label{Multi_Correction_Err_fun_Weak_h_1}\\ \Theta(M^*(\lambda_i),M^*_{h_1}(\lambda_i)) &\lesssim& \varepsilon^*_{h_1}(\lambda_i),\label{Multi_Correction_Err_fun_Adjoint_h_1}\\ \Phi(M^*(\lambda_i),M^*_{h_1}(\lambda_i)) &\lesssim&{\epsilon}ta_a(h_1) \varepsilon^*_{h_1}(\lambda_i)\leq {\epsilon}ta_a(H) \varepsilon^*_{h_1}(\lambda_i).\label{Multi_Correction_Err_fun_Weak_Adjointh_1} {\epsilon}nd{eqnarray} Then we set $\varepsilon_{h_1}(\lambda_i):= \deltaelta_{h_1}(\lambda_i)$ and $\varepsilon_{h_1}^*(\lambda_i):=\deltaelta_{h_1}^*(\lambda_i)$. By recursive relation and Theorem \ref{Error_Estimate_One_Correction_Theorem}, { we derive} \begin{eqnarray}\label{epsilon_n_1} \Theta(M(\lambda_i),M_{h_n}(\lambda_i))&\lesssim&\varepsilon_{h_n}(\lambda_i) = {\epsilon}ta_a^*(H)\varepsilon_{h_{n-1}}(\lambda_i) +\deltaelta_{h_n}(\lambda_i)\nonumber\\ &\lesssim&{\epsilon}ta_a^*(H)^2\varepsilon_{h_{n-2}}(\lambda_i)+ {\epsilon}ta_a^*(H)\deltaelta_{h_{n-1}}(\lambda_i)+\deltaelta_{h_n}(\lambda_i)\nonumber\\ &\lesssim&\sum\limits_{k=1}^n{\epsilon}ta_a^*(H)^{n-k}\deltaelta_{h_k}(\lambda_i) {\epsilon}nd{eqnarray} and \begin{eqnarray}\label{Error_n-1_Negative_norm} \Phi(M(\lambda_i),M_{h_n}(\lambda_i))&\lesssim& {\epsilon}ta_a^*(H)\sum\limits_{k=1}^n{\epsilon}ta_a^*(H)^{n-k}\deltaelta_{h_k}(\lambda_i). {\epsilon}nd{eqnarray} These are the estimates (\ref{Multi_Correction_Err_fun}) and (\ref{Multi_Correction_Err_fun_Weak}) and the estimates (\ref{Multi_Correction_Err_fun_Adjoint}) and (\ref{Multi_Correction_Err_fun_Weak_Adjoint}) can be proved similarly. 
From Theorem \ref{Error_Estimate_Theorem}, (\ref{Multi_Correction_Err_fun}) and (\ref{Multi_Correction_Err_fun_Adjoint}), we can obtain the estimate (\ref{Multi_Correction_Err_eigen}). {\epsilon}nd{proof} \section{Numerical results} In this section, we give some numerical results to illustrate the efficiency of the multilevel correction scheme defined by Algorithm \ref{Multi_Correction}. Here, we solve the following eigenvalue problem \begin{equation}\label{Numerical_Exam_1} \left\{ \begin{array}{rcl} -\Delta u+\mathbf b\cdot \nabla u &=& \lambda u,\ \ \ {\rm in}\ \Omega,\\ u&=&0,\ \ \ \ \ {\rm on}\ \partial\Omega, {\epsilon}nd{array} \right. {\epsilon}nd{equation} where $\mathbf b=[b_1,b_2]^T\in\mathcal{C}^2$ is a constant vector and $\Omega=(0,1)\times(0,1)$. This example comes from \cite{HeuvelineRannacher_2001,HeuvelineRannacher_2006}. We choose $b_1=1$ and $b_2=1/2$ in Subsections \ref{Multi_Space_Subsection} and \ref{Multi_Grid_Subsection}. Then we choose $b_1=\cos(\pi x_1)\sin(\pi x_2)$ and $b_2=-\sin(\pi x_1)\cos(\pi x_2)$ in Subsection \ref{Multi_Level_L_Shape}. We also choose a complex vector $\mathbf b$ in the final example. When $b_1=1$ and $b_2=1/2$, the problem (\ref{Numerical_Exam_1}) is nonself-adjoint, but all of its eigenvalues are nondefective (all algebraic eigenfunctions are of order $1$) and real numbers \begin{eqnarray} \lambda_{k,{\epsilon}ll}=\frac{b_1^2+b_2^2}{4}+(k^2+{\epsilon}ll^2)\pi^2, {\epsilon}nd{eqnarray} for $k,{\epsilon}ll\in \mathcal{N}^+$. The corresponding eigenfunctions can be chosen as real functions \begin{eqnarray} u_{k,{\epsilon}ll} &=& {\epsilon}xp\Big(\frac{b_1x_1+b_2x_2}{2}\Big)\sin(k\pi x_1)\sin({\epsilon}ll\pi x_2). {\epsilon}nd{eqnarray} The corresponding adjoint eigenvalue problem has eigenvalues $\lambda_{k,{\epsilon}ll}$ and eigenfunctions \begin{eqnarray} u_{k,{\epsilon}ll}^* &=& {\epsilon}xp\Big(-\frac{b_1x_1+b_2x_2}{2}\Big)\sin(k\pi x_1)\sin({\epsilon}ll\pi x_2). 
{\epsilon}nd{eqnarray} \subsection{Multi-space way}\label{Multi_Space_Subsection} In this case, finer finite element spaces are constructed by increasing polynomial degrees of the beginning finite element space on the same mesh. We first solve the eigenvalue problem (\ref{Eigenvalue_Problem_Weak_Discrete}) { by linear finite element on a relatively coarser mesh $\mathcal{T}_H$, then perform the first correction step with quadratic element, followed by cubic} element for the second correction step and quartic element for the third correction step. Our initial mesh $\mathcal{T}_H$ is obtained from the Delaunay triangulation followed by four levels of regular mesh refinement. Figure \ref{Error_First_Eigenvalue_Multi_Space} depicts errors for the first eigenvalue ($5/16+2\pi^2$) approximation, and Figure \ref{Error_First_Eigenfunction_Multi_Space} plots numerical errors for the eigenfunction and the corresponding adjoint eigenfunction associated with the first eigenvalue. \begin{figure}[ht] \centering \includegraphics[width=7cm,height=5cm]{Error_Eigen_Nonsymm_Multi_Space.ps} \caption{\it { Here, $\lambda_h^1$ denote the eigenvalue approximation by linear element, $\lambda_h^2$ is the eigenvalue approximation by the first correction with quadratic element, $\lambda_h^3$ the eigenvalue approximation by the second correction with cubic element, $\lambda_h^4$ the eigenvalue approximation by the third correction with quartic element} } \label{Error_First_Eigenvalue_Multi_Space} {\epsilon}nd{figure} \begin{figure}[ht] \centering \includegraphics[width=6cm,height=5cm]{Error_EigenfunR_Nonsymm_Multi_Space.ps} \includegraphics[width=6cm,height=5cm]{Error_EigenfunL_Nonsymm_Multi_Space.ps} \caption{\it {Here, $u_h^1$ and $u_{*h}^1$ denote the eigenfunction approximation and its adjoint approximation by linear element, $u_h^2$ and $u_{*h}^2$ are eigenfunction approximation and its adjoint approximation by the first correction with quadratic element, $u_h^3$ and $u_{*h}^3$, 
eigenfunction and its adjoint approximation by the second correction with cubic element, $u_h^4$ and $u_{*h}^4$, eigenfunction and its adjoint approximation by the third correction with quartic element}\label{Error_First_Eigenfunction_Multi_Space}} {\epsilon}nd{figure} Furthermore, Figure \ref{Error_First_6_Eigenvalues_Multi_Space} provides numerical results for the summation of the errors for the first $6$ eigenvalues: $5/16+[2\pi^2,5\pi^2,5\pi^2,8\pi^2,10\pi^2,10\pi^2]$. \begin{figure}[ht] \centering \includegraphics[width=7cm,height=5cm]{Error_Eigen_Nonsymm_Multi_Space_6_Multi.ps} \caption{\it {Approximation errors for the summation of the errors for the first $6$ eigenvalues by the multi-space way. Here, $\lambda_{j,h}^1$ denote the eigenvalue approximation by linear element, $\lambda_{j,h}^2$ is the eigenvalue approximation by the first correction with quadratic element, $\lambda_{j,h}^3$ the eigenvalue approximation by the second correction with cubic element, $\lambda_{j,h}^4$ the eigenvalue approximation by the third correction with quartic element}} \label{Error_First_6_Eigenvalues_Multi_Space} {\epsilon}nd{figure} From Figures \ref{Error_First_Eigenvalue_Multi_Space}-\ref{Error_First_6_Eigenvalues_Multi_Space}, we find that each correction step improves the convergence order by two for eigenvalue approximation, and by one for eigenfunction approximation when the exact eigenfunction is sufficiently smooth. To end this subsection, we make a comparison with the PPR method \cite{NagaZhang}. We see from Figure \ref{Comparison_PPR} that the two-level correction scheme by the multi-space way has slightly better accuracy than the PPR method. However, the two-level correction needs to solve two extra boundary value problems while the PPR method only need to perform a local recovery at each node. 
Thus, we should say that the PPR method has better efficiency than the two-level correction under regular mesh refinement when the eigenfunction has regularity $H^3(\Omega)\cap W^{2,\infty}(\Omega)$. Nevertheless, three and four-level correction will outperform the PPR method. \begin{figure}[ht] \centering \includegraphics[width=7cm,height=5cm]{Compare_With_PPR.ps} \caption{\it { Comparison with the PPR method in \cite{NagaZhang} when $b_1=10$ and $b_2=1$. $\lambda_h^1$, eigenvalue approximation by linear element; $\lambda_h^2$, eigenvalue approximation by the first correction with quadratic element; $\lambda_{\rm ppr}$, the eigenvalue approximation by the PPR method}} \label{Comparison_PPR} {\epsilon}nd{figure} \subsection{Multi-grid way}\label{Multi_Grid_Subsection} An alternative way of the multilevel correction scheme is to construct finer finite element spaces by mesh refinement. We first solve the eigenvalue problem (\ref{Eigenvalue_Problem_Weak_Discrete}) in the linear finite element space on an initial coarse mesh $\mathcal{T}_H$ ($\mathcal{T}_{h_1}:=\mathcal{T}_H$). Then we refine the mesh regularly with the resultant meshes $\mathcal{T}_{h_k}$ satisfying $h_k=2^{1-k}H$ for $(k=2,\cdots,n)$, and solve auxiliary source problems (\ref{aux_problem}) and (\ref{aux_problem_Adjoint}) in the linear finite element space $V_{h_k}$ defined on $\mathcal{T}_{h_k}$ and the corresponding eigenvalue problems (\ref{Eigen_Augment_Problem}) and (\ref{Eigen_Augment_Problem_Adjoint}) in $V_{H,h_k}$. We have the following estimate \begin{eqnarray*} \varepsilon_{h_n}(\lambda)&=&\sum\limits_{k=1}^{n}H^{n-k}h_k =\sum\limits_{k=1}^{n}(2H)^{n-k}h_n\leq\frac{1}{1-2H}h_n\approx h_n, {\epsilon}nd{eqnarray*} and similarly $\varepsilon^*_{h_n}(\lambda)\approx h_n$, which implies that the multilevel correction method achieves the optimal convergence rate if the initial mesh size $H$ is reasonably small, say $H=1/4$ as we will use in our numerical tests. 
Numerical results for the first eigenvalue $\lambda=5/16+2\pi^2$ and the two associated eigenfunctions are demonstrated in Figures \ref{Error_First_Eigenvalue_Multi_Grid} and \ref{Error_First_Eigenfunction_Multi_Grid}, respectively. Here we use the uniform meshes with $H=1/4$. \begin{figure}[ht] \centering \includegraphics[width=7cm,height=5cm]{Error_Eigen_Nonsymm_Multi_Grid.ps} \caption{\it { Approximation errors} for the first eigenvalue $5/16+2\pi^2$ by the multi-grid way with $H=1/4$} \label{Error_First_Eigenvalue_Multi_Grid} {\epsilon}nd{figure} \begin{figure}[ht] \centering \includegraphics[width=6cm,height=5cm]{Error_EigenfunR_Nonsymm_Multi_Grid.ps} \includegraphics[width=6cm,height=5cm]{Error_EigenfunL_Nonsymm_Multi_Grid.ps} \caption{\it { Approximation errors for the first eigenfunction and its adjoint by the multi-grid way with $H=1/4$}} \label{Error_First_Eigenfunction_Multi_Grid} {\epsilon}nd{figure} Furthermore, Figure \ref{Error_First_6_Eigenvalues_Multi_Grid} provides numerical results for the summation of the errors for the first $6$ eigenvalues: $5/16+[2\pi^2,5\pi^2,5\pi^2,8\pi^2,10\pi^2,10\pi^2]$ with $H=1/8$ and $H=1/16$, respectively. \begin{figure}[ht] \centering \includegraphics[width=6cm,height=5cm]{Error_Eigen_Nonsymm_Multi_Grid_6_Multi_1_8.ps} \includegraphics[width=6cm,height=5cm]{Error_Eigen_Nonsymm_Multi_Grid_6_Multi_1_16.ps} \caption{\it {Approximation errors for the error summation of the first $6$ eigenvalues by the multi-grid way with $H=1/8$ (left) and $1/16$ (right)}} \label{Error_First_6_Eigenvalues_Multi_Grid} {\epsilon}nd{figure} We observe from Figures \ref{Error_First_Eigenvalue_Multi_Grid}-\ref{Error_First_6_Eigenvalues_Multi_Grid}, that our multilevel correction method with the multi-grid way produces eigenvalue and eigenfunction approximations with the optimal convergence rate. Therefore, we can combine the multigrid method for boundary value problems and our multilevel correction scheme (cf. 
\cite{LinXie,Xie_IMA}) to achieve better efficiency for nonsymmetric eigenvalue problems. \subsection{Eigenvalue problem on $L$-shape domain}\label{Multi_Level_L_Shape} In this subsection, we consider the eigenvalue problem (\ref{Numerical_Exam_1}) on the $L$-shape domain $\Omega=(-1,1)\times(-1,1)\backslash[0, 1)\times (-1, 0]$. Since $\Omega$ has a reentrant corner, the singularity of eigenfunctions is expected. As a consequence, the convergence rate for the first eigenvalue approximation is $4/3$ by the linear finite element method on quasi-uniform meshes. Since the exact eigenvalue is unknown, we choose an adequately accurate approximation $\lambda = 9.95240442893276$ as the exact first eigenvalue for our numerical tests. Our multilevel correction scheme is tested on a sequence of meshes $\mathcal{T}_H$ ($\mathcal{T}_{h_1}:=\mathcal{T}_H$), $\mathcal{T}_{h_2}, \cdots, \mathcal{T}_{h_n}$ produced by the adaptive refinement (cf. \cite{WuZhang,XuZhou_Eigen}). Here the ZZ recovery method (cf. \cite{ZienkiewiczZhu}) is adopted as the {\it a posteriori} error estimator for eigenfunction and adjoint eigenfunction approximations $\sqrt{\|u_h-u\|_{a,h}^2+\|u_h^*-u^*\|_{a,h}^2}$. Figure \ref{Mesh_AFEM_Exam_2} shows the initial mesh and the mesh after $12$ adaptive iterations. Figure \ref{Convergence_AFEM_Exam_2_First} gives the corresponding numerical results for the adaptive iterations. 
\begin{figure}[ht] \centering \includegraphics[width=5.5cm,height=5.5cm]{Mesh_Adap_Nonsym_Lshape_0.ps} \includegraphics[width=5.5cm,height=5.5cm]{Mesh_Adap_Nonsym_Lshape_12.ps} \caption{\it The initial mesh and the one after 12 adaptive iterations for the L-shape domain} \label{Mesh_AFEM_Exam_2} \end{figure} \begin{figure}[ht] \centering \includegraphics[width=6cm,height=6cm]{Error_Eigenvalue_Adap_Multi_Nonsymm_H_2.ps} \includegraphics[width=6cm,height=6cm]{Error_Eigenfun_Adap_Multi_Nonsymm_H_2.ps} \caption{\it { Approximation errors of the first eigenvalue and the a posteriori errors of the associated eigenfunction and adjoint eigenfunction}} \label{Convergence_AFEM_Exam_2_First} \end{figure} From Figure \ref{Convergence_AFEM_Exam_2_First}, we observe that the multilevel correction method works well on adaptive meshes with the optimal convergence rate. Furthermore, the situation is very different from the two-grid \cite{Kolman,XuZhou,YangFan} method in that the initial mesh has very little impact on the finest one. Thus the multilevel correction method can be coupled with the adaptive refinement naturally. \subsection{Eigenvalue problem with complex vector} In this subsection, we test the multilevel correction scheme for the problem (\ref{Numerical_Exam_1}) with complex vector $\mathbf b=[1+2\textrm{i}, 1/2-\textrm{i}]^T$. We use the multi-space and multi-grid ways as in Subsections \ref{Multi_Space_Subsection} and \ref{Multi_Grid_Subsection}, respectively, to check the multilevel correction scheme. Figure \ref{Error_First_6_Eigenvalues_Complex} shows the numerical results for the first $6$ eigenvalues. It is observed from Figure \ref{Error_First_6_Eigenvalues_Complex} that the multilevel correction method defined in Algorithm \ref{Multi_Correction} can also work very well for the nonsymmetric eigenvalue problems with complex vector.
\begin{figure}[ht] \centering \includegraphics[width=6cm,height=5cm]{Error_Eigen_Multispace_Nonsymmetric_Multi_Complex.ps} \includegraphics[width=6cm,height=5cm]{Error_Eigen_Multigrid_Nonsymmetric_Multi_Complex.ps} \caption{\it { Approximation errors for the summation of errors for the first $6$ eigenvalues by the multi-space way (left) and the multi-grid way with $H=1/8$ (right). Here, $\lambda_{j,h}^1$ denote the eigenvalue approximation by linear element, $\lambda_{j,h}^2$ is the eigenvalue approximation by the first correction with quadratic element, $\lambda_{j,h}^3$ the eigenvalue approximation by the second correction with cubic element, $\lambda_{j,h}^4$ the eigenvalue approximation by the third correction with quartic element}} \label{Error_First_6_Eigenvalues_Complex} {\epsilon}nd{figure} \section{Concluding remarks} In this paper, we propose and analyze a multilevel correction scheme to improve the efficiency of both {\color{black}defective and nondefective} nonsymmetric eigenpair approximations. In this multilevel correction, we only need to solve eigenvalue problems in the coarsest finite element space. Sometimes, we also need to compute the algebraic eigenspace based on the geometric eigenspace when the ascent is larger than $1$. Furthermore, our multilevel correction scheme can be coupled with the multigrid method to construct a parallel method for eigenvalue problems (see, e.g, \cite{LinXie,LinXie_Multigrid,Xie_IMA,XuZhou_Eigen}). It can also be combined with adaptive techniques (cf. \cite{WuZhang}) for singular eigenfunction cases. These will be our future work. {\bf A final remark.} As long as higher eigenvalues are concerned, the multi-space way is preferred (than the multi-grid way). We can see it clearly by comparing numerical accuracies for summations of the first 6 eigenvalues in \S 5.1 and \S 5.2. 
\section*{Acknowledgments} The first author is supported in part by the National Natural Science Foundation of China (NSFC 91330202, 11001259, 11371026, 11201501, 11031006, 2011CB309703) and the National Center for Mathematics and Interdisciplinary Science, CAS and the President Foundation of AMSS-CAS. The second author is supported in part by the US National Science Foundation through grant DMS-1115530, DMS-1419040, and the National Natural Science Foundation of China (91430216, 11471031). \begin{thebibliography}{10} \bibitem{CliffeHallHouston} K. A. Cliffe, E. J. C. Hall and P. Houston, Adaptive discontinuous Galerkin methods for eigenvalue problems arising in incompressible fluid flows, SIAM J. Sci. Comput., 31(6) (2010), pp. 4607-4632. \bibitem{Babuska2} I. Babu\v{s}ka and J. E. Osborn, Finite element-Galerkin approximation of the eigenvalues and eigenvectors of selfadjoint problems, Math. Comp. 52 (1989), pp. 275-297. \bibitem{BabuskaOsborn} I. Babu\v{s}ka and J. Osborn, Eigenvalue Problems, In Handbook of Numerical Analysis, Vol. II, (Eds. P. G. Lions and Ciarlet P.G.), Finite Element Methods (Part 1), North-Holland, Amsterdam, pp. 641-787, 1991. \bibitem{BrennerScott} S. Brenner and L. Scott, {The Mathematical Theory of Finite Element Methods}, New York: Springer-Verlag, 1994. \bibitem{Chatelin} F. Chatelin, Spectral Approximation of Linear Operators, Academic Press Inc, New York, 1983. \bibitem{Ciarlet} P. G. Ciarlet, The finite Element Method for Elliptic Problem, North-holland Amsterdam, 1978. \bibitem{HeuvelineRannacher_2001} V. Heuveline and R. Rannacher, A posteriori error control for finite element app. roximations of elliptic eigenvalue problems, Adv. Comput. Math., 15 (2001), pp. 107-138. \bibitem{HeuvelineRannacher_2006} V. Heuveline and R. Rannacher, Adaptive FEM for eigenvalue problems with application in hydrodynamic stability analysis, J. Numer. Math., submitted, 2006. \bibitem{Kolman} K. 
Kolman, A two-level method for nonsymmetric eigenvalue problems, Acta Mathematicae Applicatae Sinica (English series), 2(1) (2005), pp. 1-12. \bibitem{LinLuoXie} Q. Lin, F. Luo and H. Xie, {\em A multilevel correction method for Stokes eigenvalue problems and its applications}, Math. Methods Appl. Sci., DOI: 10.1002/mma.2866, 2013. \bibitem{LinXie} Q. Lin and H. Xie, {\em A multi-level correction scheme for eigenvalue problems}, Math. Comp., DOI: http://dx.doi.org/10.1090/S0025-5718-2014-02825-1, 2014. \bibitem{LinXie_Multigrid} Q. Lin and H. Xie, {\em A type of multigrid method for eigenvalue problem}, Research Report in ICMSEC, 2011-06 (2011). \bibitem{NagaZhang} A. Naga and Z. Zhang, {\em Function value recovery and its application in eigenvalue problems}, SIAM J. Numer. Anal., 50(1) (2012), pp. 272-286. \bibitem{NagaZhangZhou} A. Naga, Z. Zhang and A. Zhou, Enhancing eigenvalue approximation by gradient recovery, SIAM J. Sci. Comput., 28(4) (2006), pp. 1289-1300. \bibitem{SchmidHenningson} P. Schmid and D. S. Henningson, Stability and Transition in Shear Flows, Appl. Math. Sci. 142, Springer, New York, 2001. \bibitem{Shaidurov} V. Shaidurov, {\em Multigrid methods for finite element}, Kluwer Academic Publishers, Netherlands, 1995. \bibitem{WuZhang} H. Wu and Z. Zhang, { Enhancing eigenvalue approximation by gradient recovery on adaptive meshes}, IMA J. Numer. Anal., 29(4) (2009), pp. 1008-1022. \bibitem{Xie_IMA} H. Xie, A type of multilevel method for the Steklov eigenvalue problem, IMA J. Numer. Anal., 1-17, doi:10.1093/imanum/drt009, 2013. \bibitem{Xie_JCP} H. Xie, A multigrid method for eigenvalue problem, J. Comput. Phys., 274 (2014), 550-561. \bibitem{Xu} J. Xu, Iterative methods by space decomposition and subspace correction, SIAM Review, 34(4) (1992), pp. 581-613. \bibitem{Xu_Two_Grid} J. Xu, {A new class of iterative methods for nonselfadjoint or indefinite problems}, SIAM J. Numer. Anal., 29 (1992), pp.
303-319. \bibitem{Xu_Nonlinear} J. Xu, {A novel two-grid method for semilinear elliptic equations}, SIAM J. Sci. Comput., 15 (1994), pp. 231-237. \bibitem{XuZhou} J. Xu and A. Zhou, {A two-grid discretization scheme for eigenvalue problems}, Math. Comput., 70(233) (2001), 17-25. \bibitem{XuZhou_Eigen} J. Xu and A. Zhou, {Local and parallel finite element algorithm for eigenvalue problems}, Acta Math. Appl. Sin. Engl. Ser., 18(2) (2002), pp. 185-200. \bibitem{YangFan} Y. Yang and X. Fan, {Generalized Rayleigh quotient and finite element two-grid discretization schemes}, Science in China Series A: Mathematics, 52(9) (2009), pp. 1955-1972. \bibitem{ZienkiewiczZhu} O. Zienkiewicz and J. Zhu, {A simple error estimator and adaptive procedure for practical engineering analysis}, Int. J. Numer. Methods Eng., 24 (1987), pp. 337-357. \end{thebibliography} \end{document}
\begin{document} \pagestyle{empty} \begin{titlepage} \begin{center} {{\LARGE \textbf{Value-distribution of the}}}\\ {{\LARGE \textbf{Riemann zeta-function and related functions}}}\\ {{\LARGE \textbf{near the critical line}}}\\[1ex] \end{center} \vspace*{2cm} \begin{center} Dissertationsschrift zur Erlangung des naturwissenschaftlichen\\[1ex] Doktorgrades der Julius-Maximilians-Universit\"at W\"urzburg \end{center} \vspace*{1cm} \vspace*{1cm} \begin{center} vorgelegt von\\[4ex] { {\large Thomas Christ}}\\[4ex] aus\\[4ex] Ansbach, Deutschland \end{center} \vspace*{2cm} \begin{center} {W\"urzburg 2013} \end{center} \end{titlepage} $\mbox{ }$ Eingereicht am 10.12.2013\\[0.5em] bei der Fakult\"at f\"ur Mathematik und Informatik\\[0.5em] der Julius-Maximilians-Universit\"at W\"urzburg\\[0.5em] 1. Gutachter: Prof. Dr. J\"orn Steuding\\[0.5em] 2. Gutachter: Prof. Dr. Ramunas Garunk\v{s}tis\\[0.5em] Tag der Disputation: 22.04.2014 \addtocontents{toc}{\protect\thispagestyle{empty}} \tableofcontents \pagestyle{fancy} \setcounter{page}{1} \chapter*{Notations} \addcontentsline{toc}{chapter}{Notations} \markboth{Notations}{Notations} We indicate some of the basic notations that we use in this thesis. Usually, we denote a complex variable by $s=\sigma+it$ with real part $\sigma$ and imaginary part $t$.
\par {\bf Set of numbers.} \begin{longtable}{ll} $\mathbb{N}$ & $:=\{1,2,3,...\}$, the set of positive integers\\ $\mathbb{N}_0$ & $:=\mathbb{N}\cup\{0\}$, the set of non-negative integers\\ $\mathbb{P}$ & $:=\{2,3,5,...\}$, the set of prime numbers\\ $\mathbb{Z}$ & $:=\{...,-1,0,1,...\}$, the set of integers\\ $\mathbb{Q}$ & the set of rational numbers\\ $\mathbb{R}$ & the set of real numbers\\ $\mathbb{R}^+$ & the set of positive real numbers\\ $\mathbb{R}^+_0$ & the set of non-negative real numbers\\ $\mathbb{C}$ & the set of complex numbers\\ \textcolor{white}{$(x_n)_n$} &\textcolor{white}{ $:=(x_n)_{n\in\mathbb{N}}:=(x_1,x_2,...)$, a sequence of elements $x_n$ from a certain set $X$.} \end{longtable} {\bf Subsets of the complex plane.} \begin{longtable}{ll} $D_r(s_0)$ & open disc with radius $r>0$ and center $s_0\in\mathbb{C}$\\ $\mathbb{D} $ & $:= D_1(0)$, unit disc\\ $\partial\Omega$ & the boundary of a domain $\Omega\subset\mathbb{C}$\\ $\mathcal{D}$ & $:=\{\sigma+it\in\mathbb{C}\, : \, 0<\sigma<1, \, t>0\}$\\ $\widehat{\mathbb{C}}$ & := $\mathbb{C}\cup\{\infty\}$, the Riemann sphere\\ \textcolor{white}{$(x_n)_n$} &\textcolor{white}{ $:=(x_n)_{n\in\mathbb{N}}:=(x_1,x_2,...)$, a sequence of elements $x_n$ from a certain set $X$.} \end{longtable} {\bf Classes of functions.} \begin{longtable}{ll} $\mathcal{H}(\Omega)$ & set of functions analytic in a domain $\Omega\subset\mathbb{C}$\\ $\mathcal{M}(\Omega)$ & set of functions meromorphic in a domain $\Omega\subset\mathbb{C}$\\ $\mathcal{S}$ & the Selberg class, defined in Section \ref{sec:selbergclass} \\ $\mathcal{S}^{\#}$ & the extended Selberg class, defined in Section \ref{sec:selbergclass} \\ $\mathcal{S}_R^{\#}$ & a subclass of the extended Selberg class, defined in Section \ref{sec:selbergclass}\\ $\mathcal{S}^*$ & a subclass of the Selberg class, defined in Section \ref{sec:selbergclass}\\ $\mathcal{G}$ & an extension of $\mathcal{S}^{\#}$, defined in Section \ref{sec:classG}\\ $\mathbb{N}o$ & a class 
of functions, defined in Section \ref{sec:classN} \\ $\mathscr{H}^2$ & a space of Dirichlet series, defined in Section \ref{sec:DirichletH2} \\ $L_{\pmb{\sigma}}^p(K)$ & the $L^p$ space of a compact group $K$, defined in Section \ref{sec:K}\\ \textcolor{white}{$(x_n)_n$} &\textcolor{white}{ $:=(x_n)_{n\in\mathbb{N}}:=(x_1,x_2,...)$, a sequence of elements $x_n$ from a certain set $X$.} \end{longtable} {\bf Some further notations.} \begin{longtable}{ll} $ {\rm{meas\ }} A$ & Lebesgue measure of a measurable set $A\subset \mathbb{R}$.\\ $\# B$ & cardinality of a finite subset $B\subset \mathbb{R}$.\\ $\mbox{\ d}ens^* J$ & upper density of a subset $J\subset \mathbb{N}$, defined in Section \ref{sec:ergodicflow}\\ $\mbox{\ d}ens_* J$ & lower density of a subset $J\subset \mathbb{N}$, defined in Section \ref{sec:ergodicflow}\\ $(x_n)_n$ & $:=(x_n)_{n\in\mathbb{N}}:=(x_1,x_2,...)$, a sequence of elements $x_n$ from a certain set $X$\\ $n|m$ & $n$ is a divisor of $m$ \\ \end{longtable} {\bf Landau's $O$-notation and the Vinogradov symbols.}\par We use Landau's $O$-notation and the Vinogradov symbols in the following way. Let $f$ and $g$ be real valued functions, which are both defined on a subset of the reals. 
\begin{align*} \begin{minipage}{3cm}$f(x) = O\bigl( g(x) \bigr)$ \\or $f(x)\ll g(x)$, \\ as $x\rightarrow\infty$ \end{minipage} &: \iff \quad \quad \exists_{C>0} \quad \mbox{s.t.}\quad \limsup_{x\rightarrow\infty} \left|\frac{f(x)}{g(x)}\right| \leq C\\[1em] \begin{minipage}{3cm}$f(x)=o(g(x))$, \\ as $x\rightarrow\infty$ \end{minipage} &: \iff \quad \limsup_{x\rightarrow\infty} \left|\frac{f(x)}{g(x)}\right| = 0\\[1em] \begin{minipage}{3cm}$f(x) = \Omega\bigl( g(x) \bigr)$ \\or $f(x)\gg g(x)$,\\ as $x\rightarrow\infty$ \end{minipage} &: \iff \quad \exists_{C>0} \quad \mbox{s.t.}\quad \limsup_{x\rightarrow\infty} \left|\frac{f(x)}{g(x)}\right|\geq C\\[1em] \begin{minipage}{3cm}$f(x)\sim g(x)$, \\ as $x\rightarrow\infty$ \end{minipage}&: \iff \quad \lim_{x\rightarrow\infty} \left|\frac{f(x)}{g(x)}\right| = 1\\[1em] \begin{minipage}{3cm}$f(x)\asymp g(x)$, \\ as $x\rightarrow\infty$ \end{minipage}&: \iff \quad \exists_{A,B>0} \; \; \mbox{s.t.} \;\; \liminf_{x\rightarrow\infty} \left|\frac{f(x)}{g(x)}\right| \geq A \; \; \mbox{ and } \; \; \limsup_{x\rightarrow\infty} \left|\frac{f(x)}{g(x)}\right| \leq B \end{align*} Sometimes we write $O_{\alpha}(\cdot)$, resp. $\ll_{\alpha}$, and $\Omega_{\alpha}(\cdot)$, resp. $\gg_{\alpha}$, to indicate that the implied constants depend on the parameter $\alpha$, respectively. \chapter*{Acknowledgments} \addcontentsline{toc}{chapter}{Acknowledgments} First and foremost, I would like to express my deepest gratitude to my supervisor J\"orn Steuding. I am grateful for his tremendous support and insightful guidance during the last years without which this thesis would not have been completed. 
I appreciated the friendly and uncomplicated atmosphere in our working group and wish to thank for involving me into the academic and scientific life in such a marvelous way.\par I would like to give my special thanks to Antanas Laurin\v{c}ikas, Ramunas Garunk\v{s}tis and Justas Kalpokas from Vilnius university for fruitful collaborations and their warm and kind hospitality during my stay in Vilnius in 2011.\par From April 2011 to September 2013, my work was supported by a scholarship of the Hanns-Seidel-Stiftung funded by the German Federal Ministry of Education and Research (BMBF). I wish to thank the Hanns-Seidel-Stiftung for their ideational and financial support.\par I am grateful for the various positions that I could have at the Department of Mathematics in W\"urzburg during my doctorate studies. I would like to thank the members of chair IV and many other people from the department for their friendship, the inspiring discussions and the nice atmosphere at the department.\par Last but not least, I owe a great debt of gratitude to my family and my dear friends for their enduring support and many unforgettable moments. \begin{flushright} W\"urzburg, December 2013\\ Thomas Christ \end{flushright} \renewcommand{A.\arabic{equation}}{P.\arabic{equation}} \renewcommand{A.\arabic{section}}{P.\arabic{section}} \renewcommand{A.\arabic{theorem}}{P.\arabic{theorem}} \chapter*{Introduction and statement of the main results} \addcontentsline{toc}{chapter}{Introduction and statement of the main results} \markboth{Introduction and statement of the main result}{Introduction and statement of the main result} The Riemann zeta-function is a central object in multiplicative number theory. Its value-distribution in the complex plane encodes deep arithmetic properties of the prime numbers. 
In fact, many important insights into the distribution of the primes were revealed by exploring the analytic behaviour of the Riemann zeta-function.\par The value-distribution of the Riemann zeta-function, however, is far from being well-understood and bears many interesting analytic phenomena which are worth to be studied, independently of their arithmetical relevance. A crucial role is assigned to the analytic behaviour of the zeta-function on the so called critical line. The latter forms the background for several open conjectures; for example, the Riemann hypothesis, the Lindel\"of hypothesis and Ramachandra's denseness conjecture.\par The scope of this thesis is to understand the behaviour of the Riemann zeta-function near and on the critical line in a better way. \par In Section \ref{sec:riemann} of this introductory chapter, we introduce the Riemann zeta-function and expose the exceptional character of its behaviour on the critical line. \par To figure out which basic features of the Riemann zeta-function are responsible for certain phenomena in its value-distribution, it is reasonable to investigate the zeta-function in a broader context. In Section \ref{sec:selbergclass}, we consider the Selberg class, which was introduced by Selberg \cite{selberg:1992} as a promising attempt to gather all Dirichlet series which satisfy similar properties as the Riemann zeta-function.\par In Section \ref{sec:outline}, we provide an outline of this thesis, state the main results and briefly report on our methods. \section{The Riemann zeta-function}\label{sec:riemann} In the following, let $s=\sigma+it$ denote a complex variable with real part $\sigma$ and imaginary part $t$. In the half-plane $\sigma>1$, the Riemann zeta-function is defined by an absolutely convergent Dirichlet series $$ \zeta(s):=\sum_{n=1}^{\infty}\frac{1}{n^s}. $$ Euler revealed an intimate connection of $\zeta(s)$ to the prime numbers. 
He discovered that $\zeta(s)$ can be rewritten as an infinite product $$ \zeta(s)=\prod_{p\in\mathbb{P}}(1-p^{-s})^{-1}, \qquad \sigma>1, $$ where $\mathbb{P}$ denotes the set of prime numbers.\par In his seminal paper of 1859, Riemann \cite{riemann:1859} laid the foundations to investigate $\zeta(s)$ as a function of a complex variable $s$. He discovered that $\zeta(s)$ can be continued analytically to the whole complex plane, except for a simple pole at $s=1$ with residue $1$, and satisfies the functional equation \begin{equation}\label{fct-eq} \zeta(s)=\Delta(s)\zeta(1-s) \qquad \mbox{with }\qquad \Delta(s)= \pi^{s-\frac{1}{2}} \frac{\Gamma\left(\frac{1-s}{2}\right)}{\Gamma\left(\frac{s}{2}\right)}, \end{equation} where $\Gamma$ denotes the Gamma-function. Stirling's formula allows one to describe the analytic behaviour of the factor $\Delta(s)$ appearing in the functional equation in a rather precise way. As $|t|\rightarrow\infty$, the asymptotic formula \begin{equation}\label{Delta} \Delta(\sigma+it) = \left( \frac{|t|}{2\pi}\right)^{\frac{1}{2}-\sigma-it} \exp\left( i(t+\tfrac{\pi}{4})\right) (1+O(|t|^{-1})) \end{equation} holds uniformly for $\sigma$ from an arbitrary bounded interval. The reflection principle $$ \zeta(\overline{s})=\overline{\zeta(s)} \qquad\mbox{for } s\in\mathbb{C} $$ provides a further functional equation for the Riemann zeta-function. Due to the latter, it is sufficient to study the value-distribution of the zeta-function in the upper half-plane $t\geq 0$.\par The functional equation \eqref{fct-eq}, together with the reflection principle, evokes a strong symmetry of the Riemann zeta-function with respect to the so called {\it critical line} $\sigma=\frac{1}{2}$. On the latter, the value-distribution of the Riemann zeta-function is exceptional in many ways. \par {\bf Zeros of the Riemann zeta-function.} The zeta-function has simple zeros at the negative even integers $s=-2n$, $n\in\mathbb{N}$.
These zeros are called {\it trivial zeros}. All other zeros lie inside the so called {\it critical strip} $0\leq\sigma\leq1$. We denote these zeros by $\rho=\beta+i\gamma$ and call them {\it non-trivial zeros}. Due to the functional equation and the reflection principle, the non-trivial zeros are symmetrically distributed with respect to the critical line and the real axis. According to the Riemann-von Mangoldt formula, the number $N(T)$ of non-trivial zeros with imaginary part $\gamma\in(0,T]$ is asymptotically given by $$ N(T)=\frac{T}{2\pi}\log\frac{T}{2\pi e} + O(\log T), $$ as $T\rightarrow\infty$. The \textbf{\textit{ Riemann hypothesis (RH)}} states that all non-trivial zeros of the Riemann zeta-function lie on the critical line $\sigma=\frac{1}{2}$; or, equivalently, that $\zeta(s)\neq 0$ for $\sigma>\frac{1}{2}$. The Riemann hypothesis is neither proven nor disproven and is considered as a central open problem in number theory. Its arithmetic relevance lies in the impact of the non-trivial zeros on the error term in the prime number theorem. The fact that the Riemann zeta-function is non-vanishing in the half-plane $\sigma\geq 1$ leads to an asymptotic formula for the number $\pi(x)$ of primes $p\in\mathbb{P}$ with $p\leq x$. Building on ideas of Riemann, this was proved by Hadamard \cite{hadamard:1896} and de La Vall\'{e}e-Poussin \cite{vallee:1896}, independently. A zero-free region of the Riemann zeta-function to the left of $\sigma=1$ is needed in order to get an asymptotic formula for $\pi(x)$ with explicit error term. Up to now, the largest known zero-free region is due to Korobov \cite{korobov:1958} and Vinogradov \cite{vinogradov:1958}. They showed independently that, for sufficiently large $|t|$, the Riemann zeta-function has no zeros in the region defined by $$ \sigma \geq 1 - \frac{A}{(\log |t|)^{\frac{1}{3}} (\log\log|t|)^{\frac{2}{3}}} $$ with some constant $A>0$. 
Their result implies that $$ E(x):=\pi(x) - \int_2^x \frac{\mbox{\ d} u}{\log u} \ll x \exp\left(-B \frac{(\log x)^{\frac{3}{5}}}{(\log\log x)^{\frac{1}{5}}} \right) $$ with some constant $B>0$. So far, it is not known whether there exists a $\theta\in[\frac{1}{2},1)$ such that the zeta-function has no zeros in the half-plane $\sigma>\theta$. Von Koch \cite{koch:1900} showed that $E(x)\ll x^{\theta+\varepsilon}$ holds, with any $\varepsilon >0$, if and only if the Riemann zeta-function is non-vanishing in $\sigma>\theta$. Thus, in particular, the truth of the Riemann hypothesis would imply that $E(x)\ll x^{\frac{1}{2}+\varepsilon}$.\par There are some partial results supporting the Riemann hypothesis. Hardy \cite{hardy:1914} showed that there are infinitely many zeros on the critical line. His result was improved significantly by Selberg \cite{selberg:1942} who obtained that a positive proportion of all non-trivial zeros can be located on the critical line: let $N^{0}(T)$ denote the number of non-trivial zeros which lie on the critical line and have imaginary part $\gamma\in(0,T]$, then $$ U:=\liminf_{T\rightarrow\infty}\frac{N^{0}(T)}{N(T)}\geq C $$ with some (computable but very small) constant $C>0$. Selberg's lower bound for $U$ was improved considerably by Levinson \cite{levinson:1974} who obtained that $U\geq0.3437$. Later, Conrey \cite{conrey:1989} found that $U\geq 0.4088$ and, very recently, Bui, Conrey \& Young \cite{buiconreyyoung:2011} established $U\geq 0.4105$. \par Besides of measuring the number of zeros on the critical line, there are also attempts to bound the number of possible zeros off the critical line. Let $N(\sigma,T)$ denote the number of non-trivial zeros with real part $\beta>\sigma$ and imaginary part $\gamma\in(0,T]$. Due to a classical result of Selberg \cite{selberg:1946} we know that, uniformly for $\frac{1}{2}\leq \sigma \leq 1$, \begin{equation}\label{selbergzerodensity} N(\sigma,T)\ll T^{1-\frac{1}{4}(\sigma-\frac{1}{2})} \log T. 
\footnote{For more advanced zero-density estimates for the Riemann zeta-function the reader is referred to Titchmarsh \cite[\S 9]{titchmarsh:1986} and Ivi\'{c} \cite[Chapt. 11]{ivic:1985}.} \end{equation} Many computer experiments were done in order to find a counterexample to the Riemann hypothesis. However, until now no zero off the critical line has been detected. By using the Odlyzko and Sch\"onhage algorithm, Gourdon \cite{gourdan:2004} located the first $10^{13}$ zeros of the Riemann zeta-function on the critical line.\par According to the \textit{\textbf{simplicity hypothesis}}, one expects that all zeros of the Riemann zeta-function are simple. Indeed, no multiple zero has been found so far. It is known that at least a positive proportion of all zeros are simple. Let $N^*(T)$ denote the number of simple non-trivial zeros with imaginary part $\gamma\in(0,T]$. Levinson \cite{levinson:1974} proved that $$ S:=\liminf_{T\rightarrow\infty} \frac{N^*(T)}{N(T)} \geq \tfrac{1}{3}. $$ Bui, Conrey \& Young \cite{buiconreyyoung:2011} obtained that, unconditionally, $S\geq 0.4058$. Very recently, Bui \& Heath-Brown \cite{buiheathbrown:2013} proved that $S\geq \frac{19}{27}$, under the assumption of the Riemann hypothesis.\footnote{By assuming additionally the truth of the generalized Lindel\"of hypothesis, this was already known to Bui, Conrey \& Young \cite{buiconreyyoung:2011}. Bui \& Heath-Brown \cite{buiheathbrown:2013} succeeded in removing the generalized Lindel\"of hypothesis by making careful use of the generalized Vaughan identity.}\par Whereas the Riemann hypothesis deals with the horizontal distribution of the non-trivial zeros, there are also many open questions concerning the vertical distribution. Let $(\gamma_n)_n$ denote the sequence of all positive imaginary parts of non-trivial zeros in ascending order. Littlewood \cite{littlewood:1924} showed that the gap between two consecutive ordinates $\gamma_n$, $\gamma_{n+1}$ tends to zero, as $n\rightarrow\infty$.
In particular, he obtained that, as $n\rightarrow\infty$, $$ \gamma_{n+1}-\gamma_n \ll \frac{1}{\log\log\log \gamma_n}. $$ According to the Riemann-von Mangoldt formula the average spacing between two consecutive ordinates $\gamma_n,\gamma_{n+1}\in(T,2T]$ is given by $\frac{2\pi}{\log T}$, as $T\rightarrow\infty$. The \textbf{\textit{gap conjecture}} predicts that there appear arbitrarily small and arbitrarily large deviations from the average spacing: let $$ \lambda:= \limsup_{n\rightarrow\infty} \frac{(\gamma_{n+1}-\gamma_n) \log\gamma_n}{2\pi} \qquad\mbox{and}\qquad \mu:= \liminf_{n\rightarrow\infty} \frac{(\gamma_{n+1}-\gamma_n) \log\gamma_n}{2\pi}. $$ Then, one expects that $\lambda=\infty$ and $\mu=0$. It was remarked by Selberg \cite{selberg:1946-3} and proved by Fujii \cite{fujii:1975} that $\lambda>1$ and $\mu<1$. These are still the only unconditional bounds for $\lambda$ and $\mu$ which are at our disposal. On the assumption of the Riemann hypothesis, the current records in bounding $\lambda$ and $\mu$ are $\lambda>2.766$, according to Bredberg \cite{bredberg:2011}, and $\mu<0.5154$, according to Feng \& Wu \cite{fengwu:2010}.\footnote{If one is willing to assume additional conjectures, there are better results available. By assuming the generalized Riemann hypothesis, Bui \cite{bui:2011-1} obtained that $\lambda>3.033$. By assuming the Riemann hypothesis and certain moment conjectures originating from random matrix theory, Steuding \& Steuding \cite{steudingsteuding:2007} showed that $\lambda=\infty$, as predicted by the gap conjecture.}\par Montgomery \cite{montgomery:1973} studied the pair correlation of ordinates $\gamma$, $\gamma'$ of non-trivial zeros. 
His investigations led him to the conjecture that, for any fixed $0<\alpha < \beta$, $$ \lim_{T\rightarrow\infty} \frac{1}{N(T)} \# \left\{\gamma,\gamma'\in(0,T] \, : \, \alpha \leq \frac{(\gamma-\gamma')\log T}{2\pi} \leq \beta \right\} = \int_{\alpha}^{\beta} \left( 1- \left( \frac{\sin\pi u}{\pi u}\right)^2 \right) \mbox{\ d} u. $$ This is known as \textit{\textbf{Montgomery's pair correlation conjecture (PCC)}}. The truth of the PCC implies that $S=1$ and $\mu=0$. Dyson pointed out to Montgomery that eigenvalues of random Hermitian matrices have exactly the same pair correlation function. This observation laid the foundation for many models for the Riemann zeta-function on the critical line by random matrix theory.\par {\bf $a$-points of the Riemann zeta-function.} Besides the zeros, it is reasonable to study the general distribution of the roots of the equation $\zeta(s)=a$, where $a$ is an arbitrarily fixed complex number. We call these roots $a$-points and denote them by $\rho_a=\beta_a + i\gamma_a$. For sufficiently large $n\in\mathbb{N}$, there is an $a$-point near every trivial zero $s=-2n$. Apart from these $a$-points generated by the trivial zeros, there are only finitely many $a$-points in the half-plane $\sigma\leq 0$. We refer to the $a$-points in $\sigma\leq 0$ as {\it trivial $a$-points} and call all other $a$-points {\it non-trivial $a$-points}. The non-trivial $a$-points can be located in a vertical strip $0\leq \sigma\leq R_a$ with a certain real number $R_a\geq 1$. In analogy to the case $a=0$, Landau \cite{BohrLandauLittlewood:1913} established a Riemann-von Mangoldt-type formula for the number $N_a(T)$ of non-trivial $a$-points with imaginary part $\gamma_a\in(0,T]$: as $T\rightarrow\infty$, $$ N_a(T)=\frac{T}{2\pi} \log\frac{T}{2\pi e c_a} + O\left( \log T\right) $$ with $c_a=1$ if $a\neq 1$ and $c_1=2$. 
Levinson \cite{levinson:1975} proved that all but $O(N_a(T) / \log\log T)$ of the $a$-points with imaginary part $\gamma_a\in(T,2T]$ lie in the strip \begin{equation}\label{levinsonstrip} \frac{1}{2} - \frac{(\log\log T)^2}{\log T} < \sigma < \frac{1}{2} + \frac{(\log\log T)^2}{\log T}. \end{equation} Thus, almost all $a$-points are arbitrarily close to the critical line. Under the assumption of the RH, this phenomenon was already known to Landau \cite{BohrLandauLittlewood:1913}. For $a\neq 0$, Bohr \& Jessen \cite{bohrjessen:1932} showed that the number $N_a(\sigma_1,\sigma_2,T)$ of non-trivial $a$-values which lie inside the strip $\sigma_1<\sigma<\sigma_2$ with arbitrarily chosen $\frac{1}{2}<\sigma_1<\sigma_2<1$ and have imaginary part $\gamma_a \in(0,T]$ is given asymptotically by $$ N_a(\sigma_1,\sigma_2,T) \sim c T, $$ as $T\rightarrow\infty$, with a constant $c>0$ that depends on $\sigma_1$, $\sigma_2$ and $a$. \par {\bf Voronin's universality theorem.} Building on works of Bohr \cite{bohrcourant:1914,bohrjessen:1930,bohrjessen:1932} and his collaborators, Voronin \cite{voronin:1975} discovered a remarkable universality property of the Riemann zeta-function which states, roughly speaking, that every analytic, non-vanishing function on a compact set with connected complement inside the strip $\frac{1}{2}<\sigma<1$ can be approximated by vertical shifts of the Riemann zeta-function. Voronin's universality theorem was generalized by Bagchi \cite{bagchi:1982}, Reich \cite{reich:1980} and others. In its strongest formulation it can be stated as follows. \begin{theorem}[Voronin's universality theorem] \label{th:universality} Let $\mathcal{K}$ be a compact set in the strip $\frac{1}{2}<\sigma <1$ with connected complement. Let $g$ be a continuous, non-vanishing function on $K$, which is analytic in the interior of $K$. 
Then, for every $\varepsilon>0$, $$ \liminf_{T\to\infty}\frac{1}{T} {\rm{meas\ }} \left\{\tau\in(0,T]\,:\,\max_{s\in{\mathcal{K}}}\vert\zeta(s+i\tau)-g(s)\vert<\varepsilon\right\}>0. $$ \end{theorem} Here and in the following, $ {\rm{meas\ }} X$ denotes the Lebesgue measure of a measurable set $X\subset \mathbb{R}$. Bagchi \cite{bagchi:1982} discovered that the Riemann hypothesis can be rephrased in terms of universality. The RH is true, if and only if, the zeta-function is {\it recurrent}, i.e., if the zeta-function can approximate itself in the sense of Voronin's universality theorem. The RH is true if and only if, for any compact subset $\mathcal{K}$ of $\frac{1}{2}<\sigma<1$ with connected complement and any $\varepsilon>0$, $$ \liminf_{T\to\infty}\frac{1}{T} {\rm{meas\ }} \left\{\tau\in(0,T]\,:\,\max_{s\in{\mathcal{K}}}\vert\zeta(s+i\tau)-\zeta(s)\vert<\varepsilon\right\}>0. $$ As a direct consequence, the universality theorem implies the following denseness statement. For every $\frac{1}{2}<\sigma<1$ and $n\in\mathbb{N}_0$, the set $$ V_n(\sigma):=\{(\zeta(\sigma+it),\zeta'(\sigma+it), ... , \zeta^{(n)}(\sigma+it))\ : \ t\in [0,\infty) \} $$ lies dense in $\mathbb{C}^{n+1}$. For $n=0$, this was already known to Bohr et al. \cite{bohrcourant:1914,bohrjessen:1930,bohrjessen:1932}. It follows basically from the Dirichlet representation and the functional equation that, for $\sigma< 0$ or $\sigma> 1$, $$ \overline{V_0(\sigma)} \neq \mathbb{C}. $$ On the assumption of the Riemann hypothesis, Garunk\v{s}tis \& Steuding \cite{garunkstissteuding:2010} proved that, for $\sigma<\frac{1}{2}$, $$ \overline{V_0(\sigma)} \neq \mathbb{C}. $$ However, even by assuming the truth of the Riemann hypothesis, it is not known whether the values of the zeta-function on the critical line lie dense in $\mathbb{C}$ or not. According to \textbf{\textit{Ramachandra's denseness conjecture}}, we expect that $$ \overline{V_0(\tfrac{1}{2})} = \mathbb{C}.
$$ By assuming several moment conjectures arising from random matrix theory models for the Riemann zeta-function, Kowalski \& Nikeghbali \cite{kowalskinikeghbali:2012} obtained that $\overline{V_0(\tfrac{1}{2})} = \mathbb{C}$. Garunk\v{s}tis \& Steuding \cite{garunkstissteuding:2010} showed that a multidimensional denseness statement for the zeta-function on the critical line does not hold. In particular, they proved that $$ \overline{V_1(\tfrac{1}{2})} \neq \mathbb{C}^2. $$ {\bf Mean-square value on vertical lines.} An essential ingredient in the proof of Bohr's denseness result and Voronin's universality theorem is the fact that $$ \lim_{T\rightarrow\infty}\frac{1}{T} \int_{-T}^T \left|\zeta(\sigma+it)\right|^2 \mbox{\ d} t = \sum_{n=1}^{\infty} n^{-2\sigma}<\infty \qquad \mbox{ for }\sigma>\tfrac{1}{2}. $$ On the critical line, the methods of Bohr and Voronin collapse, since $$ \frac{1}{T} \int_{-T}^T \left|\zeta(\tfrac{1}{2}+it)\right|^2 \mbox{\ d} t \sim \log T, \qquad \mbox{as }T\rightarrow\infty, $$ according to Hardy \& Littlewood \cite{hardylittlewood:1936}.\par {\bf Selberg's central limit law.} Due to Selberg (unpublished), the values of the Riemann zeta-function are Gaussian normally distributed, after some suitable normalization: for any measurable set $B\subset \mathbb{C}$ with positive Jordan content, as $T\rightarrow\infty$, $$ \frac{1}{T} {\rm{meas\ }} \left\{t\in (0,T]: \frac{\log\zeta\left(\frac{1}{2}+it\right)}{ \sqrt{\frac{1}{2}\log\log T}}\in {B}\right\} \sim \frac{1}{2\pi}\iint_{B}\exp\left(-{\textstyle{\frac{1}{2}}}(x^2+y^2)\right)\mbox{\ d} x\mbox{\ d} y. $$ For a first published proof, we refer to Joyner \cite{joyner:1986}. Note that $f(x,y):=\exp\left(-{\textstyle{\frac{1}{2}}}(x^2+y^2)\right)$ defines the density function of the bivariate Gaussian normal distribution. {\bf Growth behaviour of the Riemann zeta-function.} The Riemann zeta-function is a function of finite order.
For $\sigma\in\mathbb{R}$ and any $\varepsilon>0$, $$ \zeta(\sigma+it)\ll t^{\theta_{\zeta}(\sigma)+\varepsilon}, \qquad\mbox{as }|t|\rightarrow\infty, $$ where $\theta_{\zeta}(\sigma)$ is a continuous, convex function in $\sigma$ with $$ \theta_{\zeta}(\sigma)=\begin{cases}0 & \mbox{if }\sigma\geq 1,\\ \tfrac{1}{2}-\sigma & \mbox{if }\sigma\leq 0. \end{cases} $$ According to the \textbf{\textit{Lindel\"of hypothesis (LH)}}, we expect that $\theta_{\zeta}(\frac{1}{2})=0$. This would imply that $$ \theta_{\zeta}(\sigma)=\begin{cases}0 & \mbox{if }\sigma\geq \frac{1}{2},\\ \tfrac{1}{2}-\sigma & \mbox{if }\sigma<\frac{1}{2}. \end{cases} $$ However, the Lindel\"of hypothesis is neither proven nor disproven. The best known upper bound for $\theta_{\zeta}(\frac{1}{2})$ is due to Huxley \cite{huxley:2002, huxley:2005}. He proved that $$ \theta_{\zeta}(\tfrac{1}{2}) \leq \frac{32}{205} = 0.1560...\ . $$ The truth of the Riemann hypothesis implies the truth of the Lindel\"of hypothesis. The Lindel\"of hypothesis can be reformulated in terms of power moments to the right of the critical line. Due to classical works of Hardy \& Littlewood \cite{hardylittlewood:1923}, the Lindel\"of hypothesis is true if and only if, for every $k\in\mathbb{N}$ and every $\sigma>\frac{1}{2}$, \begin{equation}\label{Lind} \lim_{T\rightarrow \infty}\frac{1}{T}\int_1^T \left|\zeta(\sigma+it) \right|^{2k} \mbox{\ d} t =\sum_{n=1}^{\infty}\frac{d_k(n)^2}{n^{2\sigma}}, \end{equation} where $d_k$ denotes the generalized divisor function appearing in the Dirichlet series expansion of $\zeta^{k}$. The latter formula is proved only in the cases $k=1,2$ by works of Hardy \& Littlewood \cite{hardylittlewood:1922} and Ingham \cite{ingham:1926}. 
\section{The Selberg class and the extended Selberg class}\label{sec:selbergclass} Selberg \cite{selberg:1992} made a promising attempt to describe axiomatically the class of all Dirichlet series for which an analogue of the Riemann hypothesis is expected to be true.\par {\bf Definition of the Selberg class.} A function $\mathcal{L}$ belongs to the Selberg class $\mathcal{S}$ if it satisfies the following properties: \begin{itemize} \item[(S.1)] {\it Dirichlet series representation.} In the half-plane $\sigma>1$, $\mathcal{L}$ is given by an absolutely convergent Dirichlet series $$ \mathcal{L}(s) = \sum_{n=1}^{\infty} \frac{a(n)}{n^s} $$ with coefficients $a(n)\in\mathbb{C}$. \item[(S.2)] {\it Ramanujan hypothesis.} The Dirichlet series coefficients of $\mathcal{L}$ satisfy the growth condition $a(n)\ll n^{\varepsilon}$ for any $\varepsilon>0$, as $n\rightarrow\infty$; here, the implicit constant in the Vinogradov symbol may depend on $\varepsilon$. \item[(S.3)] {\it Euler product representation.} In the half-plane $\sigma>1$, $\mathcal{L}$ has a product representation $$ \mathcal{L}(s) = \prod_{p\in\mathbb{P}} \mathcal{L}_p(s), $$ where the product is taken over all prime numbers and $$ \mathcal{L}_p(s):=\exp \left(\sum_{k=1}^{\infty} \frac{b(p^k)}{p^{ks}} \right) $$ with suitable coefficients $b(p^k)\in\mathbb{C}$ satisfying $b(p^k)\ll p^{k\theta}$ with some $\theta<\frac{1}{2}$. \item[(S.4)] {\it Analytic continuation.} There exists a non-negative integer $k$ such that $(s-1)^k \mathcal{L}(s)$ defines an entire function of finite order.
\item[(S.5)] {\it Riemann-type functional equation.} $\mathcal{L}$ satisfies a functional equation \begin{equation*} \mathcal{L}(s) = \Delta_{\mathcal{L}} (s) \overline{\mathcal{L}(1-\overline{s})}, \end{equation*} where \begin{equation*} \Delta_{\mathcal{L}}(s):= \omega Q^{1-2s}\prod_{j=1}^f \frac{\Gamma\left( \lambda_j (1-s) + \overline{\mu_j}\right)}{\Gamma\left( \lambda_j s + \mu_j \right)}, \end{equation*} with positive real numbers $Q, \lambda_1,...,\lambda_f$ and complex numbers $\mu_1,...,\mu_f, \omega$ with ${\rm{Re} } \ \mu_j \geq 0$ and $|\omega|=1$. \end{itemize} For a concise survey on the Selberg class and a motivation for the choice of the axioms, the reader is referred to Perelli \cite{perelli:2005}.\par An important parameter of a function $\mathcal{L}\in\mathcal{S}$ is its so called degree which is defined by $$d_{\mathcal{L}}:=2 \sum_{j=1}^f \lambda_j$$ via the quantities $\lambda_j$ from the Riemann-type functional equation. The degree of $\mathcal{L}\in\mathcal{S}$ is uniquely determined. The Riemann zeta-function is an element of the Selberg class of degree one. Its $k$-th power ($k\in\mathbb{N}$) lies also in $\mathcal{S}$ and has degree $k$.\par Besides the Riemann zeta-function, the Selberg class contains many other arithmetically relevant $\mathcal{L}$-functions.
Prominent examples of functions in $\mathcal{S}$ are Dirichlet $L$-functions attached to primitive characters, Dedekind zeta-functions, Hecke $L$-functions associated to algebraic number fields and, under appropriate normalizations, Hecke $L$-functions associated to certain modular forms.\par The Euler product representation of these examples has a very special form: \begin{itemize} \item[(S.3$^*$)] {\it Polynomial Euler product representation.} There exist an integer $m\in\mathbb{N}$ and $\alpha_1(p),...,\alpha_m(p)\in \mathbb{C}$ such that \begin{equation*} \mathcal{L}(s)=\prod_{p\in\mathbb{P}} \prod_{j=1}^m \left(1-\frac{\alpha_j(p)}{p^s}\right)^{-1} \end{equation*} in the half-plane $\sigma>1$. \end{itemize} In the value-distribution of functions in the Selberg class, there appear similar phenomena as in the case of the Riemann zeta-function. It follows from the Euler product representation that $\mathcal{L}\in\mathcal{S}$ has no zeros in the half-plane $\sigma>1$. The function $\mathcal{L}$ has zeros which are generated by the poles of the $\Gamma$-factors appearing in the functional equation. These zeros are called trivial zeros of $\mathcal{L}$ and are located at the points $$ s= -\frac{\mu_j + k}{\lambda_j}, \qquad k\in\mathbb{N}_0, \; j=1,...,f. $$ All other zeros of $\mathcal{L}$ are said to be non-trivial zeros. According to the \textbf{\textit{Grand Riemann hypothesis}}, one expects that every function $\mathcal{L}\in\mathcal{S}$ satisfies an analogue of the Riemann hypothesis, i.e. the non-trivial zeros of every function $\mathcal{L}\in\mathcal{S}$ are located on the critical line $\sigma=\frac{1}{2}$. For general functions in $\mathcal{S}$ much less is known than for the special case of the Riemann zeta-function.
For example, by now, it is not verified whether every function $\mathcal{L}\in\mathcal{S}$ satisfies the following zero-density estimate: \begin{itemize} \item[(DH)] {\it Selberg's zero-density estimate.} Let $\mathcal{L}\in\mathcal{S}$ and $N_0(\sigma,T)$ denote the number of non-trivial zeros $\rho=\beta+i\gamma$ of $\mathcal{L}$ with real part $\beta>\sigma$ and imaginary part $\gamma\in(0,T]$. Then, there exists a positive number $\alpha$ such that $$ N_{0}(\sigma, T) \ll T^{1-\alpha(\sigma-\frac{1}{2})} \log T $$ uniformly in $\sigma>\frac{1}{2}$, as $T\rightarrow\infty$. \end{itemize} The \textbf{\textit{Grand density hypothesis}} asserts that (DH) is true for every $\mathcal{L}\in\mathcal{S}$. Besides the Riemann zeta-function, (DH) is verified for example for Dirichlet $L$-functions attached to primitive characters; see Selberg \cite{selberg:1946-2}. Certainly, the truth of the Grand Riemann hypothesis implies the truth of the Grand density hypothesis. Moreover, according to the \textbf{\textit{Grand Lindel\"of hypothesis}} we expect that every function $\mathcal{L}\in\mathcal{S}$ satisfies an analogue of the Lindel\"of hypothesis, i.e., for any $\varepsilon>0$, $$ \mathcal{L}\left(\tfrac{1}{2}+it\right) \ll t^{\varepsilon}, \qquad \mbox{as }t\rightarrow\infty. $$ Besides many unsolved analytic questions concerning functions in $\mathcal{S}$, there are also several structural problems related to $\mathcal{S}$ as a class of Dirichlet series. For example, one expects that the Dirichlet series coefficients $a(n)$ of $\mathcal{L}\in\mathcal{S}$ satisfy the following prime mean-square condition; see Steuding \cite[Chapt. 
6.6]{steuding:2007}: \begin{itemize} \item[(S.6)] {\it Prime mean-square condition.} For $\mathcal{L}\in\mathcal{S}$, there exists a positive constant $\kappa_{\mathcal{L}}$ such that \begin{equation*} \lim_{x\rightarrow\infty}\frac{1}{\pi(x)} \sum_{p\leq x} |a(p)|^2 = \kappa_{\mathcal{L}}, \end{equation*} where the summation is taken over all primes $p\leq x$. \end{itemize} Selberg \cite{selberg:1992} conjectured that the Dirichlet series coefficients $a(n)$ of any function $\mathcal{L}\in\mathcal{S}$ satisfy the following property: \begin{itemize} \item[(S.6$^*$)] {\it Selberg's prime coefficient condition.} For $\mathcal{L}\in\mathcal{S}$, there exists a positive integer $n_{\mathcal{L}}$ such that \begin{equation*} \sum_{\begin{subarray}{c} p\in\mathbb{P} \\ p\leq x \end{subarray}} \frac{|a(p)|^2}{p} = n_{\mathcal{L}} \log\log x + O(1), \end{equation*} as $x\rightarrow\infty$. \end{itemize} We know that the Riemann zeta-function and Dirichlet $L$-functions attached to primitive characters satisfy (S.6) and (S.6$^*$); see Mertens \cite{mertens:1874} and Dirichlet \cite{dirichlet:1837}. The conditions (S.6) and (S.6$^*$) are closely related to one another; see Steuding \cite[Chapt. 6.6]{steuding:2007} for details. Selberg conjectured that (S.6$^*$) results from a deeper structure in $\mathcal{S}$: obviously, the Selberg class is multiplicatively closed. We call a function $\mathcal{L}\in\mathcal{S}$ primitive if $$ \mathcal{L} = \mathcal{L}_1 \cdot \mathcal{L}_2 \qquad \mbox{ with }\mathcal{L}_1,\mathcal{L}_2\in\mathcal{S} $$ implies that $\mathcal{L}_1=\mathcal{L}$ or $\mathcal{L}_2 = \mathcal{L}$. Roughly speaking, primitive functions $\mathcal{L}\in\mathcal{S}$ cannot be written as a non-trivial product of other functions in $\mathcal{S}$. According to \textbf{\textit{Selberg's orthogonality conjecture}}, we expect that the following is true. 
\begin{itemize} \item[(S.6$^{**}$)] {\it Selberg's orthogonality conjecture.} For any primitive functions $\mathcal{L}_1,\mathcal{L}_2\in\mathcal{S}$ with Dirichlet series coefficients $a_{\mathcal{L}_1}(n)$, resp. $a_{\mathcal{L}_2}(n)$, $$ \sum_{\begin{subarray}{c} p\in\mathbb{P} \\ p\leq x \end{subarray}} \frac{a_{\mathcal{L}_1}(p)\overline{a_{\mathcal{L}_2}(p)}}{p} = \begin{cases} \log\log x + O(1) & \mbox{if }\mathcal{L}_1=\mathcal{L}_2,\\ O(1) & \mbox{otherwise.} \end{cases} $$ \end{itemize} Besides the Selberg class, we shall also work with certain subclasses or extensions of the Selberg class: {\bf The extended Selberg class $\mathcal{S}^{\#}$.} A function $\mathcal{L}\not\equiv 0$ belongs to the extended Selberg class $\mathcal{S}^{\#}$ if it satisfies axioms (S.1), (S.4) and (S.5). The functions in $\mathcal{S}^{\#}$ do not ne\-cessari\-ly satisfy the Riemann hypothesis. The Davenport-Heilbronn zeta-function is an element of $\mathcal{S}^{\#}$, but not of $\mathcal{S}$ and has non-trivial zeros off the critical line. However, one expects that the Lindel\"of hypothesis remains still true for every function $\mathcal{L}\in\mathcal{S}^{\#}$. \par {\bf The class $\mathcal{S}^{\#}_R$.} A function $\mathcal{L}\in \mathcal{S}^{\#}$ with $d_{\mathcal{L}}>0$ belongs to the class $\mathcal{S}^{\#}_R$ if it satisfies additionally the Ramanujan hypothesis (S.2). \par {\bf The class $\mathcal{S}^{*}$.} A function $\mathcal{L}\in\mathcal{S}$ belongs to the class $\mathcal{S}^*$ if $\mathcal{L}$ satisfies the zero-density estimate (DH) and Selberg's prime coefficient condition (S.6$^*$). One expects that both conditions hold for every function $\mathcal{L}\in\mathcal{S}$ and, thus, that $\mathcal{S}^*=\mathcal{S}$. We know that the Riemann zeta-function and Dirichlet $L$-functions attached to primitive characters are elements of $\mathcal{S}^{*}$. \section{Statement of the main results and outline of the thesis}\label{sec:outline} This thesis is divided into two parts. 
In part I we study the value-distribution of the Riemann zeta-function on and near the critical line. In particular, we focus on the collapse of the Voronin-type universality property at the critical line and the clustering of $a$-points around the critical line. We discuss the interplay of these two features and their connection to Ramachandra's denseness conjecture. In our argumentation, we shall use several results from the theory of normal families of meromorphic functions. For the convenience of the reader we summarize the results which we shall need in the appendix.\par The critical line is a natural boundary for the Voronin-type universality property of the Riemann zeta-function; see Section \ref{sec:failure}. In Chapter \ref{ch:conceptsuniv} we modify Voronin's universality concept. Roughly speaking, we add a scaling factor to the vertical shifts that appear in Voronin's universality theorem and regard $$ \zeta_{\tau}(s):=\zeta\left(\tfrac{1}{2}+\lambda(\tau)s +i\tau \right), \qquad s\in\mathbb{D}, $$ with $\tau\in[2,\infty)$ and a positive function $\lambda$ satisfying $\lim_{\tau\rightarrow\infty}\lambda(\tau)=0$. By sending $\tau$ to infinity, this leads to a limiting process for the Riemann zeta-function in a funnel-shaped neighbourhood of the critical line, more precisely in the region \begin{equation*}\label{region} S_{\lambda}:=\left\{ \sigma+it\in\mathbb{C} \, : \, \tfrac{1}{2}-\lambda(t)<\sigma<\tfrac{1}{2}+\lambda(t), \; \; t\geq 2\right\}. \end{equation*} We shall see in Proposition \ref{mainprop} that possible limit functions of this process depend on the choice of $\lambda$ and are strongly affected by the functional equation of the Riemann zeta-function. Our results do not only apply for the Riemann zeta-function but hold for meromorphic functions that satisfy a Riemann-type functional equation in general. 
For this purpose, we define in Chapter \ref{chapt:classG} the class $\mathcal{G}$, which generalizes the extended Selberg class $\mathcal{S}^{\#}$.\par \begin{figure} \caption{The funnel-shaped region $S_{\lambda}$.} \label{fig:asymstrip} \end{figure} In Chapter \ref{ch:smalllarge} we shall see that Selberg's central limit law implies that, for suitably chosen $\lambda$, the limiting process of Chapter \ref{ch:conceptsuniv} has a strong tendency to converge to $g\equiv 0$ or to $g\equiv \infty$; see Theorem \ref{th:largesmall}. This provides information on the frequency of small and large values of the Riemann zeta-function in certain regions $S_{\lambda}$ and complements results of Laurin\v{c}ikas \cite[Chapt. 3, Theorem 3.5.1]{laurincikas:1991-2}, Bourgade \cite{bourgade:2010} and others who established certain extensions of Selberg's central limit law; see Section \ref{sec:selbergcentrallimit}. For example, we deduce that the Riemann zeta-function assumes both arbitrarily small and arbitrarily large values on every path to infinity which lies inside $S_{\lambda}$, where the function $\lambda$ satisfies $$ \lambda(t)=\frac{c}{\log t}, \qquad t\geq 2, $$ with an arbitrary constant $c>0$; see Corollary \ref{cor:selbergsmalllarge} and Corollary \ref{cor:curvessmalllarge}. Selberg's central limit law does not only apply to the Riemann zeta-function. Due to Selberg \cite{selberg:1992} it holds (with suitable adaptations) for every function in the class $\mathcal{S}^*$. Thus, most of our results in Chapter \ref{ch:smalllarge} hold for arbitrary functions $\mathcal{L}\in\mathcal{S}^*$. \par Levinson \cite{levinson:1975} showed that the $a$-points of the Riemann zeta-function cluster around the critical line. In Chapter \ref{chapt:apoints} we investigate how to choose $\lambda$ such that almost all or infinitely many $a$-points of the Riemann zeta-function lie in the region $S_{\lambda}$. 
Levinson \cite{levinson:1975} relied essentially on a lemma of Littlewood which can be considered as an integrated version of the principle of argument. Endowed with a result of Selberg \cite{selberg:1992}, resp. Tsang \cite[\S 8]{tsang:1984}, Levinson's method yields that, for any $a\in\mathbb{C}$, almost all $a$-points of the Riemann zeta-function (in a certain density sense) lie inside the region $S_{\lambda}$, if $\lambda$ is chosen such that $$ \lambda(t)=\frac{\mu(t)\sqrt{\log\log t}}{\log t}, \qquad t\geq 2, $$ with an arbitrary positive function $\mu$ satisfying $\lim_{t\rightarrow\infty}\mu(t)=\infty$; see Theorem \ref{th:levinsonselberg}. Besides Levinson's method, we use certain arguments from the theory of normal families and rely on the notation of filling discs to study the $a$-point distribution of the Riemann zeta-function near the critical line. With these concepts we obtain new insights into the $a$-point distribution, complementing the observations of Levinson. In particular, we show that, for every $a\in\mathbb{C}$, with at most one exception, there are infinitely many $a$-points of the Riemann zeta-function inside the region $S_{\lambda}$, if $\lambda$ is chosen such that $$ \lambda(t)=\frac{\mu(t)}{\log t}, \qquad t\geq 2, $$ with any positive function $\mu$ satisfying $\lim_{t\rightarrow\infty}\mu(t)=\infty$; see Theorem \ref{th:levinsonselberg}. We shall see that, under quite general assumptions, the same is true for functions in $\mathcal{G}$. 
Beyond this, relying on a result of Ng \cite{ng:2008}, we prove that, under the assumption of the generalized Riemann hypothesis for Dirichlet $L$-functions, for every $a\in\mathbb{C}$, with at most one exception, there are infinitely many $a$-points of the Riemann zeta-function inside the region $S_{\lambda}$, if $\lambda$ satisfies $$ \lambda(t) = \mu(t)\exp \left(-c_0 \frac{\log t}{\log\log t} \right), \qquad t\geq 2, $$ where $\mu$ is any positive function satisfying $\lim_{t\rightarrow\infty}\mu(t)=\infty$ and $c_0$ any positive constant less than $\frac{1}{\sqrt{2}}$.\par The results of Chapter \ref{ch:smalllarge} and \ref{chapt:apoints} help us to approach Ramachandra's denseness conjecture in Chapter \ref{ch:curve}. Obviously, we have $$ 0\in V_0(\tfrac{1}{2})=\left\{\zeta(\tfrac{1}{2}+it) \, : \, t\in[2,\infty)\right\}.$$ Ramachandra's conjecture suggests that zero is in particular an interior point of $\overline{V_0(\tfrac{1}{2})}$. This is, however, neither proven nor disproven. Relying on the results of Chapter \ref{ch:smalllarge}, we show that there is a subinterval $A\subset [0,2\pi)$ of length at least $\frac{\pi}{4}$ such that, for every $\theta\in A$, there is a sequence $(t_n)_n$ of numbers $t_n\in[2,\infty)$ with $$ \zeta(\tfrac{1}{2}+it_n)\neq 0, \qquad \lim_{n\rightarrow\infty} \zeta(\tfrac{1}{2}+it_n) = 0 \qquad \mbox{ and } \qquad \arg \zeta(\tfrac{1}{2}+it_n) \equiv \theta \mod 2\pi; $$ see Theorem \ref{th:zeroasintpoint}. This may be interpreted as a weak counterpart of a result of Kalpokas, Korolev \& Steuding \cite{kalpokaskorolevsteuding:2013} who showed that, for every $\theta\in[0,2\pi)$, there is a sequence $(t_n)_n$ of numbers $t_n\in[2,\infty)$ with $\lim_{n\rightarrow\infty} t_n = \infty$ such that, for $n\in\mathbb{N}$, $$ |\zeta(\tfrac{1}{2}+it_n)|\geq C (\log t_n)^{5/4}\qquad \mbox{ and } \qquad \arg \zeta(\tfrac{1}{2}+it_n) \equiv \theta \mod 2\pi $$ with some positive constant $C$. 
\par Moreover, we investigate in Chapter \ref{ch:curve} whether there are any curves $$ \gamma:[1,\infty)\rightarrow \mathbb{C}, \qquad t \mapsto \tfrac{1}{2}+\varepsilon(t)+it $$ with $\lim_{t\rightarrow\infty}\varepsilon(t)=0$ such that the values of the Riemann zeta-function on these curves lie dense in $\mathbb{C}$. If we could establish a denseness result for the Riemann zeta-function on curves $\gamma$ with $\varepsilon(t)$ tending to zero fast enough, then the truth of Ramachandra's conjecture would follow; see Theorem \ref{th:curvesmotivation}. In Theorem \ref{th:densenessavalues} and Theorem \ref{th:enumerationbohr} we prove that there exist certain curves $\gamma$ on which the values of the Riemann zeta-function lie dense in $\mathbb{C}$. We rely here both on the $a$-point results of Chapter \ref{chapt:apoints} and on Bohr's method. However, we shall not be able to derive a denseness statement for the zeta-values on the critical line.\par In part II we study the value distribution of the Riemann zeta-function and related functions to the right of the critical line. We aim at a weak version of the Lindel\"of hypothesis. According to Hardy \& Littlewood \cite{hardylittlewood:1923}, the Lindel\"of hypothesis can be reformulated in terms of power moments to the right of the critical line. In particular, the Lindel\"of hypothesis is equivalent to statement \eqref{Lind}. Tanaka \cite{tanaka:2008} showed recently that \eqref{Lind} is true in the following measure-theoretical sense. Let $\pmb{1}_{X}$ denote the indicator function of a set $X\subset\mathbb{R}$ and $X^c := \mathbb{R}\setminus X$ its complement. 
Tanaka proved that there exists a subset $A\subset[1,\infty)$ of density \begin{equation}\label{densA} \lim_{T\rightarrow \infty} \frac{1}{T}\int_1^{T} \pmb{1}_{A}(t)\mbox{\ d} t = 0, \end{equation} such that, for every $k\in\mathbb{N}$ and every $\sigma>\frac{1}{2}$, \begin{equation}\label{t1} \lim_{T\rightarrow\infty}\frac{1}{T} \int_1^T \left| \zeta(\sigma+it)\right|^{2k} \pmb{1}_{A^c}(t) \mbox{\ d} t = \sum_{n=1}^{\infty} \frac{d_k(n)^2}{n^{2\sigma}}, \end{equation} where $d_k$ denotes the generalized divisor function. Thus, Tanaka showed that \eqref{Lind} holds if one neglects a certain set $A\subset [1,\infty)$ of density zero from the path of integration. Tanaka used some ergodic reasoning and methods from abstract harmonic analysis to establish his results.\par In the main theorem of Part II, Theorem \ref{th:probmom}, we extend Tanaka's result. We rely here essentially on his methods and ideas.\par We provide an integrated and discrete version of \eqref{t1}. The discrete version, for example, implies the following:\par \textit{Let $\alpha\in (\frac{1}{2},1]$ and $l>0$ such that $$l\notin \{2\pi k(\log\tfrac{n}{m})^{-1} \, : \, k,n,m\in\mathbb{N}, n\neq m\}.$$ Then, there is a subset $J\subset \mathbb{N}$ with $$ \lim_{N\rightarrow\infty} \frac{1}{N}\sum_{\begin{subarray}{c}n\in J \\ n\leq N \end{subarray}} 1=0 $$ such that, for every $k\in\mathbb{N}$, uniformly for $\sigma\in[\alpha,2]$ and $\lambda\in [0,l]$, \begin{equation*}\label{rzeta} \lim_{N\rightarrow\infty}\frac{1}{N} \sum_{\begin{subarray}{c}n\in J \\ n\leq N \end{subarray}} \bigl|\zeta(\sigma + i\lambda + inl) \bigr|^{2k} = \sum_{n=1}^{\infty} \frac{d_k(n)^2}{n^{2\sigma}}. \end{equation*}}Moreover, we show that Tanaka's result holds for a large class of functions with Dirichlet series expansion in $\sigma>1$. Our result implies, for instance, the following:\par \textit{Let $\mathcal{L}(s)$ be a Dirichlet series that satisfies the Ramanujan hypothesis. 
Suppose that $\mathcal{L}(s)$ extends to a meromorphic function of finite order in some half-plane $\sigma>u\geq \frac{1}{2}$ with at most finitely many poles. Suppose that $$ \limsup_{T\rightarrow\infty} \frac{1}{T} \int_{1}^{T} \left|\mathcal{L}(\sigma+it)\right|^2 \mbox{\ d} t < \infty \qquad \mbox{for }\sigma>u. $$ Then, for every $\alpha\in(u,1]$, there is a subset $A\subset[1,\infty)$ satisfying \eqref{densA} such that, for every $k\in\mathbb{N}$, uniformly for $\sigma\in[\alpha,2]$, \begin{equation*}\label{r} \lim_{T\rightarrow\infty} \frac{1}{T}\int_1^T \left| \mathcal{L}(\sigma+it)\right|^{2k} \pmb{1}_{A^c}(t) \mbox{\ d} t = \sum_{n=1}^{\infty} \frac{|a_k(n)|^2}{n^{2\sigma}} \end{equation*} and \begin{equation*}\label{rrr} \lim_{T\rightarrow\infty} \frac{1}{T}\int_1^T \mathcal{L}^k(\sigma+it) \pmb{1}_{A^c}(t) \mbox{\ d} t = a_k(1), \end{equation*} where the $a_k(n)$ denote the coefficients appearing in the Dirichlet series expansion of $\mathcal{L}^k$. If $\mathcal{L}$ can be written additionally as a polynomial Euler product in $\sigma>1$, then we find a subset $A\subset[1,\infty)$ satisfying \eqref{densA} such that, for every $k\in\mathbb{N}$, uniformly for $\sigma\in[\alpha,2]$, \begin{equation*}\label{rr} \lim_{T\rightarrow\infty}\frac{1}{T} \int_1^T \left| \mathcal{L}(\sigma+it)\right|^{-2k} \pmb{1}_{A^c}(t) \mbox{\ d} t = \sum_{n=1}^{\infty} \frac{|a_{-k}(n)|^2}{n^{2\sigma}}, \end{equation*} \begin{equation*}\label{rrrr} \lim_{T\rightarrow\infty} \frac{1}{T}\int_1^T \left| \log \mathcal{L}(\sigma+it)\right|^{2} \pmb{1}_{A^c}(t) \mbox{\ d} t = \sum_{n=1}^{\infty} \frac{|a_{\log\mathcal{L}}(n)|^2}{n^{2\sigma}} \end{equation*} and \begin{equation*}\label{rrrrr} \lim_{T\rightarrow\infty} \frac{1}{T}\int_1^T \left| \frac{\mathcal{L}'(\sigma+it)}{\mathcal{L}(\sigma+it)}\right|^2 \pmb{1}_{A^c}(t) \mbox{\ d} t = \sum_{n=1}^{\infty} \frac{|\mathcal{L}ambda_{\mathcal{L}}(n)|^2}{n^{2\sigma}}, \end{equation*} where the $a_{-k}(n)$, $a_{\log\mathcal{L}}(n)$ and 
$\Lambda_{\mathcal{L}}(n)$ denote the coefficients of the Dirichlet series expansion of $\mathcal{L}^{-k}$, $\log\mathcal{L}$ and $\mathcal{L}'/\mathcal{L}$, respectively.} By working with a certain normality feature we shall relax the conditions posed on $\mathcal{L}$ above; see Section \ref{sec:classN}. Moreover, we shall see that our results are connected to the Lindel\"of hypothesis in the extended Selberg class and complement existing mean-value results due to Carlson \cite{carlson:1922}, Potter \cite{potter:1940}, Steuding \cite{steuding:2007}, Reich \cite{reich:1980-2}, Good \cite{good:1978}, Selberg \cite{selberg:1992} and others; see Section \ref{sec:probmom}. \part[Value-distribution near the critical line]{Value-distribution near the critical line} \renewcommand{\thechapter}{\arabic{chapter}} \renewcommand{\thesection}{\arabic{chapter}.\arabic{section}} \renewcommand{\theequation}{\arabic{chapter}.\arabic{equation}} \renewcommand{\thetheorem}{\arabic{chapter}.\arabic{theorem}} \setcounter{chapter}{0} \chapter{A Riemann-type functional equation}\label{chapt:classG} In this chapter we define the class $\mathcal{G}$ which gathers all meromorphic functions that satisfy a Riemann-type functional equation. The class $\mathcal{G}$ generalizes the extended Selberg class $\mathcal{S}^{\#}$. By investigating the behaviour of functions in $\mathcal{G}$ we are able to detect analytic properties of functions in $\mathcal{S}^{\#}$ which are solely induced by a Riemann-type functional equation and do not depend on the Dirichlet series representation.\par In Section \ref{sec:Delta} we state some basic facts about the function $\Delta_p$ that characterizes a Riemann-type functional equation. In Section \ref{sec:classG} we define the class $\mathcal{G}$ and give a brief overview on its elements. 
\section{The factor \texorpdfstring{$\Delta_p$}{Delta p} of a Riemann-type functional equation}\label{sec:Delta} {\bf Definition and basic properties of $\Delta_p$.} For a given parameter tuple $$ p:=(\omega, Q, \lambda_1,...,\lambda_f, \mu_1,...,\mu_f), \qquad f\in\mathbb{N}_0, $$ consisting of positive real numbers $Q, \lambda_1,..., \lambda_f$ and complex numbers $\omega, \mu_1,...,\mu_f$ with $|\omega| = 1$, we set \begin{equation}\label{def:Delta_p} \Delta_{p}(s):= \omega Q^{1-2s}\prod_{j=1}^f \frac{\Gamma\left( \lambda_j (1-s) + \overline{\mu_j}\right)}{\Gamma\left( \lambda_j s + \mu_j \right)}, \end{equation} where $\Gamma$ denotes the Gamma-function. Here, in contrast to the functions $\Delta_p$ used in the definition of the (extended) Selberg class, we do not pose any restriction on the real parts of the $\mu_j$'s.\par If $f=0$, we read \eqref{def:Delta_p} as $\Delta_p(s):=\omega Q^{1-2s}$ and say that $\Delta_p(s)$ has degree $d_p=0$. In this degenerate case, $\Delta_p(s)$ defines an analytic, non-vanishing function in $\mathbb{C}$. Moreover, for every function $\Delta_p$ with $d_p=0$, the corresponding parameter tuple $p=(\omega,Q)$ is uniquely determined.\par If $f\geq 1$, we define the degree of $\Delta_p(s)$ by $$d_p := 2\sum_{j=1}^f \lambda_j.$$ Certainly, in this case, $d_p$ is always a positive real number. As the Gamma-function is non-vanishing and analytic in $\mathbb{C}$, except for simple poles at the non-positive integers, $\Delta_p(s)$ with $d_p>0$ defines a meromorphic function in $\mathbb{C}$ with possible poles located at $$ s= 1 + \frac{n+\overline{\mu_j}}{\lambda_j}, \qquad n\in\mathbb{N}_0, \quad j=1,...,f, $$ and possible zeros located at $$ s=\frac{-n-\mu_j}{\lambda_j}, \qquad n\in\mathbb{N}_0, \quad j=1,...,f. $$ It might happen that zeros and poles arising from different Gamma-quotients cancel each other or lead to multiple zeros or poles. 
We observe that all poles and zeros of $\Delta_p$ lie in the horizontal strip defined by \begin{equation}\label{stripzerospoles} \min \left\{-\tfrac{|{\rm{Im} } \ \mu_j|}{\lambda_j}\, : \, j=1,...,f \right\}\leq t \leq \max \left\{\tfrac{|{\rm{Im} } \ \mu_j|}{\lambda_j}\, : \, j=1,...,f \right\}. \end{equation} For a given function $\Delta_p$ with $d_p>0$, its representation in the form \eqref{def:Delta_p} and, thus, its assigned parameter tuple $p$ is not unique: by means of the Gauss multiplication formula $$ (2\pi)^{(n-1)/2} n^{1/2 - n s} \Gamma\left(ns \right) = \prod_{k=0}^{n-1} \Gamma\left( s + \frac{k}{n} \right), \qquad n\in\mathbb{N}, $$ and the factorial formula $$ \Gamma(s+1) = s\Gamma(s) $$ for the Gamma-function, we can easily vary the shape of \eqref{def:Delta_p} and, thus, the data of $p$. However, as we shall see below, the degree $d_p$ of $\Delta_p$ remains invariant under these transformations.\par {\bf An asymptotic expansion for $\Delta_p$.} Using Stirling's formula \begin{equation}\label{eq:stirling} \log \Gamma(s) = \left(s-\frac{1}{2}\right) \log s - s + \frac{1}{2}\log 2\pi + O\left(\frac{1}{|s|} \right) \end{equation} which is valid for $s\in\mathbb{C}$ satisfying $|s|\geq 1$ and $|\arg s | \leq \pi - \delta$ with any fixed $\delta>0$, uniformly in $\sigma$, as $|s|\rightarrow\infty$, we find that \begin{align*} \Gamma(\sigma+it) & = \sqrt{2\pi} |t|^{\sigma-1/2} \exp\left(-\frac{\pi}{2}|t| + i\left( t\log|t| - t + \frac{\pi t}{2|t|}\left(\sigma-\frac{1}{2} \right) \right) \right)\\ & \qquad \times \left(1 + O\left(\frac{1}{|t|}\right) \right) \end{align*} holds uniformly for $\sigma$ from an arbitrary bounded interval, as $|t|\rightarrow\infty$. From this, we derive by a straightforward computation the following asymptotic expansion for $\Delta_p$ and its logarithmic derivative. \begin{lemma}\label{lem:asym_Delta_p} Let $\Delta_p(s)$ be defined by \eqref{def:Delta_p}. 
Then, uniformly for $\sigma$ from an arbitrary bounded interval, as $|t|\rightarrow\infty$, \begin{equation}\label{asymext_Delta_p} \mathbb{D}elta_p (\sigma+it) = \omega_p \left(\lambda_p Q^2 |t|^{d_p}\right)^{1/2 - \sigma - it} \exp\left( id_p t + i{\rm{Im} } \mu_p \log |t| \right) \left( 1+ O\left( \frac{1}{|t|} \right) \right) \end{equation} and \begin{equation}\label{asymext_logdiff_Delta_p} \frac{\mathbb{D}elta_p'(\sigma+it)}{\mathbb{D}elta_p(\sigma+it)} = - d_p \log |t| - \log Q^2 \lambda_p + O\left(\frac{1}{|t|} \right). \end{equation} Here, $d_p$ denotes the degree of $\mathbb{D}elta_p(s)$. The quantities $\mu_p$, $\lambda_p$ and $\omega_p$ are defined by $$ \mu_p := \sum_{j=1}^f (1-2 \mu_j), \qquad \qquad \lambda_p := \prod_{j=1}^f \lambda_j^{2\lambda_j} $$ and $$ \omega_p := \omega \exp\left( i\frac{\pi}{4}(2{\rm{Re} } \mu_p - d_p) - i {\rm{Im} } \mu_p \right) \prod_{j=1}^f \lambda_j ^{-i2{\rm{Im} } \mu_j}, $$ if $d_p>0$; and by $\mu_p:=0$, $\lambda_p:=1$ and $\omega_p:=\omega$, if $d_p=0$. \end{lemma} We observe that the quantity $\omega_p$ in Lemma \ref{lem:asym_Delta_p} has modulus one.\par {\bf Invariant parameters of $\mathbb{D}elta_p$.} From the asymptotic expansion \eqref{asymext_Delta_p}, we deduce that, for every function $\mathbb{D}elta_p$, the quantities $d_p$, $Q^2\lambda_p$, ${\rm{Im} } \mu_p$ and $\omega_p$ are uniquely determined, although the parameter tuple $p$ is in general not unique. For a deeper understanding of the structure of parameter tuples $p$ leading to the same function $\mathbb{D}elta_p$, we refer to Kaczorowski \& Perelli \cite{kaczorowskiperelli:2000}.\par {\bf The function $\mathbb{D}elta_p$ on the line $\sigma=\frac{1}{2}$.} By the definition of $\mathbb{D}elta_p(s)$, we have $$ \mathbb{D}elta_p(s)\cdot \overline{\mathbb{D}elta_p(1-\overline{s})} = 1 $$ for $s\in\mathbb{C}$. This implies in particular that for real $t$ $$ \left|\mathbb{D}elta_p(\tfrac{1}{2}+it) \right| = 1. 
$$ {\bf A logarithm and a square root function for $\mathbb{D}elta_p$.} For a given function $\mathbb{D}elta_p$, we define the slitted plane \begin{equation}\label{CDelta} \mathbb{C}_{\mathbb{D}elta_p} := \mathbb{C} \setminus \left( \bigcup_{z_0\in\mathcal{N}_{\mathbb{D}elta_p}} L_{z_0}\right), \end{equation} where $\mathcal{N}_{\mathbb{D}elta_p}$ denotes the union of all zeros and poles of $\mathbb{D}elta_p$ and $L_{z_0}$ the vertical half-line defined by $$ L_{z_0}:=\left\{{\rm{Re} } \ z_0 + it \, : \, -\infty < t\leq {\rm{Im} } \ z_0\right\}. $$ As all zeros and poles of $\mathbb{D}elta_p$ can be located inside the strip \eqref{stripzerospoles}, we observe that $\mathbb{C}_{\mathbb{D}elta_p}$ contains the half-plane $$ t> \max \left\{\tfrac{|{\rm{Im} } \ \mu_j|}{\lambda_j}\, : \, j=1,...,f \right\}. $$ Certainly, $\mathbb{C}_{\mathbb{D}elta_p}$ is a simply connected domain on which $\mathbb{D}elta_p$ is analytic and free of zeros. Thus, there exists a continuous argument function of $\mathbb{D}elta_p$ which we denote by $\arg \mathbb{D}elta_p$ and normalize such that $$ \arg \mathbb{D}elta_p(\tfrac{1}{2})\in[-\pi,\pi), $$ provided that $\frac{1}{2}\in\mathbb{C}_{\mathbb{D}elta_p}$. If this is not the case, we normalize $\arg\mathbb{D}elta_p$ such that $$ \lim_{\sigma \rightarrow \frac{1}{2}+} \arg\mathbb{D}elta_p(\sigma) \in [-\pi,\pi). $$ With these conventions, $$ \log \mathbb{D}elta_p(s):= \log |\mathbb{D}elta_p(s)| + i\arg \mathbb{D}elta_p(s) $$ defines an analytic logarithm and \begin{equation}\label{Delta12} \mathbb{D}elta_p(s)^{1/2}:= |\mathbb{D}elta_p(s)|^{1/2}\exp\left(i\tfrac{1}{2}\arg \mathbb{D}elta_p(s)\right). \end{equation} an analytic square root function of $\mathbb{D}elta_p$ on $\mathbb{C}_{\mathbb{D}elta_p}$. \section{The class \texorpdfstring{$\mathcal{G}$}{G} }\label{sec:classG} In the following, let $\mathcal{D}$ denote the half-strip defined by $$ 0<\sigma<1, \qquad t>0. 
$$ {\bf Definition of the class $\mathcal{G}$.} A function $G\not\equiv 0$ belongs to the class $\mathcal{G}$ if it is meromorphic in the half-strip $\mathcal{D}$ and if it satisfies a Riemann-type functional equation. By this, we mean that there is a parameter tuple $$ p:=(\omega, Q, \lambda_1,...,\lambda_f, \mu_1,...,\mu_f), \qquad f\in\mathbb{N}_0, $$ which consists of positive real numbers $Q, \lambda_1,..., \lambda_f$ and complex numbers $\omega, \mu_1,...,\mu_f$ with $|\omega| = 1$ such that \begin{equation}\label{fcteqG} G(s) = \Delta_{p} (s) \overline{G(1-\overline{s})} \end{equation} for $s\in\mathcal{D}$, where $\Delta_p(s)$ is defined by \eqref{def:Delta_p}. \par {\bf Uniqueness of the functional equation and invariants for $G\in\mathcal{G}$.} In the following, let $G\in\mathcal{G}$ and $p_G$ be an admissible parameter tuple for which $G$ solves the functional equation \eqref{fcteqG}. Suppose that there is a further admissible parameter tuple $p'_G\neq p_G$ for which $G$ satisfies \eqref{fcteqG}. Since $G(s)/\overline{G(1-\overline{s})}$ defines a meromorphic function in the half-strip $\mathcal{D}$, the identity principle yields that $\Delta_{p_G}(s)=\Delta_{p'_G}(s)$ for $s\in\mathbb{C}$. Thus, to every function $G\in\mathcal{G}$, there corresponds a unique functional equation of the form \eqref{fcteqG} with a uniquely determined function $\Delta_{p_G}$, which we denote from now on by $\Delta_G:=\Delta_{p_G}$. For $G\in\mathcal{G}$, the admissible parameter tuple $p_{G}$ leading to the function $\Delta_{G}$ is in general not uniquely determined. However, from the preceding section we know that the quantities $d_{p_G}$, $Q^2\lambda_{p_G}$, ${\rm{Im} } \ \mu_{p_G}$ and $\omega_{p_G}$, defined in Lemma \ref{lem:asym_Delta_p} via the data of $p_G$, do not depend on the choice of $p_G$. 
As for every $G\in\mathcal{G}$ the function $\mathbb{D}elta_G$ of the functional equation is uniquely determined, we can understand these characteristic quantities of $\mathbb{D}elta_G$ also as characteristic quantities of $G$. In particular, we refer to $d_{p_G}$ not only as degree of $\mathbb{D}elta_G$ but also as degree of $G$ and write $$ d_G := d_{p_G}= 2\sum_{j=1}^{f} \lambda_j . $$ {\bf The critical line.} Due to the functional equation \eqref{fcteqG}, the elements of $\mathcal{G}$ obey a certain symmetry with respect to the line $\sigma=\frac{1}{2}$. For this reason, we refer to the line $\sigma=\frac{1}{2}$ as {\it critical line} of a function $G\in\mathcal{G}$.\par {\bf Elements in $\mathcal{G}$.} Certainly, the class $\mathcal{G}$ contains all elements of the extended Selberg class $\mathcal{S}^{\#}$ and, thus, all elements of the Selberg class $\mathcal{S}$: $$ \mathcal{S} \subset \mathcal{S}^{\#} \subset \mathcal{G}. $$ The set of parameter tuples $p$ for which we can actually find solutions of the functional equation \eqref{fcteqG} inside $\mathcal{S}$ or $\mathcal{S}^{\#}$ is limited. For $d\geq 0$, we set $$ \mathcal{S}_d^{\#}:=\left\{\mathcal{L}\in\mathcal{S}^{\#} \, : \, d_{\mathcal{L}}=d \right\}\qquad \mbox{ and } \qquad \mathcal{S}_d:=\left\{\mathcal{L}\in\mathcal{S} \, : \, d_{\mathcal{L}}=d \right\}. $$ Obviously, $1\in\mathcal{S}_0$ and $\zeta^n \in \mathcal{S}_n$ for every $n\in\mathbb{N}$. The degree conjecture for the (extended) Selberg class asserts that $$ \bigcup_{d\in\mathbb{R}^+_0 \setminus \mathbb{N}_0 } \mathcal{S}_d^{\#} = \emptyset \qquad \mbox{ and } \qquad \bigcup_{d\in\mathbb{R}^+_0 \setminus \mathbb{N} } \mathcal{S}_d = \{1\}. $$ There are some results in support of this conjecture: Conrey \& Gosh \cite{conreygosh:1993} obtained that all functions $\mathcal{L}\in\mathcal{S}\setminus\{1\}$ have degree $d_{\mathcal{L}}\geq 1$. 
Essentially, it was already known to Richert \cite{richert:1957} and Bochner \cite{bochner:1958} that there are no functions $\mathcal{L}\in\mathcal{S}$ with degree $0<d_{\mathcal{L}}<1$. Using the machinery of linear and non-linear twists, Kaczorowski \& Perelli \cite{kaczorowskiperelli:2002, kaczorowskiperelli:2005, kaczorowskiperelli:2011} succeeded to prove that there are neither functions $\mathcal{L}\in\mathcal{S}^{\#}$ of degree $0<d_{\mathcal{L}}<1$ nor of degree $1<d_{\mathcal{L}}<2$. Beyond the degree conjecture, one expects that $\mathcal{S}_n^{\#}$, $n\in\mathbb{N}_0$, does not contain `too many' elements. Hamburger's theorem for the Riemann zeta-function (see Titchmarsh \cite[\S 2.13]{titchmarsh:1986}) gives a first impression on how a Riemann-type functional equation invokes strong restrictions on the Dirichlet series coefficients of $\mathcal{L}\in\mathcal{S}^{\#}$. It is a challenging problem to classify all elements in $\mathcal{S}^{\#}$ of given degree $d\in\mathbb{N}_0$. Kaczorowski \& Perelli \cite{kaczorowskiperelli:1999} proved that the Riemann zeta-function and shifts $L(s+i\theta,\chi)$ of Dirichlet $L$-functions attached to a primitive character with $\theta\in\mathbb{R}$ are the only functions in $\mathcal{S}_1$.\par The situation changes if one is looking for solutions of the functional equation $\eqref{fcteqG}$ among generalized Dirichlet series. Let $\mathcal{C}$ denote the set of generalized Dirichlet series $$ A(s)=\sum_{n=1}^{\infty} a(n) e^{-\lambda_n s}, $$ which are absolutely convergent in some half-plane $\sigma\geq \sigma_0$, admit an analytic continuation to $\mathbb{C}$ as an entire function of finite order and satisfy $$ 0=\lambda_0< \lambda_1 < \lambda_2 < ...\, , \qquad \lim_{n\rightarrow\infty}\lambda_n = \infty \qquad \mbox{ and } \qquad a(n)\in\mathbb{C} \quad \mbox{ for }n\in\mathbb{N} . 
$$ Kaczorowski \& Perelli \cite{kaczorowskiperelli:2004} showed that for every admissible parameter tuple $p$ with ${\rm{Re} } \ \mu_j \geq 0$ for $j=1,...,f$, the real vector space of all solutions $A\in\mathcal{C}$ of the functional equation $$ A(s)=\Delta_p(s) \overline{A(1-\overline{s})} $$ has an uncountable basis. \par We have seen that $\mathcal{G}$ contains both functions represented by an ordinary Dirichlet series and functions represented by a generalized Dirichlet series. Beyond this, there are also functions in $\mathcal{G}$ which cannot be written as a Dirichlet series. Let $\Delta_p(s)$ be as in \eqref{def:Delta_p} with an admissible parameter tuple $p$ and $f$ a meromorphic function in $\mathcal{D}$, then the function $G_{f,p}$ given by $$ G_{f,p}(s):= f(s)+ \Delta_p(s)\overline{f(1-\overline{s})} $$ is meromorphic in $\mathcal{D}$ and satisfies the functional equation $$G_{f,p}(s)=\Delta_p(s)\overline{G_{f,p}(1-\overline{s})}.$$ Hence, $G_{f,p}$ is an element of $\mathcal{G}$. In general, $G_{f,p}$ does not have a Dirichlet series representation. Gonek modeled the Riemann zeta-function on the critical line by truncated Euler products. More precisely, he worked with $$ \zeta_{X}(s):=P_X(s) + \Delta_{\zeta}(s)\overline{P_X(1-\overline{s})}, $$ where $$ P_X(s):= \exp\left(\sum_{n\leq X} \frac{\Lambda_X(n)}{n^s\log n} \right), \qquad X\geq 2, $$ and $\Lambda_X$ is a suitably weighted version of the von Mangoldt function. We deduce immediately that $\zeta_X$ is an element of $\mathcal{G}$. We refer to Gonek \cite{gonek:2012} and Christ, Kalpokas \& Steuding \cite{christkalpokassteuding:2010} for results on the analytic behaviour of $\zeta_X$ on the critical line and its intimate connection to the Riemann zeta-function. \par Other examples of functions in $\mathcal{G}$ can be constructed as follows: let $G\in\mathcal{G}$ and let $f$ be any meromorphic function in the strip $0<\sigma<1$.
Then, it is easy to see that the function $H_{f,G}$ defined by $$ H_{f,G}(s):= G(s)\left( f(s) + \overline{f(\overline{s})} + f(1-s) + \overline{f(1-\overline{s})} \right), $$ is an element of $\mathcal{G}$. Functions of the latter type were used by Gauthier \& Zeron \cite{gauthierzeron:2004} to construct functions that share several properties with the Riemann zeta-function (same functional equation, simple pole at $s=1$, reflection principle) but have prescribed zeros off the critical line. \par {\bf An analogue of Hardy's $Z$-function.} For $X\subset \mathbb{C}$ and $a,b\in\mathbb{C}$, we set $$ aX+b := \left\{ax+b \, : \, x\in X \right\}. $$ For $G\in\mathcal{G}$, let $\mathbb{C}_{\mathbb{D}elta_G}$ and $\mathbb{D}elta_G(\tfrac{1}{2}+it)^{1/2}$ be defined as in \eqref{CDelta} and \eqref{Delta12}. We set \begin{equation}\label{Dstar} \mathcal{D}^* :=\mathcal{D}^*_{G}:= -i\cdot \left(\mathbb{C}_{\mathbb{D}elta_G}\cap \mathcal{D} \right) +i\tfrac{1}{2}. \end{equation} For $G\in\mathcal{G}$ and $t\in \mathcal{D}^*$, we define $$ Z_G(t) := G(\tfrac{1}{2}+it) \mathbb{D}elta_G(\tfrac{1}{2}+it)^{-1/2}. $$ The function $Z_G(t)$ forms the analogue for $G\in\mathcal{G}$ of Hardy's classical $Z$-function and allows us to model $G$ on the critical line as a real-valued function. \begin{lemma} \label{lem:hardyZ} Let $G\in\mathcal{G}$ and $\mathcal{D}^*$ be defined by \eqref{Dstar}. Then, the function $$ Z_G(t) := G(\tfrac{1}{2}+it) \mathbb{D}elta_G(\tfrac{1}{2}+it)^{-1/2} $$ is meromorphic on $ \mathcal{D}^*$. Moreover, for real $t\in\mathcal{D}^*$, $$ Z_G(t)\in\mathbb{R} \qquad \mbox{ and } \qquad \left| Z_G(t)\right| = \left|G(\tfrac{1}{2}+it) \right|. $$ \end{lemma} \begin{proof} It is immediately clear that $Z_G(t) = G(\tfrac{1}{2}+it) \mathbb{D}elta_G(\tfrac{1}{2}+it)^{-1/2}$ defines a meromorphic function on the domain $\mathcal{D}^*$. In the sequel, we assume that $t\in\mathcal{D}^*$ is real. 
Then, it follows from the functional equation that $$ Z_G(t)=G(\tfrac{1}{2}+it) \Delta_G(\tfrac{1}{2}+it)^{-1/2} = \overline{G(\tfrac{1}{2}+it) }\Delta_G(\tfrac{1}{2}+it)^{1/2}. $$ Using the relation $\Delta_G(s)\overline{\Delta_G(1-\overline{s})}=1$, we deduce that $$ \Delta_G(\tfrac{1}{2}+it)^{1/2} = \overline{\Delta_G(\tfrac{1}{2}+it)^{-1/2} }. $$ It follows that ${\rm{Im} } \ Z_G(t)=0$ and, consequently, that $Z_G(t)\in\mathbb{R}$. Moreover, since $$|\Delta_G(\tfrac{1}{2}+it)| =1,$$ we obtain that $\left| Z_G(t)\right| = \left|G(\tfrac{1}{2}+it) \right|$. \end{proof} \begin{figure} \caption{Hardy's classical $Z$-function for the Riemann zeta-function, $t\mapsto Z_{\zeta}(t)$.} \end{figure} {\bf Some special representations for $G\in\mathcal{G}$.} By Lemma \ref{lem:hardyZ}, we can write any given function $G\in\mathcal{G}$ in the form \begin{equation}\label{repG1} G(\tfrac{1}{2}+it) = Z_G (t) \Delta_G(\tfrac{1}{2}+it)^{1/2}, \qquad \mbox{ for } t\in\mathcal{D}^*. \end{equation} There is a further possibility to represent a given function $G\in\mathcal{G}$. For $G\in\mathcal{G}$, we define $$ \mathcal{D}_{1/2}:=\mathcal{D}_{1/2,G}:= \left(\mathbb{C}_{\Delta_G}\cap \mathcal{D} \right) - \tfrac{1}{2}. $$ Moreover, we set $f_G(z):=Z_G (-iz)$. Then, \begin{equation}\label{repG} G(\tfrac{1}{2}+z) = f_G (z) \Delta_G(\tfrac{1}{2}+z)^{1/2}\qquad \mbox{ for }z\in\mathcal{D}_{1/2}. \end{equation} Here, the function $f_G$ satisfies a certain reflection principle. Since $Z_G(t)$ is real for real $t\in\mathcal{D}^*$, the relation $$ f_G(z) = Z_G(-iz) = \overline{Z_G(\overline{-iz})} = \overline{Z_G(i\overline{z})} = \overline{f_G(-\overline{z})} $$ holds for all purely imaginary $z\in\mathcal{D}_{1/2}$ and, thus, by the identity principle, for all $z\in\mathcal{D}_{1/2}$. This implies that $f_G$ is real on the intersection of $\mathcal{D}_{1/2}$ with the imaginary axis.
\chapter{A modified concept of universality near the critical line}\label{ch:conceptsuniv} In this chapter, we study the collapse of the Voronin-type universality property of the Riemann zeta-function at the critical line and discuss a modified concept of universality.\par In section \ref{sec:failure}, we briefly discuss for which $\mathcal{L}$-functions a Voronin-type universality statement is known to be true. We provide a heuristic explanation that this universality property cannot persist beyond the critical line.\par In section \ref{sec:shiftingshrinking}, we try to maintain universality on the critical line by slightly changing the concept. Roughly speaking, we add a rescaling (or zooming) factor to the shifts that occur in Voronin's universality theorem and establish a limiting process in funnel-shaped neighbourhoods of the critical line. It will turn out that it is essentially the symmetry given by the functional equation that restricts the functions to be obtained by this process. For this reason, we investigate this process not only for the Riemann zeta-function but for all functions of the class $\mathcal{G}$, i.e. all functions that are meromorphic around the line $\sigma=\frac{1}{2}$ and satisfy a Riemann-type functional equation.\par In section \ref{sec:convnonconv}, we discuss convergence and non-convergence issues of this limiting process. \section{Failure of Voronin's universality theorem around the critical line}\label{sec:failure} Building on works of Bohr \cite{bohrcourant:1914,bohrjessen:1930,bohrjessen:1932} and his collaborators, Voronin \cite{voronin:1975} established a remarkable universality theorem for the Riemann zeta-function; see Theorem \ref{th:universality}. In the meantime, similar universality properties were discovered for many other $\mathcal{L}$-functions. 
Examples include Dirichlet $L$-functions (see Voronin \cite{voronin:1977}, Gonek \cite{gonek:1979} and Bagchi \cite{bagchi:1982}), Dedekind zeta-functions (see Voronin \cite{voronin:1977}, Gonek \cite{gonek:1979} and Reich \cite{reich:1977,reich:1980}) and Hecke $L$-functions to gr\"ossencharacters (see Mishou \cite{mishou:2003}).\par There are even $\mathcal{L}$-functions with a stronger universality property. For them the restriction on the target function $g$ in Voronin's universality theorem to be non-vanishing on $\mathcal{K}$ can be omitted. Here, prominent examples are Hurwitz zeta-functions whose allied parameter is either transcendental or rational but not equal to $\frac{1}{2}$ or $1$; see Bagchi \cite{bagchi:1981} and Gonek \cite{gonek:1979}. For a comprehensive account on different universal $\mathcal{L}$-functions, we refer the reader to Steuding \cite[Sect. 1.4-1.6]{steuding:2007}.\par Steuding \cite[Sect. 5.6]{steuding:2007} established a universality theorem for a large class of Dirichlet series. In particular, his results imply that every function $\mathcal{L}\in\mathcal{S}$ which has a polynomial Euler product (S.3*) and satisfies the prime mean-square condition (S.6) is universal in the sense of Voronin inside the strip $ \sigma_m<\sigma<1, $ where $\sigma_m$ denotes the abscissa of bounded mean-square of $\mathcal{L}$ which we define rigorously in Section \ref{subsec:meansquare}. For $\mathcal{L}\in\mathcal{S}$, we know that $ \sigma_m \leq \max\{\tfrac{1}{2},1-\tfrac{1}{d_{\mathcal{L}}}\}<1$, and under the assumption of the Lindel\"of hypothesis, that $\sigma_m\leq \frac{1}{2}$. \footnote{For details we refer to Section \ref{sec:unboundedness}} The critical line is a natural boundary for universality in the Selberg class. For a heuristic explanation, we restrict to the Riemann zeta-function and assume the truth of the Riemann hypothesis. 
Firstly, we observe that Voronin's universality theorem for the Riemann zeta-function implies that, for any compact set $\mathcal{K}$ inside the strip $\frac{1}{2}<\sigma<1$ with connected complement and any continuous, non-vanishing function $g$ (resp. $g\equiv\infty$) on $\mathcal{K}$ which is analytic in the interior of $\mathcal{K}$, there is a sequence $(\tau_k)_k$ of positive real numbers tending to infinity such that \begin{equation}\label{eq:limitshifts} \zeta(s+i\tau_k) \rightarrow g(s) \end{equation} uniformly on $\mathcal{K}$, as $k\rightarrow\infty$. This phenomenon collapses around the critical line $\sigma=\frac{1}{2}$. Let $D_r(\frac{1}{2})$ be the open disc with center $\frac{1}{2}$ and radius $0<r<\frac{1}{4}$. Assume that there is a sequence $(\tau_k)_k$ of positive real numbers tending to infinity such that $\zeta(s+i\tau_k)$ converges locally uniformly on $D_r(\frac{1}{2})$ to an analytic function $g\not\equiv 0$ (resp. to $g\equiv \infty$). As $g$ is analytic on $D_r(\frac{1}{2})$, it follows that $g$ has at most finitely many zeros in $D_r(\frac{1}{2})$. According to a result of Littlewood \cite{littlewood:1924}, there is a positive constant $A$ such that, for every sufficiently large $t$, the interval $$\left(t- \frac{A}{\log\log\log t}, t + \frac{A}{\log\log\log t}\right)$$ contains at least one ordinate of a non-trivial zero of the Riemann zeta-function. As we assumed the truth of the Riemann hypothesis, all non-trivial zeros can be located on the critical line. Thus, the number of zeros of $\zeta(s+i\tau_k)$ in $D_{r/2}(\frac{1}{2})\subset D_r(\frac{1}{2})$ tends to infinity. By the theorem of Hurwitz (Theorem \ref{th:hurwitz} in the appendix), we can conclude that $g\equiv 0$ is the only possible limit function that can be obtained in this case.\par Proposition \ref{mainprop} (a) of the next section provides an unconditional proof for the failure of Voronin's universality theorem around the critical line. 
We shall see that it is essentially the functional equation that is responsible for the collapse of universality. \section{A limiting process in neighbourhoods of the critical line}\label{sec:shiftingshrinking} In this section, we try to `rescue' universality on $\sigma=\frac{1}{2}$ by slightly changing the concept.\par Concepts of universality appear in various areas of analysis. There are real universal functions due to Fekete (see Steuding \cite[Appendix]{steuding:2007}), entire universal functions of Birkhoff-type due to Birkhoff \cite{birkhoff:1929} and entire universal functions of MacLane-type due to MacLane \cite{maclane:1952}. There are universal Taylor series due to Luh \cite{luh:1986} and a concept of universality for Fourier series of continuous functions on $\partial\mathbb{D}$ due to M\"uller \cite{mueller:2010}. Rubel \cite{rubel:1981} discovered a universal differential equation; see Elsner \& Stein \cite{elsnerstein:2011} for an overview on recent developments. There is a concept of differential universality for the Riemann zeta-function for which we refer to Christ, Steuding \& Vlachou \cite{christsteudingvlachou:2013}. The theory of hypercyclic operators investigates universality phenomena in a rather abstract topological and functional analytic setting; see the textbook of Bayart \& Matheron \cite{bayartmatheron:2009}. For a comprehensive survey on different concepts of universality the reader is referred to Grosse-Erdmann \cite{grosseerdmann:1999} and Steuding \cite[Appendix]{steuding:2007}.\par Andersson \cite{andersson:arxiv-1} discussed a universality property for the Riemann zeta-function on the critical line. He restricted the target functions $g$ to compact line segments $L_{\alpha}:=[\frac{1}{2}-i\alpha, \frac{1}{2}+i\alpha]$ with $\alpha>0$, i.e. connected compact sets without interior points, and asked for approximating $g$ by vertical shifts of the zeta-function on $\sigma=\frac{1}{2}$. 
He found out that among all continuous functions on $L_{\alpha}$ only the function $g\equiv 0$ (resp. $g\equiv\infty$) might possibly be approximated in this way. \par We shall pursue the following approach: we add a scaling factor to the vertical shifts in \eqref{eq:limitshifts} and try to figure out which target functions $g$ are to be obtained by this modified limiting process. We investigate the latter not only for the Riemann zeta-function but for general functions of the class $\mathcal{G}$. \par In the sequel, let $\mathcal{M}(\Omega)$ denote the set of meromorphic functions on a domain $\Omega\subset \mathbb{C}$. Moreover, let $\mathcal{H}(\Omega)\subset\mathcal{M}(\Omega)$ denote the set of analytic functions on $\Omega$. For families $\mathcal{F}\subset \mathcal{M}(\Omega)$ there is a notion of normality. We refer to the appendix for basic definitions and fundamental results of this concept.\par Let $\mu:[2,\infty) \rightarrow\mathbb{R}^+ $ be a positive function satisfying $\mu(\tau)\leq \frac{1}{2}\log\tau$ for all $\tau\in[2,\infty)$. Every function $\mu$ induces a corresponding family $\{\varphi_{\mu(\tau)}\}_{\tau\in[2,\infty)}$ of linear conformal mappings \begin{equation}\label{eq:varphidef} \varphi_{\mu(\tau)}:\mathbb{D}\rightarrow \mathbb{C} \qquad \mbox{ by } \qquad s:=\varphi_{\mu(\tau)} (z):=\frac{1}{2} + \frac{\mu(\tau)}{\log\tau} z + i\tau . \end{equation} We observe that $\varphi_{\mu(\tau)}$ maps the center of the unit disc $\mathbb{D}$ to $\frac{1}{2}+i\tau$ and shrinks its radius linearly by a factor $$\lambda(\tau):=\frac{\mu(\tau)}{\log \tau}.$$ We call $\lambda(\tau)$ the scaling (or zooming) factor of $\varphi_{\mu(\tau)}$. The action of the map $\varphi_{\mu(\tau)}$ is illustrated in Figure \ref{fig:phi}.
\par \begin{figure} \caption{The action of the conformal maps $\varphi_{\tau}$.} \label{fig:phi} \end{figure} The condition $\mu(\tau)\leq \frac{1}{2}\log\tau$ assures that, for any $\tau\in[2,\infty)$, the image domain $\varphi_{\mu(\tau)}(\mathbb{D})$ lies completely inside the half-strip $\mathcal{D}$. Thus, for any function $G\in\mathcal{G}$ and any $\tau\in[2,\infty)$, \begin{equation}\label{Gtau} G_{\mu(\tau)}(z):=(G\circ \varphi_{\mu(\tau)}) (z) = G( \varphi_{\mu(\tau)} (z) ) = G\left(\frac{1}{2}+\frac{\mu(\tau)}{\log\tau} z + i\tau \right) \end{equation} defines a meromorphic function on $\mathbb{D}$. For the sake of simplicity, we usually write $\varphi_{\tau}$ instead of $\varphi_{\mu(\tau)}$ and $G_{\tau}$ instead of $G_{\mu(\tau)}$.\par In the following we regard, for a given function $G\in\mathcal{G}$ and a given conformal mapping $\varphi_{\tau}$, the family $\mathcal{F}:=\{G_{\tau}\}_{\tau\in[2,\infty)}\subset\mathcal{M}(\mathbb{D})$. We try to figure out which functions $g\in\mathcal{M}(\mathbb{D})$ appear as limit functions of convergent sequences in $\mathcal{F}$. The following proposition shows that the set of possible limit functions depends essentially on the speed with which the scaling factor $\lambda(\tau)$ tends to zero as $\tau\rightarrow\infty$ and that the shape of the limiting functions is strongly affected by the functional equation of $G$. \begin{proposition}\label{mainprop} Let $G\in\mathcal{G}$ with degree $d_G>0$ and $\mu:[2,\infty) \rightarrow\mathbb{R}^+ $ be a positive function satisfying $\mu(\tau)\leq \frac{1}{2}\log\tau$ for $\tau\in[2,\infty)$. Let $\{G_{\tau}\}_{\tau\in[2,\infty)}$ be the family of functions on $\mathbb{D}$, generated by $G$ and $\mu$ via \eqref{Gtau}.\\ Assume that there is a sequence $(\tau_k)_k$ of real numbers $\tau_k\in[2,\infty)$ with $\lim_{k\rightarrow \infty} \tau_k = \infty$ such that $(G_{\tau_k})_k $ converges locally uniformly on $\mathbb{D}$ to a limit function $g$.
\begin{itemize} \item[(a)] If $\lim_{k\rightarrow\infty}\mu(\tau_k) = \infty$, then $ g\equiv 0$ or $ g\equiv \infty. $ \item[(b)] If $\lim_{k\rightarrow\infty}\mu(\tau_k) = c$ with some $c\in(0,\infty)$, then $g\equiv\infty$ or $g$ is of the form \begin{equation}\label{shape1} g(z)=f_g(z)\exp\left(-\tfrac{cd_G}{2}z + i \ell\right) \end{equation} with some $\ell \in [0,\pi)$ and some meromorphic function $f_g$ on $\mathbb{D}$ satisfying $$f_g(z)=\overline{f_g(-\overline{z})} \qquad \mbox{ for } z\in\mathbb{D}.$$ \item[(c)] If $\lim_{k\rightarrow\infty}\mu(\tau_k) = 0$, then $g\equiv \infty$ or $g$ is of the form \begin{equation}\label{shape2} g(z)=f_g(z)\exp\left( i \ell\right) \end{equation} with some $\ell \in [0,\pi)$ and some meromorphic function $f_g$ on $\mathbb{D}$ satisfying $$f_g(z)=\overline{f_g(-\overline{z})}\qquad \mbox{ for } z\in\mathbb{D}.$$ \end{itemize} \end{proposition} The condition $f_g(z)=\overline{f_g(-\overline{z})}$ implies that $f_g$ is real on the intersection of the unit disc with the imaginary axis. As we shall see from the proof of Proposition \ref{mainprop}, the shapes \eqref{shape1} and \eqref{shape2} of the limit functions $g$ actually result from the representation of $G\in\mathcal{G}$ as $$ G(\tfrac{1}{2}+z) = f_G (z) \mathbb{D}elta(\tfrac{1}{2}+z)^{1/2}\qquad \mbox{ for every }z\in\mathcal{D}_{1/2} $$ with a certain function $f_G$ satisfying $f_G(z)=\overline{f_G(-\overline{z})}$; see \eqref{repG}.\par Proposition \ref{mainprop} is just hypothetical: we assume the convergence of the limiting process. In general, however, it seems very difficult to verify that a given sequence $(G_{\tau_k})_k$ converges locally uniformly on $\mathbb{D}$ or not. We will postpone convergence, resp. non-convergence issues to Section \ref{sec:convnonconv}. 
\par If we restrict in Proposition \ref{mainprop} to the Riemann zeta-function and assume the truth of the Riemann hypothesis, we get additional constraints on the shape of the possible limit functions: \begin{itemize} \item[(i)] According to Littlewood \cite{littlewood:1924}, there is a constant $A>0$ such that, for every sufficiently large $t$, the interval $$\left(t-\frac{A}{\log\log\log t}, t + \frac{A}{\log\log\log t}\right)$$ contains at least one imaginary part of a non-trivial zero of the Riemann zeta-function. Under the assumption of the Riemann hypothesis, all non-trivial zeros lie on the critical line. Thus, if $$\lambda(\tau)=\frac{\mu(\tau)}{\log\tau}>\frac{2A}{\log\log \log \tau}$$ for sufficiently large $\tau$, we can exclude the case $g\equiv\infty$ in Proposition \ref{mainprop} (a). \item[(ii)] The truth of the Riemann hypothesis implies that the logarithmic derivative of Hardy's $Z$-function $Z_{\zeta}(t)$ is monotonically decreasing between two sufficiently large consecutive zeros of $Z_{\zeta}(t)$; see Ivi\'{c} \cite[Chapt. 2.3]{ivic:2013}. By \eqref{repG1} and \eqref{repG}, this has effects on the shape of the limit functions $g$ in Proposition \ref{mainprop}.\par \end{itemize} With suitable adaptations it is also possible to state Proposition \ref{mainprop} for functions $G\in\mathcal{G}$ with degree $d_G =0$. \begin{proposition}\label{mainpropzero} Let $G\in\mathcal{G}$ with degree $d_G=0$ and $\mu:[2,\infty) \rightarrow\mathbb{R}^+ $ be a positive function satisfying $\mu(\tau)\leq \frac{1}{2}\log\tau$ for $\tau\in[2,\infty)$. Let $\{G_{\tau}\}_{\tau\in[2,\infty)}$ be the family of functions on $\mathbb{D}$, generated by $G$ and $\mu$ via \eqref{Gtau}.\\ Assume that there is a sequence $(\tau_k)_k$ of real numbers $\tau_k\in[2,\infty)$ with $\lim_{k\rightarrow \infty} \tau_k = \infty$ such that $(G_{\tau_k})_k $ converges locally uniformly on $\mathbb{D}$ to a limit function $g$.
\begin{itemize} \item[(a)] If $\lim_{k\rightarrow\infty}\frac{\mu(\tau_k)}{\log\tau_k} =c$ with some $c\in(0,\frac{1}{2}]$, then $g\equiv\infty$ or $g$ is of the form \begin{equation*} g(z)=f_g(z)\exp\left(-\tfrac{c \log Q^2}{2}z + i \ell\right) \end{equation*} with some $\ell \in [0,\pi)$ and some meromorphic function $f_g$ on $\mathbb{D}$ satisfying $$f_g(z)=\overline{f_g(-\overline{z})} \qquad \mbox{ for } z\in\mathbb{D}.$$ \item[(b)] If $\lim_{k\rightarrow\infty}\frac{\mu(\tau_k)}{\log\tau_k} = 0$, then $g\equiv \infty$ or $g$ is of the form \begin{equation*} g(z)=f_g(z)\exp\left( i \ell\right) \end{equation*} with some $\ell \in [0,\pi)$ and some meromorphic function $f_g$ on $\mathbb{D}$ satisfying $$f_g(z)=\overline{f_g(-\overline{z})}\qquad \mbox{ for } z\in\mathbb{D}.$$ \end{itemize} As Proposition \ref{mainpropzero} can be proved by essentially the same method as Proposition \ref{mainprop} and as we are not too much interested in functions of degree zero in the further course of our investigations, we omit a proof.\par According to Kaczorowski \& Perelli \cite{kaczorowskiperelli:1999}, every function $\mathcal{L}\in\mathcal{S}^{\#}$ of degree $d_{\mathcal{L}}=0$ is given by a certain Dirichlet polynomial. This implies that $\mathcal{L}\in\mathcal{S}^{\#}$ with $d_{\mathcal{L}}=0$ is bounded in the half-strip $\mathcal{D}$. Thus, if we restrict ourselves in Proposition \ref{mainpropzero} to functions $\mathcal{L}\in\mathcal{S}^{\#}$ with $d_{\mathcal{L}}=0$, Montel's theorem assures that the family $\{\mathcal{L}_{\tau}\}_{\tau\in[2,\infty)}$ is normal for any admissible function $\mu$. {\bf Proof of Proposition \ref{mainprop}:} Before proving Proposition \ref{mainprop}, we start with some lemmas for the function $\Delta_p$.
Recall that $\mathbb{D}elta_p$ depends on the parameter tuple $p=(\omega, Q,\lambda_1,...,\lambda_f,\mu_1,...,\mu_f)$ for which we defined the quantities $d_p$, $\omega_p$, $\mu_p$ and $\lambda_p$; see Lemma \ref{lem:asym_Delta_p}.\par The following lemma provides an asymptotic expansion for $$ \mathbb{D}elta_{p,\tau}(z):=\mathbb{D}elta_p(\varphi_{\tau}(z)) = \mathbb{D}elta_p\left(\frac{1}{2}+\frac{\mu(\tau)}{\log\tau}z + i\tau\right) $$ on $\mathbb{D}$, as $\tau\rightarrow\infty$. \begin{lemma}\label{lem:Delta_p_phi} Let $\mathbb{D}elta_p$ be defined by \eqref{def:Delta_p} and suppose that $d_p>0$. Let $\mu:[2,\infty)\rightarrow \mathbb{R}^+$ be a positive function with $\mu(\tau)\leq \frac{1}{2}\log\tau$ for $\tau\in[2,\infty)$. Let $\{\varphi_{\tau}\}_{\tau\in[2,\infty)}$ be the family of conformal mappings generated by $\mu$ via \eqref{eq:varphidef}. Then, uniformly for $z\in\mathbb{D}$, as $\tau\rightarrow\infty$, \begin{align*} \mathbb{D}elta_{p,\tau}(z):=\mathbb{D}elta_p(\varphi_{\tau}(z)) &= \omega_p \exp \left(-d_p\mu(\tau) z -i\nu_p(\tau) \right) \left(1+O\left(\frac{\mu(\tau)}{\log\tau} \right) +O\left(\frac{1}{\tau} \right) \right) \end{align*} with $$ \nu_p(\tau):= d_p \tau\log\tau + \tau\log(\lambda_p Q^2) -d_p \tau - {\rm{Im} } \ \mu_p \log\tau. $$ \end{lemma} \begin{proof} Respecting the conditions posed on the function $\mu$, the assertion follows directly from the asymptotic expansion for $\mathbb{D}elta_p(s)$ in Lemma \ref{lem:asym_Delta_p} and a short computation. \end{proof} By means of the asymptotic expansion for $\mathbb{D}elta_{p,\tau}$ on $\mathbb{D}$, as $\tau\rightarrow\infty$, we are now able to describe the limit behaviour of sequences in $\{ \mathbb{D}elta_{p,\tau}\}_{\tau\in[2,\infty)}$. \begin{lemma}\label{mainlemma} Let $\mathbb{D}elta_p$ be defined by \eqref{def:Delta_p} and suppose that $d_p>0$. 
Let $\mu:[2,\infty)\rightarrow \mathbb{R}^+$ be a positive, (not necessarily strictly) monotonically decreasing or increasing function with $\mu(\tau)\leq \frac{1}{2}\log\tau$ for $\tau\in[2,\infty)$. Let $\{\varphi_{\tau}\}_{\tau\in[2,\infty)}$ be the family of conformal mappings generated by $\mu$ via \eqref{eq:varphidef} and set $\mathbb{D}elta_{p,\tau}(z):=\mathbb{D}elta_p(\varphi_{\tau}(z))$ for $z\in\mathbb{D}$ and $\tau\geq 2$. \begin{itemize} \item[(a)] If $\lim_{\tau\rightarrow\infty}\mu(\tau) = \infty$, then there is no sequence $(\tau_k)_k$ of real numbers $\tau_k\in[2,\infty)$ with $\lim_{k\rightarrow\infty}\tau_k = \infty$ such that $(\mathbb{D}elta_{p,\tau_k})_k$ converges locally uniformly in some neighbourhood of zero. \item[(b)] If $\lim_{\tau\rightarrow\infty}\mu(\tau) = c$ with some $c\in(0,\infty)$, then, for every unbounded subset $A\subset[2,\infty)$, there exists a sequence $(\tau_k)_k$ of real numbers $\tau_k\in A$ with $\lim_{k\rightarrow\infty}\tau_k = \infty$ such that $(\mathbb{D}elta_{p,\tau_{k}})_k$ converges uniformly on $\mathbb{D}$ to a limit function $g$ given by $$ g(z)=a\exp(-cd_p z) \qquad \mbox{ for }z\in\mathbb{D} $$ with some $a\in\mathbb{C}$ satisfying $|a|=1$.\\ Conversely, if $\lim_{\tau\rightarrow\infty}\mu(\tau) = c$ with some $c\in(0,\infty)$, then, for every function $g$ of the form above, there exists a sequence $(\tau_k)_k$ of real numbers $\tau_k\in[2,\infty)$ with $\lim_{k\rightarrow\infty}\tau_k = \infty$ such that $(\mathbb{D}elta_{p,\tau_k})_k$ converges uniformly on $\mathbb{D}$ to $g$. 
\item[(c)] If $\lim_{\tau\rightarrow\infty}\mu(\tau) = 0$, then, for every unbounded subset $A\subset[2,\infty)$, there exists a sequence $(\tau_k)_k$ of real numbers $\tau_k\in A$ with $\lim_{k\rightarrow\infty}\tau_k = \infty$ such that $(\Delta_{p,\tau_{k}})_k$ converges uniformly on $\mathbb{D}$ to a constant limit function $$ g\equiv a $$ with some $a\in\mathbb{C}$ satisfying $|a|=1$.\\ Conversely, if $\lim_{\tau\rightarrow\infty}\mu(\tau) = 0$, then, for every constant function $g\equiv a$ with $|a|=1$, there exists a sequence $(\tau_k)_k$ of real numbers $\tau_k\in[2,\infty)$ with $\lim_{k\rightarrow\infty}\tau_k = \infty$ such that $(\Delta_{p,\tau_k})_k$ converges uniformly on $\mathbb{D}$ to $g$. \end{itemize} \end{lemma} Lemma \ref{mainlemma} (a) follows immediately from the asymptotic expansion for $\Delta_{p,\tau}$ on $\mathbb{D}$ and implies that, for any unbounded subset $A\subset[2,\infty)$, the family $\{\Delta_{p,\tau}\}_{\tau\in A}$ is not normal in any neighbourhood $U\subset \mathbb{D}$ of zero. Thus, by the rescaling lemma of Zalcman (Theorem \ref{th:zalcman}), we find a sequence $(\tau_k)_k$ of real numbers $\tau_k\in A$ with $\lim_{k\rightarrow\infty}\tau_k = \infty$, a sequence $(z_k)_k$ of complex numbers $z_k\in\mathbb{D}$ with $\lim_{k\rightarrow\infty}z_k=0$ and a sequence $(\rho_k)_k$ of positive real numbers $\rho_k$ with $\lim_{k\rightarrow\infty} \rho_k = 0 $ such that $$ h_k(z):= \Delta_{p,\tau_{k}}(z_k+\rho_k z) = \Delta_p\left(\tfrac{1}{2} + \frac{\mu(\tau_{k})}{\log\tau_k}(z_k+\rho_k z) + i\tau_{k}\right) $$ converges locally uniformly on $\mathbb{C}$ to a non-constant entire function. Having this in mind, the statement of Lemma \ref{mainlemma} (b) might not be too surprising.
As the functions $\mathbb{D}elta_p$ are rather smooth, it makes sense that the limit functions of $(\mathbb{D}elta_{p,\tau_k})_k$ are constant, if the underlying scaling factor $\lambda(\tau_k)$ tends to zero fast enough. \begin{proof}[Proof of Lemma \ref{mainlemma}] As all poles and zeros of $\mathbb{D}elta_p$ are located in some horizontal strip, it follows that $\mathbb{D}elta_{p,\tau}$ is analytic and non-vanishing on $\mathbb{D}$ for sufficiently large $\tau$.\par {\bf Case (a): } Let $U\subset \mathbb{D}$ be an arbitrary neighbourhood of zero. Taking into account that $\lim_{\tau\rightarrow\infty} \mu(\tau) = \infty$, the asymptotic expansion of Lemma \ref{lem:Delta_p_phi} yields that, for $z=x+iy\in U$ with real part $x\neq 0$, \begin{align}\label{p1} \lim_{\tau\rightarrow\infty} \mathbb{D}elta_{p,\tau} (z) = \begin{cases} 0 & \mbox{if } x>0,\\ \infty & \mbox{if } x<0.\end{cases} \end{align} If there is a sequence $(\tau_k)_k$ of real numbers $\tau_k\in[2,\infty)$ with $\lim_{k\rightarrow\infty}\tau_k = \infty$ such that $(\mathbb{D}elta_{p,\tau_{k}})_k$ converges locally uniformly in $U$, then, according to the theorem of Weierstrass (Theorem \ref{th:weierstrass}), either $$ g(z):=\lim_{k\rightarrow\infty} \mathbb{D}elta_{p,\tau_k} (z) , \qquad z\in U, $$ defines an analytic function in $U$ or $g\equiv \infty$ in $U$. Both cases, however, are in contradiction to \eqref{p1}.\par {\bf Case (b): } According to our assumption on $\mu$ and the asymptotic expansion for $\mathbb{D}elta_{p,\tau}(z)$ of Lemma \ref{lem:Delta_p_phi}, we have, uniformly for $z=x+iy\in \mathbb{D}$, \begin{align}\label{p2} \lim_{\tau\rightarrow\infty} \left|\mathbb{D}elta_{p,\tau} (z) \right| =|\omega_p|\exp(-cd_px) \leq \exp(cd_p). \end{align} This implies that the family $\mathcal{F}:=\{\mathbb{D}elta_{p,\tau}\}_{\tau\in[2,\infty)}$ is uniformly bounded on $\mathbb{D}$. 
According to the theorem of Montel, every sequence in $\mathcal{F}$ has a subsequence that converges locally uniformly on $\mathbb{D}$ to an analytic function. Thus, we can extract from any given unbounded set $A\subset[2,\infty)$ a sequence $(\tau_k)_k$ with $\lim_{k\rightarrow\infty}\tau_k = \infty$ such that $(\Delta_{p,\tau_{k}})_k$ converges locally uniformly on $\mathbb{D}$ to an analytic function $g$. Next we shall figure out the shape of $g$: By means of \eqref{p2}, we have, uniformly for $z=x+iy\in\mathbb{D}$, $$ |g(z)| = \lim_{k\rightarrow\infty} \left|\Delta_{p,\tau_{k}} (z) \right|= |\omega_p| \exp(-cd_px) = \exp(-cd_px). $$ Obviously, $g$ is non-vanishing on $\mathbb{D}$. This allows us to write $$ g(z)=\omega_p \exp(-cd_px + i f(x,y)) \qquad \mbox{ for }z=x+iy\in\mathbb{D} $$ with some continuously differentiable function $f:\Omega \rightarrow\mathbb{R}$, where $$\Omega=\{(x,y)\in\mathbb{R}^2 \ : \ x+iy\in\mathbb{D}\}.$$ The Cauchy-Riemann differential equations, \begin{align*} \tfrac{\partial}{\partial x}\ {\rm{Re} } \ g &= \tfrac{\partial}{\partial y}\ {\rm{Im} } \ g,\\ \tfrac{\partial}{\partial y}\ {\rm{Re} } \ g &= - \tfrac{\partial}{\partial x}\ {\rm{Im} } \ g, \end{align*} yield that, for $(x,y)\in\Omega$, \begin{align*} \tfrac{\partial}{\partial x}f+\tfrac{\partial}{\partial y}f &=- cd_p,\\ \tfrac{\partial}{\partial x}f-\tfrac{\partial}{\partial y}f &= cd_p. \end{align*} Hence, $f(x,y)= - cd_p y + \ell $ with some constant $\ell\in\mathbb{R}$. By setting $a:=\omega_p e^{i\ell}$, we get $$ g(z)=a\exp(-cd_p z) \qquad \mbox{ for }z\in\mathbb{D}. $$ By rescaling $\varphi_{\tau}$ in a suitable manner, we deduce from Lemma \ref{lem:Delta_p_phi}, that $(\Delta_{p,\tau_k})_k$ converges not only locally uniformly, but even uniformly on $\mathbb{D}$.\par Conversely, let $g:\mathbb{D}\rightarrow\mathbb{C}$ be a function of the form $g(z)=a\exp(-cd_p z)$ with arbitrary $a\in\mathbb{C}$ satisfying $|a|=1$.
We choose $\ell\in[0,2\pi)$ such that $\omega_p e^{i\ell} = a$ and regard the function $$ \nu_p(\tau):= d_p \tau\log\tau + \tau\left(\log(\lambda_p Q^2)-d_p \right) - {\rm{Im} } \ \mu_p \log \tau $$ which is monotonically increasing for sufficiently large $\tau$. We define $(\tau_k)_k$ to be the sequence of all solutions $\tau\geq 2$ of $$ -\nu_p(\tau) \equiv \ell\mod 2\pi $$ in ascending order. As the sequence $(\tau_k)_k$ tends to infinity, the asymptotic expansion for $\mathbb{D}elta_{p,\tau}$ in Lemma \ref{lem:Delta_p_phi} yields that, uniformly for $z\in\mathbb{D}$, \begin{align*} \lim_{k\rightarrow\infty}\mathbb{D}elta_{p,\tau_k}(z) & = \omega_p\exp\left(-cd_p z + i\ell \right)= g(z). \end{align*} The assertion is proved.\par {\bf Case (c):} According to our assumption on $\mu$ and the asymptotic expansion for $\mathbb{D}elta_{p,\tau}$ of Lemma \ref{lem:Delta_p_phi}, we have, uniformly for $z=x+iy\in \mathbb{D}$, \begin{align}\label{p3} \lim_{\tau\rightarrow\infty} \left|\mathbb{D}elta_{p,\tau} (z) \right| =|\omega_p| =1. \end{align} This implies that the family $\{\mathbb{D}elta_{p,\tau}\}_{\tau\in[2,\infty)}$ is uniformly bounded on $\mathbb{D}$. Thus, Montel's theorem assures that, from every unbounded subset $A\subset[2,\infty)$, we can extract a sequence $(\tau_k)_k$ of real numbers $\tau_k\in A$ with $\lim_{k\rightarrow\infty}\tau_k = \infty$ such that $(\mathbb{D}elta_{p,\tau_k})_k$ converges locally uniformly on $\mathbb{D}$ to an analytic function $g$. 
By means of \eqref{p3}, we have, uniformly for $z\in\mathbb{D}$, $$ |g(z)| = \lim_{k\rightarrow\infty} \left|\Delta_{p,\tau_{k}} (z) \right|= 1. $$ Thus, it follows from the open mapping theorem that $g\equiv a$ with some $a\in\mathbb{C}$ satisfying $|a|=1$.\par Conversely, for a given function $g\equiv a$ with $a\in\mathbb{C}$ satisfying $|a|=1$, we can construct a sequence $(\tau_k)_k$ such that $(\Delta_{p,\tau_k})_k$ converges uniformly on $\mathbb{D}$ to $g$ in the same manner as in case (b). \end{proof} \begin{proof}[Proof of Proposition \ref{mainprop}] For $G\in\mathcal{G}$, let $\Delta_G$ denote the factor of the functional equation for $G$. In the following we set $\Delta_{G,\tau}(z):= \Delta_G (\varphi_{\tau}(z))$.\par Note that the locally uniform convergence of a sequence $(G_{\tau_k})_{k}$ on $\mathbb{D}$ implies that its limit function $g$ is either meromorphic on $\mathbb{D}$ or $g\equiv \infty$ on $\mathbb{D}$. {\bf Case (a):} Suppose that $(G_{\tau_k})_{k}$ converges locally uniformly on $\mathbb{D}$ to a meromorphic limit function $g\not\equiv 0$. Then, we find a compact set $\mathcal{K}\subset \mathbb{D}$ with non-empty interior which lies completely to the right of the imaginary axis, i.e. ${\rm{Re} } \ z >0$ for $z\in\mathcal{K}$, and a constant $m>0$ such that $|g(z)|\geq m$ for $z\in\mathcal{K}$. In particular, we have $$ \left|\lim_{k\rightarrow\infty} G(\varphi_{\tau_k}(z))\right| = \left|g(z)\right| \geq m \qquad \mbox{for }z\in\mathcal{K}. $$ If $z\in\mathbb{D}$ with ${\rm{Re} } \ z >0$, then the point $-\overline{z}$ lies also in $\mathbb{D}$ and satisfies ${\rm{Re} } (-\overline{z}) < 0$. Respecting the growth condition posed on $\mu$, the asymptotic expansion of Lemma \ref{lem:asym_Delta_p} yields that $$ \lim_{k\rightarrow\infty}\Delta_{G,\tau_k} (-\overline{z}) = \infty\qquad \mbox{for }z\in\mathcal{K}. 
$$ According to the functional equation of $G$, we get \begin{align*} g(-\overline{z}) & = \lim_{k\rightarrow\infty} G\left(\varphi_{\tau_k}(-\overline{z})\right) = \lim_{k\rightarrow\infty}\Delta_G (\varphi_{\tau_k}(-\overline{z})) \overline{G\left( 1- \overline{\varphi_{\tau_k}(-\overline{z})}\right)} \\ & = \lim_{k\rightarrow\infty}\Delta_p (\varphi_{\tau_k}(-\overline{z})) \overline{G\left( \varphi_{\tau_k}(z)\right)}\\ & = \infty \end{align*} for $z\in\mathcal{K}$. Thus, it follows from the identity principle, applied to the meromorphic function defined by $\overline{g(-\overline{z})}$ on $\mathbb{D}$, that $g\equiv \infty$ in $\mathbb{D}$. This contradicts our assumption. {\bf Case (b):} Suppose that $(G_{\tau_k})_k$ converges locally uniformly on $\mathbb{D}$ to a meromorphic limit function $g$. Certainly, we have for an arbitrary $z \in\mathbb{D}$, \begin{align}\label{eq:lim1} \lim_{k\rightarrow\infty} G(\varphi_{\tau_k}(z)) = g(z). \end{align} Moreover, according to the functional equation, we get that \begin{align*} G(\varphi_{\tau_k }(z)) & = \Delta_p(\varphi_{\tau_k}(z)) \overline{G(1-\overline{\varphi_{\tau_k}(z)})} = \Delta_p(\varphi_{\tau_k}(z)) \overline{G(\varphi_{\tau_k}(-\overline{z}))}. \end{align*} If $z\in\mathbb{D}$, then the point $-\overline{z}$ is also in $\mathbb{D}$. Thus, we obtain that $$ \lim_{k\rightarrow\infty} \overline{ G (\varphi_{\tau_k}(-\overline{z})) } = \overline{ g(-\overline{z})}. $$ Since $(G(\varphi_{\tau_k}(z)))_k$ converges locally uniformly on $\mathbb{D}$, this is also true for $(\overline{G(\varphi_{\tau_k}(-\overline{z}))})_k$. Consequently, $\overline{g(-\overline{z})}$ defines an analytic function on $\mathbb{D}$. According to case (b) of Lemma \ref{mainlemma} we find a subsequence $(\tau_{k_j})_j$ of $(\tau_k)_k$ such that, uniformly for $z\in\mathbb{D}$, $$ \lim_{j\rightarrow\infty} \Delta_p(\varphi_{\tau_{k_j}}(z)) = \exp(-cd_p z + i\ell) $$ with some $\ell\in[0,2\pi)$. 
Thus, \begin{equation}\label{eq:lim2} \lim_{j\rightarrow\infty} G(\varphi_{\tau_{k_j}}(z)) = \overline{g(-\overline{z})} \exp(-cd_p z + i\ell) \end{equation} holds locally uniformly for $z\in\mathbb{D}$. By the uniqueness of the limit function, we deduce from \eqref{eq:lim1} and \eqref{eq:lim2} that $g$ satisfies the functional equation $$ g(z)=\overline{g(-\overline{z})} \exp(-cd_p z + i\ell), \qquad \mbox{ }z\in\mathbb{D}. $$ By setting $$ f_g(z):= g(z)\exp\left(\frac{cd_p}{2} z - i\frac{\ell}{2}\right) $$ the functional equation translates to $$ f_g(z)=\overline{f_g(-\overline{z})}, \qquad \mbox{}z\in\mathbb{D}. $$ It follows that $g(z)=f_g(z)\exp\left(-\frac{cd_p}{2} z + i\frac{\ell}{2}\right) $ for $z\in\mathbb{D}$. The assertion follows by substituting $\ell/2 \mapsto \ell'$.\par {\bf Case (c):} To prove case (c), we follow the lines of the proof for case (b). This time, however, according to Lemma \ref{mainlemma} (c), we find a subsequence $(\tau_{k_j})_j$ of $(\tau_k)_k$ such that, uniformly for $z\in\mathbb{D}$, $$ \lim_{j\rightarrow\infty} \Delta_{p,\tau_{k_j}}(z) = \exp( i\ell) $$ with some $\ell \in[0,2\pi)$. This leads to the functional equation $$ g(z)=\overline{g(-\overline{z})} \exp( i\ell), \qquad \mbox{}z\in\mathbb{D}. $$ Again, by setting $$ f_g(z):= g(z)\exp\left(- i\frac{\ell}{2}\right), $$ this translates to $$ f_g(z)=\overline{f_g(-\overline{z})} \qquad \mbox{ }z\in\mathbb{D}, $$ and yields the representation $g(z)=f_g(z)\exp\left( i\frac{\ell}{2}\right)$. The assertion follows. \end{proof} \section{Convergence and non-convergence of the limiting process}\label{sec:convnonconv} In this section we describe natural mechanisms that enforce the limiting process introduced in the preceding section to converge or not to converge. 
\subsection{Non-convergence of the limiting process}\label{subsec:noncon} Suppose that, for a given function $G\in\mathcal{G}$ and a given family $\{\varphi_{\tau}\}_{\tau\in[2,\infty)}$ of conformal mappings on $\mathbb{D}$, there is an unbounded subset $A \subset[2,\infty)$ such that the corresponding family $\{G_{\tau}\}_{\tau\in A}$ is not normal in $\mathbb{D}$. By Montel's fundamental normality test (Theorem \ref{th:FNT1}), this bears information on the $a$-point-distribution of $G$. Thus, certain non-convergence statements for the limiting process of Section \ref{sec:shiftingshrinking} are of equal interest as convergence statements. We will study non-convergence statements and their connection to the $a$-point-distribution of $G\in\mathcal{G}$ in Chapter \ref{chapt:apoints} in detail. Here, we only state the following observation which follows immediately from Proposition \ref{mainprop} (a). \begin{lemma}\label{lem:nonconvergence} Let $G\in\mathcal{G}$ and $\mu:[2,\infty) \rightarrow\mathbb{R}^+$ be a positive function with $\lim_{\tau\rightarrow\infty}\mu(\tau) = \infty$. For $\tau\in[2,\infty)$ and $z\in\mathbb{D}$, let $\varphi_{\tau}(z):=\tfrac{1}{2}+\frac{\mu(\tau)}{\log\tau}\cdot z + i\tau$ and $G_{\tau}(z):=G(\varphi_{\tau}(z))$. Suppose that there is an $\alpha\in(0,\infty)$ and a sequence $(\tau_k)_k$ of real numbers $\tau_k\in[2,\infty)$ with $\lim_{k\rightarrow\infty}\tau_k = \infty$ such that \begin{equation}\label{ass:nonconvergence} \lim_{k\rightarrow\infty} \left|G(\tfrac{1}{2}+i\tau_k)\right| = \alpha. \end{equation} Then, the sequence $(G_{\tau_{k}})_k$ has no subsequence which converges locally uniformly on $\mathbb{D}$. \end{lemma} \begin{proof} Assume that $(G_{\tau_k})_k$ has a subsequence $(G_{\tau_{k_j}})_j$ that converges locally uniformly on $\mathbb{D}$ to a function $g$. Then, it follows from Proposition \ref{mainprop} (a) that $g\equiv 0$ or $g\equiv \infty$. 
Our assumption \eqref{ass:nonconvergence}, however, yields that $$ g(0)= \lim_{j\rightarrow\infty} G_{\tau_{k_j}}(0) = \lim_{j\rightarrow\infty} G(\tfrac{1}{2}+i\tau_{k_j}) = a $$ with some $a\in\mathbb{C}$ satisfying $|a|=\alpha\in(0,\infty)$. This gives a contradiction and the lemma is proved. \end{proof} Let $G\in\mathcal{G}$. Due to the condition \eqref{ass:nonconvergence} in Lemma \ref{lem:nonconvergence}, it seems reasonable to determine the quantities \begin{equation}\label{def:alpha_inf} \alpha_{G,\scalebox{0.8}{\mbox{inf}}}:=\liminf_{\tau\rightarrow\infty} \left|G(\tfrac{1}{2}+i\tau)\right| \quad\mbox{and}\quad \alpha_{G,\scalebox{0.8}{\mbox{sup}}}:=\limsup_{\tau\rightarrow\infty} \left|G(\tfrac{1}{2}+i\tau)\right|. \end{equation} The intermediate value theorem for continuous functions implies that, for every $\alpha\in[\alpha_{G,\scalebox{0.8}{\mbox{inf}}},\alpha_{G,\scalebox{0.8}{\mbox{sup}}}]$, we find a sequence $(\tau_k)_k$ of real numbers $\tau_k\in[2,\infty)$ with $\lim_{k\rightarrow\infty}\tau_k=\infty$ such that $$ \lim_{k\rightarrow\infty} \left| G(\tfrac{1}{2}+i\tau_k)\right| = \alpha. $$ For $\mathcal{L}\in\mathcal{S}^{\#}$ with $d_{\mathcal{L}}>0$, we expect that $\alpha_{\mathcal{L},\scalebox{0.8}{\mbox{inf}}}=0$ and $\alpha_{\mathcal{L},\scalebox{0.8}{\mbox{sup}}}=\infty$. However, it appears to be quite challenging to prove this for functions $\mathcal{L}\in\mathcal{S}^{\#}$ in general. We tackle this problem later on in Chapter~\ref{ch:smalllarge}.\par In the class $\mathcal{G}$, things are qualitatively different. Here, we find, for every $\alpha_1,\alpha_2\in[0,\infty)$ with $\alpha_1\leq \alpha_2$, a function $G\in\mathcal{G}$ such that $\alpha_{G,\scalebox{0.8}{\mbox{inf}}}=\alpha_1$ and $\alpha_{G,\scalebox{0.8}{\mbox{sup}}}=\alpha_2$: let $\Delta_{\zeta}(s)$ be the factor of the functional equation for the Riemann zeta-function. 
We recall that \begin{equation}\label{tools} \Delta_{\zeta}(s)\Delta_{\zeta}(1-s) = 1 \quad\mbox{and}\quad \overline{\Delta_{\zeta}(\overline{s})} = \Delta_{\zeta}(s)\quad \mbox{for }s\in\mathbb{C} \end{equation} and observe that $\Delta_{\zeta}(s)$ defines an analytic non-vanishing function in the half-strip $\mathcal{D}$. Let the square-root function $\Delta_{\zeta}(s)^{1/2}$ be defined according to Section \ref{sec:Delta} and let $\alpha_1,\alpha_2\in[0,\infty)$ with $\alpha_1\leq \alpha_2$. Suppose that $\pmb{\alpha}:=(\alpha_1,\alpha_2)\neq (0,0)$. Then, by means of \eqref{tools}, we verify easily that $$ G_{\pmb{\alpha}}(s):=\left(\frac{\alpha_1+\alpha_2}{2} + \frac{\alpha_2-\alpha_1}{2}\cdot\sin\left(-i(s-\tfrac{1}{2}) \right) \right) \Delta_{\zeta}(s)^{1/2} $$ defines a function in $\mathcal{G}$ which fulfills the functional equation $G_{\pmb{\alpha}}(s)=\Delta_{\zeta}(s)\overline{G_{\pmb{\alpha}}(1-\overline{s})}$ and satisfies $\alpha_{G_{\pmb{\alpha}},\scalebox{0.8}{\mbox{inf}}}=\alpha_1$ and $\alpha_{G_{\pmb{\alpha}},\scalebox{0.8}{\mbox{sup}}}=\alpha_2$. If $\pmb{\alpha}:=(\alpha_1,\alpha_2)=(0,0)$, then the function $$ G_{\pmb{0}}(s):= \exp(-s(1-s)) \Delta_{\zeta}(s)^{1/2} $$ lies in $\mathcal{G}$ and satisfies $\alpha_{G_{\pmb{0}},\scalebox{0.8}{\mbox{inf}}}=\alpha_{G_{\pmb{0}},\scalebox{0.8}{\mbox{sup}}}=0$. We observe further that, uniformly for $0<\sigma<1$, as $t\rightarrow\infty$, \begin{equation}\label{eq:Galphaabsch} G_{\pmb{0}}(\sigma+it) \ll \exp\left(-Ct^2 \right) \end{equation} with some constant $C>0$. Let $\mu:[2,\infty)\rightarrow\mathbb{R}^+$ be a positive function with $\mu(\tau)< \frac{1}{2}\log\tau$ for $\tau\geq 2$ and set $G_{\pmb{0},\tau}(z):=G(\frac{1}{2}+\frac{\mu(\tau)}{\log\tau}z+i\tau)$ for $\tau\geq 2$ and $z\in\mathbb{D}$. 
Then, \eqref{eq:Galphaabsch} implies that, for every sequence $(\tau_k)_k$ of real numbers $\tau_k\in[2,\infty)$ with $\lim_{k\rightarrow\infty}\tau_k = \infty$, the corresponding sequence $(G_{\pmb{0},\tau_k})_k$ converges locally uniformly on $\mathbb{D}$ to the function $g\equiv 0$. This yields a very first example that convergence of the limiting process in Proposition \ref{mainprop} is actually possible.\par With a little more effort we can adjust the function $G_{\pmb{\alpha}}$ above such that analogous results hold if we admit $\alpha_1,\alpha_2\in[0,\infty)\cup\{\infty\}$. \subsection{Convergence via the growth behaviour in \texorpdfstring{$\mathcal{S}^{\#}$}{} } \label{sec:orderofgrowth} For $\mathcal{L}\in\mathcal{S}^{\#}$, we can enforce the limiting process introduced in the Section \ref{sec:shiftingshrinking} to converge by adjusting the underlying conformal mapping $\varphi_{\tau}$ according to the growth behaviour of $\mathcal{L}$.\par {\bf Dirichlet series of finite order.} We say that a Dirichlet series $A(s)$, resp. its meromorphic continuation which we suppose to have only finitely many poles, is of finite order in the strip $-\infty \leq \sigma_1 \leq \sigma \leq \sigma_2 \leq \infty$, if there exists a non-negative real number $c$ such that for all $\sigma_1\leq \sigma \leq \sigma_2$ \begin{equation}\label{finiteorder} A(\sigma+it) \ll |t|^c \qquad \mbox{ as } |t|\rightarrow\infty . \end{equation} For a given $\sigma_1 \leq \sigma \leq \sigma_2$, we define $\theta_A(\sigma)$ to be the infimum of all $c\geq 0$ such that \eqref{finiteorder} holds; see Steuding \cite[Chapt. 2.1]{steuding:2007} and Titchmarsh \cite[\S 9.4]{titchmarsh:1939}. It follows from a Phragm\'{e}n-Lindel\"of type argument that the function $\theta_A(\sigma)$ is continuous, non-decreasing and convex-downwards; see Titchmarsh \cite[\S 9.41]{titchmarsh:1939}.\par {\bf The growth behaviour in the extended Selberg class.} Let $\mathcal{L}\in\mathcal{S}^{\#}$. 
Then, $\mathcal{L}$ is of finite order in every strip $-\infty < \sigma_1 \leq \sigma \leq \sigma_2 < \infty$. By the absolute convergence of $\mathcal{L}\in\mathcal{S}^{\#}$ in $\sigma>1$, we have $$ \theta_{\mathcal{L}}(\sigma)=0 \qquad \mbox{for} \qquad \sigma>1. $$ It follows then basically from the functional equation together with the asymptotic estimate $\Delta_{\mathcal{L}}(s)\asymp \left(|t|/2\pi \right)^{d_{\mathcal{L}}(1/2 - \sigma)}$, as $|t|\rightarrow\infty$, that $$ \theta_{\mathcal{L}}(\sigma)=d_{\mathcal{L}}\left(\tfrac{1}{2}-\sigma \right) \qquad \mbox{for} \qquad \sigma<0. $$ The convexity of the function $\theta_{\mathcal{L}}(\sigma)$ implies that $$ \max\left\{0,\, d_{\mathcal{L}}\left(\tfrac{1}{2}-\sigma \right) \right\}\leq \theta_{\mathcal{L}}(\sigma)\leq \frac{d_{\mathcal{L}}}{2}\left(1- \sigma \right) \qquad \mbox{for} \qquad 0\leq \sigma \leq 1. $$ For the peculiar case $\sigma=\frac{1}{2}$, this yields the bounds $0\leq \theta_{\mathcal{L}}(\frac{1}{2})\leq \frac{d_{\mathcal{L}}}{4}$. According to the Lindel\"of hypothesis, we expect that $\theta_{\mathcal{L}}(\frac{1}{2})=0$. \par {\bf The growth-behaviour of the derivatives.} For a non-negative integer $\ell$, let $\mathcal{L}^{(\ell)}$ denote the $\ell$-th derivative of $\mathcal{L}\in\mathcal{S}^{\#}$. Then, Cauchy's integral formula, applied to discs with center $\sigma+it$ and radius $1/\log |t|$, assures that, as $|t|\rightarrow\infty$, \begin{equation}\label{eq:finiteorderderivative} \mathcal{L}^{(\ell)}(\sigma+it) \ll_{\ell, \varepsilon} |t|^{\theta_{\mathcal{L}}(\sigma) + \varepsilon} \end{equation} with any $\varepsilon>0$. Thus, $\mathcal{L}$ and $\mathcal{L}^{(\ell)}$ have the same order of growth.\par {\bf Convergence of the limiting process.} First, we establish the following lemma which results from a `smoothness' argument. \begin{lemma}\label{lem:growth} Let $\mathcal{L}\in\mathcal{S}^{\#}$, $\ell\in\mathbb{N}_0$ and $\delta>0$. 
For $\tau\geq 2$, let $D_{\tau}$ be the disc defined by \begin{equation}\label{def:disclemgrowth} |s-\tfrac{1}{2}-i\tau| < \tau^{-\theta_{\mathcal{L}}(\frac{1}{2})-\delta}. \end{equation} Then, as $\tau\rightarrow\infty$, $$ \max_{s_1,s_2 \in D_{\tau}} \left|\mathcal{L}^{(\ell)}(s_1) - \mathcal{L}^{(\ell)}(s_2) \right| \ll_{\delta, \ell} \tau^{-\delta/2}. $$ \end{lemma} \begin{proof} Let $\mathcal{L}\in\mathcal{S}^{\#}$, $\ell\in\mathbb{N}_0$ and $\delta>0$. By \eqref{eq:finiteorderderivative} and the continuity of the function $\theta_{\mathcal{L}}(\sigma)$, the estimate \begin{equation}\label{eq:sebicu} \left| \mathcal{L}^{(\ell + 1)} (s)\right| \ll_{\delta,\ell} \tau^{\theta_{\mathcal{L}}(\sigma) + \delta/2} \end{equation} holds uniformly for $s\in D_{\tau}$, as $\tau\rightarrow\infty$. For $s_1,s_2\in D_{\tau}$, we denote the line segment connecting $s_1$ and $s_2$ by $[s_1,s_2]$. By \eqref{eq:sebicu} and a trivial estimation, we obtain that, uniformly for $s_1,s_2\in D_{\tau}$, as $\tau\rightarrow\infty$, $$ \left|\mathcal{L}^{(\ell)}(s_1) - \mathcal{L}^{(\ell)}(s_2) \right| = \left|\int_{[s_1,s_2]} \mathcal{L}^{(\ell + 1)}(s)\,\mathrm{d}s \right| \ll_{\delta,\ell} \ \tau^{-\theta_{\mathcal{L}}(\frac{1}{2})-\delta} \ \cdot \ \tau^{\theta_{\mathcal{L}}(\sigma) + \delta/2} = \tau^{-\delta/2}. $$ This proves the lemma. \end{proof} Now, we are ready to give a very first convergent statement for the limiting process introduced in Section \ref{sec:shiftingshrinking}. \begin{theorem}\label{th:growth-conceptofuniv} Let $\mathcal{L}\in\mathcal{S}^{\#}$ and $\delta>0$. 
For $\tau\in[2,\infty)$ and $z\in\mathbb{D}$, let $$\varphi_{\tau}(z):=\tfrac{1}{2}+\tau^{-\theta_{\mathcal{L}}(\frac{1}{2})-\delta}\cdot z + i\tau \qquad \mbox{and}\qquad \mathcal{L}_{\tau}(z):=\mathcal{L}(\varphi_{\tau}(z)).$$ \begin{itemize} \item[(a)] Suppose that there is an $\alpha\in[0,\infty)$ and a sequence $(\tau_k)_k$ of real numbers $\tau_k\in[2,\infty)$ with $\lim_{k\rightarrow\infty}\tau_k = \infty$ such that \begin{equation}\label{9o} \lim_{k\rightarrow\infty} \left|\mathcal{L}(\tfrac{1}{2}+i\tau_k)\right| = \alpha. \end{equation} Then, there is an $a\in\mathbb{C}$ with $|a|=\alpha$ and a subsequence of $(\mathcal{L}_{\tau_{k}})_k$ which converges locally uniformly on $\mathbb{D}$ to $g\equiv a$. \item[(b)] Suppose that there is a sequence $(\tau_k)_k$ of real numbers $\tau_k\in[2,\infty)$ with $\lim_{k\rightarrow\infty}\tau_k = \infty$ such that \begin{equation}\label{9oo} \lim_{k\rightarrow\infty} \left|\mathcal{L}(\tfrac{1}{2}+i\tau_k)\right| = \infty. \end{equation} Then, there is a subsequence of $(\mathcal{L}_{\tau_{k}})_k$ which converges locally uniformly on $\mathbb{D}$ to $g\equiv \infty$. \end{itemize} \end{theorem} \begin{proof} We observe that $\varphi_{\tau_k}(\mathbb{D}) = D_{\tau_k}$, where $D_{\tau_k}$ is defined by \eqref{def:disclemgrowth} in Lemma \ref{lem:growth}. Suppose that there exists a sequence $(\tau_k)_k$ of real numbers $\tau_k\in[2,\infty)$ with $\lim_{k\rightarrow\infty}\tau_k = \infty$ such that $$ \lim_{k\rightarrow\infty} \left|\mathcal{L}(\tfrac{1}{2}+i\tau_k)\right| = \infty,\qquad \mbox{resp. }\lim_{k\rightarrow\infty} \left|\mathcal{L}(\tfrac{1}{2}+i\tau_k)\right| = 0. $$ Then, Lemma \ref{lem:growth} yields immediately that $(\mathcal{L}_{\tau_k})_k$ converges locally uniformly on $\mathbb{D}$ to $g\equiv \infty$, resp. $g\equiv 0$. 
Let $(\tau_k)_k$ be a sequence of real numbers $\tau_k\in[2,\infty)$ with $\lim_{k\rightarrow\infty}\tau_k = \infty$ such that \begin{equation}\label{limk} \lim_{k\rightarrow\infty} \left|\mathcal{L}(\tfrac{1}{2}+i\tau_k)\right| = \alpha \end{equation} with some $\alpha\in(0,\infty)$. Lemma \ref{lem:growth} assures that, for sufficiently large $k$, $$ \left|\mathcal{L}_{\tau_k}(z)\right|< \alpha + 1, \qquad \mbox{for }z\in\mathbb{D}. $$ Thus, the family $\{\mathcal{L}_{\tau_k}\}_k$ is bounded on $\mathbb{D}$ and, consequently, by Montel's theorem, normal in $\mathbb{D}$. This means that there is a subsequence $(\tau_{k_j})_j$ of $(\tau_k)_k$ such that $(\mathcal{L}_{\tau_{k_j}})_j$ converges locally uniformly on $\mathbb{D}$ to an analytic function $g$. Due to \eqref{limk} we have $$ g(0)= \lim_{j\rightarrow\infty} \mathcal{L}_{\tau_{k_j}}(0) =\lim_{j\rightarrow\infty} \mathcal{L}(\tfrac{1}{2}+i\tau_{k_j}) = a $$ with some $a\in\mathbb{C}$ satisfying $|a|=\alpha$. From Lemma \ref{lem:growth} we deduce that $$ g(z)= \lim_{j\rightarrow\infty} \mathcal{L}_{\tau_{k_j}}(z) = a $$ for $z\in\mathbb{D}$. Hence, $g\equiv a$ on $\mathbb{D}$. \end{proof} With confinements on the scaling factor of the mapping $\varphi_{\tau}$, an analogous statement of Theorem \ref{th:growth-conceptofuniv} can be made for every function $G\in\mathcal{G}$ by means of certain continuity arguments for which we refer the reader to Christ \cite[Lemma 2]{christ:2012}.\par In view of the conditions \eqref{9o} and \eqref{9oo} in Theorem \ref{th:growth-conceptofuniv}, it seems again worth to determine, for given function $G\in\mathcal{G}$, the quantities $\alpha_{G,\scalebox{0.8}{\mbox{inf}}}$ and $\alpha_{G,\scalebox{0.8}{\mbox{sup}}} $ defined by \eqref{def:alpha_inf}. 
\subsection{Convergence via the a-point-distribution in \texorpdfstring{$\mathcal{S}^{\#}_R$}{} } \label{subsec:convapoints} \begin{figure} \caption{The images of the rectangular domain $\mathcal{R}$.} \label{fig:phirect} \end{figure} There is a further possibility to enforce convergence of the limiting process introduced in Section \ref{sec:shiftingshrinking}, namely by adjusting the underlying conformal mapping $\varphi_{\tau}$ according to the $a$-point-distribution of $\mathcal{L}\in\mathcal{S}^{\#}$.\par We shall study the $a$-point-distribution of functions in the extended Selberg class in detail later on in Chapter \ref{chapt:apoints}. Here, we anticipate only some very basic observations. Let $\mathcal{L}\in\mathcal{S}^{\#}$ and $a\in\mathbb{C}$. A complex number $\rho_a$ is said to be an $a$-point of $\mathcal{L}$ if $\mathcal{L}(\rho_a)=a$. We distinguish between trivial and non-trivial $a$-points. For a given function $\mathcal{L}\in\mathcal{S}^{\#}$, we can fix real numbers $R_a> 1$ and $L<0$ such that there are no $a$-points of $\mathcal{L}$ in the half-plane $\sigma>R_a$, only trivial ones in the half-plane $\sigma< L$ and only non-trivial ones in the strip $L \leq \sigma\leq R_a$. \par Steuding \cite[Theorem 7.7]{steuding:2007} established a Riemann von-Mangoldt formula for a rather large subclass of the extended Selberg class. Let $\mathcal{S}^{\#}_R$ be the set of all functions $\mathcal{L}\in\mathcal{S}^{\#}$ with $d_{\mathcal{L}}>0$ which satisfy the Ramanujan hypothesis; see Section \ref{sec:selbergclass}. Obviously, we have $$ \mathcal{S}\setminus\{1\} \subset \mathcal{S}^{\#}_R\subset \mathcal{S}^{\#}. $$ For given $\mathcal{L}\in\mathcal{S}^{\#}_R$, let $N_a(T)$ denote the number of non-trivial $a$-points with imaginary part $0<\gamma_a\leq T$. 
Then, according to Steuding \cite[Theorem 7.7]{steuding:2007}, \begin{equation}\label{eq:RvMextS} N_{a}(T) = \frac{d_{\mathcal{L}}}{2\pi} T \log T + O(T), \qquad\mbox{ as $T\rightarrow\infty$.} \end{equation} For a subset $X\subset \mathbb{R}^+$, we define a density function by $$ \nu_{T}(X) := \frac{1}{T} {\rm{meas\ }} \left( X \cap (T, 2T] \right),\qquad T>0. $$ Further, let $\mathcal{R}(x,y)$ with $x,y> 0$ denote the rectangular domain defined by the vertices $\pm x\pm iy$. Now, we are ready to prove the following convergence statement. \begin{theorem}\label{th:convergenceviaapoints} Let $\mathcal{L}\in\mathcal{S}^{\#}_R$ and $a,b\in\mathbb{C}$ with $a\neq b$. Let $c>0$ and $\mathcal{R}:=\mathcal{R}(c,1)$. Further, let $\mu:[2,\infty) \rightarrow\mathbb{R}^+$ be monotonically decreasing such that $$ \lim_{\tau\rightarrow\infty}\mu(\tau) < \kappa_{\mathcal{L}} \qquad \mbox{with}\qquad \kappa_{\mathcal{L}}:= \frac{\pi }{2 d_{\mathcal{L}}}. $$ For $\tau\geq 2$ and $z\in\mathcal{R}$, let $\varphi_{\tau}(z):= \frac{1}{2}+\frac{\mu(\tau)}{\log \tau} z + i\tau$ and $\mathcal{L}_{\tau}(z):= \mathcal{L}\left(\varphi_{\tau}(z)\right)$. Then, for every $0<q<1$ with $\lim_{\tau\rightarrow\infty}\mu(\tau) < \kappa_{\mathcal{L}} q$, there exists a subset $\mathcal{W}\subset[2,\infty)$ with $$ \liminf_{T\rightarrow\infty}\ \nu_T(\mathcal{W})\geq 1-q $$ such that the family $\mathcal{F}:=\{\mathcal{L}_{\tau}\}_{\tau\in\mathcal{W}}\subset\mathcal{H}(\mathcal{R})$ omits the values $a$ and $b$ on $\mathcal{R}$. In particular, $\mathcal{F}$ is normal in $\mathcal{R}$. \end{theorem} \begin{proof} Let $\mathcal{L}\in\mathcal{S}^{\#}_R$ and let $\mu:[2,\infty)\rightarrow\mathbb{R}^+$ satisfy the conditions of Theorem \ref{th:convergenceviaapoints}. 
For any $a\in\mathbb{C}$, let $\Gamma_{a}$ denote the set of all imaginary parts $\gamma_a$ of $a$-points $\rho_a = \beta_a + i\gamma_a$ of $\mathcal{L}$ which lie in the region defined by \begin{equation}\label{apointstrip} \tfrac{1}{2}-\frac{\mu(t-1)}{\log (t-1)} \leq \sigma \leq \tfrac{1}{2}+\frac{\mu(t-1)}{\log (t-1)} , \qquad t> 2. \end{equation} Furthermore, for any two distinct $a,b\in\mathbb{C}$, we set $\Gamma_{a,b} := \Gamma_a \cup \Gamma_b$ and denote the number of elements $\gamma\in\Gamma_{a,b}$ with $0<\gamma \leq T$ by $N_{\Gamma_{a,b}}(T)$.\par Now, we fix two distinct $a,b\in\mathbb{C}$ as required in the assumptions of Theorem \ref{th:convergenceviaapoints}. Certainly, the inequality $$ N_{\Gamma_{a,b}}(T) \leq N_a(T) + N_b(T) $$ holds for $T>0$; here, $N_a(T)$ denotes the number of non-trivial $a$-points of $\mathcal{L}$ with imaginary part $0<\gamma_a \leq T$. The Riemann-von Mangoldt formula \eqref{eq:RvMextS} for functions in $\mathcal{S}_R^{\#}$ yields that, as $T\rightarrow\infty$, \begin{equation}\label{NGamma-1} N_{\Gamma_{a,b}}(T)\leq \frac{d_{\mathcal{L}}}{\pi} T \log T + o(T\log T). \end{equation} This asymptotic bound for $N_{\Gamma_{a,b}}$ seems to be very rough. However, as we shall briefly discuss at the end of this section, in most cases this bound might not be too far from the truth. \par For $\gamma\in\mathbb{R}$, we set $\gamma':= \gamma-1$ and define $$ \Lambda := \bigcup_{\gamma\in\Gamma_{a,b}} \left[\gamma - \frac{\mu(\gamma')}{\log \gamma'},\ \gamma + \frac{\mu(\gamma')}{\log \gamma'} \right]; $$ By means of \eqref{NGamma-1}, we obtain that $$ \nu_T(\Lambda) \leq \frac{1}{T}\cdot \frac{2\mu(T-1)}{\log (T-1)} \cdot N_{\Gamma_{a,b}}(T) \leq \frac{2d_{\mathcal{L}}}{\pi}\cdot \mu(T-1) + o(1), $$ as $T\rightarrow\infty$. Let $0<q<1$ be such that $\lim_{\tau\rightarrow\infty} \mu(\tau) < \kappa_{\mathcal{L}}q$. 
Then, we have \begin{equation}\label{eq:limsupconvergenceapoints} \limsup_{T\rightarrow\infty} \nu_T(\Lambda) \leq q . \end{equation} We choose $\tau_0\geq 2$ large enough such that $0<\frac{\mu(\tau)}{\log \tau}<\frac{1}{2}$ holds for $\tau\geq \tau_0$ and set $\mathcal{W}:= [\tau_0,\infty) \setminus \Lambda$. It follows immediately from \eqref{eq:limsupconvergenceapoints} that \begin{equation}\label{sel:measA} \liminf_{T\rightarrow\infty}\nu_T( \mathcal{W}) \geq 1-q. \end{equation} By the construction of the set $\mathcal{W}$ and the monotonicity of $\mu$, the family $ \{\mathcal{L}_{\tau}\}_{\tau\in\mathcal{W}} $ omits the values $a$ and $b$ on $\mathcal{R}$. Our choice of $\tau_0$ assures that a possible pole of $\mathcal{L}$ at $s=1$ does not generate a pole for any function $\mathcal{L}_{\tau}$ with $\tau\in[\tau_0,\infty)$ on $\mathcal{R}$. Thus, the functions $\mathcal{L}_{\tau}$ with $\tau\in[\tau_0,\infty)$ are analytic on $\mathcal{R}$. By Montel's fundamental normality test (Theorem \ref{th:FNT1}) we conclude that the family $\{\mathcal{L}_{\tau}\}_{\tau\in\mathcal{W}}$ is normal in $\mathcal{R}$. \end{proof} It would be interesting to know whether one can replace the constant $\kappa_{\mathcal{L}}$ in Theorem \ref{th:convergenceviaapoints} by a larger one. And, in fact, there are two estimates in our proof which appear to be quite rough. \begin{itemize} \item[(i)] We used the very rough bound \begin{equation*}\label{sebastian} N_{\Gamma_{a,b}}(T) \leq N_a(T) + N_b(T). \end{equation*} We could improve this bound if we knew more about the $a$-point-distribution of $\mathcal{L}$ in the funnel-shaped strip \eqref{apointstrip}. As we shall see in Chapter \ref{chapt:apoints}, we expect that, for $\mathcal{L}\in\mathcal{S}^{\#}_R$, almost all of its $a$-points lie arbitrarily close to the critical line. However, it seems to be difficult to get detailed information on how the $a$-points cluster around the critical line. 
Due to works of Levinson \cite{levinson:1975}, Selberg \cite{selberg:1992} and Tsang \cite{tsang:1984}, there is slightly more information at our disposal if a function $\mathcal{L}\in\mathcal{S}$ possesses a rich arithmetical structure. But, in most cases, our knowledge is not sufficient to estimate the number of $a$- and $b$-points in the domain \eqref{apointstrip} better than above. If we restrict to the Riemann zeta-function and assume the Riemann hypothesis, then we deduce from a conditional $a$-point result of Selberg (see Section \ref{apointslittlewood}) that, for $a\neq 0$, about half of the $a$-points lie outside the domain \eqref{apointstrip}, provided that $\lim_{t\rightarrow\infty}\mu(t)>0$. This allows us to replace the constant $\kappa_{\mathcal{L}}$ in Theorem \ref{th:convergenceviaapoints} by $2\kappa_{\mathcal{L}}$, provided that $a,b\neq 0$. \item[(ii)] In bounding $\nu_{T}(\Lambda)$ we did not respect that there might be quite many intervals $$ \left[\gamma - \frac{\mu(\gamma')}{\log \gamma'},\ \gamma + \frac{\mu(\gamma')}{\log \gamma'} \right]\qquad \mbox{with }\gamma':=\gamma-1\;\mbox{and}\;\gamma\in\Gamma_{a,b} $$ which overlap. However, to deal with this overlapping seems to be out of reach. We refer here to the many obstacles that prevent us from getting control over the gap conjecture and Montgomery's pair correlation conjecture in the case of the Riemann zeta-function. \end{itemize} \chapter{Small and Large values near the critical line} \label{ch:smalllarge} For functions $G\in\mathcal{G}$, we introduced in Chapter \ref{ch:conceptsuniv} a limiting process in neighbourhoods of the critical line and found out that the functional equation has strong effects on the shape of possible limit functions. In Theorem \ref{th:growth-conceptofuniv} and \ref{th:convergenceviaapoints} we investigated two natural mechanisms by which we can enforce the limiting process to converge. 
For a given $\mathcal{L}\in\mathcal{S}^{\#}$, the limit functions to be obtained by such a convergent process are connected with the quantities $$ \alpha_{\mathcal{L},\scalebox{0.8}{\mbox{inf}}}:=\liminf_{\tau\rightarrow\infty} \left|\mathcal{L}(\tfrac{1}{2}+i\tau) \right|\qquad \mbox{and}\qquad \alpha_{\mathcal{L},\scalebox{0.8}{\mbox{sup}}}:=\limsup_{\tau\rightarrow\infty} \left|\mathcal{L}(\tfrac{1}{2}+i\tau) \right|. $$ It appears to be quite challenging to determine $\alpha_{\mathcal{L},\scalebox{0.8}{\mbox{inf}}}$ and $\alpha_{\mathcal{L},\scalebox{0.8}{\mbox{sup}}}$ for general functions $\mathcal{L}\in\mathcal{S}^{\#}$. We postpone this problem to the end of this chapter and tackle it in Section \ref{sec:unboundedness}.\par If a function $\mathcal{L}\in\mathcal{S}$ has a sufficiently rich arithmetic structure in its Dirichlet series coefficients, Selberg's central limit implies that $\alpha_{\mathcal{L},\scalebox{0.8}{\mbox{inf}}}=0$ and $\alpha_{\mathcal{L},\scalebox{0.8}{\mbox{sup}}}=\infty$ and, thus, provides a more satisfactory answer than we can state for general functions $\mathcal{L}\in\mathcal{S}^{\#}$. We present Selberg's central limit law and several of its extensions in Section \ref{sec:selbergcentrallimit}. For suitable functions $\mathcal{L}\in\mathcal{S}$, we deduce from Selberg's central limit law in Section \ref{sec:smalllargeONcritline} information on the frequency of small and large values on the critical line.\par In Section \ref{sec:largesmall}, we discover that Selberg's central limit law implies that, for suitable functions $\mathcal{L}\in\mathcal{S}$, the limiting process of Theorem \ref{th:growth-conceptofuniv} and \ref{th:convergenceviaapoints} has a strong tendency to converge either to $g\equiv 0$ or to $g\equiv \infty$. 
\section{Selberg's central limit law} \label{sec:selbergcentrallimit} In the class $\mathcal{S}^*$ we gather all functions from the Selberg class for which both Selberg's prime coefficient condition (S.6$^*$) and Selberg's zero-density estimate (DH) are true; see Section \ref{sec:selbergclass}. \par {\bf Selberg's central limit law.} Selberg \cite{selberg:1992} derived that, for $\mathcal{L}\in\mathcal{S}^*$, the values of $\log \mathcal{L}(\frac{1}{2} + it)$ are Gaussian normally distributed after some suitable normalization. We set $$ \kappa_{\mathcal{L},T}(\sigma,t):= \frac{\log \mathcal{L}(\sigma+it)}{\sqrt{\frac{1}{2}n_{\mathcal{L}} \log \log T}}, $$ where $n_{\mathcal{L}}$ is defined by Selberg's prime coefficient condition (S.6$^*$). Then, for any measurable set $B\subset \mathbb{C}$ with positive Jordan content, we have \begin{equation*} \frac{1}{T}\ {\rm{meas\ }} \left\{ t\in (T,2T]\ : \ \kappa_{\mathcal{L},T}(\tfrac{1}{2},t) \in B \right\} \sim \frac{1}{2\pi} \iint_{B} e^{-\frac{1}{2}(x^2+y^2)}\,\mathrm{d}x \,\mathrm{d}y, \end{equation*} as $T\rightarrow\infty$. Note that $\varphi(x,y):= \frac{1}{2\pi}e^{-\frac{1}{2}(x^2+y^2)}$ defines the density function of the bivariate Gaussian normal distribution. 
Moreover, for any real numbers $\alpha$ and $\beta$ with $\alpha<\beta$, as $T\rightarrow\infty$, \begin{equation}\label{centrallimitlaw} \frac{1}{T}\ {\rm{meas\ }} \left\{ t\in (T,2T]\ : \ \alpha \leq {\rm{Re} } \left(\kappa_{\mathcal{L},T}(\tfrac{1}{2},t)\right) \leq \beta \right\} \qquad\qquad\qquad\mbox{ } \end{equation} $$\qquad\qquad\qquad\qquad\qquad\qquad = \frac{1}{\sqrt{2\pi}} \int_{\alpha}^{\beta}e^{-\frac{1}{2}x^2 } \mbox{\ d} x + O\left( \frac{(\log\log\log T)^2}{\sqrt{\log\log T}} \right) $$ and, similarly, with a slightly better error term, $$ \frac{1}{T}\ {\rm{meas\ }} \left\{ t\in (T,2T]\ : \ \alpha \leq {\rm{Im} } \left(\kappa_{\mathcal{L},T}(\tfrac{1}{2},t)\right)\leq \beta \right\} \qquad\qquad\qquad\mbox{ } $$ $$\qquad\qquad\qquad\qquad\qquad\qquad = \frac{1}{\sqrt{2\pi}} \int_{\alpha}^{\beta}e^{-\frac{1}{2}x^2} \mbox{\ d} x + O\left( \frac{\log\log\log T}{\sqrt{\log\log T}} \right). $$ In the case of the Riemann zeta-function, the asymptotic of Selberg's limit law for ${\rm{Re} } \left( \kappa_{\zeta,T}(\frac{1}{2},t)\right)$ was also discovered by Laurin\v{c}ikas \cite{laurincikas:1987}, independently of Selberg's work; however, without explicit error term.\footnote{We refer to Ivi\'{c} \cite{ivic:2002} for a short historical overview on preliminary works leading to Selberg's limit law.} Selberg himself never published a rigorous proof of his limit theorems. For a precise description of Selberg's method, we refer to Tsang \cite{tsang:1984}, who carried out all details in the case of the Riemann zeta-function. Joyner \cite{joyner:1986} proved Selberg's central limit law for a large class of Dirichlet series. Laurin\v{c}ikas \cite{laurincikas:1991-2} provided proofs for various limit laws connected with the Riemann zeta-function. 
Hejhal \cite{hejhal:2000} sketches the proof of Selberg's central limit law for linear combinations of functions in the class $\mathcal{S}^*$ with polynomial Euler product (S.3$^*$).\par {\bf Extensions of Selberg's central limit law.} There are several directions to extend Selberg's central limit law. In the following, we mainly restrict to the Riemann zeta-function and to limit laws with respect to ${\rm{Re} } \left(\kappa_{\zeta,T}(\frac{1}{2},t) \right)$. \par Laurin\v{c}ikas proved that, for the Riemann zeta-function, Selberg's central limit theorem is also valid on certain line segments to the right of the critical line. Let $$ 0\leq \varepsilon(T)\leq \frac{\mu(T)\sqrt{\log\log T}}{\log T}, \qquad T\geq 2, $$ with an arbitrary positive function $\mu$ satisfying $ \mu(T) \rightarrow \infty$ and $\mu(T)=o(\log\log T)$, as $T\rightarrow\infty$. Then, $$ \frac{1}{T}\ {\rm{meas\ }} \left\{ t\in (T,2T]\ : \ \alpha \leq {\rm{Re} } \left(\kappa_{\zeta,T}(\tfrac{1}{2}+\varepsilon(T),t)\right) \leq \beta \right\} \sim \frac{1}{\sqrt{2\pi}} \int_{\alpha}^{\beta}e^{-\frac{1}{2}x^2} \mbox{\ d} x , $$ as $T\rightarrow\infty$; see Laurin\v{c}ikas \cite[Chapt. 3, Theorem 3.5.1]{laurincikas:1991-2}. It is also possible to obtain normal distribution results if $$ \varepsilon(T)>\frac{\mu(T)\sqrt{\log\log T}}{\log T}, \qquad T\geq 2. $$ In this case, however, a change of normalization is necessary: one has to work with $$ \kappa'_{\zeta,T} (\tfrac{1}{2} +\varepsilon(T) , t) : = \frac{\log| \zeta(\tfrac{1}{2}+\varepsilon(T)+i t)|}{\sqrt{-\log\varepsilon(T)}}. $$ For details we refer to Laurin\v{c}ikas \cite[Chapt. 3.4, Theorem 3.4.1 and Corollary 3.4.2]{laurincikas:1991-2}.\par Under the assumption of the Riemann hypothesis, Hejhal \cite{hejhal:1989} established a central limit law for the modulus of the first derivative of the Riemann zeta-function on the critical line. Let $A(t):=(t/2\pi)\log (t/2\pi e)$.
If the Riemann hypothesis is true, then $$ \frac{1}{T} {\rm{meas\ }} \left\{ t\in (T,2T]\ : \ \alpha \leq \frac{\log\left|\zeta'(\frac{1}{2}+it)/A'(\frac{1}{2}+it)\right|}{\sqrt{\frac{1}{2} \log\log T}} \leq \beta \right\} \sim \frac{1}{\sqrt{2\pi}} \int_{\alpha}^{\beta}e^{-\frac{1}{2}x^2} \mbox{\ d} x, $$ as $T\rightarrow\infty$.\par Hughes, Nikeghbali \& Yor \cite{hughesnikeghbaleyour:2008} and Bourgade \cite{bourgade:2010} gave multidimensional extensions of Selberg's central limit law. In the following, let $\omega$ be a random variable which is uniformly distributed on the interval $(0,1)$. In the terminology of probability theory, Selberg's central limit law states that $$ \frac{\log| \zeta(\tfrac{1}{2}+i\omega t)|}{\sqrt{\log\log t}} $$ converges in distribution, as $t\rightarrow\infty$, to a Gaussian normally-distributed random variable. Hughes, Nikeghbali \& Yor \cite{hughesnikeghbaleyour:2008} gave a multidimensional extension of Selberg's central limit law by showing that, for any $0<\lambda_1<...<\lambda_n$, the vector $$ \frac{1}{\sqrt{\log\log t}} \left(\log| \zeta(\tfrac{1}{2}+i\omega e^{(\log t)^{\lambda_1}})|,...,\log| \zeta(\tfrac{1}{2}+i\omega e^{(\log t)^{\lambda_n}})| \right) $$ converges in distribution, as $t\rightarrow\infty$, to $(\lambda_1 \mathcal{N}_1, ..., \lambda_n \mathcal{N}_n)$, where $\mathcal{N}_1,...,\mathcal{N}_n$ are independent Gaussian normally-distributed random variables. Bourgade \cite{bourgade:2010} investigated vectors with respect to smaller shifts and revealed some interesting correlation structure.
His results imply, for instance, that, for any $0\leq \delta\leq 1$, $$ \frac{1}{\sqrt{\log\log t}} \left(\log| \zeta(\tfrac{1}{2}+i\omega t)|,\log\left| \zeta\left(\tfrac{1}{2}+i\omega t +i\tfrac{1}{(\log t)^{\delta}}\right)\right| \right) $$ converges in distribution, as $t\rightarrow\infty$, to $(\mathcal{N}_1 , \delta \mathcal{N}_1 + \sqrt{1-\delta^2}\mathcal{N}_2)$, where $\mathcal{N}_1$ and $\mathcal{N}_2$ are independent Gaussian normally-distributed random variables.\par \section{Small and large values on the critical line}\label{sec:smalllargeONcritline} Selberg's central limit law provides information on the frequency of small and large values of the Riemann zeta-function on the critical line. \par For $\mathcal{L}\in\mathcal{S}$ and any two positive real functions $l,u:[2,\infty)\rightarrow\mathbb{R}^+$ satisfying $l(t)\leq u(t)$ for $t\in[2,\infty)$, we define \begin{equation*}\label{W_lu} W_{l(t),u(t)} := \left\{ t\in [2,\infty)\ : \ l(t) \leq |\mathcal{L}(\tfrac{1}{2}+it)| \leq u(t) \right\}. \end{equation*} In consistency with this notation, we set \begin{align*} W_{-\infty,u(t)} &:= \left\{ t\in [2,\infty)\ : \ |\mathcal{L}(\tfrac{1}{2}+it)| \leq u(t) \right\},\\ W_{l(t),+\infty} &:= \left\{ t\in [2,\infty)\ : \ |\mathcal{L}(\tfrac{1}{2}+it)| \geq l(t) \right\}. \end{align*} Recall that, for a given subset $X\subset \mathbb{R}^+$, we defined a density function by $$ \nu_{T}(X) := \frac{1}{T} {\rm{meas\ }} \left( X \cap (T, 2T] \right),\qquad T>0. $$ The subsequent theorem is an immediate consequence of Selberg's central limit law. \begin{theorem}\label{th:measselberglimitlaw} Let $\mathcal{L}\in\mathcal{S}^{*}$.
Furthermore, let \begin{equation}\label{gx} g_{u}(t):= \exp\left( u \sqrt{\tfrac{1}{2} n_{\mathcal{L}} \log \log t} \right),\qquad t\geq 2, \end{equation} where $u$ is a real parameter and $n_{\mathcal{L}}$ is defined by Selberg's prime coefficient condition (S.6$^*$), and \begin{equation}\label{ET} E(T):= \frac{(\log\log\log T)^2}{\sqrt{\log\log T}}, \qquad T>1. \end{equation} \begin{itemize} \item[(a)] Let $\alpha,\beta\in\mathbb{R}$ with $\alpha<\beta$. Then, as $T\rightarrow\infty$, \begin{align*}\label{meas:normal1} \nu_{T}\left(W_{-\infty, g_{\alpha}(t)}\right) &= \frac{1}{\sqrt{2\pi}} \int_{-\infty}^{\alpha}e^{-x^2/2 } \mbox{\ d} x + O\left( E(T) \right), \\ \nu_{T}\left(W_{g_{\alpha}(t),g_{\beta}(t)}\right) &= \frac{1}{\sqrt{2\pi}} \int_{\alpha}^{\beta}e^{-x^2/2 } \mbox{\ d} x + O\left( E(T) \right),\\ \nu_{T}\left(W_{g_{\beta}(t),+\infty}\right) &= \frac{1}{\sqrt{2\pi}} \int_{\beta}^{+\infty}e^{-x^2/2 } \mbox{\ d} x + O\left( E(T)\right). \end{align*} \item[(b)] Let $m$ be a fixed positive real number. Then, as $T\rightarrow\infty$, \begin{align*} \nu_{T}\left(W_{-\infty,\frac{1}{m}}\right) & = \frac{1}{2} + O\left( E(T) \right),\\ \nu_{T}\left(W_{\frac{1}{m}, m}\right) & = O\left( E(T) \right),\\ \nu_{T}\left(W_{m,\infty}\right) & = \frac{1}{2} + O\left( E(T) \right). \end{align*} \item[(c)] Let $m:[2,\infty)\rightarrow\mathbb{R}^+$ be a positive function with $\lim_{t\rightarrow\infty}m(t) = \infty$ and such that, for any $\varepsilon>0$, the inequality $m(t)\leq g_{\varepsilon}(t)$ holds for sufficiently large $t$. Then, as $T\rightarrow\infty$, \begin{align*} \nu_{T}\left(W_{-\infty,\frac{1}{m(t)}}\right) &= \frac{1}{2} + o(1),\\ \nu_{T}\left(W_{\frac{1}{m(t)},m(t)}\right) &= o(1),\\ \nu_{T}\left(W_{m(t),\infty}\right) &= \frac{1}{2} + o(1).
\end{align*} \end{itemize} \end{theorem} \begin{proof} Statement (a) follows immediately from Selberg's central limit law by noticing that ${\rm{Re} } (\log \mathcal{L}(\frac{1}{2}+it)) = \log |\mathcal{L}(\frac{1}{2}+it)|$. The statements (b) and (c) can be deduced from (a) by coupling the parameters $\alpha:=\alpha(T)$ and $\beta:=\beta(T)$ with $T$ in a suitable manner and by observing that $$ \frac{1}{\sqrt{2\pi}} \int_{-\infty}^{0}e^{-x^2/2 } \mbox{\ d} x = \frac{1}{\sqrt{2\pi}} \int_{0}^{\infty}e^{-x^2/2 } \mbox{\ d} x =\frac{1}{2}. $$ \end{proof} We close this section with a brief remark on large deviations in Selberg's central limit law. Let $\mathcal{L}\in\mathcal{S}^*$ and $\alpha:[2,\infty)\rightarrow\mathbb{R}^+$ be a positive function with $\lim_{T\rightarrow\infty}\alpha(T)=\infty$. We set $$ l(T):=\exp\left( \alpha(T) \sqrt{\tfrac{1}{2} n_{\mathcal{L}} \log \log T} \right),\qquad T\geq 2. $$ Selberg's central limit law implies that the asymptotic $$ \nu_T (W_{l(T),\infty}) \sim \frac{1}{\sqrt{2\pi}} \int_{\alpha(T)}^{\infty}e^{-\frac{1}{2}x^2} \mbox{\ d} x, \qquad\mbox{as }T\rightarrow\infty , $$ holds whenever $\alpha(T)\leq (\log\log\log T)^{\frac{1}{2}-\varepsilon}$ for sufficiently large $T$ with an arbitrary fixed $\varepsilon>0$. For larger deviations, i.e. if $$ \alpha(T)=\Omega\left( (\log\log\log T)^{\frac{1}{2}} \right), \qquad\mbox{ as } T\rightarrow\infty, $$ we obtain from Selberg's central limit law only the trivial bound \begin{equation*}\label{123} \nu_T (W_{l(T),\infty}) \ll E(T), \qquad\mbox{as }T\rightarrow\infty, \end{equation*} where $E(T)$ is defined by \eqref{ET}. 
In the case of the Riemann zeta-function, there are unconditional results due to Jutila \cite{jutila:1983}, Soundararajan \cite{soundararajan:2008} and Radziwi\l\l \ \cite{radziwill:2011} and conditional results (on the assumption of the Riemann hypothesis) due to Soundararajan \cite{soundararajan:2009} at our disposal which allow to describe the frequency of large deviations in Selberg's central limit law in a more precise manner than we get from the trivial bound \eqref{123}. \section{Small and large values near the critical line}\label{sec:largesmall} Relying on Selberg's central limit law, in particular on Theorem \ref{th:measselberglimitlaw}, we shall deduce that, for $\mathcal{L}\in\mathcal{S}^{*}$, the limiting processes of Theorem \ref{th:growth-conceptofuniv} and \ref{th:convergenceviaapoints} have a strong tendency to converge either to $g\equiv 0$ or to $g\equiv \infty$. \par \begin{theorem}\label{th:largesmall} Let $\mathcal{L}\in\mathcal{S}^{*}$ and $a\in\mathbb{C}\setminus\{0\}$. Let $c>0$ and $\mathcal{R}:=\mathcal{R}(c,1)$ be the rectangular domain defined by the vertices $\pm c \pm i$. Let $\mu:[2,\infty) \rightarrow\mathbb{R}^+$ be monotonically decreasing such that $$ \lim_{\tau\rightarrow\infty}\mu(\tau) < \tfrac{1}{2}\kappa_{\mathcal{L}} \qquad \mbox{with}\qquad \kappa_{\mathcal{L}}:= \frac{\pi }{2 d_{\mathcal{L}}}. $$ For $\tau\geq 2$ and $z\in\mathcal{R}$, we set $\varphi_{\tau}(z):= \frac{1}{2}+\frac{\mu(\tau)}{\log \tau} z + i\tau$ and $ \mathcal{L}_{\tau}(z):= \mathcal{L}\left(\varphi_{\tau}(z)\right)$. 
Then, for every $0<q<\frac{1}{2}$ with $\lim_{\tau\rightarrow\infty}\mu(\tau) < \kappa_{\mathcal{L}} q$, there exist subsets $\mathcal{W}_0,\mathcal{W}_{\infty}\subset[2,\infty)$ with \begin{align*} \liminf_{T\rightarrow\infty}\nu_T(\mathcal{W}_0) & \geq \tfrac{1}{2}-q, \\ \liminf_{T\rightarrow\infty}\nu_T(\mathcal{W}_{\infty}) & \geq \tfrac{1}{2}-q,\\ \liminf_{T\rightarrow\infty}\nu_T(\mathcal{W}_0\cup\mathcal{W}_{\infty}) & \geq 1-q, \end{align*} such that the following holds: \begin{itemize} \item[(a)] The family $\mathcal{F}:=\{\mathcal{L}_{\tau}\}_{\tau\in\mathcal{W}_0\cup\mathcal{W}_{\infty}}\subset\mathcal{H}(\mathcal{R})$ omits the two values $0$ and $a$ on $\mathcal{R}$. In particular, $\mathcal{F}$ is normal in $\mathcal{R}$. \item[(b)] For every sequence $(\tau_k)_k$ with $\tau_k \in \mathcal{W}_0$ and $\lim_{k\rightarrow\infty} \tau_k =\infty$, the sequence $(\mathcal{L}_{\tau_k})_k$ converges locally uniformly on $\mathcal{R}$ to $g\equiv 0$. \item[(c)] For every sequence $(\tau_k)_k$ with $\tau_k \in \mathcal{W}_{\infty}$ and $\lim_{k\rightarrow\infty} \tau_k =\infty$, the sequence $(\mathcal{L}_{\tau_k})_k$ converges locally uniformly on $\mathcal{R}$ to $g\equiv \infty$. \end{itemize} \end{theorem} \begin{proof} Let $\mathcal{L}\in\mathcal{S}^{*}$ and $a\in\mathbb{C}\setminus\{0\}$. Furthermore, let the function $\mu:[2,\infty)\rightarrow\mathbb{R}^+$ satisfy the conditions of the theorem.\par Then, according to Theorem \ref{th:convergenceviaapoints}, we find, for every $0<q<\frac{1}{2}$ with $\lim_{\tau\rightarrow\infty}\mu(\tau) < \kappa_{\mathcal{L}} q$, a subset $\mathcal{W}\subset[2,\infty)$ with \begin{equation*} \liminf_{T\rightarrow\infty} \nu_T(\mathcal{W})\geq 1-q \end{equation*} such that every function $\mathcal{L}_{\tau}$ with $\tau\in\mathcal{W}$ is analytic on $\mathcal{R}$ and omits there the values $0$ and $a$.
It follows from Montel's fundamental normality test (Theorem \ref{th:FNT1}) that the family $\{\mathcal{L}_{\tau}\}_{\tau\in\mathcal{W}}$ is normal in $\mathcal{R}$. \par Let $g_x(t)$ be defined by \eqref{gx} and let $m:[2,\infty)\rightarrow\mathbb{R}^+$ be a positive function with $\lim_{t\rightarrow\infty}m(t) = \infty$ such that, for any $\varepsilon>0$, the inequality $m(t)\leq g_{\varepsilon}(t)$ holds for sufficiently large $t$. Furthermore, let the sets $ W_{-\infty,1/m(t)} , W_{m(t), +\infty}\subset[2,\infty)$ be defined by \begin{align*} W_{-\infty,\frac{1}{m(t)}} &:= \left\{ t\in [2,\infty)\ : \ |\mathcal{L}(\tfrac{1}{2}+it)| \leq \tfrac{1}{m(t)} \right\}, \shortintertext{and} W_{m(t),+\infty} &:= \left\{ t\in [2,\infty)\ : \ |\mathcal{L}(\tfrac{1}{2}+it)| \geq m(t) \right\}. \end{align*} According to Theorem \ref{th:measselberglimitlaw} (c), \begin{align*} \liminf_{T\rightarrow\infty} \nu_T\left( W_{-\infty, \frac{1}{m(t)}} \right) &\geq \frac{1}{2},\\ \liminf_{T\rightarrow\infty} \nu_T\left( W_{m(t), +\infty} \right) &\geq \frac{1}{2} \end{align*} and $$ \liminf_{T\rightarrow\infty} \nu_T\left( W_{-\infty, \frac{1}{m(t)}}\cup W_{m(t), +\infty} \right) =1. $$ We set $$ \mathcal{W}_0:=\mathcal{W}\cap W_{-\infty, \frac{1}{m(t)}} \qquad \mbox{and}\qquad \mathcal{W}_{\infty}:= \mathcal{W}\cap W_{m(t), +\infty}. $$ The lower density estimates for the sets $\mathcal{W}$, $ W_{-\infty,1/m(t)}$ and $W_{m(t), +\infty}$ above imply that \begin{align*} \liminf_{T\rightarrow\infty} \nu_T\left( \mathcal{W}_{0} \right) & \geq \frac{1}{2}-q,\\ \liminf_{T\rightarrow\infty} \nu_T\left( \mathcal{W}_{\infty} \right) & \geq \frac{1}{2}-q \end{align*} and $$ \liminf_{T\rightarrow\infty} \nu_T\left( \mathcal{W}_{0}\cup \mathcal{W}_{\infty} \right) \geq 1-q.
$$ As $\mathcal{W}_0\cup \mathcal{W}_{\infty}\subset \mathcal{W}$, statement (a) of the theorem follows immediately from the construction of the set $\mathcal{W}$.\par Now, let $(\tau_k)_k$ be a sequence with $\tau_k\in \mathcal{W}_0$ and $\lim_{k\rightarrow\infty} \tau_k =\infty$. The observations that $$ \left| \mathcal{L}_{\tau}(0)\right| = \left|\mathcal{L}(\tfrac{1}{2}+i\tau) \right|\leq \frac{1}{m(\tau)} \quad\mbox{ for }\tau\in\mathcal{W}_0 \qquad \mbox{ and } \lim_{\tau\rightarrow\infty} \frac{1}{m(\tau)}=0, $$ yield that \begin{equation}\label{sel:cond1} \lim_{k\rightarrow\infty}\mathcal{L}_{\tau_k}(0)=0. \end{equation} By the normality of $\{\mathcal{L}_{\tau}\}_{\tau\in\mathcal{W}_0}$ in $\mathcal{R}$, every subsequence of $(\mathcal{L}_{\tau_k})_{k}$ converges locally uniformly on $\mathcal{R}$. Assume that there is a subsequence $(\mathcal{L}_{\tau_{k_j}})_{j}$ of $ (\mathcal{L}_{\tau_k})_{k}$ that converges locally uniformly on $\mathcal{R}$ to a limit function $g\not\equiv 0$. Then, due to \eqref{sel:cond1}, the function $g$ has a zero at $z=0$. Thus, by the theorem of Hurwitz, every function $\mathcal{L}_{\tau_{k_j}}$ with sufficiently large $j$ has at least one zero in $\mathcal{R}$. This, however, contradicts the assumption that $\{\mathcal{L}_{\tau}\}_{\tau\in\mathcal{W}_0}$ omits the value $0$. Statement (b) of our theorem is proved.\par Now, let $(\tau_k)_k$ be a sequence with $\tau_k\in \mathcal{W}_{\infty}$ and $\lim_{k\rightarrow\infty}\tau_k = \infty$. Then, it follows from $$ \left| \mathcal{L}_{\tau}(0)\right| = \left|\mathcal{L}(\tfrac{1}{2}+i\tau) \right|\geq m(\tau) \quad\mbox{ for }\tau\in\mathcal{W}_{\infty} \qquad \mbox{ and } \lim_{\tau\rightarrow\infty} m(\tau)=\infty, $$ that $$ \lim_{k\rightarrow\infty} \mathcal{L}_{\tau_k} (0) = \infty. 
$$ As $\{\mathcal{L}_{\tau}\}_{\tau\in\mathcal{W}_{\infty}}$ is normal in $\mathcal{R}$ and omits the value `$\infty$', we conclude that $(\mathcal{L}_{\tau_k})_k$ converges locally uniformly on $\mathcal{R}$ to $g\equiv \infty$. This proves statement (c) of the theorem. \end{proof} Theorem \ref{th:largesmall} provides information on the frequency of small and large values of $\mathcal{L}\in\mathcal{S}^{*}$ in funnel-shaped neighbourhoods of the critical line. This information complements Selberg's central limit law and its extensions stated in Section \ref{sec:selbergcentrallimit}: Selberg's central limit law measures the number of points $\frac{1}{2}+it$ on the critical line for which a given function $\mathcal{L}\in\mathcal{S}^*$ takes small or large values. Laurin\v{c}ikas' extension allows us to do the same for points on certain line segments, which lie close to the right of the critical line at distances of order not exceeding $1/\log t$. By Bourgade's multidimensional extension we can measure the number of points $\frac{1}{2}+it$ on the critical line such that both $\mathcal{L}(\tfrac{1}{2}+it)$ and $\mathcal{L}(\tfrac{1}{2}+it + i \frac{1}{(\log t)^{\delta}})$, $0\leq\delta\leq 1$ take either small or large values. By means of Theorem \ref{th:largesmall} we can measure the number of certain rectangular subsets of the region $$ \frac{1}{2} - \frac{c}{\log t} \leq \sigma \leq \frac{1}{2}+ \frac{c}{\log t}, \qquad t\geq 2, $$ with $c>0$, on which a given function $\mathcal{L}\in\mathcal{S}^{*}$ assumes small or large values: \begin{corollary}\label{cor:selbergsmalllarge} Let $\mathcal{L}\in\mathcal{S}^{*}$. Let $c>0$ and $\mathcal{R}:=\mathcal{R}(c,1)$ be the rectangular domain defined by the vertices $\pm c \pm i$.
Let $\mu:[2,\infty) \rightarrow\mathbb{R}^+$ be monotonically decreasing such that $$ \lim_{\tau\rightarrow\infty}\mu(\tau) < \tfrac{1}{2}\kappa_{\mathcal{L}} \qquad \mbox{with}\qquad \kappa_{\mathcal{L}}:= \frac{\pi }{2 d_{\mathcal{L}}}. $$ For $\tau\geq 2$, we set $\varphi_{\tau}(z):= \frac{1}{2}+\frac{\mu(\tau)}{\log \tau} z + i\tau$ and denote by $\mathcal{R}_{\tau}$ the image of the rectangle $\mathcal{R}$ under the mapping $\varphi_{\tau}$, i.e. $ \mathcal{R}_{\tau}:= \varphi_{\tau}\bigl(\mathcal{R}\bigr). $ Let $\overline{\mathcal{R}}_{\tau}$ denote the closure of $\mathcal{R}_{\tau}$. Then, for every real number $m>1$, every positive integer $\ell$ and every $0<q<\frac{1}{2}$ with $\lim_{\tau\rightarrow\infty}\mu(\tau) < \kappa_{\mathcal{L}} q$, there exist subsets $\mathcal{W}_{1/m},\mathcal{W}_{m}\subset[2,\infty)$ with \begin{align*} \liminf_{T\rightarrow\infty}\nu_T(\mathcal{W}_{\frac{1}{m}}) &\geq \tfrac{1}{2}-q,\\ \liminf_{T\rightarrow\infty}\nu_T(\mathcal{W}_{m}) &\geq \tfrac{1}{2}-q,\\ \liminf_{T\rightarrow\infty}\nu_T(\mathcal{W}_{\frac{1}{m}}\cup\mathcal{W}_{m}) & \geq 1-q, \end{align*} such that the following holds. \begin{itemize} \item[(a)] For any $s\in\overline{\mathcal{R}}_{\tau}$ with $\tau\in\mathcal{W}_{1/m}$ and any integer $0\leq l\leq \ell$ $$ |\mathcal{L}^{(l)}(s)|\leq \frac{1}{m}\cdot \left(\frac{\log\tau }{\mu(\tau)} \right)^{l} \qquad \mbox{ and } \qquad \mathcal{L}(s)\neq 0 . $$ \item[(b)] For any $s\in\overline{\mathcal{R}}_{\tau}$ with $\tau\in\mathcal{W}_m$ $$|\mathcal{L}(s)|\geq m \qquad \mbox{and}\qquad \left| \mathcal{L}' (s) \right| \leq \frac{1}{m} \cdot \frac{\log\tau}{ \mu(\tau)}\cdot \left| \mathcal{L}(s)\right|^2 . $$ \end{itemize} \end{corollary} \begin{proof} Corollary \ref{cor:selbergsmalllarge} is a direct consequence of Theorem \ref{th:largesmall}. However, to deduce the statements rigorously from Theorem \ref{th:largesmall}, some rescaling is necessary. 
First, we choose a sufficiently small $\eta>1$ such that $\lim_{t\rightarrow\infty} \eta \mu(t) < \kappa_{\mathcal{L}}$. Then, we set $\varphi_{\tau}^*(z) = \frac{1}{2}+\frac{ \eta\mu(\tau)}{\log \tau} z +i\tau$ and define $\mathcal{L}_{\tau}^*(z):= \mathcal{L}(\varphi_{\tau}^*(z))$. For the family $\{\mathcal{L}^*_{\tau}\}_{\tau\in[2,\infty)}$, the rectangle $\mathcal{R}:=\mathcal{R}(c,1)$ and an arbitrary $0<q<\frac{1}{2}$ with $\lim_{\tau\rightarrow\infty} \eta\mu(\tau)< \kappa_{\mathcal{L}}q$, we choose the sets $\mathcal{W}_0, \mathcal{W}_{\infty}\subset[2,\infty)$ according to Theorem \ref{th:largesmall}. Let $$\mathcal{K}:=\overline{\mathcal{R}(\tfrac{c}{\eta}, \tfrac{1}{\eta})}$$ be the compact rectangular domain defined by the vertices $\pm \frac{c}{\eta}\pm i \frac{1}{\eta}$. As $\eta>1$, $\mathcal{K}$ is a compact subset of $\mathcal{R}$. Moreover, we have $$ \varphi_{\tau}^* (\mathcal{K}) =\overline{\varphi_{\tau}(\mathcal{R})} =\overline{\mathcal{R}}_{\tau} $$ for every $\tau\in[2,\infty)$. Let $(f_k)_k$ be a sequence of functions $f_k\in \mathcal{H}(\mathcal{R})$ which converges locally uniformly on $\mathcal{R}$ to $f\equiv 0$. Then, according to the theorem of Weierstrass, the corresponding sequence $(f_k^{(l)})_k$ of $l$-th derivatives with $l\in\mathbb{N}_0$ converges also locally uniformly on $\mathcal{R}$ to $f\equiv 0$.
Thus, it follows from Theorem \ref{th:largesmall} (b) and (c) that, for arbitrary $m>1$ and $\ell\in\mathbb{N}$, we find a number $\tau_0\geq 2$ such that both $$ \left| \frac{\mbox{\ d}^{l}}{\mbox{\ d} z^{l}}\ \mathcal{L}^*_{\tau}(z)\right| \leq \frac{1}{m} $$ for every $z\in\mathcal{K}$, every $\tau\in\mathcal{W}_{\frac{1}{m}}:=\mathcal{W}_0\cap[\tau_0,\infty)$ and every integer $0\leq l\leq \ell$, and $$ \left| \frac{\mbox{\ d}^{l}}{\mbox{\ d} z^{l}}\ \frac{1}{\mathcal{L}^*_{\tau}(z)}\right| \leq \frac{1}{m} $$ for every $z\in\mathcal{K}$, every $\tau\in\mathcal{W}_{m}:=\mathcal{W}_{\infty}\cap[\tau_0,\infty)$ and every integer $0\leq l\leq \ell$. We observe that $$ \frac{\mbox{\ d}^{l}}{\mbox{\ d} z^{l}}\ \mathcal{L}^*_{\tau}(z) = \left( \frac{\eta \mu(\tau)}{\log\tau}\right)^l \cdot \mathcal{L}_{\tau}^{*(l)} (z) $$ for $0\leq l \leq \ell$ and $$ \frac{\mbox{\ d}^{l}}{\mbox{\ d} z^{l}}\ \frac{1}{\mathcal{L}^*_{\tau}(z)} = (-1)^l \left( \frac{\eta \mu(\tau)}{\log\tau}\right)^l \cdot \frac{ \mathcal{L}_{\tau}^{*(l)}(z)}{(\mathcal{L}_{\tau}^{*}(z))^2} $$ for $l=0$ and $l=1$. For sake of simplicity, we omit here to evaluate analogous expressions for the $l$-th derivative if $l\geq 2$. Consequently, we obtain that $$ \left| \mathcal{L}_{\tau}^{*(l)} (z) \right|\leq\frac{1}{m} \cdot \left( \frac{\log\tau}{ \eta \mu(\tau)}\right)^l \leq \frac{1}{m} \cdot \left( \frac{\log\tau}{ \mu(\tau)}\right)^l $$ for every $z\in\mathcal{K}$, every $\tau\in\mathcal{W}_{\frac{1}{m}}$ and every integer $0\leq l\leq \ell$ and $$ \left| \frac{\mathcal{L}_{\tau}^{*(l)} (z)}{ (\mathcal{L}_{\tau}^{*}(z))^2} \right| \leq \frac{1}{m} \cdot \left( \frac{\log\tau}{ \eta \mu(\tau)}\right)^l\leq \frac{1}{m} \cdot \left( \frac{\log\tau}{ \mu(\tau)}\right)^l $$ for every $z\in\mathcal{K}$, every $\tau\in\mathcal{W}_{m}$ and $l=0,1$. Note that, for the choice $l=0$, the latter inequality implies that $$ \left|\mathcal{L}_{\tau}^{*}(z)\right| \geq m.
$$ By setting $s:=\varphi^*_{\tau}(z)$, we can identify $\mathcal{L}_{\tau}^{*(l)}(z)$ on $\mathcal{K}$ with $\mathcal{L}^{(l)}(s)$ on $\varphi_{\tau}^*(\mathcal{K}) = \overline{\mathcal{R}}_{\tau}$. The estimates \begin{align*} \liminf_{T\rightarrow\infty}\nu_T(\mathcal{W}_{\frac{1}{m}}) &\geq \tfrac{1}{2}-q,\\ \liminf_{T\rightarrow\infty}\nu_T(\mathcal{W}_{m}) &\geq \tfrac{1}{2}-q,\\ \liminf_{T\rightarrow\infty}\nu_T(\mathcal{W}_{\frac{1}{m}}\cup\mathcal{W}_{m}) & \geq 1-q, \end{align*} hold due to the definition of the sets $\mathcal{W}_{1/m}\subset \mathcal{W}_0$, $\mathcal{W}_m\subset \mathcal{W}_{\infty}$ and the choice of $\mathcal{W}_0$ and $\mathcal{W}_{\infty}$. The corollary is proved. \end{proof} Corollary \ref{cor:selbergsmalllarge} has some nice applications in the further course of our investigations. We shall deduce the following: \begin{itemize} \item[(i)] The Riemann zeta-function assumes both arbitrarily small and arbitrarily large values on every path to infinity which lies inside the region defined by $$ \tfrac{1}{2}-\frac{c}{ \log t}<\sigma < \tfrac{1}{2}+\frac{c}{ \log t}, \qquad t\geq 2 $$ with any fixed $c>0$; see Corollary \ref{cor:curvessmalllarge}. \item[(ii)] Let $\alpha,c>0$. Then, there is an $a\in\mathbb{C}$ with $|a|=\alpha$ and a sequence $(t_n)_n$ of numbers $t_n\in[2,\infty)$ such that $$ \lim_{n\rightarrow\infty} \zeta\left(\tfrac{1}{2} - \frac{c}{\log t_n} +it_n \right) = a; $$ see Corollary \ref{cor:apointsleft}. \item[(iii)] There is a subinterval $A\subset [0,2\pi)$ of length at least $\frac{\pi}{4}$ such that, for every $\theta\in A$, there is a sequence $(t_n)_n$ of numbers $t_n\in[2,\infty)$ with $$ \zeta(\tfrac{1}{2}+it_n)\neq 0, \qquad \lim_{n\rightarrow\infty} \zeta(\tfrac{1}{2}+it_n) = 0 \qquad \mbox{ and } \qquad \arg \zeta(\tfrac{1}{2}+it_n) \equiv \theta \mod 2\pi; $$ see Theorem \ref{th:zeroasintpoint}.
\end{itemize} \section{Unboundedness on the critical line in the extended Selberg class}\label{sec:unboundedness} Selberg's central limit law implies that, for $\mathcal{L}\in\mathcal{S}^{*}$, \begin{equation}\label{eq:alphainfsup} \alpha_{\mathcal{L},\scalebox{0.8}{\mbox{inf}}}:=\liminf_{\tau\rightarrow\infty} \left|\mathcal{L}(\tfrac{1}{2}+i\tau) \right|=0\quad \mbox{and}\quad \alpha_{\mathcal{L},\scalebox{0.8}{\mbox{sup}}}:=\limsup_{\tau\rightarrow\infty} \left|\mathcal{L}(\tfrac{1}{2}+i\tau) \right|=\infty. \end{equation} Roughly speaking, every $\mathcal{L}\in\mathcal{S}^{*}$ assumes both arbitrarily small and arbitrarily large values on the critical line. It seems reasonable to expect that \eqref{eq:alphainfsup} holds for every function $\mathcal{L}\in\mathcal{S}^{\#}$ with $d_{\mathcal{L}}>0$. However, it turns out to be quite challenging to prove \eqref{eq:alphainfsup} for a general function $\mathcal{L}\in\mathcal{S}^{\#}$. In this section, we derive some sufficient conditions for a function $\mathcal{L}\in\mathcal{S}^{\#}$ to be unbounded on the critical line. Besides some fundamental insights in the extended Selberg class due to Kaczorowski \& Perelli \cite{kaczorowskiperelli:2002, kaczorowskiperelli:2005, kaczorowskiperelli:2011}, we rely here basically on the general theory of ordinary Dirichlet series, for which the reader is referred to the textbook of Titchmarsh \cite[Chapter 9]{titchmarsh:1939}. At the end of this section, we give some specific examples of functions in $\mathcal{S}$ for which our considerations imply that \eqref{eq:alphainfsup} is true. \subsection{Characteristic convergence abscissae in the extended Selberg class}\label{sec:charconvabs} For an ordinary Dirichlet series \begin{equation}\label{dirichletseries1} A(s) = \sum_{n=1}^{\infty} \frac{a(n)}{n^s} \end{equation} with coefficients $a(n)\in\mathbb{C}$, we can define certain characteristic convergence abscissae. 
If a Dirichlet series converges at a point $s_0\in\mathbb{C}$, then it converges uniformly in any angular domain $$ A_{\delta}(s_0):=\left\{ s\in\mathbb{C}\, : \, \left| \arg (s-s_0) \right| \leq \frac{\pi}{2} - \delta \right\} $$ with an arbitrary real number $0<\delta<\frac{\pi}{2}$. Consequently, the region of convergence of a Dirichlet series is always a half-plane and it is reasonable to define its abscissa of convergence as the real number $\sigma_c\in\mathbb{R}\cup\{\pm\infty\}$ such that the Dirichlet series converges in the half-plane $\sigma>\sigma_c$ and diverges in the half-plane $\sigma<\sigma_c$. It follows essentially from Abel's summation formula that the abscissa of convergence is given by \begin{equation}\label{eq:sigmac} \sigma_c = \limsup_{x\rightarrow\infty} \frac{\log \left|\sum_{n\leq x} a(n) \right|}{\log x} \qquad \mbox{or} \qquad \sigma_c = \limsup_{x\rightarrow\infty} \frac{\log \left|\sum_{n> x} a(n) \right|}{\log x}, \end{equation} according to whether $\sum_{n=1}^{\infty}a(n)$ diverges or converges.\par By a similar argument, the region of absolute convergence of a Dirichlet series is also a half-plane. For a Dirichlet series, we define the abscissa of absolute convergence as the real number $\sigma_a\in\mathbb{R}\cup\{\pm\infty\}$ such that the Dirichlet series converges absolutely in the half-plane $\sigma>\sigma_a$, but does not converge absolutely in the half-plane $\sigma<\sigma_a$.
Abel's summation formula yields that $$ \sigma_a = \limsup_{x\rightarrow\infty} \frac{\log \sum_{n\leq x} \left| a(n) \right|}{\log x} \qquad \mbox{or} \qquad \sigma_a = \limsup_{x\rightarrow\infty} \frac{\log \sum_{n> x} \left| a(n) \right|}{\log x}, $$ according to whether $\sum_{n=1}^{\infty}\left| a(n) \right|$ diverges or converges.\par Besides $\sigma_c$ and $\sigma_a$, we define the abscissa of uniform convergence $\sigma_u$ as the infimum of all $\sigma^* \in\mathbb{R} \cup\{\pm\infty\}$ for which the Dirichlet series converges uniformly in the half-plane $\sigma\geq \sigma^*$.\par The abscissae $\sigma_c$, $\sigma_u$ and $\sigma_a$ of a given Dirichlet series do not necessarily coincide. Trivially, one has \begin{equation}\label{eq:abs1} -\infty\leq\sigma_c \leq \sigma_u \leq \sigma_a \leq \infty. \end{equation} It can be shown that \begin{equation}\label{eq:abs2} \sigma_a - \sigma_c \leq 1 \end{equation} if at least one of the two abscissae $\sigma_c$ and $\sigma_a$ is finite. In particular, the latter inequality is sharp; equality holds, for example, for Dirichlet $L$-functions with non-principal characters. Moreover, one has \begin{equation}\label{eq:abs3} \sigma_a - \sigma_u \leq \frac{1}{2}, \end{equation} if at least one of the two abscissae $\sigma_u$ and $\sigma_a$ is finite. According to a result of Bohnenblust \& Hille \cite{bohnenblusthille:1931}, this inequality is also sharp.\par {\bf The analytic character of Dirichlet series.} We suppose in the following that $A(s)$ is an ordinary Dirichlet series with finite convergence abscissae $\sigma_a$, $\sigma_c$ and $\sigma_u$. As a consequence of the theorem of Weierstrass, the Dirichlet series $A(s)$ defines an analytic function in its half-plane of convergence $\sigma>\sigma_c$. Possibly, this function may be continued meromorphically to a larger half-plane $\sigma > \sigma_0$ with $\sigma_0\leq \sigma_c$.
If existent, we denote this meromorphic extension also by $A(s)$.\par {\bf Boundedness in the half-plane of uniform convergence.} Bohr \cite{bohr:1913} proved that a Dirichlet series $A(s)$ is bounded in every half-plane $\sigma\geq \sigma^*$ with $\sigma^*>\sigma_u$ and that $A(s)$ is unbounded in every half-plane $\sigma\geq \sigma^*$ with $\sigma^* <\sigma_u$ to which $A(s)$ can be continued meromorphically. \par In view of Bohr's result, for given $\mathcal{L}\in\mathcal{S}^{\#}$, it makes sense to localize the abscissa $\sigma_u$ in order to retrieve information on the boundedness and unboundedness of $\mathcal{L}$.\par {\bf The abscissae of convergence and absolute convergence for functions in the extended Selberg class.} The definition of the extended Selberg class implies that $\sigma_a\leq 1$ for every $\mathcal{L}\in\mathcal{S}^{\#}$. According to Kaczorowski \& Perelli \cite{kaczorowskiperelli:1999}, the elements in the extended Selberg class of degree $d_{\mathcal{L}}=0$ are given by certain Dirichlet polynomials. Thus, in this case, we have $\sigma_c=\sigma_u=\sigma_a = -\infty$. For all functions $\mathcal{L}\in\mathcal{S}^{\#}$ with non-zero degree, we expect that $\sigma_a=1$. However, it seems difficult to prove this in general.\par Of course, if $\mathcal{L}\in\mathcal{S}^{\#}$ has a pole at $s=1$, then $\sigma_c = \sigma_u=\sigma_a = 1$. Perelli \cite{perelli:2007} states that $\sigma_a=1$ holds for all $\mathcal{L}\in\mathcal{S}$ if Selberg's orthonormality conjecture (S.6$^{**}$) is true.\par Relying on non-linear twists of functions in the extended Selberg class, we are able to deduce lower bounds for $\sigma_a$ and $\sigma_c$ which depend on the degree $d_{\mathcal{L}}$ of $\mathcal{L}\in\mathcal{S}^{\#}$. Kaczorowski \& Perelli \cite{kaczorowskiperelli:2002, kaczorowskiperelli:2005, kaczorowskiperelli:2011} introduced linear and non-linear twists of functions $\mathcal{L}\in\mathcal{S}^{\#}$ to study the structure of the Selberg class, resp. 
the extended Selberg class. Using this machinery, they were able to obtain partial results towards the degree conjecture; see Section \ref{sec:classG}. In the sequel, we do not want to go too deep into the theory of non-linear twists. Thus, we state the results of Kaczorowski \& Perelli \cite{kaczorowskiperelli:2005} only in a very weak form which is sufficient for our purpose.\par Let $\mathcal{L}\in\mathcal{S}^{\#}$ with $d_{\mathcal{L}}>0$ and Dirichlet series representation \begin{equation}\label{eq:dirichletreprtwists} \mathcal{L}(s)=\sum_{n=1}^{\infty}\frac{a(n)}{n^s}, \qquad\sigma>1. \end{equation} For a parameter $\alpha>0$, the standard non-linear twist of $\mathcal{L}$ is defined by $$ \mathcal{L}(s,\alpha) = \sum_{n=1}^{\infty} \frac{a(n)}{n^s} \exp\left( -2\pi i \alpha n^{1/d_{\mathcal{L}}}\right), \qquad \sigma>1. $$ It follows from Kaczorowski \& Perelli \cite[Theorem 1 and 2]{kaczorowskiperelli:2005} that, for every parameter $\alpha>0$, the function $\mathcal{L}(s,\alpha)$ can be continued meromorphically to the whole complex plane and that there exists an $\alpha^*>0$ such that $\mathcal{L}(s,\alpha^*)$ has a simple pole at $$s=s_0:= \frac{d_{\mathcal{L}}+1}{2d_{\mathcal{L}}} + i\frac{{\rm{Im} } \mu_{\mathcal{L}}}{d_{\mathcal{L}}},$$ where $\mu_{\mathcal{L}}:=\sum_{j=1}^f (1-2 \mu_j)$ is defined by the data of the functional equation of $\mathcal{L}$; see Chapter \ref{chapt:classG}. It is essentially the pole of $\mathcal{L}(s,\alpha^*)$ at $s=s_0$ which gives us a lower bound for the abscissa of absolute convergence of the Dirichlet series \eqref{eq:dirichletreprtwists}. \begin{corollary}\label{cor:twists} Let $\mathcal{L}\in\mathcal{S}^{\#}$ with $d_{\mathcal{L}}>0$. Then, the abscissa of absolute convergence $\sigma_a$ of the Dirichlet series defining $\mathcal{L}$ is bounded by \begin{equation}\label{eq:abs4} \frac{1}{2} + \frac{1}{2 d_{\mathcal{L}}} \leq \sigma_a \leq 1. 
In particular, $\mathcal{L}(s,\alpha)$ with $\alpha>0$ is analytic in the half-plane $\sigma>\sigma_a$.
\end{proof} As a byproduct of Corollary \ref{cor:twists}, we get that there are no functions $\mathcal{L}\in\mathcal{S}^{\#}$ of degree $0<d_{\mathcal{L}}<1$.\par From the simple pole of $\mathcal{L}(s,\alpha^*)$ at the point $s=s_0$, Kaczorowski \& Perelli \cite{kaczorowskiperelli:2005} deduced an $\Omega$-result for truncated sums of the Dirichlet coefficients of $\mathcal{L}$. They showed that, for $\mathcal{L}\in\mathcal{S}^{\#}$ with $d_{\mathcal{L}}\geq 1$ and Dirichlet series representation \eqref{eq:dirichletreprtwists}, \begin{equation}\label{eq:omegacoeffsum} \sum_{n\leq x} a(n) = x \cdot \mbox{res}_{s=1}\ \mathcal{L}(s) + \Omega\left(x^{\frac{d_{\mathcal{L}}-1}{2d_{\mathcal{L}}}} \right). \end{equation} By means of \eqref{eq:sigmac}, this yields a lower bound for the abscissa of convergence of $\sum_{n=1}^{\infty} \frac{a(n)}{n^s}$. \begin{corollary}\label{cor:sigmac} Let $\mathcal{L}\in\mathcal{S}^{\#}$ with $d_{\mathcal{L}}>0$. Then, the abscissa of convergence $\sigma_c$ of the Dirichlet series defining $\mathcal{L}$ is bounded by $$ \frac{1}{2} - \frac{1}{2 d_{\mathcal{L}}} \leq \sigma_c \leq 1. $$ \end{corollary} \begin{proof} Let $\mathcal{L}\in\mathcal{S}^{\#}$ with $d_{\mathcal{L}}>0$ and Dirichlet series representation $\mathcal{L}(s)=\sum_{n=1}^{\infty}\frac{a(n)}{n^s}$ in $\sigma>1$. The assumption $d_{\mathcal{L}}>0$ implies that $d_{\mathcal{L}}\geq 1$; see Section \ref{sec:classG}. Let $\sigma_c$ denote the abscissa of convergence of the Dirichlet series defining $\mathcal{L}$. The upper bound $\sigma_c\leq 1$ follows directly from axiom (S.1) in the definition of the extended Selberg class. If $\mathcal{L}$ has a pole at $s=1$, then we conclude immediately that $\sigma_c =1$. Thus, we may suppose that $\mathcal{L}$ has no pole at $s=1$. 
In this case, $ \mbox{res}_{s=1}\ \mathcal{L}(s) = 0$ and we get by combining \eqref{eq:sigmac} with \eqref{eq:omegacoeffsum} that $$ \sigma_c = \limsup_{x\rightarrow\infty} \frac{\log \left|\sum_{n\leq x} a(n) \right|}{\log x} \geq \frac{1}{2} - \frac{1}{2 d_{\mathcal{L}}}. $$ The assertion is proved. \end{proof} By means of \eqref{eq:abs1} and \eqref{eq:abs3}, we deduce from Corollary \ref{cor:sigmac} the following bounds for the abscissa of uniform convergence $\sigma_u$. \begin{corollary} Let $\mathcal{L}\in\mathcal{S}^{\#}$ with $d_{\mathcal{L}}>0 $. Then, the abscissa of uniform convergence $\sigma_u$ of the Dirichlet series defining $\mathcal{L}$ is bounded by $$ \max\left\{\sigma_a - \frac{1}{2}, \frac{1}{2}-\frac{1}{2d_{\mathcal{L}}} \right\}\leq \sigma_u \leq 1 . $$ \end{corollary} We expect that $\sigma_u = 1$ for all $\mathcal{L}\in\mathcal{S}^{\#}$. If $\mathcal{L}\in\mathcal{S}$ has a polynomial Euler product representation (S.3$^*$) and satisfies the prime mean-square condition (S.6), Steuding \cite[Chapt. 5]{steuding:2007} showed that $\mathcal{L}$ is universal in the sense of Voronin at least inside the strip $$ \max\left\{\frac{1}{2},\, 1-\frac{1}{d_{\mathcal{L}}}\right\}<\sigma<1. $$ This implies, in particular, that $\mathcal{L}$ is unbounded on every vertical line inside this strip and, consequently, by Bohr's fundamental observation stated above, that $\sigma_u = 1$. {\bf Problem.} Is it possible to prove that, for every $\mathcal{L}\in\mathcal{S}^{\#}$ with $d_{\mathcal{L}}>0$, $$ \sigma_u = \sigma_a = 1? $$ \subsection{Almost periodicity and a Phragm\'{e}n-Lindel\"of argument} According to Bohr \cite{bohr:1913}, we know that $\mathcal{L}\in\mathcal{S}^{\#}$ is unbounded in every open half-plane containing the line $\sigma=\sigma_u$, where $\sigma_u$ is the abscissa of uniform convergence of the Dirichlet series defining $\mathcal{L}$. 
Almost periodicity and a Phragm\'{e}n-Lindel\"of argument allow us to make statements about unboundedness on vertical half-lines to the left of $\sigma_u$. {\bf Almost periodicity in $\sigma>\sigma_u$.} Bohr \cite{bohr:1922} revealed that every Dirichlet series is almost periodic in its half-plane of uniform convergence. \begin{theorem}[Bohr, 1922]\label{th:almostperiod} Let $A(s)$ be an ordinary Dirichlet series and $\sigma_u$ its abscissa of uniform convergence. Then, for every $\sigma>\sigma_u$ and every $\varepsilon>0$, there exists a positive real number $\ell:=\ell(\sigma,\varepsilon)$ such that every interval $[t_0,t_0+\ell]\subset\mathbb{R}$ of length $\ell$ contains at least one number $\tau$ with the property that $$ \left|A(\sigma+it) - A(\sigma + i(t+\tau)) \right|< \varepsilon \qquad \mbox{ for all }t\in\mathbb{R}. $$ \end{theorem} For the general theory of almost periodic functions, the reader is referred to Bohr \cite{bohr:1924, bohr:1925, bohr:1926} and Besicovitch \cite{besicovitch:1932}.\par {\bf Unboundedness on vertical half-lines in $\sigma<\sigma_u$.} We shall prove the following lemma. \begin{lemma} \label{lem:unbounded} Let $\mathcal{L}\in\mathcal{S}^{\#}$. \begin{itemize} \item[(a)] Let $\sigma_u$ denote the abscissa of uniform convergence of the Dirichlet series defining $\mathcal{L}$. Suppose, that $\mathcal{L}$ is unbounded in the half-plane $\sigma> \sigma_u$. Then, for every $t_0>0$, the function $\mathcal{L}$ is unbounded both in the region defined by $$\sigma> \sigma_u, \qquad t\geq t_0,$$ and in the region defined by $$\sigma> \sigma_u, \qquad t\leq -t_0.$$ \item[(b)] Let $\sigma_0\leq 1$ and $t_0>0$. Suppose that $\mathcal{L}$ is unbounded in the region defined by $$ \sigma> \sigma_0,\qquad t\geq t_0. $$ Then, for every $\sigma^* \leq \sigma_0$, the function $\mathcal{L}$ is unbounded on the vertical half-line $L_{\sigma^*}:=\{\sigma^* + it\, :\, t\geq t_0\}$. 
In Lemma \ref{lem:unbounded} (a) we demand that $\mathcal{L}\in\mathcal{S}^{\#}$ is unbounded in its half-plane of uniform convergence $\sigma>\sigma_u$.
In general, however, we cannot exclude that there are functions $\mathcal{L}\in\mathcal{S}^{\#}$ which are bounded in $\sigma>\sigma_u$. \par \subsection{Mean-square values in the extended Selberg class}\label{subsec:meansquare} In the theory of Dirichlet series, mean values on vertical lines play an important role. The following fundamental result goes back to Carlson \cite{carlson:1922}. \begin{theorem}[Carlson's theorem, 1922]\label{th:carlson} Let the function $A(s)$ be defined by a Dirichlet series of the form \eqref{dirichletseries1}. Suppose that, for $\sigma\geq\sigma_0$, the function $A(s)$ is analytic except for finitely many poles, of finite order and satisfies $$ \limsup_{T\rightarrow\infty} \frac{1}{T} \int_{-T}^T \left|A (\sigma_0+it) \right|^2 \mbox{\ d} t <\infty. $$ Then, for all $\sigma>\sigma_0$, $$ \lim_{T\rightarrow\infty}\frac{1}{T} \int_{-T}^T \left|A (\sigma+it) \right|^2 \mbox{\ d} t = \sum_{n=1}^{\infty} \frac{|a_n|^2}{n^{2\sigma}}. $$ \end{theorem} Carlson's theorem may be interpreted as a special case of Parseval's theorem in the theory of Hilbert spaces. For a proof, we refer to the original paper of Carlson \cite{carlson:1922} or to the textbook of Titchmarsh \cite[\S 9.51]{titchmarsh:1939}.\par Let $A(s)$ be a Dirichlet series which can be continued meromorphically to the half-plane $\sigma>\sigma_0$ with some $\sigma_0\in\mathbb{R}$ such that $A(s)$ is of finite order in $\sigma>\sigma_0$. In view of Carlson's theorem, it makes sense to define for $A(s)$ the abscissa of bounded mean-square $\sigma_m$ by taking $\sigma_m$ as the infimum of all $\sigma^*\in(\sigma_0,\infty)$ for which \begin{equation}\label{limsup} \limsup_{T\rightarrow\infty} \frac{1}{T} \int_{-T}^T \left|A (\sigma^*+it) \right|^2 \mbox{\ d} t < \infty. \end{equation} In the following, we call the half-plane $\sigma>\sigma_m$ the mean-square half-plane of $A(s)$. 
According to Titchmarsh \cite[\S 9.52]{titchmarsh:1939}, the abscissae $\sigma_m$ and $\sigma_a$ are related as follows: \begin{equation}\label{eq:sigmama} \sigma_m \geq \max \{\sigma_a - \tfrac{1}{2}, \sigma_0\}. \end{equation} Landau \cite[\S 226, Theorem 41]{landau:1953} showed that $$ \sigma_m \leq \tfrac{1}{2}\left(\sigma_a + \sigma_c\right). $$ According to Bohr, $A(s)$ is bounded on every vertical line in the half-plane $\sigma>\sigma_u$. This implies that the inequality $$ \sigma_m \leq \sigma_u $$ holds.\par Let $\theta_A(\sigma)$ denote the growth order of $A(s)$ as defined in Section \ref{sec:orderofgrowth}. Then, we have $\theta_A(\sigma)\leq \frac{1}{2}$ for all $\sigma>\sigma_m$; see Titchmarsh \cite[\S 9.55]{titchmarsh:1939}.\par {\bf Mean-square value in the extended Selberg class.} Relying on a result of Potter \cite{potter:1940}, who studied the mean-square value for Dirichlet series satisfying a quite general functional equation, Steuding \cite[Chapt. 6, Corollary 6.11\,]{steuding:2007} deduced that, for every function $\mathcal{L}\in\mathcal{S}$ with $d_{\mathcal{L}}>0$, \begin{equation}\label{eq:absmean} \sigma_m \leq \max \left\{\frac{1}{2},\, 1-\frac{1}{d_{\mathcal{L}}}\right\}. \end{equation} We easily deduce from Potter's result that \eqref{eq:absmean} holds not only for every function in $\mathcal{S}$ but also for every function in $\mathcal{S}^{\#}_R$. \par If $\mathcal{L}\in\mathcal{S}^{\#}_R$ has degree $d_{\mathcal{L}}=1$, then the inequalities \eqref{eq:abs4}, \eqref{eq:sigmama} and \eqref{eq:absmean} assure that $\sigma_m=\frac{1}{2}$. We expect that $\sigma_m = \frac{1}{2}$ for every function $\mathcal{L}\in\mathcal{S}^{\#}_R$. However, to prove this in general seems to be very difficult. If we assume that $\mathcal{L}\in\mathcal{S}^{\#}_R$ satisfies the Lindel\"of hypothesis, then a result of Steuding \cite[Chapt. 
$$ \alpha_{\mathcal{L},\scalebox{0.8}{\mbox{sup}}}(\sigma):=\limsup_{t\rightarrow\infty} \left|\mathcal{L}(\sigma+it)\right| = \infty \qquad\mbox{or}\qquad \alpha^{-}_{\mathcal{L},\scalebox{0.8}{\mbox{sup}}}(\sigma):=\limsup_{t\rightarrow-\infty} \left|\mathcal{L}(\sigma+it)\right| = \infty. $$
\begin{theorem}[Hardy, Ingham \& Polya, 1927] \label{th:convexity_meanvalue} Let the function $f$ be analytic in the strip $\sigma_1<\sigma<\sigma_2$ and such that $|f|$ is continuous on the closure of the strip. Suppose that $f$ satisfies $$ f(\sigma+it) \ll e^{e^{k|t|}} \qquad \mbox{with } \qquad 0<k<\tfrac{\pi}{\sigma_2 - \sigma_1} $$ uniformly in $\sigma_1<\sigma<\sigma_2$. If, for arbitrary $p>0$, there are constants $A,B>0$ such that for every $T\geq 0$ $$ \frac{1}{2T} \int_{-T}^{T} \left| f(\sigma_1+it) \right|^{p} \mbox{\ d} t \leq A \qquad \mbox{and} \qquad \frac{1}{2T} \int_{-T}^{T} \left| f(\sigma_2+it) \right|^{p} \mbox{\ d} t \leq B , $$ then $$ \frac{1}{2T} \int_{-T}^{T} \left| f(\sigma+it) \right|^{p} dt \leq A^{\frac{\sigma_2 - \sigma}{\sigma_2 - \sigma_1}}B^{\frac{\sigma - \sigma_1}{\sigma_2 - \sigma_1}} $$ for every $\sigma_1\leq \sigma \leq \sigma_2$ and $T\geq 0$. \end{theorem} We shall now proceed to prove Lemma \ref{lem:unboundmeanvalue}. \begin{proof}[Proof of Lemma \ref{lem:unboundmeanvalue}] For $\sigma\in\mathbb{R}$, we define $$ J(\sigma):=\limsup_{T\rightarrow\infty} \frac{1}{2T} \int_{-T}^{T} \left|\mathcal{L}(\sigma+it) \right|^2 \mbox{\ d} t . $$ Suppose that there exists a constant $A>0$ such that $J(\frac{1}{2})\leq A$. Then, according to Carlson's theorem, $$ J(\sigma)=\sum_{n=1}^{\infty}\frac{|a(n)|^2}{n^{2\sigma}}\qquad\mbox{ for } \sigma>\tfrac{1}{2}. $$ Due to the divergence of $\sum_{n=1}^{\infty} |a(n)|^2 n^{-1}$, we have \begin{equation}\label{J12} \lim_{\sigma\rightarrow \frac{1}{2} +}J(\sigma) = \infty. \end{equation} Theorem \ref{th:convexity_meanvalue}, however, implies that $$ J(\sigma) \leq A \cdot J(\tfrac{3}{4}) \qquad \mbox{ for } \tfrac{1}{2}\leq \sigma \leq \tfrac{3}{4}. $$ This yields a contradiction to \eqref{J12}. Hence, $J(\frac{1}{2})=\infty$ and, consequently, $\alpha_{\mathcal{L},\scalebox{0.8}{\mbox{sup}}}=\infty$ or $\alpha^{-}_{\mathcal{L},\scalebox{0.8}{\mbox{sup}}}=\infty$. 
\end{proof} \subsection{Summary: The quantities \texorpdfstring{$\alpha_{\mathcal{L} ,inf}$}{} and \texorpdfstring{$\alpha_{\mathcal{L} ,sup}$}{} for \texorpdfstring{$\mathcal{L}\in\mathcal{S}^{\#}$}{}} \label{subsec:summaryunboundedness} In the following corollary we gather sufficient conditions which assure that, for a given function $\mathcal{L}\in\mathcal{S}^{\#}$, $$ \alpha_{\mathcal{L},\scalebox{0.8}{\mbox{sup}}}:=\limsup_{t\rightarrow\infty} \left|\mathcal{L}(\tfrac{1}{2}+it)\right| = \infty. $$ \begin{corollary}\label{cor:unbound1} Let $\mathcal{L}\in\mathcal{S}^{\#}$ with $d_{\mathcal{L}}>0$. Suppose that $\mathcal{L}$ satisfies at least one of the following conditions. \begin{itemize} \item[(a)] $\mathcal{L}$ is unbounded in some region defined by $\sigma> \sigma_0$, $t\geq t_0$ with some $\sigma_0\geq \frac{1}{2}$ and $t_0>0$. \item[(b)] $\mathcal{L}$ has a pole at $s=1$. \item[(c)] $\mathcal{L}$ is universal in the sense of Voronin in some strip $\frac{1}{2}\leq\sigma_1<\sigma<\sigma_2\leq 1$. \item[(d)] $\mathcal{L}\in\mathcal{S}^*$. \end{itemize} Then, $\alpha_{\mathcal{L},\scalebox{0.8}{\mbox{sup}}} = \infty.$ \end{corollary} \begin{proof} If $\mathcal{L}\in\mathcal{S}^{\#}$ satisfies property (a), then the statement of the corollary follows from Lemma \ref{lem:unbounded} (b). If $\mathcal{L}\in\mathcal{S}^{\#}$ has a pole at $s=1$, then $\mathcal{L}$ is unbounded in the half-plane $\sigma>\sigma_u=1$. By Lemma \ref{lem:unbounded} (a), $\mathcal{L}$ is unbounded in the region defined by $\sigma>1$, $t\geq 1$. Thus, property (b) is a special case of property (a). Similarly, property (c) is also a special case of (a): a Voronin-type universality property for a given function $\mathcal{L}\in\mathcal{S}^{\#}$ implies that $\mathcal{L}$ is unbounded in the region defined by $\sigma>\frac{1}{2}$, $t\geq 1$. If $\mathcal{L}$ satisfies property (d), the statement follows from Selberg's central limit law; see for example Theorem \ref{th:largesmall} (b). 
If $\mathcal{L}\in\mathcal{S}^{\#}$ satisfies property (a), then the statement of the corollary follows directly from Lemma \ref{lem:unboundmeanvalue}.
Although the Grand Riemann hypothesis asserts that every function $\mathcal{L}\in\mathcal{S}$ has all its non-trivial zeros on the critical line, very few can be verified about zeros located on $\sigma=\frac{1}{2}$.
Recently, Mukhopadhyay, Srinivas \& Rajkumar \cite{mukhopadhyay:2008} showed that all functions in the Selberg class of degree $d_{\mathcal{L}} \leq 2$ satisfying some rather general conditions\footnote{For $\mathcal{L}\in\mathcal{S}$ with $d_{\mathcal{L}}=2$, the functional equation has to be such that the quantity $(2\pi)^{d_{\mathcal{L}}/2} Q \lambda^{1/2}$ is irrational and such that the quantity $\mu_{\mathcal{L}}:=\mu_{p_{\mathcal{L}}}$ is real. Moreover, the Dirichlet coefficients of $\mathcal{L}$ have to satisfy $\sum_{n\leq x} |a(n)|^2 = O(x)$. } have infinitely many zeros on the critical line. Mukhopadhyay et al. rely on a method due to Landau,\footnote{For a description of the method, see Titchmarsh \cite[\S 10.5]{titchmarsh:1986}.} which is based on the different asymptotic behaviour of the integrals $$ \int_T^{2T} Z_{\mathcal{L}}(t)\ \mbox{\ d} t \qquad \mbox{ and } \int_T^{2T} |Z_{\mathcal{L}}(t)|\ \mbox{\ d} t,\qquad \mbox{as } T\rightarrow\infty, $$ where $Z_{\mathcal{L}}(t)$ is the analogue of Hardy's $Z$-function for $\mathcal{L}\in\mathcal{S}$. For all these $\mathcal{L}$-functions mentioned above, the respective results on their zeros assure that $\alpha_{\mathcal{L},\scalebox{0.8}{\mbox{inf}}}=0$. \par Both Dirichlet $L$-functions with primitive character and Dedekind zeta-functions of quadratic fields have a sufficiently `nice' behaving Euler product such that a Voronin-type universality theorem can be verified for them in the strip $\frac{1}{2}<\sigma<1$ (see Bagchi \cite{bagchi:1982} and Reich \cite{reich:1980, reich:1982}, respectively). By Corollary \ref{cor:unbound1}, this implies that they satisfy $\alpha_{\mathcal{L},\scalebox{0.8}{\mbox{sup}}}=\infty$. \chapter{a-point-distribution near the critical line} \label{chapt:apoints} Let $\mathcal{L}\in\mathcal{S}^{\#}$. For given $a\in\mathbb{C}$, we refer to the roots of the equation $\mathcal{L}(s)=a$ as $a$-points of $\mathcal{L}$ and denote them by $\rho_a = \beta_a + i\gamma_a$. 
$$ \mathcal{R}:= \left\{s=\sigma+it\in\mathbb{C} \, : \, b\leq\sigma \leq c, \, T\leq t \leq 2T\right\} $$ and denote the zeros of $f$ in $\mathcal{R}$ by $\rho=\beta+i\gamma$.
Levinson's method builds essentially on the lemma of Littlewood and can be used to detect similar properties in the $a$-point distribution of many functions from the extended Selberg class; see Steuding \cite[Chapt. 7]{steuding:2003, steuding:2007}. By using a result of Selberg, we shall refine the statement of Levinson's theorem for functions in $\mathcal{S}^*$. In Section \ref{sec:apointsnormality} we use the notion of filling discs and certain arguments of the theory of normal families to describe the clustering of $a$-points near the critical line.
In fact, we shall see that the existence of filling discs for $\mathcal{L}\in\mathcal{S}^{\#}$ near the critical line is strongly connected to the non-convergence of the limiting process introduced in Section \ref{sec:shiftingshrinking}.\par \section{General results on the a-point-distribution in the extended Selberg class} \label{sec:apointsgeneral} {\bf Trivial $a$-points and half-planes free of non-trivial $a$-points.} Let $\mathcal{L}\in\mathcal{S}^{\#}$. Suppose that $\mathcal{L}$ has positive degree and Dirichlet series expansion \begin{equation}\label{di} \mathcal{L}(s)=\sum_{n=1}^{\infty} \frac{a(n)}{n^s}, \qquad \sigma>1, \end{equation} with leading coefficient $a(1)=1$. By the definition of the extended Selberg class, the Dirichlet series \eqref{di} converges absolutely in $\sigma>1$. If $\mathcal{L}\in\mathcal{S}$, the normalization $a(1)=1$ holds trivially due to the Euler product representation. Let $q>1$ denote the least integer such that the coefficient $a(q)$ of the Dirichlet expansion of $\mathcal{L}$ is not equal to zero. Then, due to the absolute convergence of \eqref{di} in $\sigma>1$, we obtain that \begin{equation}\label{eq:expansionL} \mathcal{L}(\sigma+it) = 1 +\frac{a(q)}{q^{\sigma+it}} + O\left(\frac{1}{(q+1)^{\sigma}}\right), \qquad \mbox{as }\sigma\rightarrow\infty. \end{equation} From this, we derive that, for every $a\in\mathbb{C}$, there exists a real number $R_a\geq 1$ such that $\mathcal{L}$ is free of $a$-points in the half-plane $\sigma>R_a$.\par Besides the right half-plane $\sigma> R_a$, which is free of $a$-points of $\mathcal{L}$, there is also a left half-plane which contains not too many $a$-points of $\mathcal{L}$. In the particular case of the Riemann zeta-function this observation is due to Landau \cite{BohrLandauLittlewood:1913} and in the general setting of the extended Selberg class due to Steuding \cite[Chapt. VII]{steuding:2007}. 
Their results rely basically on the functional equation and the principle of argument: let $M$ be the set of all $\sigma^*>1$ for which we find a constant $m(\sigma^*)>0$ such that \begin{equation}\label{bo} \left|\mathcal{L}(\sigma+it)\right| \geq m(\sigma^*) \qquad \mbox{ for }\sigma\geq \sigma^*. \end{equation} It follows from \eqref{eq:expansionL} that $M\neq\emptyset$. We define $L:=1-\inf M<0$. In the half-plane $\sigma<L$, there are $a$-points connected to the trivial zeros of $\mathcal{L}$. The number of these $a$-points with real part $-R < \beta_a < L$ coincides asymptotically, as $R\rightarrow\infty$, with the number of non-trivial zeros with real part $-R<\beta_0<L$ and, thus, grows linear in $R$; see Steuding \cite[Chapt. VII]{steuding:2007}. It follows from \eqref{bo} and the functional equation that, apart from the $a$-points generated by the non-trivial zeros of $\mathcal{L}$, there are at most finitely many other $a$-points in the half-plane $\sigma<L$. We call the $a$-points in the half-plane $\sigma<L$ {\it trivial $a$-points} and refer to all other $a$-points as {\it non-trivial $a$-points} of $\mathcal{L}$. We observe that all non-trivial $a$-points of $\mathcal{L}$ are located in the strip $L\leq\sigma\leq R_a$. \par {\bf Counting non-trivial $a$-points.} From now on, we assume additionally that $\mathcal{L}$ satisfies the Ramanujan hypothesis. Recall that we defined the class $\mathcal{S}^{\#}_R$ to contain all elements of the extended Selberg class which have positive degree and satisfy the Ramanujan hypothesis. Steuding \cite[Chapt. 7]{steuding:2007} generalized a result of Levinson \cite{levinson:1975} to the class $\mathcal{S}^{\#}_R$, which the latter established for the Riemann zeta-function: \begin{lemma}[Steuding, 2003]\label{lem:steuding1} Let $\mathcal{L}\in\mathcal{S}^{\#}_R$ with $a(1)=1$. 
Then, for $a\in\mathbb{C}\setminus\{1\}$ and sufficiently large negative $b$, as $T\rightarrow\infty$, $$ \sum_{T<\gamma_a \leq 2T} \left(\beta_a - b \right) = \left(\frac{1}{2}-b\right)\left(\frac{d_{\mathcal{L}}}{2\pi}T\log \frac{4T}{e} + T\log(\lambda Q^2) \right) - T \log |1-a| + O(\log T). $$ \end{lemma} The proof of Lemma \ref{lem:steuding1} relies essentially on Littlewood's lemma. Steuding \cite[Chapt. 7]{steuding:2007} deduced from Lemma \ref{lem:steuding1} a precise Riemann-von Mangoldt type formula for the number of $a$-points of $\mathcal{L}\in\mathcal{S}^{\#}_R$. Let $N_a (T)$ denote the number of non-trivial $a$-points of $\mathcal{L}$ with imaginary part $0<\gamma_a \leq T$. \begin{theorem}[Steuding, 2003]\label{th:riemannmangoldt} Let $\mathcal{L}\in\mathcal{S}^{\#}_R$ with $a(1)=1$. Then, for any $a\in\mathbb{C}\setminus\{1\}$, as $T\rightarrow\infty$, \begin{equation}\label{eq:riemannmangoldt} N_a(T) = \frac{d_{\mathcal{L}}}{2\pi} T \log \frac{T}{e} + \frac{T}{\pi} \log(\lambda Q^2) + O(\log T). \end{equation} \end{theorem} For special functions in $\mathcal{S}^{\#}_R$, asymptotic extensions for $N_a(T)$, in particular in the case $a=0$, were obtained already before. Exemplarily, we discuss the case of the Riemann zeta-function. Here, it was Riemann \cite{riemann:1859} who stated the asymptotic formula \eqref{th:riemannmangoldt} for $a=0$. A rigorous proof was given by von Mangoldt \cite{mangoldt:1895}. The case $a\neq 0$ was first established by Landau \cite[Chapt. II, \S 4]{BohrLandauLittlewood:1913}. Their original methods were based on contour integration with respect to the logarithmic derivative of $\zeta(s)$.\par {\bf $a$-points in the mean-square half-plane.} Let $\mathcal{L}\in\mathcal{S}^{\#}_R$ and $\sigma_m$ denote its abscissa of bounded mean-square. Let $N_{a}(\sigma,T)$ denote the number of $a$-points of $\mathcal{L}$ with real part $\beta_a >\sigma$ and imaginary part $0<\gamma_a\leq T$. 
As $\mathcal{L}$ is of finite order in any strip $-\infty < \sigma_1 \leq \sigma \leq \sigma_2 < \infty$, it follows from the general theory of Dirichlet series that, for every $a\in\mathbb{C}$ and every $\sigma>\sigma_m$, \begin{equation}\label{eq:NaT_meansquare} N_a(\sigma, T) \ll T; \end{equation} see for example Titchmarsh \cite[\S 9.622]{titchmarsh:1939}. According to the mean-square results due to Steuding \cite[Chapt. 4 \& 6]{steuding:2007}, we know that unconditionally $\sigma_m \leq \max\{\tfrac{1}{2},1-\tfrac{1}{d_{\mathcal{L}}}\}$ and that $\sigma_m\leq \frac{1}{2}$, if $\mathcal{L}$ satisfies the Lindel\"of hypothesis; see Section \ref{subsec:meansquare} for details.\par {\bf $a$-points in the strip of universality.} Suppose that $\mathcal{L}$ is an element of the Selberg class, is represented by a polynomial Euler product in $\sigma>1$ and satisfies the prime mean-square condition (S.6). Under these assumptions, Steuding \cite[Chapt. 5, Theorem 5.14]{steuding:2007} verified a Voronin-type universality property for $\mathcal{L}$ in the intersection of its mean-square half-plane $\sigma>\sigma_m$ with the strip $\frac{1}{2}<\sigma<1$. Let $N_{a}(\sigma_1,\sigma_2,T)$ denote the number of $a$-points of $\mathcal{L}$ with real part $\sigma_1<\beta_a <\sigma_2$ and imaginary part $0<\gamma_a\leq T$. As an immediate consequence of the universality property, we obtain that $$ \liminf_{T\rightarrow\infty} \frac{1}{T} N_a(\sigma_1,\sigma_2,T) >0 $$ for every $a\in\mathbb{C}\setminus\{0\}$ and every $\sigma_m <\sigma_1<\sigma_2<1$. Taking into account the upper bound \eqref{eq:NaT_meansquare}, this implies that, \begin{equation}\label{ud} N_a(\sigma_1,\sigma_2,T) \asymp T, \end{equation} for every $a\in\mathbb{C}\setminus\{0\}$ and every $\sigma_m<\sigma_1<\sigma_2<1$; see Steuding \cite{steuding:2007}. Kaczorowski \& Perelli \cite{kaczorowskiperelli:2003} established a zero-density estimate for functions in the Selberg class. 
They showed that, for any $\varepsilon>0$, uniformly for $\frac{1}{2}\leq \sigma\leq 1$, $$N_0(\sigma,T)\ll T^{4(d_{\mathcal{L}}+3)(1-\sigma)+\varepsilon}.$$ Thus, in particular, $N_0(\sigma,T)=o(T)$, if $1-\frac{1}{4(d_{\mathcal{L}}+3)}<\sigma\leq 1.$ Together with \eqref{ud}, this reveals a quantitative difference in the $a$-point distribution of $\mathcal{L}$ between $a=0$ and $a\neq 0$.\par For the Riemann zeta-function, Bohr \& Jessen \cite{bohrjessen:1932} obtained that $$ N_a(\sigma_1,\sigma_2,T)\sim c T $$ for every $a\in\mathbb{C}\setminus\{0\}$ and every $\frac{1}{2}<\sigma<1$ with a positive constant $c:=c(\sigma_1,\sigma_2,a)$ depending on $\sigma_1$, $\sigma_2$ and $a$. Moreover, in the case of the Riemann zeta function there are much more precise zero-density estimates at our disposal than the ones provided by Kaczorowski \& Perelli \cite{kaczorowskiperelli:2003}. We mention here a result of Selberg \cite{selberg:1946} who obtained that, uniformly for $\frac{1}{2}\leq \sigma \leq 1$, \begin{equation}\label{eq:Selbergzerodensityriemann} N_0(\sigma,T)\ll T^{1-\frac{1}{4}(\sigma-\frac{1}{2})} \log T. \end{equation} For more advanced results on zero-density estimates for the Riemann zeta-function the reader is referred to Titchmarsh \cite[\S 9]{titchmarsh:1986} and Ivi\'{c} \cite[Chapt. 11]{ivic:1985}. \par {\bf The special case $a=1$.} The case $a=1$ is special, as our assumption $a(1)=1$ yields that $\lim_{\sigma\rightarrow\infty} \mathcal{L}(s)=1$. This leads to some technical problems in the proofs of Lemma \ref{lem:steuding1} and Theorem \ref{th:riemannmangoldt}. However, one can easily overcome these obstacles by working with $$ \frac{q^s}{a(q)}(\mathcal{L}(s)-1), $$ where $q> 1$ is the least integer such that $a(q)\neq 0$; see Steuding \cite[Chapt. 7]{steuding:2007} for details. 
In this way, we get analogous results in Lemma \ref{lem:steuding1} and Theorem \ref{th:riemannmangoldt} for the case $a=1$ with a minor change in the asymptotic extensions of magnitude $O(T)$, respectively.\par {\bf $a$-points in the lower half-plane.} All the results stated in this section with respect to $a$-points in the upper half-plane hold in an analogous manner for $a$-points in the lower half-plane. \section{a-points near the critical line - approach via Littlewood's lemma} \label{apointslittlewood} In this section we study in detail the $a$-point distribution of functions in the extended Selberg class near the critical line. Under quite general assumptions on $\mathcal{L}\in\mathcal{S}^{\#}$, it is known that the $a$-points of $\mathcal{L}$ cluster around the critical line. We give an overview on existing results and provide a refinement of a theorem of Levinson \cite{levinson:1975}.\par Let $\mathcal{L}\in\mathcal{S}_R^{\#}$. By assuming a certain growth condition for the mean-value of $\mathcal{L}$ on the critical line, Steuding \cite[Chapt. 7.2]{steuding:2007} showed that almost all $a$-points lie arbitrarily close to the critical line. His methods build on works of Levinson \cite{levinson:1975}, who investigated the particular case of the Riemann zeta-function. \begin{theorem}[Steuding, 2003]\label{th:steuding3} Let $\mathcal{L}\in\mathcal{S}^{\#}_R$ with $a(1)=1$ and let $a\in\mathbb{C}$. Suppose that, for any $\varepsilon>0$, as $T\rightarrow\infty$, \begin{equation}\label{eq:LHgrowth} \frac{1}{T}\int_{T}^{2T} \left| \mathcal{L}\left(\tfrac{1}{2}+it\right) \right|^2 dt \ll T^{\varepsilon}. \end{equation} Then, for any $\mbox{\ d}elta>0$, all but $O(\mbox{\ d}elta T\log T)$ of the $a$-points of $\mathcal{L}$ with imaginary part $T<\gamma_a\leq 2T$ lie inside the strip $$ \tfrac{1}{2}-\mbox{\ d}elta < \sigma < \tfrac{1}{2}+\mbox{\ d}elta. 
$$ \end{theorem} In his original formulation of Theorem \ref{th:steuding3}, Steuding \cite{steuding:2007} demands that $\mathcal{L}\in\mathcal{S}^{\#}_R$ satisfies the Lindel\"of hypothesis. However, a close look at his proof reveals that only the somehow weaker condition \eqref{eq:LHgrowth} is needed. We expect that all functions in $\mathcal{S}^{\#}_R$ satisfy the Lindel\"of hypothesis. Thus, all functions $\mathcal{L}\in\mathcal{S}^{\#}_R$ should in particular satisfy the growth condition \eqref{eq:LHgrowth}.\par In the case of the Riemann zeta-function, Landau \cite[Chapt. II, \S 5]{BohrLandauLittlewood:1913} was the first who noticed that, for general $a\in\mathbb{C}$, almost all $a$-points lie arbitrarily close to the critical line. However, for his reasoning, he had to assume the Riemann hypothesis. Levinson \cite{levinson:1975} provided unconditional results for the Riemann zeta-function, exceeding both Landau's conditional observations and the information that may be retrieved from the general situation of Theorem \ref{th:steuding3}. \begin{theorem}[Levinson, 1975]\label{th:levinson} Let $a\in\mathbb{C}$. Then, as $T\rightarrow\infty$, all but\\ $O(T\log T / \log\log T)$ of the $a$-points of the Riemann zeta-function with imaginary part $T<\gamma_a\leq 2T$ lie inside the strip $$ \tfrac{1}{2}-\frac{(\log\log T)^2}{\log T} < \sigma < \tfrac{1}{2}+\frac{(\log\log T)^2}{\log T}. $$ \end{theorem} In the special situation of $a=0$, there are certain zero-density estimates for the Riemann zeta-function at our disposal which allow a more precise statement than the one provided by Theorem \ref{th:levinson} for general $a\in\mathbb{C}$. The following theorem is an immediate consequence of Selberg's zero-density estimate \eqref{eq:Selbergzerodensityriemann}. \begin{theorem}[Selberg, 1946] \label{th:selbergzero} Let $a\in\mathbb{C}$ and $\mu:[2,\infty)\rightarrow\mathbb{R}^+$ be a positive function with $\lim_{t\rightarrow\infty}\mu(t)=\infty$. 
Then, as $T\rightarrow\infty$, all but $O(T\log T \exp(-\mu(T)/4))$ of the zeros of the Riemann zeta-function with imaginary part $T<\gamma\leq 2T$ lie inside the strip $$ \tfrac{1}{2}-\frac{\mu(T)}{\log T} < \sigma < \tfrac{1}{2}+\frac{\mu(T)}{\log T}. $$ \end{theorem} Leaning on a result of Selberg \cite{selberg:1992}, we can slightly refine Levinson's result of Theorem \ref{th:levinson} and extend it to the class $\mathcal{S}^*$. We are pretty sure that, at least in the case of the Riemann zeta-function, both Levinson,\footnote{This is suggested by Levinson's remark at the end of his paper {\it Almost all roots of $\zeta(s)=a$ are arbitrarily close to $\sigma=\frac{1}{2}$}, \cite{levinson:1975}.} Selberg and Tsang\footnote{Tsang \cite{tsang:1984} states in his corollary after Theorem 8.2 that almost all $a$-points lie to the left of the line $\sigma = \mu(t)\sqrt{\log\log t}/\log t$ with any function $\lim_{t\rightarrow\infty}\mu(t)=\infty$. This is a one-sided version of Theorem \ref{th:levinsonselberg} in the special case of the Riemann zeta-function.} were aware of this refinement. However, apart from a brief hint by Heath-Brown in Titchmarsh \cite[\S 11.12]{titchmarsh:1986}, we could not find the following theorem stated in the literature explicitly. We recall that both the Riemann zeta-function and Dirichlet $L$-functions attached to primitive characters lie in $\mathcal{S}^*$ and that we expect that $\mathcal{S}^*=\mathcal{S}$; see Section \ref{sec:selbergclass}. \begin{theorem}\label{th:levinsonselberg} Let $\mathcal{L}\in\mathcal{S}^*$ and $a\in\mathbb{C}$. Let $\mu:[2,\infty)\rightarrow\mathbb{R}^+$ be a positive function with $\lim_{t\rightarrow\infty}\mu(t)=\infty$. 
Then, as $T\rightarrow\infty$, all but $O(T\log T / \mu(T))$ of the $a$-points of $\mathcal{L}$ with imaginary part $T<\gamma_a\leq 2T$ lie inside the strip defined by \begin{equation}\label{eq:striplevinsonselberg} \frac{1}{2}-\frac{\mu(T)\sqrt{\log\log T}}{\log T} <\sigma < \frac{1}{2}+\frac{\mu(T)\sqrt{\log\log T}}{\log T} . \end{equation} \end{theorem} In the proof of Theorem \ref{th:levinsonselberg}, we follow strongly the ideas of Levinson \cite{levinson:1975}. \begin{proof} Let $\mathcal{L}\in\mathcal{S}$ and let $a\in\mathbb{C}\setminus\{1\}$. It follows from Littlewood's lemma that \begin{equation}\label{eq:momentselberg} \sum_{\begin{subarray}{c} T< \gamma_a \leq 2T \\ \beta_a>\frac{1}{2} \end{subarray} } \left(\beta_a - \frac{1}{2} \right) = \frac{1}{2\pi}\int_0^T \log \left| \mathcal{L}(\tfrac{1}{2}+it) - a\right| \mbox{\ d} t - \frac{T}{2\pi}\log \left|1-a \right| + O(\log T). \end{equation} For details we refer to Levinson \cite[Lemma 2]{levinson:1975}, Steuding \cite[Chapt. VII, proof of Theorem 7.1]{steuding:2007} or Selberg \cite[eq. (3.5)]{selberg:1992}. In the case of the Riemann zeta-function, Levinson \cite{levinson:1975} obtained the assertion of Theorem \ref{th:levinson} by bounding the left-hand side of \eqref{eq:momentselberg} by $O(T\log\log T)$; here, he basically used Jensen's inequality in combination with the asymptotic formula $\int_0^T |\zeta(\frac{1}{2}+it)|^2\mbox{\ d} t\sim T\log T$, as $T\rightarrow\infty$. Similarly, Steuding \cite{steuding:2007} obtained the assertion of Theorem \ref{th:steuding3} by bounding the left-hand side of \eqref{eq:momentselberg} by $O(\varepsilon T\log T)$ and then following basically Levinson's ideas. In our case, we brush up Levinson's approach, by using a precise asymptotic expansion for the integral on the right-hand side of \eqref{eq:momentselberg}, which is due to Selberg \cite{selberg:1992}. 
The latter obtained that, for any $\mathcal{L}\in\mathcal{S}^{*}$, \begin{equation}\label{eq:momentselberg2} \int_{T}^{2T} \log \left| \mathcal{L}(\tfrac{1}{2}+it) - a\right| \mbox{\ d} t = \frac{\sqrt{n_{\mathcal{L}}}}{2\sqrt{\pi}} T \sqrt{\log\log T} + O_{|a|}(T); \end{equation} where the quantity $n_{\mathcal{L}}$ is defined by Selberg's prime coefficient condition (S.6$^*$). In some places, Selberg's proof seems a bit sketchy. We refer to Tsang \cite[\S 8]{tsang:1984}, who carried out all details in the case of the Riemann zeta-function, and to Hejhal \cite[\S 4]{hejhal:2000}, who provided a thorough description of Selberg's method.\footnote{Hejhal \cite{hejhal:2000} assumes additionally that $\mathcal{L}\in\mathcal{S}^*$ has a polynomial Euler product (S.3$^*$). However, this assumption is only needed for later purposes and not for the results in \S 4.}\par Suppose that $\mathcal{L}\in\mathcal{S}^{*}$. Then, by combining \eqref{eq:momentselberg} with \eqref{eq:momentselberg2}, we get that \begin{equation}\label{eq:apointslittlewood} \sum_{\begin{subarray}{c} \beta_a>\frac{1}{2} \\ T<\gamma_a\leq 2T \end{subarray}} \left(\beta_a-\frac{1}{2}\right) = \frac{\sqrt{n_{\mathcal{L}}}}{4\pi^{3/2}} T \sqrt{\log\log T} + O_{|a|}(T). \end{equation} Let $\mu:[2,\infty)\rightarrow\mathbb{R}^+$ be a positive function with $\lim_{t\rightarrow\infty}\mu(t) = \infty$. For positive $T$, we denote by $N_{a}^{(1)}(T)$ the number of $a$-points $\rho_a = \beta_a + i\gamma_a$ of $\mathcal{L}$ with $$ \beta_a > \frac{1}{2}+ \frac{\mu(T)\sqrt{\log\log T}}{\log T}, \qquad T<\gamma_a\leq 2T; $$ by $N_{a}^{(2)}(T)$ the number of $a$-points with $$ \frac{1}{2}- \frac{\mu(T)\sqrt{\log\log T}}{\log T} \leq \beta_a \leq \frac{1}{2}+ \frac{\mu(T)\sqrt{\log\log T}}{\log T}, \qquad T<\gamma_a\leq 2T; $$ and by $N_{a}^{(3)}(T)$ the number of $a$-points with $$ \beta_a < \frac{1}{2} -\frac{\mu(T)\sqrt{\log\log T}}{\log T}, \qquad T<\gamma_a\leq 2T. 
$$ The trivial estimate $$ \sum_{\begin{subarray}{c} \beta_a>\frac{1}{2} \\ T<\gamma_a\leq 2T \end{subarray}} \left(\beta_a-\frac{1}{2}\right) \geq \frac{\mu(T)\sqrt{\log\log T}}{\log T} N_{a}^{(1)}(T) $$ yields in combination with \eqref{eq:apointslittlewood} that, for sufficiently large $T$, $$ N_{a}^{(1)}(T) \ll \frac{T\log T}{\mu(T)}. $$ Moreover, for any real $b$, \begin{align*} \sum_{T<\gamma_a \leq 2T} \left(\beta_a + b \right) & = \sum_{\begin{subarray}{c} \beta_a>\frac{1}{2} \\ T<\gamma_a\leq 2T \end{subarray}} \left(\beta_a - \frac{1}{2} \right) + \sum_{\begin{subarray}{c} \beta_a\leq \frac{1}{2} \\ T<\gamma_a\leq 2T \end{subarray}}\left(\beta_a - \frac{1}{2} \right) + \sum_{T<\gamma_a \leq 2T} \left(\frac{1}{2} + b \right) \\ &\leq \sum_{\begin{subarray}{c} \beta_a>\frac{1}{2} \\ T<\gamma_a\leq 2T \end{subarray}} \left(\beta_a - \frac{1}{2} \right) - \frac{\mu(T)\sqrt{\log\log T}}{\log T} N_a^{(3)}(T) + \\ &\qquad \qquad\qquad +\left(\frac{1}{2} + b \right)\left( N_{a}^{(1)}(T)+N_{a}^{(2)}(T)+N_{a}^{(3)}(T)\right). \end{align*} If we take $b$ sufficiently large, we get by means of the asymptotic extensions \eqref{lem:steuding1} and \eqref{eq:apointslittlewood} and the Riemann-von Mangoldt formula \eqref{eq:riemannmangoldt} that, for sufficiently large $T$, $$ N_{a}^{(3)}(T) \ll \frac{T\log T}{\mu(T)}. $$ This proves the assertion for $a\in\mathbb{C}\setminus\{1\}$. The case $a=1$ can be treated in a similar manner by relying on suitably adjusted formulas; see the remark at the end of the preceeding section. \end{proof} Theorems \ref{th:steuding3}, \ref{th:levinson} and \ref{th:levinsonselberg} hold in an analogous manner for $a$-points in the lower half-plane.\par {\bf $a$-points close to the critical line with real part $\beta_a<\frac{1}{2}$.} Suppose that $\mathcal{L}\in\mathcal{S}^*$. 
Unconditionally, almost nothing is known about how the $a$-points of Theorem \ref{th:levinsonselberg}, which lie in the strip \eqref{eq:striplevinsonselberg}, are distributed to the left and to the right of the critical line. Under the assumption of the Riemann hypothesis, Selberg \cite{selberg:1992} obtained the following: for $T\geq 2$ and $\mu>0$, let $$ \sigma(T,\mu):= \frac{1}{2}-\mu\frac{\sqrt{\log\log T}}{\log T}. $$ Then, for every $a\in\mathbb{C}\setminus\{0\}$, as $T\rightarrow\infty$, \begin{equation}\label{eq:selberg_aleft} N_a(\sigma(T,\mu),T) \sim N_a (T) \cdot \frac{1}{\sqrt{2\pi}}\int_{-\mu'}^{\infty}e^{-x^2/2} \mbox{\ d} x \end{equation} with $$ \mu':= \frac{\sqrt{2}d_{\mathcal{L}}}{n_{\mathcal{L}}}\mu. $$ Recall that $\varphi(x)= \frac{1}{\sqrt{2\pi}}e^{-x^2/2}$ defines the density function of the Gaussian normal distribution. Thus, roughly speaking, Selberg's result states that about half of the $a$-points of $\mathcal{L}$ lie to the left of the critical line and are statistically well distributed at distances of order $\sqrt{\log\log T}/\log T$. In particular, this means that the left bound of the strip \eqref{eq:striplevinsonselberg} seems to be best possible. Moreover, by assuming the Riemann hypothesis, Selberg deduced, that, as $T\rightarrow\infty$, most of the other $a$-points with $T<\gamma_a\leq 2T$ in the strip \eqref{eq:striplevinsonselberg} lie quite close to the critical line at distances of order not exceeding $$ O\left(\frac{(\log\log\log T)^3}{\log T \sqrt{\log\log T}} \right). $$ The proof presented by Selberg \cite{selberg:1992} is quite sketchy. For a rigorous proof in the special case of the Riemann zeta-function, we refer to Tsang \cite[Theorem 8.3]{tsang:1984}. Selberg \cite{selberg:1992} also provides some heuristic reason in support of the following conjecture.\par {\bf Selberg's $a$-point conjecture.} {\it Let $\mathcal{L}\in\mathcal{S}$. 
For any $a\in\mathbb{C}\setminus\{0\}$, as $T\rightarrow\infty$, about $3/4$-th of all non-trivial $a$-points with $T<\gamma_a\leq 2T$ lie to the left of the critical line, and about $1/4$-th of all non-trivial $a$-points with $T<\gamma_a\leq 2T$ lie to its right.}
\footnote{In the literature it is convenient to demand additionally the growth condition $\lambda_n\leq |z_n|$ for the radii of the filling discs. For our purpose, however, this restriction is not relevant.} $$ We have necessarily that $\lim_{n\rightarrow\infty} z_n \in \partial \Omega$. Due to rich contributions by French mathematical schools,\footnote{Amongst others, the names of Julia, Milloux and Valiron are here to mention.} filling discs are sometimes referred to as {\it cercles de remplissage}.\par By Montel's fundamental normality test (Theorem \ref{th:FNT1}) and basic convergence properties, the existence of a sequence of filling discs is strongly connected to the non-normality of a certain family. In fact it is Montel's fundamental normality test that motivates the definition of filling discs and provides a first characterization of the latter. \begin{proposition}\label{prop:charfilldiscs} Let $f\in\mathcal{M}(\Omega)$. Suppose that $(z_n)_n$ is a sequence of points $z_n\in \Omega$ and $(\lambda_n)_n$ a sequence of positive real numbers such that $D_{\lambda_n} (z_n) \subset \Omega$ for $n\in\mathbb{N}$. Let $f_n:\mathbb{D}\rightarrow\mathbb{C}$ be defined by $f_n(z):=f(z_n +\lambda_n z)$.\\ Then, the discs $D_{\lambda_n}(z_n)$, $n\in\mathbb{N}$, form a sequence of filling discs for $f$ if and only if every infinite subset of the family $\mathcal{F}:=\{f_n\}_n$ is not normal at zero. \end{proposition} The assertion of Proposition \ref{prop:charfilldiscs} was observed by many people and, essentially, goes back to Montel \cite{montel:1912} and Julia \cite{julia:1919}. \begin{proof} Certainly, the functions $f_n$ are well-defined on $\mathbb{D}$. Suppose that every infinite subset of the family $\mathcal{F}:=\{f_n\}_n$ is not normal at zero. 
Then, Montel's fundamental normality test (Theorem \ref{th:FNT1}) implies that every infinite subset of $\mathcal{F}$ can omit at most two values $a,b\in\widehat{\mathbb{C}}$ on any neighbourhood $D_{\varepsilon}(0)$ of zero with $0<\varepsilon\leq 1$. By observing that $f_n(D_{\varepsilon}(0)) = D_{\varepsilon\lambda_n}(z_n)$, this is equivalent to the statement that the discs $D_{\lambda_n}(z_n)$, $n\in\mathbb{N}$, form a sequence of filling discs for $f$.\par Now, suppose that the discs $D_{\lambda_n}(z_n)$, $n\in\mathbb{N}$, form a sequence of filling discs for $f$. Assume that there exists a subsequence $(f_{n_k})_k$ of $(f_n)_n$ which converges locally uniformly on some disc $D_{\varepsilon}(0)$ with $0<\varepsilon\leq 1$. Then, by the theorem of Weierstrass (Theorem \ref{th:weierstrass}), its limit function $\phi$ is meromorphic on $D_{\varepsilon}(0)$. Consequently, taking $0<\varepsilon'<\varepsilon$ small enough, $\phi(\overline{D_{\varepsilon'}(0)})$ omits a non-empty open subset of $\mathbb{C}$. By uniform convergence, this implies that $\bigcup_{k\geq K} f_{n_k}(\overline{D_{\varepsilon'}(0)})$ omits more than three values for any sufficiently large $K\in\mathbb{N}$, contradicting the definition of filling discs. Consequently, every infinite subset of $\mathcal{F}$ is not normal at zero. \end{proof} By combining Proposition \ref{prop:charfilldiscs} with Marty's theorem (Theorem \ref{th:marty}), Lehto \cite{lehto:1958} derived a powerful characterization of filling discs; we refer here also to Clunie \& Hayman \cite{cluniehayman:1966} and Sauer \cite{sauer:2002} who pointed out a slight inexactness in Lehto's original formulation. For $f\in\mathcal{M}(\Omega)$, the spherical derivative $f^{\#}$ is defined by $$ f^{\#} (z) := \frac{|f'(z)|}{1+|f(z)|^2}, \qquad z\in\Omega; $$ for details we refer to the appendix. \begin{theorem}[Lehto's criterion, 1958]\label{th:lehto} Let $f\in\mathcal{M}(\Omega)$. 
Suppose that $(z_n)_n$ is a sequence of points $z_n\in \Omega$ and $(\lambda_n)_n$ a sequence of positive real numbers such that $D_{\lambda_n} (z_n) \subset \Omega$ for $n\in\mathbb{N}$. Then, the discs $D_{\lambda_n}(z_n)$, $n\in\mathbb{N}$, form a sequence of filling discs for $f$ if and only if there is a sequence $(w_n)_n$ of points $w_n\in \Omega$ such that \begin{equation}\label{crit_fillingdiscs} \lim_{n\rightarrow\infty}\lambda_n f^{\#}(w_n) = \infty \qquad \mbox{and} \qquad |z_n-w_n|=o(\lambda_n). \end{equation} \end{theorem} Lehto's criterion will play a central role in our further investigations.\par In a half-strip setting, we equip sequences of filling discs with certain counting functions. Let $S$ be a vertical half-strip in the upper half-plane defined by \begin{equation}\label{halfstrip} -\infty< x_1 < {\rm{Re} } \ z < x_2 <+\infty, \qquad {\rm{Im} } \ z >0 \end{equation} For a sequence $(z_n)_n$ of points $z_n\in S$ with $\lim_{n\rightarrow\infty} z_n = \infty$, we define $N_{\{z_n\}_n} (T) $ to be the number of elements in $\{z_n\}_n$ with imaginary part less than $T$. Similarly, for $f\in\mathcal{M}(S)$, any subset $E\subset S$ and any $a\in\widehat{\mathbb{C}}$, let $N_{a}(E,T)$ denote the number of $a$-points of $f$ on $E$ with imaginary part less than $T$. \begin{lemma}\label{lem:countingfillingdiscs} Let $S$ be a half-strip defined by \eqref{halfstrip} and $f\in\mathcal{H}(S)$. Let $D_{\lambda_n}(z_n)$, $n\in\mathbb{N}$, be a sequence of filling discs for $f$ in $S$ such that $\lim_{n\rightarrow\infty} z_n = \infty$ and $D_{\lambda_n}(z_n)\cap D_{\lambda_m}(z_m)=\emptyset$ for $n\neq m$. Let $E:=\bigcup_{n\in\mathbb{N}} D_{\lambda_n}(z_n)$. Then, for all but at most one $a\in\mathbb{C}$, $$ \limsup_{T\rightarrow\infty} \frac{N_{a}(E,T)}{N_{\{z_n\}_n}(T)} > 0. $$ \end{lemma} \begin{proof}\label{lem:fill_counting} Assume that there is an $a\in\mathbb{C}$ such that $$ \limsup_{T\rightarrow\infty} \frac{N_{a}(E,T)}{N_{\{z_n\}_n}(T)} = 0. 
$$ Then, since the discs $D_{\lambda_n}(z_n)$, $n\in\mathbb{N}$, are pairwise disjoint, there exists a subsequence $(z_{n_k})_k$ of $(z_n)_n$ with \begin{equation}\label{eq:countingsequence} \lim_{T\rightarrow\infty}\frac{N_{\{z_{n_k}\}_k}(T)}{N_{\{z_n\}_n}(T)} =1 \end{equation} such that $f$ omits the values $a$ and $\infty$ on $\bigcup_{k\in\mathbb{N}} D_{\lambda_{n_k}}(z_{n_k})$. Let $b\in\mathbb{C}\setminus\{a\}$. Then, we deduce from Proposition \ref{prop:charfilldiscs} and an extended version of Montel's fundamental normality test (Theorem \ref{th:FNTextension} (b)), for any $m\in\mathbb{N}$ there is an integer $K$ such that $f$ assumes the value $b$ more than $m$-times on every disc $D_{\lambda_{n_k}}(z_{n_k})$ with $k\geq K$. Thus, since the discs $D_{\lambda_{n_k}}(z_{n_k})$, $k\in\mathbb{N}$, are pairwise disjoint, we obtain that for every $b\in\mathbb{C}\setminus\{a\}$ and every $m\in\mathbb{N}$ $$ \liminf_{T\rightarrow\infty} \frac{N_{b}(E,T)}{N_{\{z_{n_k}\}_k}(T)} > m. $$ Together with \eqref{eq:countingsequence}, this implies that $$ \liminf_{T\rightarrow\infty} \frac{N_{b}(E,T)}{N_{\{z_{n}\}_n}(T)} = \infty. $$ As this holds for any $b\in\mathbb{C}\setminus\{a\}$, the assertion is proved. \end{proof} {\bf Julia directions.} A complex number $e^{i\theta_0}$ with $\theta_0\in\mathbb{R}$ is called {\it Julia direction for $f\in\mathcal{M}(\Omega)$ at $z_0\in\partial \Omega$} if $f$ satisfies the Picard-property in every sectorial domain $$ E_{R,\varepsilon}:=\left\{z_0 + re^{i\theta}\in\mathbb{C} \; : \; 0< r \leq R, \; |\theta-\theta_0|<\varepsilon \right\} \subset \Omega $$ with any $\varepsilon>0$ and any $R>0$. To define a Julia direction for $f$ at $z_0 = \infty$, one has to replace $E_{R,\varepsilon}$ by $$ E'_{R,\varepsilon}:=\left\{re^{i\theta}\in\mathbb{C} \; : \; r \geq R, \; |\theta-\theta_0|<\varepsilon \right\} \subset \Omega . 
$$ If $e^{i\theta_0}$ is a Julia direction for $f$ at $z_0$, then $f$ assumes every value $a\in\widehat{\mathbb{C}}$, with at most two exceptions, {\it infinitely often} on every set $E_{R,\varepsilon}$, resp. $E'_{R,\varepsilon}$. This follows immediately from the definition of a Julia direction.\par Let $f$ be a function that is analytic in a punctured neighbourhood of an essential singularity $z_0 \in \mathbb{C}$. Lehto \cite{lehto:1958} proved that \begin{equation}\label{limsup_esssing} \limsup_{z\rightarrow z_0} |z-z_0| f^{\#}(z) = \infty; \end{equation} here, one has to replace \eqref{limsup_esssing} by $ \limsup_{z\rightarrow z_0} |z| f^{\#}(z) = \infty $ if the essential singularity $z_0$ lies at infinity. By means of Lehto's criterion, we deduce that there is a sequence of points $z_n\in\Omega$ with $\lim_{n\rightarrow\infty}z_n = z_0$ such that the discs $D_{\lambda_n}(z_n)$, $n\in\mathbb{N}$, with $\lambda_n= |z_n-z_0|$ form a sequence of filling discs for $f$. This implies, in particular, that $f$ satisfies the Picard-property in every punctured neighbourhood of $z_0$. Thus, we have reproduced the analytic version of Picard's classical theorem. Moreover, by choosing $e^{i\theta_0}$ as an accumulating point of the set $\{e^{i\arg z_n}\, :\, n\in\mathbb{N}\} \subset \partial\mathbb{D}$, we deduce that their exists a Julia direction for $f$ at $z_0$. Thus, we have reproduced the statement of Julia's classical result on Julia directions (see Burckel \cite[Theorem 12.27]{burckel:1979} and Julia \cite{julia:1919}). 
{\bf Julia lines.} We call $$ L := \{z_0 + re^{i\theta_0} \; : \; r\in\mathbb{R} \} \qquad \mbox{ with fixed } z_0\in\mathbb{C}, \; \theta_0\in[0,2\pi) $$ a {\it Julia line} for $f\in\mathcal{M}(\mathbb{C})$ if $f$ satisfies the Picard-property in every open strip containing the line $L$.\footnote{In some literature, the ray $re^{i\theta_0}$, $r\in\mathbb{R}^+$, connected to a Julia direction $e^{i\theta_0}$ is also called Julia line.} Certainly, one can use Lehto's criterion to detect Julia lines. The following corollary is a direct consequence thereof. \begin{corollary}\label{cor:julia} Let $f\in\mathcal{M}(\mathbb{C})$. Suppose that, for a fixed $x\in\mathbb{R}$, there exists a sequence $(y_n)_n$ of positive real numbers $y_n$ with $\lim_{n\rightarrow\infty}y_n=\infty$ and a positive real number $\alpha$ such that $$ \lim_{n\rightarrow\infty} |f(x+iy_n)| = \alpha \qquad \mbox{ and } \qquad \lim_{n\rightarrow\infty} |f'(x+iy_n)| = \infty. $$ Then, ${\rm{Re} } \ z = x$ defines a Julia line for $f$. \end{corollary} \begin{proof} For arbitrary $\lambda>0$, $$ \lim_{n\rightarrow\infty} \lambda f^{\#}(x_0+iy_n) = \lim_{n\rightarrow\infty} \lambda \ \frac{|f'(x_0+iy_n)|}{1+|f(x_0+iy_n)|^2} = \infty. $$ By Lehto's criterion (Theorem \ref{th:lehto}), the discs $D_{\lambda}(x_0 + iy_n)$, $n\in\mathbb{N}$, form a sequence of filling discs for $f$ and the assertion follows. \end{proof} Mandelbrojt \& Gergen \cite{mandelbrojtgergen:1931} investigated Julia lines of entire functions defined by a generalized Dirichlet series $$ A(s)=\sum_{n=0}^{\infty} a(n) e^{-\lambda_n s}, $$ which is absolutely convergent in $\mathbb{C}$ and satisfies $$ 0=\lambda_0< \lambda_1 < \lambda_2 < ...\, , \qquad \lim_{n\rightarrow\infty}\lambda_n = \infty, \qquad a(n)\neq 0 \quad \mbox{ for all }n>0. $$ They derived conditions on the exponents $\lambda_n$ and on the growth behaviour of $A(s)$ which lead to the existence of horizontal Julia lines. 
Their results mainly rely on the fact that the quantities $$ \limsup_{N\rightarrow\infty}\frac{\#\{\lambda_n \, : \, n\leq N\}}{N}, \qquad \mbox{ resp. } \quad \liminf_{n\rightarrow\infty} (\lambda_{n+1} - \lambda_n), $$ are, at least in some mean sense, bounded from above, resp. from below. Thus, their results do not apply to ordinary Dirichlet series with non-vanishing coefficients $a(n)$. For extended results in this direction, we refer to the monographs of Mandelbrojt \cite{mandelbrojt:1952, mandelbrojt:1969}; in \cite{mandelbrojt:1952} particularly to Chapter II for results on analytic functions in strips and Chapter VII for Picard-type results on generalized Dirichlet series in strips, in \cite{mandelbrojt:1969} to Chapter VI again for Picard-type results. One might interpret the theory developed by Mandelbrojt as a counterpart of lacunary power series for Dirichlet series. It seems that his theory is not appropriate to describe the value-distribution of functions in the extended Selberg class. \subsection{Julia directions and Julia lines for the Riemann zeta-function} We investigate Julia directions and Julia lines of the Riemann zeta-function. With suitable adaptations, similar results can be stated for many functions in the extended Selberg class. Using another terminology, the following theorem was basically stated by Garunk\v{s}tis \& Steuding \cite{garunkstissteuding:2010}. \begin{theorem} The Julia directions of the Riemann zeta-function are given by $e^{i\pi/2}$, $e^{i\pi}$ and $e^{i 3\pi/2}$. \\ Apart from the real axis ${\rm{Im} } \ z = 0$, all Julia lines of the Riemann zeta-function are vertical lines. Every line defined by ${\rm{Re} } \ z =\sigma \in [\frac{1}{2},1]$ is a Julia line. There are no Julia lines among the lines ${\rm{Re} } \ z = \sigma \in (-\infty,0) \cup (1,\infty)$. Assuming the Riemann hypothesis, the lines ${\rm{Re} } \ z =\sigma \in [0,\frac{1}{2})$ are no Julia lines, either. 
\end{theorem} \begin{proof} For a given $a\in\mathbb{C}$, the discs $D_{\lambda}(-2n)$, $n\in\mathbb{N}$, with any $\lambda>0$, cover all but finitely many trivial $a$-points of the Riemann zeta-function. This follows by a slight refinement of Landau's proof of Lemma 1 and Lemma 2 in Bohr, Landau \& Littlewood \cite[Chapt. II]{BohrLandauLittlewood:1913}. Thus, they form a sequence of filling discs. Consequently, $e^{i\pi}$ is a Julia direction and ${\rm{Im} } \ z = 0$ defines a Julia line for the zeta-function. As there are left and right half-planes free of non-trivial $a$-points, the only further Julia directions are given by $e^{i\pi/2}$ and $e^{i 3\pi/2}$ and all further Julia lines are vertical lines. Bohr's and Voronin's denseness results in combination with Corollary \ref{cor:julia} yield that every line ${\rm{Re} } \ z = \sigma \in(\frac{1}{2},1]$ is a Julia line. The clustering of $a$-points around the critical line (see Theorem \ref{th:levinsonselberg}) implies that the same is true for ${\rm{Re} } \ z = \frac{1}{2}$. By the inequality $$ \frac{\zeta(2\sigma)}{\zeta(\sigma)} \leq |\zeta(s)|\leq \zeta(\sigma), $$ which is valid in the half-plane $\sigma>1$, and the functional equation, there is no Julia direction among ${\rm{Re} } \ z \in (-\infty, 0) \cup (1,\infty)$. Assuming the Riemann hypothesis, we know that, for $\sigma>\frac{1}{2}$, as $t\rightarrow\infty$, $$\zeta(\sigma+it)\gg t^{-\varepsilon}$$ with any $\varepsilon>0$; see Titchmarsh \cite[\S 14.2]{titchmarsh:1986}. This in combination with the functional equation yields that, under assumption of the Riemann hypothesis, the lines ${\rm{Re} } \ z = \sigma\in[0,\frac{1}{2})$ are no Julia lines either (see Garunk\v{s}tis \& Steuding \cite[Lemma 4 and Proposition 5]{garunkstissteuding:2010}). \end{proof} In the next section, we shall see that, in main parts, the functional equation is responsible for $\sigma=\frac{1}{2}$ being a Julia line. 
\subsection{Filling discs induced by a Riemann-type functional equation} {\bf A Riemann-type functional equation and its symmetry line $\sigma=\frac{1}{2}$.} A Riemann-type functional equation for a function $G\in\mathcal{G}$ invokes a strong connection between $G$ and $G'$ on its symmetry line $\sigma=\frac{1}{2}$. \begin{lemma}\label{lem:connectionGG'} Let $G\in\mathcal{G}$ with $d_G>0$. Then, for $t\in\mathbb{R}$ with $G(\frac{1}{2}+it)\neq 0$ and $G(\frac{1}{2}+it)\neq \infty$, as $|t|\rightarrow\infty$, $$ \left| \frac{G'(\frac{1}{2}+it)}{G(\frac{1}{2}+it)} \right| \geq \frac{d_G}{2}\log |t|-\frac{1}{2}\log (Q^2 \lambda) + O\left( \frac{1}{|t|}\right). $$ \end{lemma} \begin{proof} Logarithmic differentiation of the functional equation yields that $$ \frac{G'(s)}{G(s)} = \frac{\Delta'(s)}{\Delta(s)} - \overline{\left(\frac{G'(1-\overline{s})}{G(1-\overline{s})}\right)}. $$ For $s=\frac{1}{2}+it$, this and the asymptotic expansion \eqref{asymext_logdiff_Delta_p} of $\Delta'/\Delta$ imply that \begin{equation}\label{Re_logdiff_Delta} 2 \ {\rm{Re} } \left( \frac{G'(\frac{1}{2}+it)}{G(\frac{1}{2}+it)} \right) = \frac{\Delta'(\frac{1}{2}+it)}{\Delta(\frac{1}{2}+it)} = -d_G\log |t|-\log (Q^2 \lambda) + O\left( \frac{1}{|t|}\right), \end{equation} provided that $|t|$ is sufficiently large and that $s=\frac{1}{2}+it$ is not a pole of $G'(s)/G(s)$. The assertion follows from \eqref{Re_logdiff_Delta} by using the relation $|z|\geq |{\rm{Re} } \ z|$. \end{proof} {\bf Filling discs induced by a Riemann-type functional equation on $\sigma=\frac{1}{2}$.} By Lehto's criterion, Lemma \ref{lem:connectionGG'} implies that, in most cases, a Riemann-type functional equation turns $\sigma=\frac{1}{2}$ into a Julia line. The following theorem is a special case of Corollary \ref{cor:julia} for functions satisfying a Riemann-type functional equation. \begin{theorem}\label{th:avalues-case1} Let $G\in\mathcal{G}$ with $d_G>0$. 
Suppose that there exists a sequence $(\tau_k)_k$ with $\tau_k\in[2,\infty)$ and $\lim_{k\rightarrow\infty} \tau_k = \infty$ and an $\alpha\in(0,\infty)$ such that \begin{equation}\label{cond1} \lim_{k\rightarrow \infty} |G(\tfrac{1}{2}+i\tau_k)| = \alpha . \end{equation} Then, for any positive function $\mu:[2,\infty)\rightarrow\mathbb{R}^+$ with $\lim_{\tau\rightarrow\infty}\mu(\tau) = \infty$, the discs defined by $$ |s-\tfrac{1}{2}-i\tau_k|< \frac{\mu(\tau_k)}{\log\tau_k}, \qquad k\in\mathbb{N}, $$ form a sequence of filling discs for $G$. In particular, $\sigma=\frac{1}{2}$ is a Julia line of $G$; and $G$ assumes every value $a\in\widehat{\mathbb{C}}$, with at most two exceptions, infinitely often inside the region defined by \begin{equation}\label{eq:strip-case1} \frac{1}{2}-\frac{\mu(t)}{\log t} < \sigma < \frac{1}{2}+\frac{\mu(t)}{\log t}, \qquad t\geq 2. \end{equation} \end{theorem} \begin{proof} Due to our conditions, we can assume without loss of generality that $G(\frac{1}{2}+i\tau_k)\neq 0,\infty$ for $k\in\mathbb{N}$. We deduce from Lemma \ref{lem:connectionGG'} that $$ \left| G'(\tfrac{1}{2}+i\tau_k) \right| \geq \frac{d_G}{4} \log \tau_k \left|G(\tfrac{1}{2}+i\tau_k )\right|, $$ provided that $k$ is large enough. Hence, for sufficiently large $k$, $$ G^{\#}(\tfrac{1}{2}+i\tau_k) = \frac{|G'(\frac{1}{2}+i\tau_k)|}{1 + |G(\frac{1}{2}+i\tau_k)|^2} \geq \frac{d_G}{4}\ \log\tau_k \ \frac{|G(\frac{1}{2}+i\tau_k)|}{1 + |G(\frac{1}{2}+i\tau_k)|^2} $$ Together with $$ \lim_{k\rightarrow\infty} \frac{|G(\frac{1}{2}+i\tau_k)|}{1 + |G(\frac{1}{2}+i\tau_k)|^2} = \frac{\alpha}{1+\alpha^2}\in (0,\tfrac{1}{2}], $$ which is true according to our assumption, this yields that $$ \lim_{k\rightarrow\infty }\frac{\mu(\tau_k)}{\log \tau_k} G^{\#}(\tfrac{1}{2}+i\tau_k) = \infty $$ for any positive function $\mu$ with $\lim_{\tau\rightarrow\infty} \mu(\tau) = \infty$. The assertion of the theorem follows immediately from Theorem \ref{th:lehto}. 
\end{proof} For general functions $G\in\mathcal{G}$, the conditions posed on $G$ in Theorem \ref{th:avalues-case1} are best possible:\par Theorem \ref{th:avalues-case1} does not necessarily apply to functions in $\mathcal{G}$ of degree zero: according to Kaczorowski \& Perelli \cite{kaczorowskiperelli:1999} any function $\mathcal{L}\in\mathcal{S}^{\#}\subset\mathcal{G}$ with $d_{\mathcal{L}}=0$ is given by a Dirichlet polynomial. Hence, $\mathcal{L}$ is bounded in any vertical strip around the critical line. \par Condition \eqref{cond1} cannot be removed in general as the following two examples show: Let the function $$ G_{\pmb{0}}(s):= \exp(-s(1-s)) \Delta_{\zeta}(s)^{1/2}, \qquad s\in\mathbb{C}_{\Delta_{G_{\pmb{0}}}} $$ be defined as in Section \ref{subsec:noncon}. Then, $G_{\pmb{0}}\in\mathcal{G}$ with $d_{G_{\pmb{0}}}=1$ and $\lim_{t\rightarrow\infty}G_{\pmb{0}}(\frac{1}{2}+it)=0$. Since \begin{equation*} G_{\pmb{0}}(\sigma+it) \ll \exp\left(-Ct^2 \right) \end{equation*} holds uniformly for $0<\sigma<1$, as $t\rightarrow\infty$, with a certain constant $C>0$, the function $G_{\pmb{0}}\in\mathcal{G}$ assumes any given $a\in\mathbb{C}\setminus\{0\}$ at most finitely often inside the half-strip $0<\sigma<1$, $t\geq 2$. Thus, the assertion of Theorem \ref{th:avalues-case1} does not hold for $G_{\pmb{0}}$. Similarly, the function $G_{\pmb \infty}$ defined by $$ G_{\pmb \infty}(s):=\Delta(s)^{\frac{1}{2}}\exp(s(1-s)),\qquad s\in\mathbb{C}_{\Delta_{G_{\pmb{0}}}} $$ yields an example for a function in $\mathcal{G}$ with $d_{G_{\pmb{\infty}}}=1$ and $\lim_{\tau\rightarrow\infty} |G_{\pmb{\infty}}(\frac{1}{2}+i\tau)|= \infty$, but not satisfying the assertion of Theorem \ref{th:avalues-case1}.\par In the general setting of the class $\mathcal{G}$, the radii $\frac{\mu(\tau_k)}{\log\tau_k}$ of the filling discs are best possible. This can be seen by considering the function $$ G_{1,\zeta}(s):=1+\Delta_{\zeta}(s). 
$$ Due to the asymptotic estimate $\Delta_{\zeta} (s) \asymp \left(|t|/2\pi \right)^{1/2 - \sigma}$, which holds uniformly for $0\leq \sigma\leq 1$, as $t\rightarrow\infty$, the function $G_{1,\zeta}(s)$ is bounded in any region defined by $$ \frac{1}{2} - \frac{c}{\log t} \leq \sigma \leq \frac{1}{2} + \frac{c}{\log t}, \qquad t>2, $$ with an arbitrary constant $c>0$. \par It seems reasonable to determine the quantities $\alpha_{G,\scalebox{0.8}{\mbox{inf}}}$ and $\alpha_{G,\scalebox{0.8}{\mbox{sup}}} $ defined by \eqref{def:alpha_inf} to check whether a given function $G\in\mathcal{G}$ satisfies condition \eqref{cond1}. By the intermediate value theorem, we find a sequence $(\tau_k)_k$ with $\tau_k\in[2,\infty)$ and $\lim_{k\rightarrow\infty} \tau_k = \infty$ and an $\alpha\in(0,\infty)$ such that \eqref{cond1} holds whenever $$ \alpha_{G,\scalebox{0.8}{\mbox{inf}}}\neq \alpha_{G,\scalebox{0.8}{\mbox{sup}}} \qquad \qquad \mbox{or}\qquad \qquad 0<\alpha_{G,\scalebox{0.8}{\mbox{inf}}}=\alpha_{G,\scalebox{0.8}{\mbox{sup}}}<\infty. $$ For $\mathcal{L}\in\mathcal{S}^{*}$ we know that $\alpha_{\mathcal{L},\scalebox{0.8}{\mbox{inf}}}=0$ and $\alpha_{\mathcal{L},\scalebox{0.8}{\mbox{sup}}}=\infty$. We expect that the same is true for every function $\mathcal{L}\in\mathcal{S}^{\#}$. We refer to Section \ref{subsec:summaryunboundedness} for partial results. {\bf Normality approach vs. Levinson's method.} We shall compare the $a$-point result of Theorem \ref{th:avalues-case1}, given in terms of filling discs, with the $a$-point results, obtained by Levinson's method (Theorems \ref{th:steuding3}, \ref{th:levinson} and \ref{th:levinsonselberg}). Both concepts yield qualitatively and quantitatively different information and complement each other: Levinson's method is appropriate to count the number of $a$-values in neighbourhoods of line segments $\frac{1}{2}+it$, $t\in[T,2T]$ in a very good manner. 
Filling discs provide locally more precise information on the location of some of the $a$-points (combine for example Theorem \ref{th:FNTextension} with Proposition \ref{prop:charfilldiscs}). Moreover, the conditions in Theorem \ref{th:avalues-case1} for the existence of filling discs are rather weak compared to the conditions needed to evaluate the integrals involved in Levinson's method in a precise manner.\par In the case of the Riemann zeta-function, Theorem \ref{th:levinsonselberg} provides that almost all non-trivial $a$-values with ordinates $0<\gamma_a\leq T$ have a distance less than $$\frac{\mu(\gamma_a) \sqrt{\log\log \gamma_a}}{\log \gamma_a}$$ to the critical line, where $\mu$ is any positive function tending to infinity. Theorem \ref{th:avalues-case1} yields that, among them, there are infinitely many at distance less than $$\frac{\mu(\gamma_a)}{\log \gamma_a}$$ to the critical line.\par By assuming certain conjectures, we find filling discs for the Riemann zeta-function whose radii are significantly smaller than the ones provided by Theorem \ref{th:avalues-case1}. We refer to Section \ref{sec:filling_zeta}. However, for $\mathcal{L}\in\mathcal{S}^{\#}$ the radii of the filling discs cannot shrink arbitrarily fast, as $\tau\rightarrow\infty$.\par {\bf Lower bound for the radii of filling discs in $\mathcal{S}^{\#}$.} The radii $\lambda_k$ of a sequence of filling discs $D_{\lambda_k}(\frac{1}{2}+i\tau_k)$, $k\in\mathbb{N}$, for $\mathcal{L}\in\mathcal{S}^{\#}$ are trivially bounded from below by $|\tau_k|^{-\theta_{\mathcal{L}}(\frac{1}{2})-\delta}$, where $\delta>0$ is an arbitrary positive real number and $\theta_{\mathcal{L}}(\frac{1}{2})$ defined as in Section \ref{sec:orderofgrowth}. This observation follows from the subsequent lemma. \begin{lemma}\label{lem:lowerboundrad} Let $\mathcal{L}\in\mathcal{S}^{\#}$. 
Suppose that there are sequences $(\lambda_k)_k$, $(\tau_k)_k$ with $\lambda_k,\tau_k \in \mathbb{R}^+$ and $\lim_{k\rightarrow\infty}\tau_k = \infty$ such that $D_{\lambda_k}(\frac{1}{2}+i\tau_k)$, $k\in\mathbb{N}$, forms a sequence of filling discs for $\mathcal{L}$. Then, for any $\delta>0$, $$ \lim_{k\rightarrow\infty} \left( \lambda_k\tau_k^{\theta_{\mathcal{L}}(\frac{1}{2})+\delta}\right) =\infty. $$ \end{lemma} \begin{proof} Assume that there exists a $\delta>0$ and a subsequence of $(\lambda_k)_k$, which we assume to be $(\lambda_k)_k$, such that $$ \lambda_k \ll \tau_k^{-\theta_{\mathcal{L}}(\frac{1}{2})-\delta} $$ as $k\rightarrow\infty$. We set $f_k(z):=\mathcal{L}(\frac{1}{2} + i\tau_k + \lambda_k z)$ and regard the family $\mathcal{F}:=\{f_k\}_k$ of functions on $\mathbb{D}$. Then, \eqref{eq:finiteorderderivative} and the continuity of the function $\theta_{\mathcal{L}}(\sigma)$ assure that \begin{equation} \left| \mathcal{L}' (\tfrac{1}{2}+i\tau_k + \lambda_k z)\right| \ll_{\delta,\ell} \tau_k^{\theta_{\mathcal{L}}(\sigma) + \delta/2}, \end{equation} uniformly for $z\in \mathbb{D}$, as $k\rightarrow\infty$. Consequently, we obtain for the spherical derivatives $$ f^{\#}_k(z) \leq \lambda_k \left| \mathcal{L}' (\tfrac{1}{2}+i\tau_k + \lambda_k z)\right| \ll \tau_k^{-\theta_{\mathcal{L}}(\frac{1}{2})-\delta} \ \cdot \ \tau_k^{\theta_{\mathcal{L}}(\sigma) + \delta/2} = \tau_k^{-\delta/2} $$ uniformly for $z\in \mathbb{D}$, as $k\rightarrow\infty$. Hence, the family $\mathcal{F}^{\#}:= \{f_k^{\#}\}_k$ is bounded on $\mathbb{D}$. According to Marty's theorem, $\mathcal{F}$ is normal in $\mathbb{D}$. This is in contradiction to Proposition \ref{prop:charfilldiscs} which states that $\mathcal{F}$ is not normal in $\mathbb{D}$. 
\end{proof} {\bf Generalizations of Theorem \ref{th:avalues-case1}.} In Theorem \ref{th:avalues-case1} we studied filling discs induced by a Riemann-type functional equation on $\sigma=\frac{1}{2}$. It would be interesting to investigate whether similar statements can be retrieved for functions satisfying a different type of functional equation, for example the Selberg zeta-function. \subsection{Filling discs induced by Selberg's central limit law}\label{sec:Selberglimitlaw} In this section, we consider filling discs for functions $\mathcal{L}\in\mathcal{S}^{*}\subset\mathcal{S}^{\#}$ which satisfy Selberg's prime coefficient condition (S.6$^*$) and the zero-density estimate (DH). Let $n_{\mathcal{L}}$ be defined by (S.6$^*$) and set $$ g_{u}(t):= \exp\left( u \sqrt{\tfrac{1}{2} n_{\mathcal{L}} \log \log t} \right),\qquad t\geq 2,\qquad u\in\mathbb{R}. $$ For $\mathcal{L}\in\mathcal{S}^*$ and $\alpha,\beta\in\mathbb{R}$ with $\alpha<\beta$, we set $$ W_{g_{\alpha}(t),g_{\beta}(t)}:= \left\{ t\in [2,\infty)\ : \ g_{\alpha}(t) \leq \left|\mathcal{L}(\tfrac{1}{2}+it) \right| \leq g_{\beta}(t)\right\}, $$ as in Section \ref{sec:smalllargeONcritline}. \begin{theorem}\label{th:selberg1} Let $\mathcal{L}\in\mathcal{S}^*$ and $\alpha,\beta\in\mathbb{R}$ with $\alpha<\beta$. Then, for any sequence $(\tau_k)_k$ with $\tau_k\in W_{g_{\alpha}(t),g_{\beta}(t)}\subset[2,\infty)$ and $\lim_{k\rightarrow\infty}\tau_k = \infty$, the discs defined by $$ |s-\tfrac{1}{2}-i\tau_k|< \frac{\mu(\tau_k)\lambda_{\alpha,\beta}(\tau_k)}{\log \tau_k},\qquad k\in\mathbb{N}, $$ where $\mu:[2,\infty)\rightarrow\mathbb{R}^+$ is any positive function with $\lim_{t\rightarrow\infty}\mu(t)=\infty$ and \begin{equation}\label{def:lambda} \lambda_{\alpha,\beta}(t):= \exp( (-\alpha+\max\{0,2\beta\}) \sqrt{\tfrac{1}{2} n_{\mathcal{L}} \log \log t} ), \end{equation} form a sequence of filling discs for $\mathcal{L}$. 
\end{theorem} \begin{proof} The existence of a sequence $(\tau_k)_k$ with $\tau_k\in W_{g_{\alpha}(t),g_{\beta}(t)}\subset[2,\infty)$ and $\lim_{k\rightarrow\infty}\tau_k = \infty$ is assured by Theorem \ref{th:measselberglimitlaw} (a). By Lemma \ref{lem:connectionGG'}, we know that, for sufficiently large $k\in\mathbb{N}$, $$ \left| \mathcal{L}^{\#}(\tfrac{1}{2}+i\tau_k)\right| >\tfrac{1}{3} d_{\mathcal{L}} \log \tau_k \ \frac{|\mathcal{L}(\frac{1}{2}+i\tau_k)|}{1+|\mathcal{L}(\frac{1}{2}+i\tau_k)|^2}. $$ Due to the definition of $W_{g_{\alpha}(t),g_{\beta}(t)}\subset[2,\infty)$, this implies that, for sufficiently large $k\in\mathbb{N}$, $$ \left|\mathcal{L}^{\#}(\tfrac{1}{2}+i\tau_k) \right|>\tfrac{1}{4} d_{\mathcal{L}} \log \tau_k \cdot \frac{ \exp( \alpha \sqrt{\tfrac{1}{2} n_{\mathcal{L}} \log \log \tau_k} ) }{1+ \exp( 2\beta \sqrt{\tfrac{1}{2} n_{\mathcal{L}} \log \log \tau_k} )} . $$ Thus, we obtain that $$ \lim_{k\rightarrow\infty} \frac{\mu(\tau_k)\lambda_{\alpha,\beta}(\tau_k)}{\log \tau_k } \ \mathcal{L}^{\#}(\tfrac{1}{2}+i\tau_k) = \infty $$ for any positive function $\mu$ satisfying $\lim_{t\rightarrow\infty}\mu(t)=\infty$ and $\lambda_{\alpha,\beta}(t)$ defined by \eqref{def:lambda}. The assertion follows by Lehto's criterion (Theorem \ref{th:lehto}). \end{proof} Theorem \ref{th:measselberglimitlaw} (a) provides a quantitative description of the set $W_{g_{\alpha}(t),g_{\beta}(t)}\subset[2,\infty)$, which allows us to estimate the number of pairwise disjoint filling discs for $\mathcal{L}\in\mathcal{S}^{*}$ in Theorem \ref{th:selberg1}.\par Recall the definition of the counting functions $N_{\{z_n\}_n}(T)$ and $N_a(E,T)$ introduced in Section \ref{sec:fillingdiscs_basics}. Let $\mathcal{L}\in\mathcal{S}^*$ and $\alpha,\beta\in\mathbb{R}$ with $\alpha<\beta$. Further, let $\lambda_{\alpha,\beta}$ be defined by \eqref{def:lambda} and $\mu:[2,\infty)$ be a positive function with $\lim_{t\rightarrow\infty}\mu(t)=\infty$. 
According to Theorem \ref{th:measselberglimitlaw} and Theorem \ref{th:selberg1}, we find a sequence $(\tau_k)_k$ of positive real numbers tending to infinity with $$ N_{\{\frac{1}{2}+i\tau_k\}_k}(T) \geq N_{\{\frac{1}{2}+i\tau_k\}_k}(T) - N_{\{\frac{1}{2}+i\tau_k\}_k}\left(\tfrac{T}{2}\right) \gg \frac{T\log T}{\mu\left(\frac{T}{2}\right) \lambda_{\alpha,\beta}(T)} $$ such that the discs defined by $$ \left|s-\tfrac{1}{2}-i\tau_k\right| <\frac{\mu(\tau_k)\lambda_{\alpha,\beta}(\tau_k)}{\log \tau_k}, \qquad k\in\mathbb{N}, $$ form a sequence of {\it pairwise disjoint} filling discs for $\mathcal{L}$. By Lemma \ref{lem:countingfillingdiscs}, this implies that, for every $a\in\mathbb{C}$ with at most one exception, the number $N_a(E,T)$ of $a$-points with $0<\gamma_a\leq T$ inside the region $E$ defined by $$ \tfrac{1}{2} - \frac{\mu(t)\lambda_{\alpha,\beta}(t)}{\log t} < \sigma < \frac{1}{2} + \frac{\mu(t)\lambda_{\alpha,\beta}(t)}{\log t}, \qquad t\geq 2. $$ satisfies $$ N_a(E,T) \gg \frac{T}{\mu\left(\frac{T}{2}\right) \lambda_{\alpha,\beta}(T)}. $$ However, by noticing that, for any real $\alpha<\beta$ and sufficiently large $t$ $$ \sqrt{\log\log t} < \lambda_{\alpha,\beta}(t), $$ we deduce from Theorem \ref{th:levinsonselberg} that $N_a(E,T)\sim \frac{d_{\mathcal{L}}}{2\pi} T \log T$, as $T\rightarrow\infty$. Thus, the filling disc setting of Theorem \ref{th:selberg1} does not beat Theorem \ref{th:levinsonselberg} in counting the $a$-points inside the region $E$.\par Nevertheless, as the information retrieved from filling discs is qualitatively different than the one provided by Levinson's method in Theorem \ref{th:levinsonselberg}, it is worth stating Theorem \ref{th:selberg1}. 
\subsection{Filling discs for the Riemann zeta-function via \texorpdfstring{ $\Omega$}{Omega}-results \texorpdfstring{for $\zeta'(\rho_a)$}{}}\label{sec:filling_zeta} In the case of the Riemann zeta-function there are (conditional) $\Omega$-results for $\zeta'(\rho_a)$ at our disposal which enable us to detect sequences of filling discs whose radii are significantly smaller than the ones in the general setting of Theorem \ref{th:avalues-case1}. {\bf Discrete moments of $\zeta'(s)$ with respect to non-trivial $a$-points.} Information about large values of $|\zeta'(\rho_a)|$ can be retrieved from asymptotic estimates for discrete moments \begin{equation*} I_{a}(T):=\frac{1}{N_a(T)} \sum_{0<\gamma_a \leq T} \zeta'(\rho_a), \quad \mbox{resp.} \quad J_{a,k}(T):=\frac{1}{N_a(T)} \sum_{0<\gamma_a \leq T} \left| \zeta'(\rho_a) \right|^{2k}; \end{equation*} here $N_a(T)$ denotes as usual the number of non-trivial $a$-points $\rho_a$ of the zeta-function with imaginary part $0<\gamma_a \leq T$. Asymptotic estimates for $I_{a}(T)$ and $J_{a,k}(T)$ can be established by residue methods. Naturally, these asymptotics are powerful tools to estimate the number of simple $a$-points, in particular the number of simple zeros of the Riemann zeta-function. 
For works in this direction, we refer to Garunk\v{s}tis \& Steuding \cite{garunkstissteuding:2010} concerning simple $a$-points and exemplarily to Conrey, Ghosh \& Gonek \cite{conreygoshgonek:1988, conreygoshgonek:1998}, Bui, Conrey \& Young \cite{buiconreyyoung:2011} and Bui \& Heath-Brown \cite{buiheathbrown:2013} concerning simple zeros.\par Garunk\v{s}tis \& Steuding \cite{garunkstissteuding:2010} proved that for any fixed $a\in\mathbb{C}$, as $T\rightarrow\infty$, \begin{equation}\label{eq:asympIak} I_{a}(T) = \frac{1}{N_a(T)}\sum_{0<\gamma_a \leq T} \zeta'(\rho_a) \sim (\tfrac{1}{2}-a) \log\frac{T}{2\pi} + c(a) \end{equation} with some computable complex constant $c(a)$ depending on $a$.\footnote{Actually, Garunk\v{s}tis \& Steuding \cite{garunkstissteuding:2010} obtained even a more precise asymptotic formula. They also give a heuristic explanation why the leading term $\log T$ vanishes in the case $a=\frac{1}{2}$.} For $a=0$, this asymptotic was already established by Conrey, Ghosh \& Gonek \cite{conreygoshgonek:1988} and later refined by Fujii \cite{fujii:1994}. From the asymptotic expansion \eqref{eq:asympIak} of $I_{a}(T)$, we can immediately deduce the following corollary. \begin{corollary} Let $a\in\mathbb{C}\setminus\{\frac{1}{2}\}$. Then, for every constant $0<c< \left|\frac{1}{2}-a\right|$, there are infinitely many non-trivial $a$-points $\rho_a = \beta_a + i\gamma_a$ of the Riemann zeta-function such that \begin{equation}\label{zetaprime-large1} \left|\zeta'(\rho_a) \right| \geq c \log |\gamma_a|. \end{equation} \end{corollary} \begin{proof} Let $a\in\mathbb{C}\setminus\{\frac{1}{2}\}$ and $0<c< \left|\frac{1}{2}-a\right|$. Assume that for all but finitely many non-trivial $a$-points $\rho_a$, we have $$ \left|\zeta'(\rho_a) \right| < c \log |\gamma_a|. $$ Then, as $T\rightarrow\infty$, \begin{equation} |I_{a}(T)|\leq \frac{1}{N_a(T)} \sum_{0<\gamma_a \leq T} \left| \zeta'(\rho_a) \right| < c \log T + o(1). 
\end{equation} This is, however, in contradiction to \eqref{eq:asympIak}. \end{proof} By Lehto's criterion (Theorem \ref{th:lehto}), we are now able to show the following. \begin{theorem}\label{th:avalues-case1-zeta} For $a\in\mathbb{C}\setminus\{\frac{1}{2}\}$, there exists a sequence $(\rho_{a,k})_k$ of non-trivial $a$-points of the Riemann zeta-function with ordinates $\gamma_{a,k}>0$ such that the discs defined by $$ |s-\rho_{a,k}|< \frac{\mu(\gamma_{a,k})}{ \log \gamma_{a,k} },\qquad k\in\mathbb{N}, $$ with any positive function $\mu:[2,\infty)\rightarrow\mathbb{R}^+$ satisfying $\lim_{t\rightarrow\infty}\mu(t)=\infty$, form a sequence of filling discs for $\zeta$. \end{theorem} \begin{proof}[Proof of Theorem \ref{th:avalues-case1-zeta}] Let $(\rho_{a,k})_k$ be a sequence of non-trivial $a$-points with positive imaginary parts such that \eqref{zetaprime-large1} holds. Then, for any positive function $\mu$ with $\lim_{t\rightarrow\infty}\mu(t)=\infty$, we have $$ \lim_{k\rightarrow\infty} \frac{\mu(\gamma_{a,k})}{ \log \gamma_{a,k} } \zeta^{\#}(\rho_{a,k}) = \lim_{k\rightarrow\infty} \frac{\mu(\gamma_{a,k})}{ \log \gamma_{a,k} } \frac{|\zeta'(\rho_{a,k})|}{1+|a|^2} = \infty. $$ The theorem follows by Lehto's criterion (Theorem \ref{th:lehto}). \end{proof} As far as the author knows, non-trivial conditional or unconditional asymptotic estimates for $J_{a,k}(T)$ are only known in the case $a= 0$.\par {\bf Discrete moments of $\zeta'(s)$ with respect to non-trivial zeros.} Gonek \cite{gonek:1989} and Hejhal \cite{hejhal:1989} conjectured independently that $J_{0,k}(T)\asymp (\log T) ^{k^2 +2k}$ for $k\in\mathbb{R}$, as $T\rightarrow\infty$. Hughes, Keating \& O'Connell \cite{hugheskeatingoconnell:2000} conjectured that $J_{0,k}(T)\sim C_k (\log T) ^{k^2 +2k}$ for $k>-3/2$, as $T\rightarrow\infty$, with an explicit computable constant $C_k$ derived from models for the Riemann zeta-function by random matrix theory. 
Assuming the Riemann hypothesis, Gonek \cite{gonek:1984} obtained that $J_{0,1}\sim \frac{1}{12}(\log T)^3$ and Ng \cite{ng:2004} that $J_{0,2}\asymp (\log T)^8$. Using a method developed by Rudnick \& Soundararajan \cite{rudnicksoundararajan:2005} and assuming the generalized Riemann hypothesis (GRH) for Dirichlet $L$-functions, Milinovich \& Ng \cite{milinovichng:2013} could derive lower bounds for the discrete moments $J_{0,k}(T)$. For $k\in\mathbb{N}$, as $T\rightarrow\infty$, \begin{equation}\label{eq:asym_JkT} J_{0,k}(T)=\frac{1}{N(T)} \sum_{0<\gamma\leq T} \left|\zeta'(\rho) \right|^{2k} \gg (\log T) ^{k^2 +2k}, \end{equation} where $N(T):=N_0(T)$ denotes, as usual, the number of non-trivial zeros of $\zeta$ with imaginary part $0<\gamma\leq T$. The assumption of the GRH is required to derive a suitable asymptotic formula for sums of the shape $$ \sum_{0<\gamma\leq T} \zeta'(\rho)A_X(\rho)^{k-1}A_X(1-\rho)^k $$ with $A_X(s)=\sum_{n\leq X}n^{-s}$. Originally, Milinovich \cite{milinovich:phd} deduced this asymptotic formula from the main theorem of Ng \cite{ng:2007} where it was enough to assume the Riemann hypothesis. Later, they discovered a serious mistake in the error term of the asymptotic expansion in the main theorem of Ng \cite{ng:2007}. However, as they indicate in \cite{milinovichng:2013}, it appears that this mistake may be fixed with additional effort. Under the assumption of the Riemann hypothesis, Milinovich \cite{milinovich:2010} could bound $J_{0,k}(T)$ from above by $\ll (\log T) ^{k^2 +2k+\varepsilon}$ with an arbitrary $\varepsilon>0$. This upper bound together with the lower bound \eqref{eq:asym_JkT} supports the conjecture by Gonek \cite{gonek:1989}, Hejhal \cite{hejhal:1989} and Hughes, Keating \& O'Connell \cite{hugheskeatingoconnell:2000}. The following corollary is an immediate consequence of Milinovich \& Ng's lower bound \eqref{eq:asym_JkT}. 
\begin{corollary}\label{cor:lowerboundzetaprimerho} Assume the GRH for Dirichlet $L$-functions. For $\delta>0$, let $A$ be the set of all non-trivial zeros $\rho$ of the Riemann zeta function satisfying $$ \left|\zeta'(\rho)\right| \geq (\log |\gamma|)^{\delta}. $$ Then, for any $\eta>0$, as $T\rightarrow\infty$, $N_{A}(T) \gg T^{1-\eta}.$ \, \footnote{The function $N_A(T)$ counts all elements in $A$ with positive imaginary part less or equal to $T$. For a precise definition, we refer to Section \ref{sec:fillingdiscs_basics}.} \end{corollary} \begin{proof} For a given $\delta>0$, we choose $K\in\mathbb{N}$ such that $K^2 + 2K > 2K\delta$. We denote by $A$ the set of all non-trivial zeros $\rho$ of the Riemann zeta function satisfying $\left|\zeta'(\rho)\right| \geq (\log |\gamma|)^{\delta} $. Analogously, we denote by $B$ the set of all non-trivial zeros $\rho$ for which $\left|\zeta'(\rho)\right| < (\log |\gamma|)^{\delta}$ is true. Assume that there exists a real number $\eta>0$ such that $N_A(T)\ll T^{1-\eta}$, as $T\rightarrow\infty$. Then, it follows from the Riemann-von Mangoldt formula that $N_B(T)\sim N(T)$, as $T\rightarrow\infty$. Under the assumption of the Riemann hypothesis, we know that all non-trivial zeros are on the line $\sigma=\frac{1}{2}$ and that $\zeta(\frac{1}{2}+it)\ll t^{\varepsilon}$ for any $\varepsilon>0$, as $t\rightarrow\infty$. Taking $\varepsilon=\eta/2K$ and putting everything together, we obtain that, as $T\rightarrow\infty$, \begin{align*} J_{0,K}(T)=&\frac{1}{N(T)} \sum_{0<\gamma\leq T} \left|\zeta'(\rho) \right|^{2K} \\ & \ll \frac{1}{N(T)} \left( N_A (T)\cdot T^{2K \cdot \frac{\eta}{2K}} + N_{B}(T)\cdot (\log T)^{2K\delta} \right)\\ & \ll (\log T)^{2K\delta}. \end{align*} By our choice of $K$, this is in contradiction to \eqref{eq:asym_JkT}. The assertion is proved. \end{proof} By Lehto's criterion (Theorem \ref{th:lehto}), we are now able to show the following. 
\begin{theorem}\label{th:avalues-case3} Assume the GRH for Dirichlet $L$-functions. Then, for every $\delta> 0$, there is a set $A$ of non-trivial zeros $\rho = \frac{1}{2}+i\gamma$ of the Riemann zeta-function with $N_A(T)\gg T^{1-\eta}$ for any $\eta>0$, as $T\rightarrow\infty$, such that the discs defined by $$ |s-\rho|<\frac{1}{(\log|\gamma|)^{\delta}}, \qquad \rho\in A , $$ form a sequence of filling discs for $\zeta(s)$. \end{theorem} \begin{proof} For a given $\delta>0$, we define $A$ to be the set of all non-trivial zeros $\rho=\frac{1}{2}+i\gamma$ of the Riemann zeta-function satisfying $$ |\zeta'(\tfrac{1}{2}+i\gamma)| \geq (\log|\gamma|)^{\delta + 1}. $$ Then, Corollary \ref{cor:lowerboundzetaprimerho} assures that $N_{A}(T)\gg T^{1-\eta}$ for any $\eta>0$, as $T\rightarrow\infty$. Moreover, $$ \lim_{\begin{subarray}{c} \, |\gamma|\rightarrow\infty \\ \frac{1}{2}+i\gamma\in A \end{subarray}} \frac{1}{(\log |\gamma|)^{\delta}} \, \zeta^{\#}(\tfrac{1}{2}+i\gamma) = \lim_{\begin{subarray}{c} \, |\gamma|\rightarrow\infty \\ \frac{1}{2}+i\gamma\in A \end{subarray}} \frac{1}{(\log |\gamma|)^{\delta}} \, \left|\zeta'(\tfrac{1}{2}+i\gamma) \right| = \infty, $$ and the theorem follows by Lehto's criterion (Theorem \ref{th:lehto}). \end{proof} {\bf Soundararajan's resonance method.} A resonance method developed by Sounda\-rara\-jan \cite{soundararajan:2008} yields another approach to retrieve information about large values of $\zeta'(\rho)$. Ng \cite{ng:2008} showed that, under assumption of the GRH for Dirichlet $L$-functions, there are infinitely many non-trivial zeros such that $$ \zeta'(\rho) \gg \exp \left(c_0 \frac{\log |\gamma|}{\log\log |\gamma|} \right) $$ where $c_0 = \frac{1}{\sqrt{2}} - \varepsilon$ with any $\varepsilon>0$. By Lehto's criterion, we deduce the following. \begin{theorem}\label{th:resonance} Assume the GRH for Dirichlet $L$-functions. 
Then, there exists a sequence $(\rho_k)_k$ of non-trivial zeros $\rho_k = \frac{1}{2}+i\gamma_k$ of the Riemann zeta-function with $\gamma_k>0$ such that the discs defined by $$ |s-\rho_k|<\mu(\gamma_k)\exp \left(-c_0 \frac{\log \gamma_k}{\log\log \gamma_k} \right), \qquad k\in\mathbb{N}, $$ form a sequence of filling discs for $\zeta(s)$, where $\mu:[2,\infty)\rightarrow\mathbb{R}^+$ is any positive function satisfying $\lim_{t\rightarrow\infty}\mu(t)=\infty$. \end{theorem} \begin{proof} According to Ng \cite{ng:2008}, for any fixed $0<c_0<\sqrt{2}$, there exist a constant $C>0$ and a sequence $(\rho_k)_k$ of non-trivial zeros $\rho_k = \frac{1}{2}+i\gamma_k$ of the Riemann zeta-function with $\gamma_k>0$ such that $$ \left| \zeta'(\rho_k) \right| \geq C \exp \left(c_0 \frac{\log \gamma_k}{\log\log \gamma_k} \right) $$ for every $k\in\mathbb{N}$. Hence, for any positive function $\mu$ satisfying $\lim_{t\rightarrow\infty}\mu(t)=\infty$, \begin{align*} &\lim_{k\rightarrow\infty}\, \mu(\gamma_k)\exp \left(-c_0 \frac{\log |\gamma_k|}{\log\log |\gamma_k|} \right)\, \zeta^{\#}(\rho_k)\\ &= \lim_{k\rightarrow\infty}\, \mu(\gamma_k)\exp \left(-c_0 \frac{\log |\gamma_k|}{\log\log |\gamma_k|} \right)\, |\zeta'(\rho_k)|\\ &=\infty. \end{align*} The assertion follows by Lehto's criterion (Theorem \ref{th:lehto}). \end{proof} \section{Non-trivial a-points to the left of the critical line} According to Selberg's $a$-point conjecture, we expect that, for any $a\in\mathbb{C}\setminus\{0\}$, about $3/4$-th of the non-trivial $a$-points of the Riemann zeta-function lie to the left of the critical line. However, unconditionally, it is not even known whether there are {\it infinitely many} non-trivial $a$-points to the left of the critical line. This motivated Steuding to raise the following question at the problem session of the 2011 Palanga conference in honour of Jonas Kubilius. 
Is it possible to find for every $a\in\mathbb{C}$ a sequence of points $s_k=\sigma_k + it_k$, $k\in\mathbb{N}$, with $\sigma_k<\frac{1}{2}$ and $\lim_{k\rightarrow\infty} t_k = \infty$ such that $$ \lim_{k\rightarrow\infty} \zeta(s_k) = a. $$ By means of our results in Section \ref{sec:largesmall} concerning small and large values in a neighbourhood of the critical line, we can give a very first answer to this question. \begin{corollary}\label{cor:apointsleft} Let $c>0$ and let $\epsilon:[2,\infty)\rightarrow\mathbb{R}$ be a function satisfying $$ 0<\epsilon(t)\leq \frac{c}{ \log t}\qquad \mbox{for }t\geq 2. $$ Then, for every $\alpha\geq 0$, there exists an $a\in\mathbb{C}$ with $|a|=\alpha$ and a sequence $(t_k)$ with $t_k\in[1,\infty)$ such that $$ \lim_{k\rightarrow\infty} \zeta(\tfrac{1}{2}-\epsilon(t_k)+it_k) = a. $$ \end{corollary} \begin{proof} The case $\alpha=0$ follows directly from Corollary \ref{cor:selbergsmalllarge}. Thus, suppose that $\alpha>0$. If we take $m=\alpha+1$ in Corollary \ref{cor:selbergsmalllarge}, the intermediate value theorem assures the existence of a sequence $(t_k)_k$ with $t_k\in[1,\infty)$ and $\lim_{k\rightarrow\infty} t_k = \infty$ such that $$ \zeta(\tfrac{1}{2}-\epsilon(t_k)+it_k) \in \partial D_{\alpha}(0) $$ for all $k\in\mathbb{N}$. As the circle $\partial D_{\alpha}(0)$ is compact, the set $\{\zeta(\tfrac{1}{2}-\epsilon(t_k)+it_k)\}_{k\in\mathbb{N}}\subset \partial D_{\alpha}(0)$ has at least one accumulation point $a\in \partial D_{\alpha}(0)$. Thus, there is a subsequence $(t_{k_j})_j$ of $(t_k)_k$ such that $$ \lim_{j\rightarrow\infty} \zeta(\tfrac{1}{2}-\epsilon(t_{k_j})+it_{k_j}) = a.
$$ \end{proof} \section{Summary: a-points of the Riemann zeta-function near the critical line} Let $\lambda$ be a positive function on $\mathbb{R}$ with $\lim_{t\rightarrow\infty} \lambda(t)=0$ and $S_{\lambda}$ be the region defined by $$ \tfrac{1}{2} - \lambda(t) <\sigma < \tfrac{1}{2} + \lambda(t), \qquad t\geq 2. $$ In the following corollary, we summarize our knowledge on how fast $\lambda(t)$ can tend to zero, as $t\rightarrow\infty$, such that, for a given $a\in\mathbb{C}$, there are almost all, a positive proportion, resp. infinitely many of all non-trivial $a$-points of the Riemann zeta-function inside $S_{\lambda}$. For more detailed information the reader is referred to the corresponding theorems of the preceding sections. \begin{corollary}[Levinson, Selberg, Tsang, Christ]\label{cor:summaryapoints} $\mbox{ }$ \begin{itemize} \item[(a)] Unconditionally, for every $a\in\mathbb{C}$, there are almost all $a$-points of the Riemann zeta-function (in the sense of density) inside the region $S_{\lambda}$ if one chooses $$ \lambda(t)=\frac{\mu(t)\sqrt{\log\log t}}{\log t}, \qquad t\geq 2, $$ with any positive function $\mu$ satisfying $\lim_{t\rightarrow\infty}\mu(t)=\infty$.\footnote{This follows from Theorem \ref{th:levinsonselberg} which was proved in the framework of the class $\mathcal{S}^*$ by refining a method developed by Levinson \cite{levinson:1975} with results of Selberg \cite{selberg:1992}, Tsang \cite{tsang:1984} and Steuding \cite{steuding:2007}.} \item[(b)] Under the assumption of the Riemann hypothesis, for every $a\in\mathbb{C}$, there is a positive proportion of all $a$-points of the Riemann zeta-function (in the sense of density) inside the region $S_{\lambda}$ if one chooses $$ \lambda(t)=\frac{c\sqrt{\log\log t}}{\log t}, \qquad t\geq 2, $$ with any constant $c>0$.\footnote{This is due to Selberg \cite{selberg:1992}; see Section \ref{sec:apointsgeneral}.} \item[(c)] Unconditionally, for every $a\in\mathbb{C}$, with at most one exception,
there are infinitely many $a$-points of the Riemann zeta-function inside the region $S_{\lambda}$ if one chooses $$ \lambda(t)=\frac{\mu(t)}{\log t}, \qquad t\geq 2, $$ with any positive function $\mu$ satisfying $\lim_{t\rightarrow\infty}\mu(t)=\infty$.\footnote{This follows from Theorem \ref{th:avalues-case1} which was proved in the framework of the class $\mathcal{G}$ by relying on normality arguments.}\\ Moreover, one knows unconditionally that almost all zeros of the Riemann zeta-function (in the sense of density) lie in this region.\footnote{This is due to Selberg \cite{selberg:1946}; see Theorem \ref{th:selbergzero}.} \item[(d)] Under the assumption of the Riemann hypothesis, for every $a\in\mathbb{C}$, there are infinitely many $a$-points of the Riemann zeta-function inside the region $S_{\lambda}$ if one chooses $$ \lambda(t)=\frac{\mu(t)(\log\log\log t)^3}{\log t \sqrt{\log\log t}}, \qquad t\geq 2, $$ with any positive function $\mu$ satisfying $\lim_{t\rightarrow\infty}\mu(t)=\infty$.\footnote{This is due to Selberg \cite{selberg:1992}; see Section \ref{sec:apointsgeneral}.} \item[(e)] Under the assumption of the GRH for Dirichlet $L$-functions,\footnote{Very likely this can be also obtained by only assuming RH; see the remarks in Section \ref{sec:filling_zeta}.} for every $a\in\mathbb{C}$, with at most one exception, there are infinitely many $a$-points of the Riemann zeta-function inside the region $S_{\lambda}$ if one chooses $$ \lambda(t)= \frac{1}{(\log t)^{\delta}}, \qquad t\geq 2, $$ with any $\delta>0$.\footnote{This follows from Theorem \ref{th:avalues-case3} which was deduced by certain normality arguments from a result of Milinovich \& Ng \cite{milinovichng:2013}.} \item[(f)] Under the assumption of the GRH for Dirichlet $L$-functions, for every $a\in\mathbb{C}$, with at most one exception, there are infinitely many $a$-points of the Riemann zeta-function inside the region $S_{\lambda}$ if one chooses $$ \lambda(t) = \mu(t)\exp
\left(-c_0 \frac{\log t}{\log\log t} \right), \qquad t\geq 2 $$ with any positive function $\mu$ satisfying $\lim_{t\rightarrow\infty}\mu(t)=\infty$ and any constant $0<c_0 < \frac{1}{\sqrt{2}}$.\footnote{This follows from Theorem \ref{th:resonance} which was deduced by certain normality arguments from a result of Ng \cite{ng:2008}.} \end{itemize} \end{corollary} \chapter{Denseness results for the Riemann zeta-function in the critical strip}\label{ch:curve} The critical line is a natural boundary for the universality property of the Riemann zeta-function. Even if we slightly change the concept, the functional equation strongly restricts the set of approximable functions; see Chapter \ref{ch:conceptsuniv}. In this chapter we investigate a concept of universality for the Riemann zeta-function on the critical line that is significantly weaker than the one regarded in Chapter \ref{ch:conceptsuniv}. Roughly speaking, we set the scaling factor of the limiting process introduced in Section \ref{sec:shiftingshrinking} to be constantly equal to zero and drop our request to approximate analytic functions, but restrict to the approximation of complex points $a\in\mathbb{C}$ by shifts of the Riemann zeta-function on the critical line.\par We know that, for every $\alpha\in[0,\infty)$, there exists a sequence $(\tau_k)_k$ of real numbers $\tau_k\in[2,\infty)$ with $\lim_{k\rightarrow\infty}\tau_k=\infty$ such that $$ \lim_{k\rightarrow\infty} \left| \zeta(\tfrac{1}{2}+i\tau_k) \right| = \alpha; $$ see Section \ref{subsec:summaryunboundedness}. However, except for $a=0$, we do not know explicitly any other example of an accumulation point $a\in\mathbb{C}$ of the set $$V(\tfrac{1}{2}):=\{\zeta(\tfrac{1}{2}+it) \, : \, t\in[1,\infty)\}.$$ According to a conjecture of Ra\-ma\-chandra, we expect that $$ \overline{V(\tfrac{1}{2})} =\mathbb{C}, $$ i.e., we conjecture that the values of the Riemann zeta-function on the critical line lie dense in $\mathbb{C}$.
However, to prove or disprove this conjecture with present day's methods seems to be out of reach.\par Bohr \cite{bohrcourant:1914, bohr:1915, bohrjessen:1930, bohrjessen:1932} and his collaborators established denseness results for the zeta-values on vertical lines inside the strip $\frac{1}{2}<\sigma\leq 1$. It was their pioneering works that directed Voronin towards his universality theorem. In Section \ref{sec:bohr}, we state the main results of Bohr and his collaborators on the value-distribution of the Riemann zeta-function to the right of the critical line. We briefly report on their methods and discuss why these methods fail to obtain a denseness statement for the zeta-values on the critical line.\par In Section \ref{sec:qualdiff}, we point out that a denseness result for the critical line is qualitatively different from the one on vertical lines in $\frac{1}{2}<\sigma\leq 1$. \par We know that $0\in V(\tfrac{1}{2})$. According to Ramachandra's conjecture, we expect that $0$ is in particular an interior point of $\overline{V(\tfrac{1}{2})}$. In Section \ref{sec:appr}, we show that there is a subinterval $A\subset [0,2\pi)$ of length at least $\frac{\pi}{4}$ such that, for every $\theta\in A$, there is a sequence $(t_n)_n$ of numbers $t_n\in[2,\infty)$ with $$ \zeta(\tfrac{1}{2}+it_n)\neq 0, \qquad \lim_{n\rightarrow\infty} \zeta(\tfrac{1}{2}+it_n) = 0 \qquad \mbox{ and } \qquad \arg \zeta(\tfrac{1}{2}+it_n) \equiv \theta \mod 2\pi. $$ In Section \ref{sec:curves}, we approach Ramachandra's conjecture by asking whether there are curves $[1,\infty)\ni t \mapsto \tfrac{1}{2}+\epsilon(t)+it$ with $\lim_{t\rightarrow\infty}\epsilon(t)=0$ such that the values of the Riemann zeta-function on these curves lie dense in $\mathbb{C}$.
We obtain some positive answers both by relying on the $a$-point results of Section \ref{sec:apointsnormality} and by relying on Bohr's work.\par In Section \ref{sec:densesigma1}, we see that the latter question is much easier to handle if we regard curves which do not approach the left but the right boundary line of the strip of universality, i.e. the line $\sigma=1$. \par Finally, in Section \ref{sec:universalityoncurves}, we briefly indicate what happens to the limiting process of Section \ref{sec:shiftingshrinking} if we adjust the underlying conformal mappings such that they map $\mathbb{D}$ to discs which lie completely inside the strip $\frac{1}{2}<\sigma<1$, but arbitrarily close to the critical line. \par Although we mainly restrict to the Riemann zeta-function in this chapter, most of the results are true for related functions from quite general classes. \section{The works of Bohr and Voronin}\label{sec:bohr} At the beginning of the 20th century, Bohr and his collaborators studied the value-distribution of the Riemann zeta-function to the right of the critical line. {\bf Value-distribution on vertical lines in $\sigma>1$.} Due to the pole of the Riemann zeta-function at $s=1$, the characteristic convergence abscissae of the Dirichlet series expansion of the Riemann zeta-function are given by $\sigma_c=\sigma_u=\sigma_a=1$. Thus, according to Bohr \cite{bohr:1922}, the behaviour of the zeta-function on vertical lines in the half-plane $\sigma>1$ is ruled by almost periodicity; see Theorem \ref{th:almostperiod}. For every fixed $\sigma>1$, we know that $$ \frac{\zeta(2\sigma)}{\zeta(\sigma)} \leq |\zeta(\sigma+it)| \leq \zeta(\sigma) \qquad \mbox{ for }t\in\mathbb{R}. $$ and that these inequalities are sharp; see Apostol \cite[Chapt. 7.6]{apostol:1990}. 
As $$ \lim_{\sigma\rightarrow\infty} \frac{\zeta(2\sigma)}{\zeta(\sigma)} = \lim_{\sigma\rightarrow\infty} \zeta(\sigma) = 1, \qquad \lim_{\sigma\rightarrow 1+} \frac{\zeta(2\sigma)}{\zeta(\sigma)} = 0 \qquad \mbox{ and }\qquad \lim_{\sigma\rightarrow 1+} \zeta(\sigma) = \infty, $$ the set $$ V(\sigma):=\left\{ \zeta(\sigma+it) \, : \, t\in[1,\infty) \right\} $$ contracts to $1$, as $\sigma\rightarrow\infty$, and contains both arbitrarily small and arbitrarily large values, as $\sigma\rightarrow 1$. Beyond this observation, we know that, for every $\sigma>1$, the set $$ \left\{\log\zeta(\sigma + it) \, : \, t\in\mathbb{R} \right\} $$ lies dense in an area of $\mathbb{C}$ which is either simply connected and bounded by a convex curve or which is ring-shaped and bounded by two convex curves; see Bohr \& Jessen \cite{bohrjessen:1930} or Titchmarsh \cite[\S 11.6]{titchmarsh:1986}. Essential ingredients in the proof are the Euler product representation of the Riemann zeta-function and diophantine approximation.\par {\bf Bohr's denseness results on vertical lines in $\frac{1}{2}<\sigma\leq 1$.} Bohr \& Courant \cite{bohrcourant:1914} proved that the values taken by the Riemann zeta-function on an arbitrary vertical line inside the strip $\frac{1}{2}<\sigma \leq 1$ form a dense set in $\mathbb{C}$, i.e. $$ \overline{V(\sigma)} = \mathbb{C} \qquad \mbox{ for every }\sigma\in(\tfrac{1}{2},1]. $$ In fact, their proof yields the stronger statement that, for every $\sigma\in(\frac{1}{2},1]$, every $a\in\mathbb{C}$ and every $\varepsilon>0$, \begin{equation}\label{eq:denseBohr} \liminf_{T\rightarrow\infty} \frac{1}{T} {\rm{meas\ }} \left\{ t\in[1,\infty) \, : \, \left|\zeta(\sigma+it)-a \right|<\varepsilon \right\}>0. \end{equation} Even more precise results were obtained by Bohr \& Jessen \cite{bohrjessen:1932} and Laurin\v{c}ikas \cite[Chapt. 
4]{laurincikas:1991-2}, who established probabilistic limit theorems for the values of the logarithm of the zeta-function on vertical lines. In particular, Laurin\v{c}ikas \cite[Chapt. 4, Theorem 4.1]{laurincikas:1991-2} showed that, for every $\sigma>\frac{1}{2}$, there exists a Borel probability measure $\mu_{\sigma}$ such that, for every continuous and bounded function $f:\mathbb{C}\rightarrow\mathbb{C}$, $$ \lim_{T\rightarrow\infty} \ \frac{1}{2T} \int_{-T}^{T} f\bigl( \log \zeta(\sigma+it)\bigr) \mbox{\ d} t = \int_{\mathbb{C}} f(z) \mbox{\ d}\mu(z). $$ If $\sigma\in(\frac{1}{2},1]$ the support of $\mu_{\sigma}$ is the whole complex plane.\par {\bf Voronin's denseness results on vertical lines in $\frac{1}{2}<\sigma\leq 1$.} Voronin \cite{voronin:1972} established multidimensional extensions of Bohr's denseness result. For any $n\in\mathbb{N}_0$ and any function $f\in\mathcal{H}(\Omega)$, we define $$ \Delta_n f(s):=\left( f (s), f'(s),..., f^{(n)}(s)\right) $$ to be the $(n+1)$-dimensional vector consisting of the values of $f$ and its first $n$ derivatives evaluated at the point $s\in\Omega$. Among other things, Voronin \cite{voronin:1972} obtained that \begin{equation}\label{eq:multdimVoronin} \overline{\left\{ \Delta_n\zeta(\sigma+it) \, : \, t\in[1,\infty) \right\} }= \mathbb{C}^{n+1} \qquad \mbox{ for every }\sigma\in(\tfrac{1}{2},1] \end{equation} and every $n\in\mathbb{N}_0$. By a slight refinement of Voronin's proof, one obtains that, for every $\sigma\in(\frac{1}{2},1]$, every $a\in\mathbb{C}^{n+1}$ and every $\varepsilon>0$ \begin{equation}\label{eq:denseVoronin} \liminf_{T\rightarrow \infty}\frac{1}{T} {\rm{meas\ }} \left\{t\in (0,T]: \left\| \Delta_n\zeta(\sigma+it) -a \right\|<\varepsilon \right\} >0; \end{equation} here and in the following $\|\cdot\|$ denotes the maximum-norm in the complex vector space $\mathbb{C}^{n+1}$.
It was this multidimensional extension of Bohr's result that inspired Voronin \cite{voronin:1975} to prove his universality theorem (Theorem \ref{th:universality}). In fact, for $\frac{1}{2}<\sigma<1$, the denseness results \eqref{eq:denseVoronin} and \eqref{eq:denseBohr} are direct consequences of Voronin's universality theorem. {\bf Bohr's method.} To prove his denseness result, Bohr modeled the Riemann zeta-function by truncated Euler products $$ \zeta_N(\sigma+it) = \prod_{p\leq N} \left(1-p^{-\sigma-it} \right)^{-1}; $$ here the product is taken over all prime numbers $p\in\mathbb{P}$ with $p\leq N$. We fix $\frac{1}{2}<\sigma\leq 1$. Bohr noticed that the quantities $p^{-it}=e^{-it \log p}$ with $p\in\mathbb{P}$, $p\leq N$, behave like independent random variables, although they all depend on the common variable $t$.\footnote{see Bohr \& Jessen \cite[p. 6]{bohrjessen:1930}.} Relying on the theory of convex curves and diophantine approximation, in particular a theorem of Kronecker and Weyl, he was able to prove that, for given $a\in\mathbb{C}$, there exists a large subset $A\subset[1,\infty)$ such that, for all $t\in A$, the truncated Euler product $\zeta_N(\sigma+it)$ is quite close to $a$. Although $\zeta_N(\sigma+it)$ does not converge in $\frac{1}{2}<\sigma\leq 1$, as $N\rightarrow\infty$, Bohr showed that the truncated Euler product approximates the zeta-function in mean-square. Here, the essential tool is the existence of the mean-square value $$ \lim_{T\rightarrow\infty }\int_1^T \left|\zeta(\sigma+it)\right|^2 \mbox{\ d} t < \infty $$ for $\frac{1}{2}<\sigma\leq 1$ in combination with Carlson's theorem. From the approximation in mean-square, Bohr deduced that there exists a large subset $B\subset [1,\infty)$ such that, for all $t\in B$, the truncated Euler product $\zeta_N(\sigma+it)$ is close to $\zeta(\sigma+it)$. Finally, certain density estimates assure that $A\cap B \neq \emptyset$ and the result follows. 
Voronin obtained his denseness statement \eqref{eq:multdimVoronin} basically by refining the ideas of Bohr. {\bf Non-denseness results on vertical lines in $\sigma<\frac{1}{2}$.} On vertical lines in the half-plane $\sigma<\frac{1}{2}$, we expect that the zeta-function grows too fast than that its values could lie dense in $\mathbb{C}$. It follows essentially from the functional equation that \begin{equation}\label{nondense} \overline{V(\sigma)} \neq \mathbb{C} \qquad \mbox{ for }\sigma\leq 0; \end{equation} see Garunk\v{s}tis \& Steuding \cite{garunkstissteuding:2010}. By assuming the Riemann hypothesis, Garunk\v{s}tis \& Steuding \cite{garunkstissteuding:2010} proved that \eqref{nondense} persists for all $\sigma<\frac{1}{2}$. Their proof uses a conditional $\Omega$-result for the zeta-function on vertical lines in $\sigma>\frac{1}{2}$ together with the functional equation.\par We know that $\overline{V(\sigma)}=\mathbb{C}$ for $\frac{1}{2}<\sigma\leq 1$ and we expect that $\overline{V(\sigma)}\neq \mathbb{C}$ for $0<\sigma<\frac{1}{2}$. But how is the situation if $\sigma=\frac{1}{2}$? {\bf Is $\overline{V(\frac{1}{2})}=\mathbb{C}$?} During the 1979 Durham conference, Ramachandra formulated the conjecture that the values of the Riemann zeta-function on the critical line lie dense in $\mathbb{C}$. Until now, this could not be proved or disproved. The difficulty in handling the values of the zeta-function on the critical line is that, due to Hardy \& Littlewood \cite{hardylittlewood:1936}, $$ \frac{1}{T} \int_{-T}^T \left|\zeta(\tfrac{1}{2}+it)\right|^2 dt \sim \log T, $$ as $T\rightarrow\infty$. Hence, Bohr's method collapses on the critical line.\par Jacod, Kowalski \& Nikeghbali \cite{jacodkowalksinikeghbali:2011} introduced a new type of convergence in probability theory, which they called `mod-Gaussian convergence'. 
For a given sequence $(Z_n)_n$ of real-valued random variables $Z_n$, this concept builds basically on working with the corresponding sequence of characteristic functions $(\mathbb{E}[e^{iuZ_n}])_n$. Leaning on this type of convergence, Kowalski \& Nikeghbali \cite{kowalskinikeghbali:2012} were able to show that $$\overline{V(\tfrac{1}{2})}=\mathbb{C}$$ would follow rather directly from a suitable version of the Keating-Snaith moment conjectures. By generalizing the notion of `mod-Gaussian convergence', Delbaen, Kowalski \& Nikeghbali \cite{delbaenkowalskinikeghbali:2011} obtained the following quantitative result. \begin{theorem}[Delbaen, Kowalski \& Nikeghbali, 2011] If for any $k>0$ there exist a real number $C_k\geq 0$ such that \begin{equation}\label{eq:momentconjecturedenseness} \left|\frac{1}{T}\int_0^T \exp\left(it\cdot \log\zeta(\tfrac{1}{2}+iu) \right)\mbox{\ d} u \right|\leq \frac{C_k}{1+|t|^4 (\log\log T)^2} \end{equation} holds for $T\geq 1$ and $t\in\mathbb{R}$ with $|t|\leq k$, then, for any bounded Jordan measurable subset $B\subset \mathbb{C}$, $$ \lim_{T\rightarrow\infty} \frac{\frac{1}{2}\log\log T}{T} \, {\rm{meas\ }} \left\{t\in (0,T] \, : \, \log\zeta(\tfrac{1}{2}+it)\in B \right\} = \tfrac{1}{2\pi} {\rm{meas\ }} B . $$ \end{theorem} Models for the zeta-function based on links to random matrix theory suggest that \eqref{eq:momentconjecturedenseness} should hold; see Keating \& Snaith \cite{keatingsnaith:2000-1, keatingsnaith:2000-2}. 
\begin{figure} \caption{For some $\sigma\in\mathbb{R}$.} \label{fig:zeta32} \label{fig:zeta88} \label{fig:zeta78} \label{fig:zeta68} \label{fig:zeta58} \label{fig:zeta48} \label{fig:zeta38} \label{fig:zeta28} \label{fig:zeta18} \end{figure} \section{Qualitative difference of the value-distribution on the critical line}\label{sec:qualdiff} A possible denseness statement for the values of the Riemann zeta-function on the critical line is qualitatively different from the one on vertical lines in $\frac{1}{2}<\sigma\leq 1$.\par Garunk\v{s}tis \& Steuding \cite{garunkstissteuding:2010} showed that, due to the functional equation, a multidimensional denseness result in the sense of \eqref{eq:multdimVoronin} does not hold on the critical line. We can easily generalize their result to the class $\mathcal{G}$. \begin{theorem}\label{th:nondensenesscritlineG} Let $G\in\mathcal{G}$. Then, $$ \overline{\left\{ (G(\tfrac{1}{2}+it), G'(\tfrac{1}{2}+it)) \; : \; t\in[1,\infty) \right\}} \neq \widehat{\mathbb{C}}^2. $$ \end{theorem} \begin{proof} We follow the ideas of Garunk\v{s}tis \& Steuding \cite{garunkstissteuding:2010}. The basic ingredient in the proof is the observation that for real $t$ of sufficiently large modulus with $G(\frac{1}{2}+it)\neq 0$ and $G(\frac{1}{2}+it)\neq \infty$, \begin{equation}\label{eq:lindau} \left| \frac{G'(\frac{1}{2}+it)}{G(\frac{1}{2}+it)} \right| \geq \frac{d_G}{2}\log |t|-\frac{1}{2}\log (Q^2 \lambda) + O\left( \frac{1}{|t|}\right); \end{equation} see Lemma \ref{lem:connectionGG'}. We fix $(a,b)\in\mathbb{C}^2$ with $a,b\neq 0$ and set, for a given $0<\varepsilon<\min\{|a|,|b|\}$, $$ D_{\varepsilon}(a,b) := \left\{ (s_1,s_2)\in\mathbb{C}^2 \, : \, |s_1-a|<\varepsilon \, \mbox{ and } |s_2-b|<\varepsilon \right\} $$ Assume that $$ W:=\overline{\left\{ (G(\tfrac{1}{2}+it), G'(\tfrac{1}{2}+it)) \; : \; t\in[1,\infty) \right\}} = \widehat{\mathbb{C}}^2. $$ Then, in particular, $D_{\varepsilon}(a,b) \subset W$.
Due to \eqref{eq:lindau}, we can fix $t_0>1$ such that $$ \left| \frac{G'(\frac{1}{2}+it)}{G(\frac{1}{2}+it)} \right| > \frac{|b|+\varepsilon}{|a|-\varepsilon} $$ for all $t\geq t_0$ with $G(\frac{1}{2}+it)\neq 0$ and $G(\frac{1}{2}+it)\neq \infty$. By the choice of $t_0$, it follows that $$ \overline{\left\{ (G(\tfrac{1}{2}+it), G'(\tfrac{1}{2}+it)) \; : \; t\in[t_0,\infty) \right\}} \subset \widehat{\mathbb{C}}^2\setminus D_{\varepsilon}(a,b). $$ Consequently, we have $$ D_{\varepsilon}(a,b) \subset \overline{\left\{ (G(\tfrac{1}{2}+it), G'(\tfrac{1}{2}+it)) \; : \; t\in[1,t_0] \right\}}=:W^*. $$ This, however, yields a contradiction. \end{proof} Moreover, according to Selberg's central limit law, the curve $t\mapsto \zeta(\frac{1}{2}+it)$ has a preference to visit arbitrarily small neighbourhoods of zero rather than those of any other complex value. \begin{theorem}\label{th:limitlawoncritline} Let $a\in\mathbb{C}$ and $0<\varepsilon<|a|$. Then, $$ \lim_{T\rightarrow \infty}\frac{1}{T} {\rm{meas\ }} \left\{t\in (0,T]: \left| \zeta(\tfrac{1}{2}+it) -a \right|<\varepsilon \right\} = \left\{\mbox{ \begin{tabular}{ll} $0$ & if $a\neq 0$, \\ $\frac{1}{2}$ & if $a=0$. \end{tabular}}\right. $$ \end{theorem} Comparing this with \eqref{eq:denseBohr}, we see once more that the value-distribution on the critical line is qualitatively different. \begin{proof} According to Theorem \ref{th:measselberglimitlaw} (b), Selberg's central limit law implies that, for arbitrary $\varepsilon>0$, as $T\rightarrow\infty$, $$ \frac{1}{T} {\rm{meas\ }} \left\{t\in(0,T]\, : \, |\zeta({\textstyle\frac{1}{2}+it})|< \varepsilon \right\} = \tfrac{1}{2} + o(1). $$ This proves the case $a=0$.
Moreover, according to Theorem \ref{th:measselberglimitlaw} (b), Selberg's central limit law yields that, for arbitrary $a\in\mathbb{C}$, $a\neq 0$ and any $0<\varepsilon^*<|a|$, $$ \frac{1}{T} {\rm{meas\ }} \left\{t\in(0,T]\, : \, |a|-\varepsilon^*<|\zeta({\textstyle\frac{1}{2}+it})|\leq |a|+\varepsilon^* \right\}, = o(1), $$ as $T\rightarrow\infty$. Due to the observation that, for any $0<\varepsilon<\varepsilon^*$ and any $T>0$, \begin{align*} 0 \; \leq \; & \frac{1}{T} {\rm{meas\ }} \left\{t\in(0,T] : |\zeta({\textstyle\frac{1}{2}+it})-a|< \varepsilon \right\} \\ & \qquad \qquad \leq \frac{1}{T} {\rm{meas\ }} \left\{t\in(0,T] : |a|-\varepsilon^* <|\zeta({\tfrac{1}{2}+it})|\leq |a|+\varepsilon^* \right\} , \end{align*} the statement for $a\neq 0$ follows. \end{proof} \section{Approaching zero and infinity from different directions}\label{sec:appr} Although the curve $t\mapsto \zeta(\tfrac{1}{2}+it)$ has a preference to be either close to zero or to infinity, it is neither known whether zero is an interior point of the set $$ \overline{V(\tfrac{1}{2})}=\overline{\left\{ \zeta(\tfrac{1}{2}+it) \, : \, t\in[1,\infty) \right\}} \subset\mathbb{C} $$ nor whether zero is an interior point of the set $$ \overline{\left\{ \zeta(\tfrac{1}{2}+it)^{-1} \, : \, t\in[1,\infty) \right\}}\subset\widehat{\mathbb{C}}. $$ Relying on Theorem \ref{th:largesmall}, resp. Corollary \ref{cor:selbergsmalllarge}, we prove that we can approximate zero and infinity with non-zero values in $V(\frac{1}{2})$ from quite many directions. \begin{theorem}\label{th:zeroasintpoint} Let $\mathcal{L}\in\mathcal{S}^*$. Then, there exist real numbers $\theta_0,\theta_{\infty}\in[0,2\pi)$ with the following properties. 
\begin{itemize} \item[(a)] For every $\theta\in(\theta_0-\frac{\pi}{8},\theta_0+\frac{\pi}{8})$, there exists a sequence $(t_k)_k$ with $t_k\in (1,\infty)$ and $\lim_{k\rightarrow\infty} t_k = \infty$ such that $$ \mathcal{L}(\tfrac{1}{2}+it_k) \in e^{i\theta}\mathbb{R}^+ := \left\{re^{i\theta}\, :\, r\in\mathbb{R}^+ \right\} $$ for $k\in\mathbb{N}$ and $$ \lim_{k\rightarrow\infty} \mathcal{L}(\tfrac{1}{2}+it_k) = 0. $$ \item[(b)] For every $\theta\in(\theta_{\infty}-\frac{\pi}{8},\theta_{\infty}+\frac{\pi}{8})$, there exists a sequence $(t_k)_k$ with $t_k\in (1,\infty)$ and $\lim_{k\rightarrow\infty} t_k = \infty$ such that $$ \mathcal{L}(\tfrac{1}{2}+it_k) \in e^{i\theta}\mathbb{R}^+ $$ for $k\in\mathbb{N}$ and $$ \lim_{k\rightarrow\infty} \mathcal{L}(\tfrac{1}{2}+it_k) = \infty. $$ \end{itemize} \end{theorem} \begin{proof} We only prove statement (a). Statement (b) can be proved by essentially the same method.\par For $\mathcal{L}\in\mathcal{S}^{*}$, there is an analogue of Hardy's $Z$-function at our disposal which allows us to write for sufficiently large real $t$, say $t\geq t_0$, $$ \mathcal{L}(\tfrac{1}{2}+it) = Z_{\mathcal{L}}(t)\Delta_{\mathcal{L}}(\tfrac{1}{2}+it)^{1/2}. $$ For the definition and basic properties of $Z_{\mathcal{L}}(t)$ the reader is referred to Section \ref{sec:classG}. Recall here, in particular, that for $t\geq t_0$, $$ Z_{\mathcal{L}}(t)\in\mathbb{R} \qquad \mbox{ and }\qquad \Delta_{\mathcal{L}}(\tfrac{1}{2}+it)^{1/2} \in\partial\mathbb{D}. $$ We fix an arbitrary $0<q<\frac{1}{2}$ and set $\kappa_{\mathcal{L}}:=\frac{\pi}{2d_{\mathcal{L}}}$. Then, according to Theorem \ref{th:largesmall} (b), resp.
Corollary \ref{cor:selbergsmalllarge} (b), we find a sequence $(\tau_k)_k$ with $\tau_k\in[t_0,\infty)$ and $\lim_{k\rightarrow\infty}\tau_k = \infty$ such that the functions \begin{align*} \mathcal{L}_{\tau_k}(y)&:=\mathcal{L}\left(\tfrac{1}{2} + i \frac{\kappa_{\mathcal{L}}}{\log \tau _k}\, y +i\tau_k \right)\\ &= Z_{\mathcal{L}}\left(\frac{\kappa_{\mathcal{L}}}{\log \tau _k}\, y + \tau_k \right)\Delta_{\mathcal{L}}\left(\tfrac{1}{2}+i\frac{\kappa_{\mathcal{L}}}{\log \tau _k}\, y + i\tau_k\right)^{1/2} \end{align*} do not vanish on the interval $[-q,q]$ for $k\in\mathbb{N}$ and converge uniformly on $[-q,q]$ to zero, as $k\rightarrow\infty$.\par As the functions $\mathcal{L}_{\tau_{k}}(y)$, $k\in\mathbb{N}$, are real and non-vanishing on the interval $[-q,q]$, we find, for every $k\in\mathbb{N}$, an integer $\eta_k\in\{-1,+1\}$ such that $$ f_k(y):= \eta_k \cdot Z_{\mathcal{L}}\left(\frac{\kappa_{\mathcal{L}}}{\log \tau _k}\,y + \tau_{k} \right) >0 \qquad \mbox{ for } y\in\left[-q,q\right] . $$ The factors $\eta_k$ assure that $f_k(y)=\left| \mathcal{L}_{\tau_k}(y) \right|$ for $k\in\mathbb{N}$ and $y\in[-q,q]$. Further, we observe that, uniformly for $y\in[-q,q]$, \begin{equation}\label{kw1} f_k(y) \rightarrow 0, \qquad \mbox{as }k\rightarrow\infty. \end{equation} Now, we set $$ h_k(y):= \eta_k^{-1} \cdot \Delta_{\mathcal{L}}\left(\tfrac{1}{2}+i\frac{\kappa_{\mathcal{L}}}{\log \tau _k}\, y + i\tau_k\right)^{1/2} . $$ Observe that $\left|h_k(y)\right|=1$ and that \begin{equation}\label{Zhf} f_k(y)\cdot h_k(y)=\mathcal{L}_{\tau_k}(y) \end{equation} for $k\in\mathbb{N}$ and $y\in[-q,q]$. Since $h_k(0)\in\partial\mathbb{D}$, it follows from the compactness of $\partial\mathbb{D}$ that the set $\{h_k(0)\}_k$ has at least one accumulation point $e^{i\theta_0}\in\partial\mathbb{D}$ with $\theta_0\in [0,2\pi)$. Without loss of generality, we may suppose that $$ \lim_{k\rightarrow\infty} h_{k}(0) = e^{i\theta_0}.
$$ Otherwise, we work with a suitable subsequence of $(h_k(0))_k$. For $k\in\mathbb{N}$, we define $\theta_{k}\in[0,2\pi)$ such that $ e^{i\theta_{k}} = h_{k}(0)$. Then, according to Lemma \ref{lem:Delta_p_phi}, we have, uniformly for $y\in[-q,q]$, \begin{align*} h_{k}(y) &= h_k(0)\cdot \exp\left(- i\tfrac{1}{2}d_{\mathcal{L}}\kappa_{\mathcal{L}} y \right) \left(1 + O\left(\frac{1}{\log \tau_{k}} \right) \right)\\ &= \exp\left(i\theta_k - i\tfrac{\pi}{4} y \right) \left(1 + O\left(\frac{1}{\log \tau_{k}} \right) \right). \end{align*} as $k\rightarrow\infty$. Thus, for any $\theta\in (\theta_0-\frac{q\pi}{4}, \theta_0+\frac{q\pi}{4})$, we find for sufficiently large $k \in\mathbb{N}$, a real number $y_{k}\in [-q,q]$ with \begin{equation}\label{kw2} h_{k}(y_{k})= \exp(i\theta). \end{equation} Now, we set $$t_k := \tau_{k} + \frac{\kappa_{\mathcal{L}}}{\log \tau_k}\,y_k.$$ Then, according to \eqref{kw1}, \eqref{Zhf} and \eqref{kw2}, we have $\mathcal{L}(\tfrac{1}{2}+it_k ) \in e^{i\theta}\mathbb{R}^+$ for all sufficiently large $k\in\mathbb{N}$ and $\lim_{k\rightarrow\infty} \mathcal{L}(\tfrac{1}{2}+it_k) = 0$. As we can choose arbitrary $0<q<\frac{1}{2}$, the assertion is proved. \end{proof} For the Riemann zeta-function, there are results of Kalpokas, Korolev \& Steuding \cite{kalpokaskorolevsteuding:2013} at our disposal which exceed the statement of Theorem \ref{th:zeroasintpoint} (b) by far. Kalpokas \& Steuding \cite{kalpokassteuding:2011} investigated intersection points of the curve $\mathbb{R} \ni t\mapsto \zeta(\frac{1}{2}+it)$ with straight lines $e^{i\theta}\mathbb{R} := \{re^{i\theta}\, : \, r\in\mathbb{R}\}$ through the origin. In particular, they observed that $$ \zeta(\tfrac{1}{2}+it)\in e^{i\theta}\mathbb{R} \qquad \Longleftrightarrow \qquad \zeta(\tfrac{1}{2}+it) = 0 \quad \mbox{or}\quad \Delta_{\zeta}(\tfrac{1}{2}+it) = e^{2i\theta}.
$$ Their works were extended by Christ \& Kalpokas \cite{christkalpokas:2012, christkalpokas:2013} and Kalpokas, Korolev \& Steuding \cite{kalpokaskorolevsteuding:2013}. The latter showed that, for every $\theta\in[0,2\pi)$, there is a sequence $(t_k)_k$ with $t_k\in\mathbb{R}$ and $\lim_{k\rightarrow\infty} t_k = \infty$ such that, for all $k\in\mathbb{N}$, $$ \zeta(\tfrac{1}{2}+it_k) \in e^{i\theta}\mathbb{R}^+ \qquad \mbox{and}\quad |\zeta(\tfrac{1}{2}+it_k)|\geq C (\log t_k)^{5/4} $$ with some positive constant $C$. Thus, roughly speaking, the values of the Riemann zeta-function on the critical line expand in every direction. \par Korolev showed in a talk at the ELAZ conference 2012 at Schloss Schney that there exists a sequence $(t_k)_k$ with $t_k\in \mathbb{R}$ and $\lim_{k\rightarrow\infty} t_k = \infty$ such that, for every $k\in\mathbb{N}$, $$ \zeta(\tfrac{1}{2}+it_k) \in\mathbb{R}\setminus\{0\} \qquad \mbox{and} \qquad \lim_{k\rightarrow\infty} \zeta(\tfrac{1}{2}+it_k) = 0. $$ Korolev's result is not published yet. \section{Denseness results on curves approaching the critical line}\label{sec:curves} In the following, let $\epsilon:[1,\infty)\rightarrow\mathbb{R}$ be a function with $\lim_{t\rightarrow\infty} \epsilon(t) = 0$. For a humble attack on Ramachandra's conjecture, we investigate the value-distribution of the Riemann zeta-function on curves $t\mapsto \frac{1}{2}+\epsilon(t)+it$ which approach the critical line asymptotically as $t\rightarrow\infty$.\par If we could establish a denseness statement for the zeta-values on curves approaching the critical line sufficiently fast, then the denseness of the zeta-values on the critical line would follow; see the subsequent theorem. Recall the definition of the function $\theta_{\mathcal{L}}(\sigma)$ from Section \ref{sec:orderofgrowth}, which indicates the order of growth of a given function $\mathcal{L}\in\mathcal{S}^{\#}$. 
\begin{theorem}\label{th:curvesmotivation} Let $\mathcal{L}\in\mathcal{S}^{\#}$ and $n\in\mathbb{N}_0$. Let $\epsilon:[1,\infty) \rightarrow \mathbb{R}$ be a function such that $\epsilon(t)\ll t^{-\theta_{\mathcal{L}}(\frac{1}{2})-\delta}$, as $t\rightarrow\infty$, with some $\delta>0$. Then, $$ \overline{\left\{ \Delta_n \mathcal{L}(\tfrac{1}{2}+it) \, : \, t\in[1,\infty) \right\}} = \mathbb{C}^{n+1} $$ if and only if $$ \overline{\left\{ \Delta_n \mathcal{L}(\tfrac{1}{2}+\epsilon(t)+it) \, : \, t\in[1,\infty) \right\}} = \mathbb{C}^{n+1}. $$ \end{theorem} \begin{proof} Let $a\in\mathbb{C}^{n+1}$ and $\varepsilon>0$. Let $\|\cdot\|$ denote the maximum-norm in the complex vector space $\mathbb{C}^{n+1}$. Then, according to Lemma \ref{lem:growth}, there exists a constant $t_0>1$ such that $$ \bigl\|\Delta_n \mathcal{L}(\tfrac{1}{2}+\epsilon(t)+it)-\Delta_n \mathcal{L}(\tfrac{1}{2}+it)\bigr\| < \varepsilon/2 $$ for $t\geq t_0$. Assume that $\overline{\left\{ \Delta_n \mathcal{L}(\tfrac{1}{2}+\epsilon(t)+it) \, : \, t\in[1,\infty) \right\}} = \mathbb{C}^{n+1}$. Then, we find a $t^*\geq t_0$ such that $$ \bigl\|\Delta_n\mathcal{L}(\tfrac{1}{2}+\epsilon(t^*)+it^*)-a\bigr\|<\varepsilon/2. $$ By means of the triangle inequality, we obtain that $$ \bigl\|\Delta_n \mathcal{L}(\tfrac{1}{2}+it^*)-a\bigr\|<\varepsilon. $$ As we can choose $a\in\mathbb{C}^{n+1}$ and $\varepsilon>0$ arbitrarily in the argumentation above, we deduce that $$ \overline{\left\{ \Delta_n \mathcal{L}(\tfrac{1}{2}+it) \, : \, t\in[1,\infty) \right\}} = \mathbb{C}^{n+1}. $$ The other implication of the theorem can be proved by the same argument. 
\end{proof} {\bf A negative denseness result on curves approaching the critical line.} The property of the Riemann zeta-function that a multidimensional denseness result does not hold on the critical line carries over to certain curves approaching the critical line. \begin{corollary}\label{cor:nondensenesscurves} Let $\mathcal{L}\in\mathcal{S}^{\#}$ and $n\in\mathbb{N}$. Let $\epsilon:[1,\infty) \rightarrow \mathbb{R}$ be a function such that $\epsilon(t)\ll t^{-\theta_{\mathcal{L}}(\frac{1}{2}) - \delta}$, as $t\rightarrow\infty$, with some $\delta>0$. Then, $$ \overline{\left\{ \Delta_n \mathcal{L}(\tfrac{1}{2}+\epsilon(t)+it) \, : \, t\in[1,\infty) \right\}} \neq \mathbb{C}^{n+1}. $$ \end{corollary} \begin{proof} Recalling that $\mathcal{S}^{\#}\subset \mathcal{G}$ and noticing that $n\neq 0$, the corollary follows directly by combining Theorem \ref{th:nondensenesscritlineG} with Theorem \ref{th:curvesmotivation}. \end{proof} {\bf Denseness results on curves approaching the critical line.} \begin{figure} \caption{The construction of the curve $t\mapsto \frac{1}{2}+\epsilon(t)+it$.} \label{fig:densecurve} \end{figure} It seems difficult to obtain a denseness result on curves $t\mapsto \frac{1}{2}+\epsilon(t)+it$ by adapting Bohr's method. This is due to the fact that, according to a result of Laurin\v{c}ikas \cite{laurincikas:1992}, there is a quantitative swap in the asymptotic behaviour of the mean-square near the critical line. \begin{theorem}[Laurin\v{c}ikas, 1991] Let $\mu:[2,\infty)\rightarrow \mathbb{R}$ be a non-negative, (not necessarily strictly) monotonically increasing or decreasing function satisfying $\lim_{T\rightarrow\infty}\frac{\mu(T)}{\log T} = 0$. \begin{itemize} \item[(a)] If $\lim_{T\rightarrow\infty} \mu(T) =\infty$, then, as $T\rightarrow\infty$, $$ \frac{1}{T}\int_2^{T} \left|\zeta\left(\tfrac{1}{2}+\frac{\mu(T)}{\log T}+it\right) \right|^2 \,\mathrm{d} t \sim \frac{1}{2\mu(T)} \log T. 
$$ \item[(b)] If $\lim_{T\rightarrow\infty} \mu(T) =c$ with some $c>0$, then, as $T\rightarrow\infty$, $$ \frac{1}{T}\int_2^{T} \left|\zeta\left(\tfrac{1}{2}+\frac{\mu(T)}{\log T}+it\right) \right|^2 \,\mathrm{d} t \sim \frac{1}{2c}\left(1-e^{-2c} \right)\log T. $$ \item[(c)] If $\lim_{T\rightarrow\infty} \mu(T) =0$, then, as $T\rightarrow\infty$, $$ \frac{1}{T}\int_2^{T} \left|\zeta\left(\tfrac{1}{2}+\frac{\mu(T)}{\log T}+it\right) \right|^2 \,\mathrm{d} t \sim \log T. $$ \end{itemize} \end{theorem} For a proof we refer to Laurin\v{c}ikas \cite[Theorems 1, 2 and 3]{laurincikas:1992}.\par Nevertheless, we can use the $a$-point results from Chapter \ref{chapt:apoints} to deduce the existence of certain curves on which the values of the zeta-function lie dense in $\mathbb{C}$. \begin{theorem}\label{th:densenessavalues} There exists a continuous function $\epsilon:[2,\infty)\rightarrow \mathbb{R}$ with $\lim_{t\rightarrow\infty}\epsilon(t)=0$ such that $$ \overline{\left\{ \zeta(\tfrac{1}{2}+\epsilon(t)+it) \, : \, t\in[2,\infty) \right\}}=\mathbb{C}. $$ Here we can demand additionally that, \begin{itemize} \item[(a)] unconditionally, $$ |\epsilon(t)|\leq \frac{\mu(t)}{\log t} $$ with any positive function $\mu$ satisfying $\lim_{t\rightarrow\infty}\mu(t)=\infty$. \item[(b)] by assuming the Riemann hypothesis, $$ |\epsilon(t)| \leq \frac{\mu(t)(\log\log\log t)^3}{\log t \sqrt{\log\log t}} $$ with any positive function $\mu$ satisfying $\lim_{t\rightarrow\infty}\mu(t)=\infty$. \item[(c)] by assuming the generalized Riemann hypothesis for Dirichlet $L$-functions,\footnote{Very likely this can be proved by only assuming the Riemann hypothesis; see the remarks in Section \ref{sec:filling_zeta}.} $$ |\epsilon(t)| \leq \frac{1}{(\log t)^{\delta}} $$ with any $\delta>0$. 
\item[(d)] by assuming the generalized Riemann hypothesis for Dirichlet $L$-functions, $$ |\epsilon(t)| \leq \mu(t)\exp \left(-c_0 \frac{\log t}{\log\log t} \right) $$ with any positive function $\mu$ satisfying $\lim_{t\rightarrow\infty}\mu(t)=\infty$ and any constant $0<c_0 < \frac{1}{\sqrt{2}}$. \end{itemize} \end{theorem} \begin{proof} Let $(q_k)_k$ be an enumeration of $\mathbb{Q}+ i\mathbb{Q}$. According to Corollary \ref{cor:summaryapoints} (c), we find for every $q_k$, with at most one exception, infinitely many roots of the equation $\zeta(s)=q_k$ inside the strip $S$ defined by $$ \tfrac{1}{2} - \frac{\mu(t)}{\log t} <\sigma < \tfrac{1}{2} + \frac{\mu(t)}{\log t}, \qquad t\geq 2, $$ where $\mu$ is an arbitrary positive function satisfying $\lim_{t\rightarrow\infty}\mu(t)=\infty$. Without loss of generality, we may suppose that the enumeration $(q_k)_k$ is arranged such that $q_1$ is the possibly existing value which is not assumed by the zeta-function in $S$. This agreement as well as the observation above assure that we find points $s_k =\sigma_k + it_k \in S$ such that $\zeta(s_k) = q_k$ and $t_k<t_{k+1}$ for every $k\in\mathbb{N}\setminus\{1\}$. By connecting the point $s_k$ with its successor $s_{k+1}$, respectively, by a straight line segment, we get a continuous function $\epsilon:[2,\infty)\rightarrow\mathbb{R}^+$ such that $|\epsilon(t)|\leq \frac{\mu(t)}{\log t}$ for all $t\in[2,\infty)$ and $\frac{1}{2}+\epsilon(t_k) + it_k = s_k$ for all $k\in\mathbb{N}$. Theorem \ref{th:densenessavalues} (a) follows then immediately as $\mathbb{Q}+ i\mathbb{Q}$ (minus the possibly existent exceptional value $q_1$) is dense in $\mathbb{C}$.\par The statements (b), (c) and (d) follow from Corollary \ref{cor:summaryapoints} (d), (e) and (f) in an analogous manner. \end{proof} The curves of Theorem \ref{th:densenessavalues} can oscillate from the left to the right of the critical line. 
It would be nice to have a similar statement for curves which stay always either to the left or to the right of the critical line.\par {\bf Curves approaching the critical line from the left.} From Selberg's conditional result concerning $a$-points to the left of the critical line, we deduce the following denseness statement. \begin{theorem} Assume that the Riemann hypothesis is true. Then, for arbitrary $0<c_1<c_2$, there exists a continuous function $\epsilon:[1,\infty)\rightarrow \mathbb{R}$ with $$ \frac{c_1 \sqrt{\log\log t}}{\log t} \leq \epsilon(t) \leq \frac{c_2 \sqrt{\log\log t}}{\log t} $$ such that $$ \overline{\left\{ \zeta(\tfrac{1}{2}-\epsilon(t)+it) \, : \, t\in[1,\infty) \right\}}=\mathbb{C}. $$ \end{theorem} \begin{proof} Relying on Selberg's result \eqref{eq:selberg_aleft}, the proof follows along the lines of the proof of Theorem \ref{th:densenessavalues}. \end{proof} {\bf Curves approaching the critical line from the right.} Relying on Bohr's, resp. Voronin's denseness result, it is possible to prove the following theorem. \begin{theorem}[Christ \cite{christ:2012}, 2012]\label{th:enumerationbohr} For every $n\in\mathbb{N}_0$, there is a positive, piecewise-constant function $\epsilon:[1,\infty) \rightarrow \mathbb{R}^+$ with $\lim_{t\rightarrow \infty} \epsilon(t)=0$ such that $$\textstyle \overline{\left\{\Delta_n \zeta\left(\frac{1}{2}+\epsilon(t)+it\right) \, :\,t\in[1,\infty) \right\}} = \mathbb{C}^{n+1}. $$ \end{theorem} \begin{proof} To prove Theorem \ref{th:enumerationbohr} we use Voronin's denseness result in combination with a certain enumeration method. Let $(\varepsilon_k)_{k}$ and $(\sigma_k)_{k}$ be sequences with $\varepsilon_k>0$ and $\sigma_k\in(0,\frac{1}{2})$ for $k\in\mathbb{N}$ that tend to zero as $k\rightarrow\infty$. Furthermore, for a given $n\in\mathbb{N}_0$, we define compact sets $$ A_k := \{z\in\mathbb{C}^{n+1} \,: \, \|z\|\leq k\},\qquad k\in\mathbb{N}. 
$$ The sets $A_k$ form a countable covering of the $(n+1)$-dimensional complex plane, i.e. $$ \bigcup_{k\in \mathbb{N}} A_k = \mathbb{C}^{n+1}. $$ We set $T_0=1$ and choose, inductively for $k\in\mathbb{N}$, a positive real number $T_k>T_{k-1}+1$ such that for all $a\in A_k$ there is a $\tau \in (T_{k-1}, T_k]$ with $$\textstyle \left\|\Delta_n \zeta\left(\frac{1}{2}+\sigma_k + i\tau\right)-a\right\|<\varepsilon_k. $$ The existence of such a number $T_k$ is assured by Voronin's denseness result applied to the vertical line $\sigma = \frac{1}{2}+\sigma_k$ and basic properties of compact sets. The piecewise-constant function $\epsilon:[1,\infty)\rightarrow \mathbb{R}^+$ defined by $$ \epsilon(t)=\sigma_k \qquad \mbox{for }t\in(T_{k-1},T_k] \mbox{ with }k\in\mathbb{N}, $$ satisfies $\lim_{t\rightarrow\infty}\epsilon(t)=0$. The construction of the function $\epsilon$ yields that the curve $t\mapsto \frac{1}{2}+\epsilon(t)+it$ has the desired property. \par \end{proof} As we are very flexible in choosing the zero-sequences $(\varepsilon_k)_{k}$, $(\sigma_k)_{k}$ and a proper countable covering of the complex plane $(A_k)_{k\in\mathbb{N}}$, the proof of Theorem \ref{th:enumerationbohr} yields the existence of uncountably many curves with the desired property.\par By adjusting the sets $A_k$ in a proper way, we can use a quantitative version of Voronin's denseness result (see Karatsuba \& Voronin \cite[Chapt. VIII]{karatsubavoronin:1992}) to get very rough estimates on how `fast' these curves approach the critical line.\par With respect to Corollary \ref{cor:nondensenesscurves}, it would be very nice to characterize the changeover of $\{\Delta_2\zeta(\frac{1}{2}+\epsilon(t)+it)\, : \, t\in[1,\infty)\}$ from denseness to non-denseness in terms of the speed of convergence of $\epsilon(t)\rightarrow 0$, as $t\rightarrow\infty$. 
\par {\bf Small and large values on curves approaching the critical line.} \begin{figure} \caption{The values $|\zeta(\sigma+it)|$ and $|\zeta(\sigma+it)|^{-1}$.} \label{fig:landscape1} \label{fig:landscape2} \end{figure} From Corollary \ref{cor:selbergsmalllarge}, which we obtained by relying on Selberg's central limit law, we can deduce information on small and large values of the zeta-function on certain curves $t\mapsto \frac{1}{2}+\epsilon(t)+it$. \begin{corollary}\label{cor:curvessmalllarge} Let $c>0$ and $\epsilon:[1,\infty)\rightarrow\mathbb{R}$ be a function satisfying $|\epsilon(t)|\leq \frac{c}{ \log t}$ for $t\in[1,\infty)$. Then, $$ \liminf_{t\rightarrow\infty }|\zeta(\tfrac{1}{2}+\epsilon(t)+it)| = 0 \qquad \mbox{ and }\qquad \limsup_{t\rightarrow\infty }|\zeta(\tfrac{1}{2}+\epsilon(t)+it)| = \infty. $$ \end{corollary} \begin{proof} The corollary follows directly from Corollary \ref{cor:selbergsmalllarge} by working with the conformal map $\varphi_{\tau}(z)=\frac{1}{2}+\frac{\kappa_{\zeta}/4}{\log \tau} z +i\tau$ and the rectangular domain $\mathcal{R}(\frac{4c}{\kappa_{\zeta}},1)$. \end{proof} Roughly speaking, Corollary \ref{cor:curvessmalllarge} states that the zeta-function assumes both arbitrarily small and arbitrarily large values on every path to infinity which lies inside the strip $$ \tfrac{1}{2}-\frac{c}{\log t}<\sigma < \tfrac{1}{2}+\frac{c}{ \log t}, \qquad t\geq 1, $$ with arbitrary $c>0$.\par \section{Denseness results on curves approaching the line \texorpdfstring{$\sigma=1$}{}}\label{sec:densesigma1} By a slight refinement of Bohr's method, we can show that the values of the zeta-function taken on any curve $[1,\infty)\ni t\mapsto 1+ \epsilon(t)+it$ with $\lim_{t\rightarrow\infty}\epsilon(t)=0$ are dense in $\mathbb{C}$. 
\begin{theorem}[Christ \cite{christ:2012}, 2012]\label{th1} Let $\epsilon:[1,\infty)\rightarrow\mathbb{R}$ be a function with $\lim_{t\rightarrow\infty}\epsilon(t)=0$. Then, for every $a\in\mathbb{C}$ and $\varepsilon>0$, $$ \liminf_{T\rightarrow \infty}\frac{1}{T} {\rm{meas\ }} \left\{t\in (0,T]: \left| \zeta(1+\epsilon(t)+it) -a \right|<\varepsilon \right\} >0. $$ \end{theorem} We choose the function $\epsilon$ in Theorem \ref{th1} such that the curve $t\mapsto 1+\epsilon(t)+it$ lies completely in the half-plane $\sigma >1$. In this case, Theorem \ref{th1} cannot be deduced from Voronin's universality theorem. Moreover, in Theorem \ref{th1}, there are no restrictions on how fast $\epsilon(t)$ tends to zero as $t\rightarrow \infty$.\par \begin{proof} To prove Theorem \ref{th1}, we rely on the methods of Bohr \& Courant \cite{bohrcourant:1914} and Bohr \& Jessen \cite{bohrjessen:1932} who proved a corresponding result for vertical lines inside the strip $\frac{1}{2}<\sigma \leq 1$. We will refine their methods by adding a certain continuity argument. As Bohr and his collaborators did, we will prove the result not for $\zeta(s)$, but for $\log \zeta(s)$. The result for $\zeta(s)$ is then an immediate consequence therefrom. We define $\log\zeta(s)$ for $\sigma>\frac{1}{2}$ in a standard way; for details we refer to Steuding \cite[Chapt. 1.2]{steuding:2007}.\par Let $\varepsilon>0$ and $a\in\mathbb{C}$. Let $(p_n)_{n}$ be an enumeration of all prime numbers in ascending order. For a positive integer $N$, we define the truncated Euler product $$ \zeta_N(s)=\prod_{n=1}^{N} (1-p_n^{-s})^{-1}. $$ Note that $\zeta_N(s)$ defines an analytic and non-vanishing function in the half-plane $\sigma>0$. 
Bohr showed that there is a positive real number $d$ and a positive integer $N_1$ such that, for all $N\geq N_1$, we find a subset $\mathcal{I}(N)\subset(0,\infty)$ of lower density $$ \liminf_{T\rightarrow\infty} \frac{1}{T} {\rm{meas\ }} \left(\mathcal{I}(N)\cap (0,T]\right) > d $$ with the property that $$ |\log \zeta_N(1+it) - a | < \varepsilon/3 \qquad \mbox{ for }t\in\mathcal{I}(N). $$ Furthermore, Bohr proved that, for any $\varepsilon'>0$ and any $\frac{1}{2}<\sigma_0 < 1$, there is a positive real number $N_2$ such that, for every $N\geq N_2$ and every $\sigma\geq \sigma_0$, $$ \lim_{T\rightarrow\infty} \frac{1}{T}\int_1^{T} \left| \frac{\zeta(\sigma+it)}{\zeta_N(\sigma+it)}-1\right| \,\mathrm{d} t < \varepsilon'. $$ The bounded convergence theorem assures that \begin{equation*} \lim_{T\rightarrow\infty} \frac{1}{T}\int_{1}^T \int_{\sigma_0}^{2} \left|\frac{\zeta(\sigma+it)}{\zeta_N(\sigma+it)}-1 \right|^2 \,\mathrm{d} \sigma \,\mathrm{d} t < 2\varepsilon'. \end{equation*} By carefully adopting Bohr's reasoning, we can deduce from the estimate above that there exists a positive integer $N_3$ such that, for all $N\geq N_3$, we find a subset $\mathcal{J}(N)\subset(0,\infty)$ of lower density $$ \liminf_{T\rightarrow\infty}\frac{1}{T} {\rm{meas\ }} \left( \mathcal{J}(N)\cap (0,T] \right)> 1-d $$ with the property that $$ |\log \zeta_N(1+\epsilon(t)+it)- \log\zeta(1+\epsilon(t)+it)|<\varepsilon/3 \qquad\mbox{for } t\in\mathcal{J}(N). $$ Now, we apply a certain continuity argument. We fix $N_0\geq \max\{N_1,N_3\}$ and choose a sufficiently small $\delta<\log (1- \frac{\varepsilon}{6N_0})^{-1}/(N_0\log 2)$. As $\lim_{t\rightarrow\infty}\epsilon(t)=0$, we find a positive number $T_0$ such that $\epsilon(t)<\delta$ for $t\geq T_0$. 
Then, for $t\geq T_0$, $$ \left| \log \zeta_{N_0} (1+it) - \log \zeta_{N_0}(1+\epsilon(t)+it) \right|=\left| \sum_{n=1}^{N_0 }\log\left(1+\frac{1-p_n^{-\epsilon(t)}}{p_n^{1+it}-1} \right)\right| $$ $$ \leq \sum_{n=1}^{N_0 } 2\left|\frac{1-p_n^{-\epsilon(t)}}{p_n^{1+it}-1} \right| \leq 2 N_0 (1-p_{N_0}^{-\epsilon(t)}) <2 N_0 (1-2^{-N_0\delta })<\varepsilon/3. $$ Here, we used Bertrand's postulate which states that $p_{n}\leq 2^{n}$. Although the estimation via Bertrand's postulate is quite rough, it is completely sufficient for our purpose.\par Altogether, we can deduce that the set $\mathcal{M}:=\mathcal{I}(N_0)\cap \mathcal{J}(N_0)\cap (T_0,\infty)$ has positive lower density, i.e. $$ \liminf_{T\rightarrow\infty}\frac{1}{T} {\rm{meas\ }} \left(\mathcal{M}\cap (0,T]\right)> 0, $$ and enjoys the property that \begin{align*} |\log \zeta(1+\epsilon(t)+it)-a| \leq{} & |\log \zeta(1+\epsilon(t)+it)-\log\zeta_{N_0}(1+\epsilon(t)+it)|\\ & + |\log \zeta_{N_0}(1+\epsilon(t)+it)-\log \zeta_{N_0}(1+it)|\\ & + |\log \zeta_{N_0}(1+it)-a|<\varepsilon \end{align*} for $t\in\mathcal{M}$. The statement of the theorem follows. \end{proof} \section{A limiting process to the right of the critical line}\label{sec:universalityoncurves} In this final section of Part I, we briefly discuss what happens to the limiting process of Section \ref{sec:shiftingshrinking} if we adjust the underlying conformal mappings such that they map the unit disc to discs which lie completely inside the strip $\frac{1}{2}<\sigma<1$, but arbitrarily close to the critical line. By relying on Voronin's universality, we can prove the following statement. 
\begin{theorem}\label{th:univsliding} Let $0\leq\eta\leq\frac{1}{4}$ and let $(\epsilon_k)_k$, $(\lambda_k)_k$ be sequences with $\epsilon_k,\lambda_k\in\mathbb{R}$, $0<\lambda_k<\epsilon_k<\frac{1}{4}$ for $k\in\mathbb{N}$ and $\lim_{k\rightarrow\infty}\epsilon_k = \lim_{k\rightarrow\infty}\lambda_k = \eta$. Further, let $$ \zeta_{k,\tau}(z) :=\zeta(\tfrac{1}{2} +\epsilon_k +\lambda_kz+i\tau)\qquad \mbox{for }\tau\geq 1,\, k\in\mathbb{N} \mbox{ and }z\in\mathbb{D}. $$ Then, there is a sequence $(\tau_k)_k$ with $\tau_k\in[1, \infty)$ and $\lim_{k\rightarrow\infty} \tau_k = \infty$ such that, for every continuous and non-vanishing function $g$ on $\overline{\mathbb{D}}$ which is analytic in $\mathbb{D}$, there is a subsequence of $(\zeta_{k,\tau_k})_k$ which converges uniformly on $\overline{\mathbb{D}}$ to $g$. \end{theorem} \begin{proof}[Sketch of the proof] Due to the theorem of Mergelyan, see for example \cite{rudin:1966}, it is sufficient to establish the assertion of Theorem \ref{th:univsliding} for polynomial target functions $g$ which have rational coefficients and do not vanish on $\overline{\mathbb{D}}$. The proof follows then directly from Voronin's universality theorem in combination with a similar enumeration method as the one that we used in the proof of Theorem \ref{th:enumerationbohr}. We refer to Christ \cite{christ:2012} for more details. 
\end{proof} \part{Discrete and continuous moments} $\mbox{ }$ In part II we aim at extending a result of Tanaka \cite{tanaka:2008} who established a weak version of the Lindel\"of hypothesis for the Riemann zeta-function.\par Recall that, according to Hardy \& Littlewood \cite{hardylittlewood:1923}, the Lindel\"of hypothesis is equivalent to the statement that, for every $\sigma>\frac{1}{2}$ and $k\in\mathbb{N}$, \begin{equation}\label{Lind1} \lim_{T\rightarrow \infty}\frac{1}{T}\int_1^T \left|\zeta(\sigma+it) \right|^{2k} \,\mathrm{d} t =\sum_{n=1}^{\infty}\frac{d_k(n)^2}{n^{2\sigma}}, \end{equation} where $d_k$ denotes the generalized divisor function appearing in the Dirichlet series expansion of $\zeta^{k}$. Tanaka showed that \eqref{Lind1} holds if one neglects a certain set $A\subset [1,\infty)$ of density zero from the path of integration.\par In the sequel, let $\pmb{1}_X$ denote the indicator function of a set $X\subset \mathbb{R}$ and $X^c := \mathbb{R}\setminus X$ its complement. So far, formula \eqref{Lind1} is proved only in the cases $k=1,2$, due to classical works of Hardy \& Littlewood \cite{hardylittlewood:1922} and Ingham \cite{ingham:1926}. This is sufficient to derive the following boundedness property of the Riemann zeta-function in $\sigma>\frac{1}{2}$: \textit{for every $\varepsilon>0$ and $\alpha>\frac{1}{2}$, there exist a constant $M_{\varepsilon}>0$ and a subset $B\subset [1,\infty)$ of upper density $$ \limsup_{T\rightarrow\infty} \frac{1}{T} \int_1^{T} \pmb{1}_B (t) \,\mathrm{d} t < \varepsilon $$ such that \begin{equation} \label{e} \left|\zeta(\sigma+it) \right|\leq M_{\varepsilon} \qquad\mbox{for }\sigma\geq \alpha \mbox{ and } t\in B^c. 
\end{equation}}This follows from a standard method which we shall explain later on in the proof of Lemma \ref{th:suffconditionsN} (b).\par In particular, we deduce from \eqref{e} that, \textit{for every $\varepsilon>0$ and $\alpha>\frac{1}{2}$, there exist a constant $M_{\varepsilon}>0$ and a subset $B\subset [1,\infty)$ of upper density $$ \limsup_{T\rightarrow\infty} \frac{1}{T} \int_1^{T} \pmb{1}_B (t) \,\mathrm{d} t < \varepsilon $$ such that, for every $\sigma\geq \alpha$ and $k\in\mathbb{N}$, \begin{equation}\label{limT} \limsup_{T\rightarrow \infty}\frac{1}{T}\int_1^T \left|\zeta(\sigma+it) \right|^{2k}\pmb{1}_{B^c}(t) \,\mathrm{d} t \leq M_{\varepsilon}^{2k}. \end{equation}}This provides already a weak version of Tanaka's result.\par Tanaka used some additional ergodic theoretical reasoning to control the limit in \eqref{limT} as $\varepsilon\rightarrow 0$. He obtained that, \textit{for every $\varepsilon>0$ and $\alpha>\frac{1}{2}$, there exists a subset $A\subset [1,\infty)$ of density $$ \lim_{T\rightarrow\infty} \frac{1}{T} \int_1^{T} \pmb{1}_A (t) \,\mathrm{d} t =0 $$ such that, for every $\sigma\geq \alpha$ and $k\in\mathbb{N}$, $$ \lim_{T\rightarrow \infty}\frac{1}{T}\int_1^T \left|\zeta(\sigma+it) \right|^{2k}\pmb{1}_{A^c}(t) \,\mathrm{d} t =\sum_{n=1}^{\infty}\frac{d_k(n)^2}{n^{2\sigma}}. $$}\par We extend Tanaka's result to a large class of functions which we shall denote by $\mathcal{N}$. 
We rely here essentially on the ideas and methods developed by Tanaka \cite{tanaka:2008}.\par Two features of a function $\mathcal{L}$ are crucial in order to obtain an asymptotic expansion for the second power moment $(k=1)$ of $\mathcal{L}$ in the sense of Tanaka: \begin{itemize} \item[(i)] In the half-plane $\sigma>1$, the function $\mathcal{L}$ is represented by a Dirichlet series $$ \mathcal{L}(s)=\sum_{n=1}^{\infty} \frac{a(n)}{n^s}, \qquad \sigma>1, $$ with coefficients $a(n)\in\mathbb{C}$ satisfying $$ \sum_{n=1}^{\infty} \frac{|a(n)|^2}{n^{\sigma}}<\infty, \qquad \mbox{ for }\sigma>1. $$ \item[(ii)] The function $\mathcal{L}$ satisfies a certain normality feature in the half-plane $\sigma>\frac{1}{2}$ which we shall define later on in Section \ref{sec:classN}. This normality feature is more or less equivalent to the boundedness property \eqref{e} stated above for the Riemann zeta-function. \end{itemize} In order to obtain asymptotic expansions for the $2k$-th moment for $\mathcal{L}$ in the sense of Tanaka with $k\in\mathbb{N}$, it is necessary that both $\mathcal{L}$ and its $k$-th powers $\mathcal{L}^k$ satisfy property (i). For this purpose, we study in Chapter \ref{ch:coeff} the Dirichlet series expansions of $\mathcal{L}^k$ and other functions related to a given Dirichlet series $\mathcal{L}$. Here, we mainly work with Dirichlet series that satisfy the Ramanujan hypothesis.\par Dirichlet series can be modeled by an ergodic flow on the infinite dimensional torus. We outline this concept in Chapter \ref{ch:poly} and focus especially on the results which we shall need later on to prove our extended version of Tanaka's result in Chapter \ref{ch:probmom}. \par In Chapter \ref{ch:classN} we define the normality feature stated in property (ii) above and set up the class $\mathcal{N}$. 
We investigate basic properties of functions in $\mathcal{N}$ which we shall need later on to prove our main result.\par In Chapter \ref{ch:probmom} we state and prove our main theorem, i.e. an extended version of Tanaka's result. \chapter{Arithmetic functions and Dirichlet series coefficients}\label{ch:coeff} In this chapter we study the Dirichlet series expansions of certain functions related to a given Dirichlet series $\mathcal{L}$. We start with some basic properties of certain arithmetic functions. \section{Arithmetic functions connected to the Riemann zeta-function} A function $a:\mathbb{N}\rightarrow\mathbb{C}$ is said to be an {\it arithmetic function}. According to the fundamental theorem of arithmetic, for every $n\in\mathbb{N}$ and every prime number $p\in\mathbb{P}$ with $p|n$, there exist uniquely determined quantities $\nu(n;p)\in\mathbb{N}$ such that \begin{equation}\label{eq:fundamentaltheoremarith} n = \prod_{\begin{subarray}{c}p\in\mathbb{P}\\ p|n\end{subarray}} p^{\nu(n;p)}. \end{equation} We call an arithmetic function $a:\mathbb{N}\rightarrow\mathbb{C}$ {\it multiplicative} if $$ a(n)=\prod_{\begin{subarray}{c}p\in\mathbb{P}\\ p|n\end{subarray}}a( p^{\nu(n;p)}) \qquad \mbox{for }n\in\mathbb{N}. $$ If $a$ satisfies the stronger property $$ a(n)=\prod_{\begin{subarray}{c}p\in\mathbb{P}\\ p|n\end{subarray}}a( p)^{\nu(n;p)} \qquad \mbox{for }n\in\mathbb{N}, $$ we call the function $a$ {\it completely multiplicative}. 
For two arithmetic functions $a,b:\mathbb{N}\rightarrow\mathbb{C}$, the arithmetic function $a*b:\mathbb{N}\rightarrow\mathbb{C}$ defined by $$ (a*b)(n):= \sum_{\begin{subarray}{c}(n_1,n_2)\in\mathbb{N}^2 \\ n_1n_2 = n\end{subarray}} a(n_1)b(n_2) \qquad \mbox{for }n\in\mathbb{N} $$ is said to be the {\it Dirichlet convolution of $a$ and $b$}.\par We associate a given arithmetic function $a:\mathbb{N}\rightarrow\mathbb{C}$, $n\mapsto a(n)$ with the formal Dirichlet series $$ \mathcal{L}(s)=\sum_{n=1}^{\infty}\frac{a(n)}{n^s}, $$ and vice versa. We say that the coefficients of a given Dirichlet series are (completely) multiplicative if its associated arithmetic function is (completely) multiplicative. If the Dirichlet series $$ \mathcal{L}_1(s)=\sum_{n=1}^{\infty}\frac{a(n)}{n^s}\qquad \mbox{and}\qquad \mathcal{L}_2(s)=\sum_{n=1}^{\infty}\frac{b(n)}{n^s} $$ converge absolutely for a given $s\in\mathbb{C}$, then their product $\mathcal{L}_1(s)\cdot\mathcal{L}_2(s)$ is also an absolutely convergent Dirichlet series given by \begin{equation}\label{eq:l1l2} \mathcal{L}_1 (s)\cdot \mathcal{L}_2(s) =\sum_{n=1}^{\infty}\frac{(a*b)(n)}{n^s}. \end{equation} {\bf The generalized divisor function.} We fix $\kappa\in\mathbb{R}$ and define, for every prime number $p\in\mathbb{P}$ and every $\nu\in\mathbb{N}_0$, $$ d_{\kappa}(p^{\nu}):=\binom{\kappa+\nu-1}{\nu}, $$ where the generalized binomial coefficient is defined in a standard way: $$ \binom{x}{0} := 1 \quad\mbox{and} \quad \binom{x}{\nu}:= \frac{1}{\nu!} \prod_{j=0}^{\nu-1} (x-j) \quad \mbox{for } x\in\mathbb{R} \mbox{ and } \nu\in\mathbb{N}. $$ From the functional equation of the Gamma-function, we derive that $$ d_{\kappa}(p^{\nu}) = \frac{\Gamma(\kappa+\nu)}{\Gamma(\kappa) \nu!}. $$ We set $$ d_{\kappa}(n):= \prod_{\begin{subarray}{c}p\in\mathbb{P}\\ p|n\end{subarray}}d_{\kappa}( p^{\nu(n;p)}). 
$$ for every $n\in\mathbb{N}$ and refer to the arithmetic function $d_{\kappa}:\mathbb{N}\rightarrow\mathbb{N}$ defined by $n\mapsto d_{\kappa}(n)$ as generalized divisor function with parameter $\kappa$. By definition, the function $d_{\kappa}$ is multiplicative. The Dirichlet series associated with $d_{\kappa}$ can be identified with the Dirichlet series expansion of $\zeta(s)^{\kappa}:= \exp(\kappa \log\zeta(s))$ in $\sigma>1$, where $\log \zeta(s)$ is defined in a standard way. For details we refer to Tenenbaum \cite[Chapt. II.5.1]{tenenbaum:1995} or Heath-Brown \cite{heathbrown:1981}. In the following lemma, we gather some well-known properties of the generalized divisor function if its allied parameter is an integer. In this case, $d_{\kappa}$ has an important number theoretical interpretation: for $k\in\mathbb{N}$, the quantity $d_k(n)$ counts the representations of $n\in\mathbb{N}$ as a product of $k$ natural numbers; additionally, we have $d_{-k}(n)=0$ if and only if there exist a prime number $p$ with $p|n$ such that $\nu(n;p)\geq k+1$. \begin{lemma}\label{lem:divisor} Let $k\in\mathbb{N}$. \item[(a)] For $n\in\mathbb{N}$, $$ d_0(n)=\begin{cases} 1 & \mbox{if }n=1, \\ 0 & \mbox{if $n\neq 1$,}\end{cases} \qquad\mbox{and}\qquad d_1(n)=1. $$ \item[(b)] The function $d_k$ is the $k$-fold Dirichlet convolution of $d_1$. In particular, for $n\in\mathbb{N}$, $$ d_k(n)=\sum_{\begin{subarray}{c} (n_1,...,n_k) \in\mathbb{N}^k \\ n_1\cdots n_k = n \end{subarray}} 1. $$ \item[(c)] For sufficiently large $n\in\mathbb{N}$, $$ 1\leq d_k(n) \leq \exp\left( (k-1) \log 2 \frac{\log n}{\log\log n}\left(1 + O\left(\frac{\log\log\log n}{\log\log n}\right) \right) \right). $$ \item[(d)] The function $d_{-1}$ is the classical M\"obius function $\mu$. 
In particular, for $n\in\mathbb{N}$, $$ d_{-1}(n) = \mu(n) = \begin{cases} (-1)^r & \mbox{if } n=p_1\cdots p_r \mbox{ with pairwise distinct } p_1,...,p_r\in\mathbb{P}, \\ 0 & \mbox{otherwise.} \end{cases} $$ \item[(e)] The function $d_{-k}$ is the $k$-fold Dirichlet convolution of $d_{-1}$. In particular, for $p\in\mathbb{P}$ and $\nu\in\mathbb{N}$ with $\nu\geq k+1$, $$ d_{-k}(p^{\nu}) = 0. $$ \item[(f)] For $n\in\mathbb{N}$, we have $|d_{-k}(n)|\leq d_{k}(n)$. \end{lemma} \begin{proof}[Sketch of the proof:] The statements (a) and (d) follow directly from the definition of $d_k$ and a short computation. As the Dirichlet series associated with $d_k$ can be identified as the Dirichlet series expansion of $\zeta(s)^k$ in $\sigma>1$, we obtain that $$ \zeta(s)^k = \left(\sum_{n=1}^{\infty} \frac{1}{n^s} \right)^k = \sum_{n=1}^{\infty}\frac{d_k(n)}{n^s},\qquad \sigma>1. $$ Therefrom, we deduce that \begin{equation}\label{eq:kfoldDirichlet} d_k = d_{k-1}* d_1=\underbrace{d_1 * ...* d_1}_{k\scalebox{0.8}{\mbox{-times}}}. \end{equation} Hence, $d_k$ is the $k$-fold Dirichlet convolution of $d_1$. This implies that \begin{equation}\label{repdk} d_k(n) = \sum_{\begin{subarray}{c} (n_1,...,n_k) \in\mathbb{N}^k \\ n_1\cdots n_k = n \end{subarray}} 1 \qquad \mbox{for }n\in\mathbb{N} . \end{equation} Alternatively, the latter identity can be derived from the fundamental theorem of arithmetics and a combinatorial argument. Altogether, statement (b) follows. By standard estimates, we obtain that, for sufficiently large $n\in\mathbb{N}$, \begin{equation}\label{eq:d2n} d_2(n) \leq \exp\left( \log 2 \frac{\log n}{\log\log n}\left(1 + O\left(\frac{\log\log\log n}{\log\log n}\right) \right) \right); \end{equation} see, for example, Steuding \cite[Chapt. 2.3]{steuding:2007} or Hardy \& Wright \cite[Chapt. 18.1]{hardywright:1979}. 
It follows from \eqref{eq:kfoldDirichlet} that, for $n\in\mathbb{N}$, $$ d_k(n) = \sum_{\begin{subarray}{c} (n_1,n_2) \in\mathbb{N}^2 \\ n_1\cdot n_2 = n \end{subarray}} d_{k-1}(n_1) d_1(n_2)\leq \, d_{k-1}(n)\cdot \hspace{-0.3cm}\sum_{\begin{subarray}{c} (n_1,n_2) \in\mathbb{N}^2 \\ n_1\cdot n_2 = n \end{subarray}} \hspace{-0.1cm}1 = d_{k-1}(n)\cdot d_2(n). $$ Iteratively, we obtain that $$ d_k(n) \leq d_2(n)^{k-1}\qquad \mbox{for }n\in\mathbb{N}. $$ Together with \eqref{eq:d2n}, this proves the upper bound for $d_k(n)$ in statement (c). The lower bound for $d_k(n)$, i.e. $$ d_k(n) \geq 1 \qquad \mbox{for }n\in\mathbb{N}, $$ follows immediately from the definition of $d_k$. The identity $$ \zeta(s)^{-k} = \left(\sum_{n=1}^{\infty} \frac{d_{-1}(n)}{n^s} \right)^{k} = \sum_{n=1}^{\infty} \frac{d_{-k}(n)}{n^s}, \qquad \sigma>1, $$ implies that $d_{-k}$ is the $k$-fold Dirichlet convolution of $d_{-1}$. Moreover, we deduce from the definition of $d_{\kappa}$ that, for any $\kappa\in\mathbb{R}$, $p\in\mathbb{P}$ and $\nu\in\mathbb{N}$, \begin{equation}\label{product1} d_{\kappa}(p^{\nu}) = \frac{1}{\nu!} \prod_{j=0}^{\nu-1} (\kappa+\nu - 1 -j) = \frac{1}{\nu!} \prod_{j=0}^{\nu-1} (\kappa + j). \end{equation} If we set $\kappa=-k$ in \eqref{product1}, then zero occurs as a factor in the products on the right-hand side, whenever $\nu\geq k+1$. Statement (e) follows. Moreover, we deduce from \eqref{product1} that, for $p\in\mathbb{P}$ and $\nu\in\mathbb{N}$, $$ \left| d_{-k}(p^{\nu}) \right| = \left| \frac{1}{\nu!} \prod_{j=0}^{\nu-1} (-k + j)\right| \leq \frac{1}{\nu!} \prod_{j=0}^{\nu-1} (k + j) = d_{k}(p^{\nu}). $$ Statement (f) follows then from the multiplicativity of $d_{-k}$ and $d_{k}$. \end{proof} Now, we state some basic properties of $d_{\kappa}$ in the general situation if its allied parameter $\kappa$ is an arbitrary real number. \begin{lemma}\label{lem:gendivisor} Let $\kappa\in\mathbb{R}$. Then, the following statements are true.
\begin{itemize} \item[(a)] If $\kappa \geq 0$, then $d_{\kappa}(n)\geq 0$ for $n\in\mathbb{N}$. If $\kappa\geq 1$, then $d_{\kappa}(n)\geq 1$ for $n\in\mathbb{N}$. \item[(b)] For $n\in\mathbb{N}$ and $K\geq |\kappa|$, we have $\left| d_{\kappa}(n) \right| \leq d_{K}(n)$. \item[(c)] There is an absolute constant $c>0$ such that, for every $p\in\mathbb{P}$ and every $\nu\in\mathbb{N}_0$, $$ d_{\kappa}(p^{\nu}) < c|\kappa| \nu^{\kappa-1}. $$ \item[(d)] For sufficiently large $n\in\mathbb{N}$, $$ \left| d_{\kappa}(n) \right| \leq \exp\left( c(\kappa) \log 2 \frac{\log n}{\log\log n}\left(1 + O\left(\frac{\log\log\log n}{\log\log n}\right) \right) \right), $$ where $c(\kappa):= \min\{n\in\mathbb{N} \, : \, n\geq |\kappa|-1\}$. \item[(e)] For $m\in\mathbb{N}$, the function $d_{m\kappa}$ is the $m$-fold Dirichlet convolution of $d_{\kappa}$. In particular, for $n\in\mathbb{N}$, \begin{align*} d_{m\kappa}(n)&= \sum_{\begin{subarray}{c}(n_1,...,n_m)\in\mathbb{N}^m\\ n_1\cdots n_m = n\end{subarray}} d_{\kappa}(n_1)\cdots d_{\kappa}(n_m) = \prod_{\begin{subarray}{c} p\in\mathbb{P}\\ p|n\end{subarray}} \; \sum_{\begin{subarray}{c} k_1,...,k_m \in \mathbb{N}_0 \\ k_1+...+k_m =\nu(n;p) \end{subarray}} \prod_{j=1}^m d_\kappa(p^{k_j}). \end{align*} \end{itemize} \end{lemma} \begin{proof} Let $\kappa\in\mathbb{R}$. It follows immediately from \eqref{product1} and the multiplicativity of $d_{\kappa}$ that, for $n\in\mathbb{N}$, $$ d_{\kappa}(n)\geq \begin{cases}0 & \mbox{if }\kappa\geq 0, \\ 1 & \mbox{if }\kappa\geq 1 .\end{cases} $$ Statement (a) is proved. Similarly, we derive statement (b) from the observation that, for $p\in\mathbb{P}$, $\nu\in\mathbb{N}$ and any $K\geq |\kappa|$, $$ \left|d_{\kappa}(p^{\nu})\right| \leq \frac{1}{\nu!}\prod_{j=0}^{\nu-1} (\left|\kappa\right| +j)\leq d_{K}(p^{\nu}). $$ Now, we rewrite \eqref{product1} in the form \begin{equation}\label{darstellung} d_{\kappa}(p^{\nu}) = \frac{\kappa}{\nu}\cdot \prod_{j=1}^{\nu-1} \left( 1+\frac{\kappa}{j}\right).
\end{equation} By using the inequality $1+x \leq \exp(x)$, which is true for any real $x$, we obtain that, for $p\in\mathbb{P}$ and $\nu\in\mathbb{N}$, $$ \left| d_{\kappa}(p^{\nu}) \right| \leq \frac{|\kappa|}{\nu}\exp\left( |\kappa| \sum_{j=1}^{\nu-1}\frac{1}{j}\right). $$ A standard asymptotic estimate for the partial sums of the harmonic series yields the existence of an absolute constant $c>0$ such that, for every $p\in\mathbb{P}$ and every $\nu\in\mathbb{N}_0$, $$ d_{\kappa}(p^{\nu}) < c |\kappa| \nu^{\kappa-1}. $$ Statement (c) is proved. Statement (d) follows by combining the estimates of statement (b) and Lemma \ref{lem:divisor} (c). The identity $$ \sum_{n=1}^{\infty}\frac{d_{\kappa m}(n)}{n^s}=\zeta(s)^{\kappa m} = \left(\zeta(s)^{\kappa}\right)^m = \left(\sum_{n=1}^{\infty}\frac{d_{\kappa}(n)}{n^s}\right)^m $$ implies that $d_{m\kappa}$ is the $m$-fold Dirichlet convolution of $d_{\kappa}$ and that $$ d_{m\kappa}(n)=\sum_{\begin{subarray}{c}(n_1,...,n_m)\in\mathbb{N}^m\\ n_1\cdots n_m = n\end{subarray}} d_{\kappa}(n_1)\cdots d_{\kappa}(n_m) \qquad\mbox{for }n\in\mathbb{N}. $$ The multiplicativity of $d_{\kappa}$ assures that $$ d_{m\kappa}(n) = \prod_{\begin{subarray}{c} p\in\mathbb{P}\\ p|n\end{subarray}} \; \sum_{\begin{subarray}{c} k_1,...,k_m \in \mathbb{N}_0 \\ k_1+...+k_m =\nu(n;p) \end{subarray}} \prod_{j=1}^m d_\kappa(p^{k_j})\qquad \mbox{for }n\in\mathbb{N}. $$ Statement (e) is proved. \end{proof} {\bf Von Mangoldt function.} The von Mangoldt function $\Lambda:\mathbb{N}\rightarrow \mathbb{R}$ is defined by $$ \Lambda(n)= \begin{dcases} \log p & \mbox{if } n=p^{\nu} \mbox{ with some $p\in\mathbb{P}$ and $\nu\in \mathbb{N}$},\\ \quad 0 & \mbox{otherwise}. \end{dcases} $$ The von Mangoldt function appears in the Dirichlet series expansion of the logarithmic derivative of the Riemann zeta-function. More precisely, $$ \frac{\zeta'(s)}{\zeta(s)} = - \sum_{n=1}^{\infty} \frac{\Lambda(n)}{n^s},\qquad \sigma>1.
$$ \section{The coefficients of certain Dirichlet series}\label{sec:coeff} Let \begin{equation}\label{dirichletseries} \mathcal{L}(s)=\sum_{n=1}^{\infty}\frac{a(n)}{n^s} \end{equation} be a Dirichlet series with coefficients $a(n)\in \mathbb{C}$. In the sequel, we shall pose certain conditions on the coefficients of $\mathcal{L}(s)$. Here, we work basically with the Ramanujan hypothesis and a polynomial Euler product representation. For the convenience of the reader, we recall the definition of these two features of a Dirichlet series.\par \textbf{\textit{Ramanujan hypothesis.}} {\it \textbf{}} $\mathcal{L}(s)$ is said to satisfy the Ramanujan hypothesis if, for any $\varepsilon>0$, $$ a(n)\ll_{\varepsilon} n^{\varepsilon}, $$ as $n\rightarrow\infty$. Here, the constant inherent in the Vinogradov symbol may depend on $\varepsilon$. \par \textbf{\textit{Polynomial Euler product.}} {\it \bf } $\mathcal{L}(s)$ is said to have a representation as a polynomial Euler product if there exist a positive integer $m$ and complex numbers $\alpha_1(p)$,...,$\alpha_m(p)$ such that \begin{equation}\label{eulerproduct} \mathcal{L}(s)=\prod_{p\in\mathbb{P}} \prod_{j=1}^m \left(1-\frac{\alpha_j(p)}{p^s}\right)^{-1}. \end{equation} We call the coefficients $\alpha_1(p)$,...,$\alpha_m(p)$ the local roots of $\mathcal{L}(s)$ at $p\in\mathbb{P}$.\par The Ramanujan hypothesis regulates the growth behaviour of the coefficients $a(n)$, as $n\rightarrow\infty$. The polynomial Euler product representation implies a multiplicative structure of the coefficients $a(n)$.\par In the following, we study the coefficients of Dirichlet series which are related to $\mathcal{L}(s)$ by means of certain analytic transformations. In particular, we investigate whether these related Dirichlet series satisfy the Ramanujan hypothesis under the presumption that the latter is true for $\mathcal{L}(s)$. We would like to stress that the theorems of this section are not to be considered as new. 
They occur in slightly modified versions in many papers and monographs dealing with Dirichlet series.\par First, we observe that the set of Dirichlet series which converge absolutely at a given point $s\in\mathbb{C}$ and satisfy the Ramanujan hypothesis is closed under finite summation and multiplication. \begin{theorem}\label{th:sumprodL} Let $k\in\mathbb{N}$ and $$ \mathcal{L}_j(s)=\sum_{n=1}^{\infty} \frac{a_j(n)}{n^s}, \qquad j=1,...,k $$ be absolutely convergent Dirichlet series which satisfy the Ramanujan hypothesis. Then, the following statements are true. \begin{itemize} \item[(a)] The sum $\mathcal{B}(s):=\mathcal{L}_1(s) + \dots + \mathcal{L}_k(s)$ can be written as an absolutely convergent Dirichlet series which satisfies the Ramanujan hypothesis. In particular, $$ \mathcal{B}(s)=\sum_{n=1}^{\infty} \frac{b(n)}{n^s}\qquad\mbox{with}\qquad b(n)=\sum_{j=1}^k a_j(n). $$ \item[(b)] The product $\mathcal{C}(s):=\mathcal{L}_1(s)\cdots \mathcal{L}_k(s)$ can be written as an absolutely convergent Dirichlet series which satisfies the Ramanujan hypothesis. In particular, $$ \mathcal{C}(s)=\sum_{n=1}^{\infty} \frac{c(n)}{n^s} \qquad \mbox{with}\qquad c(n)= \sum_{\begin{subarray}{c} (n_1,...,n_k) \in\mathbb{N}^k \\ n_1\cdots n_k = n \end{subarray}} a_1(n_1)\cdots a_k(n_k). $$ \end{itemize} \end{theorem} \begin{proof} Certainly, the sum of finitely many absolutely convergent Dirichlet series is again an absolutely convergent Dirichlet series. By Riemann's rearrangement theorem, we can write $$ \mathcal{B}(s)=\sum_{j=1}^k \sum_{n=1}^{\infty} \frac{a_j(n)}{n^s} = \sum_{n=1}^{\infty} \frac{b(n)}{n^s}\qquad\mbox{with}\qquad b(n):=\sum_{j=1}^k a_j(n). $$ As $\mathcal{L}_1(s),...,\mathcal{L}_k(s)$ satisfy the Ramanujan hypothesis for $j=1,...,k$, respectively, the same is true for $\mathcal{B}(s)$.\par In a similar manner, we deduce that the product of finitely many absolutely convergent Dirichlet series is again an absolutely convergent Dirichlet series.
By means of \eqref{eq:l1l2}, we obtain that $$ \mathcal{C}(s)=\prod_{j=1}^k \mathcal{L}_j(s) = \sum_{n=1}^{\infty} \frac{c(n)}{n^s} \qquad \mbox{with}\qquad c(n)= \sum_{\begin{subarray}{c} (n_1,...,n_k) \in\mathbb{N}^k \\ n_1\cdots n_k = n \end{subarray}} a_1(n_1)\cdots a_k(n_k). $$ The Ramanujan hypothesis for the coefficients of the Dirichlet series $\mathcal{L}_1(s),...,\mathcal{L}_k(s)$ implies that, for any $\varepsilon>0$, $$ c(n)\ll n^{\varepsilon}\cdot \sum_{\begin{subarray}{c} (n_1,...,n_k) \in\mathbb{N}^k \\ n_1\cdots n_k = n \end{subarray}} 1 = n^{\varepsilon}\cdot d_k(n), $$ as $n\rightarrow\infty$. It follows from the estimate for $d_k(n)$ in Lemma \ref{lem:gendivisor} (d) that the coefficients of $\mathcal{C}(s)$ satisfy the Ramanujan hypothesis. \end{proof} Next, we study absolutely convergent Dirichlet series which possess a representation as a polynomial Euler product. Steuding \cite[Chapt. 2.3]{steuding:2007} revealed the following relation between the Dirichlet series coefficient $a(n)$ and the local roots $\alpha_1(p),...,\alpha_m(p)$ of $\mathcal{L}(s)$. \begin{theorem}[Steuding, 2007]\label{coeffpoleuler} Let $\mathcal{L}(s)$ be an absolutely convergent Dirichlet series of the form \eqref{dirichletseries} that can be written as a polynomial Euler product of the form \eqref{eulerproduct}. Then, the following statements are true. \begin{itemize} \item[(a)] The Dirichlet series coefficients of $\mathcal{L}(s)$ are multiplicative and satisfy $$ a(1)=1 \qquad \mbox{ and }\qquad a(n) = \prod_{\begin{subarray}{c} p\in\mathbb{P}\\ p|n\end{subarray}} \quad \sum_{\begin{subarray}{c} k_1,...,k_m \in \mathbb{N}_0 \\ k_1+...+k_m =\nu(n;p) \end{subarray}} \prod_{j=1}^m \alpha_j(p)^{k_j} $$ for $n\in\mathbb{N}\setminus\{1\}$, where $\alpha_1(p),...,\alpha_m(p)$ denote the local roots of $\mathcal{L}(s)$ at $p\in\mathbb{P}$. \item[(b)] $\mathcal{L}(s)$ satisfies the Ramanujan hypothesis if and only if $$ \max_{j=1,...,m} |\alpha_j(p)| \leq 1 $$ for every $p\in\mathbb{P}$.
\end{itemize} \end{theorem} For a proof, we refer to Steuding \cite[Chapt. 2.3]{steuding:2007}.\par Now, we turn our attention to the analytic function that is described by a Dirichlet series for which the Ramanujan hypothesis is true.\par Suppose that $\mathcal{L}(s)$ satisfies the Ramanujan hypothesis. Then, $\mathcal{L}(s)$ is absolutely convergent in the half-plane $\sigma>1$ and defines there an analytic function $\mathcal{L}$. For $\ell\in\mathbb{N}$, let $\mathcal{L}^{(\ell)}$ denote the $\ell$-th derivative of $\mathcal{L}$ in $\sigma>1$. We shall see that $\mathcal{L}^{(\ell)}$ has a Dirichlet series expansion in $\sigma>1$ which is related to the one of $\mathcal{L}$. \begin{theorem}\label{th:Dirichletderivative} Let $\ell\in\mathbb{N}$ and $\mathcal{L}$ be an analytic function in $\sigma>1$ that is given by a Dirichlet series of the form \eqref{dirichletseries} for which the Ramanujan hypothesis is true. Then, $\mathcal{L}^{(\ell)}$ has Dirichlet series expansion $$ \mathcal{L}^{(\ell)}(s) = \sum_{n=1}^{\infty}\frac{a_{{(\ell)}}(n)}{n^s}, \qquad \sigma>1, $$ where $$ a_{{(\ell)}}(n)= (-1)^{\ell}a(n)(\log n)^{\ell}. $$ In particular, the Dirichlet series representing $\mathcal{L}^{(\ell)}$ satisfies the Ramanujan hypothesis. \end{theorem} \begin{proof} The statement follows from the observation that $$ \frac{\mbox{\ d}^{\ell}}{\mbox{\ d} s^{\ell}} \frac{a(n)}{n^s} = \frac{a(n)(\log n)^{\ell}}{n^s}, \qquad n\in\mathbb{N}, $$ and basic convergence properties of series of analytic functions; see Busam \& Freitag \cite[Theorem III.1.6]{busamfreitag:2009}. \end{proof} Suppose that $\mathcal{L}(s)$ satisfies the Ramanujan hypothesis and has a polynomial Euler product representation. Then, $\mathcal{L}(s)$ defines an analytic, non-vanishing function $\mathcal{L}$ in $\sigma>1$. Thus, there exists an analytic logarithm of $\mathcal{L}$ in $\sigma>1$. In the further course of our investigations, we define $\log \mathcal{L}$ as follows. 
Due to the polynomial Euler product representation, the leading Dirichlet series coefficient of $\mathcal{L}(s)$ is given by $a(1)=1$. By the absolute convergence of $\mathcal{L}(s)$ in $\sigma>1$, we find a $\sigma_{0} \geq 1 $ such that $$ \left| \mathcal{L}(s) - 1 \right| \leq \tfrac{3}{4}, \qquad \mbox{for }\sigma> \sigma_{0}. $$ For $s\in\mathbb{C}$, let $\mbox{Log}\ s$ denote the principal branch of the logarithm. If we set $$ (\log\mathcal{L}) (s) := \mbox{Log} (\mathcal{L}(s))\qquad \mbox{ for }\sigma>\sigma_0, $$ then $(\log\mathcal{L}) (s)$ defines a uniquely determined analytic logarithm of $\mathcal{L}$ in the half-plane $\sigma> \sigma_0$ with the property that $$ {\rm{Im} } \left( \log \mathcal{L}(s) \right) \in [-\pi,\pi) \qquad \mbox{ for }\sigma> \sigma_{0}. $$ For all other simply connected domains $\Omega\subset \mathbb{C}$ which do not contain any zero of $\mathcal{L}$ and have a non-empty intersection $\mathcal{I}$ with the half-plane $\sigma>\sigma_{0}$, we define $\log\mathcal{L}$ by extending $\log\mathcal{L} |_{\mathcal{I}}$ analytically to $\Omega$. In particular, we obtain in this manner a uniquely determined analytic logarithm of $\mathcal{L}$ in $\sigma>1$ that satisfies the normalization $$ \lim_{\sigma\rightarrow+\infty} \log \mathcal{L}(\sigma) = 0. $$ \begin{theorem}\label{th:Dirichletlog} Let $\mathcal{L}$ be an analytic function in $\sigma>1$ that is given by a Dirichlet series of the form \eqref{dirichletseries} which satisfies the Ramanujan hypothesis and can be written as a polynomial Euler product of the form \eqref{eulerproduct}. Then, $\log \mathcal{L}$ has Dirichlet series expansion $$ \log \mathcal{L}(s) = \sum_{n=1}^{\infty} \frac{a_{\log \mathcal{L}}(n)}{n^s}, \qquad \sigma>1, $$ where $$ a_{ \log \mathcal{L}}(n) = \begin{dcases} -\frac{1}{\nu} \sum_{j=1}^{m} \alpha_{j} (p)^{\nu} & \mbox{if } n=p^{\nu} \mbox{with some $p\in\mathbb{P}$ and $\nu\in \mathbb{N}$},\\ \quad 0 & \mbox{otherwise}. 
\end{dcases} $$ In particular, the Dirichlet series representing $\log \mathcal{L}$ satisfies the Ramanujan hypothesis. \end{theorem} \begin{proof} Due to Theorem \ref{coeffpoleuler} (b), the Ramanujan hypothesis implies that \begin{equation}\label{log1} \max_{j=1,...,m} |\alpha_j(p)| \leq 1 \end{equation} for $p\in\mathbb{P}$. Consequently, we get that, for $p\in\mathbb{P}$ and $\sigma>1$, $$ \sum_{j=1}^m \log \left( 1- \frac{\alpha_j(p)}{p^s}\right)^{-1} = - \sum_{j=1}^m \sum_{\nu=1}^{\infty} \frac{\alpha_{j}(p)^{\nu}}{\nu p^{\nu s}} = - \sum_{\nu=1}^{\infty} \frac{\frac{1}{\nu}\sum_{j=1}^m\alpha_{j}(p)^{\nu}}{ p^{\nu s}}. $$ This implies that, for every $p\in\mathbb{P}$ and $\sigma>1$, $$ \left|\sum_{j=1}^m \log \left( 1- \frac{\alpha_j(p)}{p^s}\right)^{-1} \right| \leq \frac{2m}{p^{\sigma}}. $$ Hence, the series \begin{equation*}\label{logbranch} f(s):= \sum_{p\in\mathbb{P}} \sum_{j=1}^m \log \left( 1- \frac{\alpha_j(p)}{p^s}\right)^{-1} \end{equation*} converges absolutely in $\sigma>1$. By means of the polynomial Euler product representation, $f(s)$ defines an analytic logarithm of $\mathcal{L}$ in $\sigma>1$. According to the observation that, uniformly for $t\in\mathbb{R}$, $$ f(\sigma + it) = o(1), \qquad \mbox{as }\sigma\rightarrow +\infty, $$ the branch of the logarithm $f$ and the branch chosen for $\log \mathcal{L}$ coincide. Hence, we derive that, for $\sigma>1$, $$ \log \mathcal{L}(s) = - \sum_{p\in\mathbb{P}} \sum_{\nu=1}^{\infty} \frac{\frac{1}{\nu}\sum_{j=1}^m\alpha_{j}(p)^{\nu}}{ p^{\nu s}} = \sum_{n=1}^{\infty} \frac{a_{\log \mathcal{L}}(n)}{n^s}. $$ Due to \eqref{log1}, we obtain that $|a_{\log \mathcal{L}}(n)|\leq m$ for $n\in\mathbb{N}$. Hence, the Dirichlet series representing $\log \mathcal{L}$ satisfies the Ramanujan hypothesis and the theorem is proved. \end{proof} Next, we consider the logarithmic derivative of $\mathcal{L}$.
We shall see that there appears a generalized form of the von Mangoldt function in the Dirichlet series expansion of $\mathcal{L}'/\mathcal{L}$. \begin{theorem}\label{th:dirichletlogderivative} Let $\mathcal{L}$ be an analytic function in $\sigma>1$ that is given by a Dirichlet series of the form \eqref{dirichletseries} which satisfies the Ramanujan hypothesis and can be written as a polynomial Euler product of the form \eqref{eulerproduct}. Then, the logarithmic derivative $\mathcal{L}'/\mathcal{L}$ has Dirichlet series expansion $$ \frac{\mathcal{L}'(s)}{\mathcal{L}(s)} = -\sum_{n=1}^{\infty} \frac{\Lambda_{\mathcal{L}}(n)}{n^s}, \qquad \sigma>1, $$ where $$ \Lambda_{\mathcal{L}}(n) = \begin{dcases} \left( \sum_{j=1}^{m} \alpha_{j} (p)^{\nu} \right) \log p & \mbox{if } n=p^{\nu} \mbox{ with some $p\in\mathbb{P}$ and $\nu\in \mathbb{N}$},\\ \quad 0 & \mbox{otherwise}. \end{dcases} $$ In particular, the Dirichlet series representing $\mathcal{L}'/\mathcal{L}$ satisfies the Ramanujan hypothesis. \end{theorem} \begin{proof} By combining Theorem \ref{th:Dirichletderivative} and Theorem \ref{th:Dirichletlog}, we get that $\mathcal{L}'/\mathcal{L}$ has the stated Dirichlet series expansion in $\sigma>1$. Theorem \ref{coeffpoleuler} (b) assures that, for $n\in\mathbb{N}$, $$ \left| \Lambda_{\mathcal{L}}(n) \right| \leq m\log n. $$ Hence, the Dirichlet series representing $\mathcal{L}'/\mathcal{L}$ satisfies the Ramanujan hypothesis. \end{proof} For $\kappa\in\mathbb{R}$, we define $\mathcal{L}^{\kappa}$ by $$ \mathcal{L}^{\kappa}(s):=\mathcal{L}(s)^{\kappa}:= \exp\left(\kappa \log \mathcal{L}(s) \right), \qquad \sigma>1. $$ The next theorem deals with the Dirichlet series expansion of $\mathcal{L}^{\kappa}$.
\begin{theorem}\label{th:Lkappa} Let $\mathcal{L}$ be an analytic function in $\sigma>1$ that is given by a Dirichlet series of the form \eqref{dirichletseries} which satisfies the Ramanujan hypothesis and can be written as a polynomial Euler product of the form \eqref{eulerproduct}. Then, $\mathcal{L}^{\kappa}$ has Dirichlet series expansion $$ \mathcal{L}^{\kappa}(s) = \sum_{n=1}^{\infty} \frac{a_{\kappa}(n)}{n^s}, \qquad \sigma>1, $$ where $$ a_{\kappa}(1)=1 \qquad \mbox{ and }\qquad a_{\kappa}(n) = \prod_{\begin{subarray}{c} p\in\mathbb{P}\\ p|n\end{subarray}} \quad \sum_{\begin{subarray}{c} (k_1,...,k_m) \in \mathbb{N}_0^m \\ k_1+...+k_m =\nu(n;p) \end{subarray}} \prod_{j=1}^m d_{\kappa}(p^{k_j})\alpha_j(p)^{k_j} $$ for $n\in\mathbb{N}\setminus\{1\}$. In particular, the coefficients $a_{\kappa}(n)$ are multiplicative and the Dirichlet series representing $\mathcal{L}^{\kappa}$ satisfies the Ramanujan hypothesis. \end{theorem} \begin{proof} Let $\kappa\in\mathbb{R}$. According to Theorem \ref{coeffpoleuler} (b), the Ramanujan hypothesis assures that, for $p\in\mathbb{P}$, \begin{equation}\label{maxalphaj} \max_{j=1,...,m}|\alpha_j(p)| \leq 1. \end{equation} The Taylor expansion $$ (1-z)^{-\kappa} = \sum_{\nu =0}^{\infty} \binom{\kappa+\nu-1}{\nu} z^{\nu} \qquad \mbox{for }z\in\mathbb{C} \mbox{ with }|z|<1, $$ yields that, for $p\in\mathbb{P}$ and $\sigma>1$, $$ \left(1-\frac{\alpha_j(p)}{p^s}\right)^{-\kappa} = 1+\sum_{\nu=1}^{\infty}\frac{d_{\kappa}(p^{\nu})\alpha_j(p)^{\nu}}{p^{\nu s}}. $$ From \eqref{maxalphaj} and the estimate of Lemma \ref{lem:gendivisor} (c) for $d_{\kappa}(p^{\nu})$, we derive that the series $$ \sum_{p\in\mathbb{P}} \sum_{j=1}^m \sum_{\nu=1}^{\infty}\frac{ d_{\kappa}(p^{\nu})\alpha_j(p)^{\nu}}{p^{\nu s}} $$ converges absolutely in $\sigma>1$.
Hence, we conclude that \begin{equation*} \mathcal{L}(s)^{\kappa} = \prod_{p\in\mathbb{P}} \prod_{j=1}^m \left(1-\frac{\alpha_j(p)}{p^s}\right)^{-\kappa} = \prod_{p\in\mathbb{P}} \prod_{j=1}^m \left(1+\sum_{\nu=1}^{\infty}\frac{d_{\kappa}(p^{\nu})\alpha_j(p)^{\nu}}{p^{\nu s}}\right), \qquad \sigma>1, \end{equation*} where the infinite product and sum appearing in the latter expression converge absolutely in $\sigma>1$. By multiplying out and rearranging the terms, we obtain that $$ \mathcal{L}(s)^{\kappa} = \sum_{n=1}^{\infty} \frac{a_{\kappa} (n)}{n^s}, \qquad \sigma>1, $$ with $a_{\kappa}(1)=1$ and $$ a_{\kappa}(n) = \prod_{\begin{subarray}{c} p\in\mathbb{P}\\ p|n\end{subarray}} \quad \sum_{\begin{subarray}{c} (k_1,...,k_m) \in \mathbb{N}_0^m \\ k_1+...+k_m =\nu(n;p) \end{subarray}} \prod_{j=1}^m d_{\kappa}(p^{k_j})\alpha_j(p)^{k_j} \qquad \mbox{for }n\in\mathbb{N}\setminus\{1\}. $$ We deduce from \eqref{maxalphaj} and the properties of $d_{\kappa}(n)$ in Lemma \ref{lem:gendivisor} (b) and (e) that, for $n\in\mathbb{N}$, $$ \left| a_{\kappa}(n) \right| \leq \prod_{\begin{subarray}{c} p\in\mathbb{P}\\ p|n\end{subarray}} \quad \sum_{\begin{subarray}{c} (k_1,...,k_m) \in \mathbb{N}_0^m \\ k_1+...+k_m =\nu(n;p) \end{subarray}} \prod_{j=1}^m d_{|\kappa|}(p^{k_j}) =d_{m|\kappa|}(n). $$ Now, it follows from Lemma \ref{lem:gendivisor} (d) that the coefficients $a_{\kappa}(n)$ satisfy the Ramanujan hypothesis. \end{proof} Beyond our considerations, it would be interesting to investigate the situation if $\mathcal{L}(s)$ cannot be written as a polynomial Euler product, but only as an Euler product of the general form used in the definition of the Selberg class. In this case, some additional obstacles occur. In particular, it becomes difficult to control the growth behaviour of the coefficients appearing in the Dirichlet series expansion of $\zeta(s)^{\kappa}$, $\kappa< 0$, by means of the Ramanujan hypothesis; see de Roton \cite[Sect.
2]{deroton:2009} and Kaczorowski \& Perelli \cite[Lemma 2]{kaczorowskiperelli:2003}. \chapter{Dirichlet series and the infinite dimensional torus}\label{ch:poly} It was an ingenious idea of Bohr \cite{bohr:1913-2} to model Dirichlet series as functions on the infinite dimensional torus. Meanwhile, this approach was translated into the modern language of functional analysis and probability theory. Concerning the probabilistic approach, we refer to the pioneering work of Bagchi \cite{bagchi:1981} and the extensive work of Laurin\v{c}ikas, see for example \cite{laurincikas:1991-2}. Concerning the functional analytic point of view, we refer to the seminal papers of Helson \cite{helson:1967,helson:1969} and the recent works by Hedenmalm, Lindqvist \& Seip \cite{hedenmalmlindqvistseip:1997, hedenmalmlindqvistseip:1999} and Tanaka \cite{tanaka:2001, tanaka:2008}.\par \section{The infinite dimensional torus, the compact group \texorpdfstring{$K$}{ } and a local product decomposition of \texorpdfstring{$K$}{ }}\label{sec:K} {\bf The discrete group $\Gamma$.} Let $\Lambda$ be a countable set of real numbers which are linearly independent over $\mathbb{Q}$. Let $(\lambda_n)_{n\in\mathbb{N}}$ be a denumeration of the elements of $\Lambda$ in ascending order. Further, let $\Gamma$ be the additive subgroup of $\mathbb{R}$ that is generated by $\Lambda$. It follows from the linear independence of the elements in $\Lambda$ that, for every $\gamma\in\Gamma$, there exist uniquely determined quantities $\nu_n(\gamma) \in \mathbb{Z}$, indexed by $n\in\mathbb{N}$, such that \begin{equation}\label{coordinates} \gamma = \sum_{n=1}^{\infty} \nu_n(\gamma)\cdot \lambda_n. \end{equation} For given $\gamma\in\Gamma$, all but finitely many of the quantities $\nu_n(\gamma)$ are equal to zero. This implies, in particular, that the sum in \eqref{coordinates} is finite.\par We endow $\Gamma$ with the discrete topology.
In this way, $\Gamma$ becomes a locally compact abelian Hausdorff group (LCA-group). Moreover, as $\Gamma$ has only countably many elements, $\Gamma$ is separable as a topological space. For LCA-groups there is a generalized concept of Fourier analysis. We shall briefly sketch the very basics of these concepts. For details and more information the reader is referred to Deitmar \cite{deitmar:2002} and Hewitt \& Ross \cite{hewittross:1994}.\par {\bf Excursus: abstract harmonic analysis.} Let ${\sf G}$ be an LCA-group and $\mathbb{T}:=\{z\in\mathbb{C} \, : \,|z|=1\}$ denote the circle group, endowed with the standard topology generated by open arcs. A continuous group homomorphism $\chi: {\sf G} \rightarrow \mathbb{T}$ is said to be a character of ${\sf G}$. Under pointwise multiplication, the set ${\sf G}^*$ of all characters of ${\sf G}$ forms a group, the so-called dual group of ${\sf G}$. By endowing ${\sf G}^*$ with the compact-open topology, ${\sf G}^*$ becomes also an LCA-group. It is a fundamental observation that ${\sf G}^*$ is compact, if ${\sf G}$ is discrete and that ${\sf G}^*$ is discrete, if ${\sf G}$ is compact; see Deitmar \cite[Prop. 7.2.1]{deitmar:2002}. In fact, the Pontryagin duality theorem reveals that ${\sf G}$ can be identified group-theoretically and topologically with its bidual ${\sf G}^{**}$; see Hewitt \& Ross \cite[\S 24]{hewittross:1994}. A further observation that we shall use later on is that the dual group ${\sf G}^*$ of ${\sf G}$ is metrizable if ${\sf G}$ is a separable LCA-group. \par On every LCA-group there exists a non-trivial, non-negative, regular and translation-invariant measure, called Haar-measure, which is unique up to scalar multiplication; see for example Hewitt \& Ross \cite[\S 15,16]{hewittross:1994}.
Let ${\sf G}$ be an LCA-group and $\pmb{\sigma}$ a Haar-measure on ${\sf G}$, then we define, for $p\geq 1$, the space $$ L^p_{\pmb{\sigma}}({\sf G}):= \left\{f:{\sf G}\rightarrow \mathbb{C} \, : \, \int_{{\sf G}} \left| f \right|^p \mbox{\ d} \pmb{\sigma}<\infty \right\}. $$ If $f\in L^1_{\pmb{\sigma}}({\sf G})$, we call $\hat{f}: {\sf G}^*\rightarrow \mathbb{C}$, defined by \begin{equation}\label{fouriert} \hat{f}(\chi) = \int_{{\sf G}} f\, \overline{\chi} \mbox{\ d}\pmb{\sigma}, \end{equation} where $\overline{\chi}$ denotes the complex conjugation of a character $\chi\in {\sf G}^*$, the Fourier transform of $f$. \par The theorem of Plancherel connects the $L^2$-norm of $f$ with the $L^2$-norm of its Fourier transform; see Hewitt \& Ross \cite[\S 31]{hewittross:1994}. The theorem of Plancherel can be considered as an analogue of Parseval's theorem for Fourier series. For sake of simplicity, we assume that ${\sf G}$ is compact. Then, firstly, we find a uniquely determined Haar measure on ${\sf G}$ that satisfies the normalization $\pmb{\sigma}\left({\sf G}\right) =1$. Secondly, the Cauchy-Schwarz inequality implies that $L^2_{\pmb{\sigma}}({\sf G})\subset L^1_{\pmb{\sigma}}({\sf G})$. And thirdly, we know that the dual group ${\sf G}^*$ of ${\sf G}$ is discrete. In this special situation, the theorem of Plancherel states that \begin{equation}\label{plancherel} \int_{\sf G}\left| f \right|^2 \mbox{\ d} \pmb{\sigma} = \sum_{\chi \in {\sf G}^*} \left| \hat{f}(\chi) \right|^2. \end{equation} Here, a central ingredient in the proof is the fact that, for every two characters $\chi,\psi\in {\sf G}^*$, \begin{equation}\label{fourierorth} \int_{{\sf G}} \chi\, \overline{\psi} \mbox{\ d} \pmb{\sigma}= \begin{cases}1, & \mbox{if }\chi=\psi,\\ 0, &\mbox{otherwise.} \end{cases} \end{equation} {\bf The dual group $K$ of $\Gamma$.} In the following, let $K$ be the dual group of $\Gamma$. By the remarks above, we conclude that $K$ is a compact and metrizable group. 
Hence, there is a unique Haar-measure $\pmb{\sigma}$ on $K$ that satisfies the normalization $\pmb{\sigma}(K)=1$. In the sequel, we denote the elements of $K$, i.e. the characters of $\Gamma$, by $$ x:\Gamma\rightarrow\mathbb{T} $$ and the characters of $K$ by $$ \chi : K \rightarrow \mathbb{T}. $$ There is a natural identification of $K$ with the infinite-dimensional torus $$ \mathbb{T}^{\infty}:= \mathbb{T}_1 \times \mathbb{T}_2 \times ..., $$ which is given as the direct product of countably many copies $\mathbb{T}_n$ of the unit circle $\mathbb{T}$. For given $$ \omega:=\left(e^{i\theta_n}\right)_{n\in\mathbb{N}} = \left(e^{i\theta_1},e^{i\theta_2},... \right)\in \mathbb{T}^{\infty}, $$ we define $x_{\omega}:\Gamma \rightarrow \mathbb{T}$ to be the character of $\Gamma$ that satisfies $$ x_{\omega}(\lambda_n) = e^{i\theta_n}, \qquad n\in\mathbb{N}. $$ As the elements of $\mathcal{L}ambda$ are both linearly independent over $\mathbb{Q}$ and generate the group $\Gamma$, the character $x_{\omega}$ is well-defined and uniquely determined. It is easy to see that the map $$ h:\mathbb{T}^{\infty} \rightarrow K, \qquad \omega \mapsto x_{\omega}, $$ is a group isomorphism. If we endow $\mathbb{T}^{\infty}$ with the product topology, then $\mathbb{T}^{\infty}$ is compact, due to Tychonoff's theorem; see Loomis \cite{loomis:1953}. In this case, the map $h$ is also a homeomorphism between $K$ and $\mathbb{T}^{\infty}$ which allows us to identify $K$ with $\mathbb{T}^{\infty}$ in the sequel.\par The uniquely determined Haar-measure $\pmb{\sigma}'$ on $\mathbb{T}^{\infty}$ satisfying $\pmb{\sigma}'(\mathbb{T}^{\infty})=1$ coincides with the properly normalized product measure on $\mathbb{T}^{\infty}$: let $\lambda$ be the arc length measure on $\mathbb{T}$, normalized such that $\lambda(\mathbb{T})=1$. Then, for every set $$ E:=E_1\times...\times E_{N}\times \mathbb{T} \times \mathbb{T}\times ... 
\subset\mathbb{T}^{\infty} $$ with arbitrary Borel subsets $E_1,..., E_N\subset \mathbb{T}$, we have $$ \pmb{\sigma}'(E) = \lambda(E_1)\cdots \lambda(E_N). $$ {\bf A local product decomposition of $K$.} Local product decompositions of compact groups go back to Hoffman \cite{hoffman:1958}. They are important tools to study the structure of compact abelian groups which occur as dual groups of a subgroup of the discrete real line; see Gamlin \cite{gamelin:1969}. Roughly speaking, a local product decomposition of a compact group decomposes the latter into a compact subgroup and a real interval. Tanaka \cite{tanaka:2008} used a local product decomposition of $K$ to model the Riemann zeta-function in the right half of the critical strip. However, without this abstract background, this idea appeared already before in the theory of vertical limit functions for Dirichlet series.\par For $t\in\mathbb{R}$, let $e_t$ denote the element of $K$ which is given by $$ e_t (\gamma) = e^{-it\gamma}, \qquad \gamma\in\Gamma. $$ Let $\gamma\in\Gamma$. Then, we denote by $\chi_{\gamma}$ the character of $K$ that satisfies $\chi_{\gamma}(x)= x(\gamma)$ for $x\in K$. Let $l>0$ such that $\frac{2\pi}{l}\in\Gamma$. We define $$ K_{2\pi/l} := \left\{x\in K \, : \, \chi_{2\pi/l}(x)=1\right\}. $$ It can be seen easily that $K_{2\pi/ l}$ is a compact subgroup of $K$. Let $\pmb{\tau}$ denote the uniquely determined Haar measure on $K_{2\pi/ l}$ that satisfies the normalization $\pmb{\tau}(K_{2\pi/l})=1$. Via the map $$ h: K_{2\pi/ l}\times [0, l) \rightarrow K , \qquad (y,u)\mapsto y + e_u , $$ we can identify $K_{2\pi/ l}\times [0, l)$ with $K$ group theoretically, topologically (if we identify the left end point $0$ of the interval $[0,l)$ with $l$) and measure-theoretically. Here, the measure $\pmb{\sigma}$ on $K$ corresponds to the measure $\pmb{\tau}\times \frac{1}{l}\mbox{\ d} t$ on $K_{2\pi/l}\times [0,l)$. 
\section{An ergodic flow on \texorpdfstring{$K$}{} and a special version of the ergodic theorem}\label{sec:ergodicflow} In this section we consider certain ergodic processes on $K_{2\pi/l}$ and $K$. Ergodic theory studies the long term average behaviour of dynamical systems. For basic definitions and results in ergodic theory, the reader is referred to Dajani \& Dirksin \cite{dajanidirksin:2008}, Steuding \cite{steuding:2010} and Cornfeld, Fomin \& Sinai \cite[Chapt. 3, \S 1]{cornfeldfominsinai:1982}. Here, we shall work essentially with the Birkhoff-Khinchine ergodic theorem.\par We define the map $T:K_{2\pi/l}\rightarrow K_{2\pi/l}$ by $$ Ty := y+e_l. $$ For $n\in \mathbb{N}$, we set $$ T^{n}y := \underbrace{(T\circ ... \circ T)}_{n\scalebox{0.7}{\mbox{-times}}} y = y +ne_l $$ It is well-known that the system $(T,K_{2\pi/l})$ is uniquely ergodic where the unique $T$-invariant probability measure is given by $\pmb{\tau}$. For details, we refer to Cornfeld, Fomin \& Sinai \cite[Chapt. 3, \S 1]{cornfeldfominsinai:1982}. The proof relies essentially on a theorem of Kronecker which states the following: \begin{theorem}[Theorem of Kronecker]\label{th:kronecker} Let $\theta_1$,...,$\theta_M\in\mathbb{R}$ such that the numbers $1,\theta_1$,...,$\theta_M$ are linearly independent over $\mathbb{Q}$. Furthermore, let $\alpha_1$,...,$\alpha_M\in \mathbb{R}$, $\varepsilon>0$ and $N\in\mathbb{N}$. Then, there exist $n, q_1,...,q_M\in\mathbb{N}$ with $n>N$ such that $$ \left|n\theta_m - q_m -\alpha_m \right|< \varepsilon, \qquad m=1,...,M. $$ \end{theorem} A proof of Kronecker's theorem can be found in Hardy \& Wright \cite[Chapt. 23]{hardywright:1979}. \par Let $f\in L^1_{\pmb{\tau}}(K_{2\pi/l})$. 
The Birkhoff-Khinchine ergodic theorem implies that, for $\pmb{\tau}$-almost every $y\in K_{2\pi/l}$, \begin{equation}\label{ergodic} \lim_{N\rightarrow\infty}\frac{1}{N}\sum_{n=1}^{N} f\left( T^n y \right) = \int_{K_{2\pi/l}} f \mbox{\ d}\pmb{\tau}; \end{equation} see Cornfeld, Fomin \& Sinai \cite[Chapt. 1, \S 2]{cornfeldfominsinai:1982}. Since $T$ is \emph{uniquely} ergodic, the formula \eqref{ergodic} holds even for every $y\in K_{2\pi/l}$, if $f$ is continuous on $K_{2\pi/l}$; see Cornfeld, Fomin \& Sinai \cite[Chapt. 1, \S 8]{cornfeldfominsinai:1982}.\par For $t\in\mathbb{R}$, we define the map $T_t : K \rightarrow K$ by $$ T_t x = x + e_t. $$ The set $\{T_t\}_{t\in\mathbb{R}}$ forms a one-parameter group of homeomorphisms of $K$. It is well-known that the flow $(\{T_t\}_{t\in \mathbb{R}}, K)$ is uniquely ergodic. Its associated unique invariant probability measure is given by $\pmb{\sigma}$; see Cornfeld, Fomin \& Sinai \cite[Chapt. 3, \S 1]{cornfeldfominsinai:1982}. The Birkhoff-Khinchine ergodic theorem implies that \begin{equation}\label{ergodic2} \lim_{T\rightarrow\infty}\frac{1}{T}\int_0^T f\left( T_t x \right) \mbox{\ d} t = \int_K f \mbox{\ d}\pmb{\sigma} \end{equation} holds for $\pmb{\sigma}$-almost every $x\in K$, if $f\in L^1_{\pmb{\sigma}}(K)$, and for every $x\in K$, if $f$ is continuous on $K$.\par The maps $T$ and $T_t$ are strongly connected to one another. 
If we identify $K$ with $K_{2\pi/l}\times [0,l)$ as described in the preceding section, the map $T_t$ is represented on $K_{2\pi/l}\times [0,l)$ by $$\mathcal{T}_t:K_{2\pi/l}\times[0,l)\rightarrow K_{2\pi/l}\times[0,l), \qquad (y,u)\mapsto (T^{N_t}y, t+u-N_t l) $$ where $$ N_t := \left[\frac{t+u}{l} \right] $$ with $[x]$ denoting the largest integer not exceeding $x\in\mathbb{R}$.\par For $y\in K_{2\pi/ l}$, $l>0$ and $J\subset \mathbb{N}$, we define \begin{equation}\label{EJ} E_{l,y}(J):= \overline{\left\{T^n y \, : \, n\in J\right\}} \subset K_{2\pi/ l}, \end{equation} where the closure is taken with respect to the topology of $K_{2\pi/l}$. As every closed set in a compact space is compact, we deduce immediately that $E_{l,y}(J)$ is compact. \par Further, for a subset $J\subset \mathbb{N}$, we define its upper density by $$ \mbox{\ d}ens^* J = \limsup_{N\rightarrow\infty} \frac{\# \left(J\cap [1,N] \right) }{N} $$ and its lower density by $$ \mbox{\ d}ens_* J = \liminf_{N\rightarrow\infty} \frac{\# \left(J\cap [1,N] \right) }{N}. $$ If $\mbox{\ d}ens^* J = \mbox{\ d}ens_* J=d$, we say that $J$ has density $d$ and write $\mbox{\ d}ens\ J := d$. \par The following lemma is due to Tanaka \cite[Lemma 3.1]{tanaka:2008} and relies essentially on the ergodicity of $T$. \begin{lemma}[Tanaka, 2008]\label{lem:tanaka1} Let $l>0$, $y\in K_{2\pi/l}$ and $J\subset \mathbb{N}$. Then, $$ \mbox{\ d}ens^* (J) \leq \pmb{\tau}\left( E_{l,y}(J)\right). $$ \end{lemma} For a proof, we refer to Tanaka \cite[Lemma 3.1]{tanaka:2008}. The following refinement of Lemma \ref{lem:tanaka1} is also due to Tanaka \cite[Lemma 3.2]{tanaka:2008} and yields a modified version of the Birkhoff-Khinchine ergodic theorem. \begin{lemma}[Tanaka, 2008]\label{lem:Tanakaergodic} Let $l>0$, $y\in K_{2\pi/ l}$ and $J\subset \mathbb{N}$. 
Suppose that, for any $\varepsilon>0$, there exists a subset $J_{\varepsilon}\subset \mathbb{N}\setminus J$ such that $$ \pmb{\tau} \left(E_{l,y}(J)\cap E_{l,y}(J_{\varepsilon}) \right) = 0 \qquad \mbox{and }\qquad \mbox{\ d}ens^* (\mathbb{N}\setminus(J\cup J_{\varepsilon})) < \varepsilon. $$ Let $p$ be a function on $K_{2\pi/ l}$ which is continuous on $E_{l,y}(J)$ and zero on $K_{2\pi/l}\setminus E_{l,y}(J)$. Then, $$ \lim_{N\rightarrow\infty} \frac{1}{N}\sum_{n=0}^{N} p(T^n y) = \int_{E_{l,y}(J)} p(y)\mbox{\ d} \pmb{\tau} $$ and $$ \mbox{\ d}ens (J) = \pmb{\tau}(E_{l,y}(J)). $$ \end{lemma} For a proof, we refer again to Tanaka \cite[Lemma 3.2]{tanaka:2008}. \section{Extension of Dirichlet series to functions on the infinite dimensional torus} In this section we outline the basic principles to study Dirichlet series as a function on the infinite dimensional torus. In our notation, we follow mainly Tanaka \cite{tanaka:2008}.\par Let $l>0$ be a fixed parameter and $\Lambda_P = \{\log p \, : \, p\in\mathbb{P}\}$. From now on, let $\Gamma$, $K$ and $K_{2\pi/l}$ be the groups of Section \ref{sec:K} that we obtain for the special choice of $$ \Lambda = \begin{dcases*} \Lambda_P\cup\{\tfrac{2\pi}{l}\} & if $l\notin \{2\pi k(\log \frac{n}{m})^{-1} \, : \, k,n,m\in\mathbb{N}, n\neq m\}$,\\ \Lambda_P & otherwise. \end{dcases*} $$ The fundamental theorem of arithmetic and the transcendence of $\pi$ assure that the elements of $\Lambda$ are linearly independent over $\mathbb{Q}$.\par In the sequel, let $\mathcal{L}(s)$ denote a Dirichlet series of the form \begin{equation}\label{dirichlet1} \mathcal{L}(s) = \sum_{n=1}^{\infty} \frac{a(n)}{n^s}. \end{equation} To a given Dirichlet series $\mathcal{L}(s)$, we assign a set of allied series which we define formally by \begin{equation}\label{def:ext} L(s,x):= \sum_{n=1}^{\infty} \frac{a(n)}{n^{s}} \chi_{\log n} (x) \qquad \mbox{ with }x\in K. 
\end{equation} There are some fundamental relations between $\mathcal{L}(s)$ and $L(s,x)$. For $\sigma+it\in\mathbb{C}$ and $x\in K$, we have $$ L(\sigma+it,x) = \sum_{n=1}^{\infty} \frac{a(n)}{n^{\sigma}} e^{-it\log n} \chi_{\log n} (x) = \sum_{n=1}^{\infty} \frac{a(n)}{n^{\sigma}} \chi_{\log n}(e_t)\chi_{\log n} (x) $$ $$ = \sum_{n=1}^{\infty} \frac{a(n)}{n^{\sigma}} \chi_{\log n}(x+e_t) = L(\sigma, x+e_t). $$ Moreover, let $x_0\in K$ denote the principal character in $K$ which is given by $$ x_0(\gamma)=1 \qquad \mbox{ for }\gamma\in\Gamma. $$ Then, the relation $$ \mathcal{L}(\sigma+it) = L(\sigma+it,x_0) = L(\sigma,e_t) $$ holds for every $\sigma+it\in\mathbb{C}$. Roughly speaking, these identities allow us to model $$ \mathbb{R}\ni t\mapsto \mathcal{L}(\sigma+it), $$ for a properly chosen $\sigma\in\mathbb{R}$, as an ergodic flow on $K$.\par Formally, $L(s,x)$ defines a function on $\mathbb{C}\times K$ which we denote by $L$, i.e. \begin{equation}\label{L} L : (s,x) \mapsto L(s,x), \qquad (s,x)\in \mathbb{C}\times K. \end{equation} Later on, it will be convenient to fix $x\in K$ and to consider $L$ as a function on $\mathbb{C}$. For this purpose, we define formally the function $L_x$ on $\mathbb{C}$ via \begin{equation}\label{Lx} L_x : s\mapsto L_x (s):=L(s,x), \qquad s\in \mathbb{C}, \end{equation} where we consider $x\in K$ as a fixed parameter. Similarly, it will be useful at some places to fix $s\in \mathbb{C}$ and to regard $L$ as a function on $K$. For this purpose, we define formally the function $L_s$ on $K$ by \begin{equation}\label{Ls} L_s : x\mapsto L_s (x):=L(s,x), \qquad x\in K, \end{equation} where $s\in\mathbb{C}$ is considered as a fixed parameter.\par Firstly, we shall consider the Dirichlet series expansion of the functions $L_x$, $x\in K$, attached to given Dirichlet series $\mathcal{L}(s)$ by means of \eqref{Lx}. 
\begin{lemma}\label{lem:ext1} Let $\mathcal{L}(s)$ be a Dirichlet series, $\sigma_a$ its abscissa of absolute convergence and $\sigma_u$ its abscissa of uniform convergence. Then, the following statements are true. \begin{itemize} \item[(a)] For every $x\in K$, the abscissa of absolute convergence of the Dirichlet series expansion of $L_x$ coincides with $\sigma_a$. \item[(b)] For every $x\in K$, the abscissa of uniform convergence of the Dirichlet series expansion of $L_x$ coincides with $\sigma_u$. \end{itemize} \end{lemma} \begin{proof}[Sketch of the proof:] Statement (a) follows directly from the observation that $|\chi_{\log n}(x)|=1$ for $x\in K$ and $n\in\mathbb{N}$. We continue to sketch a proof of statement (b). For any $\sigma_0>\sigma_u$ and any $\varepsilon>0$, we find an integer $N_0\in\mathbb{N}$ such that the inequality $$ \left| \mathcal{L}(s)-\sum_{n=1}^{N}\frac{a(n)}{n^s} \right| < \varepsilon $$ holds for every ${\rm{Re} } \ s\geq \sigma_0$ and arbitrary $N\geq N_0$. Let $p_1,...,p_m$ denote the prime numbers less than or equal to $N$ in ascending order. Further, let $\mathbb{T}^{m}$ denote the direct product of $m$ copies of the unit circle. Let $S_m:\mathbb{T}^{m}\times \mathbb{C}\rightarrow\mathbb{C}$ be defined by \begin{equation}\label{defS} S_m(\theta_1,...,\theta_m,s) := \sum_{n=1}^{N}\frac{a(n)}{n^s}\phi_{\log n}(\theta_1,...,\theta_m), \end{equation} where $$ \phi_{\log n}(\theta_1,...,\theta_m) = \theta_1^{\nu(n;p_1)}\cdots \theta_m^{\nu(n;p_m)} $$ with $\nu(n;p_j)$ being the uniquely determined quantities for which $$ n= \prod_{j=1}^m p_j^{\nu(n;p_j)}. $$ Certainly, $S_m$ is continuous on $\mathbb{T}^{m}\times \mathbb{C}$. According to the theorem of Kronecker (Theorem \ref{th:kronecker}), the set $$ \{e^{-it\log p} \, : \, t\in\mathbb{R}\} $$ is dense in $\mathbb{T}^m$. 
From this observation and the continuity of $S_m$, we deduce that, for any $\sigma\in\mathbb{R}$, $$ \sup \left\{\sum_{n=1}^{N}\frac{a(n)}{n^{\sigma+it}} \, : \, t\in\mathbb{R} \right\} = \sup \left\{\sum_{n=1}^{N}\frac{a(n)\phi_{\log n}(\pmb{\theta})}{n^{\sigma}} \, : \, \pmb{\theta}\in \mathbb{T}^m \right\}. $$ The latter identity and the topological correspondence of $\mathbb{T}^{\infty}$ and $K$ allow us to conclude that, for every $x\in K$, $N\geq N_0$ and $\sigma\geq \sigma_0$, $$ \left| L_x(s)-\sum_{n=1}^{N}\frac{a(n)}{n^s} \right| \leq \varepsilon. $$ Statement (b) follows. \end{proof} The following lemma gathers some fundamental analytic properties of the functions $L$ and $L_x$, $x\in K$, attached to a given Dirichlet series $\mathcal{L}(s)$. \begin{lemma}\label{lem:analyticpropertiesofLinU} Let $\mathcal{L}(s)$ be a Dirichlet series and $U$ its half-plane of uniform convergence. Then, the following statements are true. \begin{itemize} \item[(a)] For every $x\in K$, the function $L_x$ is an analytic function in $U$. \item[(b)] The function $L$ is continuous on $U\times K$. \end{itemize} \end{lemma} \begin{proof} Statement (a) follows immediately from Lemma \ref{lem:ext1} (a). Statement (b) can be deduced from the continuity of the function $S_m$ on $\mathbb{T}^m\times \mathbb{C}$, defined by \eqref{defS}, and the topological correspondence of $\mathbb{T}^{\infty}$ and $K$. \end{proof} The analytic and probabilistic relevance of the functions $L_x$, $x\in K$, lies in the fact that they appear as vertical limit functions of $\mathcal{L}$ in its half-plane $U$ of uniform convergence. This observation dates back to Bohr \cite{bohr:1922}. In particular, we have $$ \overline{\left\{L_{e_{\tau}} \, : \, \tau\in\mathbb{R} \right\}} = \{L_x \, : \, x\in K\} \subset \mathcal{H}(U) $$ and, for arbitrary $l>0$, $$ \overline{\left\{L_{ne_{l}} \, : \, n\in\mathbb{N} \right\}} = \{L_y \, : \, y\in K_{2\pi/l}\} \subset \mathcal{H}(U). 
$$ Here, $\mathcal{H}(U)$ denotes the set of analytic functions on $U$ and the closures are taken with respect to the topology of uniform convergence on compact subsets of $U$, respectively. Observe further that the relations $$ L_{e_{\tau}}(s)= \mathcal{L}(s+i\tau) \qquad \mbox{and} \qquad L_{ne_l}(s) = \mathcal{L}(s+inl) $$ hold for $s\in U$.\par In the half-plane $U$ the analytic behaviour of $\mathcal{L}$ and its allied functions $L_x$, $x\in K$, is quite well-understood. Things are getting more interesting if $\mathcal{L}$ can be continued analytically beyond $U$. \section{The space \texorpdfstring{$\mathscr{H}^2$}{} of Dirichlet series with square summable coefficients}\label{sec:DirichletH2} Hedenmalm, Lindqvist \& Seip \cite{hedenmalmlindqvistseip:1997} investigated a Hilbert space $\mathscr{H}^2$ of Dirichlet series which have square-summable coefficients. This space can be considered as an analogue for Dirichlet series of the Hardy space $H^2(\mathbb{T})$ for Fourier series.\par Since we work with the Ramanujan hypothesis, we normalize the space $\mathscr{H}^2$ in a slightly different way than it was done by Hedenmalm, Lindqvist \& Seip \cite{hedenmalmlindqvistseip:1997}. We define that a Dirichlet series $$ \mathcal{L}(s)=\sum_{n=1}^{\infty} \frac{a(n)}{n^{s}} $$ belongs to the space $\mathscr{H}^2$ if and only if $$ \sum_{n=1}^{\infty} \frac{|a(n)|^2}{n^{\sigma}} <\infty \qquad \mbox{ for every }\sigma>1. $$ If $\mathcal{L}(s)\in\mathscr{H}^2$, then also the Dirichlet series expansions of the functions $L_x$, $x\in K$, attached to $\mathcal{L}(s)$ via \eqref{def:ext}, are elements of $\mathscr{H}^2$. The Ramanujan hypothesis is a sufficient condition for $\mathcal{L}(s)$ to lie in $\mathscr{H}^2$. 
If $\mathcal{L}(s)$ satisfies the Ramanujan hypothesis, we deduce from our considerations in Section \ref{sec:coeff} that several Dirichlet series related to $\mathcal{L}(s)$ lie also in $\mathscr{H}^2$, for example $\mathcal{L}(s)^k$ and $\mathcal{L}^{(\ell)}(s)$ with $k,\ell\in\mathbb{N}_0$. If $\mathcal{L}(s)$ satisfies the Ramanujan hypothesis and, additionally, $\mathcal{L}(s)$ can be written as a polynomial Euler product in $\sigma>1$, then we find also $\log \mathcal{L}(s)$ and $\mathcal{L}(s)^{\kappa}$ with $\kappa\in\mathbb{R}$ in $\mathscr{H}^2$. Next, we shall consider analytic properties of the functions $L_x$, $x\in K$, attached to a given Dirichlet series in $\mathscr{H}^2$ by \eqref{Lx}. The subsequent lemma follows directly from the H\"older inequality $$ \sum_{n=1}^{\infty} \frac{|a(n)|}{n^{\sigma}} \leq \left( \sum_{n=1}^{\infty} \frac{|a(n)|^2}{n^{\sigma}} \right)^{1/2} \cdot \left(\sum_{n=1}^{\infty} \frac{1}{n^{\sigma}}\right)^{1/2}, \qquad \sigma>1. $$ \begin{lemma}\label{lem:analyticH2} Let $\mathcal{L}(s)\in\mathscr{H}^2$. Then, the following statements are true. \begin{itemize} \item[(a)] For $x\in K$, the Dirichlet series expansion of $L_x$ converges absolutely in $\sigma>1$. \item[(b)] For $x\in K$, the function $L_x$ is analytic in $\sigma>1$. 
\end{itemize} \end{lemma} The functions $L_{\sigma}$ with $\sigma>\frac{1}{2}$ which are attached to a given Dirichlet series $\mathcal{L}(s)\in\mathscr{H}^2$ by means of \eqref{Ls} lie in the space $L^2_{\pmb{\sigma}}(K)$ of the compact group $K$.\footnote{More precisely, the functions $L_{\sigma}$ with $\sigma>\frac{1}{2}$, are contained in the Hardy space $H^2_{\pmb{\sigma}}(K)\subset L^2_{\pmb{\sigma}}(K)$ of the compact group $K$; for a definition of Hardy spaces of compact groups with ordered duals, we refer to Tanaka \cite{tanaka:2008}.} According to Plancherel's theorem we obtain that, for $\sigma>\frac{1}{2}$, \begin{equation*}\label{plan} \int_K \left|L_{\sigma}(x) \right|^2 \mbox{\ d}\pmb{\sigma} = \sum_{n=1}^{\infty}\frac{|a(n)|^2}{n^{2\sigma}}. \end{equation*} By the unique ergodicity of the flow $\{T_t\}_{t\in\mathbb{R}}$ and the Birkhoff-Khinchine ergodic theorem, we get that, for every $\sigma>\frac{1}{2}$ and $\pmb{\sigma}$-almost every $x\in K$, $$ \lim_{T\rightarrow\infty} \frac{1}{2T}\int^T_{-T} \left| L_{x}(\sigma+it)\right|^2 \mbox{\ d} t = \int_K \left|L_{\sigma}(x) \right|^2 \mbox{\ d}\pmb{\sigma}. $$ These observations allow to retrieve information on the $\pmb{\sigma}$-almost sure behaviour of the functions $L_x$, $x\in K$, in the half-plane $\sigma>\frac{1}{2}$. The next theorem gathers fundamental results in this direction. \begin{theorem}[Helson, Steuding]\label{th:almostsurebehaviour} Let $\mathcal{L}(s)\in\mathscr{H}^2$. Then, there are subsets $E_1,E_2,E_3\subset K$ with $\pmb{\sigma}(E_1) = \pmb{\sigma}(E_2) = \pmb{\sigma}(E_3) = 1$ such that the following statements hold: \begin{itemize} \item[(a)] For $x\in E_1$, the Dirichlet series expansion of $L_{x}$ converges in the half-plane $\sigma>\frac{1}{2}$. \item[(b)] For $x\in E_2$, the function $L_x$ can be continued analytically to the half-plane $\sigma>\frac{1}{2}$. 
\item[(c)] For $x\in E_3$, the mean-square of $L_x$ is given by $$ \lim_{T\rightarrow\infty}\frac{1}{2T} \int_{-T}^T \left|L_x(\sigma+it) \right|^{2}\mbox{\ d} t = \sum_{n=1}^{\infty} \frac{|a(n)|^2}{n^{2\sigma}} \qquad \mbox{for every }\sigma>\tfrac{1}{2}. $$ \end{itemize} Suppose, additionally, that $\mathcal{L}(s)\in\mathscr{H}^2$ satisfies the Ramanujan hypothesis. Then, there exists a subset $E_4 \subset K$ with $\pmb{\sigma}(E_4)=1$ such that the following statements hold: \begin{itemize} \item[(d)] For $x\in E_4$, $$ \lim_{T\rightarrow\infty}\frac{1}{2T} \int_{-T}^T \left|L_x(\sigma+it) \right|^{2k}\mbox{\ d} t = \sum_{n=1}^{\infty} \frac{|a_k(n)|^2}{n^{2\sigma}} \qquad \mbox{for every }\sigma>\tfrac{1}{2} \mbox{ and }k\in\mathbb{N} , $$ where the $a_k(n)$ denote the coefficients of the Dirichlet series expansion of $\mathcal{L}^k$ in $\sigma>1$. \item[(e)] For $x\in E_4$ and $\sigma>\frac{1}{2}$, the asymptotic estimate $$ L_x(\sigma+it) \ll_{\sigma,\varepsilon} |t|^{\varepsilon} $$ is true for any $\varepsilon>0$, as $|t|\rightarrow\infty$. \end{itemize} Suppose that $\mathcal{L}(s)\in\mathscr{H}^2$ satisfies the Ramanujan hypothesis and can be written as a polynomial Euler product. Then, there exist subsets $E_5, E_6 \subset K$ with $\pmb{\sigma}(E_5) = \pmb{\sigma}(E_6)=1$ such that the following assertions hold. \begin{itemize} \item[(f)] For $x\in E_5$, the function $L_x$ is analytic and non-vanishing in $\sigma>\frac{1}{2}$. \item[(g)] For $x\in E_6$ and $\kappa\in\mathbb{R}$, $$ \lim_{T\rightarrow\infty}\frac{1}{T} \int_{0}^T \left|L_x(\sigma+it) \right|^{2\kappa}\mbox{\ d} t = \sum_{n=1}^{\infty} \frac{|a_{\kappa}(n)|^2}{n^{2\sigma}} \qquad \mbox{for every }\sigma>\tfrac{1}{2}, $$ where $a_{\kappa}(n)$ denote the coefficients of the Dirichlet series expansion of $\mathcal{L}^{\kappa}$ in $\sigma>1$. 
\end{itemize} Suppose that $\mathcal{L}(s)\in\mathscr{H}^2$ satisfies the Ramanujan hypothesis, can be written as a polynomial Euler product and satisfies the prime mean-square condition (S.6). Then, there exist subsets $E_7, E_8 \subset K$ with $\pmb{\sigma}(E_7) = \pmb{\sigma}(E_8)=1$ such that the following holds. \begin{itemize} \item[(h)] For $x\in E_7$, the function $L_x$ is analytic in $\sigma>\frac{1}{2}$ and universal in the sense of Voronin inside the strip $\frac{1}{2}<\sigma<1$. \item[(i)] For $x\in E_8$, the function $L_x$ has a convergent Dirichlet series expansion in $\sigma>\frac{1}{2}$ and the set $\{L_x \, : \, x\in E_8 \}$ lies dense in the set of all non-vanishing analytic functions in $\frac{1}{2}<\sigma<1$, with respect to the topology of uniform convergence on compact subsets. \end{itemize} \end{theorem} Statements (a)-(c) follow directly from Helson \cite{helson:1969}. We refer also to Hedenmalm, Lindqvist \& Seip \cite{hedenmalmlindqvistseip:1997} for a slightly different proof of statement (c). Statement (d) can be deduced from (c) by observing that $\mathcal{L}(s)^k\in\mathscr{H}^2$ for $k\in\mathbb{N}$, if $\mathcal{L}(s)$ satisfies the Ramanujan hypothesis (see Theorem \ref{th:sumprodL}), and that the union of countably many sets $E\subset K$ with $\pmb{\sigma}(E)=0$ is again a set of $\pmb{\sigma}$-measure zero. Statement (e) can be deduced from (d) by standard methods; see Titchmarsh \cite[\S 13.1]{titchmarsh:1986}. If $\mathcal{L}(s)$ satisfies the Ramanujan hypothesis and can be written as a polynomial Euler product, then both $\mathcal{L}(s)$ and $\mathcal{L}(s)^{-1}$ lie in $\mathscr{H}^2$; see Theorem \ref{th:Lkappa}. This together with (b) yields statement (f). Statement (g) follows from (c) and Theorem \ref{th:Lkappa}. Statements (h) and (i) were proved by Steuding \cite[Chapt. 5]{steuding:2007}. 
We can interpret statements (e) and (f) as follows: under quite general assumptions on a Dirichlet series $\mathcal{L}(s)\in\mathscr{H}^2$, almost all of its attached functions $L_x$, $x\in K$, satisfy an analogue of the Lindel\"of hypothesis or an analogue of the Riemann hypothesis. However, for a particular function $L_x$, it seems very difficult to decide whether it lies in the exceptional zero-sets of Theorem \ref{th:almostsurebehaviour} or not. In fact, the zero-sets are not negligible at all: in the case of the Riemann zeta-function Tanaka \cite[\S 2]{tanaka:2008} showed that one can find $x\in K$ such that $\zeta_x$ has zeros and poles at prescribed points in $\frac{1}{2}<\sigma<1$. Moreover, for any $\frac{1}{2}<\sigma_0<1$, there exist $x\in K$ such that $\zeta_x$ is meromorphic in $\sigma>\sigma_0$ but does not extend meromorphically to a larger half-plane. \par In the next section we shall transfer almost-sure properties of the family $\{L_x\}_{x\in K}$ to special functions in $\{L_x\}_{x\in K}$.\par We conclude with a further observation. Let $\mathcal{L}(s)\in\mathscr{H}^2$. If $$l\notin\Gamma_{P}:= \{2\pi k(\log\tfrac{n}{m})^{-1} \, : \, k,n,m\in\mathbb{N}, n\neq m\},$$ then Plancherel's theorem \eqref{plancherel} together with \eqref{fouriert} and \eqref{fourierorth} yields that \begin{equation}\label{plan1} \int_{K_{2\pi/l}} \left|L_{\sigma}(y) \right|^2 \mbox{\ d}\pmb{\tau} = \int_K \left|L_{\sigma}(y) \right|^2 \mbox{\ d}\pmb{\sigma} = \sum_{n=1}^{\infty}\frac{|a(n)|^2}{n^{2\sigma}} \end{equation} and \begin{equation}\label{plan2} \int_{K_{2\pi/l}} L_{\sigma}(y) \mbox{\ d}\pmb{\tau} = \int_K L_{\sigma}(y) \mbox{\ d}\pmb{\sigma} = a(1), \end{equation} where the $a(n)$ are the coefficients of the Dirichlet series expansion of $\mathcal{L}$. It is slightly more delicate to evaluate the integrals above if $l\in \Gamma_P$. In the next lemma, we discuss the case if $l\in \Gamma_P$ has a very simple form. 
The works of Reich \cite{reich:1980-2} and Good \cite{good:1978} offer methods to handle the case of arbitrary $l\in \Gamma_P$. \begin{lemma}\label{lem:lLambda} Let $\mathcal{L}(s)$ be a Dirichlet series which satisfies the Ramanujan hypothesis and can be written as a polynomial Euler product. Let $l>0$ be of the form $$ l= \frac{2\pi k}{\log p} \qquad \mbox{ with }p\in\mathbb{P} \mbox{ and } k\in\mathbb{N} . $$ Then, for $\sigma>\frac{1}{2}$ and $\kappa\in\mathbb{R}$, $$ \int_{K_{2\pi/l}} \left|L_{\sigma}(y) \right|^{2\kappa} \mbox{\ d}\pmb{\tau} = \left| \prod_{j=1}^m \left(1-\frac{\alpha_j(p)}{p^{\sigma}}\right)^{-2\kappa} \right| \cdot \sum_{\begin{subarray}{c}n\in \mathbb{N}\\ p\nmid n \end{subarray}} \frac{|a_{\kappa}(n)|^2}{n^{2\sigma}} $$ and $$ \int_{K_{2\pi/l}} L_{\sigma}(y)^{\kappa} \mbox{\ d}\pmb{\tau} = a_{\kappa}(1) \cdot \prod_{j=1}^m \left(1-\frac{\alpha_j(p)}{p^{\sigma}}\right)^{-\kappa}, $$ where the $a_{\kappa}(n)$ denote the Dirichlet series coefficients of $\mathcal{L}^{\kappa}$ and $\alpha_j(p)$ the local roots of the polynomial Euler product of $\mathcal{L}$. \end{lemma} \begin{proof} By the definition of $K_{2\pi/l}$, we get that $\chi_{\log p}(y) = 1$ for $y\in K_{2\pi/l}$. Hence, due to the Euler product representation, we write $$ L_{\sigma}(y) = \prod_{j=1}^m \left(1-\frac{\alpha_j(p)}{p^{\sigma}}\right)^{-1}\cdot \sum_{\begin{subarray}{c}n\in \mathbb{N}\\ p\nmid n \end{subarray}} \frac{a(n)\chi_{\log n}(y)}{n^{\sigma}}, \qquad \sigma>1 $$ where the $a(n)$ denote the Dirichlet series coefficients of $\mathcal{L}$. Thus, in particular, $$ L_{\sigma}^{\kappa}(y) = \prod_{j=1}^m \left(1-\frac{\alpha_j(p)}{p^{\sigma}}\right)^{-\kappa}\cdot \sum_{\begin{subarray}{c}n\in \mathbb{N}\\ p\nmid n \end{subarray}} \frac{a_{\kappa}(n)\chi_{\log n}(y)}{n^{\sigma}}, \qquad \sigma>1. 
$$ By Theorem \ref{th:Lkappa} and Plancherel's theorem, we obtain that $$ \int_{K_{2\pi/l}} \left| \prod_{j=1}^m \left(1-\frac{\alpha_j(p)}{p^{\sigma}}\right)^{\kappa} \cdot L_{\sigma}(y)^{\kappa} \right|^2 \mbox{\ d}\pmb{\tau} = \sum_{\begin{subarray}{c}n\in \mathbb{N}\\ p\nmid n \end{subarray}} \frac{|a_{\kappa}(n)|^2}{n^{2\sigma}}, \qquad\sigma>\tfrac{1}{2}, $$ and $$ \int_{K_{2\pi/l}} \left( \prod_{j=1}^m \left(1-\frac{\alpha_j(p)}{p^{\sigma}}\right)^{\kappa} \cdot L_{\sigma}(y)^{\kappa} \right) \mbox{\ d}\pmb{\tau} = a_{\kappa}(1), \qquad\sigma>\tfrac{1}{2}. $$ As the factor $\prod_{j=1}^m \left(1-\frac{\alpha_j(p)}{p^{\sigma}}\right)^{\kappa}$ does not depend on $y$, the statement of the lemma follows. \end{proof} \chapter{The class \texorpdfstring{$\mathcal{N}$}{N} and vertical limit functions}\label{ch:classN} In this chapter we define the class $\mathcal{N}(u)$ with $u\in[\frac{1}{2},1)$. Roughly speaking, the class $\mathcal{N}(u)$ gathers all functions which have a Dirichlet series expansion in $\mathscr{H}^2$ and satisfy a certain normality feature in the half-plane $\sigma>u$. In Chapter \ref{ch:probmom} we shall see that, for every function $\mathcal{L}\in\mathcal{N}(u)$, its mean-square exists in a certain measure-theoretical sense on vertical lines in the half-plane $\sigma>u$. \section{The class \texorpdfstring{$\mathcal{N}$}{N} and its elements}\label{sec:classN} In the sequel, we shall work with certain half-strips $Q_n(\alpha,l)$ and certain compact rectangular regions $\mathcal{R}_n(\alpha,l)$. 
For $n\in\mathbb{Z}$ and $\alpha,l \in\mathbb{R}$ with $\alpha,l>0$, we define $Q_n(\alpha,l)\subset \mathbb{C}$ to be the open horizontal half-strip $$ Q_n(\alpha, l):= \left\{ \sigma + it \in \mathbb{C} \, : \, \sigma>\alpha, \, (n-1)l < t < (n+2) l \right\} $$ and $\mathcal{R}_n(\alpha, l)\subset \mathbb{C}$ to be the compact rectangular region \begin{equation}\label{def:Rn} \mathcal{R}_n(\alpha, l):= \left\{ \sigma + it \in \mathbb{C} \, : \, \alpha\leq \sigma \leq 2, \, nl \leq t \leq (n+1)l \right\}. \end{equation} Furthermore, we set \begin{equation}\label{def:Q} Q(\alpha, l ):= Q_0(\alpha,l) = \left\{ \sigma + it \in \mathbb{C} \, : \, \sigma>\alpha, \, -l < t < 2 l \right\} \end{equation} and \begin{equation}\label{def:R} \mathcal{R}(\alpha,l):=\mathcal{R}_0(\alpha, l):= \left\{ \sigma + it \in \mathbb{C} \, : \, \alpha\leq \sigma \leq 2, \, 0 \leq t \leq l \right\}. \end{equation} For an illustration, we refer to Figure \ref{fig:RQ}. \begin{figure} \caption{The half-strip $Q(\alpha,l)$ and the compact rectangular set $\mathcal{R}(\alpha,l)$.} \label{fig:RQ} \end{figure} {\bf Definition of the class $\mathcal{N}(u)$.} Let $u\in[\frac{1}{2},1)$ and $\mathbb{H}_1$ denote the half-plane $\sigma>1$. A function $\mathcal{L}:\mathbb{H}_1\rightarrow \mathbb{C}$ belongs to the class $\mathcal{N}(u)$ if it satisfies the properties (N.1) and (N.2) stated below. \begin{itemize} \item[(N.1)] {\it Dirichlet series expansion in $\mathscr{H}^2$.} In the half-plane $\sigma>1$, the function $\mathcal{L}$ has a Dirichlet series expansion that is an element of $\mathscr{H}^2$, i.e. $$ \mathcal{L}(s) = \sum_{n=1}^{\infty} \frac{a(n)}{n^s}, \qquad \sigma>1, $$ with coefficients $a(n)\in\mathbb{C}$ satisfying $$ \sum_{n=1}^{\infty} \frac{|a(n)|^2}{n^{\sigma}}< \infty\qquad \mbox{for}\qquad \sigma>1. $$ \item[(N.2)] {\it Analytic continuation and normality.} Let $L_x$, $x\in K$, denote the functions associated to $\mathcal{L}$ by means of \eqref{Lx}. 
For any real numbers $\alpha$, $l$ and $\varepsilon$ with $\alpha\in(u,1]$ and $\varepsilon,l>0$, there exists a subset $J_{\varepsilon}:=J(\alpha,l,\varepsilon,\mathcal{L})\subset\mathbb{N}$ with $\mbox{\ d}ens_* J_{\varepsilon}>1-\varepsilon$ such that the following holds. \begin{itemize} \item[(N.2a)] For every $n\in J_{\varepsilon}$, the function $L_{ne_l}$ extends to an analytic function on the domain $ Q(\alpha,l)$. \item[(N.2b)] The family $\{L_{ne_l}\}_{n\in J_{\varepsilon}}$ is normal in $Q(\alpha,l)$. \end{itemize} \end{itemize} Let $\mathcal{L}\in\mathcal{N}(u)$ with $u\in[\frac{1}{2},1)$. We start with some remarks on the analytic character of $\mathcal{L}$. By Lemma \ref{lem:analyticH2} (b), property (N.1) assures that $\mathcal{L}$ is analytic in the half-plane $\sigma>1$.\par Property (N.2) implies that $\mathcal{L}$ can be continued analytically to a larger domain: for appropriately fixed $\alpha,l>0$, let $J_{\varepsilon}\subset \mathbb{N}$ with $\varepsilon>0$ be the sets defined in (N.2). We set $I:=\bigcup_{\varepsilon>0} J_{\varepsilon}$. Then, $\mbox{\ d}ens\ I = 1$ and, according to (N.2a), the functions $L_{ne_l}$ are analytic on $Q(\alpha,l)$ for every $n\in I$. From the relation \begin{equation*} L_{ne_l}(s) = \mathcal{L}(s+inl), \end{equation*} which holds for $n\in\mathbb{N}$, $l>0$ and $s\in\mathbb{C}$, provided that $\mathcal{L}(s+inl)$ is well-defined, we deduce that $\mathcal{L}$ can be continued analytically to the domain $$ \bigcup_{n\in I} Q_n(\alpha,l)\cup \mathbb{H}_1. $$ We proceed with some remarks on the normality feature. Roughly speaking, the normality feature of $\mathcal{L}$ assures that, for $\pmb{\sigma}$-almost every $x\in K$, the function $L_x$ appears as a vertical limit function of $\mathcal{L}$ in $\sigma>u$; see Section \ref{sec:verticallimit} for details. 
Due to this, certain properties which hold $\pmb{\sigma}$-almost surely for $L_x$, $x\in K$, in the half-plane $\sigma>\frac{1}{2}$ pass over to $\mathcal{L}$ in a certain measure-theoretical sense in the half-plane $\sigma>u$. \par According to (N.2b), the families $\{L_{ne_l}\}_{n\in J_{\varepsilon}}$ are normal in $Q(\alpha,l)$ for every $\varepsilon>0$. Note, however, that the family $\{L_{ne_l}\}_{n\in I}$ with $I=\bigcup_{\varepsilon>0} J_{\varepsilon}$ is not necessarily normal in $Q(\alpha,l)$.\par The Dirichlet series expansion of $\mathcal{L}$ in $\sigma>1$ assures that property (N.2b) is equivalent to the local boundedness of $\{L_{ne_l}\}_{n\in J_{\varepsilon}}$ in $Q(\alpha,l)$. This observation is fundamental for our further considerations and follows from the next lemma. \begin{lemma}\label{lem:locboundednessNo} Let $\mathcal{L}:\mathbb{H}_1\rightarrow \mathbb{C}$ be a function which satisfies (N.1). Let $\alpha<1$, $l>0$ and $Q:=Q(\alpha,l)$ be defined by \eqref{def:Q}. Suppose that $J\subset \mathbb{N}$ is such that, for $n\in J$, the functions $L_{ne_l}$ are analytic on $Q$. Then, $\{L_{ne_l}\}_{n\in J}$ is normal in $Q$ if and only if $\{L_{ne_l}\}_{n\in J}$ is locally bounded in $Q$. \end{lemma} \begin{proof} If $\{L_{ne_l}\}_{n\in J}$ is locally bounded in $Q$, then $\{L_{ne_l}\}_{n\in J}$ is normal in $Q$ due to Montel's theorem. In order to prove the converse, suppose that $\{L_{ne_l}\}_{n\in J}$ is normal in $Q$. In the half-plane $\sigma>1$, $\mathcal{L}$ can be written as an absolutely convergent Dirichlet series $$ \mathcal{L}(s)=\sum_{n=1}^{\infty}\frac{a(n)}{n^s}, \qquad \sigma>1; $$ see Lemma \ref{lem:analyticH2} (a). We set $$ M:= \sum_{n=1}^{\infty}\frac{|a(n)|}{n^{2}} <\infty. 
$$ By observing that $2\in Q$ and that $$ \left|L_{ne_l} (2)\right| = \left|\mathcal{L}(2+inl) \right|\leq M\qquad\mbox{ for every }n\in\mathbb{N}, $$ the local boundedness follows immediately by means of Montel's theorem (see also the remark after Theorem \ref{th:montel1} in the appendix). \end{proof} \par {\bf Sufficient conditions for the normality feature.} The following theorem provides sufficient conditions for a function $\mathcal{L}$ with property (N.1) to satisfy the normality feature (N.2). \begin{theorem}\label{th:suffconditionsN} Let $\mathcal{L}:\mathbb{H}_1\rightarrow \mathbb{C}$ be a function which satisfies property (N.1). Let $u\in[\frac{1}{2},1)$ and suppose that $\mathcal{L}$ can be continued meromorphically to the half-plane $\sigma>u$ with at most finitely many poles. Then, $\mathcal{L}\in \mathbb{N}o(u)$ if at least one of the following conditions is true. \begin{itemize} \item[(a)] \textbf{\textit{Boundedness.}} For every $\alpha>u$, there is a constant $M>0$ such that $$ \left|\mathcal{L}(s) \right| \leq M \qquad \mbox{ for } \sigma\geq \alpha. $$ \item[(b)] \textbf{\textit{Existence of the mean-square.}} The function $\mathcal{L}$ has finite growth in $\sigma>u$ and satisfies, for every $\sigma>u$, $$ \limsup_{T\rightarrow\infty} \frac{1}{2T} \int_{-T}^T \left|\mathcal{L}(\sigma+it) \right|^2 \mbox{\ d} t <\infty. $$ \item[(c)] \textbf{\textit{$a$-point density estimate.}} There exist two distinct points $a,b\in\mathbb{C}$ such that, for $\sigma> u$, as $T\rightarrow\infty$, $$ N_a(\sigma,T) = O_{\sigma}(T) \qquad \mbox{ and }\qquad N_b(\sigma, T) = o_{\sigma}(T). $$ Here, $N_a(\sigma, T)$ denotes, as usual, the number of $a$-points $\rho_a=\beta_a + i\gamma_a$ of $\mathcal{L}$ with imaginary part $0<\gamma_a \leq T$ and real part $\beta_a > \sigma$. \end{itemize} \end{theorem} It is an immediate consequence of Montel's theorem that condition (a) implies that $\mathcal{L}\in\mathbb{N}o(u)$. In fact, condition (a) implies even more. 
We deduce from (a) that $\mathcal{L}$ is analytic in the half-plane $\sigma>u$ and that the Dirichlet series representing $\mathcal{L}$ converges uniformly in any half-plane $\sigma\geq \alpha >u$. In particular, we obtain that $\sigma_{unif,\mathcal{L}} \leq u$, where $\sigma_{unif,\mathcal{L}}$ denotes the abscissa of uniform convergence of the Dirichlet series connected to $\mathcal{L}$. Most of our subsequent results are trivial in this case. We included condition (a) for sake of completeness. In the following, however, we shall focus on the more interesting situation if $\mathcal{L}\in\mathbb{N}o(u)$ and $u<\sigma_{unif,\mathcal{L}}$.\par It follows essentially from an integrated version of Cauchy's integral formula (see Titchmarsh \cite[\S 11.8]{titchmarsh:1986} or Theorem \ref{th:sufflocalbound} in the appendix) that the mean-square condition (b) is sufficient for $\mathcal{L}$ to lie in $\mathbb{N}o(u)$. In fact, the existence of the mean-square is a standard tool used in the theory of vertical limit functions of Dirichlet series and appears already in the works of Bohr; see for example Bohr \cite{bohr:1913-2} and Bohr \& Jessen \cite{bohrjessen:1932}. \par By means of a generalized version of Montel's fundamental normality test (Theorem \ref{th:FNTextension}), we deduce that the $a$-point density estimate (c) implies that $\mathcal{L}\in\mathbb{N}o(u)$. As far as the author knows, except for Lee \cite{lee:2012} who derived universality for Hecke $L$-functions in $\sigma>\frac{1}{2}$ by assuming the truth of Selberg's zero-density hypothesis, condition (c) or something similar did not appear in the context of vertical limit functions yet. In particular, condition (c) is one reason why we set up the class $\mathbb{N}o(u)$ by means of the normality feature (N.2) and not, as common, by demanding finite growth for $\mathcal{L}$ and the existence of the mean-square value in $\sigma>u$. 
\par Conditions (a), (b) and (c) are not completely independent from one another. If $\mathcal{L}$ satisfies (a), then also (b) is true for $\mathcal{L}$. Moreover, if $\mathcal{L}$ satisfies (b), we deduce that, for every $a\in\mathbb{C}$ and $\sigma>u$, as $T\rightarrow\infty$, $$ N_a(\sigma, T) = O_{a,\sigma}(T); $$ see Section \ref{subsec:meansquare}. However, (b) does not necessarily imply (c) and the latter not necessarily (b). In Chapter \ref{ch:probmom}, we shall see that (c) implies (b) in a certain measure-theoretical sense. We shall now start to prove Theorem \ref{th:suffconditionsN} \par \begin{proof} We fix arbitrary real numbers $l,\varepsilon>0$ and $\alpha\in(u,1]$. Let $Q:=Q(\alpha, l)$ be defined by \eqref{def:Q}. (a): From condition (a), we deduce immediately that, for every $n\in\mathbb{N}$, the functions $L_{ne_l}$ are analytic on $Q$ and that the family $\mathcal{F}:=\{L_{ne_l}\}_{n\in\mathbb{N}}$ is bounded on $Q$. According to Montel's theorem, $\mathcal{F}$ is normal in $Q$. Altogether, as $\alpha\in(u,1]$ and $l>0$ were chosen arbitrarily, we conclude that $\mathcal{L}$ satisfies property (N.2).\par (b): We follow Tanaka \cite[\S 4]{tanaka:2008} to prove the sufficiency of condition (b). According to Carlson's theorem (Theorem \ref{th:carlson}), we have for $\sigma>u$ $$ \lim_{T\rightarrow\infty} \frac{1}{2T} \int_{-T}^T \left|\mathcal{L}(\sigma+it) \right|^2 \mbox{\ d} t = \sum_{n=1}^{\infty} \frac{|a(n)|^2}{n^{2\sigma}} =: f(\sigma). $$ Due to the Ramanujan hypothesis, the Dirichlet series $f(\sigma)$ is absolutely convergent for $\sigma>u\geq \frac{1}{2}$. Moreover, $f(\sigma)$ defines a positive, monotonically decreasing, continuous function on the interval $(u,\infty)$. 
The bounded convergence theorem assures that \begin{equation}\label{eq1} \lim_{T\rightarrow\infty} \int_{\alpha}^{2}\left( \frac{1}{2T} \int_{-T}^T \left|\mathcal{L}(\sigma+it) \right|^2 \mbox{\ d} t \right) \mbox{\ d} \sigma = \int_{\alpha}^{2} f(\sigma) \mbox{\ d} \sigma =: L < \infty. \end{equation} Let $\mathcal{R}:=\mathcal{R}(\alpha,l)$ be defined by \eqref{def:R}. We set $$ B(n) := \iint_{\mathcal{R}} \left|L_{ne_l}(\sigma+it) \right|^2 \mbox{d} \sigma \mbox{d} t . $$ By Fubini's theorem and the identity $L_{ne_l}(s)=\mathcal{L}(s+inl)$, we can write $$ \frac{1}{2N l}\sum_{n=-N}^N B(n) = \frac{1}{2N l} \int_{\alpha}^{2}\left( \int_{-N l}^{N l} \left|\mathcal{L}(\sigma+it) \right|^2 \mbox{\ d} t \right) \mbox{\ d} \sigma . $$ Thus, we obtain by \eqref{eq1} that \begin{equation}\label{eq2} \lim_{N\rightarrow\infty}\frac{1}{2N l}\sum_{n=-N}^N B(n) = L \end{equation} We set $L' := 6L/\varepsilon$ and define $$ I : = \left\{ n\in\mathbb{N} \, : \, B(n) \leq L' \right\}\qquad \mbox{and}\qquad I^c : = \mathbb{N}\setminus I = \left\{ n\in\mathbb{N} \, : \, B(n) > L' \right\}. $$ We deduce from \eqref{eq2} that $\mbox{\ d}ens^* I^c< \frac{\varepsilon}{3}$ and obtain consequently that $\mbox{\ d}ens_* I >1-\frac{\varepsilon}{3}$. Now, we set $$ J : = \left\{ n\in\mathbb{N} \, : \, \max\{B(n-1),B(n),B(n+1)\} \leq L' \right\} \quad \mbox{ and } \quad J^c : = \mathbb{N}\setminus J $$ As $\mbox{\ d}ens^* J^c \leq 3\cdot \mbox{\ d}ens^* I^c$, we get that $$ \mbox{\ d}ens^* J^c < \varepsilon \qquad \mbox{and} \qquad \mbox{\ d}ens_* J > 1- \varepsilon. $$ We define $Q'$ to be the rectangular domain which consists of all points that lie in $Q$ but not in $\sigma\geq 2$, i.e. $$ Q' := \left\{\sigma + it \in\mathbb{C} \, : \, \alpha < \sigma < 2 \, , - l < t < 2 l \right\}. $$ Then, the construction of the sets $J$ and $Q'$ assures that, for every $n\in J$, $$ \iint_{Q'} \left|L_{ne_l}(\sigma+it) \right|^2 \mbox{d} \sigma \mbox{d} t \leq 3 L'. 
$$ This implies that the family $\{L_{ne_l}\}_{n\in J}$ is locally bounded on $Q'$ (see Theorem \ref{th:sufflocalbound}). Moreover, it follows from the Dirichlet series expansion of $\mathcal{L}$ in $\sigma>1$ that $\mathcal{L}$ is bounded in the half-plane $\sigma\geq \frac{3}{2}$. Altogether, we obtain that $\{L_{ne_l}\}_{n\in J}$ is locally bounded on $Q$. Montel's theorem implies that $\{L_{ne_l}\}_{n\in J}$ is normal in $Q$. As $\alpha\in(u,1]$ and $l,\varepsilon>0$ can be chosen arbitrarily, we conclude that $\mathcal{L}$ satisfies property (N.2).\par (c): For $c\in\mathbb{C}$, let $D_c(n)$ denote the number of $c$-points of $L_{ne_l}$ in $$ \mathcal{R}' := \left\{\sigma + it \in\mathbb{C} \, : \, \sigma \geq \alpha, \, 0 \leq t < l \right\}. $$ According to our assumption, there are two distinct $a,b\in\mathbb{C}$ and a constant $L>0$ such that \begin{equation}\label{eq:densitiesab} \limsup_{T\rightarrow\infty} \frac{1}{T}N_a(\alpha,T) \leq L \qquad \mbox{and }\qquad \limsup_{T\rightarrow\infty} \frac{1}{T}N_b(\alpha,T) = 0. \end{equation} We set $L':= 3L/\varepsilon$ and define $$ I_a:= \left\{n\in\mathbb{N} \, : \, D_a(n) \leq L' \right\} \qquad \mbox{and}\qquad I_b := \left\{n\in\mathbb{N} \, : \, D_b(n) =0 \right\}. $$ It follows from \eqref{eq:densitiesab} that \begin{equation}\label{IaIb} \mbox{\ d}ens_* I_a > 1 - \frac{\varepsilon}{3} \qquad \mbox{ and } \qquad \mbox{\ d}ens\ I_b =1. \end{equation} Now, let $$ J_a := \left\{n\in\mathbb{N} \, : \, \max\{D_a(n-1),D_a(n),D_a(n+1)\} \leq L' \right\} $$ and $$ J_b := \left\{n\in\mathbb{N} \, : \, D_b(n-1) = D_b(n) = D_b(n+1) = 0 \right\}. $$ By a similar argumentation as in (b), we deduce from \eqref{IaIb} that $$ \mbox{\ d}ens_* J_a > 1 - \varepsilon \qquad \mbox{ and } \qquad \mbox{\ d}ens\ J_b =1.
$$ As $\mathcal{L}$ has only finitely many poles in the half-plane $\sigma>u$, we find a positive integer $N_{p}$ such that, for every $n\in J_{p}=\mathbb{N} \setminus\{1,...,N_{p}\}$, the function $L_{ne_l}$ is analytic in $Q$. \par We set $J:= J_a \cap J_b \cap J_{p}$. Then, by the construction of $J$, we get that $\mbox{\ d}ens_* J >1-\varepsilon$ and that the functions in the family $\mathcal{F}:=\{L_{ne_l}\}_{n\in J}$ are analytic on $Q$, omit the value $b$ on $Q$ and do not assume the value $a\in\mathbb{C}\setminus\{b\}$ at more than $L'$ points of $Q$. An extension of Montel's fundamental normality test (Theorem \ref{th:FNTextension} (b)) yields that the family $\{L_{ne_l}\}_{n\in J}$ is normal in $Q$. Since $\alpha\in(u,1]$ and $l,\varepsilon>0$ can be chosen arbitrarily, we conclude that $\mathcal{L}$ satisfies property (N.2). \end{proof} {\bf Basic structure of the class $\mathbb{N}o(u)$.} For distinct $u_1,u_2\in[\frac{1}{2},1)$, the classes $\mathbb{N}o(u_1)$ and $\mathbb{N}o(u_2)$ are related as follows. \begin{lemma} Let $u_1,u_2 \in[\frac{1}{2},1)$ with $u_1 \leq u_2$. Then, $$ \mathbb{N}o(u_1) \subset \mathbb{N}o(u_2). $$ \end{lemma} In the sequel, we set $$ \mathbb{N}o := \bigcup_{u\in[\frac{1}{2},1)} \mathbb{N}o(u). $$ {\bf Elements of the class $\mathbb{N}o$.} The class $\mathbb{N}o$ contains functions from the extended Selberg class. Suppose that $\mathcal{L}\in\mathcal{S}^{\#}$ has degree $d_{\mathcal{L}}=0$. Then, according to Kaczorowski \& Perelli \cite{kaczorowskiperelli:1999}, $\mathcal{L}$ is given by a Dirichlet polynomial and we conclude immediately by Theorem \ref{th:suffconditionsN} (a) that $$ \mathcal{L}\in\mathbb{N}o(\tfrac{1}{2}). $$ Now suppose that $\mathcal{L}\in\mathcal{S}^{\#}$ has degree $d_{\mathcal{L}}>0$ and satisfies the Ramanujan hypothesis.
Then, according to Theorem \ref{th:suffconditionsN} (b), $$ \mathcal{L}\in \mathbb{N}o(u_m) \qquad \mbox{where}\qquad u_m:= \max\{\tfrac{1}{2},\sigma_m\} $$ and $\sigma_m$ denotes, as usual, the abscissa of bounded mean-square of $\mathcal{L}$. Recall that, for any $\mathcal{L}\in\mathcal{S}^{\#}$ which satisfies the Ramanujan hypothesis and has degree $d_{\mathcal{L}}>0$, $$ \sigma_m \leq \max\{ \tfrac{1}{2},1 - \tfrac{1}{d_{\mathcal{L}}}\}<1. $$ If $\mathcal{L}\in\mathcal{S}$ satisfies the Lindel\"of hypothesis or the Riemann hypothesis, we know that $\sigma_m \leq \tfrac{1}{2};$ see Section \ref{subsec:meansquare} for details. In particular, the truth of the Grand Lindel\"of hypothesis or the Grand Riemann hypothesis implies that $\mathcal{S}\subset \mathbb{N}o(\frac{1}{2})$. However, we would like to stress that a given function $\mathcal{L}\in\mathcal{S}$ does not necessarily have to satisfy the Lindel\"of hypothesis or the Riemann hypothesis in order to lie in $\mathbb{N}o(\frac{1}{2})$. According to Theorem \ref{th:suffconditionsN} (b) and (c), it is sufficient that $\mathcal{L}\in\mathcal{S}$ fulfills the weaker condition $\sigma_m\leq \frac{1}{2}$ or the density estimates $$ N_a(\sigma,T) = O_{\sigma}(T) \qquad \mbox{ and }\qquad N_0(\sigma, T) = o_{\sigma}(T), $$ for every $\sigma>\frac{1}{2}$ and a particular $a\in\mathbb{C}\setminus\{0\}$, as $T\rightarrow\infty$.\par Besides, the class $\mathbb{N}o$ contains many functions that do not lie in $\mathcal{S}^{\#}$ at all. Dirichlet $L$-functions attached to non-primitive characters, for example, lie in $\mathbb{N}o(\frac{1}{2})$, but they are not contained in $\mathcal{S}^{\#}$ as they lack an appropriate functional equation. Note further that a function $\mathcal{L}\in\mathbb{N}o$ does not necessarily extend to a meromorphic function on the whole complex plane.
In the next lemma we shall see that, if $\mathcal{L}$ lies in $\mathbb{N}o(u)$ and its Dirichlet series expansion satisfies the Ramanujan hypothesis, then many functions related to $\mathcal{L}$ are also elements of $\mathbb{N}o(u)$. \begin{lemma}\label{lem:classNrelfct} Let $u\in[\frac{1}{2},1)$. Suppose that $\mathcal{L},\mathcal{L}_1,...,\mathcal{L}_n\in\mathbb{N}o(u)$ and that the Dirichlet series expansions of $\mathcal{L},\mathcal{L}_1,...,\mathcal{L}_n$ satisfy the Ramanujan hypothesis, respectively. Then, \begin{itemize} \item[(a)] $\mathcal{L}_1+...+\mathcal{L}_n\in\mathbb{N}o(u)$. \item[(b)] $\mathcal{L}_1\cdots\mathcal{L}_n\in\mathbb{N}o(u)$. \item[(c)] $\mathcal{L}^k \in \mathbb{N}o(u)$ for any $k\in\mathbb{N}_0$. \item[(d)] $\mathcal{L}^{(\ell)}\in\mathbb{N}o(u)$ for any $\ell\in\mathbb{N}_0$. \end{itemize} \end{lemma} \begin{proof} Let $k,\ell\in\mathbb{N}_0$, $u\in[\frac{1}{2},1)$ and $\mathcal{L},\mathcal{L}_1,...,\mathcal{L}_n\in\mathbb{N}o(u)$. We observe the following: \begin{itemize} \item[(i)] In the half-plane $\sigma>1$, the functions $\mathcal{L},\mathcal{L}_1,...,\mathcal{L}_n$ are given by a Dirichlet series which satisfies the Ramanujan hypothesis. It follows from Theorems \ref{th:sumprodL} and \ref{th:Dirichletderivative} that, in the half-plane $\sigma>1$, each of the functions $\sum_{j=1}^n \mathcal{L}_j$, $\prod_{j=1}^n \mathcal{L}_j$, $\mathcal{L}^k$ and $\mathcal{L}^{(\ell)}$ is also given by a Dirichlet series which satisfies the Ramanujan hypothesis; thus, in particular by a Dirichlet series that is an element of $\mathscr{H}^2$. \item[(ii)] If the functions $\mathcal{L},\mathcal{L}_1,...,\mathcal{L}_n$ are analytic on a domain $Q\subset\mathbb{C}$, then the functions $\sum_{j=1}^n \mathcal{L}_j$, $\prod_{j=1}^n \mathcal{L}_j$, $\mathcal{L}^k$ and $\mathcal{L}^{(\ell)}$ are also analytic on $Q$. 
\item[(iii)] Suppose that the families $\mathcal{F}, \mathcal{F}_1,..., \mathcal{F}_n$ of analytic functions on a domain $Q\subset\mathbb{C}$ are locally bounded on $Q$. Then, we deduce immediately that the families $$\textstyle \mathcal{F}_+ := \left\{\sum_{j=1}^n f_j \, : \, f_j\in \mathcal{F}_j\right\}, \qquad \mathcal{F}_{\times} := \left\{\prod_{j=1}^n f_j \, : \, f_j\in \mathcal{F}_j\right\}, $$ $$ \mathcal{F}^{k} := \left\{f^k \, : \, f\in \mathcal{F}\right\}\qquad \mbox{and}\qquad \mathcal{F}^{(\ell)} := \left\{f^{(\ell)} \, : \, f\in \mathcal{F}\right\} $$ are also locally bounded on $Q$. Here, the statement for $\mathcal{F}^{(\ell)}$ follows from Cauchy's integral formula. \item[(iv)] Let $\varepsilon>0$ and $J_1,...,J_n\subset \mathbb{N}$ with $\mbox{\ d}ens_* J_1, ..., \mbox{\ d}ens_*J_n> 1- \varepsilon$. Then, $$ \mbox{\ d}ens_* \left(J_1\cap ... \cap J_n \right) > 1 - n\varepsilon. $$ \end{itemize} The statement of the lemma follows from (i)-(iv), the definition of $\mathbb{N}o(u)$ and Lemma \ref{lem:locboundednessNo}. \end{proof} In fact, Lemma \ref{lem:classNrelfct} was another motivation for us to define the class $\mathbb{N}o(u)$ by the normality feature (N.2) and not by the mean-square condition of Theorem \ref{th:suffconditionsN} (b): we know that the Riemann zeta-function is an element of $\mathbb{N}o(\tfrac{1}{2})$ and satisfies the Ramanujan hypothesis. By means of Lemma \ref{lem:classNrelfct} (c), we conclude immediately, that $$ \zeta^k \in\mathbb{N}o(\tfrac{1}{2}) \qquad \mbox{ for }k\in\mathbb{N}. $$ However, as $\zeta^k$ is an element of the Selberg class of degree $k$, we only know for $\sigma>\max\{\frac{1}{2},1-\frac{1}{k}\}$, that $$ \limsup_{T\rightarrow\infty} \frac{1}{2T} \int_{-T}^T \left|\zeta^k(\sigma+it) \right|^2 \mbox{\ d} t <\infty. 
$$ Thus, we would only get $\zeta^k \in \mathcal{N}(\max\{\frac{1}{2},1-\frac{1}{k}\})$ for $k\in\mathbb{N}$, if we replace the normality feature (N.2) in the definition of $\mathcal{N}(u)$ by the mean-square condition of Theorem \ref{th:suffconditionsN} (b).\par Let $\zeta_K$ be the Dedekind zeta-function of an abelian number field $K$. Then, $\zeta_K$ can be written as a finite product of Dirichlet $L$-functions; see for example Neukirch \cite[Chapt. VII, \S 5]{neukirch:2010}. As every Dirichlet $L$-functions is an element of $\mathbb{N}o(\frac{1}{2})$ and satisfies the Ramanujan hypothesis, we deduce from Lemma \ref{lem:classNrelfct} (b) that $$ \zeta_K \in \mathbb{N}o(\tfrac{1}{2}). $$ We mention a further implication of Lemma \ref{lem:classNrelfct}. Let $u\in[\frac{1}{2},1)$ and $\mathcal{L}\in\mathbb{N}o(u)$. Suppose that $\mathcal{L}$ has Dirichlet series expansion $$ \mathcal{L}(s)=\sum_{n=1}^{\infty} \frac{a(n)}{n^s}, \qquad \sigma>1. $$ Then, it follows from Lemma \ref{lem:classNrelfct} (a) that, for $N\in\mathbb{N}$, the function $f_N$ defined by $$ f_N(s):= \mathcal{L}(s) - \sum_{n=1}^{N} \frac{a(n)}{n^s}, \qquad \sigma>1, $$ lies also in $\mathbb{N}o(u)$.\par We conclude with a possible extension of Lemma \ref{lem:classNrelfct} which we postpone to future works. Let $u\in[\frac{1}{2},1)$ and $\mathcal{L}\in\mathbb{N}o(u)$. Further, let $\mathcal{H}(\mathbb{H}_1)$ denote the set of all analytic functions in the half-plane $\sigma>1$. Is there a nice way to describe axiomatically the set of all operators $T:\mathcal{H}(\mathbb{H}_1) \rightarrow \mathcal{H}(\mathbb{H}_1)$ for which $T(\mathcal{L}) \in \mathbb{N}o(u)$. \section{Normal families related to a function of the class \texorpdfstring{$\mathbb{N}o$}{}} In this section we discuss fundamental properties of the families $\{L_{ne_l}\}_{n\in J_{\varepsilon}}$ related to $\mathcal{L}\in\mathbb{N}o(u)$ by means of the normality feature (N.2). 
Several ideas that we use appear in Tanaka \cite{tanaka:2008} and in a slightly different language in the theory of probabilistic limit theorems, see for example Laurin\v{c}ikas \cite{laurincikas:1991-2}. It is our claim to rely strictly on the normality feature (N.2) in our argumentation and not to use the mean-square condition of Theorem \ref{th:suffconditionsN} directly. We shall need the following lemmas to establish our main theorem in Chapter \ref{ch:probmom}. \par \begin{lemma}\label{lem:normalityNo} Let $u\in[\frac{1}{2},1)$ and $\mathcal{L}\in\mathbb{N}o(u)$. Let $\alpha\in(u,1]$, $l>0$ and $Q:=Q(\alpha,l)$ be defined by \eqref{def:Q}. Further, let $J\subset \mathbb{N}$. \begin{itemize} \item[(a)] Suppose that $d_*:= \mbox{\ d}ens_* J>0$. Then, for every $\varepsilon>0$, there is a subset $I_{\varepsilon}\subset J$ with $\mbox{\ d}ens_* I_{\varepsilon}>d_* -\varepsilon$ such that the functions $L_{ne_l}$ are analytic on $Q$ for $n\in I_{\varepsilon}$ and the family $\{L_{ne_l}\}_{n\in I_{\varepsilon}}$ is normal in $Q$. \item[(b)] Suppose that $d^*:= \mbox{\ d}ens^* J>0$. Then, for every $\varepsilon>0$, there is a subset $I_{\varepsilon}\subset J$ with $\mbox{\ d}ens^* I_{\varepsilon}>d^* -\varepsilon$ such that the functions $L_{ne_l}$ are analytic on $Q$ for $n\in I_{\varepsilon}$ and the family $\{L_{ne_l}\}_{n\in I_{\varepsilon}}$ is normal in $Q$. \end{itemize} \end{lemma} \begin{proof} Suppose that $J\subset \mathbb{N}$. Let $\varepsilon>0$. Then, due to the normality feature of $\mathcal{L}$, we find a subset $J_{\varepsilon}\subset \mathbb{N}$ with $\mbox{\ d}ens_* J_{\varepsilon} > 1- \varepsilon$ such that the functions $L_{ne_l}$ are analytic on $Q$ for $n\in J_{\varepsilon}$ and the family $\{L_{ne_l}\}_{n\in J_{\varepsilon}}$ is normal in $Q$. We set $I_{\varepsilon}:= J\cap J_{\varepsilon}$. Suppose that $d_*>0$. 
Then, we find immediately that $$ \mbox{\ d}ens_* I_{\varepsilon} \geq 1- \mbox{\ d}ens^* (\mathbb{N}\setminus I_{\varepsilon}) > d_* -\varepsilon. $$ Suppose that $d^*>0$. In view of $$ d^* \leq \mbox{\ d}ens^* I_{\varepsilon} + \mbox{\ d}ens^* (J \cap (\mathbb{N}\setminus J_{\varepsilon})) \leq \mbox{\ d}ens^* I_{\varepsilon} + (1 - \mbox{\ d}ens_* J_{\varepsilon}) $$ it follows that $$ \mbox{\ d}ens^* I_{\varepsilon} > d^*- \varepsilon. $$ \end{proof} The next lemma reflects the fact that a family $\{L_{ne_l}\}_{n\in J}$ is normal on a set $Q(\alpha,l)$ if and only if $\{L_{ne_l}\}_{n\in J}$ is locally bounded on $Q(\alpha,l)$; see Lemma \ref{lem:locboundednessNo}. \begin{lemma}\label{lem:JM} Let $u\in[\frac{1}{2},1)$ and $\mathcal{L}\in\mathbb{N}o(u)$. Let $\alpha\in(u,1]$, $l>0$ and $Q:=Q(\alpha,l)$ be defined by \eqref{def:Q}. Let $\mathcal{R}$ be a compact subset of $Q$. For $M>0$, let $J(M):=J(M,l,\alpha,\mathcal{R},\mathcal{L})$ be the set of all $n\in\mathbb{N}$ that satisfy the following properties: \begin{itemize} \item[(i)] The function $L_{ne_l}$ is analytic on $Q$. \item[(ii)] The inequality $\max_{s\in\mathcal{R}} \left|L_{ne_l}(s) \right| \leq M$ holds. \end{itemize} Then, for any $\varepsilon>0$, there exists a constant $M_{\varepsilon}>0$ such that $$ \mbox{\ d}ens_* J(M_{\varepsilon}) > 1 - \varepsilon. $$ \end{lemma} \begin{proof} The normality feature of $\mathcal{L}$ assures that, for any $\varepsilon>0$, we find a subset $J_{\varepsilon}\subset \mathbb{N}$ with $\mbox{\ d}ens_* J_{\varepsilon} > 1- \varepsilon$ such that, for $n\in J_{\varepsilon}$, the functions $L_{ne_l}$ are analytic on $Q$ and, additionally, the family $\{L_{ne_l}\}_{n\in J_{\varepsilon}}$ is normal in $Q$. According to Lemma \ref{lem:locboundednessNo}, the family $\{L_{ne_l}\}_{n\in J_{\varepsilon}}$ is locally bounded on $Q$.
As $\mathcal{R}$ is a compact subset of $Q$, there exists a constant $M_{\varepsilon}>0$ such that $$ \max_{s\in\mathcal{R}}|L_{ne_l}(s)|\leq M_{\varepsilon} \qquad \mbox{ for } n\in J_{\varepsilon}. $$ Thus, the statement of the lemma follows by setting $J(M_{\varepsilon}):=J_{\varepsilon}$. \end{proof} For functions which satisfy the mean-square condition of Theorem \ref{th:suffconditionsN} (b), the statement of Lemma \ref{lem:JM} was essentially already known to Bohr (see for example Bohr \cite{bohr:1915}) and can also be found in Tanaka \cite[Lemma 4.1]{tanaka:2008}. Our motivation was to work out that the statement of Lemma \ref{lem:JM} does not only hold for functions which satisfy the mean-square condition but in general for those which satisfy the normality feature (N.2); thus, for example, for functions which satisfy the $a$-density estimate of Theorem \ref{th:suffconditionsN} (c).\par In the subsequent lemma, we gather important properties of a family $\{L_{ne_l}\}_{n\in J}$ if it is normal in a certain half-strip.\par For a subset $J\subset \mathbb{N}$ and a fixed $l>0$, let $E(J)\subset K_{2\pi/l}$ denote from now on the closure of the set $\{ne_l \, : \, n\in J \}$, with respect to the topology of $K_{2\pi/l}$. \begin{lemma}\label{lem:normalNproperties} Let $u\in[\frac{1}{2},1)$ and $\mathcal{L}\in\mathbb{N}o(u)$. Let $l>0$, $\alpha\in(u,1]$ and $Q:=Q(\alpha,l)$ be defined by \eqref{def:Q}. Let the set $\mathcal{H}(Q)$ of all analytic functions on $Q$ be endowed with the topology of uniform convergence on compact subsets of $Q$. Suppose that $J$ is a subset of $\mathbb{N}$ such that the functions $L_{ne_l}$ are analytic on $Q$ for $n\in J$ and, additionally, the family $\{L_{ne_l}\}_{n\in J}$ is normal in $Q$. Then, the following statements are true. \begin{itemize} \item[(a)] For every $y\in E(J)$, the function $L_y$ is analytic on $Q$.
Moreover, $$ \overline{\{L_{ne_l}\,:\, n\in J \}} = \{L_y\, : \, y\in E(J)\} \subset \mathcal{H}(Q), $$ where we regard $\{L_{ne_l}\,:\, n\in J \}$ and $\{L_y\, : \, y\in E(J)\}$ as subsets of $\mathcal{H}(Q)$ and take the closure of $\{L_{ne_l}\,:\, n\in J \}$ with respect to the topology of $\mathcal{H}(Q)$ chosen above. \item[(b)] The function $L:(s,y)\mapsto L_y(s)$ is continuous on $Q\times E(J)$. \item[(c)] Let $\mathcal{R}$ be a compact subset of $Q$ and suppose that there are constants $m,M>0$ such that $$ m \leq \max_{s\in\mathcal{R}} \left| L_{ne_l}(s) \right| \leq M \qquad \mbox{for every }n\in J. $$ Then, $$ m \leq \max_{s\in\mathcal{R}} \left| L_{y}(s) \right| \leq M \qquad \mbox{for every }y\in E(J). $$ \item[(d)] Let $\mathcal{R}$ be a compact subset of $Q$ and suppose that, for every $n\in J$, the function $L_{ne_l}$ has at least one zero in $\mathcal{R}$. Then, for every $y\in E(J)$, the function $L_{y}$ has at least one zero in $\mathcal{R}$. \end{itemize} \end{lemma} \begin{proof} (a): Let $y\in E(J)$. Then, according to Lemma \ref{lem:analyticH2} (b) and property (N.1) of $\mathcal{L}$, the function $L_y$ is analytic on the intersection $Q(1,l)$ of the domain $Q$ with the half-plane $\sigma>1$. Moreover, due to the definition of $E(J)$, we find natural numbers $n_k \in J$, indexed by $k\in\mathbb{N}$, such that the sequence $(n_ke_l)_k$ converges to $y$, with respect to the topology of $K_{2\pi/l}$. According to Lemma \ref{lem:analyticpropertiesofLinU} (b) and Lemma \ref{lem:analyticH2} (a), the function $$ L: (s,y') \mapsto L_{y'}(s) $$ is continuous on $Q(1,l)\times K_{2\pi/l}$. Thus, for every $s\in Q(1,l)$, we have $$ \lim_{k\rightarrow\infty} L_{n_{k}e_l}(s) = \lim_{k\rightarrow\infty} L(s,n_{k}e_l) = L(s,y)=L_{y}(s). $$ Since the family $\mathcal{F}:=\{L_{ne_l}\}_{n\in J}$ is normal in $Q(\alpha,l)$, there exists a subsequence of $(L_{n_ke_l})_{k}$ which converges locally uniformly on $Q(\alpha,l)$ to a function $f\in\mathcal{H}(Q)$. 
It follows from the uniqueness of the limit function that $f$ is the analytic continuation of $L_{y}$ to $Q(\alpha,l)$. We have proved that $L_y$ is analytic on $Q$ and that $$\{L_y\, : \, y\in E(J)\} \subset \overline{\{L_{ne_l}\,:\, n\in J \}}.$$ Now, suppose that the natural numbers $n_k\in J$ are chosen such that the sequence $(L_{n_ke_l})_k$ converges locally uniformly on $Q$ to a function $f\in\mathcal{H}(Q)$. We note that the set $E(J)\subset K_{2\pi/ l}$ is compact. Thus, we find a subsequence of $(n_ke_l)_{k}$ which converges to a certain element $y\in E(J)$. By the continuity of $L:(s,y')\mapsto L_{y'}(s)$ on $Q(1,l)\times K_{2\pi/l}$, we obtain that $$ f(s)=L_y(s) \qquad \mbox{for }s\in Q(1,l). $$ The uniqueness of the limit function implies that $f$ is the analytic continuation of $L_y$ to $Q(\alpha,l)$. This proves that $$\overline{\{L_{ne_l}\,:\, n\in J \}} \subset \{L_y\, : \, y\in E(J)\}.$$ \par (b): Let $(s_0,y_0)\in Q\times E(J)$ and $(s_k,y_k)_k$ be a sequence of points $(s_k,y_k)\in Q\times E(J)$ with $$ \lim_{k\rightarrow\infty} (s_k,y_k) = (s_0,y_0). $$ We choose an arbitrary $\varepsilon>0$. It follows from (a) that the function $L_{y_0}$ is analytic and, thus, continuous on $Q$. Hence, we find a disc $D_{\delta}(s_0)$ with $\delta>0$ such that $$\overline{D_{\delta}(s_0)}\subset Q$$ and \begin{equation}\label{C1} \left|L_{y_0}(s)-L_{y_0}(s_0) \right|<\frac{\varepsilon}{2} \qquad \mbox{for }s\in D_{\delta}(s_0). \end{equation} Statement (a) together with Lemma \ref{lem:locboundednessNo} implies that the family $\{L_y\}_{y\in E(J)}$ is locally bounded on $Q$ and, in particular, normal in $Q$. Similarly as in the proof of statement (a), we deduce from the continuity of $L$ in $Q(1,l)\times E(J)$, that the sequence $(L_{y_k})_k$ converges locally uniformly on $Q$ to $L_{y_0}$.
Hence, for sufficiently large $k$, \begin{equation}\label{C2} \left|L_{y_k}(s)-L_{y_0}(s) \right|<\frac{\varepsilon}{2}\qquad \mbox{for }s\in D_{\delta}(s_0). \end{equation} By combining \eqref{C1} and \eqref{C2}, we obtain that, for sufficiently large $k$, $$ \left|L(s_k,y_k)-L(s_0,y_0) \right| <\varepsilon. $$ The assertion follows.\par (c): Statement (c) follows directly from (a).\par (d): Statement (d) follows from (a) by means of the theorem of Hurwitz (Theorem \ref{th:hurwitz}).\par \end{proof} \section{The class \texorpdfstring{$\mathbb{N}o$}{} and a polynomial Euler product representation} In this section we suppose that $\mathcal{L}\in\mathbb{N}o(u)$ satisfies the Ramanujan hypothesis and can be written as a polynomial Euler product of the form \eqref{eulerproduct}. We proceed to investigate the properties of the families $\{L_{ne_l}\}_{n\in J_{\varepsilon}}$ attached to $\mathcal{L}\in\mathbb{N}o(u)$ by means of the normality feature (N.2). Our main aim of this section is to establish Lemma \ref{lem:classNrelfct2} which states, roughly speaking, that the Ramanujan hypothesis together with a polynomial Euler product representation for $\mathcal{L}\in\mathbb{N}o(u)$ implies that also $\log\mathcal{L}$ and $\mathcal{L}^{\kappa}$ with $\kappa\in\mathbb{R}$ are elements of $\mathbb{N}o(u)$. \par If $\mathcal{L}\in\mathbb{N}o(u)$ satisfies the Ramanujan hypothesis and has a polynomial Euler product representation in $\sigma>1$, then $\mathcal{L}$ is free of zeros in $\sigma>1$. The next lemma states that possible zeros of $\mathcal{L}$ in the strip $u<\sigma\leq 1$ cannot lie too densely. For functions $\mathcal{L}\in\mathbb{N}o(u)$ which satisfy the mean-square condition of Theorem \ref{th:suffconditionsN} (b), the following lemma is well-known and can be found in a slightly modified version for the peculiar case of the Riemann zeta-function, for example, in the paper of Tanaka \cite[Proposition 2.1]{tanaka:2008}.
\begin{lemma}\label{lem:Jzero} Let $u\in[\frac{1}{2},1)$ and $\mathcal{L}\in\mathbb{N}o(u)$. Suppose that $\mathcal{L}$ satisfies the Ramanujan hypothesis and can be written as a polynomial Euler product in $\sigma>1$. Let $\alpha\in(u,1]$, $l>0$ and $\mathcal{R}$ be a compact subset of the half-strip $Q:=Q(\alpha,l)$ defined by \eqref{def:Q}. Then, there exists a subset $J_{zf}:=J_{zf}(\alpha,l,\mathcal{R},\mathcal{L})\subset \mathbb{N}$ with the following properties: \begin{itemize} \item[(i)] $L_{ne_l}$ is analytic in $Q$ for $n\in J_{zf}$. \item[(ii)] $L_{ne_l}$ is non-vanishing in $\mathcal{R}$ for $n\in J_{zf}$. \item[(iii)] The set $J_{zf}$ has density $\mbox{\ d}ens \ J_{zf} = 1$. \end{itemize} \end{lemma} \begin{proof} According to the normality feature of $\mathcal{L}$, we find a subset $J\subset \mathbb{N}$ with $\mbox{\ d}ens \ J =1$ such that $L_{ne_l}$ is analytic on $Q$ for $n\in J$. Suppose that there is a subset $J^c_{zf} \subset J$ with $d:=\mbox{\ d}ens^* J^c_{zf}>0$ such that, for every $n\in J_{zf}^c$, the function $L_{ne_l}$ has at least one zero in $\mathcal{R}\subset Q$. We choose $0<\varepsilon<d$. Then, according to Lemma \ref{lem:normalityNo}, there is a subset $I_{\varepsilon}\subset J^c_{zf}$ with $\mbox{\ d}ens^* I_{\varepsilon}>d-\varepsilon$ such that the family $\{L_{ne_l}\}_{n\in I_{\varepsilon}}$ is normal in $Q$. It follows from Lemma \ref{lem:normalNproperties} (d) that every function of the family $\{L_{y}\}_{y\in E(I_{\varepsilon})}$ is analytic on $Q$ and has at least one zero in $\mathcal{R}$. Moreover, due to Lemma \ref{lem:tanaka1}, we have \begin{equation} \label{Jzf} \pmb{\tau}(E(I_{\varepsilon})) \geq d-\varepsilon >0. \end{equation} Since $\mathcal{R}$ is a compact subset of $Q$, we find an open set $Q'$ with $$ \mathcal{R} \subset Q' \qquad \mbox{and} \qquad \overline{Q'} \subset Q.
$$ Moreover, due to the relation $$ L_{y+e_{\tau}}(s)=L_{y}(s+i\tau), $$ there exists an interval $\mathcal{I}\subset [0,l)$ of positive Lebesgue measure such that, for $(y,\tau)\in E(I_{\varepsilon})\times\mathcal{I}$, the function $L_{y+e_{\tau}}$ is analytic on $Q'$ and has at least one zero in $Q'$. By our identification of $K_{2\pi/l}\times [0,l)$ with $K$, this and \eqref{Jzf} imply that there is a subset $G\subset K$ with $\pmb{\sigma}(G)>0$ such that, for $x\in G$, the function $L_{x}$ is analytic on $Q'$ and has zeros in $Q'$. This is in contradiction to statement (f) of Theorem \ref{th:almostsurebehaviour}. Thus, $\mbox{\ d}ens\ J^c_{zf} = 0$ and, consequently, $\mbox{\ d}ens\ J_{zf}=1$. \end{proof} The next lemma deals with the reciprocals $L_{ne_l}^{-1}$ of the functions $L_{ne_l}$ attached to $\mathcal{L}$. \begin{lemma}\label{lem:boundIM} Let $u\in[\frac{1}{2},1)$ and $\mathcal{L}\in\mathbb{N}o(u)$. Suppose that $\mathcal{L}$ satisfies the Ramanujan hypothesis and can be written as a polynomial Euler product in $\sigma>1$. Let $\alpha\in(u,1]$, $l>0$ and $Q:=Q(\alpha,l)$ be defined by \eqref{def:Q}. Then, for any $\varepsilon>0$, there exists a subset $J_{\varepsilon}\subset \mathbb{N}$ with $\mbox{\ d}ens_* J_{\varepsilon} >1-\varepsilon$ such that the functions $L_{ne_l}^{-1}$ are analytic in $Q$ for $n\in J_{\varepsilon}$ and the family $\{L_{ne_l}^{-1}\}_{n\in J_{\varepsilon}}$ is locally bounded in $Q$. \end{lemma} \begin{proof} Due to the polynomial Euler product and the Dirichlet series representation of $\mathcal{L}$ in $\sigma>1$, the functions $L_{ne_l}$ are analytic and non-vanishing in $\sigma>1$ for every $n\in\mathbb{N}$; see Theorem \ref{th:Lkappa}. Moreover, for any $\sigma_0>1$, we find a constant $M\geq 1$ such that, for every $n\in\mathbb{N}$, \begin{equation}\label{Lnelbound} |L_{ne_l}(\sigma+it)| \geq \frac{1}{M} \qquad \mbox{and} \qquad |L_{ne_l}(\sigma+it)| \leq M \qquad \mbox{for }\sigma\geq\sigma_0.
\end{equation} We choose $\alpha^*\in (u,1]$ with $\alpha^*<\alpha$ and set $Q^*:=Q(\alpha^*,l)$. Furthermore, let $Q'$ denote the half-strip $$ Q' := Q(\alpha, \tfrac{3}{4}l) \subset Q^* $$ and $\mathcal{R}'$ its closure, i.e. $$ \mathcal{R}'= \left\{\sigma+it\in\mathbb{C} \, : \, \sigma\geq \alpha,\ -\tfrac{3}{4} l \leq t \leq \tfrac{3}{2} l\right\} \subset Q^*. $$ It follows from Lemma \ref{lem:normalityNo}, Lemma \ref{lem:Jzero} and \eqref{Lnelbound} that, for any $\varepsilon>0$, there exist a subset $I_{\varepsilon}\subset \mathbb{N}$ with $\mbox{\ d}ens_* I_{\varepsilon}>1-\frac{\varepsilon}{3}$ such that the following holds: \begin{itemize} \item[(i)] For $n\in I_{\varepsilon}$, the functions $L_{ne_l}$ are analytic in $Q^*$ and non-vanishing in $\mathcal{R}'\subset Q^*$. \item[(ii)] The family $\{L_{ne_l}\}_{n\in I_{\varepsilon}}$ is normal in $Q^*$. \end{itemize} From $(i)$ we deduce immediately that the functions $L_{ne_l}^{-1}$ are analytic on the domain $Q'$ for $n\in I_{\varepsilon}$.\par Moreover, the observation \eqref{Lnelbound} implies that the family $\{L_{ne_l}\}_{n\in I_{\varepsilon}}$ contains no sequence that converges locally uniformly on $Q'$ to $f\equiv 0$. With respect to this, it follows from the theorem of Hurwitz and the non-vanishing property (i) that, for every compact subset $\mathcal{K}\subset Q'$, we find a constant $M>0$ such that $$ \min_{s\in\mathcal{K}}|L_{ne_l}(s)| \geq \frac{1}{M}. $$ Hence, the family $\{L_{ne_l}^{-1}\}_{n\in I_{\varepsilon}}$ is locally bounded in $Q'$. Let $J_{\varepsilon}$ be the set of all $n\in I_{\varepsilon}$ for which $\{n-1,n,n+1\}\subset I_{\varepsilon}$. It is easy to show that $\mbox{\ d}ens_* J_{\varepsilon} > 1-\varepsilon$ and that $J_{\varepsilon}$ fulfills the assertions of the lemma. 
\end{proof} From Lemma \ref{lem:boundIM} we derive that, if $\mathcal{L}\in\mathbb{N}o(u)$ has a polynomial Euler product representation in $\sigma>1$, many functions related to $\mathcal{L}$ lie also in $\mathbb{N}o(u)$, in addition to the ones provided by Lemma \ref{lem:classNrelfct}. \begin{lemma}\label{lem:classNrelfct2} Let $u\in[\frac{1}{2},1)$ and $\mathcal{L}\in\mathbb{N}o(u)$. Suppose that $\mathcal{L}$ satisfies the Ramanujan hypothesis and can be written as a polynomial Euler product in $\sigma>1$. Then, \begin{itemize} \item[(a)] $\mathcal{L}^{\kappa} \in \mathbb{N}o(u)$ for any $\kappa\in\mathbb{R}$. \item[(b)] $\log \mathcal{L} \in \mathbb{N}o(u)$. \item[(c)] $\mathcal{L}'/\mathcal{L}\in\mathbb{N}o(u)$. \end{itemize} \end{lemma} \begin{proof} Let $\kappa\in\mathbb{R}$, $u\in[\frac{1}{2},1)$ and $\mathcal{L}\in\mathbb{N}o(u)$. We observe the following: \begin{itemize} \item[(i)] In the half-plane $\sigma>1$, the function $\mathcal{L}$ is given by a Dirichlet series which satisfies the Ramanujan hypothesis and can be written as a polynomial Euler product. It follows from Theorems \ref{th:Lkappa}, \ref{th:Dirichletlog} and \ref{th:dirichletlogderivative} that, in the half-plane $\sigma>1$, the functions $\mathcal{L}^{\kappa}$, $\log \mathcal{L}$ and $\mathcal{L}'/\mathcal{L}$ are also given by Dirichlet series which satisfy the Ramanujan hypothesis and, thus, lie in $\mathscr{H}^2$. \item[(ii)] Let $\alpha\in[u,1)$, $l,\varepsilon>0$ and $Q:=Q(\alpha,l)$. For $n\in\mathbb{N}$, we define $\log L_{ne_l}$ by $$ \log L_{ne_l}(s):= \log \mathcal{L}(s+inl), \qquad \sigma>1. $$ This choice assures that, uniformly for $n\in\mathbb{N}$, \begin{equation}\label{log} \lim_{\sigma\rightarrow\infty} \log L_{ne_l}(\sigma)=0.
\end{equation} Lemma \ref{lem:boundIM} implies that, for any $\varepsilon>0$, there is a set $J_{\varepsilon}\subset \mathbb{N}$ with $\mbox{\ d}ens_* J_{\varepsilon} >1-\varepsilon$ such that, for every $n\in J_{\varepsilon}$, the functions $\log L_{ne_l}$, $L^{\kappa}_{ne_l}$ and $L'_{ne_l}/ L_{ne_l}$ are well-defined and analytic on $Q$ and such that the families $$ \mathcal{F}_{\log} := \left\{\log L_{ne_l}\right\}_{n\in J_{\varepsilon}}, \qquad \mathcal{F}_{\kappa} := \left\{ L^{\kappa}_{ne_l}\right\}_{n\in J_{\varepsilon}} $$ and $$ \mathcal{F}_{L'/L} := \left\{ \frac{L'_{ne_l}}{L_{ne_l}}\right\}_{n\in J_{\varepsilon}} $$ are locally bounded on $Q$. Note that the local boundedness of $\mathcal{F}_{\log}$ can be deduced from the local boundedness of $\{L_{ne_l}\}_{n\in J_{\varepsilon}}$ on $Q$ by means of \eqref{log} and the Borel-Carath\'{e}odory theorem; see Titchmarsh \cite[\S 5.5]{titchmarsh:1939}. \end{itemize} The statement of the lemma follows from (i), (ii) and the definition of $\mathbb{N}o(u)$. \end{proof} We know that the Riemann zeta-function is an element of $\mathbb{N}o(\frac{1}{2})$. By means of Lemma \ref{lem:classNrelfct2} we obtain that also its logarithm $\log \zeta$, its logarithmic derivative $\zeta'/\zeta$ and its $\kappa$-th power $\zeta^{\kappa}$ with any $\kappa\in\mathbb{R}$ lie in $\mathbb{N}o(\frac{1}{2})$. \section{Vertical limit functions }\label{sec:verticallimit} Let $\mathcal{L}\in\mathbb{N}o(u)$ with $u\in[\frac{1}{2},1)$. By our considerations of the preceding section, we find that, for $\pmb{\sigma}$-almost every $x\in K$, the function $L_x$ occurs as a vertical limit function of $\mathcal{L}$ in $\sigma>u$. \begin{corollary}\label{cor:limitfctu} Let $u\in[\frac{1}{2},1)$ and $\mathcal{L}\in\mathbb{N}o(u)$. Let $\alpha\in(u, 1]$, $l>0$ and $Q:=Q(\alpha,l)$ be defined by \eqref{def:Q}.
Then, there is a subset $G\subset K$ with $\pmb{\sigma}(G)=1$ and a subset $A\subset \mathbb{R}^+$ such that \begin{equation}\tag{$a$} \{L_x \, : \, x\in G\} \subset \overline{\left\{L_{e_{\tau}} \, : \, \tau\in A\right\}} \subset \mathcal{H}(Q) \end{equation} Moreover, there is a subset $E\subset K$ with $\pmb{\tau}(E)=1$ and a subset $J\subset \mathbb{N}$ such that \begin{equation}\tag{$b$} \{L_y \, : \, y\in K_{2\pi/l}\} \subset \overline{\left\{L_{ne_{l}} \, : \, n\in J \right\}} \subset \mathcal{H}(Q). \end{equation} Here, $\mathcal{H}(Q)$ denotes the set of analytic functions on $Q$ and the closures above are taken with respect to the topology of uniform convergence on compact subsets of $Q$. \end{corollary} If $\mathcal{L}$ satisfies the Ramanujan hypothesis and the mean-square condition of Theorem \ref{th:suffconditionsN} (b) in $\sigma>u$, the statement of the lemma is well-known both for $\mathcal{L}$ and several functions related to $\mathcal{L}$; for example for $\mathcal{L}^k$ with $k\in\mathbb{N}$, $\mathcal{L}^{(\ell)}$ with $\ell\in\mathbb{N}$ and, if $\mathcal{L}$ has a polynomial Euler product, also for $\log \mathcal{L}$ and $\mathcal{L}^{-1}$. By the definition of the normality feature, it is natural that the statement persists for all functions in $\mathbb{N}o(u)$. Here, as far as the author knows, it may be considered as new that the statement of Corollary \ref{cor:limitfctu} holds for functions $\mathcal{L}$ which satisfy the $a$-point density estimate of Theorem \ref{th:suffconditionsN} (c). \begin{proof}[Proof of Corollary \ref{cor:limitfctu}] Statement (a) follows directly from Lemma \ref{lem:tanaka1}, Lemma \ref{lem:normalityNo} and Lemma \ref{lem:normalNproperties} (a). Statement (b) follows from (a) by observing that we can identify every $x\in K$ with an element $(y,u)\in K_{2\pi/l}\times [0,l)$. 
\end{proof} \chapter[Discrete and continuous moments to the right of the critical line]{Discrete and continuous moments }\label{ch:probmom} \section{An extension of a theorem due to Tanaka to the class \texorpdfstring{$\mathbb{N}o(u)$}{ }}\label{sec:probmom} In this chapter we extend a result of Tanaka \cite{tanaka:2008}, which he obtained for the Riemann zeta-function, to functions in the class $\mathbb{N}o$. Building on the preliminary works of the preceeding sections, we strongly rely on his methods and ideas to prove our result.\par We introduce the following notation. For a given $l>0$, a set $A\subset [1,\infty)$ is said to be an {\it $l$-set of density zero} if there exists a subset $J\subset \mathbb{N}$ with $\mbox{\ d}ens \ J = 0$ such that $$ A= \bigcup_{n\in J} [nl,(n+1)l). $$ It is easy to see that an $l$-set $A$ of density zero satisfies \begin{equation*}\label{eq:setA} \lim_{T\rightarrow\infty} \frac{1}{T} \int_1^T \pmb{1}_A(t) \mbox{\ d} t = 0\qquad \mbox{and}\qquad\lim_{T\rightarrow\infty} \frac{1}{T} \int_1^T \pmb{1}_{A^c}(t) \mbox{\ d} t = 1, \end{equation*} where $\pmb{1}_X$ denotes the indicator function of a set $X\subset \mathbb{R}$ and $X^c:=\mathbb{R}\setminus X$ its complement.\par Let $u\in[\frac{1}{2},1)$ and $\mathcal{L}\in\mathbb{N}o(u)$. Further, let $p:\mathbb{C}\rightarrow\mathbb{C}$ be a continuous function with $p(z)\ll |z|^2$, as $|z|\rightarrow\infty$. We shall establish asymptotic formulas for moments of the form $$ \frac{1}{T} \int_1^T p\left( \mathcal{L}(\sigma + it) \right) \pmb{1}_{A^c}(t) \mbox{\ d} t, \qquad \sigma>u, \qquad \mbox{as }T\rightarrow\infty, $$ where we omit a certain $l$-set $A\subset [1,\infty)$ of density zero from the path of integration. 
Moreover, for $l>0$, we shall derive asymptotic formulas for discrete moments of the form $$ \frac{1}{N} \sum_{n=1}^N p\left(\mathcal{L}(\sigma+i\lambda + inl) \right)\pmb{1}_{A^c}(nl) , \qquad \sigma>u, \qquad 0\leq\lambda\leq l,\qquad \mbox{as }N\rightarrow\infty, $$\par where we neglect, by the definition of $A$, a certain set $J\subset\mathbb{N}$ of density zero in the summation. \par The next theorem is the main theorem of Part II of this thesis. \begin{theorem}\label{th:probmom} Let $u\in[\frac{1}{2},1)$. Let $(\mathcal{L}_j,p_j)_j$ be a sequence of pairs which consist of a function $\mathcal{L}_j\in\mathbb{N}o(u)$ and a continuous function $p_j:\mathbb{C}\rightarrow\mathbb{C}$ satisfying \begin{equation}\label{condC}\tag{C} p_j(z) \ll_j |z|^2 ,\qquad \mbox{ as } |z|\rightarrow\infty. \end{equation} Let $L_{j}$ denote the extension of $\mathcal{L}_j$ to $\mathbb{C}\times K$ defined by \eqref{Ls}. Then, for any $\alpha\in(u, 1]$ and $l>0$, there exist an $l$-set $A\subset [1,\infty)$ of density zero and a sequence $(N_j)_j$ of positive integers such that the following holds: \begin{itemize} \item[(i)] For every $j\in\mathbb{N}$, as $T\rightarrow\infty$, $$ \frac{1}{T} \int_{N_j}^{T}\int_{\alpha}^{2} p_j\bigl(\mathcal{L}_j(\sigma+it)\bigr)\cdot \pmb{1}_{A^c}(t) \mbox{\ d}\sigma \mbox{\ d} t = \int_{\alpha}^{2}\int_{K} p_j\bigl(L_{j}(\sigma,x) \bigr) \mbox{\ d} \pmb{\sigma} \mbox{\ d}\sigma + o_{j}(1). $$ \item[(ii)] For every $j\in\mathbb{N}$, uniformly for $\alpha\leq \sigma\leq 2$, as $T\rightarrow\infty$, \begin{equation*}\label{tanakastar} \frac{1}{T} \int_{N_j}^{T} p_j\bigl(\mathcal{L}_j(\sigma+it)\bigr) \cdot \pmb{1}_{A^c}(t)\mbox{\ d} t = \int_{K} p_j\bigl(L_{j}(\sigma,x) \bigr) \mbox{\ d} \pmb{\sigma} + o_{j}(1). \end{equation*} \item[(iii)] Suppose that $l\notin\Gamma_{P}:= \{2\pi k(\log\frac{n}{m})^{-1} \, : \, k,n,m\in\mathbb{N}, n\neq m\}$.
Then, for every $j\in\mathbb{N}$, uniformly for $\alpha\leq \sigma\leq 2$ and $0\leq \lambda \leq l$, as $N\rightarrow\infty$, $$ \frac{1}{N } \sum_{n=N_j}^N \, p_j\bigl( \mathcal{L}_j(\sigma+i\lambda+inl) \bigr)\cdot \pmb{1}_{A^c}(nl) = \int_{K_{2\pi/l}} p_j\bigl(L_{j}(\sigma,x) \bigr) \mbox{\ d} \pmb{\tau} + o_{j}(1). $$ \end{itemize} \end{theorem} \par Tanaka established statement (ii) of Theorem \ref{th:probmom} for the Riemann zeta-function $\zeta$, its $\kappa$-th power $\zeta^{\kappa}$, $\kappa\in\mathbb{R}$, with the special choices of $p$ given by $p(z)=z$ and $p(z)=|z|^2$. We extend Tanaka's result to the quite general class $\mathbb{N}o$. Moreover, we provide with (i) and (ii) an integrated and a discrete version of (ii). Some remarks to Theorem \ref{th:probmom} are in order: \begin{itemize} \item[1.] Suppose that $\mathcal{L}\in\mathbb{N}o(u)$ has Dirichlet expansion $$ \mathcal{L}(s)=\sum_{n=1}^{\infty} \frac{a(n)}{n^s} , \qquad \sigma>1 $$ and that $l\notin \Gamma_P=\{2\pi k(\log\frac{n}{m})^{-1} \, : \, k,n,m\in\mathbb{N}\}$. Then, it follows from our considerations in Section \ref{sec:DirichletH2}, in particular from the identities \eqref{plan1} and \eqref{plan2}, that $$ \int_{K_ {2\pi/l}} \left| L(\sigma,y) \right|^2 \mbox{\ d}\pmb{\tau} = \int_{K} \left| L(\sigma,x) \right|^2 \mbox{\ d} \pmb{\sigma} = \sum_{n=1}^{\infty} \frac{|a(n)|^2}{n^{2\sigma}}, \qquad \sigma>u, $$ and $$ \int_{K_ {2\pi/l}} L(\sigma,y) \mbox{\ d}\pmb{\tau} = \int_{K} L(\sigma,x) \mbox{\ d} \pmb{\sigma} = a(1), \qquad \sigma>u. $$ \item[2.] The proof of Theorem \ref{th:probmom} shall show that statement (iii) remains valid for $l\in \Gamma_{P}$, if the function defined by $$ f(\sigma)=\int_{K_{2\pi/l}} \bigl|L_{j}(\sigma,x) \bigr|^2 \mbox{\ d} \pmb{\tau} $$ is continuous for $\sigma>u$. The results of Reich \cite{reich:1980-2} and Lemma \ref{lem:lLambda} assert that this is the case, if $\mathcal{L}\in\mathbb{N}o(u)$ has a polynomial Euler product representation. \item[3.] 
Let $\mathcal{L}_j$, $u$, $p_j$, $N_j$ and $A$ be as in Theorem \ref{th:probmom}. Let $\overline{A^c}$ denote the closure of $A^c = \mathbb{R}\setminus A$. If $\mathcal{L}_j$ is analytic in the region $$ \left\{ s\in\mathbb{C} \, : \, u<\sigma\leq 2, \, t\in \overline{A^c}\cap [0,N_j] \right\}, $$ then the limits in (i), (ii) and (iii) are not affected by replacing $N_j$ by $1$. \item[4.] The statements of Theorem \ref{th:probmom} can be formulated in an analogous manner for the lower half-plane: there exists an $l$-set $A\subset(-\infty,-1]$ of density zero and a sequence of negative integers $(N_j)_j$ such that the statements (i)-(iii) hold as $T\rightarrow -\infty$. \item[5.] By a diagonal argument (see Tanaka \cite[\S 5]{tanaka:2008}), similarly to the one that we shall use in the last step of our proof, we find a common $l$-set $A$ of density zero in Theorem \ref{th:probmom} such that the limits of statements (i)-(iii) hold for every $\sigma>u$. In this case, however, we lose the uniformity in $\sigma$, resp. the uniformity in $\sigma$ and $\lambda$. \item[6.] In the half-plane where the Dirichlet series expansion of $\mathcal{L}\in\mathbb{N}o$ converges uniformly, the statements (i)-(iii) hold trivially for $\mathcal{L}$ with $A=\emptyset$. This follows essentially from its almost periodic behaviour. Thus, the statements of Theorem \ref{th:probmom} are especially of interest if $\alpha$ is less than the abscissa of uniform convergence $\sigma_u$ of $\mathcal{L}$ or if the exact value of $\sigma_u$ is not known. We recall here the difficulties to determine $\sigma_u$ for functions in the extended Selberg class; see Section \ref{sec:charconvabs}. \item[7.] In the mean-square half-plane $\sigma>\sigma_m$ of $\mathcal{L}\in\mathbb{N}o$, i.e.
in the half-plane where the classical continuous mean-square of $\mathcal{L}$ is bounded, statement (ii) of Theorem \ref{th:probmom} holds trivially with $A=\emptyset$ and $p(z)=|z|^2$, due to Carlson's theorem (Theorem \ref{th:carlson}); for any other admissible choice of $p$ the limit superior of the left-hand side of (ii) is at least bounded. By the dominated convergence theorem, the same applies to (i). Additionally, in the half-plane $\sigma>\sigma_m$, there are methods available that allow us to establish asymptotic expansions for discrete mean-values in (iii) with $A=\emptyset$; see, for example, Montgomery \cite[Chapt. 1]{montgomery:1971}, Reich \cite{reich:1980-2} and Good \cite{good:1978}. Thus, statements (i)-(iii) are especially of interest if $\alpha<\sigma_m$ or if the exact value of $\sigma_m$ is not known. \item[8.] Tanaka's method is strongly related to a method of Reich \cite{reich:1980-2}. For functions $\mathcal{L}$ with polynomial Euler product representation of order two, Reich showed that the discrete and continuous mean-square value of $\mathcal{L}$ coincide in its mean-square half-plane, provided that $l\notin\Gamma_P$, i.e., for $l\notin\Gamma_P$, $\sigma>\sigma_m$, $$ \lim_{N\rightarrow\infty} \frac{1}{N} \sum_{n=1}^N\left|\mathcal{L}(\sigma+inl)\right|^2 = \lim_{T\rightarrow\infty}\frac{1}{T}\int_0^{T} \left| \mathcal{L}(\sigma+it)\right|^2 \mbox{\ d} t = \sum_{n=1}^{\infty} \frac{|a(n)|^2}{n^{2\sigma}}. $$ Reich derived also asymptotic expansions for the case $l\in\Gamma_P$. Reich relied on a uniform distribution result and the existence of the classical continuous square-mean of $\mathcal{L}$. By loss of a set of density zero, Tanaka used the \emph{uniqueness} of the ergodic system $(K_{2\pi/l}, T_t)$ and, instead of working with the continuous mean-square value directly, relied on a property of $\mathcal{L}$ which we revealed as the normality feature (N.2) in Section \ref{sec:classN}. \item[9.]
By definition, an $l$-set $A\subset [1,\infty)$ of density zero satisfies \begin{equation*}\label{erg} \frac{1}{T}\int_1^{T} \pmb{1}_{A}(t) \mbox{\ d} t = o(1), \qquad \mbox{as }T\rightarrow\infty. \end{equation*} It would be interesting if the statements of Theorem \ref{th:probmom} are also true for $l$-sets $A$ whose density can be bounded asymptotically in a better way than above. Here, however, some additional reasoning seems to be necessary. \end{itemize} We state some immediate corollaries of Theorem \ref{th:probmom}. \begin{corollary}\label{cor1} Let $u\in[\frac{1}{2},1)$ and $\mathcal{L}\in \mathbb{N}o(u)$. Suppose that the Dirichlet series expansion of $\mathcal{L}$ satisfies the Ramanujan hypothesis. Let $\alpha\in(u,1]$ and $l>0$. Then, there exists an $l$-set $A\subset[1,\infty)$ of density zero such that, for every $k\in\mathbb{N}$ and uniformly for $\sigma\in[ \alpha,2]$, $$ \lim_{T\rightarrow\infty} \frac{1}{T} \int_{1}^T \left|\mathcal{L}(\sigma + it) \right|^{2k} \pmb{1}_{A^c} (t) \mbox{\ d} t = \sum_{n=1}^{\infty} \frac{|a_k(n)|^2}{n^{2\sigma}} $$ and $$ \lim_{T\rightarrow\infty} \frac{1}{T} \int_{1}^T \mathcal{L}^k(\sigma + it) \ \pmb{1}_{A^c} (t) \mbox{\ d} t = a_k(1), $$ where the $a_k(n)$ denote the coefficients of the Dirichlet series expansion of $\mathcal{L}^k$.
If $\mathcal{L}$ can be written additionally as a polynomial Euler product in $\sigma>1$, then we find an $l$-set $A\subset[1,\infty)$ of density zero such that, for every $k\in\mathbb{N}$, uniformly for $\sigma\in[\alpha,2]$, \begin{equation}\label{ww} \lim_{T\rightarrow\infty} \frac{1}{T} \int_{1}^T \left| \mathcal{L}(\sigma+it)\right|^{-2k} \pmb{1}_{A^c}(t) \mbox{\ d} t = \sum_{n=1}^{\infty} \frac{|a_{-k}(n)|^2}{n^{2\sigma}}, \end{equation} \begin{equation}\label{dw} \lim_{T\rightarrow\infty} \frac{1}{T}\int_1^T \left| \log \mathcal{L}(\sigma+it)\right|^{2} \pmb{1}_{A^c}(t) \mbox{\ d} t = \sum_{n=1}^{\infty} \frac{|a_{\log\mathcal{L}}(n)|^2}{n^{2\sigma}} \end{equation} and \begin{equation}\label{www} \lim_{T\rightarrow\infty} \frac{1}{T}\int_1^T \left| \frac{\mathcal{L}'(\sigma+it)}{\mathcal{L}(\sigma+it)}\right|^2 \pmb{1}_{A^c}(t) \mbox{\ d} t = \sum_{n=1}^{\infty} \frac{|\Lambda_{\mathcal{L}}(n)|^2}{n^{2\sigma}}, \end{equation} where the $a_{-k}(n)$, $a_{\log\mathcal{L}}(n)$ and $\Lambda_{\mathcal{L}}(n)$ denote the coefficients of the Dirichlet series expansion of $\mathcal{L}^{-k}$, $\log\mathcal{L}$ and $\mathcal{L}'/\mathcal{L}$, respectively. \end{corollary} \begin{proof} According to Lemma \ref{lem:classNrelfct}, we have that $\mathcal{L}^k\in \mathbb{N}o(u)$ for any $k\in \mathbb{N}$. It follows from Lemma \ref{lem:classNrelfct2} that $\mathcal{L}^{-k}$ with $k\in\mathbb{N}$, $\log \mathcal{L}$ and $\mathcal{L}'/\mathcal{L}$ are elements of $\mathbb{N}o(u)$, if $\mathcal{L}$ can be written additionally as a polynomial Euler product in $\sigma>1$. The statement follows directly from Theorem \ref{th:probmom} by respecting Remarks 1 and 3 stated after Theorem \ref{th:probmom}. \end{proof} Let $\mathcal{L}\in\mathcal{S}$ have positive degree $d_{\mathcal{L}}$.
The function $\mathcal{L}$ satisfies the Lindel\"of hypothesis if and only if, for every $\sigma>\frac{1}{2}$ and $k\in \mathbb{N}$, \begin{equation}\label{int} \lim_{T\rightarrow\infty} \frac{1}{2T}\int_{-T}^{T} \left|\mathcal{L}(\sigma+it) \right|^{2k} \mbox{\ d} t= \sum_{n=1}^{\infty}\frac{|a_k(n)|^2}{n^{2\sigma}}, \end{equation} where the $a_k(n)$ denote the Dirichlet series coefficients of $\mathcal{L}^k$. This follows essentially from classical methods due to Hardy \& Littlewood \cite{hardylittlewood:1923}, who settled the case of the Riemann zeta-function; see also Steuding \cite[Chapt. 6]{steuding:2007}. For given $k\in\mathbb{N}$, we know so far only for \begin{equation}\label{q} \sigma> \max\{\tfrac{1}{2},1-\tfrac{1}{kd_{\mathcal{L}}}\} \end{equation} that \eqref{int} is true; see Section \ref{subsec:meansquare} for details. Let $\mathcal{L}_1,...,\mathcal{L}_n$ be primitive functions in the Selberg class of degrees $d_{\mathcal{L}_1},...,d_{\mathcal{L}_n}$ such that $$ \mathcal{L}=\mathcal{L}_1 \cdots \mathcal{L}_n. $$ We set $d_{\mathcal{L}}^*=\max\{d_{\mathcal{L}_1},...,d_{\mathcal{L}_n}\}$. We deduce from Lemma \ref{lem:classNrelfct} and Theorem \ref{th:suffconditionsN} (b) that $$\mathcal{L}\in \mathbb{N}o(\max\{\tfrac{1}{2}, 1- \tfrac{1}{d_{\mathcal{L}}^*}\}).$$ By Theorem \ref{th:probmom} and Remark 5 after Theorem \ref{th:probmom}, we know that, for any $l>0$, there is an $l$-set $A\subset [1,\infty)$ of density zero such that, for every $k\in\mathbb{N}$ and $\sigma>\max\{\tfrac{1}{2}, 1- \tfrac{1}{d_{\mathcal{L}}^*}\}$, $$ \lim_{T\rightarrow\infty} \frac{1}{T} \int_{1}^{T} \left|\mathcal{L}(\sigma+it) \right|^{2k}\pmb{1}_{A^c}(t)\mbox{\ d} t = \sum_{n=1}^{\infty}\frac{|a_k(n)|^2}{n^{2\sigma}}. $$ Thus, in a certain measure-theoretical sense, \eqref{int} is true in the half-plane $$\sigma>\max\{\tfrac{1}{2}, 1- \tfrac{1}{d_{\mathcal{L}}^*}\}.$$ Let $\mathcal{L}\in\mathcal{S}$.
Due to possible zeros of $\mathcal{L}$ in $\sigma>\frac{1}{2}$, it is difficult to obtain unconditional asymptotic expansions for the moments in \eqref{ww}, \eqref{dw} and \eqref{www} with $A=\emptyset$. We refer to Selberg \cite{selberg:1992} for certain conditional results. \par Next, we state a discrete version of Corollary \ref{cor1}. \begin{corollary} Let $u\in[\frac{1}{2},1)$ and $\mathcal{L}\in \mathbb{N}o(u)$. Suppose that the Dirichlet series expansion of $\mathcal{L}$ satisfies the Ramanujan hypothesis. Let $\alpha\in(u,1]$ and $l>0$. \begin{itemize} \item[(a)] If $l\notin\Gamma_{P}:= \{2\pi k(\log\frac{n}{m})^{-1} \, : \, k,n,m\in\mathbb{N}, n\neq m\}$, then there exist an $l$-set $A\subset[1,\infty)$ of density zero such that, for every $k\in\mathbb{N}$, uniformly for $\sigma\in[ \alpha,2]$ and $\lambda\in[0,l]$, \begin{equation}\label{dis} \lim_{N\rightarrow\infty}\frac{1}{N} \sum_{n=1}^N \bigl|\mathcal{L}(\sigma + i\lambda + inl) \bigr|^{2k} \pmb{1}_{A^c} (nl) = \sum_{n=1}^{\infty} \frac{|a_k(n)|^2}{n^{2\sigma}} \end{equation} and $$ \lim_{N\rightarrow\infty}\frac{1}{N} \sum_{n=1}^N \mathcal{L}(\sigma+ i\lambda + inl)^k \, \pmb{1}_{A^c} (nl) = a_k(1). $$ \item[(b)] Suppose additionally that $\mathcal{L}$ can be written as a polynomial Euler product. 
If $l=2\pi k / \log p$ with some $k\in\mathbb{N}$ and $p\in\mathbb{P}$, then there exist an $l$-set $A\subset[1,\infty)$ of density zero such that, for every $k\in\mathbb{N}$, uniformly for $\sigma\in[ \alpha,2]$ and $\lambda\in[0,l]$, $$ \lim_{N\rightarrow\infty}\frac{1}{N} \sum_{n=1}^N \bigl|\mathcal{L}(\sigma + i\lambda + inl) \bigr|^{2k} \cdot \pmb{1}_{A^c} (nl) = \left| \prod_{j=1}^m \left(1-\frac{\alpha_j(p)}{p^{\sigma}}\right)^{-2k} \right| \cdot \sum_{\begin{subarray}{c}n\in \mathbb{N}\\ p\nmid n \end{subarray}} \frac{|a_{k}(n)|^2}{n^{2\sigma}} $$ and $$ \lim_{N\rightarrow\infty}\frac{1}{N} \sum_{n=1}^N \mathcal{L}(\sigma+ i\lambda + inl)^k \, \pmb{1}_{A^c} (nl) = a_{k}(1) \cdot \prod_{j=1}^m \left(1-\frac{\alpha_j(p)}{p^{\sigma}}\right)^{-k}. $$ \end{itemize} Here, the $a_k(n)$ denote the coefficient of the Dirichlet series expansion of $\mathcal{L}^k$ and $\alpha_j(p)$ the local roots of the polynomial Euler product representation of $\mathcal{L}$. \end{corollary} \begin{proof} The statement follows directly from Lemma \ref{lem:classNrelfct} and Theorem \ref{th:probmom} (ii) by respecting the Remarks 1,2,3 and Lemma \ref{lem:lLambda}. \end{proof} For certain functions $\mathcal{L}$ with polynomial Euler product of order two, Reich \cite{reich:1980-2} proved that \eqref{dis} holds for $k=2$ and $A=\emptyset$ in the mean-square half-plane of $\mathcal{L}$. Good \cite{good:1978} used a different method to establish \eqref{dis} for certain functions in their mean-square half-plane with $A=\emptyset$. Besides a polynomial Euler product of order two, he assumed additionally the existence of an approximate functional equation and got better bounds in the asymptotic expansion \eqref{dis} than the ones provided by Reich \cite{reich:1980-2}. 
Both Reich \cite{reich:1980-2} and Good \cite{good:1978} studied also the case $l\in \Gamma_P$.\par The next corollary shows that the $k$-th power $\mathcal{L}^k$ of a function $\mathcal{L}\in\mathbb{N}o(u)$ which satisfies the Ramanujan hypothesis can be approximated in mean-square by certain Dirichlet polynomials in $\sigma>u$. \begin{corollary}\label{cor:Dirichletpol} Let $u\in[\frac{1}{2},1)$ and $\mathcal{L}\in \mathbb{N}o(u)$. Suppose that $\mathcal{L}$ satisfies the Ramanujan hypothesis. Let $\alpha\in(u,1]$ and $l>0$. Then, there exists an $l$-set $A\subset[1,\infty)$ of density zero such that, for every $k\in\mathbb{N}$, $$ \lim_{N\rightarrow\infty}\lim_{T\rightarrow\infty} \frac{1}{T} \int_1^T \int_{\alpha}^2 \left|\mathcal{L}^k(\sigma + it) - \sum_{n=1}^{N} \frac{a_k(n)}{n^{\sigma+it}}\right|^{2} \pmb{1}_{A^c}(t) \mbox{\ d} t \mbox{\ d} \sigma = 0. $$ Here, the $a_k(n)$ denote the coefficients in the Dirichlet series expansion of $\mathcal{L}^k$. \end{corollary} \begin{proof} Let $\mathcal{L}\in\mathbb{N}o(u)$. Then, according to Lemma \ref{lem:classNrelfct}, the function defined by $$ \mathcal{L}^k_N(s):= \mathcal{L}^k(s)- \sum_{n=1}^{N} \frac{a_k(n)}{n^{s}} =\sum_{n=N+1}^{\infty}\frac{a_k(n)}{n^{s}}, \qquad \sigma>1, $$ with $k, N\in \mathbb{N}$, lies also in $\mathbb{N}o(u)$. By observing that $$ \lim_{N\rightarrow\infty} \sum_{n=N+1}^{\infty}\frac{|a_k(n)|^2}{n^{2\sigma}} = 0 ,\qquad \sigma>\frac{1}{2}, $$ the assertion can be derived from Theorem \ref{th:probmom} (i). \end{proof} In the mean-square half-plane of $\mathcal{L}^k$ the statement of Corollary \ref{cor:Dirichletpol} can be established for $A=\emptyset$ by standard methods relying on the residue theorem; see for example Steuding \cite[Chapt. 4.4]{steuding:2007}.
We refer here also to Lee \cite{lee:2012} who proved that the logarithm of Hecke $\mathcal{L}$-functions can be approximated by certain Dirichlet polynomials in $\sigma>\frac{1}{2}$, under the assumption of a certain zero-density conjecture.\par \section{Proof of the main theorem} \subsection*{Auxiliary lemmas} We start with some lemmas. \begin{lemma}\label{lem:arithmeticmean} Let $I\subset \mathbb{N}$ and $(a_n)_n$ be a sequence of complex numbers such that $$ \lim_{N\rightarrow\infty} \frac{1}{N}\sum_{\begin{subarray}{c} n\in I \\ n\leq N \end{subarray}} a_n = a $$ with some $a\in\mathbb{C}$. \begin{itemize} \item[(a)] Suppose that, for $n\in\mathbb{N}$, the quantities $a_n$ are non-negative real numbers. Then, the limit $a$ is real and, for any $\delta>0$, there exists an integer $N_{\delta}\in\mathbb{N}$ such that, for every $N\in \mathbb{N}$ and every set $J \subset \mathbb{N}$ with $\{1,...,N_{\delta}\}\subset J$, the inequality $$ \frac{1}{N}\sum_{\begin{subarray}{c} n\in I\setminus J \\ n\leq N \end{subarray}} | a_n | < a +\delta $$ is true. \item[(b)] Suppose that there is a constant $C>0$ such that $|a_n|\leq C$ for $n\in\mathbb{N}$. Then, for any subset $J\subset I$ with $\mbox{\ d}ens \ J = 0$, $$ \lim_{N\rightarrow \infty} \frac{1}{N}\sum_{\begin{subarray}{c} n\in I\setminus J \\ n\leq N \end{subarray}} a_n = a. $$ \end{itemize} \end{lemma} \begin{proof} The assertions of the lemma follow by standard convergence arguments, respecting the conditions posed on $a_n$ and $J$, respectively. \end{proof} The next lemma is crucial for the proof of the theorem and extends a lemma of Tanaka \cite[Lemma 5.2]{tanaka:2008}. \begin{lemma}\label{lem:probmom} Let $u\in[\frac{1}{2},1)$ and $\mathcal{L}\in\mathbb{N}o(u)$. Let $L$ be the function connected to $\mathcal{L}$ by means of \eqref{L}. Furthermore, let $\alpha'',\alpha',\alpha\in(u,1]$ with $\alpha''<\alpha'<\alpha$ and $l>0$.
For $M>0$, let $J(M):=J(M,l,\alpha'',\mathcal{R}',\mathcal{L})\subset \mathbb{N}$ be defined as in Lemma \ref{lem:JM}, where we choose $\mathcal{R}'$ to be the compact rectangular set $$ \mathcal{R}':=\left\{\sigma+it\in\mathbb{C} \, : \, \alpha' \leq \sigma \leq 2, \, -\tfrac{3}{4}l \leq t \leq \tfrac{3}{2}l \right\}. $$ Then, either statement (A) or statement (B) is true: \begin{itemize} \item[(A)] There is a real number $M_1\geq 1$ such that $\pmb{\tau}(E(J(M_{1}))) = 1$. In this case, we set $\varTheta=\{1\}$. \item[(B)] There are real numbers $M_k\geq 1$ with $k\in \mathbb{N}$ such that \begin{itemize} \item[ ]$\qquad \pmb{\tau}(E(J(M_1)))>0$, \item[ ]$\qquad \pmb{\tau}(E(J(M_k))) < \pmb{\tau} (E(J(M_{k+1}))) \mbox{ for }k\in\mathbb{N} $ \item[ ]$\qquad \displaystyle \mbox{and }\lim_{k\rightarrow\infty} \pmb{\tau} (E(J(M_k))) =1.$ \end{itemize} In this case, we set $\varTheta=\mathbb{N}$. \end{itemize} In both cases, the following holds: \begin{itemize} \item[(i)] Let $M_0:=0$ and $I_k:= J(M_k)\setminus J(M_{k-1})$ for $k\in\varTheta$. Then, for any $j,k\in\varTheta$ with $j\neq k$, $$ \pmb{\tau}(E(I_j)\cap E(I_k)) = 0. $$ \item[(ii)] Let $p:\mathbb{C}\rightarrow\mathbb{R}^+_0$ be a non-negative, continuous function, $k\in\varTheta$ and $$ G_k := \left\{ y+e_t \, : \, (y,t)\in E(I_k)\times [0,l) \right\} \subset K.
$$ Then, for any $\delta>0$, there exists a finite subset $\Delta_k\subset I_k$ such that, for every $\sigma\in [\alpha,2]$, every $\lambda\in[0,l]$, every $N\in\mathbb{N}$ and every $J \subset \mathbb{N}$ with $\Delta_k \subset J$, \begin{equation}\tag{$\spadesuit$}\label{spade1} \frac{1}{N l} \sum_{\begin{subarray}{c} n\in I_k\setminus J \\ n\le N \end{subarray}} \int_{n l}^{(n+1) l} \int_{\alpha}^2 p\bigl(\mathcal{L}(\sigma+it)\bigr) \mbox{\ d} t \mbox{\ d} \sigma \leq \int_{\alpha}^2 \int_{G_k} p\bigl(L(\sigma,x)\bigr) \mbox{\ d} \pmb{\sigma} \mbox{\ d} \sigma + \delta, \end{equation} \begin{equation}\tag{$\diamondsuit$}\label{diamond1} \frac{1}{N l} \sum_{\begin{subarray}{c} n\in I_k\setminus J \\ n\le N \end{subarray}} \int_{n l}^{(n+1) l} p\bigl(\mathcal{L}(\sigma+it)\bigr) \mbox{\ d} t \leq \int_{G_k} p\bigl(L(\sigma,x)\bigr) \mbox{\ d} \pmb{\sigma} + \delta \end{equation} and \begin{equation}\tag{$\divideontimes$}\label{divide1} \frac{1}{N}\sum_{\begin{subarray}{c} n\in I_k\setminus J \\ n\le N \end{subarray}} p\bigl(\mathcal{L}(\sigma+i\lambda+inl)\bigr) \leq \int_{E(I_k)} p\bigl(L(\sigma,y)\bigr) \mbox{\ d} \pmb{\tau} + \delta. \end{equation} \item[(iii)] Let $p:\mathbb{C}\rightarrow\mathbb{C}$ be a continuous function, $k\in\varTheta$ and $G_k$ be defined as above. Further, let $J\subset\mathbb{N}$ with $\mbox{\ d}ens\ J = 0$.
Then, uniformly for $\sigma\in[\alpha,2]$ and $\lambda\in[0,l]$, as $N\rightarrow\infty$, \begin{equation}\tag{$\clubsuit$}\label{spade2} \frac{1}{N l} \sum_{\begin{subarray}{c} n\in I_k\setminus J \\ n\le N \end{subarray}} \int_{0}^{ l} \int_{\alpha}^2 p\bigl(\mathcal{L}(\sigma+it)\bigr) \mbox{\ d} t \mbox{\ d} \sigma = \int_{\alpha}^2 \int_{G_k} p\bigl(L(\sigma,x)\bigr) \mbox{\ d} \pmb{\sigma} \mbox{\ d} \sigma + o(1), \end{equation} \begin{equation}\tag{$\triangledown$}\label{diamond2} \frac{1}{N l} \sum_{\begin{subarray}{c} n\in I_k\setminus J \\ n\le N \end{subarray}} \int_{0}^{ l} p\bigl(\mathcal{L}(\sigma+it)\bigr) \mbox{\ d} t = \int_{G_k} p\bigl(L(\sigma,x)\bigr) \mbox{\ d} \pmb{\sigma} + o(1) \end{equation} and \begin{equation}\tag{$\times$}\label{divide2} \frac{1}{N}\sum_{\begin{subarray}{c} n\in I_k\setminus J \\ n\le N \end{subarray}} p\bigl(\mathcal{L}(\sigma+i\lambda+inl)\bigr) = \int_{E(I_k)} p\bigl(L(\sigma,y)\bigr) \mbox{\ d} \pmb{\tau} + o(1). \end{equation} \end{itemize} \end{lemma} \begin{proof}[Proof of Lemma \ref{lem:probmom}] We divide the proof into several steps. First we shall figure out that either statement (A) or statement (B) is true.\par {\bf The behaviour of $\pmb{\tau}(E(J(M)))$ as $M\rightarrow \infty$.} We consider the function $F:\mathbb{R}^+\rightarrow [0,1]$ defined by $$ F(M):= \pmb{\tau}(E(J(M))) \qquad\mbox{ for } M>0. $$ It follows from the definition of $J(M)$ that $$ J(M) \subset J(M') \qquad \mbox{ for }0<M<M' $$ and, consequently, that $$ E(J(M)) \subset E(J(M'))\qquad \mbox{ for }0<M<M'. $$ This implies that the function $F$ is monotonically increasing. Lemma \ref{lem:JM} together with Lemma \ref{lem:tanaka1} yields that \begin{equation}\label{pmf1} \lim_{M\rightarrow\infty} F(M)=1. \end{equation} If we find a real number $M_1>0$ such that $$ F(M)=1\qquad \mbox{ for } M\geq M_1, $$ then statement (A) is true. 
Otherwise, if $F(M)<1$ for every $M>0$, we find, according to \eqref{pmf1}, a sequence $(M_k)_k$ of real numbers $M_k\geq 1$ such that $$ F(M_1)>0, \qquad F(M_k)<F(M_{k+1}) \; \mbox{ for }k\in\mathbb{N} \qquad \mbox{ and } \qquad \lim_{k\rightarrow\infty} M_k =\infty. $$ In this case, statement (B) is true. In the following, we focus on situation (B). In fact, if statement (A) is true, then the assertions (i), (ii) and (iii) follow easily from the subsequent consideration by just regarding the case $k=1$.\par {\bf Properties of the sets $I_k\subset \mathbb{N}$.} Since $F$ is a monotonically increasing function, $F$ is discontinuous in at most countably many points. This observation allows us to adjust the sequence $(M_k)_k$ such that $F$ is continuous at every point $M_k$ with $k\in\mathbb{N}$. We set $M_0:=0$ and define $I_k:= J(M_k)\setminus J(M_{k-1})$ for $k\in\mathbb{N}$. Observe that the sets $I_1,...,I_k$ provide a disjoint decomposition of $J(M_k)$. Moreover, it follows immediately from the definitions of $I_k$ and $J(M_k)$, that for every $n\in I_k$, \begin{equation}\label{1} M_{k-1} \leq \max_{s\in\mathcal{R}'}\left|L_{ne_l}(s) \right| \leq M_k. \end{equation} We deduce from Montel's theorem and the local boundedness of the functions $L_{ne_l}$, $n\in\mathbb{N}$, in $\sigma>1$ that the family $\{L_{ne_l}\}_{n\in I_k}$ is normal in the half-strip $Q':=Q(\alpha',\frac{3}{4}l)$ defined by \eqref{def:Q}. The compact set $\mathcal{R}:=\mathcal{R}(\alpha,l)$ defined by \eqref{def:R} is a subset both of $Q'$ and $\mathcal{R}'$. According to Lemma \ref{lem:normalNproperties} (c) and \eqref{1}, this implies that, for every $y\in E(I_k)$, \begin{equation}\label{eq:boundLys} M_{k-1} \leq \max_{s\in\mathcal{R}}\left|L_y(s) \right| \leq M_k. \end{equation} Consequently, $E(I_j)\cap E(I_k)=\emptyset$ for $j=1,...,k-2$. It remains to consider the case $j=k-1$. 
For any $\delta>0$, we derive that $$ E(I_k)\cap E(I_{k-1}) \subset E(J(M_{k-1} + \delta))\setminus E(J(M_{k-1} - \delta)). $$ By the additivity of the measure $\pmb{\tau}$, this implies that $$ \pmb{\tau}\left( E(I_k)\cap E(I_{k-1}) \right) \leq F(M_{k-1}+\delta) - F(M_{k-1} - \delta). $$ The continuity of $F$ at $M_{k-1}$ assures that $$ \lim_{\delta\rightarrow 0+} F(M_{k-1} + \delta) - F(M_{k-1} - \delta) =0. $$ Consequently, we obtain that $\pmb{\tau}\left( E(I_k)\cap E(I_{k-1}) \right) =0$. Altogether, we proved that $$ \pmb{\tau}(E(I_k)\cap E(I_{j}) ) = 0 \qquad \mbox{ for }j,k\in \mathbb{N} \mbox{ with }j\neq k. $$ Statement (i) follows.\par {\bf Applicability of Tanaka's ergodic theorem.} Now, we shall figure out that the sets $E(I_k)$ are constructed in a suitable way such that Tanaka's modified version of the ergodic theorem (Lemma \ref{lem:Tanakaergodic}) can be applied. We fix an arbitrary $k\in\mathbb{N}$. For any given $\varepsilon>0$, we find according to Lemma \ref{lem:JM} an integer $\nu_0\in\mathbb{N}$ such that $$ \mathrm{dens}_* (J(M_{\nu_0})) > 1- \varepsilon. $$ We set $$ S:= \bigcup_{\begin{subarray}{c} \nu=1,...,\nu_0,\\ \nu\neq k \end{subarray}} I_{\nu}. $$ From the observation that $$ S\cup I_k = \bigcup_{\nu=1}^{\nu_0} I_{\nu} = J(M_{\nu_0}), $$ we deduce that $$ \mathrm{dens}^* \left(\mathbb{N}\setminus(S\cup I_k) \right) < \varepsilon. $$ Moreover, by statement (i), we have $$ \pmb{\tau}(E(S) \cap E(I_k))=0. $$ It follows from Lemma \ref{lem:Tanakaergodic} that, for any continuous function $p^{\triangledown}:E(I_k)\rightarrow\mathbb{C}$, \begin{equation}\label{eq:tanakapmb} \lim_{N\rightarrow\infty} \frac{1}{N} \sum_{\begin{subarray}{c} n\in I_k \\ n\le N \end{subarray}} p^{\triangledown}(ne_l) = \int_{E(I_k)} p^{\triangledown}(y) \,\mathrm{d} \pmb{\tau}. \end{equation} This observation is quite central in our further considerations.
In fact, by choosing $p^{\triangledown}$ in a proper way, the statement above implies already a weak version of (ii) and (iii). However, some further work is necessary to establish (ii) and (iii) in their full extent. We proceed with a continuity consideration. \par From now on, let $p:\mathbb{C}\rightarrow\mathbb{C}$ be a continuous function. We keep $k\in\mathbb{N}$ fixed and choose an arbitrary $\delta>0$. Moreover, we set $$ V_k := \max_{\begin{subarray}{c} z\in \mathbb{C} \\ |z|\leq M_k \end{subarray}} \left|p(z) \right|. $$ {\bf A uniform continuity argument.} We define the function $p^*: \mathcal{R}(\alpha,l)\times E(I_k)\rightarrow\mathbb{C}$ by $$ p^* (s,y) := p\bigl( L(s,y)\bigr). $$ According to Lemma \ref{lem:normalNproperties} (b), the function $p^*$ is continuous on $H:=\mathcal{R}(\alpha,l)\times E(I_k)$. The compactness of $E(I_k)\subset K_{2\pi/l}$ and $\mathcal{R}\subset \mathbb{C}$ imply that $H\subset \mathbb{C} \times K_{2\pi/l}$ is compact. Thus, $p^*$ is uniformly continuous on $H$. Consequently, we find a partition $$ \alpha=\sigma_1<\sigma_2< ... <\sigma_M=2 $$ of the interval $[\alpha,2]$ and a partition $$ 0=\lambda_1 < \lambda_2 < ... < \lambda_D = l $$ of the interval $[0,l]$ with the following properties: \begin{itemize} \item[\textreferencemark] For any $\sigma\in[\alpha,2]$, there is an $m\in\{1,...,M\}$ such that, for every $(y,t)\in E(I_k)\times [0,l]$, \begin{equation}\label{eq:uc1} \left| p^{*}(\sigma +it,y) - p^{*}(\sigma_m + it ,y) \right| < \frac{\delta}{3}. \end{equation} \item[\textreferencemark] For any $(\sigma,t)\in[\alpha,2]\times [0,l]$, there is an $(m,d)\in\{1,...,M\}\times\{1,...,D\}$ such that, for every $y\in E(I_k)$, \begin{equation}\label{eq:uc2} \left| p^{*}(\sigma +it,y) - p^{*}(\sigma_m + i\lambda_d ,y) \right| < \frac{\delta}{3}.
\end{equation} \end{itemize} Observe further that, according to \eqref{eq:boundLys}, we have \begin{equation}\label{pstar} \max_{(s,y)\in H} \left| p^*(s,y)\right| \leq V_k. \end{equation} We are ready to establish statements (ii) and (iii).\par {\bf Continuous functions on $E(I_k)$.} For $\sigma\in[\alpha,2]$ and $\lambda\in[0,l]$, the functions $p^{\spadesuit},p_{\sigma}^{\diamondsuit},p_{\sigma,\lambda}^{\divideontimes}:E(I_k)\rightarrow\mathbb{C}$ defined by $$ \begin{array}{cc} \displaystyle{p^{\spadesuit}(y)}:= \frac{1}{l} \int_0^l\int_{\alpha}^2 p(L(\sigma+it,y)) \,\mathrm{d}\sigma \,\mathrm{d} t, \\[2em] \displaystyle{ p_{\sigma}^{\diamondsuit}(y):=\frac{1}{l}\int_0^l p(L(\sigma+it,y)) \,\mathrm{d} t }, \\[2em] \displaystyle{p_{\sigma,\lambda}^{\divideontimes}(y):= p(L(\sigma+i\lambda,y))} \end{array} $$ are continuous on $E(I_k)$. Thus, \eqref{eq:tanakapmb} applies to them. In fact, the functions above are special cases of functions $P_{\pmb{\mu}}:E(I_k)\rightarrow\mathbb{C}$ defined by $$ P_{\pmb{\mu}}(y):= \int_{[0,l]\times[\alpha,2]} p(L(\sigma+it,y)) \,\mathrm{d}\pmb{\mu}(\sigma,t), $$ where $\pmb{\mu}$ is an appropriate measure on $[0,l]\times[\alpha,2]$. It might be reasonable to establish Lemma \ref{lem:probmom} and Theorem \ref{th:probmom} for $P_{\pmb{\mu}}$. However, this general approach bears some further technical obstacles which we want to omit here.\par {\bf Statement (ii).} First, we establish statement (ii). We suppose that $p$ is non-negative. Consequently, $$ p^{\spadesuit}(y)\geq 0,\qquad p_{\sigma}^{\diamondsuit}(y)\geq 0, \qquad p_{\sigma,\lambda}^{\divideontimes}(y)\geq 0 $$ for every $y\in E(I_k)$.
By means of \eqref{eq:tanakapmb} and Lemma \ref{lem:arithmeticmean} (a), we find a finite subset $\Delta_k\subset I_k$ such that, for every $J\subset \mathbb{N}$ with $\Delta_k\subset J$ and every $N\in\mathbb{N}$, the inequality \begin{equation}\label{spadsuit1} \frac{1}{N} \sum_{\begin{subarray}{c} n\in I_k\setminus J \\ n\le N \end{subarray}} p^{\spadesuit}(ne_l) \leq \int_{E(I_k)} p^{\spadesuit}(y) \,\mathrm{d} \pmb{\tau}+\delta \end{equation} holds. First, we consider the right-hand side of the inequality \eqref{spadsuit1}. According to \eqref{pstar}, the function $p^*$ is bounded on $\mathcal{R}(\alpha,l)\times E(I_k)$. By a general version of Fubini's theorem (see for example Deitmar \cite[\S 8.2]{deitmar:2002}) and the topological equivalence of $K_{2\pi/l}\times[0,l)$ and $K$, we obtain that \begin{align}\label{spadsuit2} \int_{E(I_k)} p^{\spadesuit}(y) \,\mathrm{d} \pmb{\tau} &= \frac{1}{l} \int_{E(I_k)} \int_0^l\int_{\alpha}^2 p(L(\sigma+it,y)) \,\mathrm{d}\sigma \,\mathrm{d} t \,\mathrm{d} \pmb{\tau}\\ &= \int_{\alpha}^2 \int_{E(I_k)\times[0,l)} p(L(\sigma,y+e_t)) \, (\mathrm{d}\pmb{\tau}\times\tfrac{1}{l}\mathrm{d} t) \,\mathrm{d} \sigma \notag \\ & =\int_{\alpha}^2 \int_{G_k} p(L(\sigma,x)) \,\mathrm{d}\pmb{\sigma} \,\mathrm{d} \sigma, \notag \end{align} where $G_k:=\{y+e_t \, : \, (y,t)\in E(I_k)\times [0,l)\}\subset K$. Next, we regard the left-hand side of \eqref{spadsuit1}.
We get that \begin{align}\label{spadsuit3} \frac{1}{N} \sum_{\begin{subarray}{c} n\in I_k\setminus J \\ n\le N \end{subarray}} p^{\spadesuit}(ne_l) &=\frac{1}{Nl} \sum_{\begin{subarray}{c} n\in I_k\setminus J \\ n\le N \end{subarray}} \int_0^l\int_{\alpha}^2 p\bigl(\mathcal{L}(\sigma+it+inl)\bigr) \,\mathrm{d}\sigma \,\mathrm{d} t\\ &=\frac{1}{Nl} \sum_{\begin{subarray}{c} n\in I_k\setminus J \\ n\le N \end{subarray}} \int_{nl}^{(n+1)l}\int_{\alpha}^2 p\bigl(\mathcal{L}(\sigma+it)\bigr) \,\mathrm{d}\sigma \,\mathrm{d} t \notag \end{align} Now, the inequality $(\spadesuit)$ of statement (ii) follows by combining \eqref{spadsuit1}, \eqref{spadsuit2} and \eqref{spadsuit3}.\par To establish $(\diamondsuit)$ of statement (ii), we proceed in an analogous manner. Here, however, we use additionally that $p^*$ is uniformly continuous on $\mathcal{R}(\alpha,l)\times E(I_k)$. Again, \eqref{eq:tanakapmb} and Lemma \ref{lem:arithmeticmean} (a) assure that we find a finite subset $\Delta'_k\subset I_k$ such that, for every $m\in\{1,...,M\}$ and every $N\in\mathbb{N}$, \begin{equation}\label{diamondsuit1} \frac{1}{N} \sum_{\begin{subarray}{c} n\in I_k\setminus J \\ n\le N \end{subarray}} p_{\sigma_m}^{\diamondsuit}(ne_l) \leq \int_{E(I_k)} p_{\sigma_m}^{\diamondsuit}(y) \,\mathrm{d} \pmb{\tau}+\frac{\delta}{3}, \end{equation} where $J$ is an arbitrary subset of $\mathbb{N}$ with $\Delta'_k\subset J$. According to the choice of the partition $\alpha=\sigma_1<...<\sigma_M=2$, for every $\sigma\in[\alpha,2]$, we find an $m'\in\{1,...,M\}$ such that \eqref{eq:uc1} holds.
Then, we deduce by means of the triangle inequality that, for every $N\in\mathbb{N}$ \begin{equation}\label{diamondestimate1} \frac{1}{N} \sum_{\begin{subarray}{c} n\in I_k\setminus J \\ n\le N \end{subarray}} p_{\sigma}^{\diamondsuit}(ne_l) \leq \frac{1}{N} \sum_{\begin{subarray}{c} n\in I_k\setminus J \\ n\le N \end{subarray}} p_{\sigma_{m'}}^{\diamondsuit}(ne_l) +\frac{\delta}{3} \end{equation} and \begin{equation}\label{diamondestimate2} \int_{E(I_k)} p_{\sigma_{m'}}^{\diamondsuit}(y) \,\mathrm{d} \pmb{\tau} \leq \int_{E(I_k)} p_{\sigma}^{\diamondsuit}(y) \,\mathrm{d} \pmb{\tau} + \frac{\delta}{3} \pmb{\tau}(E(I_k)). \end{equation} Statement $(\diamondsuit)$ follows by combining \eqref{diamondsuit1}, \eqref{diamondestimate1} and \eqref{diamondestimate2} and by rewriting the appearing sums and integrals in an appropriate way as described in detail for $p^{\spadesuit}$.\par We can easily repeat the arguments above to prove \eqref{divide1}: firstly, by means of \eqref{eq:tanakapmb} and Lemma \ref{lem:arithmeticmean} (a) and (b), we establish \eqref{divide1} for $p_{\sigma_m,\lambda_d}^{\divideontimes}$ with $(m,d)\in\{1,...,M\}\times\{1,...,D\}$. Then, we argue via uniform continuity. We omit further details here.\par {\bf Statement (iii).} We proceed to establish statement (iii). Thus, we drop the restriction that $p$ is non-negative. Observe that $$ |p^{\spadesuit}(y)|\leq V_k, \qquad |p_{\sigma}^{\diamondsuit}(y)|\leq V_k, \qquad |p_{\sigma,\lambda}^{\divideontimes}(y)|\leq V_k $$ for $y\in E(I_k)$. By \eqref{eq:tanakapmb} and Lemma \ref{lem:arithmeticmean} (b), we get that, for any $J\subset \mathbb{N}$ with $\mathrm{dens}\ J=0$, $$ \frac{1}{N} \sum_{\begin{subarray}{c} n\in I_k\setminus J \\ n\le N \end{subarray}} p^{\spadesuit}(ne_l) = \int_{E(I_k)} p^{\spadesuit}(y) \,\mathrm{d} \pmb{\tau} + o(1) $$ as $N\rightarrow\infty$.
By similar arguments as above, this translates to $$ \frac{1}{Nl} \sum_{\begin{subarray}{c} n\in I_k\setminus J \\ n\le N \end{subarray}} \int_{nl}^{(n+1)l}\int_{\alpha}^2 p\bigl(\mathcal{L}(\sigma+it)\bigr) \,\mathrm{d}\sigma \,\mathrm{d} t = \int_{\alpha}^2 \int_{G_k} p\bigl( L(\sigma,x) \bigr) \,\mathrm{d}\pmb{\sigma} \,\mathrm{d} \sigma + o(1), $$ as $N\rightarrow\infty$. We proved $(\clubsuit)$ of statement (iii).\par Moreover, for any $J\subset \mathbb{N}$ with $\mathrm{dens} \ J = 0$, we find according to \eqref{eq:tanakapmb} and Lemma \ref{lem:arithmeticmean} (b) a positive integer $N_0$ such that \begin{equation}\label{heartestimate1} \Biggl| \frac{1}{N} \sum_{\begin{subarray}{c} n\in I_k\setminus J \\ n\le N \end{subarray}} p_{\sigma_m}^{\diamondsuit}(ne_l) - \int_{E(I_k)} p_{\sigma_m}^{\diamondsuit}(y) \,\mathrm{d} \pmb{\tau} \Biggr| < \frac{\delta}{3} \end{equation} holds for every $m\in\{1,...,M\}$ and $N\geq N_0$. By means of our choice of the partition $\alpha=\sigma_1<...<\sigma_M$, a simple application of the triangle inequality yields that, for every $\sigma\in[\alpha,2]$ and $N\geq N_0$, \begin{equation}\label{heartestimate2} \Biggl| \frac{1}{N} \sum_{\begin{subarray}{c} n\in I_k\setminus J \\ n\le N \end{subarray}} p_{\sigma}^{\diamondsuit}(ne_l) - \int_{E(I_k)} p_{\sigma}^{\diamondsuit}(y) \,\mathrm{d} \pmb{\tau} \Biggr| < \delta \end{equation} holds. Since $$ \int_{E(I_k)} p_{\sigma}^{\diamondsuit}(y) \,\mathrm{d} \pmb{\tau} = \int_{G_k} p\left( L(\sigma,y)\right) \,\mathrm{d} \pmb{\sigma} $$ and our choice of $\delta>0$ was arbitrary, statement \eqref{diamond2} is proved.\par It is now clear how to prove \eqref{divide2}. First, by means of \eqref{eq:tanakapmb} and Lemma \ref{lem:arithmeticmean} (a) and (b), we establish \eqref{divide2} for $p_{\sigma_m,\lambda_d}^{\divideontimes}$ with $(m,d)\in\{1,...,M\}\times\{1,...,D\}$. Then, we argue via uniform continuity.
Again, we omit further details here. \end{proof} \subsection*{Proof of Theorem \ref{th:probmom}} We shall now derive Theorem \ref{th:probmom} from Lemma \ref{lem:probmom}. First, we establish Theorem \ref{th:probmom} for a single function $\mathcal{L}$ and a single function $p$.\par {\bf Theorem \ref{th:probmom} for a single pair $(\mathcal{L},p)$.} Let $u\in[\frac{1}{2},1)$, $\mathcal{L}\in \mathbb{N}o(u)$ with Dirichlet series expansion $$ \mathcal{L}(s) = \sum_{n=1}^{\infty} \frac{a(n)}{n^s} \in \mathscr{H}^2 $$ and $p:\mathbb{C}\rightarrow \mathbb{C}$ be a continuous function such that condition \eqref{condC} of Theorem \ref{th:probmom} is satisfied. The latter implies that we find constants $c_1,c_2 \geq 0$ such that \begin{equation}\label{qqq} \left| p(z) \right| \leq c_1 |z|^2 + c_2, \qquad z\in\mathbb{C}. \end{equation} We define $q:\mathbb{C}\rightarrow \mathbb{R}_0^+$ by $$ q(z)= c_1 |z|^2 + c_2, \qquad z\in \mathbb{C}. $$ Observe that $q$ is a non-negative, continuous function on $\mathbb{C}$. The theorem of Plancherel, in particular \eqref{plan1}, asserts that, for $\sigma>\frac{1}{2}$, $$ \mathcal{Z}(\sigma):=\int_K q\left( L(\sigma, x) \right) \,\mathrm{d} \pmb{\sigma} = c_1\cdot\sum_{n=1}^{\infty} \frac{|a(n)|^{2}}{n^{2\sigma}} + c_2 < \infty. $$ Hence, due to \eqref{qqq}, we have, for $\sigma>\frac{1}{2}$, $$ \left| \int_K p\left( L(\sigma, x) \right) \,\mathrm{d} \pmb{\sigma} \right| \leq \mathcal{Z}(\sigma) < \infty $$ and, as $l\in\Gamma_P$, $$ \left| \int_{K_{2\pi/l}} p\left( L(\sigma, x) \right) \,\mathrm{d} \pmb{\tau} \right| \leq \int_{K_{2\pi/l}} q\left( L(\sigma, x) \right) \,\mathrm{d} \pmb{\tau} = \mathcal{Z}(\sigma) < \infty. $$ Let $(M_k)_{k\in\varTheta}$ be a sequence of positive real numbers $M_k\geq 1$ such that the statements of Lemma \ref{lem:probmom} hold for $\mathcal{L}$ and the continuous, non-negative function $q$. Again, as in the proof of Lemma \ref{lem:probmom}, we may suppose without loss of generality that $\varTheta=\mathbb{N}$.
For $M_k$, let $I_k$ denote the corresponding subset of $\mathbb{N}$ as defined in Lemma \ref{lem:probmom}. We fix positive numbers $\delta_k$, $k\in \mathbb{N}$, such that $$ \sum_{k=1}^{\infty} \delta_k <\infty. $$ For $k\in\mathbb{N}$ and fixed $\alpha\in(u,1]$, let $\Delta_k$ be a finite subset of $I_k$ such that the statements of Lemma \ref{lem:probmom} (ii) hold with the special choice of $\delta = \delta_k$, respectively. We set $$ \Upsilon:= \left( \bigcup_{k=1}^{\infty} \Delta_k \right) \cup \left(\mathbb{N} \setminus \bigcup_{k=1}^{\infty} I_k \right). $$ Let $J(M)$ be defined as in Lemma \ref{lem:probmom}. Then, according to Lemma \ref{lem:JM}, for any $\varepsilon>0$, we find a natural number $\nu$ such that $$ \mathrm{dens}_* J(M_{\nu})>1-\varepsilon $$ and, consequently, \begin{equation}\label{2} \mathrm{dens}^* \left( \mathbb{N}\setminus J(M_{\nu}) \right) \leq \varepsilon. \end{equation} By the definition of $I_k$, we obtain that the set $\mathbb{N}\setminus J(M_{\nu})$ contains both $\bigcup_{k=\nu+1}^{\infty} I_k$ and $\mathbb{N} \setminus \bigcup_{k=1}^{\infty} I_k$. Since the set $\bigcup_{k=1}^{\nu} \Delta_k$ has only finitely many elements, we conclude by means of \eqref{2} that $$ \mathrm{dens}^* \Upsilon \leq \varepsilon. $$ Since this holds for any $\varepsilon>0$, we derive that $$ \mathrm{dens} \ \Upsilon = 0. $$ From now on, let $J$ be an arbitrary subset of $\mathbb{N}$ with $$\mathrm{dens}\ J = 0 \qquad \mbox{and} \qquad \Upsilon \subset J.$$ Due to $\Upsilon \subset J$, we have in particular that $\Delta_k\subset J$ for $k\in\mathbb{N}$. We shall see later on why it is reasonable not to work with $\Upsilon$ but with an arbitrary set $J$ with the properties stated above.
Roughly speaking, the set $J$ shall enable us to establish Theorem \ref{th:probmom} for countably many pairs $(\mathcal{L}_j,p_j)$, $j\in\mathbb{N}$, of functions $\mathcal{L}_j$ and $p_j$ simultaneously.\par First, we establish statement (i) of Theorem \ref{th:probmom} for $(\mathcal{L},p)$. We set $$ A_k := \int_{\alpha}^2 \int_{G_k} p(L(\sigma,x)) \,\mathrm{d}\pmb{\sigma} \,\mathrm{d} \sigma, \qquad \qquad A_k' := \int_{\alpha}^2 \int_{G_k} q(L(\sigma,x)) \,\mathrm{d}\pmb{\sigma} \,\mathrm{d} \sigma, $$ $$ Z:= \int_{\alpha}^2 \int_{K} p(L(\sigma,x)) \,\mathrm{d}\pmb{\sigma} \,\mathrm{d} \sigma \qquad \mbox{and} \qquad Z':= \int_{\alpha}^2 \int_{K} q(L(\sigma,x)) \,\mathrm{d}\pmb{\sigma} \,\mathrm{d} \sigma, $$ where $G_k$ is defined as in Lemma \ref{lem:probmom}. Relation \eqref{qqq} assures that $$\left| Z\right| \leq Z' = \int_{\alpha}^2 \mathcal{Z}(\sigma) \,\mathrm{d} \sigma <\infty.$$ From the properties of $E(I_k)$ in Lemma \ref{lem:probmom}, we deduce that $$ \pmb{\tau}\left( \bigcup_{k=1}^{\infty} E(I_k)\right) = \lim_{k\rightarrow\infty} \pmb{\tau} (E(I_k)) =1. $$ This implies that $$\pmb{\sigma}\left(\bigcup_{k=1}^{\infty} G_k\right) =1.$$ Consequently, we get that \begin{equation}\label{AAA} \sum_{k=1}^{\infty}A_k = Z \qquad \mbox{and} \qquad \sum_{k=1}^{\infty}A'_k = Z'. \end{equation} We observe that \begin{equation}\label{A} \sum_{k=1}^{\infty}|A_k| \leq \sum_{k=1}^{\infty}A'_k = Z' < \infty. \end{equation} Furthermore, we set, for $n\in \mathbb{N}\setminus J$, $$ B_p(n):= \frac{1}{l}\int_{\alpha}^2\int_{n l}^{(n+1) l} p\bigl(\mathcal{L}(\sigma+it)\bigr) \,\mathrm{d} t\,\mathrm{d}\sigma $$ and $$ B_q(n): =\frac{1}{l}\int_{\alpha}^2\int_{n l}^{(n+1) l} q\bigl(\mathcal{L}(\sigma+it)\bigr) \,\mathrm{d} t\,\mathrm{d}\sigma. $$ It is immediately clear that $|B_p(n)|\leq B_q(n)$ for $n\in\mathbb{N}\setminus J$.
Since $\Delta_k\subset J$ for $k\in\mathbb{N}$, we get by applying statement \eqref{spade1} of Lemma \ref{lem:probmom} (ii) to $q$ that, for $k\in \mathbb{N}$, \begin{equation}\label{BpBq} |B_p(n)|\leq B_q(n) \leq A_k'+\delta_k, \end{equation} provided that $n\in I_k\setminus J$. Hence, for every $N\in \mathbb{N}$, \begin{equation}\label{Bp} \frac{1}{N} \sum_{k=1}^{\infty} \, \, \sum_{\begin{subarray}{c} n\in I_k\setminus J \\ n\le N \end{subarray}} \left| B_p(n)\right| \leq \frac{1}{N} \sum_{k=1}^{\infty} \, \, \sum_{\begin{subarray}{c} n\in I_k\setminus J \\ n\le N \end{subarray}} B_q(n) \leq \sum_{k=1}^{\infty} A_k' + \sum_{k=1}^{\infty}\delta_k < \infty. \end{equation} The estimates \eqref{A} and \eqref{Bp} imply, in particular, that both the double series $$ \frac{1}{N } \sum_{k=1}^{\infty} \, \, \sum_{\begin{subarray}{c} n\in I_k\setminus J \\ n\le N \end{subarray}} B_p(n) \qquad \mbox{and} \qquad \sum_{k=1}^{\infty} A_k $$ converge absolutely for every $N\in\mathbb{N}$. Thus, we may rearrange their terms, respectively. \par Now, let $\varepsilon>0$. Then, we find a number $D\in\mathbb{N}$ such that \begin{equation}\label{eq:D} \sum_{k=D+1}^{\infty} A_k' <\frac{\varepsilon}{3} \qquad \mbox{and} \qquad \sum_{k=D+1}^{\infty} \delta_k <\frac{\varepsilon}{3}.
\end{equation} By the rearrangement theorem and the construction of the set $\Upsilon$, we obtain that, for $N\in \mathbb{N}$, $$ \Bigl| \, \frac{1}{N } \, \, \sum_{\begin{subarray}{c} n\in \mathbb{N} \setminus J \\ n\le N \end{subarray}} B_p(n) - Z \Bigr| \leq \sum_{k=1}^{\infty} \Bigl| \frac{1}{N } \, \, \sum_{\begin{subarray}{c} n\in I_k \setminus J \\ n\le N \end{subarray}} B_p(n) - A_k \Bigr|. $$ By means of \eqref{BpBq} and \eqref{eq:D}, we get that the inequality $$ \Bigl| \frac{1}{N } \, \, \sum_{\begin{subarray}{c} n\in \mathbb{N} \setminus J \\ n\le N \end{subarray}} B_p(n) - Z \Bigr| \leq \sum_{k=D+1}^{\infty} (A_k' + \delta_k) + \sum_{k=1}^{D} \Bigl| \frac{1}{N } \, \, \sum_{\begin{subarray}{c} n\in I_k \setminus J \\ n\le N \end{subarray}} B_p(n) - A_k \Bigr| + \sum_{k=D+1}^{\infty} A_k' \leq $$ $$ \leq \sum_{k=1}^{D} \Bigl| \frac{1}{N } \, \, \sum_{\begin{subarray}{c} n\in I_k \setminus J \\ n\le N \end{subarray}} B_p(n) - A_k \Bigr| + \varepsilon $$ holds for every $N\in\mathbb{N}$. From statement \eqref{spade2} of Lemma \ref{lem:probmom} (iii), we derive that \begin{equation}\label{limessup} \limsup_{N\rightarrow\infty} \Bigl| \frac{1}{N } \, \, \sum_{\begin{subarray}{c} n\in \mathbb{N} \setminus J \\ n\le N \end{subarray}} B_p(n) - Z \Bigr| \leq \varepsilon. \end{equation} Since this is true for any $\varepsilon>0$, we conclude that, for any $J\subset \mathbb{N}$ with $\Upsilon\subset J$ and $\mathrm{dens} \ J = 0$, as $N\rightarrow\infty$, \begin{equation}\label{eqq} \frac{1}{N l} \sum_{\begin{subarray}{c}n\in\mathbb{N}\setminus J \\ n\leq N \end{subarray}} \int_{n l}^{(n+1) l}\int_{\alpha}^{2} p\bigl(\mathcal{L}(\sigma+it)\bigr) \,\mathrm{d}\sigma \,\mathrm{d} t = \int_{\alpha}^{2}\int_{K} p\bigl(L(\sigma,x) \bigr) \,\mathrm{d} \pmb{\sigma} \,\mathrm{d}\sigma + o(1).
\end{equation} To establish statement (ii) of Theorem \ref{th:probmom} for the couple $(\mathcal{L},p)$, we proceed in an analogous manner: for $\sigma\in[\alpha,2]$, we set $$ \dot{A}_{k}(\sigma) := \int_{G_k} p(L(\sigma,x)) \,\mathrm{d}\pmb{\sigma} , \qquad \qquad \dot{A}'_{k}(\sigma):= \int_{G_k} q(L(\sigma,x)) \,\mathrm{d}\pmb{\sigma} , $$ $$ \dot{Z}(\sigma):= \int_{K} p(L(\sigma,x)) \,\mathrm{d}\pmb{\sigma} , \qquad \qquad \dot{Z}'(\sigma) := \mathcal{Z}(\sigma) = \int_{K} q(L(\sigma,x)) \,\mathrm{d}\pmb{\sigma} , $$ $$ \dot{B}_{p}(n,\sigma):= \frac{1}{l}\int_{n l}^{(n+1) l} p\bigl(\mathcal{L}(\sigma+it)\bigr) \,\mathrm{d} t $$ and $$ \dot{B}_{q}(n,\sigma):= \frac{1}{l}\int_{n l}^{(n+1) l} q\bigl(\mathcal{L}(\sigma+it)\bigr) \,\mathrm{d} t . $$ By replacing the quantities $A_k$, $A_k'$, $Z$, $Z'$, $B_p(n)$, $B_q(n)$ by $\dot{A}_{k}(\sigma)$, $\dot{A}'_{k}(\sigma)$, $\dot{Z}(\sigma)$, $\dot{Z}'(\sigma)$, $\dot{B}_{p}(n,\sigma)$, $\dot{B}_{q}(n,\sigma)$, respectively, and by using \eqref{diamond1} and \eqref{diamond2} of Lemma \ref{lem:probmom} instead of \eqref{spade1} and \eqref{spade2}, we can follow basically step by step the argumentation above in order to prove that, for any $J\subset \mathbb{N}$ with $\Upsilon\subset J$ and $\mathrm{dens} \ J = 0$, uniformly for $\sigma\in[\alpha,2]$, as $N\rightarrow\infty$, \begin{equation}\label{eqqq} \frac{1}{N l} \sum_{\begin{subarray}{c}n\in\mathbb{N}\setminus J \\ n\leq N \end{subarray}} \int_{n l}^{(n+1) l} p\bigl(\mathcal{L}(\sigma+it)\bigr) \,\mathrm{d} t = \int_{K} p\bigl(L(\sigma,x) \bigr) \,\mathrm{d} \pmb{\sigma} + o(1). \end{equation} Additional arguments are necessary in order to establish the uniformity in $\sigma$:\par First, we consider the choice of $D$ in \eqref{eq:D}.
We regard the sequence $(f_K)_K$ of functions $f_K:[\alpha,2]\rightarrow \mathbb{R}_0^+$ defined by $$ f_K(\sigma)=\sum_{k=1}^{K} \dot{A}_k'(\sigma). $$ Analogously to \eqref{AAA}, we have, for $\sigma\in[\alpha,2]$, $$ \lim_{K\rightarrow\infty} f_K(\sigma) = \sum_{k=1}^{\infty}\dot{A}_k'(\sigma) = \dot{Z}' (\sigma) =\mathcal{Z}(\sigma)<\infty. $$ The function $\mathcal{Z}(\sigma)$ is continuous on $[\alpha,2]$ and, for every $\sigma\in[\alpha,2]$, the sequence $(f_K(\sigma))_K$ is monotonically increasing, since the function $q$ is real-valued and non-negative. This implies that the sequence $(f_K)_K$ converges uniformly on $[\alpha,2]$ to $\mathcal{Z}$. Hence, we find, for any given $\varepsilon>0$, a positive integer $D$ such that $$ \sum_{k=D+1}^{\infty} \dot{A}_k'(\sigma) <\frac{\varepsilon}{3} $$ holds for every $\sigma\in[\alpha,2]$.\par Secondly, we consider the limit superior in \eqref{limessup}. Here, it is immediately clear from the statement \eqref{diamond2} of Lemma \ref{lem:probmom} (iii) that $$ \limsup_{N\rightarrow\infty} \Bigl| \frac{1}{N } \, \, \sum_{\begin{subarray}{c} n\in \mathbb{N} \setminus J \\ n\le N \end{subarray}} \dot{B}_p(n,\sigma) - \dot{Z}(\sigma) \Bigr| \leq \limsup_{N\rightarrow\infty} \sum_{k=1}^D \Bigl| \frac{1}{N } \sum_{\begin{subarray}{c} n\in I_k \setminus J \\ n\le N \end{subarray}} \dot{B}_p(n,\sigma) - \dot{A}_k(\sigma) \Bigr| + \varepsilon \leq \varepsilon $$ holds uniformly for $\sigma\in[\alpha,2]$.\par Statement (iii) of Theorem \ref{th:probmom} follows also along the lines of our considerations above: this time, we set for $\sigma\in[\alpha,2]$ and $\lambda\in[0,l]$, $$ \ddot{A}_k(\sigma) := \int_{E(I_k)} p(L(\sigma,x)) \,\mathrm{d}\pmb{\tau} , \qquad \qquad \ddot{A}'_k(\sigma):= \int_{E(I_k)} q(L(\sigma,x)) \,\mathrm{d}\pmb{\tau} , $$ $$ \ddot{Z}(\sigma):=\int_{K_{2\pi/l}} p(L(\sigma,x)) \,\mathrm{d}\pmb{\tau} , \qquad \qquad
\ddot{Z}'(\sigma) := \int_{K_{2\pi/l}} q(L(\sigma,x)) \,\mathrm{d}\pmb{\tau}, $$ $$ \ddot{B}_p(n,\sigma,\lambda):= p\bigl(\mathcal{L}(\sigma+i\lambda + inl)\bigr) $$ and $$ \ddot{B}_q(n,\sigma,\lambda):= q\bigl(\mathcal{L}(\sigma+i\lambda + inl)\bigr). $$ By replacing $A_k$, $A_k'$, $Z$, $Z'$, $B_p(n)$, $B_q(n)$ by $\ddot{A}_{k}(\sigma)$, $\ddot{A}'_{k}(\sigma)$, $\ddot{Z}(\sigma)$, $\ddot{Z}'(\sigma)$, $\ddot{B}_{p}(n,\sigma,\lambda)$, $\ddot{B}_{q}(n,\sigma,\lambda)$, respectively, in our argumentation above and by relying on \eqref{divide1} and \eqref{divide2} of Lemma \ref{lem:probmom}, we obtain that, for any $J\subset \mathbb{N}$ with $\Upsilon\subset J$ and $\mathrm{dens} \ J = 0$, uniformly for $\sigma\in[\alpha,2]$ and $\lambda\in[0,l]$, as $N\rightarrow\infty$, \begin{equation}\label{eqqqq} \frac{1}{N } \sum_{\begin{subarray}{c}n\in\mathbb{N}\setminus J \\ n\leq N \end{subarray}} p\bigl(\mathcal{L}(\sigma+i\lambda + inl)\bigr) = \int_{K_{2\pi/l}} p\bigl(L(\sigma,x) \bigr) \,\mathrm{d} \pmb{\tau} + o(1). \end{equation} Additional arguments are necessary in two steps of our argumentation in order to obtain uniformity in $\sigma$ and $\lambda$.\par Firstly, we consider the choice of $D$ in \eqref{eq:D}. Since $l\in \Gamma_P$, we have $$ \ddot{A}'_{k}(\sigma) = \dot{A}'_{k}(\sigma) \qquad \mbox{for }k\in\mathbb{N}. $$ Due to our analysis above, for given $\varepsilon>0$, we find a positive integer $D$ such that, for every $\sigma\in[\alpha,2]$, $$ \sum_{k=D+1}^{ \infty}\ddot{A}'_{k}(\sigma) < \varepsilon. $$ Secondly, it follows again from statement \eqref{divide2} of Lemma \ref{lem:probmom} (iii) that the limit superior involved in \eqref{limessup} is uniform in $\sigma$ and $\lambda$.
\par In fact, a close look at the proof reveals that we can transfer our arguments to the case $l\notin \Gamma_P$, if the function $$ f(\sigma):= \int_{K_{2\pi/l}} q(L(\sigma,x)) \,\mathrm{d}\pmb{\tau} $$ is continuous for $\sigma>u$. \par {\bf Theorem \ref{th:probmom} for countably many pairs $(\mathcal{L}_j,p_j)$.} For given $u\in[\frac{1}{2},1)$, let $(\mathcal{L}_j)_j$ be a sequence of functions $\mathcal{L}_j \in \mathbb{N}o(u)$ and let $(p_j)_j$ be a sequence of continuous functions $p_j:\mathbb{C}\rightarrow \mathbb{C}$ that satisfy condition \eqref{condC} of Theorem \ref{th:probmom}. Then, we find for every pair $(p_j,\mathcal{L}_j)$ a set $\Upsilon_j \subset \mathbb{N}$ with $\mathrm{dens} \ \Upsilon_j = 0$ such that \eqref{eqq}, \eqref{eqqq} and \eqref{eqqqq} hold, respectively, for $p_j$ and $\mathcal{L}_j$ with $\Upsilon$ being replaced by $\Upsilon_j$.\par We construct a common set $J$ with $\mathrm{dens} \ J = 0$ such that \eqref{eqq}, \eqref{eqqq} and \eqref{eqqqq} hold simultaneously for every pair $(p_j,L_j)$.\par Let $(\varepsilon_k)_k$ be a sequence of positive real numbers with $\lim_{k\rightarrow\infty} \varepsilon_k = 0$. We set $N_1:=1$ and $J_1:= \Upsilon_1$. Inductively for $k\in \mathbb{N}$ with $k\geq 2$, we fix a positive integer $N_k\geq N_{k-1}$ such that, for $N\geq N_k$, both $$ \frac{\# \bigl( J_{k-1} \cap [1,N ] \bigr)}{N} < \frac{\varepsilon_k}{2} \qquad \mbox{and} \qquad \frac{\# \bigl( \Upsilon_k \cap [1,N ] \bigr)}{N} < \frac{\varepsilon_k}{2} $$ hold, and we set $$ J_{k} := \bigcup_{j=1}^{k} \bigl( \Upsilon_j\setminus[1,N_j) \bigr). $$ By the construction of $J_k$, we have $$ \frac{\#\bigl( J_k \cap [1,N ] \bigr)}{N} < \varepsilon_k \qquad \mbox{for }N\geq N_k. $$ This implies that the set $$ J':= \bigcup_{j=1}^{\infty} \bigl( \Upsilon_j\setminus[1,N_j) \bigr) $$ has density zero.
Now, it is easy to see that for the special choice of $J=J'$, we obtain that for every $j\in \mathbb{N}$, as $N\rightarrow\infty$, $$ \frac{1}{N l} \sum_{\begin{subarray}{c}n\in\mathbb{N}\setminus J' \\ N_j\leq n\leq N \end{subarray}} \int_{n l}^{(n+1) l}\int_{\alpha}^{2} p_j\bigl(\mathcal{L}_j(\sigma+it)\bigr) \,\mathrm{d}\sigma \,\mathrm{d} t = \int_{\alpha}^{2}\int_{K} p_j\bigl(L_j(\sigma,x) \bigr) \,\mathrm{d} \pmb{\sigma} \,\mathrm{d}\sigma + o_j(1); $$ for every $j\in \mathbb{N}$, uniformly for $\sigma\in[\alpha,2]$, as $N\rightarrow\infty$, $$ \frac{1}{N l} \sum_{\begin{subarray}{c}n\in\mathbb{N}\setminus J' \\ N_j \leq n\leq N \end{subarray}} \int_{n l}^{(n+1) l} p_j\bigl(\mathcal{L}_j(\sigma+it)\bigr) \,\mathrm{d} t = \int_{K} p_j\bigl(L_j(\sigma,x) \bigr) \,\mathrm{d} \pmb{\sigma} + o_j(1); $$ and for every $j\in \mathbb{N}$, uniformly for $\sigma\in[\alpha,2]$ and $\lambda\in[0,l]$, as $N\rightarrow\infty$, $$ \frac{1}{N} \sum_{\begin{subarray}{c}n\in\mathbb{N}\setminus J' \\ N_j\leq n\leq N \end{subarray}} p_j\bigl(\mathcal{L}_j(\sigma+i\lambda + inl)\bigr) = \int_{K_{2\pi/l}} p_j\bigl(L_j(\sigma,x) \bigr) \,\mathrm{d} \pmb{\tau} + o_j(1). $$ If we set $$ A:= \bigcup_{n\in J'} [nl, (n+1)l), $$ then $A$ is an $l$-set of density zero and the statements above can be translated easily into the form given in Theorem \ref{th:probmom} (i), (ii) and (iii). The theorem is proved. \section{Applications to the value-distribution of the Riemann zeta-function} The asymptotic expansions for the moments in Theorem \ref{th:probmom} yield information on the value-distribution of the functions $\mathcal{L}\in\mathbb{N}o(u)$ under consideration. Exemplarily, we discuss the case of the Riemann zeta-function. We can retrieve some unconditional information on the growth behaviour of the Riemann zeta-function in $\sigma>\frac{1}{2}$. \begin{corollary}\label{cor:app1} Let $\alpha>\frac{1}{2}$ and $l>0$.
Then, there exists an $l$-set $A\subset [1,\infty)$ of density zero such that, for any $\varepsilon>0$, uniformly for $\sigma\geq \alpha$ \begin{equation}\label{app1} \zeta(\sigma+it)\cdot \pmb{1}_{A^c}(t) \ll_{\varepsilon} t^{\varepsilon}, \qquad \mbox{as }t\rightarrow\infty \end{equation} and \begin{equation}\label{app2} \frac{1}{\zeta(\sigma+it)}\cdot \pmb{1}_{A^c}(t) \ll_{\varepsilon} t^{\varepsilon}, \qquad \mbox{as }t\rightarrow\infty. \end{equation} \end{corollary} Under the assumption of the Lindel\"of hypothesis, it is known that \eqref{app1} holds for $A=\emptyset$; see Titchmarsh \cite[\S 13.2]{titchmarsh:1986}. If we assume the truth of the Riemann hypothesis, then \eqref{app2} holds for $A=\emptyset $; see Titchmarsh \cite[\S 14.2]{titchmarsh:1986}. In view of Lemma \ref{lem:JM} and Lemma \ref{lem:classNrelfct2}, however, the results of Corollary \ref{cor:app1} are not too surprising and can also be derived by other techniques. \begin{proof} We know that $\zeta^k\in\mathbb{N}o(\frac{1}{2})$ for any $k\in\mathbb{Z}$. According to Theorem \ref{th:probmom}, there exists an $l$-set $A$ of density zero such that, uniformly for $\sigma\geq \alpha$, as $T\rightarrow\infty$, $$ \lim_{T\rightarrow \infty}\frac{1}{T}\int_1^T \left|\zeta(\sigma+it) \right|^{2k} \pmb{1}_{A^c}(t) \mbox{\ d} t =\sum_{n=1}^{\infty}\frac{d_k(n)^2}{n^{2\sigma}}. $$ The assertion follows by standard techniques (see Titchmarsh \cite[\S 13.2]{titchmarsh:1986}). \end{proof} Corollary \ref{cor:app1} can be used to obtain a certain non-denseness result for the Riemann zeta-function in $\sigma<\frac{1}{2}$. \begin{corollary} Let $\alpha<\frac{1}{2}$ and $l>0$. Then, there exists an $l$-set $A\subset [1,\infty)$ of density zero such that, for every $\sigma\leq \alpha$, \begin{equation}\label{app3} \overline{\left\{\zeta(\sigma+it) \, : \, t\in A^c \right\}} \neq \mathbb{C}. 
\end{equation} \end{corollary} Under the assumption of the Riemann hypothesis, Garunk\v{s}tis \& Steuding showed that \eqref{app3} holds for $A=\emptyset$. \begin{proof} This follows directly from \eqref{app2}, the functional equation of the Riemann zeta-function and the asymptotic expansion for $\Delta(s)$. \end{proof} It would be nice to find further applications of the moments in Theorem \ref{th:probmom} to the value-distribution of the Riemann zeta-function. \setcounter{section}{0} \renewcommand{\theequation}{A.\arabic{equation}} \renewcommand{\thesection}{A.\arabic{section}} \renewcommand{\thetheorem}{A.\arabic{theorem}} \chapter*{Appendix} \addcontentsline{toc}{chapter}{Appendix: Normal families of meromorphic functions} \markboth{Normal families}{Normal families} \section*{Normal families of meromorphic functions} The theory of normal families provides a powerful tool to study the value-distribution of meromorphic functions in the neighbourhood of an essential singularity. We shall use this section to outline the basic ideas of this concept. For a thorough account of the theory of normal families and their connection to value-distribution theory, the reader is referred to a monograph by Schiff \cite{schiff:1993} and a nice survey paper of Zalcman \cite{zalcman:1998} which we took as a guideline for most of the following introductory outline.\par {\bf Sequences of analytic functions.} Let $\mathcal{H}(\Omega)$ denote the set of all analytic functions on a domain $\Omega\subset\mathbb{C}$. We start with some classical theorems on sequences $(f_n)_n$ of functions $f_n\in\mathcal{H}(\Omega)$. The locally uniform convergence of $(f_n)_n$ implies already that the limit function is also analytic and that the limiting process is stable with respect to complex differentiation. 
\begin{theorem}[Theorem of Weierstrass]\label{th:weierstrass} Let $(f_n)_n$ be a sequence of functions $f_n\in\mathcal{H}(\Omega)$ that converges locally uniformly on $\Omega$ to a function $f$. Then, $f$ is analytic in $\Omega$ and the sequence of derivatives $(f^{(k)}_n)_n$ converges locally uniformly on $\Omega$ to $f^{(k)}$, $k\in\mathbb{N}$. \end{theorem} For a proof, we refer to Schiff \cite[p. 9]{schiff:1993}. It follows from the analyticity of the limit function $f$ and the theorem of Rouch\'{e} that, for sufficiently large $n$, the functions $f_n$ and $f$ have essentially the same number of zeros in $\Omega$. \begin{theorem}[Theorem of Hurwitz]\label{th:hurwitz} Let $(f_n)_n$ be a sequence of functions $f_n\in\mathcal{H}(\Omega)$ that converges locally uniformly on $\Omega$ to a non-constant function $f$. If $f$ has a zero of order $m$ at $z_0$, then there exists an $r>0$ such that for sufficiently large $n\in\mathbb{N}$, $f_n$ has exactly $m$ zeros (counting multiplicities) in the disc $D_r(z_0)$. \end{theorem} A proof can be found in Schiff \cite[p. 9]{schiff:1993}.\par {\bf Sequences of meromorphic functions.} Let $\mathcal{M}(\Omega)$ denote the set of all meromorphic functions on a domain $\Omega\subset\mathbb{C}$. To define convergence for sequences of meromorphic functions, one usually relies on the chordal metric on the Riemann sphere $\widehat{\mathbb{C}}$. Recall that the chordal metric $\chi(\cdot, \cdot)$ on $\widehat{\mathbb{C}}$ is defined by \begin{align*} \chi(z_1,z_2) & := \frac{|z_1-z_2|}{\sqrt{1+|z_1|^2} \sqrt{1+|z_2|^2}}, \qquad z_1,z_2\in\mathbb{C}, \\ \chi(z_1,\infty) &= \chi(\infty,z_1) := \frac{1}{\sqrt{1+|z_1|^2}}, \quad \;\; z_1\in\mathbb{C},\\ \chi(\infty, \infty) & := 0. \end{align*} Suppose that a sequence $(f_n)_n$ of functions $f_n\in\mathcal{M}(\Omega)$ converges locally uniformly on $\Omega$ to a limit function $f$, with respect to the chordal metric. 
Then, it follows from the theorem of Weierstrass, that $f$ is either meromorphic on $\Omega$ or $f\equiv\infty$. Note that $(f_n)_n$ converges locally uniformly on $\Omega$ to a limit function $f\not\equiv\infty$ in the chordal metric if and only if $(f_n)_n$ converges locally uniformly on $\Omega$ to $f$ in the Euclidean one. Moreover, $(f_n)_n$ converges locally uniformly on $\Omega$ to $f\equiv \infty$ in the chordal metric if and only if $(1/f_n)_n$ converges locally uniformly to $f\equiv 0$ in the Euclidean one. {\bf Normal families.} The concept of normal families dates back to Montel \cite{montel:1911, montel:1946}. A family $\mathcal{F}\subset\mathcal{M}(\Omega)$ is called {\it normal in $\Omega$} if every sequence of functions in $\mathcal{F}$ contains a subsequence that converges locally uniformly with respect to the chordal metric on $\Omega$. The family $\mathcal{F}$ is said to be {\it normal in a point $z_0\in\Omega$} if $\mathcal{F}$ is normal in a neighbourhood of $z_0$. Normality is a local property: one can show that $\mathcal{F}$ is normal in $\Omega$ if and only if it is normal in every point $z_0\in\Omega$.\par Normality yields a concept for compactness in the space $\widehat{\mathcal{M}}(\Omega)$ which we obtain by adding $f\equiv\infty$ to $\mathcal{M}(\Omega)$. The space $\widehat{\mathcal{M}}(\Omega)$ be endowed with the topology of uniform convergence on compact subsets of $\Omega$ and a metric generating the latter such that $\widehat{\mathcal{M}}(\Omega)$ becomes a complete metric space. Then, a family $\mathcal{F}$ of meromorphic functions is normal in $\Omega$ if and only if $\mathcal{F}$ is relatively compact in $\widehat{\mathcal{M}}(\Omega)$. \par {\bf Characterizations of normality.} In terms of equicontinuity and local boundedness, the Theorem of Arzel\`{a}-Ascoli provides necessary and sufficient conditions for subsets of certain function spaces to be relatively compact. 
In the case of normal families of meromorphic functions this translates to the following. \begin{theorem}[Theorem of Arzel\`{a}-Ascoli for families of meromorphic functions] \label{th:montel2} A family $\mathcal{F}\subset\mathcal{M}(\Omega)$ is normal in $\Omega$ if and only if $\mathcal{F}$ is equicontinuous on $\Omega$ with respect to the chordal metric. \end{theorem} A proof can be found in Schiff \cite[p. 74]{schiff:1993}. Montel \cite{montel:1907} observed that for a family of analytic functions locally boundedness implies equicontinuity. We call a family $\mathcal{F}\subset\mathcal{H}(\Omega)$ {\it locally bounded on $\Omega$} if for every $z_0\in\Omega$ there exists a neighbourhood $U(z_0)$ and a positive real number $M$ such that $|f(z)|\leq M$ for all $z\in U(z_0)$ and all $f\in\mathcal{F}$. \begin{theorem}[Montel's theorem] \label{th:montel1} A family $\mathcal{F}\subset\mathcal{H}(\Omega)$ that is locally bounded on $\Omega$ is normal in $\Omega$. \end{theorem} For a proof we refer to Schiff \cite[p. 35]{schiff:1993}. The local boundedness of $\mathcal{F}$ in Montel's theorem implies that $\mathcal{F}$ has no subsequence that converges locally uniformly to $f\equiv\infty$. Thus, one can easily see that the converse of Montel's theorem is not true in general. However, if a family $\mathcal{F}\subset\mathcal{H}(\Omega)$ is normal in $\Omega$ and, additionally, there is a point $z_0\in\Omega$ and a positive real number $M$ such that $$ |f(z_0)| \leq M \qquad \mbox{ for all }f\in\mathcal{F}, $$ then $\mathcal{F}$ is locally bounded.\par A sufficient condition for the local boundedness of a family $\mathcal{F}\subset \mathcal{H}(\Omega)$ can be formulated by means of an area integral. \begin{theorem}\label{th:sufflocalbound} Let $\mathcal{F}\subset\mathcal{H}(\Omega)$. Suppose that there exists a positive real number $M$ such that, for all $f\in\mathcal{F}$, $$ \iint_{\Omega} \left|f(x+iy) \right|^2 \mbox{\ d} x \mbox{\ d} y \leq M. 
$$ Then, $\mathcal{F}$ is locally bounded in $\Omega$. \end{theorem} The proof of Theorem \ref{th:sufflocalbound} relies essentially on an integrated version of Cauchy's integral formula; see Titchmarsh \cite[\S 11.8]{titchmarsh:1986} or Schiff \cite[p. 39]{schiff:1993}. Schiff \cite[p. 39]{schiff:1993} also mentions the following variation of Montel's theorem by Mandelbrojt \cite{mandelbrojt:1929}. \begin{theorem}[Mandelbrojt's variation of Montel's theorem] Let $\mathcal{F}\subset\mathcal{H}(\Omega)$ be a family of zero-free analytic functions. Then, $\mathcal{F}$ is normal in $\Omega$ if and only if the correspoonding family of functions given by $$ F(z,w) = \frac{f(z)}{f(w)} $$ is locally bounded on $\Omega\times\Omega$. \end{theorem}\par Marty \cite{marty:1931} succeeded to connect the chordal equicontinuity of $\mathcal{F}$ in Theorem \ref{th:montel2} with the boundedness of a suitable derivative. For a meromorphic function $f$ on a domain $\Omega$, we define the {\it spherical derivative} by $$ f^{\#} (z) := \lim_{h\rightarrow 0} \frac{\chi(f(z+h), f(z))}{|h|} = \frac{|f'(z)|}{1+|f(z)|^2}, \qquad z\in\Omega. $$ \begin{theorem}[Marty's theorem]\label{th:marty} A family $\mathcal{F}\subset\mathcal{M}(\Omega)$ is normal in $\Omega$ if and only if the corresponding family $$ \mathcal{F}^{\#}:= \left\{f^{\#} \, : \, f\in\mathcal{F}\right\} $$ of spherical derivatives is locally bounded on $\Omega$. \end{theorem} For a proof, we refer to Schiff \cite[p. 75]{schiff:1993}. Montel \cite{montel:1912} revealed a gainful connection of normality to value-distribution theory. We say that a family $\mathcal{F}\subset\mathcal{M}(\Omega)$ {\it omits a value $a\in\widehat{\mathbb{C}}$ on $\Omega$}, if no function $f\in\mathcal{F}$ assumes the value $a$ on $\Omega$. 
\begin{theorem}[Montel's fundamental normality test (FNT)]\label{th:FNT1} A family $\mathcal{F}\subset\mathcal{M}(\Omega)$ that omits three pairwise distinct values $a,b,c\in\widehat{\mathbb{C}}$ is normal in $\Omega$. \end{theorem} Schiff \cite{schiff:1993} presents five different proofs of Montel's FNT: via the elliptic modular function (which provides a mapping from the unit disc to the twice-punctured plane), via Schottky's theorem, via Ahlfors' five islands theorem, via the rescaling lemma of Zalcman and via Nevanlinna theory. There are several extensions of Montel's FNT: assertion (a) of the next theorem deals with the case that all functions in $\mathcal{F}$ omit three values, but not necessarily the same. Assertion (b) treats families that omit just two values. \begin{theorem}[Extensions of Montel's FNT]\label{th:FNTextension} Let $\mathcal{F}\subset\mathcal{M}(\Omega)$. \begin{itemize} \item[(a)] Suppose that there exists a real number $\varepsilon>0$ such that each $f\in\mathcal{F}$ omits three pairwise distinct values $a_f, b_f, c_f\in\widehat{\mathbb{C}}$ with $$ \chi(a_f,b_f), \chi(a_f,c_f),\chi(b_f,c_f) \geq \varepsilon. $$ Then, $\mathcal{F}$ is normal in $\Omega$. \item[(b)] Suppose that there are three pairwise distinct values $a,b,c\in\widehat{\mathbb{C}}$ such that $\mathcal{F}$ omits the values $a,b$ on $\Omega$ and that no function in $\mathcal{F}$ assumes the value $c$ at more than $m\in\mathbb{N}_0$ points. Then, $\mathcal{F}$ is normal in $\Omega$. \end{itemize} \end{theorem} Assertion (b) was proved by Carath\'{e}odory \cite[p. 202]{caratheodory:1960}. We mention here also Grahl \& Nevo \cite{grahlnevo:2010} who generalized assertion (b) to `omitted functions' instead of `omitted values'. Assertion (a) was basically known by Montel \cite{montel:1912}. For a proof, the reader is referred to Schiff \cite[p. 56]{schiff:1993}. 
\par {\bf The rescaling lemma of Zalcman.} Bloch \cite{bloch:1926} observed an analogy between normal families of meromorphic functions and the value distribution of meromorphic functions in $\mathbb{C}$: certain properties which are responsible for a family $\mathcal{F}\subset \mathcal{M}(\Omega)$ to be normal, seem to force a function $f\in\mathcal{M}(\mathbb{C})$ to be constant.\footnote{Compare for example Liouville's theorem with Montel's theorem; and Picard's great theorem with Montel's FNT.} Bloch summarized this observation in the words {\it ``Nihil est in infinito quod non prius fuerit in finito''}. Building on works of Lohwater \& Pommerenke \cite{lohwaterpommerenke:1973}, Bloch's heuristic principle was made rigorous by Zalcman \cite{zalcman:1975}. \begin{theorem}[Rescaling Lemma of Zalcman] \label{th:zalcman} Let $\mathcal{F}\subset\mathcal{M}(\mathbb{D})$. Then, $\mathcal{F}$ is not normal in zero if and only if there exist \begin{itemize} \item[(i)] a sequence $(f_n)_n $ of functions $f_n\in \mathcal{F}$, \item[(ii)] a sequence $(z_n)_n$ of numbers $z_n\in \mathbb{D}$ with $\lim_{n\rightarrow\infty} z_n = 0$, \item[(iii)] a sequence $(\rho_n)_n$ of numbers $\rho_n\in\mathbb{R}^+$ with $\lim_{n\rightarrow\infty} \rho_n = 0$, \end{itemize} such that $$ g_n(z):= f_n(z_n + \rho_n z) $$ converges locally uniformly to a non-constant function $g\in\mathcal{M}(\mathbb{C})$. The function $g$ may be taken to satisfy the normalization $$ g^{\#}(z) \leq g^{\#} (0)=1, \qquad z\in\mathbb{C}. $$ \end{theorem} For a proof of this version of Zalcman's lemma, we refer to Schwick \cite{schwick:1989} and also to Schiff \cite[p. 102]{schiff:1993}. There are several generalizations of Zalcman's lemma known, see for example Pang \cite{pang:1989}. 
For a given family $\mathcal{F}\subset\mathcal{M}(\mathbb{D})$ which is not normal in zero, it seems in general very difficult to get more detailed information about the sequences $(f_n)_n$, $(z_n)_n$ and $(\rho_n)_n$ satisfying the assertion of the rescaling lemma. By using the rescaling lemma one can deduce, for example, Montel's FNT and its extensions from Picard-type theorems on the value distribution of functions in $\mathcal{M}(\mathbb{C})$. {\bf Schottky's Theorem.} By combining Montel's fundamental normality test with Montel's theorem, one can easily deduce a well-known theorem of Schottky \cite{schottky:1904}. \begin{theorem}[Schottky's Theorem] Let $f\in\mathcal{H}(\mathbb{D})$. Suppose that $f$ omits the values $0$ and $1$ on $\mathbb{D}$ and that $|f(0)| \leq \alpha$. Then, for every $0<r<1$, there exists a positive constant $M(r,\alpha)$, which does only depend on $r$ and $\alpha$, such that $$ \left|f(z)\right| \leq M(r,\alpha) \qquad \mbox{for all } z\in D_r(0). $$ \end{theorem} To obtain explicit expressions for the bounds $M(r,\alpha)$ in terms of $r$ and $\alpha$, further methods are necessary. We refer to Burckel \cite[Chapt. XII, \S 2]{burckel:1979}. Hempel \cite{hempel:1980} derived explicit bounds $M(r,\alpha)$ for Schottky's theorem which are, in a certain sense, best possible. According to his investigations, one can take $$ M(r,\alpha) = \frac{1}{16} \Bigl( \min\bigl\{ 16\alpha + 8 ; e^{\pi}\cdot \max\{\alpha;1\}\bigr\} \Bigr)^{\frac{1+r}{1-r}} . $$ Moreover, if $\alpha<1$, the choice $$ M(r,\alpha) = \frac{1}{16} \exp\left( \frac{\pi^2}{\log(16/\alpha)}\cdot \frac{1+r}{1-r}\right) $$ is admissible, too. \end{document}
\begin{document} \title{Index Calculations} \begin{abstract} Given a contact three manifold $Y$ with a nondegenerate contact form $\lambda$, and an almost complex structure $J$ compatible with $\lambda$, its embedded contact homology $ECH(Y,\lambda)$ is defined (\cite{bn}) and only depends on the contact structure. In this paper we explain how to compute ECH for Morse-Bott contact forms whose Reeb orbits appear in $S^1$ families, assuming the almost complex structure $J$ can be chosen to satisfy certain transversality conditions (this is the case for instance for boundaries of concave or convex toric domains, or if all the curves of ECH index one have genus zero). We define the ECH chain complex for a Morse-Bott contact form via an enumeration of ECH index one cascades. We prove using gluing results from \cite{Yaocas} that this chain complex computes the ECH of the contact manifold. This paper and \cite{Yaocas} fill in some technical foundations for previous calculations in the literature (\cite{choi2016combinatorial}, \cite{ECHT3}). \end{abstract} \tableofcontents \section{Introduction} \subsection{Embedded contact homology} In this article we develop some tools to compute the embedded contact homology (ECH) of contact 3-manifolds in Morse-Bott settings. ECH is a Floer theory defined for a pair $(Y,\lambda)$, where $Y$ is a three dimensional contact manifold with nondegenerate contact form $\lambda$ (for an introduction see \cite{bn}). The ECH chain complex is generated by orbit sets of the form $\alpha = \{(\gamma_i,m_i)\}$. Here $\gamma_i$ are distinct simply covered Reeb orbits of $\lambda$; and $m_i$ is a positive integer which we call the multiplicity of $\gamma_i$. To describe the differential, consider the symplectization $(\mathbb{R}\times Y, d(e^s \lambda))$ of $Y$ with almost complex structure $J$. 
Here $s$ denotes the variable in the $\mathbb{R}$ direction; and $J$ is a generic $\lambda$-compatible almost complex structure (see Definition \ref{compatibleJ}). The differential of ECH, which we write as $\partial$, is defined by counting holomorphic currents of ECH index $I=1$ in the symplectization. More precisely, the coefficient $\langle \partial \alpha,\beta \rangle$ is defined by counts of $J$-holomorphic currents that approach $\alpha$ as $s\rightarrow \infty$ and $\beta$ as $s\rightarrow -\infty$, where convergence to $\alpha,\beta$ is in the sense of currents. The resulting homology, which we write as $ECH(Y,\xi)$, is an invariant of the contact structure $\xi = \textup{ker}\, \lambda$. See Section \ref{ECH review} below for a more precise review of ECH and the ECH index. In part due to its gauge theoretic origin, ECH has had spectacular applications to understanding symplectic problems and dynamics in low dimensions; for instance sharp symplectic embedding obstructions of four dimensional symplectic ellipsoids (\cite{Mcduffemb}), closing lemmas for Reeb flows on contact 3-manifolds (\cite{irie}), the Arnold chord conjecture (\cite{arnoldchord1,arnoldchord2}), and quantitative refinements of the Weinstein conjecture \cite{1Reeb2}. Several computations (e.g. \cite{ECHT3, choi2016combinatorial,lebow}) and applications (e.g. \cite{beyondech}) of ECH have assumed results from its Morse-Bott version, which we develop in detail in this paper. \subsection{Morse-Bott theory} The original definition of ECH requires we use non-degenerate contact forms. However, in practice many contact forms we encounter carry Morse-Bott degeneracies, for which the Reeb orbits are no longer isolated but instead show up in families with weaker non-degeneracy conditions imposed (for a more precise description, see Definition 3.2 in \cite{oh_wang_2018}). 
Although all Morse-Bott contact forms can be perturbed to non-degenerate ones, it is often useful to be able to compute ECH directly in the Morse-Bott setting, where often the enumeration of $J$-holomorphic curves is easier. For ECH, since we only consider 3-manifolds, the two Morse-Bott cases are either when the Reeb orbits come in a two dimensional family, or come in one dimensional families. For the first case it then follows that the entire contact manifold is foliated by periodic Reeb orbits. ECH with this kind of Morse-Bott degeneracy has been computed in many cases by \cite{nelson2020embedded}, see also \cite{farris}. The other case is when Reeb orbits show up in one dimensional $S^1$ families, i.e. we see tori foliated by Reeb orbits. We shall call these tori Morse-Bott tori. It is with this case we concern ourselves in this paper (for a description of what the contact form looks like, see Proposition \ref{prop_locform}). Examples of this include boundaries of toric domains, and torus bundles over the circle see \cite{Hermann, intoconcave, danconvex, lebow}. For now we consider $(Y^3,\langlembda)$ a contact 3-manifold where $\langlembda$ is a Morse-Bott contact form all of whose Reeb orbits appear in $S^1$ families. Later for the case of boundary of convex or concave toric domains (Sections \ref{section concave},\ref{section convex}) we allow the case of both nondegenerate Reeb orbits and $S^1$ families of Reeb orbits. We consider the symplectization with a generic $\langlembda$ compatible almost complex structure $J$ (see Definition \ref{compatibleJ}) \[ (\mathbb{R} \times Y^3, d(e^s \langlembda)). \] Following the recipe described in \cite{BourPhd}, to compute ECH in the Morse-Bott setting we shall count holomorphic cascades of ECH index one. 
The philosophy behind this is as follows: given $\langlembda$, a Morse-Bott contact form with Reeb orbits in Morse-Bott tori, we can perturb \[ \langlembda \longrightarrow \langlembda_\delta \] where $\langlembda_\delta$ with $\delta>0$ is a nondegenerate contact form up to a certain action level $L>>0$. This perturbation requires the following information. For each circle of orbits parameterized by $S^1$, choose a Morse function $f$ on $S^1$ with two critical points. The effect of this perturbation is so that each Morse-Bott torus splits into two nondegenerate Reeb orbits (corresponding to the critical points of $f$): one is an elliptic orbit and the other is a hyperbolic orbit. We also need to perturb the $\langlembda$-compatible almost complex structure on the symplectization into a $\langlembda_\delta$ compatible almost complex structure, $J_\delta$. Since $\langlembda_\delta$ is nondegenerate up to action $L$, we can define the ECH chain complex up to action $L$ in this case by counting ECH index one $J_\delta$-holomorphic curves. The idea is to take $\delta \rightarrow 0$ and see what these ECH index one holomorphic curves degenerate into. By a compactness theorem in \cite{SFT} (see also \cite{BourPhd, Yaocas}), such $J_\delta$-holomorphic curves degenerate into $J$-holomorphic cascades. For a definition of $J$-holomorphic cascade, see \cite{Yaocas}. Roughly speaking, a $J$-holomorphic cascade, which we shall write as $\cas{u}$, consists of a sequence of $J$-holomorphic curves $\{u^1,..,u^n\}$ that have ends on Morse-Bott tori. We think of the curves $u^i$ as living on different levels, with $u^i$ one level above $u^{i+1}$. Between adjacent levels there is the data of a single number $T_i\in [0,\infty]$ described as follows. Suppose a positive end of $u^{i+1}$ is asymptotic to a simply covered Reeb orbit $\gamma$ with multiplicity $n$. This $\gamma$ corresponds to a point on $S^1 $ (the $S^1$ that parameterizes the family of Morse-Bott Reeb orbits). 
Then if we follow the upwards gradient flow of $f$ for time $T_i$ starting at the point corresponding to the Reeb orbit $\gamma$, we arrive at a Reeb orbit $\tilde{\gamma}$, and a negative end of $u^i$ is asymptotic to $\tilde{\gamma}$ with the same multiplicity $n$. We assume all positive ends of $u^{i+1}$ and negative ends of $u^{i}$ are matched up in this way. For an illustration of a cascade\footnote{This figure and the accompanying explanations are taken from Figure 1 in \cite{Yaocas}.}, see Figure 1. \begin{figure} \caption{A schematic picture of a cascade: the cascade $\cas{u}$.} \end{figure} \subsection{Main results} The Morse-Bott ECH chain complex which we write as $(C_*^{MB},\partial_{MB})$ (see section \ref{section:computing using cascades}) can be described as follows. Its generators are collections of Morse-Bott tori, equipped with a multiplicity and additional data, which we write as $\alpha = \{(\mathcal{T}_j,\pm, m_j)\}$. Here $\mathcal{T}_j$ denotes a Morse-Bott torus; we call $m_j$ the multiplicity; and a choice of $+$ or $-$. See Section \ref{MBT as ECH} for a description. Suppose we can choose a $\lambda$ compatible almost complex structure $J$ which is ``good'' (see definition \ref{def:good j}), meaning certain transversality conditions (Definition \ref{def:transversality conditions}) are satisfied. The differential in the Morse-Bott chain complex $\partial_{MB}$ counts ECH index one cascades between Morse-Bott ECH generators. The ECH index of a cascade is described in Section \ref{Section:ECH index}. We describe what it means for a cascade to be asymptotic to a Morse-Bott ECH generator in Section \ref{MBT as ECH}. For a description of what ECH index one cascades look like, see Corollary \ref{conditions on currents}, Prop. \ref{nice cascades}. We prove that \begin{theorem} \label{maintheorem_intro} Let $\lambda$ be a Morse-Bott contact form on the contact 3-manifold $Y$ whose Reeb orbits all appear in $S^1$ families. 
Assuming the almost complex structure $J$ is good (see Definition \ref{def:good j}), the homology of the Morse-Bott ECH chain complex computes the ECH of the contact manifold $ECH(Y,\xi)$. \end{theorem} A slightly more precise version of this theorem that we prove is Theorem \ref{theorem:cobordism in general}. We next find some instances there is enough transversality to compute ECH using the Morse-Bott chain complex. \begin{theorem} \langlebel{thm:list of transversality conditions} Let $\langlembda$ be a Morse-Bott contact form on the contact 3-manifold $Y$ whose Reeb orbits all appear in $S^1$ families. We can choose a generic $J$ so that \begin{itemize} \item Every reduced cascade (See Definition \ref{def:reduced cascade}) of $\leq 3$ levels is transversely cut out (see Definition \ref{def:transversality conditions}). \item Every reduced cascade where all of the (nontrivial) $J$-holomorphic components of the reduced cascade (in all of its levels) are distinct up to translation in the symplectization direction is transversely cut out (see Definition \ref{def:transversality conditions}). \end{itemize} If we can show through some other means that we can choose a small perturbation of $J$ to $J_\delta$ satisfying conditions of Theorem \ref{generic path J} so that for small enough $\delta$, all ECH index one curves degenerate into cascades whose reduced version satisfy either of the above conditions, then consider the Morse-Bott ECH chain complex $(C_*^{MB},\partial_{MB})$ as described more precisely in Section \ref{section:computing using cascades}. For the differential $\partial_{MB}$, if we restrict to ``good'' cascades (see Sections \ref{Section:ECH index}, \ref{section:computing using cascades} for the notion of ``good'') of ECH index one whose reduced versions are of the above form, the differential is well defined and the chain complex $(C_*^{MB},\partial_{MB})$ computes $ECH(Y,\xi)$. 
\end{theorem} For a discussion how these conditions arise and a proof of this theorem, see the Appendix. This list is by no means exhaustive. We expect there are many other situations where transversality can be achieved; the particulars will depend on the specific details of the contact manifold for which we are computing the ECH chain complex. In particular, for the case relevant for boundaries of convex and concave toric domains, we have the following: \begin{theorem} Let $\langlembda$ be a contact form on the contact 3-manifold $Y$ whose Reeb orbits apppear either in Morse-Bott $S^1$ families or are non-degenerate. Let $\delta>0$, and $\langlembda_\delta$ be the nondegenerate perturbation of $\langlembda$ that perturbs each $S^1$ family of Reeb orbits into two nondegenerate ones. If for $\delta>0$ small enough, all $J_\delta$ holomorphic curves of ECH index one in $\mathbb{R} \times Y^3$ have genus zero, then the embedded contact homology of $Y$ can be computed from the Morse-Bott chain complex $(C_*^{MB,tree},\partial_{MB}^{tree})$ (see Section \ref{section:ECH index one curves of genus zero}) using an enumeration of tree like cascades. \end{theorem} To be more precise, for the above theorem we need to use a slightly different description of cascades which we call ``tree like'' cascades, which is explained in Sections \ref{section:ECH index one curves of genus zero}, \ref{section concave}, \ref{section convex}. Consequently, we can prove \begin{theorem} For boundaries of concave toric domains or convex toric domains, in the nondegenerate case after a choice of generic almost complex structure all curves of ECH index one have genus zero. Therefore the ECH of boundaries of concave/convex toric domains can be computed using the Morse-Bott ECH chain complex $(C_*^{MB,tree},\partial_{MB}^{tree})$, via counts of tree-like ECH index one cascades. 
\end{theorem} For a definition of convex and concave toric domains, see Sections \ref{section concave}, \ref{section convex}. We mention some previous computations of ECH that have assumed Morse-Bott theory of the flavour we develop in this paper, notably in \cite{ECHT3} for the case of $T^3$, and \cite{choi2016combinatorial} for certain toric contact 3-manifolds, and \cite{lebow} for the case of $T^2$ bundles over $S^1$. This paper and the gluing paper \cite{Yaocas} fill in the foundations for these results. \begin{remark} The above theorems say for genus zero curves we have all the transversality we need by simply restricting to cascades of ECH index one and choosing a generic $J$; however this result is not strict, there could well be other scenarios where transversality can be achieved. For instance we expect with some more care we can show the moduli space of cascades of ECH index one and genus one can be shown to be transverse. For discussion of general difficulties see the Appendix. \end{remark} \subsection{Some technical details} For ECH in the nondegenerate setting (see \cite{bn}), as we review in Section \ref{ECH review}, the Fredholm index of a somewhere injective curve is bounded from above by its ECH index. Further, the ECH index is superadditive under unions of $J$-holomorphic curves in symplectizations. Using the fact that after choosing a generic almost complex structure, all somewhere injective curves are transversely cut out, it follows that by restricting to only ECH index one curves we do not need to consider multiply covered nontrivial curves. With this, one defines the ECH differential in the nondegenerate setting via counts of ECH index one $J$-holomorphic curves. Parts of the above story continue to hold in the case of cascades if we assume can choose $J$ to be good (Definition \ref{def:good j}), as we explain below. 
We first note that the notion of an ECH index continues to make sense for cascades, as we explain in Section \ref{Section:ECH index}. The case of cascades, however, is more complicated, in two directions. \begin{itemize} \item During the degeneration process for $\langlembda_\delta$ as $\delta\rightarrow 0$, simple curves may degenerate into cascades that have multiply covered components; \item For generic $J$, and even if we restrict to cascades all of whose curves are somewhere injective, the cascade need not be transversely cut out. \end{itemize} The second bullet point is the most problematic. This happens because by requiring there is a single parameter between adjacent levels, we are imposing restrictions on the evaluation maps on the ends of the curves in a cascade. Hence a cascade lives in a fiber product, which need not be transversely cut out even if we restrict to only somewhere injective curves. For an explanation of this, see the Appendix. However, if we take as an \emph{assumption} that $J$ is good (which isn't always possible, it will depend on the specific contact manifold), then all cascades built out of somewhere injective curves that we consider are transversely cut out. Then we can address the first bullet point by using a version of the ECH index inequality for cascades . To explain the ECH index inequality for cascades, consider the following. Given a cascade, we can pass to a reduced cascade, which means we replace all multiply covered curves with the underlying simple curves. See Section \ref{degenerations} for a precise description of this process. The reduced cascade also lives in a fiber product because of the conditions we imposed on its ends. By the assumption that $J$ is good (and consequently transversality assumptions in Definition \ref{def:transversality conditions} are satisfied), the reduced cascade is transversely cut out. 
To each reduced cascade we can associate to it a virtual dimension, which is the dimension of the moduli space of curves that lies in the same configuration as the reduced cascade. We prove that the ECH index of the cascade bounds the Fredholm index of the reduced cascade from above; and that equality holds only if the original cascade had no multiply covered components (and is well behaved in various ways, see Section \ref{Section:ECH index}). In \cite{Yaocas}, we proved a correspondence theorem between certain cascades and $J$-holomorphic curves. \begin{theorem}[\cite{Yaocas}] Given a ``transverse and rigid'' (see Definition 3.4 in \cite{Yaocas}) height one $J$-holomorphic cascade $\cas{u}$ , it can be glued to a rigid $J_\delta$-holomorphic curve $u_\delta$ for $\delta>0$ sufficiently small. The construction is unique in the following sense: if $\{\delta_n\}$ is a sequence of numbers that converge to zero as $n\rightarrow \infty$, and $\{u'_{\delta_n}\}$ is sequence of $J_{\delta_n}$-holomorphic curves converging to $\cas{u}$, then for large enough $n$, the curves $u_{\delta_n}'$ agree with $u_{\delta_n}$ up to translation in the symplectization direction. \end{theorem} In this paper, using index calculations, we show that if $J$ is good (some instances of which are described in Theorems \ref{thm:list of transversality conditions}), then essentially all ECH index one cascades are transverse and rigid\footnote{Technically we need to restrict ourselves to \emph{good} ECH index one cascades. This is a fairly minor point, but see Proposition 5.32 and surrounding discussion.}. Thus the gluing theorem above is then used to show the Morse-Bott chain complex computes $ECH(Y,\langlembda)$. 
In the cases where we use ``tree like'' cascades, for instance for boundaries of convex or concave toric domains, the definitions are slightly different, but essentially the same story holds true and we can always choose a generic $J$ so that the Morse-Bott chain complex computes $ECH(Y,\lambda)$. Finally in the Appendix we explain why the usual techniques for achieving transversality fail for cascades. \\ \textbf{Acknowledgements} I would like to thank my advisor Michael Hutchings for his consistent help and support throughout this project. I would like to acknowledge the support of the Natural Sciences and Engineering Research Council of Canada (NSERC), PGSD3-532405-2019. Cette recherche a été financée par le Conseil de recherches en sciences naturelles et en génie du Canada (CRSNG), PGSD3-532405-2019. \section{ECH review} \label{ECH review} For a thorough introduction to ECH see \cite{bn}. We will summarize much of the material from \cite{bn} and \cite{Hutchings2002} for convenience of the reader. Let $(Y^3,\lambda)$ be a contact 3-manifold with nondegenerate contact form $\lambda$. The generators of ECH are collections $\Theta$, where each $\Theta$ is a set of Reeb orbits with multiplicities \[ \Theta:= \{ (\gamma_i,m_i) | \gamma_i \, \text{are pairwise distinct simply covered Reeb orbits},\, m_i \in \mathbb{Z}_+\}. \] We require $m_i=1$ if $\gamma_i$ is a hyperbolic orbit. Then the chain groups for ECH are just \[ C_*(\lambda') := \bigoplus_{\Theta_i} \mathbb{Z}_2 \langle \Theta_i \rangle . \] \begin{remark} There is a decomposition of ECH according to the homology class of $\Theta_i$ in $H_1(Y)$. ECH can also be defined using $\mathbb{Z}$ coefficients. We will not address these issues here. \end{remark} Let $\alpha, \beta$ be ECH generators. Consider the symplectization of $Y$, defined as the symplectic manifold $(\mathbb{R} \times Y, \omega := d(e^a \lambda))$, where $a$ denotes the $\mathbb{R}$ coordinate.
Equip it with a generic $\lambda$-compatible almost complex structure $J$. By compatible we mean the following \begin{definition}\label{compatibleJ} Let $\lambda$ be a contact form (not necessarily nondegenerate) on a contact 3-manifold. Let $J$ be an almost complex structure on the symplectization $(\mathbb{R} \times Y, \omega := d(e^a \lambda))$. We say $J$ is compatible with $\lambda$ if \begin{enumerate} \item $J$ is invariant in the $\mathbb{R}$ direction; \item Let $R$ denote the Reeb vector field, then $J\partial_s =R$; \item Let $\xi$ denote the contact structure, then $J\xi =\xi$ and $d\lambda(\cdot, J\cdot)$ defines a metric on $\xi$. \end{enumerate} \end{definition} Then the coefficient $\langle\partial \alpha, \beta \rangle $ is defined by \begin{equation} \langle\partial \alpha, \beta \rangle := \left\lbrace \begin{tabular}{@{}l@{}} $\mathbb{Z}_2$\, \textup{count of holomorphic currents}\, $\mathcal{C}$\, \textup{of ECH index} \,$I=1$,\\ \textup{so that as} $s\rightarrow +\infty, \, \mathcal{C}$ \,\text{approaches}\, $\alpha$ \textup{as a current, and as} $s\rightarrow -\infty$, \\ $\mathcal{C}$\, \text{approaches} $\beta$ \,\text{as a current}. \end{tabular} \right\rbrace \end{equation} A holomorphic current $\mathcal{C}$ is by definition a collection $\{(C_i,m_i)\}$ where each $C_i$ is a somewhere injective $J$ holomorphic curve and $m_i \in \mathbb{Z}_{>0}$ accounts for the multiplicity of this curve. The ECH index $I$ of a holomorphic curve $C$ (or more generally a relative 2 homology class in $H_2(\alpha,\beta,Y)$, see section below for a definition) is defined by \begin{equation} I(C) := Q_\tau(C) +c_\tau(C) +CZ^I(C) \end{equation} where $Q_\tau(C)$ is the relative intersection number, $c_\tau(C)$ is the relative Chern class, and $CZ$ is a sum of Conley-Zehnder indices used in ECH. We will review these terms in the upcoming subsections. \subsection{Relative first Chern class} Let $\alpha,\beta$ be orbit sets.
We define the relative homology group $H_2(\alpha,\beta,Y)$ to be the set of 2-chains $\Sigma$ with \[ \partial \Sigma = \alpha -\beta \] modulo boundary of 3 chains. This is an affine space over $H_2(Y)$, and each $J$ holomorphic curve defines a relative homology class. We fix trivializations $\tau$ of the contact structure $\xi$ over each Reeb orbit in $Y$. We then define the relative first Chern class $c_\tau$ with respect to this choice of trivialization. For a given homology class in $H_2(\alpha,\beta,Y)$, choose a representative $Z\in H_2(\alpha,\beta,Y)$ that is embedded near its boundaries $\alpha,\beta$. We assume $Z$ is a smooth surface. Let $\iota: Z \rightarrow Y$ be the inclusion. Then consider the bundle $\iota^*\xi$ over $Z$. Let $\psi$ be a section of this bundle that is constant with respect to the trivialization $\tau$ near each of the Reeb orbits, and perturb $\psi$ so that all of its zeroes are transverse. Then $c_\tau(Z)$ is defined to be the algebraic count of zeroes of $\psi$. See \cite{bn} for a more thorough explanation and a proof that this is well defined. \subsection{Writhe} Let $C$ be a somewhere injective $J$ holomorphic curve in the symplectization of $Y$, $(\mathbb{R}\times Y,d(e^a \lambda))$ (with generic $\lambda$-compatible complex structure $J$) that is asymptotic to $\alpha$ as $s\rightarrow +\infty$ and $\beta$ as $s\rightarrow -\infty$. For simplicity we focus on the $s\rightarrow +\infty$ end. It is known (see for example \cite{siefring}) that for $s$ sufficiently large, $C\cap \{s\}\times Y$ is a union of embedded curves near each orbit of $\alpha$. For each orbit $\gamma_i$ of $\alpha$, the curves $C\cap \{s\}\times Y$ form a braid $\zeta_i^+$. We use the trivialization $\tau$ to identify the braids $\zeta_i^+$ with braids in $S^1 \times D^2$.
We can define the writhe of $\zeta_i^+$ by identifying $S^1\times D^2$ with an annulus times an interval, projecting $\zeta_i^+$ to the annulus, and counting crossings with signs. The same sign convention is clearly explained in \cite{hutching_revisited}. Then given a somewhere injective $J$-holomorphic curve $C$ that is not the trivial cylinder, with braids $\zeta_i^+$ associated to the $i$-th Reeb orbit it approaches as $s\rightarrow +\infty$ and braids $\zeta_j^-$ associated to the $j$th Reeb orbit it approaches as $s\rightarrow -\infty$ we define its writhe to be \[ w_\tau(C) := \sum_i w_\tau(\zeta_i^+) - \sum_j w_\tau (\zeta_j^-). \] We also recall the writhe of the braid $\zeta_i^+$ can be bounded by expressions in terms of the Conley-Zehnder indices. \begin{proposition} Let $C$ be a somewhere injective holomorphic curve that is not a trivial cylinder which is asymptotic to $\gamma_i$ with total multiplicity $n_i$. Suppose there are $k_i$ distinct ends of $C$ that are asymptotic to $\gamma_i$, with covering multiplicities $q_i^j$. Then the writhe associated to the braid $\zeta_i^+$ corresponding to Reeb orbit $\gamma_i$ is bounded above by \begin{equation} w_\tau (\zeta_i^+) \leq \sum_{j=1}^{n_i} CZ(\gamma_i^j) - \sum_{j=1}^{k_i}CZ(\gamma_i^{q_i^j}) \end{equation} A similar bound holds for braids at $s\rightarrow -\infty$ with signs reversed. \end{proposition} We will derive an analogue of this bound for the Morse-Bott case. For now we recall another definition: \begin{definition} Let $C$ be a somewhere injective $J$-holomorphic curve that is not a trivial cylinder.
For each $\gamma_i$ that $C$ is asymptotic to as $s\rightarrow +\infty$, form the sum $CZ^I(\gamma_i): = \sum _{j=1}^ {n_i} CZ(\gamma_i^j)$ as above, and for each $\gamma_i'$ that $C$ is asymptotic to as $s\rightarrow -\infty$, we form an analogous sum, then we define \begin{equation} CZ^I(C) : = \sum_{\substack{\gamma_i, \\C \, \text{is asymptotic to}\, \gamma_i,\\ \text{as } \, s\rightarrow +\infty}}CZ^I(\gamma_i) - \sum_{\substack{\gamma_i', \\C \, \text{is asymptotic to} \, \gamma_i',\\ \text{as } \, s\rightarrow -\infty}}CZ^I(\gamma_i'). \end{equation} \end{definition} This is the Conley-Zehnder index term that appears in the definition of ECH index. \subsection{Relative adjunction formula} In this section we review the relative adjunction formula (see \cite{bn, Hutchings2002}). We first review the notion of relative intersection pairing, which is a map depending on the trivialization $\tau$: \[ Q_\tau: H_2(\alpha,\beta,Y) \times H_2(\alpha,\beta,Y) \rightarrow \mathbb{Z} \] as follows. Let $S$ and $S'$ be surfaces representing relative homology classes in $H_2(\alpha,\beta,Y)$. If we identify $\mathbb{R} \times Y$ with $ (-1,1)\times Y \subset [-1,1] \times Y$, then we have by definition \[ \partial S =\partial S' = \sum_i m_i \{1\} \times \alpha_i - \sum_i n_i \{-1\} \times \beta_i \] We make the following requirements on the representatives $S$ and $S'$: \begin{enumerate} \item The projections to $Y$ of the intersections of $S$ and $S'$ with $(1-\epsilon,1]\times Y$ and $[0,\epsilon) \times Y$ are embeddings. \item Each end of $S$ or $S'$ covers Reeb orbits $\alpha_i$ (resp $\beta_i$) with multiplicity $1$. \item The image of $S$ (after projecting to $Y$ in a neighborhood $S^1 \times D^2$ of $\alpha_i$ determined by the trivialization $\tau$) do not intersect, and do not rotate with respect to the chosen trivialization $\tau$ as one goes around $\alpha_i$. 
Further, the images of different ends of $S$ approaching $\alpha_i$ lie on distinct rays in a neighborhood of $\alpha_i$. More concretely, using the trivialization $\tau$ to identify a neighborhood of $\alpha_i$ with $S^1\times \mathbb{R}^2$, ends of $S$ approach $\alpha_i$ along different rays in $\mathbb{R}^2$. We make a similar requirement for $\beta_i$. We make a similar requirement for $S'$. \item All interior intersections between $S$ and $S'$ are transverse. \end{enumerate} Representatives satisfying all of the above conditions are called $\tau$-representatives in \cite{Hutchings2002}, which is a definition we will adopt. Then given $\tau$-representatives as listed above, $Q_\tau(S,S')$ is defined to be the algebraic count of intersections between $S$ and $S'$. We are now ready to state the relative adjunction formula, see also \cite{Hutchings2002}. \begin{proposition} If $C$ is a somewhere injective $J$ holomorphic curve, \begin{equation} c_\tau(C)=\chi(C)+Q_\tau(C)+w_\tau(C) -2\delta (C) \end{equation} where $\delta(C)\geq 0$ is defined to be an algebraic count of singularities of $C$. Each singularity is positive due to the fact that $C$ is $J$-holomorphic. \end{proposition} \subsection{ECH index inequality} We have now defined all of the terms that appear in the ECH index inequality. We compare this with the Fredholm index. Let $C$ be a somewhere injective $J$-holomorphic curve, let $Ind(C)$ denote the Fredholm index of $C$, which in this case is given by \[ -\chi(C) + 2c_\tau(C) + CZ^{Ind}(C). \] Here $CZ^{Ind}(C)$ is defined as follows. If $C$ is positively asymptotic to $\gamma$ with $k$ ends, each of multiplicity $q_k$, then the contribution to $CZ^{Ind}(C)$ from $\gamma$ is given by $\sum_k CZ(\gamma^{q_k})$. Similarly if $C$ is asymptotic to $\gamma$ at the negative ends, then its contribution to $CZ^{Ind}(C)$ is $-\sum_k CZ(\gamma^{q_k})$.
\begin{theorem} Let $C$ denote a somewhere injective $J$-holomorphic curve as above, then we have the following inequality \begin{equation} Ind(C) \leq I(C) -2\delta (C). \end{equation} \end{theorem} An immediate corollary of the above is \begin{corollary} Let $\mathcal{C}$ be a $J$-holomorphic current of $I(\mathcal{C}) =1$. Then for generic $J$, the current $\mathcal{C}$ must satisfy \begin{enumerate} \item It contains an unique connected embedded curve $C$ of multiplicity one that is not a trivial cylinder. The ends of $C$ approach Reeb orbits according to partition conditions. (See \cite[Section 3]{bn} for a discussion of partition conditions). We will review the relevant partition conditions in the Morse-Bott setting later). \item The other components of $\mathcal{C}$ are trivial cylinders with multiplicities. \end{enumerate} \end{corollary} \begin{convention}\langlebel{nontrivial} In this paper we describe a correspondence between ECH index 1 currents in the nondegenerate setting and ECH index 1 cascades in the Morse-Bott setting. We will only care about the nontrivial part of the ECH index 1 current, as the trivial cylinders correspond trivially in the non-degenerate and Morse-Bott situations. Hence when we say cascade, or a sequences of ECH index one curves/currents degenerating into a cascade, unless stated otherwise, we will always be considering what happens to the nontrivial part of the ECH index one current, and what cascade it corresponds to. \end{convention} \subsection{\texorpdfstring{$J_0$}{J0} index and finiteness} We recall (without proof) the following proposition (see \cite{Hutchings2002},\cite{bn}): \begin{proposition} Let $\alpha,\beta$ be ECH generators. We choose a generic $J$, and let $\mathcal{M}^{I=1}(\alpha,\beta)/\mathbb{R}$ denote the moduli space of ECH index $=1$ currents from $\alpha$ to $\beta$ modulo the action of $\mathbb{R}$. Then $\mathcal{M}^{I=1}(\alpha,\beta)/\mathbb{R}$ is a finite collection of points. 
\end{proposition} We will mention two results that go into this proof, for we will need analogous constructions in the Morse-Bott context. \begin{definition} Let $\alpha =\{(\alpha_i,m_i)\},\beta=\{(\beta_i,n_i)\}$ be ECH generators, let $Z\in H_2(\alpha, \beta, Y)$ be a relative homology class. We define: \begin{equation} J_0(\alpha,\beta,Z) =-c_\tau(Z)+Q_\tau(Z) + CZ^{J_0}(\alpha,\beta) \end{equation} where \begin{equation} CZ^{J_0}(\alpha,\beta):=\sum_i\sum_{k=1}^{m_i-1}CZ(\alpha_i^k)-\sum_i\sum_{k=1}^{n_i-1}CZ(\beta_i^k) \end{equation} \end{definition} We have the following proposition bounding the topological complexity of holomorphic curves counted by ECH index 1 conditions: \begin{proposition} Let $\mathcal{C} \in \mathcal{M}^{I=1}(\alpha,\beta)$, which decomposes as $\mathcal{C} = C_0 \cup C$ where $C_0$ is a union of trivial cylinders, and $C$ is somewhere injective and nontrivial. Let $n_i^+$ denote the number of positive ends $C$ has at $\alpha_i$, plus 1 if $C_0$ includes cylinders of the form $\mathbb{R} \times \alpha_i$, define $n_j^-$ analogously for $\beta$ and negative ends of $C$ then \begin{equation} -\chi(C) +\sum_i(n_i^+-1)+\sum_j(n_j^--1) \leq J_0(C). \end{equation} \end{proposition} Finally we state the version of Gromov compactness for currents. Let $\alpha, \beta$ be orbit sets, we define a broken holomorphic current from $\alpha,\beta$ to be a finite sequence of $J$ nontrivial holomorphic currents $(\mathcal{C}^0,..,\mathcal{C}^k)$ in $\mathbb{R} \times Y$ such that there exists orbit sets $\alpha =\gamma^0,\gamma^1,..,\gamma^{k+1}=\beta$ so that $\mathcal{C}^i \in \mathcal{M}(\gamma^i,\gamma^{i+1})$ (this notation means $\mathcal{C}^i$ is a current from the orbit set $\gamma^i$ to $\gamma^{i+1}$). By nontrivial we mean a current is not entirely composed of unions of trivial cylinders. 
We say a sequence of holomorphic currents $\{\mathcal{C}_{v\geq1} \} \in \mathcal{M}(\alpha,\beta)$ converges to $(\mathcal{C}^0,..,\mathcal{C}^k)$ if for each $i=0,..,k$ there are representatives $\mathcal{C}_\nu^i$ of $\mathcal{C}_\nu \in \mathcal{M}(\alpha,\beta)/\mathbb{R}$ such that the sequence $\{\mathcal{C}_{v\geq1} \}$ converges as a current and as a point set on compact sets to $\mathcal{C}^i$. \begin{proposition}(\cite{bn}, \cite{Taubescompactness} Prop 3.3 ) Any sequence $\{\mathcal{C}_v\} $ of holomorphic currents in $\mathcal{M}(\alpha,\beta)/\mathbb{R}$ has a subsequence which converges to a broken holomorphic current $(\mathcal{C}^0,..,\mathcal{C}^k)$. Further if we denote $\{\mathcal{C}_v\} $ the convergent subsequence, we have the equality \begin{equation} [\mathcal{C}_v] = \sum_{i=0}^k[\mathcal{C}^i] \in H_2(\alpha,\beta,Y) \end{equation} \end{proposition} \section{Morse-Bott setup and SFT type compactness} \langlebel{degenerations} Let $(Y,\langlembda)$ be a contact 3 manifold with Morse-Bott contact form $\langlembda$. Throughout we assume the Morse-Bott orbits come in families of tori. \begin{convention} Throughout this paper we fix action level $L>0$ and only consider ECH generators of action level up to $L$. This is implicit in all of our constructions and will not be mentioned further. We construct Morse-Bott ECH up to action level $L$, and the full ECH is recovered by taking $L \rightarrow \infty$. \end{convention} The following theorem, which is a special case of a more general result in \cite{oh_wang_2018}, gives a characterization of the neighborhood of Morse-Bott Tori. Let $\langlembda_0$ denote the standard contact form on $(z,x,y) \in S^1\times S^1 \times \mathbb{R}$ of the form \[ \langlembda_0= dz-ydx. \] \begin{proposition} \cite{oh_wang_2018} \langlebel{prop_locform} Let $(Y,\langlembda)$ be a contact 3 manifold with Morse-Bott contact form $\langlembda$. 
We assume the Morse-Bott orbits come in families of tori $\mathcal{T}_i$ with minimal period $T_i$. Then we can choose coordinates around each Morse-Bott torus so that a neighborhood of $\mathcal{T}_i$ is described by $S^1\times S^1 \times (-\epsilon,\epsilon)$, and the contact form $\lambda$ in this coordinate system looks like: \[ \lambda = h(x,y,z) \lambda_0 \] where $h(x,y,z)$ satisfies: \[ h(x,0,z)=1, \quad dh(x,0,z) =0. \] Here we identify $z\in S^1 \sim \mathbb{R}/2\pi T_i \mathbb{Z}$. \end{proposition} See \cite{Yaocas} Proposition 2.2 for a sketch of the proof. By the Morse-Bott assumption there are only finitely many such tori up to fixed action $L$. We assume we have chosen such neighborhoods around all Morse-Bott tori ${\mathcal{T}_i}$. Next we shall perturb them to nondegenerate Reeb orbits by perturbing the contact form in a neighborhood of each torus as described below. This is the same perturbation as in \cite{Yaocas}. Let $\delta>0$, let $f:x\in \mathbb{R}/\mathbb{Z} \rightarrow \mathbb{R}$ be a smooth Morse function with maximum at $x=1/2$ and minimum at $x=0$. Let $g(y):\mathbb{R} \rightarrow \mathbb{R}$ be a bump function that is equal to $1$ on $[-\epsilon_{\mathcal{T}_i},\epsilon_{\mathcal{T}_i}]$ and zero outside $[-2\epsilon_{\mathcal{T}_i},2\epsilon_{\mathcal{T}_i}]$. Here $\epsilon_{\mathcal{T}_i}$ is a small number chosen for each $\mathcal{T}_i$ small enough so that the normal form in the above theorem applies to all Morse-Bott tori of action $<L$, and that all such chosen neighborhoods of these Morse-Bott tori are disjoint. Then in a neighborhood of the Morse-Bott tori $\mathcal{T}_i$ we perturb the contact form as \[ \lambda \longrightarrow \lambda_\delta:= e^{\delta gf}\lambda. \] We can describe the change in Reeb dynamics as follows: \begin{proposition} For fixed action level $L>0$ there exists $\delta>0$ small enough so that the Reeb dynamics of $\lambda_\delta$ can be described as follows.
In the trivialization specified by Proposition \ref{prop_locform}, each Morse-Bott torus splits into two non-degenerate Reeb orbits corresponding to the two critical points of $f$. One of them is hyperbolic of index $0$, the other is elliptic with rotation angle $|\theta| <C\delta \ll 1$ and hence its Conley-Zehnder index is $\pm 1$. There are no additional Reeb orbits of action $<L$. \end{proposition} For a proof see \cite{BourPhd}. \begin{remark} Later when we define various terms in the ECH index, they will depend on the choice of trivializations of the contact structure on the Reeb orbits. We will always choose the trivialization specified by Proposition \ref{prop_locform}. For convenience of notation we will call this trivialization $\tau$ and write for example $c_\tau$ or $Q_\tau$ for the definition of relative Chern class or intersection form with respect to this trivialization. We also observe that after iterating the Reeb orbit in the Morse-Bott tori, their Robbin-Salamon index stays the same (\cite{Gutt2014}). So up to action $L$, in the nondegenerate picture, we will only see Reeb orbits of Conley-Zehnder index $-1,0,1$. \end{remark} \begin{definition} We say a Morse-Bott torus is positive if the elliptic Reeb orbit has Conley-Zehnder index 1 after perturbation; otherwise we say it is a negative Morse-Bott torus. This condition is intrinsic to the Morse-Bott torus itself, and is independent of trivializations or our choice of perturbations. \end{definition} We recall our goal is to define the ECH chain complex up to filtration $L$, and then take $L\rightarrow \infty$ to recover the entire ECH chain complex.
Hence, let us consider for small $\delta>0$ the symplectization \[ (M^4, \omega_\delta) := (\mathbb{R} \times Y^3,d(e^s\lambda_\delta)) \] We equip $(M,\omega_\delta)$ with a $\lambda_\delta$ compatible almost complex structure $J_\delta$, and $(M,\omega):= (\mathbb{R} \times Y^3,d(e^s\lambda))$ with $\lambda$-compatible almost complex structure $J$. Both $J$ and $J_\delta$ should be chosen generically, with genericity condition specified in Definition \ref{def:transversality conditions} and Theorem \ref{generic path J}. In particular $J_\delta$ should be a small perturbation of $J$, i.e. the $C^\infty$ norm difference between $J_\delta$ and $J$ should be bounded above by $C\delta$. For fixed $L$ and small enough and generic choice of $\delta$, the ECH of $(Y^3,\lambda_\delta)$ is defined for generators of action less than $L$ via counts of embedded $J_\delta$-holomorphic curves of ECH index 1. To motivate our construction, we next take $\delta \rightarrow 0$ to see what kinds of objects these $J_\delta$-holomorphic curves degenerate into. By a theorem that first appeared in Bourgeois' thesis \cite{BourPhd} and also stated in \cite{SFT} (for a proof see the Appendix of \cite{Yaocas}), they degenerate into $J$-holomorphic cascades. (For a more careful definition of cascades see the appendix of \cite{Yaocas} that takes into account stability of domain and marked points, but the definition here suffices for our purposes). \begin{definition} [ \cite{BourPhd}, See also definition 2.7 in \cite{Yaocas}] Let $\Sigma$ be a punctured (nodal) Riemann surface, potentially with multiple connected components. A cascade of height 1, which we will denote by $\cas{u}$, in $(\mathbb{R}\times Y^3,d(e^s\lambda))$ consists of the following data: \begin{itemize} \item A labeling of the connected components of $\Sigma ^*=\Sigma \setminus \{ \text{nodes} \}$ by integers in $\{1, . . .
, l\}$, called sublevels, such that two components sharing a node have sublevels differing by at most 1. We denote by $\Sigma_i$ the union of connected components of sublevel $i$, which might itself be a nodal Riemann surface. \item $T_i \in [0,\infty)$ for $ i = 1, . . . , l - 1$. \item $J$-holomorphic maps $u^i: (\Sigma_i, j) \rightarrow (\mathbb{R}\times Y^3, J)$ with $E(u^i) < \infty$ for $ i = 1, . . . , l$, such that: \begin{itemize} \item Each node shared by $\Sigma_i$ and $\Sigma_{i+1}$ is a negative puncture for $u^i$ and is a positive puncture for $u^{i+1}$. Suppose this negative puncture of $u^i$ is asymptotic to some Reeb orbit $\gamma_i \in \mathcal{T}$, where $\mathcal{T}$ is a Morse-Bott torus, and this positive puncture of $u^{i+1}$ is asymptotic to some Reeb orbit $\gamma_{i+1} \in \mathcal{T}$, then we have that $\phi^{T_i}_f(\gamma_{i+1}) = \gamma_{i}$. Here $\phi^{T_i}_f$ is the upwards gradient flow of $f$ for time $T_i$ lifted to the Morse-Bott torus $\mathcal{T}$. It is defined by solving the ODE \[ \frac{d}{ds} \phi_f(s) = f'(\phi_f(s)). \] \item $u^i$ extends continuously across nodes within $\Sigma_i$. \item No level consists purely of trivial cylinders. However we will allow levels that consist of branched covers of trivial cylinders. \end{itemize} \end{itemize} \end{definition} \begin{convention} We fix our conventions as in \cite{Yaocas}. \begin{itemize} \item We say the punctures of a $J$-holomorphic curve that approach Reeb orbits as $s\rightarrow \infty$ are positive punctures, and the punctures that approach Reeb orbits as $s\rightarrow -\infty$ are negative punctures. We will fix cylindrical neighborhoods around each puncture of our $J$-holomorphic curves, so we will use ``positive/negative ends'' and ``positive/negative punctures'' interchangeably. By our conventions, we think of $u^1$ as being a level above $u^2$ and so on.
\item We refer to the Morse-Bott tori $\mathcal{T}_j$ that appear between adjacent levels of the cascade $\{u^i,u^{i+1}\}$ as above, where negative punctures of $u^i$ are asymptotic to Reeb orbits that agree with positive punctures from $u^{i+1}$ up to a gradient flow, \textit{intermediate cascade levels}. \item We say that the positive asymptotics of $\cas{u}$ are the Reeb orbits we reach by applying $\phi_f^\infty$ to the Reeb orbits hit by the positive punctures of $u^1$. Similarly, the negative asymptotics of $\cas{u}$ are the Reeb orbits we reach by applying $\phi_f^{-\infty}$ to the Reeb orbits hit by the negative punctures of $u^l$. They are always Reeb orbits that correspond to critical points of $f$. We note that if a positive puncture (resp. negative puncture) of $u^1$ (resp. $u^l$) is asymptotic to a Reeb orbit corresponding to a critical point of $f$, then applying $\phi^{+\infty}_f$ (resp. $\phi_f^{-\infty}$) to this Reeb orbit does nothing. \end{itemize} \end{convention} \begin{definition}[\cite{BourPhd}, Chapter 4, See also definition 2.9 in \cite{Yaocas} ] \label{def height k cascade} A cascade of height $k$ consists of $k$ height 1 cascades, $\cas{u}_k =\{u^{1\text{\Lightning}},...,u^{k\text{\Lightning}}\}$ with matching asymptotics concatenated together. By matching asymptotics we mean the following. Consider adjacent height one cascades, $u^{i\text{\Lightning}}$ and $u^{i+1\text{\Lightning}}$. Suppose a positive end of the top level of $u^{i+1\text{\Lightning}}$ is asymptotic to the Reeb orbit $\gamma$ (not necessarily simply covered). Then if we apply the upwards gradient flow of $f$ for infinite time we arrive at a Reeb orbit reached by a negative end of the bottom level of $u^{i\text{\Lightning}}$. We allow the case where $\gamma$ is at a critical point of $f$, and the flow for infinite time is stationary at $\gamma$.
We also allow the case where $\gamma$ is at the minimum of $f$, and the negative end of the bottom level of $u^{i\text{\Lightning}}$ is reached by following an entire (upwards) gradient trajectory connecting from the minimum of $f$ to its maximum. If all ends between adjacent height one cascades are matched up this way, then we say they have matching asymptotics. We will use the notation $\cas{u}_k$ to denote a cascade of height $k$. We will mostly be concerned with cascades of height 1 in this article, so for those we will drop the subscript $k$ and write $\cas{u} = \{u^1,...,u^l\}$. \end{definition} \begin{remark} As mentioned in \cite{Yaocas}, we can also think of a cascade of height $k$ as a cascade of height 1 where $k-1$ of the intermediate flow times are infinite. \end{remark} We now state a SFT style compactness theorem relating non-degenerate $J_\delta$ holomorphic curves to cascades. However, the precise statement is rather technical and requires us to take up Deligne-Mumford compactifications of the moduli space of Riemann surfaces. The full version is stated in \cite{SFT} (see also the Appendix of \cite{Yaocas}, where we also sketch a proof). For our purposes it will be sufficient to state the theorem informally as below. \begin{theorem} (See \cite{SFT}) Let $u_{\delta_n}$ be a sequence of $J_{\delta_n}$-holomorphic curves with uniform upper bound on genus and energy, then a subsequence of $u_{\delta_n}$ converges to a cascade of $J$- holomorphic curves (which can be apriori of arbitrary height). \end{theorem} Since ECH is really a theory of holomorphic currents, we find it also useful to define a \textit{cascade of holomorphic currents}, which is what we shall primarily work with. \begin{definition} A height 1 holomorphic cascade of currents $\cas{\mathbf{u}}= \{u^1,..,u^n\}$ consists of the following data: \begin{itemize} \item Each $u^i$ consists of holomorphic currents of the form $(C^i_j,d_j^i)$. 
Each $C^i_j$ is a somewhere injective holomorphic curve with $E(C^i_j)<\infty$. The positive integer $d^i_j$ is then the multiplicity. \item Numbers $T_i \in [0,\infty), i=1,..,n-1$ \item Let $\gamma_i$ be a simply covered Reeb orbit that is approached by the negative end of some component of $u^i$, say the components $C^i_{j_1},...,C^i_{j_k}$ (such curves have associated multiplicity $d^i_{j_1},...,d^i_{j_k}$). Each $C^i_{j_*}$ approaches $\gamma_i$ with a covering multiplicity $n_{j_*}$, which is how many times $\gamma_i$ is covered by $C^i_{j_*}$ as currents. Then the total multiplicity of $\gamma_i$ as covered by $u^i$ is given by $\sum_{*=1,..k} d^i_{j_*}n_{j_*}$. Then consider $\phi_{f}^{T_i}(\gamma_{i+1}):= \gamma_i$. Then $u^{i+1}$ is asymptotic to $\gamma_{i+1}$ in its positive end with total multiplicity $\sum_{*=1,..k} d^i_{j_*}n_{j_*}$ also. \item No level consists purely of trivial cylinders (even if they have higher multiplicities). \end{itemize} \end{definition} We define the positive asymptotics of $ \cas{\mathbf{u}}:=\{u^1,..,u^n\}$ as before, except we only care about Reeb orbits up to multiplicity. Then we can similarly define a cascade of currents of height $k$ by stacking together cascades of currents of height $1$. We will refer to an ordinary cascade as a ``cascade of curves'' when we wish to distinguish it from a cascade of currents. Then given a cascade of curves, we can pass it to a cascade of currents by using the following procedure: \begin{procedure}\label{curve_to_current} \begin{itemize} \item Replace every multiply covered non-trivial curve with a current of the form $(C,m)$ where $C$ is a somewhere injective curve, and we translate all $m$ copies along $\mathbb{R}$ to make the entire collection somewhere injective. \item If we see a multiply covered trivial cylinder we replace it with $(C,m)$ where $m$ is the multiplicity and $C$ is a trivial cylinder.
\item If we see a nodal curve in one of the levels, we separate the node and apply the above process to each of the separated components of the nodal curve. \item We remove all levels that only have currents made out of trivial cylinders. Suppose $u^i$ is a level only consisting of trivial cylinders to be removed, and suppose the $s\rightarrow +\infty$ end is a intermediate cascade level with flow time $T_{i-1}$, and the $s\rightarrow -\infty$ end of $u^i$ has associated flow time $T_i$, after the removal of $u^i$ level, the newly adjacent levels $u^{i-1}$ and $u^{i+1}$ have flow time between them equal to $T_i+T_{i-1}$. \end{itemize} \end{procedure} In passing from cascades of curves to currents we have lost some information, but we shall see currents are the natural settings to talk about ECH index. We later wish to make sense of the Fredholm index of a cascade of currents. To this end we make the definition of \emph{reduced cascade of currents}. \begin{definition}\langlebel{def:reduced cascade} Given a cascade of currents $\cas{\mathbf{u}}$, for components within it of the form $(C,m)$ where $m>1$ and $C$ is a nontrivial holomorphic curve, we then replace $(C,m)$ with just $(C,1)$. After we perform this operation we obtain another cascade of currents, which we label $\cas{\tilde{\mathbf{u}}}$, which we call the reduced cascade of currents. \end{definition} \section{Index calculations and transversality} The heart of the calculation that underlies ECH is this: the ECH index bounds from the above the Fredholm index, and if there are curves of ECH index one with bad behaviour (singularities, multiply covers), this would imply the existence of somewhere injective curves of Fredholm index less than 1, which cannot happen for generic $J$. In this section we take up the issue of establishing Fredholm index for $J$ holomorphic cascades, and explain the transversality issue we encounter. 
Given a reduced cascade of currents, $\cas{\mathbf{\tilde{u}}}= \{\tilde{u}^1,...,\tilde{u}^n\}$, we would like to assign to it a Fredholm index. Ideally this Fredholm index measures geometrically the dimension of the moduli space this particular cascade lives in. We note that by passing to the reduced cascade the multiplicities associated to ends of adjacent levels, $\tilde{u}^i$ and $\tilde{u}^{i+1}$, do not necessarily match up, but imposing that there is a single flow time parameter $T_i$ between adjacent levels still means we can think of $\cas{\mathbf{u}}$ as living in a fiber product with a virtual dimension. To this end we first recall some conventions when it comes to $J$-holomorphic curves with ends on Morse-Bott critical submanifolds (in this case, tori). Consider $\tilde{u}^i$; for simplicity suppose its domain $\dot{\Sigma}_i$ is a punctured Riemann surface that is connected. Let $p_j^\pm$ label the positive/negative punctures, and the map $\tilde{u}^i$ is asymptotic to Reeb orbits (of some multiplicity) on Morse-Bott tori at each of its punctures. We wish to associate to $\tilde{u}^i$ a moduli space of curves that contains $\tilde{u}^i$ as an element and contains curves that are ``close'' to $\tilde{u}^i$. To this end we recall some conventions. To each puncture $p_j^\pm$ of $\tilde{u}^i$, we can designate it as ``fixed'' or ``free'', and each choice of these designations leads to a different moduli space. The designation ``free'' means we consider $J$-holomorphic maps from $\dot{\Sigma}_i$ so that $p_j^\pm$ can land on any Reeb orbit with the same multiplicity on the same Morse-Bott torus at the corresponding end of $\tilde{u}^i$. For a puncture to be considered ``fixed'', we consider the moduli space of $J$-holomorphic curves from $\dot{\Sigma}_i$ so that $p_j^\pm$ lands on a fixed Reeb orbit on a Morse-Bott torus with fixed multiplicity (the same Reeb orbit as $\tilde{u}^i$).
Given a designation of ``fixed'' or ``free'' on punctures of $\tilde{u}^i$, we can then consider the moduli space of $J$-holomorphic curves from $\dot{\Sigma}_i$ into $\mathbb{R}\times Y$ with the same asymptotic constraints as $\tilde{u}^i$ and living in the same relative homology class. We shall denote this moduli space as $\mathcal{M}_{\mathbf{c}}(\tilde{u}^i)$, using $\mathbf{c}$ to denote our choice of fixed/free ends. This moduli space has virtual dimension given by: \begin{equation} Ind(\tilde{u}^i) : = -\chi(\tilde{u}^i) +2c_1(\tilde{u}^i) + \sum _{p_j^+} \mu(\gamma^{q_{p_j^+}} )- \sum _{p_j^-} \mu(\gamma^{q_{p_j^-}}) + \frac{1}{2}\# \text{free ends} - \frac{1}{2}\# \text{fixed ends} \end{equation} where $\chi$ is the Euler characteristic, $c_1$ the relative first Chern class, and $\mu(-)$ is the Robbin-Salamon index for paths of symplectic matrices with degeneracies defined in \cite{Gutt2014}. We use the symbol $\gamma$ to denote the Reeb orbit the end $p_j^\pm$ is asymptotic to, with multiplicity $q_{p_j^\pm}$. Given a reduced cascade of currents, $\cas{\mathbf{\tilde{u}}}$, let $\alpha$ denote the designation of ``free''/``fixed'' ends of $\tilde{u}^1$ at the $s\rightarrow +\infty$ end, and let $\beta$ denote the ``fixed''/``free'' designation of $\tilde{u}^n$ at the $s\rightarrow -\infty$ end. Later we will see we can replace $\alpha$ and $\beta$ with Morse-Bott ECH generators. In order to define the Fredholm index we need to assign free/fixed designations to the rest of the ends. \begin{convention}\label{convention:free/fixed} If a non-trivial curve $u^i$ has an end landing on a critical point of $f$, then we consider that end to be fixed. If a trivial cylinder has one end on a critical point of $f$, the other end must also land on the same critical point. We allow trivial cylinders with both ends free. If the trivial cylinder is at a critical point of $f$, we take the convention that we can only designate one of its ends as fixed.
\end{convention} \begin{definition}\label{index} Let $\cas{\tilde{\mathbf{u}}}=\{u^1,..,u^{n-1}\}$ denote a reduced cascade of currents of height 1. Let $ind(u^i)$ denote the Fredholm index of each of $u^i$. Note this makes sense since we have assigned free/fixed ends to all ends of $u^i$ by our conventions above. Suppose there are $R_2,..., R_{n-1} \in \mathbb{Z}$ distinct Reeb orbits approached by free ends as $s\rightarrow-\infty$ at each intermediate cascade level. Let us denote by $k_i$ and $k_i'$ the number of free ends in each intermediate cascade level, e.g. elements in $u^1$ have $k_2$ free ends as $s\rightarrow -\infty$, and $u^2$ has $k_2'$ free ends as $s\rightarrow +\infty$. Both counts of $k_i$ and $k_i'$, as well as $R_i$, ignore ``free'' ends of fixed trivial cylinders, as such ``free'' ends are artificial to our convention. Now we define the cascade dimension \begin{align*} Ind(\cas{\tilde{\mathbf{u}}}) :=& Ind(u^1)+..+Ind(u^{n-1}) \\ &- [k_2'+...+k_{n-1}']-[k_2+...+k_{n-1}]+ [R_2+..+R_{n-1}]+(n-2)- (n-1)-L \end{align*} where $L$ is the number of intermediate cascade levels without free ends plus the number of intermediate cascade levels whose flow time is zero. Again in the count of $L$ we ignore ``free'' ends coming from fixed trivial cylinders. \end{definition} Observe for (reduced) cascades of height $1$, we always have $k_i\geq R_i$ and $k_i'\geq R_i$. We next explain how to define/compute the dimension of height $k$ cascades. Let $\cas{\tilde{\mathbf{u}}}=\{u^1,..,u^{n-1}\}$ denote a reduced cascade of currents of height $N$. We recall the difference between a height one and a height $N$ cascade is that between cascade levels $u^i$ and $u^{i+1}$ we allow flow times $T_i = \infty$. We assign the free/fixed ends to $u^i$ depending on whether they land on critical points of $f$ as before. We can split a height $N$ cascade into $N$ height 1 cascades by partitioning the levels where the flow times are infinite.
In particular we write $\cas{\tilde{\mathbf{u}}} = \left\{ \cas{\tilde{\mathbf{v^1}}}, ...,\cas{\tilde{\mathbf{v^N}}} \right \}$. Then the index of $\cas{\tilde{\mathbf{u}}}$ is given by the sum of the indices of $\cas{\tilde{\mathbf{v^i}}}$. Here we come to the key transversality assumption of this paper. We first make sense of the notion of transversality. \begin{definition}\label{def:good j} Let $\lambda$ be a Morse-Bott contact form, whose Reeb orbits come in $S^1$ families. We say a $\lambda$ compatible almost complex structure $J$ is \textbf{good} if all reduced cascades of height one are transversely cut out, which is defined below. \end{definition} \begin{remark} We note the transversality conditions needed to count cascades given below are quite natural. However, since cascades have many parts the notation is a bit complicated. \end{remark} \begin{definition} \label{def:transversality conditions} Let $\cas{\tilde{\mathbf{u}}}=\{u^1,..,u^{n-1}\}$ denote a reduced cascade of currents of height 1. We say $\cas{\tilde{\mathbf{u}}}=\{u^1,..,u^{n-1}\}$ is \textbf{transversely cut out} if the conditions below are met. \begin{itemize} \item Each moduli space $\mathcal{M}_{c}(u^i)$ is transversely cut out with dimension given by the Fredholm index formula. Here the subscript $c$ implicitly denotes the assignments of fixed and free ends we assigned to each end of $u^i$ according to Convention \ref{convention:free/fixed}. Note fixed trivial cylinders are assigned index zero. \end{itemize} Suppose there are $R_2,..., R_{n-1} \in \mathbb{Z}$ distinct Reeb orbits reached by free ends at each intermediate cascade level. We label them by $\gamma(i,j)$ where $j=1,...,R_i$, and $i$ indexes which level we are referring to. For each $\gamma(i,j)$, we choose a negative puncture of $u^{i-1}$ that is asymptotic to $\gamma(i,j)$. We call this puncture $p^-(i-1,j)$.
The other negative ends of $u^{i-1}$ that are asymptotic to $\gamma(i,j)$ are labelled $p^-(i-1,j,c, l)$, where $l=1,2.., n(\gamma(i,j),-)$. Next consider $\phi^{-T_{i-1}}(\gamma(i,j))$. They are approached by positive punctures of $u^i$. For each $\phi^{-T_{i-1}}(\gamma(i,j))$, we pick out a special free puncture $p^+(i,j)$. The remaining free positive ends of $u^i$ that are asymptotic to $\phi^{-T_{i-1}}(\gamma(i,j))$ are labelled $p^+(i,j,c,l)$ for $l=1,...,n(\gamma(i,j),+)$. We next consider the evaluation maps. Given the collection of flow times $T_1,...,T_{n-1}$, let $\mathfrak{I} \subset \{1,..,n-1\}$ denote the subset for which $T_i>0$; we consider the evaluation map \begin{align} EV^-: \mathcal{M}(u^1) \times \mathcal{M}(u^2)\times ...\times \mathcal{M}(u^{n-2}) \rightarrow (S^1)^{R_2} \times (S^1)^{R_3} \times ... \times (S^1)^{R_{n-1}} \end{align} given by \begin{align} (u'^{1},...,u'^{n-2}) \rightarrow (ev_1^-(u'^1), ev_2^-(u'^2),...,ev_{n-2}^-(u'^{n-2})) \end{align} Here the evaluation is at the $p^-(i-1,j)$ puncture of $u^{i-1}$. We also consider the map \begin{align} EV^+: \mathcal{M}(u^2) \times \mathcal{M}(u^3) ...\times \mathcal{M}(u^{n-1}) \rightarrow (S^1)^{R_2} \times (S^1)^{R_3} \times ... \times (S^1)^{R_{n-1}} \end{align} given by: \begin{align} (u'^{2},...,u'^{n-1}) \rightarrow (ev_2^+(u'^2),...,ev_{n-1}^+(u'^{n-1})) \end{align} where the evaluation is at $p^+(i,j)$ of $u^i$. We consider the flow map \[ \Phi_f: (S^1)^{R_2} \times \mathbb{R}^* \times..\times (S^1)^{R_{n-1}} \times \mathbb{R}^* \rightarrow (S^1)^{R_2} \times (S^1)^{R_3} \times ... \times (S^1)^{R_{n-1}}. \] The notation $\mathbb{R}^*$ means the following: if $i \in \mathfrak{I}$ then we include a factor of $\mathbb{R}$ in the above product, otherwise we omit the factor. For $x_i\in S^1$ (i.e. a copy of $S^1$ among the product $(S^1)^{R_i}$), if $i \in \mathfrak{I}$ then the image of $x_i$ under $\Phi_f$ is given by $\phi_f^{T_i'}(x_i)$.
If the index $i$ is not in $\mathfrak{I}$, then the image under $\Phi_f$ is $x_i$. We use the notation $\Phi_f\circ EV^+$ to denote the composition of the two maps, with domain $\mathcal{M}(u^2) \times \mathbb{R}^* \times \mathcal{M}(u^3) \times ...\times \mathcal{M}(u^{n-1})\times \mathbb{R}^*$ and image $(S^1)^{R_2} \times (S^1)^{R_3} \times ... \times (S^1)^{R_{n-1}}$. Let $\mathcal{K}_-$ denote the subset of $\mathcal{M}(u^1) \times \mathcal{M}(u^2)\times ...\times \mathcal{M}(u^{n-2})$ so that the ends $p^-(i,j)$ and $p^-(i,j,c,l)$ approach the same Reeb orbit. Let $\mathcal{K}_+$ denote the subset of $\mathcal{M}(u^2) \times \mathcal{M}(u^3) ...\times \mathcal{M}(u^{n-1})$ where $p^+(i,j)$ and $p^+(i,j,c,l)$ are asymptotic to the same Reeb orbit. Then \begin{itemize} \item Near $\cas{\mathbf{\tilde{u}}}$, both $\mathcal{K}_\pm$ are transversely cut out submanifolds. \end{itemize} Then we can restrict $EV^\pm$ to $\mathcal{K}_\pm$; in particular the map $\Phi_f \circ EV^+$ admits a natural restriction to $\mathcal{K}_+\times \mathbb{R}^{|\mathfrak{I}|}$, and our final condition is: \begin{itemize} \item $\Phi_f \circ EV^+$ and $EV^-$, when restricted to $\mathcal{K}_+\times (\mathbb{R})^{|\mathfrak{I}|}$ and $\mathcal{K}_-$ respectively, are transverse at $\cas{\tilde{\mathbf{u}}}=\{u^1,..,u^{n-1}\}$. \end{itemize} \end{definition} \begin{assumption}\label{assumption} We assume we can choose $J$ to be good so that all reduced cascades of currents we encounter satisfy the transversality condition above. \end{assumption} In particular, this implies all reduced cascades of currents live in a moduli space whose dimension is given by the index formula, and if such index is less than zero, then such cascades cannot exist. We note that in general the transversality assumption is not automatic. In a reduced cascade of currents, all our curves are somewhere injective, but this is not enough.
The issue lies in the fact that the fiber product that defines a cascade can fail to have enough transversality. This is because all different levels of the cascade have the same $J$, and this $J$ cannot be perturbed independently in each level. When the cascade is complicated enough, the same curve can appear multiple times in different levels, and this causes difficulty with the evaluation map. Consequently when there is not enough transversality for the naive definition of the universal moduli space of reduced cascades to be a Banach manifold, one usually needs some additional arguments. However in simple enough cases we can still achieve the above transversality condition. This is the content of Theorem \ref{thm:list of transversality conditions}, which is proved in the Appendix. \section{ECH Index of Cascades}\label{Section:ECH index} In this section we develop the analogue of the ECH index one condition for cascades of currents. We shall see this will impose severe limits on the currents that can appear in a cascade, provided transversality can be achieved. To start the definition, we first consider one-level cascades, i.e. holomorphic curves from Morse-Bott tori to Morse-Bott tori. We want to define an index $I$ so that for somewhere injective curves: \[ I(C) \geq dim\mathcal{M}(C)+2\delta(C) \] where $\mathcal{M}(C)$ denotes the moduli space of holomorphic curves that $C$ belongs to. Note the definition of $dim\mathcal{M}$ is ambiguous, because we need to specify which ends are ``fixed'' and which are ``free''. Our definition of $I$ will depend on the type of end conditions imposed on our curve. The key to our construction will be the relative adjunction formula. \subsection{Relative adjunction formula in the Morse-Bott setting} Here we clarify what we mean by the intersection form $Q$. We first provide a provisional definition that is very much similar to regular ECH, then we show this definition descends to a more natural definition adapted to the Morse-Bott setting.
Let $\alpha,\beta$ be orbit sets. Observe here this means that they pick out discrete Reeb orbits (potentially with multiplicity) among the $S^1$ family of Reeb orbits. Then we can define the relative intersection formula as: \begin{definition}\label{intersection_form_prelim_definition} We fix trivializations of Morse-Bott tori as we have specified, and denote it by $\tau$. Given $\alpha,\beta$ orbit sets, given $Z,Z'\in H_2(\alpha,\beta,Y)$ we choose $\tau$ representatives $S$, $S'$ as before, then $Q_\tau(Z,Z')$ is defined as before as the algebraic count of intersections between $S$ and $S'$. \end{definition} Because $\tau$ here provides a global trivialization of all Reeb orbits in a given Morse-Bott torus, the intersection $Q$ doesn't depend on \textit{which} specific Reeb orbit $\alpha$ or $\beta$ picks out in a given Morse-Bott torus. We state the phenomenon in terms of a proposition: \begin{proposition} Given orbit sets $\alpha,\beta$ and relative homology classes $Z,Z'\in H_2(\alpha,\beta)$. For definiteness let $\gamma$ be a Reeb orbit in the $s\rightarrow +\infty$ end of $\alpha$, let $\gamma'$ be any translation of $\gamma$ in its Morse-Bott torus; then using $\gamma'$ to replace $\gamma$ defines another orbit set $\alpha'$. There exist corresponding relative homology classes $\hat{Z},\hat{Z}' \in H_2(\alpha',\beta,Y)$ obtained by attaching a cylinder that connects $\gamma$ to $\gamma'$ to ends of $S$ and $S'$ that are asymptotic to $\gamma$; then \[ Q_\tau(Z,Z')=Q_\tau(\hat{Z},\hat{Z}') \] \end{proposition} \begin{proof} Choose $\tau$ representatives for $Z,Z'$ which we write as $S$, $S'$, then attach a cylinder connecting $\gamma$ to $\gamma'$ to $S$ and $S'$. In our trivialization the resulting surfaces are still $\tau$ representatives, and this process does not introduce additional intersections.
\end{proof} The above proposition suggests $Q_\tau$ in the Morse-Bott case descends to an intersection number whose input is not $H_2(\alpha,\beta,Y)$ but a more general relative homology group adapted to the Morse-Bott setting. \begin{definition} We define the relative homology classes $\mathcal{H}_2(\alpha,\beta,Y)$. Here $\alpha,\beta$ are collections of Morse-Bott tori, and multiplicities. For instance we can write $\alpha :=\{(\mathcal{T}_i,m_i)| m_i \in \mathbb{Z}_{\geq 0}\}$ where $\mathcal{T}_i$ are Morse-Bott tori, and $m_i$ are multiplicities. An element $Z \in \mathcal{H}_2(\alpha,\beta,Y)$ is a 2-chain in $Y$ so that \[ \partial Z = \alpha -\beta. \] The above equality means the boundary (which includes orientation) of $Z$ consists of Reeb orbits on Morse-Bott tori $\{\mathcal{T}_i\}$, and each $\mathcal{T}_i \in \alpha$ has a total of $m_i$ Reeb orbits (counted with multiplicity) to which the ends of $Z$ are asymptotic. Likewise for $\beta$. We define an equivalence relation on $\mathcal{H}_2(\alpha,\beta,Y)$, which we write as $Z\sim Z'$, as follows: $Z$ and $Z'$ are equivalent if there is a 3-chain $W$ whose boundary takes the following form: \[ \partial W = Z-Z' + \{I\times S^1\} \] where the collection $\{I\times S^1\}$ consists of 2-chains on Morse-Bott tori that appear in either $\alpha$ or $\beta$. We think of these 2-chains as a Reeb orbit (which we think of as $S^1$) times an interval, $I$. \end{definition} The idea is we consider 2-chains but allow their ends to slide along the Reeb orbits in the Morse-Bott family. The next proposition proves the relative intersection $Q$ remains well defined. \begin{proposition} $Q_\tau$ as defined above descends to an intersection form: \[ Q_\tau:\mathcal{H}_2(\alpha,\beta,Y) \times \mathcal{H}_2(\alpha,\beta,Y)\rightarrow \mathbb{Z}. \] \end{proposition} \begin{proof} For clarity we use $\hat{Q}_\tau$ to denote the intersection form defined in Definition \ref{intersection_form_prelim_definition}.
Suppose $Z,Z' \in \mathcal{H}_2(\alpha,\beta,Y)$, and suppose $Z'' \sim Z$. We pick a distinguished Reeb orbit $\gamma_i$ for each Morse-Bott torus that appears in $\alpha,\beta$, chosen so that $\gamma_i$ does not appear as a Reeb orbit in $Z,Z'$ and $Z''$. We connect Reeb orbits in $Z$, $Z'$ and $Z''$ to $\{\gamma_i\}$ counted with multiplicities using cylinders along each Morse-Bott torus to obtain $\hat{Z},\hat{Z}',\hat{Z}''$. We then define \[ Q_\tau(Z,Z') : = \hat{Q}_\tau(\hat{Z},\hat{Z}'). \] Observe in the above $\hat{Q}_\tau$ is an intersection form defined on $H_2(\alpha',\beta',Y)$ where $\alpha'$ and $\beta'$ are collections of Reeb orbits of the form $\{(\gamma_i,n_i)\}$. It suffices to prove $Q_\tau(Z'',Z') = Q_\tau(Z,Z')$. To do this note the fact $Z \sim Z''$ in $\mathcal{H}_2(\alpha,\beta,Y)$ extends to an equivalence $\hat{Z} \sim \hat{Z}''$ in $H_2(\alpha',\beta',Y)$, hence $\hat{Q}_\tau(\hat{Z}'',\hat{Z}') = \hat{Q}_\tau(\hat{Z},\hat{Z}')$, which completes the proof. \end{proof} We observe using the above reasoning the relative Chern class also descends to $\mathcal{H}_2(\alpha,\beta,Y)$. We state this in the form of a definition: \begin{definition} Given $Z\in \mathcal{H}_2(\alpha,\beta,Y)$, we define the relative Chern class $c_\tau(Z)$ the same way as before: choose a representative $S$ of $Z$ that is embedded near the boundary. Let $\iota:Z\rightarrow Y$ be the inclusion, then consider the pullback of the contact structure $\iota^*\xi$ to $Z$, pick a section $\psi$ of $\iota^*\xi$ that does not rotate with respect to $\tau$ near the end points and has transverse zeroes, then $c_\tau(Z)$ is the signed count of zeroes of $\psi$. \end{definition} Finally we define writhe the same way as before: \begin{definition} Let $C$ be a somewhere injective curve that is not a trivial cylinder. We assume at $s\rightarrow +\infty$ (resp. $-\infty$) it is asymptotic to the orbit set $\alpha$ (resp. $\beta$).
The trivialization specified in Theorem \ref{locform} gives an identification of a neighborhood of each Reeb orbit $\gamma \in \alpha,\beta$ with $S^1\times \mathbb{R}^2$; then using this we can define the writhe of $C$ as we had before in section \ref{ECH review}. \end{definition} \begin{remark} The definition of writhe depends crucially on the fact that $C$ is a holomorphic curve, and does not admit constructions as before where we can slide the Reeb orbits of $\alpha,\beta$ around and obtain a surface with the same relative intersection number/Chern class. \end{remark} Hence we are ready to state the relative adjunction formula. \begin{theorem} If $C$ is a simple $J$-holomorphic curve, then \[ c_\tau(C) = \chi(C)+Q_\tau(C) +w_\tau(C)-2\delta(C) \] with the definition of relative Chern class, relative intersection number, and writhe given above. \end{theorem} \begin{proof} This is a purely topological formula. The same proof as in \cite{Hutchings2002} goes through. \end{proof} Hence we would like to define a version of the ECH index by applying the relative adjunction formula to the Fredholm index formula of holomorphic curves as in \cite{Hutchings2002}. Recall then the proof of the index inequality boils down to bounding the writhe of the $J$-holomorphic curve in terms of various algebraic expressions involving the Conley-Zehnder indices of the orbits the curve is asymptotic to. We turn to this writhe bound in the next subsection. \subsection{Writhe Bound} We recall the Fredholm index of a somewhere injective curve $u$ depends on which end is free and which end is fixed. Hence we anticipate that the ECH index we assign to a holomorphic curve $u$ will depend on which end is fixed and which end is free. The writhe inequality we prove shall take into account the assignment of free and fixed ends.
We note that this assignment of an index to a curve that depends on which end is free/fixed is somewhat artificial, but it will be less artificial once we use this index to define the ECH index of an entire cascade. First we fix some conventions on Conley-Zehnder indices. For a given Morse-Bott torus $\mathcal{T}$ assume the $J$-holomorphic curve has $N$ ends that are positively (resp. negatively) asymptotic to Reeb orbits on this torus. They are asymptotic to the individual Reeb orbits labelled $R_1, .., R_n$. The writhe bound is a local computation so we only consider a particular Reeb orbit, called $R_1$. Assume $k$ ends of $C$ are asymptotic to $R_1$. They have multiplicity $q_1,..,q_k$. We adopt the following convention on Conley-Zehnder indices. \begin{convention} Recall for a positive Morse-Bott torus $\mu =1/2$. We declare $\mu_+ =1$, $\mu_-=0$. For a negative Morse-Bott torus we declare $\mu_+=0$, $\mu_- = -1$. This has the following significance: for a curve with a free end as $s\rightarrow +\infty$ landing in a Morse-Bott torus (regardless of whether it is a positive or negative torus), the Conley-Zehnder index term in the Fredholm index formula associated to this end is $\mu_+$ (the specific value depends on the positive/negative Morse-Bott torus as above), and the Conley-Zehnder index term assigned to a fixed end is $\mu_-$. Conversely, at the $s\rightarrow -\infty$ end we assign $\mu_-$ to free ends and $\mu_+$ to fixed ends. \end{convention} Using the above conventions, given a somewhere injective holomorphic curve $u$, we assign its total Conley-Zehnder index, denoted by $CZ^{Ind}(u)$, according to the convention above.
The goal of the writhe inequality is to come up with another Conley-Zehnder index term $CZ^{ECH}(u)$ so that the total writhe of $u$ is bounded above by \begin{equation} wr_\tau(u) \leq CZ^{ECH}(u) - CZ^{Ind}(u) \end{equation} By way of convention we will use $CZ^*(R_1,\pm \infty)$ where $*=Ind,ECH$ to denote the Conley-Zehnder index we should assign to the free/fixed ends approaching $R_1$ as $s\rightarrow \pm \infty$. \subsubsection{Positive Morse-Bott tori} \begin{theorem}\label{winding pos} In the case of a positive Morse-Bott torus, $s\rightarrow -\infty$, if $\xi_i$ is an end of $u$ with covering multiplicity $q_i$ and $u$ is not the trivial cylinder, we have the following inequality \begin{equation*} \eta (\xi_i) \ge 1 \quad \text{(single end winding number)}. \end{equation*} For single end writhe, we have: \begin{equation*} w(\xi_i) \ge \eta(\xi_i) (q_i-1). \end{equation*} Note this holds true for trivial cylinders (as long as they are somewhere injective). Let $\xi_1$ and $\xi_2$ be two braids that correspond to two distinct ends of $u$ that approach the same Reeb orbit, with multiplicities $q_i$ and winding numbers $\eta_i$, then: \begin{equation*} l(\xi_1,\xi_2)\ge \min (q_1\eta_2,q_2\eta_1) \end{equation*} Note this holds if one of the ends $\xi_i$ came from a trivial cylinder.
And finally to calculate the writhe of all ends approaching the same Reeb orbit, $w(\xi)$, let $\xi$ denote the total braid and $\xi_i$ the various components coming from incoming ends of $u$ (this holds for both $s=\pm \infty$): \begin{equation*} w(\xi) = \sum_i w(\xi_i) + \sum_{i \neq j } l(\xi_i,\xi_j) \end{equation*} In the case of $s\rightarrow +\infty$, using exactly the same notation, we have the following inequalities: \begin{equation*} \eta (\xi_i) \le 0 \end{equation*} \begin{equation*} w(\xi_i) \le \eta(\xi_i) (q_i-1) \,\,\,\, \text{for single end writhe} \end{equation*} \begin{equation*} l(\xi_1,\xi_2)\le \max (q_1\eta_2,q_2\eta_1) \end{equation*} \end{theorem} \begin{proof} (Sketch) The proof constitutes an amalgamation of existing results in the literature. The key result is a description of the asymptotics of ends of holomorphic curves on a Morse-Bott torus \cite{siefring}. Namely, near the $s\rightarrow +\infty$ end of $u$, the constant-$s$ slice $\{s\} \times Y$ of $u$ can be described as follows.
We can choose a neighborhood of the trivial cylinder $\mathbb{R}\times \gamma$ as $\mathbb{R}\times S^1\times \mathbb{R}^2$ where $s$ is the symplectization direction, $t$ is the variable along the Reeb orbit and $\{0\}\times \mathbb{R}^2$ is the contact structure along the Reeb orbit; then we can write an end $\xi_i$ of $u$ as \begin{equation}\label{locform} u(s,t) = (qs, qt, \sum_{i=1}^ne^{\lambda_i s}e_i(t)) \end{equation} where $\lambda_i$ and $e_i$ are respectively the (negative) eigenvalues and corresponding eigenfunctions of the operator $A(t): L^2(S^1,\mathbb{R}^2) \rightarrow L^2(S^1,\mathbb{R}^2)$ coming from the linearization of the Cauchy-Riemann operator, which can be written as \[ A(t) = -J\partial_t -S \] With this normal form, the winding number bound comes from combining the results in \cite{Gutt2014} about the meaning of the Robbin-Salamon index and results in \cite{Hofer2} relating Conley-Zehnder indices to crossings of eigenvalues. The relations on writhe and linking number come from direct modifications of the proofs in \cite{Hutchings2002}, once we realize that locally the braids can be described by Equation \ref{locform}. \end{proof} Next we move to use these relations to prove the writhe bound. As in the case of ECH, equality in the writhe bound implies certain partition conditions, which we will carefully state. \begin{proposition}[link, $-\infty$, positive Morse-Bott torus] Consider the $J$-holomorphic curve $u$ with negative ends on a Reeb orbit $\gamma$. We have $k_{free}$ free ends of multiplicity $q_i^{free}$, and $k_{fixed}$ fixed ends with multiplicity $q_i^{fixed}$ and of total multiplicity $N_{fixed}$. The writhe bound reads \[ w(\xi) \geq -\sum_{i=1}^{k_{free}+k_{fixed}} \eta_i +\sum_{i,j}^{k_{free}+k_{fixed}} \min(\eta_i q_j, \eta_j q_i) \geq (N_{free} -1 + N_{fixed}) -(k_{fixed}) \] with equality holding implying there can be only free/fixed ends at this Reeb orbit.
If there are only fixed ends the partition condition is $(n)$, and if there are only free ends the partition condition is $(n)$ or $(1,n-1)$. \end{proposition} \begin{proof} We have the respective bounds \[ -k_{free} + \sum_{i,j}^{k_{free}} \min(\eta_i q_j, \eta_j q_i)\geq N_{free}-1 \] and \[ -k_{fix}+\sum_{i,j}^{k_{fix}} \min(\eta_i q_j, \eta_j q_i)\geq N_{fix}-k_{fixed} \] and cross terms will imply strict inequality, hence only free or only fixed ends appear. In the case of only fixed ends, we see the only way equality can hold is with partition condition $(n)$. Similar considerations produce the partition conditions for free ends. \end{proof} \begin{proposition} [link, $\infty$, positive Morse-Bott torus] In the $s\rightarrow +\infty$ end, consider the $J$-holomorphic curve $u$ with ends on a Reeb orbit $\gamma$. We have $k_{free}$ free ends of multiplicity $q_i^{free}$, and $k_{fixed}$ fixed ends $q_i^{fixed}$ of total multiplicity $N_{fixed}$: \[ w(\xi) \leq -\sum_{i=1}^{k_{free}+ k_{fix}} \eta_i + \sum_{i,j}^{k_{free}+ k_{fix}} \max(q_j \eta_i, q_i\eta_j) \le N_{free} -(k_{free}). \] The partition condition implies $(1,...,1)$ on the free ends. \end{proposition} \begin{proof} We see that $LHS \leq 0$, and $RHS= 0$ iff the free end satisfies partition conditions $(1,...1)$; there are no requirements on fixed ends. \end{proof} \subsubsection{Negative Morse-Bott tori} In this subsection we take up the analogous writhe bounds for negative Morse-Bott tori. \begin{theorem}\label{winding neg} In the case of a negative Morse-Bott torus, $s\rightarrow -\infty$, we have the following inequalities: If $\xi_i$ is an end of $u$ and $u$ is not the trivial cylinder, we have the following inequality: \begin{equation*} \eta (\xi_i) \ge 0 \end{equation*} For the writhe of a single end, with covering multiplicity $q_i$, we have: \begin{equation*} w(\xi_i) \ge \eta(\xi_i) (q_i-1) \end{equation*} Note this holds for the case of a trivial cylinder.
Let $\xi_1$ and $\xi_2$ be two braids that correspond to two distinct ends of $u$ that approach the same Reeb orbit, with multiplicities $q_i$ and winding numbers $\eta_i$, then: \begin{equation*} l(\xi_1,\xi_2)\ge \min (q_1\eta_2,q_2\eta_1) \end{equation*} Note this holds if one of the ends $\xi_i$ came from a trivial cylinder. And finally to calculate the writhe of all ends approaching the same Reeb orbit, $w(\xi)$, let $\xi$ denote the total braid, and $\xi_i$ the various components coming from incoming ends of $u$ (this holds for both $s=\pm \infty$): \begin{equation*} w(\xi) = \sum_i w(\xi_i) + \sum_{i \neq j } l(\xi_i,\xi_j) \end{equation*} In the case of $s\rightarrow +\infty$, we have the following inequalities \begin{equation*} \eta (\xi_i) \le -1 \end{equation*} \begin{equation*} w(\xi_i) \le \eta(\xi_i) (q_i-1) \,\,\,\, \text{for single end writhe} \end{equation*} \begin{equation*} l(\xi_1,\xi_2)\le \max (q_1\eta_2,q_2\eta_1) \end{equation*} \end{theorem} \begin{proof} The exact same proof as for the positive Morse-Bott torus applies, except we use the Robbin-Salamon index $\mu=-1/2$. \end{proof} \begin{proposition}[link, $-\infty$, negative Morse-Bott torus] Let $u$ have ends asymptotic to $\gamma$ on a negative Morse-Bott torus as $s\rightarrow -\infty$; suppose there are $k_{free}$ free ends of multiplicity $q_i^{free}$, of total multiplicity $N_{free}$; suppose there are $k_{fix}$ fixed ends each of multiplicity $q_i^{fix}$, of total multiplicity $N_{fix}$. Then we have the writhe bound: \[ w(\xi) \geq -\sum_{i}^{k_{fix}+k_{free}} \eta_i +\sum _{i,j}^ {k_{fix}+k_{free}} \min(\eta_i q_j, \eta_j q_i) \geq -N_{free}-(-k_{free}) \] with equality enforcing partition condition $(1,..,1)$ on free ends and no partition condition on fixed ends. \end{proposition} \begin{proof} $\eta \geq0$ so $LHS\geq 0$, $RHS =k_{free} - N_{free}$, so the inequality holds, with equality iff the free ends satisfy partition conditions $(1,..,1)$; no restrictions on fixed ends.
\end{proof} \begin{proposition}[link, $+\infty$, negative Morse-Bott torus] Let $u$ have ends asymptotic to $\gamma$ on a negative Morse-Bott torus as $s\rightarrow +\infty$; suppose there are $k_{free}$ free ends of multiplicity $q_i^{free}$, of total multiplicity $N_{free}$; and suppose there are $k_{fix}$ fixed ends each of multiplicity $q_i^{fix}$, of total multiplicity $N_{fix}$. \[ w(\xi) \leq -\sum_{i}^{k_{fix}+k_{free}} \eta_i +\sum _{i,j}^ {k_{fix}+k_{free}} \max(\eta_i q_j, \eta_j q_i) \leq -N_{fix}-N_{free}+1 +k_{fix} \] with equality enforcing only free or fixed ends. In the case of only fixed ends the partition condition is $(n)$, and in the case of only free ends the partition condition is either $(n)$ or $(n-1,1)$. \end{proposition} \begin{proof} We can split the sum into: \[ -\sum_i^{k_{free}} \eta_i + \sum _{i,j}^ {k_{free}} \max(\eta_i q_j, \eta_j q_i)\leq 1-N_{free} \] and \[ -\sum_i^{k_{fixed}} \eta_i + \sum _{i,j}^ {k_{fixed}} \max(\eta_i q_j, \eta_j q_i)\leq k_{fix}-N_{fix}. \] Each of the above inequalities holds individually, and when there are both free and fixed ends, there are cross terms that make the inequality strict. As before, we can deduce the partition conditions directly from imposing the equality condition. \end{proof} \subsection{Morse-Bott tori as ECH generators} \label{MBT as ECH} Recall that for ECH of nondegenerate contact forms, the generators of the chain complex are orbit sets satisfying the condition that if an orbit is hyperbolic then it can only have multiplicity $1$. There are analogues of this for Morse-Bott tori. In Morse-Bott ECH, we think of the generators of the chain complex as collections of Morse-Bott tori with additional data, written schematically as: \[ \alpha = \{ (\mathcal{T}_j,\pm, m_j)\} \] and the differential as counting ECH index one height one $J$-holomorphic cascades connecting between chain complex generators as above (which we will also call orbit sets).
In the above definition $m_j$ is the total multiplicity, which we think of as the total multiplicity of Reeb orbits on $\mathcal{T}_j$ hit by the $J$ holomorphic curves that have ends on this Morse-Bott torus on the top (resp. bottom) level of a (height 1) cascade. $\pm$ is additional information, which specifies how many ends of the $J$ holomorphic curve landing on $\mathcal{T}_j$ are free/fixed. We see that this also depends on whether $\alpha$ appears as the top or bottom level of a $J$ holomorphic cascade, and in the context of our correspondence theorem free/fixed ends correspond to elliptic/hyperbolic orbits in the non-degenerate case. We state this explicitly in the next definition in which we also describe the expected correspondence between Morse-Bott ECH generators and nondegenerate ECH generators after the perturbation. \begin{definition} \label{mbgenerator} We consider the case of positive Morse-Bott tori. In the nondegenerate case we let $\gamma_-$ denote the hyperbolic Reeb orbit that arises from the perturbation with Conley-Zehnder index 0, and $\gamma_+$ the elliptic orbit that arose out of the perturbation with Conley-Zehnder index 1. Then the description of our Morse-Bott generator, say $(\mathcal{T},\pm,m)$ (this is just one Morse-Bott torus; in general $\alpha$ will consist of a collection of such tori, we focus on an example for the sake of brevity) and its correspondence with ECH generators in the perturbed non-degenerate case is given by: \begin{enumerate} \item positive side $s\rightarrow \infty$, \begin{enumerate} \item The Morse-Bott generator $(\mathcal{T},+,m)$ is defined to require all ends on $\mathcal{T}$ are free, with total multiplicity on the torus being $m$. In the perturbed nondegenerate case, this corresponds to the ECH orbit set $(\gamma_+, m)$. We observe the nondeg partition ($\theta$ positive) condition is $(1,..,1)$, and the Morse-Bott partition condition from the writhe bound is $(1,..1)$.
By the Conley-Zehnder index convention the ECH Conley-Zehnder index assigned to $(\mathcal{T},+,m)$ is given by: $CZ^{ECH}((\mathcal{T},+\infty,+,m)) =m$ \item For the Morse-Bott generator $(\mathcal{T},-,m)$, there is one end on $\mathcal{T}$ that is fixed with multiplicity 1, on the critical point of $f$ that corresponds to the hyperbolic orbit. The rest of the ends are free, and the total multiplicity of orbits on $\mathcal{T}$ is $m$. This corresponds to the orbit set $\{(\gamma_-,1),(\gamma_+,m-1)\}$ in the nondegenerate case. Note the partition conditions between nondegenerate case and Morse-Bott case agree. We also have $CZ^{ECH}((\mathcal{T},+\infty,-,m)) =m-1$. \end{enumerate} \item In the case of negative ends, $s\rightarrow -\infty$, \begin{enumerate} \item The Morse-Bott generator $(\mathcal{T},+,m)$ is defined to require all ends are fixed and asymptotic to the critical point of $f$ corresponding to the elliptic orbits, and the total multiplicity is $m$. In the nondegenerate case this corresponds to the orbit set $(\gamma_+,m)$. We observe Morse-Bott and nondegenerate partition conditions agree, both being $(m)$. By our conventions, $CZ^{ECH}(\mathcal{T},+,m)=m$ \item The Morse-Bott generator $(\mathcal{T},-,m)$ requires there is a multiplicity 1 free end landing on $\mathcal{T}$, the remaining ends are fixed and are also required to land on the critical point corresponding to the elliptic Reeb orbit. This corresponds to the orbit set $\{(\gamma_+,m-1),(\gamma_-,1)\}$ in the nondegenerate case, and we have analogous partition conditions for both Morse-Bott and nondegenerate case.
$CZ^{ECH}(\mathcal{T},-,m) = m-1$ \end{enumerate} \end{enumerate} We observe $(\mathcal{T},\pm,m)$ imposes different free/fixed end conditions, depending on whether it appears as $s\rightarrow \pm \infty$ ends, however we should think of it as being the same generator in the chain complex, as is evidenced by the fact that it is identified with the same nondegenerate orbit set regardless of whether it appears at the $+\infty$ or $-\infty$ end. \end{definition} We also briefly summarize the analogous result for the negative Morse-Bott torus. \begin{definition}\label{def:mb_generator_neg} In the case of negative Morse-Bott tori, we use $\gamma_- $ to denote the elliptic Reeb orbit after perturbation of Conley-Zehnder index -1, and let $\gamma_+$ denote the hyperbolic orbit after perturbation of Conley-Zehnder index 0. Let $(\mathcal{T},\pm,m)$ denote a Morse-Bott generator. \begin{enumerate} \item At the positive end as $s\rightarrow \infty$, \begin{enumerate} \item $(\mathcal{T},-,m)$ requires all ends fixed at the critical point of $f$ corresponding to $\gamma_-$, corresponds to $(\gamma_-,m)$ in the nondegenerate case, and both the degenerate and nondegenerate cases have partition condition $(m)$. $CZ^{ECH}((\mathcal{T},-,m))=-m$ \item $(\mathcal{T},+,m)$ requires one end free with multiplicity 1, the rest have multiplicity $m-1$ fixed at the critical point of $f$ corresponding to $\gamma_-$. The generator corresponds to $ \{(\gamma_+,1),(\gamma_-,m-1)\}$. $CZ^{ECH}((\mathcal{T},+,m))=-m+1$. Partition conditions match. \end{enumerate} \item Negative end, as $s\rightarrow -\infty$, \begin{enumerate} \item $(\mathcal{T},-,m)$ has all ends free, of total multiplicity $m$. This corresponds to $(\gamma_-,m)$ in the nondegenerate case. Partition conditions match. $CZ^{ECH}((\mathcal{T},-,m))=-m$. \item $(\mathcal{T},+,m)$ has one fixed end corresponding to the critical point of $f$ at $\gamma_+$ of multiplicity one; the rest are free and of multiplicity $m-1$.
This corresponds to the orbit set $\{(\gamma_+,1),(\gamma_-,m-1)\}$. The partition conditions correspond, and $CZ^{ECH}((\mathcal{T},+,m))=-m+1$. \end{enumerate} \end{enumerate} \end{definition} We would also like a more general notion of ECH Conley-Zehnder index for when there are more free/fixed ends than allowed by the ECH generator conditions above. To keep track of the more refined intersection theory information, we need to make our definition depend slightly on the behaviour of the $J$-holomorphic curve as its ends approach Reeb orbits on Morse-Bott tori. We consider a nontrivial somewhere injective holomorphic curve $u:\Sigma \rightarrow \mathbb{R} \times Y^3$. We isolate this into the following definition. \begin{definition} Let $u:\Sigma \rightarrow \mathbb{R} \times Y^3$ be a nontrivial somewhere injective holomorphic curve. Let $\gamma$ be a simple Reeb orbit on a positive Morse-Bott torus. \begin{enumerate} \item At the $s\rightarrow \infty$ end, suppose $k_{free}$ ends approach $\gamma$ with total multiplicity $N_{free}$, and $k_{fixed}$ ends approach $\gamma$ with total multiplicity $N_{fixed}$, then $CZ^{ECH}(\gamma) := N_{free}$. \item At the $s\rightarrow - \infty$ end, suppose $k_{free}$ ends approach $\gamma$ with total multiplicity $N_{free}$, and $k_{fixed}$ ends approach $\gamma$ with total multiplicity $N_{fixed}$, then $CZ^{ECH}(\gamma) := N_{free}+N_{fixed} -1$. \end{enumerate} Similarly if $\gamma$ is a simply covered Reeb orbit on a negative Morse-Bott torus. \begin{enumerate} \item At the $s\rightarrow \infty$ end, suppose $k_{free}$ ends approach $\gamma$ with total multiplicity $N_{free}$, and $k_{fixed}$ ends approach $\gamma$ with total multiplicity $N_{fixed}$, then $CZ^{ECH}(\gamma) := -N_{fixed}-N_{free}+1$. \item At the $s\rightarrow - \infty$ end, suppose $k_{free}$ ends approach $\gamma$ with total multiplicity $N_{free}$, and $k_{fixed}$ ends approach $\gamma$ with total multiplicity $N_{fixed}$, then $CZ^{ECH}(\gamma) := -N_{free}$.
\end{enumerate} \end{definition} Note the above definition agrees with that of the ECH Morse-Bott generator. Then let $u$ be a somewhere injective $J$ holomorphic curve with no trivial cylinder components, and we have chosen which ends of $u$ are fixed/free. Then we define its ECH index using the above notion of ECH Conley-Zehnder index: \begin{definition} We define the ECH index of $u$ as: \begin{equation} I(u):= c_\tau(u) + Q_\tau(u) + CZ^{ECH}(u) \end{equation} \end{definition} Note the above definition not only depends on the relative homology class of $u$, it also depends on how the ends of $u$ are distributed among the Reeb orbits (for information of free/fixed beyond that of the Morse-Bott ECH generators) --- in particular we have to keep not only the information of how many free/fixed ends land on a Morse-Bott torus, we also need to retain the information of which ends are asymptotic to which Reeb orbit. By using the writhe bound we recover directly \begin{proposition} Let $u$ be a $J$-holomorphic map satisfying the conditions above, \begin{equation} Ind(u) \leq I(u) -2\delta(u). \end{equation} with equality enforcing the partition conditions described in the writhe bound section. \end{proposition} We next include the case of trivial cylinders in our definition of the ECH Conley-Zehnder index. \begin{definition} Let $\gamma$ be a simply covered Reeb orbit on a positive Morse-Bott torus. Let $u:\Sigma \rightarrow \mathbb{R} \times Y$ be a $J$-holomorphic curve with potentially disconnected domain. When we say trivial cylinders below, we allow trivial cylinders with higher multiplicities. \begin{enumerate} \item At the $s\rightarrow \infty$ end, suppose $k_{free}$ ends approach $\gamma$ with total multiplicity $N_{free}$, and $k_{fixed}$ ends approach $\gamma$ with total multiplicity $N_{fixed}$, then $CZ^{ECH}(\gamma) := N_{free}$. Here we allow the holomorphic curves to be trivial cylinders.
\item At the $s\rightarrow - \infty$ end, suppose $k_{free}$ ends approach $\gamma$ with total multiplicity $N_{free}$, and $k_{fixed}$ ends approach $\gamma$ with total multiplicity $N_{fixed}$. If at least one of the approaching ends is not that of a trivial cylinder, then $CZ^{ECH}(\gamma) := N_{free}+N_{fixed} -1$. If all the approaching ends are trivial cylinders, then $CZ^{ECH} : = N_{fixed}$. \end{enumerate} Next let $\gamma$ be a simply covered Reeb orbit on a negative Morse-Bott torus. \begin{enumerate} \item At the $s\rightarrow \infty$ end, suppose $k_{free}$ ends approach $\gamma$ with total multiplicity $N_{free}$, and $k_{fixed}$ ends approach $\gamma$ with total multiplicity $N_{fixed}$. If at least one of the approaching ends is not that of a trivial cylinder, then $CZ^{ECH}(\gamma) := 1-N_{free}-N_{fixed}$. If there are only trivial cylinders, then $CZ^{ECH}= -N_{fixed}$. \item At the $s\rightarrow - \infty$ end, suppose $k_{free}$ ends approach $\gamma$ with total multiplicity $N_{free}$, and $k_{fixed}$ ends approach $\gamma$ with total multiplicity $N_{fixed}$. Then we set $CZ^{ECH}(\gamma) := -N_{free}$. This includes the case of trivial cylinders. \end{enumerate} \end{definition} \begin{proposition} Let $C$ be a $J$ holomorphic current which can contain trivial cylinders. Each end in $C$ is implicitly assigned ``free'' or ``fixed'', and recall the convention that we can at most designate one end of a trivial cylinder as fixed. With $CZ^{ECH}$ as defined above, we have the inequality: \[ Ind(C) \leq I(C) -2\delta (C) \] \end{proposition} \begin{proof} Let $C$ be a $J$-holomorphic current of the form $\{(C_i,m_i)\}$ where the $C_i$ are pairwise distinct. If $C_i$ is nontrivial, and $m_i >1$, then as in \cite{Hutchings2002}, we can consider $m_i$ copies of $C_i$ translated by $m_i$ distinct factors in the symplectization direction. Then we can represent $(C_i,m_i)$ as $m_i$ distinct somewhere injective $J$-holomorphic curves.
We do this for all nontrivial components of $C$. Each resulting end of $C_i$ receives an assignment of ``free/fixed'', hence both sides of the inequality above are defined. (One can make all the copies of $C_i$ coming from $(C_i,m_i)$ have the same free/fixed assignments at their corresponding ends, but this won't be necessary.) As before this boils down to writhe bounds at $s=+\infty$ and $s=-\infty$. We first consider $\gamma$ a Reeb orbit on a positive Morse-Bott torus. We first consider the $s=+\infty$ case. Here for trivial cylinders $q_i=1$ and the linking number is zero, so the same proof as before produces the writhe bound. In the case $s\rightarrow -\infty$, let $N_{trivial}$ denote the total multiplicity of trivial ends, fixed or free. First assume there is at least one nontrivial end. The a priori bound on the writhe is: \[ w(\xi) \geq - \# \textup{nontrivial ends} + \sum_{i,j \textup{ nontrivial ends}} \min(q_i,q_j) + N_{trivial} \cdot (\# \textup{nontrivial ends} ). \] With our new definition of $CZ^{ECH}$, we need to establish the writhe bound that \[ - \# \textup{nontrivial ends} + \sum_{i,j \textup{ nontrivial ends}} \min(q_i,q_j) + N_{trivial} \cdot (\# \textup{nontrivial ends} ) \geq N_{free} + N_{fixed} -1 -(k_{fixed}) \] We use the superscripts $^T$ and $^{NT}$ to distinguish whether the multiplicity is coming from trivial ends or nontrivial ends. But the writhe bound already established implies \[ - \# \textup{nontrivial ends} + \sum_{i,j \textup{ nontrivial ends}} \min(q_i,q_j) \geq N^{NT}_{free} + N^{NT}_{fixed} -1 - k_{fixed}^{NT} \] Then it suffices to establish that \[ N_{trivial} \cdot (\# \textup{nontrivial ends} ) \geq N_{free}^T +N_{fixed}^T-k_{fixed}^T \] which always holds, hence the writhe bound continues to hold. When there are only trivial cylinders, the writhe is automatically zero, and likewise the writhe bound is trivially satisfied.
We next consider the case of $\gamma$ a Reeb orbit on a negative Morse-Bott torus. We first consider the $s\rightarrow -\infty$ case. Since the winding number $\eta$ in this case is bounded below by zero, the writhe bound continues to hold even in the presence of trivial cylinders. In the case of $s\rightarrow +\infty$, the computation is very similar to the $-\infty$ end of a positive Morse-Bott torus. Assuming there is at least one nontrivial end, \[ w(\xi) \leq +\#\textup{nontrivial ends} +\sum_{i,j \textup{ nontrivial ends}} \max(\eta_iq_j,\eta_jq_i) -N_{trivial}\cdot \# \textup{nontrivial ends} \leq -N_{fix}-N_{free} +1+k_{fix}. \] With the previous writhe bound we have already proven \[ \#\textup{nontrivial ends} +\sum_{i,j \textup{ nontrivial ends}} \max(\eta_iq_j,\eta_jq_i) \leq -N_{fix}^{NT} -N_{free}^{NT}+1+k_{fix}^{NT} \] hence it suffices to prove \[ -N_{trivial}\cdot \# \textup{nontrivial ends} \leq -N_{fix}^{T} -N_{free}^{T}+k_{fix}^{T} \] but this follows directly from our assumptions. In the case where there are only trivial ends the total writhe is zero, and the writhe bound is achieved. \end{proof} We next establish the subadditivity property of the ECH index. \begin{proposition} Let $\mathcal{C}_1 = \{(C_a,m_a)\}$ and $\mathcal{C}_2 = \{ (C_b,m_b)\}$ denote two $J$-holomorphic currents, where $C_a$ is never the same as $C_b$ unless they are both trivial cylinders (they can be $\mathbb{R}$ translates of each other). Then their ECH indices satisfy \begin{equation} I(\mathcal{C}_1 \cup \mathcal{C}_2) \geq I(\mathcal{C}_1) + I(\mathcal{C}_2) + 2\mathcal{C}_1 \cap \mathcal{C}_2. \end{equation} In the above $\mathcal{C}_1 \cap \mathcal{C}_2$ counts the intersections with multiplicity of $C_a$ with $C_b$. Note by intersection positivity each multiplicity is positive. Further by construction the intersection between trivial cylinders is zero.
\end{proposition} \begin{proof} We again apply the translation in the symplectization trick to represent nontrivial currents $(C_a,m_a)$ (resp. $(C_b,m_b)$) by $m_a$ (resp. $m_b$) distinct somewhere injective curves. After relabelling we can also denote them by $C_a$ (resp. $C_b$). We apply the adjunction inequality as in \cite{Hutchings2002,hutching_revisited} to obtain \begin{equation} I(\mathcal{C}_1\cup \mathcal{C}_2) -I(\mathcal{C}_1) -I(\mathcal{C}_2) -2 \# \mathcal{C}_1\cdot \mathcal{C}_2 = CZ^{ECH}(\mathcal{C}_1 \cup \mathcal{C}_2) - CZ^{ECH}(\mathcal{C}_1)-CZ^{ECH}(\mathcal{C}_2) - 2\sum_{a, b} l_\tau(C_a,C_b) \end{equation} Then this reduces to a local computation relating the linking number and our choice of Conley-Zehnder indices. We take this up case by case. First consider $\gamma$ a Reeb orbit on a positive Morse-Bott torus, and consider the $s\rightarrow \infty$ end. In this case we have $CZ^{ECH}(\mathcal{C}_1 \cup \mathcal{C}_2) - CZ^{ECH}(\mathcal{C}_1)-CZ^{ECH}(\mathcal{C}_2) =0$ and $l_\tau(C_a,C_b) \leq 0$. Hence all the contributions from this end are $\geq 0$. We next consider $\gamma$ on a positive Morse-Bott torus at the $s\rightarrow -\infty$ ends. Because the way we assigned Conley-Zehnder indices depends on whether all the ends are trivial, we split into cases. In the case where all ends of $\mathcal{C}_1$ and $\mathcal{C}_2$ asymptotic to $\gamma$ as $s\rightarrow -\infty$ are trivial, we have again $CZ^{ECH}(\mathcal{C}_1 \cup \mathcal{C}_2) - CZ^{ECH}(\mathcal{C}_1)-CZ^{ECH}(\mathcal{C}_2)=0$ and the linking number vanishes. If one of them has non-trivial ends approaching $\gamma$ (WLOG take this to be $\mathcal{C}_1$ and take $\mathcal{C}_2$ to consist purely of trivial ends), then we have the Conley-Zehnder contribution being \[ N_{free}^1+N_{fixed}^1-1 +N_{fixed}^2 - (N_{free}^1+N_{free}^2+N_{fixed}^1+N_{fixed}^2-1) = -N_{free}^2 \] where we write $N_{free}^1$ to denote the free ends coming from $\mathcal{C}_1$ etc.
The linking number contribution is bounded below by $2(N_{fixed}^2 + N_{free}^2)$, hence the overall contribution is non-negative. In the case where both $\mathcal{C}_1$ and $\mathcal{C}_2$ contain nontrivial ends at $\gamma$ as $s \rightarrow -\infty$, the Conley-Zehnder difference term is just $-1$, and the linking number term $2l_\tau (C_a,C_b)\geq 2$, hence once again the overall contribution is non-negative. We next consider the case where $\gamma$ is a Reeb orbit in a negative Morse-Bott torus. This will be largely analogous to the positive Morse-Bott torus case. For $s\rightarrow -\infty$, the Conley-Zehnder indices contribute zero, and $l_\tau(C_a,C_b) \geq 0$ as $s \rightarrow -\infty$, hence the overall contribution is non-negative. We next consider $\gamma$ as $s\rightarrow +\infty$. Again we break into cases because of trivial cylinders. In the case where all ends approaching $\gamma$ from $\mathcal{C}_1$ and $\mathcal{C}_2$ are trivial cylinders, the Conley-Zehnder index contribution as well as the linking number is zero. Then in the case where $\mathcal{C}_1$ has nontrivial ends but $\mathcal{C}_2$ has all ends trivial, the Conley-Zehnder index contribution is given by $-N_{free}^2$, and the linking number $\sum 2l_\tau(C_a,C_b) \leq -2(N_{free}^2+N_{fixed}^2)$, hence the overall contribution is nonnegative. Similarly in the case where both $\mathcal{C}_1$ and $\mathcal{C}_2$ have nontrivial ends, the difference of the Conley-Zehnder index contribution is $-1$, whereas the linking number $2l_\tau(C_a,C_b) \leq -2$, hence the overall contribution is positive. Hence combining all of the above local inequalities we obtain the overall ECH index inequality. \end{proof} \subsection{Multiple level cascades and ECH index} In this subsection we describe ECH index one cascades. We recall ECH index one cascades should come from degenerations of ECH index one curves, and in particular should respect partition conditions on the end points.
In particular we should always keep in mind that ECH index one cascades should flow from a Morse-Bott ECH generator $\alpha_1$ to another $\alpha_n$, which includes the information of multiplicities of free/fixed ends that land on Morse-Bott tori. Given any cascade $\cas{u}$ as given in our previous definition, we first turn it into a ``cascade of currents'': $\cas{\mathbf{u}}=\{u^1,..,u^{n-1}\}$. Then we can proceed to define the ECH index of $\cas{\mathbf{u}}$. The following is half definition, half theorem, in the sense that if this cascade is transverse and rigid and we glued it into a $J$ holomorphic curve, the ECH index of its homology class is given by the following calculation. Conversely, if $u$ came from a cascade of curves that came from a degeneration of an $I=1$ holomorphic curve in the $\lambda_\delta$ setting, then our definition of $I$ for the cascade of currents will also be one. \begin{definition} Let $\cas{\mathbf{u}} = \{u^1,...,u^{n-1}\}$ be a height 1 cascade of currents. Let its positive asymptotics be denoted by $\alpha_1$ and its negative asymptotics be denoted by $\alpha_n$, both Morse-Bott ECH generators. We can then define the ECH index for the cascade of currents as: \begin{equation} I(\cas{\mathbf{u}}) = c_1(\cas{\mathbf{u}}) + Q_\tau( \cas{\mathbf{u}}) + CZ^{ECH}(\cas{\mathbf{u}}). \end{equation} The $CZ^{ECH}$ index term for the cascade is just the ECH index terms of $\alpha_1$ and $\alpha_n$, which corresponds to the nondegenerate ECH Conley-Zehnder index once we have identified free/fixed ends with elliptic/hyperbolic orbits. The cascade Chern class and relative intersection terms are just the sums of the Chern classes of each of the levels, i.e.
\[ c_1(\cas{\mathbf{u}}):= c_1(u^1) + ...+c_1(u^{n-1}) \] and \[ Q_\tau(\cas{\mathbf{u}}): = Q_\tau (u^1) + ...+ Q_\tau(u^{n-1}) \] \end{definition} We would like to compare the ECH index of a cascade to the Fredholm index of the reduced version, because then with enough transversality we would be able to rule out certain configurations of cascades of ECH index one by index reasons. To this end, we decompose the ECH index of a cascade into the ECH indices of its constituents, as follows: \begin{proposition} We assume all ends of $u^2,..,u^{n-2}$ are free, and all ends of $u^1$ and $u^{n-1}$ are considered free except those mandated by $\alpha_1$ and $\alpha_n$, and we recall our conventions on trivial cylinders with only one fixed end. Then let $R_{pos,i+1}'$ denote the number of distinct Reeb orbits on positive Morse-Bott tori approached by nontrivial ends of $u^i$ as $s\rightarrow -\infty$, and let $V_{pos,i+1}'$ denote the total multiplicity of Reeb orbits on positive Morse-Bott tori approached by $u^{i}$ at the $s\rightarrow -\infty$ end, so that at these Reeb orbits there are only trivial ends as $s\rightarrow -\infty$. Similarly we let $R_{neg,i}'$ denote the number of distinct Reeb orbits on negative Morse-Bott tori approached by nontrivial ends of $u^i$ as $s\rightarrow +\infty$, and let $V_{neg,i}'$ denote the total multiplicity of Reeb orbits on negative Morse-Bott tori approached by $u^{i}$ at the $s\rightarrow +\infty$ end, so that at these Reeb orbits there are only trivial ends as $s\rightarrow +\infty$. Then we have \begin{align*} I(\cas{\mathbf{u}}) =& I(u^1) + ...+ I(u^{n-1})\\ &-R_{pos,2}'-...-R_{pos,n-1}'- V_{pos,2}'-...-V_{pos,n-1}'-R_{neg,2}' -..-R_{neg,n-1}' -V_{neg,2}'-..-V_{neg,n-1}' \end{align*} \end{proposition} \begin{proof} Follows directly from the definition of the ECH Conley-Zehnder index.
\end{proof} \begin{remark} Note the assignment of free/fixed end points for the calculation of the ECH index is different from when we defined free/fixed punctures in the calculation of the Fredholm index. \end{remark} \begin{remark} We remark that the above formula makes sense in the case where our cascade consists purely of a chain of cylinders at a critical point. If it started at the minimum of $f$, the trick is to notice that by our convention all trivial cylinders below it are considered free. \end{remark} In order to compare $I(\cas{\mathbf{u}})$ and $Ind(\cas{\tilde{\mathbf{u}}})$, we first define \begin{align*} I(\cas{\tilde{\mathbf{u}}}) &:= I(\tilde{u^1}) + ...+ I(\tilde{u^{n-1}})\\ &-R_{pos,2}'-...-R_{pos,n-1}'- V_{pos,2}'-...-V_{pos,n-1}'-R_{neg,2}' -..-R_{neg,n-1}' -V_{neg,2}'-..-V_{neg,n-1}' \end{align*} by removing all multiple covers of nontrivial curves. Note we have \begin{equation} I(\cas{\tilde{\mathbf{u}}}) \leq I(\cas{\mathbf{u}}) \end{equation} with equality holding only if $\cas{\mathbf{u}}$ is already reduced. Next we compare $Ind(\cas{\tilde{\mathbf{u}}})$ and $ I(\cas{\tilde{\mathbf{u}}})$. \begin{proposition}\label{prop:indexinequality} $Ind(\cas{\tilde{\mathbf{u}}}) \leq I(\cas{\tilde{\mathbf{u}}})-2\delta(\cas{\tilde{\mathbf{u}}})-1$ \end{proposition} \begin{proof} We make a term-wise comparison, e.g. we compare \begin{equation} Ind(\tilde{u}^i)-k_{i+1}' -k_{i+1} +R_{i+1} \end{equation} and \begin{equation} I(\tilde{u}^i)-2\delta(\tilde{u}^i) - R_{pos,i+1}'-V_{pos,i+1}' - R_{neg,i+1}' - V_{neg,i+1}'. \end{equation} Note there are two different conventions by which we assigned ``free'' and ``fixed'' ends to ends of curves appearing in the cascade; we will refer to them respectively as the Fredholm convention and the ECH convention. We further refine our notation to $k_{pos,i+1}, k_{neg,i+1}, k_{pos,i+1}',k_{neg,i+1}'$ to denote the number of ends among the $k_{i}$ and $k_{i+1}$ ends that land on positive/negative Morse-Bott tori, i.e.
we have $k_{i} = k_{pos,i} + k_{neg,i}$. We first restrict to $1<i<n-1$. To compare these two terms, we first decompose $\tilde{u^i} = C_i\cup T_{free,i} \cup T_{fixed,i}$, where $C_i$ is a collection of nontrivial somewhere injective curves, $T_{free,i}$ is a collection of free trivial cylinders according to the Fredholm convention, and $T_{fixed,i}$ is a collection of fixed cylinders according to the Fredholm index convention. Assume $C_i$ has $l_{free,i}$ free ends, and $l_{fixed,i}$ fixed ends according to the Fredholm convention, then we have: \[ Ind(C_i\cup T_{free,i} \cup T_{fixed,i}) +l_{fixed,i}\leq I (C_i\cup T_{free,i}\cup T_{fixed,i})-2\delta (C_i\cup T_{free,i}\cup T_{fixed,i}) -|T_{fixed,i}| \] We may at later points further refine the notation to $l_{fixed,pos/neg,\pm, i}$ to indicate fixed ends at positive/negative Morse-Bott tori, at positive/negative ends. Note $T_{fixed,i}$ is regarded as free cylinders when we measure its ECH index. $|T_{fixed,i}|$ denotes the total number of fixed trivial cylinders that appear in this level. We will also later refine our notation to distinguish $T_{fixed/free,pos/neg,i}$ for trivial cylinders on positive/negative Morse-Bott tori. We next consider the case for $i=1$. We can decompose as before $\tilde{u}^1=C_1\cup T_{free,1} \cup T_{fixed,1} \cup T_{fixed,1}'$. We explain the notation. $C_1$ is a collection of nontrivial somewhere injective holomorphic curves. The information of the Morse-Bott generator $\alpha_1$ tells us which ends of $C_1$ should already be considered as fixed as $s\rightarrow \infty$. There are additionally $l_{fixed,1}$ ends of $C_1$ that we count as fixed when we compute its Fredholm index because they land on critical points of $f$. $T_{free,1}$ is a collection of free cylinders. $T_{fixed,1}$ is a collection of fixed trivial cylinders that come from the requirements of $\alpha_1$. Each positive Morse-Bott torus can only have one of these, and they must all be multiplicity 1.
$T_{fixed,1}'$ is a collection of trivial cylinders that don't come from the requirements of $\alpha_1$ but also happen to land on a critical point of $f$. The index inequality we have gives: \[ Ind(C_1\cup T_{fixed,1} \cup T_{free,1}\cup T_{fixed,1}') +l_{fixed,1} \leq I(C_1 \cup T_{fixed,1} \cup T_{free,1}\cup T_{fixed,1}') -2\delta(\tilde{u}^1)- |T_{fixed,1}'| \] where for the purpose of computing the ECH index we have counted elements of $T_{fixed,1}'$ as free cylinders. Similarly for the $i=n-1$ level. As before we can decompose $\tilde{u}^{n-1}=C_{n-1}\cup T_{free,n-1} \cup T_{fixed,n-1} \cup T_{fixed,n-1}'$ with the same convention as before. Here we only need to prove: \[ Ind(C_{n-1}\cup T_{free,n-1} \cup T_{fixed,n-1}) +l_{fixed,n-1}\leq I(C_{n-1}\cup T_{free,n-1} \cup T_{fixed,n-1}) - 2\delta(\tilde{u}^{n-1})-|T'_{fixed,n-1}| \] which holds by the one-level ECH index inequality. When we take the difference between $I(\tilde{\cas{u}})$ and $Ind(\tilde{\cas{u}})$, we can break down their difference into the following form: \begin{align*} I(\cas{\tilde{\mathbf{u}}}) =& I(\tilde{u^1}) + ...+ I(\tilde{u}^{n-1})\\ &-R_{pos,2}'-...-R_{pos,n-1}'- V_{pos,2}'-...-V_{pos,n-1}'-R_{neg,2}' -..-R_{neg,n-1}' -V_{neg,2}'-..-V_{neg,n-1}' \end{align*} and the index term can be rewritten as \[ Ind= \sum_i ind(\tilde{u}^i) - \sum_{i=2,...,n-1} (k_{pos,i} + k_{pos,i}'-R_{pos,i}) - \sum_{i=2,...,n-1} (k_{neg,i} + k_{neg,i}'-R_{neg,i})-1-L \] If we take their difference, and take advantage of the inequalities we proved in the previous paragraphs, we get: \begin{align*} I -Ind =& \sum I(\tilde{u}^i) - ind(\tilde{u}^i) + \sum_{i=2,...,n-1}(k_{pos,i} + k_{pos,i}'-R_{pos,i} - R_{pos,i}' -V_{pos,i}') \\ &+ \sum_{i=2,...,n-1}(k_{neg,i} + k_{neg,i}'-R_{neg,i} - R_{neg,i}' -V_{neg,i}') +L+1\\ \geq& 2\delta (\cas{\tilde{\mathbf{u}}}) + \sum_{i=2,..,n-2}( l_{fixed,i} + |T_{fixed,i}| )+ l_{fixed,1} + l_{fixed,n-1} + |T'_{fixed,1}| + |T'_{fixed,n-1}| \\ &+\sum_{i=2,...,n-1}(k_{pos,i} +
k_{pos,i}'-R_{pos,i} - R_{pos,i}' -V_{pos,i}') \\ &+ \sum_{i=2,...,n-1}(k_{neg,i} + k_{neg,i}'-R_{neg,i} - R_{neg,i}' -V_{neg,i}') +L+1 \end{align*} It suffices to prove the above expression is bounded below by one. It suffices to prove \begin{align*} &\sum_{i=2,...,n-1}R_{pos,i} + R_{pos,i}' +V_{pos,i}' + \sum_{i=2,...,n-1}R_{neg,i} + R_{neg,i}' +V_{neg,i}'\\ &\leq \sum_{i=2,..,n-2}( l_{fixed,i} + |T_{fixed,i}| )+ l_{fixed,1} + l_{fixed,n-1} + |T'_{fixed,1}| + |T'_{fixed,n-1}| \\ &+ \sum_{i=2,...,n-1}(k_{pos,i} + k_{pos,i}') + \sum_{i=2,...,n-1}(k_{neg,i} + k_{neg,i}') +L \end{align*} We break down the above inequality into several components. We first observe for $i=2,..,n-2$ we have \[ R_{pos,i+1}' + V_{pos, i+1}' \leq l_{fixed,pos,-\infty,i}+l_{fixed,pos,+\infty,i}+|T_{fixed,i}|+k_{pos, i+1} + k'_{pos,i+1} - R_{pos,i+1} \] We first observe the multiplicities counted by $R_{pos,i+1}'$ and $V_{pos,i+1}'$ are disjoint --- if a Reeb orbit appears in considerations of $R_{pos, i+1}'$ then it is not considered for $V_{pos,i+1}'$ and vice versa. Multiplicities counted by $V_{pos,i+1}'$ are contained in $k_{pos,-\infty,i+1}$ and $|T_{fixed,i+1}|$, and the Reeb orbits counted by $R_{pos,i+1}'$ are contained in the ends counted by $l_{fixed,pos,-\infty,i+1}$ and $k_{pos,-\infty,i+1}$. We observe for this range of $i$, we only needed to use the fixed ends of $C_{i}$ in $l_{fixed,i}$ as $s\rightarrow -\infty$ to achieve this inequality, and the presence of $l_{fixed,pos,+\infty,i}$ will make this inequality strict by that factor. Finally we observe $k'_{pos,i+1} - R_{pos,i+1}\geq 0$. This concludes this inequality. We next consider the case $i=1$ for positive Morse-Bott tori, i.e. we consider the inequality \[ R'_{pos,2} + V'_{pos,2} + R_{pos,2} \leq l_{fixed,pos,-\infty,1} + l_{fixed,pos,+\infty,1} + |T'_{fixed,pos,1}| + k_{pos,2} + k_{pos,2}' \] This inequality does not hold in general.
We first observe $k_{pos,2}' -R_{pos,2} \geq 0$, and the Reeb orbits counted by $R_{pos,2}'$ are included in $k_{pos,2}$ and $l_{fixed,pos,-\infty,1}$. The issue for $V_{pos,2}'$ is slightly more subtle, because each positive Morse-Bott torus can contain one fixed trivial cylinder that is not included in $|T_{fixed,pos,1}'|$, hence there can be a Reeb orbit counted by $V_{pos,2}'$ that does not appear on the right hand side. If we follow this trivial cylinder downwards and we encounter an end of a non-trivial $J$-holomorphic curve that approaches this Reeb orbit at $s\rightarrow \infty$, then it will contribute to the $l_{fixed,pos,+\infty,i}$ terms in one of the lower levels. And this $l_{fixed,pos,+\infty,i}$ term was not used in our previous computations, so after we add up all the terms in the inequality, the overall inequality will still hold. If we go downwards and do not see a nontrivial end, then there must be a trivial cylinder at the bottom level of the cascade making a contribution to $T_{fixed,pos,n-1}'$ located at this specific Reeb orbit on this positive Morse-Bott torus. This cylinder counted by $T_{fixed,pos,n-1}'$ is not used anywhere else in any of our other inequalities, so it makes up for the deficit coming from the $i=1$ inequality. Finally we consider the terms on the last level concerning the positive Morse-Bott tori contributing to our inequality. This is just \[ |T_{fixed,pos,n-1}'| \geq 0 \] which holds trivially. $|T_{fixed,pos,n-1}'|$ being nonzero does not necessarily mean our inequality is strict, as some of these may be borrowed to make the inequality hold on the $i=1$ level as per above. We now repeat the analogous series of inequalities concerning negative Morse-Bott tori. We first prove the inequalities \[ R_{neg,i}+R'_{neg,i}+V'_{neg,i} \leq k_{neg,i}+k'_{neg,i}+l_{fixed,neg,+\infty,i} + |T_{fixed,neg,i}| \] for $i$ in the range $2,...,n-2$. We have as before that $R_{neg,i} \leq k_{neg,i}$.
Similarly the count of orbits in $R_{neg,i}'$ is included in $k'_{neg,i}$ and $l_{fixed,neg,+\infty,i}$, and the count of $V_{neg,i}'$ is included among $T_{fixed,neg,i}$ and $k'_{neg,i}$. This concludes the proof of this inequality. Next we focus on the $i=n-1$ case. We consider the inequality \[ R'_{neg,n-1} + V'_{neg,n-1} + R_{neg,n-1} \leq l_{fixed,neg,+\infty,n-1} + |T'_{fixed,neg,n-1}| + k_{neg,n-1} + k_{neg,n-1}' \] This does not always hold. As before, we first observe $k_{neg,n-1}' - R_{neg,n-1} \geq 0$, and $R'_{neg,n-1}$ is included in $l_{fixed,neg,+\infty, n-1}$ and $k_{neg,n-1}$. However each negative Morse-Bott torus can contain one fixed trivial cylinder not included in $T'_{fixed,neg,n-1}$. If we follow this trivial cylinder upwards and encounter an end of a non-trivial $J$-holomorphic curve that approaches this Reeb orbit at $s\rightarrow -\infty$, then it will contribute to $l_{fixed,neg,-\infty,i}$ terms in one of the upper levels. And this $l_{fixed,neg,-\infty,i}$ term was not used in our previous computations, so after we add up the terms in the inequality, the overall inequality will still hold. If we go upwards and do not see a nontrivial end, then there must be a trivial cylinder contributing to $T'_{fixed,neg,1}$ appearing at the very same Reeb orbit. This cylinder's contribution is not used up by any of our previous inequalities, so it makes up for the deficit in the above inequality. The $i=1$ level term for negative Morse-Bott tori is simply $|T_{fixed,neg,1}'| \geq 0$ which holds trivially. This inequality being strict does not necessarily imply the overall inequality is strict, by the mechanism discussed above. Adding up the above inequalities we get the inequality in the proposition. \end{proof} We now state some consequences of the ECH index one condition, assuming transversality can be satisfied. \begin{corollary} \label{conditions on currents} Assuming $J$ can be chosen to be good, and we have a height one cascade $\cas{u}$.
Then we pass to cascade of currents $\cas{\mathbf{u}}$, the ECH index being one imposes the following conditions: \begin{enumerate} \item $\cas{\mathbf{u}}$ is reduced. \item All flow times are strictly positive. \item All curves are embedded. Curves on the same level are disjoint. \item Each level only has one nontrivial curve, the rest are trivial cylinders. \item With the above choice of fixed/free ends, all curves obey partition conditions of free ends for ends that do not land on critical points. They obey the partition conditions for fixed ends for those that land on critical points of $f$. \item For any nontrivial curve $C$ appearing in the cascade of currents $\mathbf{\cas{u}}$: \begin{itemize} \item If $C$ appears in either $u^1$ or $u^{n-1}$, then its ends can appear on critical points of $f$ only as mandated by $\alpha_1$ or $\alpha_2$. All other ends must avoid critical points of $f$. \item If $C$ appears in a level between $u^1$ and $u^{n-1}$, its ends can only end on a critical point of $f$ if this end is then connected by a fixed chain of trivial cylinders to fixed points mandated by $\alpha_1$ or $\alpha_n$. All other ends avoid critical points of $f$, and hence are free. \item Further, if we see a chain of fixed trivial cylinders connecting a positive or negative end of $C$ to a critical point of $f$, suppose the fixed Reeb orbit is called $\gamma$. Then no nontrivial end may land on $\gamma$ on any of the levels of the components of the chain of trivial cylinders in either $s\rightarrow +\infty$ or $s\rightarrow -\infty$. On the level where $C$ is asymptotic to $\gamma$ as $s\rightarrow \infty$ or $s\rightarrow -\infty$, the end of $C$ is the only end that is asymptotic to $\gamma$ as $s\rightarrow +\infty$ and $s\rightarrow -\infty$ respectively. \end{itemize} \item In particular, if $C$ is a nontrivial curve in the cascade, and an end of $C$ is asymptotic to $\gamma$, a Reeb orbit in the $s\rightarrow +\infty$ (resp. 
$-\infty$) end, then no other curve (or other ends of $C$) in the same level may be asymptotic to $\gamma$ as $s\rightarrow +\infty$ (resp. $-\infty)$. \item If an end of a nontrivial curve $C$ is asymptotic to $\gamma$ with multiplicity $>1$, as $s\rightarrow \infty$, and if we follow $\gamma $ upwards, e.g.\ we consider $C'$ in the level above which is asymptotic to $\gamma$ as $s\rightarrow -\infty$. If all curves above $C$ that are asymptotic to $\gamma$ are trivial cylinders, then we cannot draw any conclusions aside from partition conditions of $C$. However, if after some chain of gradient flow lines a nontrivial curve $C''$ above $C$ is asymptotic to $\phi_T^f(\gamma)$ as $s\rightarrow -\infty$ and is connected to the positive end of $C$ at $\gamma$ via a gradient flow, then by partition conditions both $C$ and $C''$ can only be asymptotic to $\gamma$ with multiplicity 1. \end{enumerate} \end{corollary} \begin{proof} All statements in the above proposition come from taking all the inequalities in the previous proposition to be equalities. $(a)$ comes from $I(\mathbf{\cas{u}}) = I(\mathbf{\cas{\tilde{u}}})$. $(b)$ comes from $L=0$. $(c)$ comes from $\delta(\mathbf{\cas{u}})=0$. $(d)$ comes from $Ind=0$, otherwise the cascade lives in a moduli space of dimension greater than zero. $(e)$ comes from the fact that violations of partition conditions for nontrivial curves would make the inequalities comparing Fredholm index to ECH index strict. Next consider $(f)$, for the nontrivial curves appearing in $u^1$ or $u^{n-1}$. We first consider the case of $u^1$. We observe all contributions to $l_{fixed,+\infty,1}$ from the $s\rightarrow +\infty$ ends must be zero for equality in Proposition~\ref{prop:indexinequality} to hold. Similarly we observe that for $u^{n-1}$ all contributions to $l_{fixed,-\infty,n-1}$ from the $s\rightarrow -\infty$ ends must be zero for equality to hold. If $C$ is a nontrivial curve between $u^1$ and $u^{n-1}$, we have to separate this into cases.
We first assume it has a negative end landing on a critical point of $f$ on a positive Morse-Bott torus. Then this end makes a contribution to $l_{fixed,pos,-\infty}$, and was used in our computation of inequality. Call this Reeb orbit $\gamma$, and consider levels below $C$ that have nontrivial ends asymptotic to $\gamma$ as $s\rightarrow +\infty$. Say this occurs on level $i$. If there are such curves, and if $\gamma$ does not appear as a fixed end assigned by $\alpha_1$ and connected to a trivial cylinder in $u^1$, then it is a appearance of $l_{fixed,pos,+\infty,i}$ that was not used in our proof of inequality in \ref{prop:indexinequality}, hence the inequality is strict. The case where $\gamma$ appears in $\alpha_1$ as a fixed end of a trivial cylinder is handled as follows. In the case there is a contribution to $T_{fixed,pos,n-1}'$ on the $u^{n-1}$ level from a trivial cylinder at $\gamma$, then we can use the additional $l_{fixed,pos,+\infty,i}$ at $\gamma$ to make the inequality strict. In the case $T_{fixed,n-1}'$ does not have a trivial cylinder at $\gamma$, then for multiplicity reasons the total multiplicity of nontrivial ends asymptotic to $\gamma$ as $s\rightarrow +\infty$ in the entire cascade must be greater than equal to two. If they come from two different ends (potentially at different levels), then their contribution to $l_{fixed,pos,+\infty,*}$ (of various levels) is at least two, which makes the inequality in proposition \ref{prop:indexinequality} strict. If we only see a single nontrivial end approach $\gamma$ as $s\rightarrow +\infty$ below $u^1$ level, then this end must have multiplicity $\geq 2$, and this violation of writhe inequality also ensures the index inequality is strict. If no nontrivial curves below $C$ that are positively asymptotic to $\gamma$ exist, then with the negative puncture of $C$ landing at $\gamma$, the negative puncture is connected to the last level $u^{n-1}$ at $\gamma$ via a chain of fixed trivial cylinders. 
If $\gamma$ is a minimum of $f$, then this is a contribution to $|T_{fixed,pos,n-1}'|$ that was not considered in the proof of inequality. This will make the overall inequality strict if $\gamma$ did not appear as a fixed end connected to a trivial cylinder in $u^1$. If $\gamma$ did appear (as a fixed end mandated by $\alpha_1$), then again for multiplicity reasons there is either an additional $l_{fixed,pos,+\infty i}$ contribution from $s\rightarrow +\infty$ ending on $\gamma$ on one of the middle levels, or $|T_{fixed,n-1}'|$ at $\gamma$ has multiplicity greater than or equal to two. Either case makes the index inequality strict. However if $\gamma$ is at a maximum of $f$, the inequality is not violated if this is a chain of trivial cylinders connecting to a fixed end mandated by $\alpha_{n}$. If $\alpha_{n}$ assigns free ends to this chain of cylinders, then we have extra contributions to $T_{fixed,pos,n-1}'$ which make the index inequality strict (in this case $\alpha_1$ cannot assign $\gamma$ as a fixed end). Finally if this is indeed a chain of fixed trivial cylinders connecting to a fixed orbit mandated by $\alpha_{n}$, then on the level where $C$ appears no other nontrivial end may be asymptotic to $\gamma$ as $s\rightarrow -\infty$, this is because if this is true, then we consider the inequality for $C$'s level \[ R_{pos,i+1}' + V_{pos, i+1}' \leq l_{fixed,pos,-\infty,i}+|T_{fixed,pos,i}|+k_{pos, i+1} + k'_{pos,i+1} - R_{pos,i+1} \] Both nontrivial ends at $\gamma$ are counted once by $R_{pos,i+1}'$, but twice by $l_{fixed,pos,-\infty, i}$, which makes this inequality strict. This automatically imposes the partition condition $(n)$ on this particular negative end of $C$. Further, down this chain of fixed trivial cylinders, all the way to $\alpha_{n}$, no further lower levels may have non-trivial curves whose ends are asymptotic to $\gamma$ as $\rightarrow -\infty$. This is clear for the lowest level $u^{n-1}$. 
We already argued $l_{fixed,pos,-\infty,n-1}=0$, then all fixed ends landing on $\gamma$ must be fixed ends assigned by $\alpha_{n}$, then the partition conditions imposed by ECH index implies we cannot have both trivial and nontrivial ends at $\gamma$. On levels above the lowest level and below the level of $C$, this follows from the inequality \[ R_{pos,i+1}' + V_{pos, i+1}' \leq l_{fixed,pos,-\infty,i}+|T_{fixed,pos,i}|+k_{pos, i+1} + k'_{pos,i+1} - R_{pos,i+1}. \] If we have both a trivial cylinder and an nontrivial end asymptotic to $\gamma$ in the negative end, they make an overall contribution of $1$ to the left hand side, but make a overall contribution of 2 to the right hand side by increasing $l_{fixed,pos,-\infty,i}$ and $|T_{fixed,pos,i}|$, hence making this inequality strict. We next consider $C$ has a positive end ending on a critical point of $f$. Call this Reeb orbit of $\gamma$. If $\gamma$ is not a fixed Reeb orbit mandated by $\alpha_{1}$, then this already makes a contribution to $l_{fixed,pos,+\infty,i}$ we did not use in the index inequality, which makes the overall inequality strict. If $\gamma$ indeed appears in $\alpha_1$ and is in fact connected to a trivial cylinder, then either this end of $C$ is connected upwards to $\gamma$ via a sequence of trivial cylinders, or there are more nontrivial ends above $C$ that ends on $\gamma$ as $s\rightarrow +\infty$, but this makes the index inequality strict due to multiplicity reasons ($\alpha_1$ can only require a fixed end of multiplicity 1 at $\gamma$). Hence it must be the case $C$ is connected to $\gamma$ on the top level via sequence of fixed trivial cylinders, and no level above $C$ have nontrivial ends approaching $\gamma$ as $s\rightarrow +\infty$. If a curve above $C$ has a negative end approaching $\gamma$, we are back to the previous case and this also makes the index inequality strict. 
The case of negative Morse-Bott tori is similar to positive Morse-Bott tori but with the signs reversed, so we will not repeat it. We remark the proof of Negative Morse-Bott tori is independent of the proof of positive Morse-Bott tori because when we compute $|T_{fixed,i}'|$ the trivial cylinders at negative and positive Morse-Bott tori are independent of each other. To prove $(g)$ and $(h)$. We already took care of the case a non-trivial curve that is asymptotic to a Reeb orbit corresponding to a critical point of $f$. We next consider the case of free ends. Let our curve be $C$ in some level of the cascade and consider its $+\infty$ free ends asymptotic to positive Morse-Bott tori. We have $k_{pos,i+1}' = R_{pos,i+1}$, this implies each free Reeb orbit as $s\rightarrow +\infty$ is approached by a unique positive end of $C$. The ECH index also imposes partition conditions of $(1,..,1)$, hence this end is simply covered. Recalling $\cas{\mathbf{u}}$ is reduced, any $s\rightarrow -\infty$ free end of curves above $C$ arrived at by following the gradient flow is also simply covered. This proves $(g)$ and $(h)$ for positive Morse-Bott tori. The result for negative Morse-Bott tori holds by considering the negative free ends of $C$. \end{proof} We would also like a way to prove that provided our transversality conditions hold (i.e. $J$ is good), $J_\delta$-holomorphic curves of ECH index one degenerate into cascades of height one, as opposed to cascades of greater height. To do this we need a slight strengthening of the above index inequality where we allow fixed trivial cylinders with higher multiplicities. \begin{proposition} Let $\alpha_1$ and $\alpha_n$ be ECH Morse-Bott generators, except we relax the condition on multiplicities of fixed/free ends - they are allowed to be arbitrary. Let $\cas{u}$ be a cascade of height one connecting from $\alpha_1$ to $\alpha_n$. 
Then we have the inequality \[ Ind(\cas{\mathbf{\tilde{u}}}) \leq I(\cas{\mathbf{u}}) - 2\delta (\cas{\mathbf{u}})-1 \] \end{proposition} \begin{proof} We repeat the proof of index inequality in Proposition \ref{prop:indexinequality} and observe the inequalities concerning the intermediate level curves continue to hold. The issue is in allowing fixed trivial cylinders of high multiplicities allowed by $\alpha_1$ and $\alpha_n$ at the top and bottom levels. We first focus on what happens near positive Morse-Bott tori. For simplicity we fix $\gamma$ a Reeb orbit corresponding to the hyperbolic orbit in a positive Morse-Bott torus and consider what happens to ends of holomorphic curves with fixed ends at $\gamma$. As we have seen above the problematic term comes from the inequality \[ R_{pos,2}' + V_{pos,2}' \leq k_{pos,2} +k_{pos,2}' -R_{pos,2} + l_{fixed,pos,-\infty,1} + l_{fixed,pos,+\infty,1} +|T_{fixed,1}'|, \] where $V_{pos,2}'$ can contain fixed trivial cylinders mandated by $\alpha_1$ that appear in $V_{2,pos}'$ but does not appear in $|T_{fixed,1}'|$. For simplicity we consider $T_{\gamma,fixed}$ appearing at $\gamma$ of multiplicity $N$. In order for this to make a contribution to $V_{2,pos}'$ instead of $R_{2,pos}'$, we assume that $u^1$ has no nontrivial end that are asymptotic to $\gamma$ as $s\rightarrow -\infty$. We recall we would like to prove an inequality of the form \[ I(\cas{u})-1 \geq Ind(\cas{\mathbf{\tilde{u}}}) +2\delta(\cas{u}) \] Consider for $i=2,...,n-1$, the nontrivial currents $(C_{i,j},m_{i,j}) \subset u^i$, where we think of $m_{i,j}$ as the multiplicity of $C_{i,j}$ (since we are working in the nonreduced case). We assume each $C_{i,j}$ has $l_{i,j}$ ends asymptotic to $\gamma$ as $s\rightarrow \infty$, and suppose $C_{i,j}$ has total multiplicity $n_{i,j}$ asymptotic to $\gamma$ as $s\rightarrow \infty$. Finally let $T_{fixed,n-1,\gamma}$ denote the number of trivial cylinders at the last level $u^{n-1}$ at $\gamma$. 
We have the inequality \[ N-\sum_{i,j} m_{i,j} n_{i,j} \leq |T'_{fixed,n-1,\gamma}|. \] Let's consider $I(C_{i,j})$, by virtue of it being nontrivial and the writhe inequality, $\sum_j I(C_{i,j}) \geq \sum_{j}(n_{i,j}+1) $. This is coming from the fact in order for the $C_{i,j}$ to exist its Fredholm index must be greater or equal to one, and at the ends of $\gamma$ the ECH index is treated as free ends whereas the Fredholm index is treated as fixed ends. So in passing from $u^i$ to $\tilde{u}^i$ we decreased the ECH index by at least $\sum_{i,j}(m_{i,j}-1)(n_{i,j} +1) $. We next compare the ECH index of reduced cascade with its Fredholm index, in particular we consider the inequalities \[ I(\tilde{u^i}) - Ind (\tilde{u^i})+ R_{pos,i+1}' + V_{pos, i+1}' -[ l_{fixed,i}+|T_{fixed,i}|+k_{pos, i+1} + k'_{pos,i+1} - R_{pos,i+1}] \geq 0 \] for $i =2,..,n-2$. We have that by virtue of the writhe inequality occurring at $\gamma$ across these levels, the $\gamma$ orbit's contribution is that the left hand side is at least $\sum_{j} n_{i,j}-l_{i,j}$ bigger than the right hand side. Finally, on the $u^{n-1}$ level, we originally had the inequality \[ |T'_{fixed,n-1}| \geq 0 \] In the above inequality we have included the $|T'_{fixed,n-1,\gamma}|$ term coming from the last level in our cascade contributed by $\gamma$, and the writhe bound for this level also implies this there is also an excess of the index inequality of size $\sum_{j} n_{n-1,j}-l_{n-1,j}$. Hence we can think of proving the index inequality as follows: there is a deficit of $N$ at the top level contributed purely by $\gamma$, and by making the inequalities of the lower levels strict, we can make up for it. In passing from nonreduced to reduced curve, the ``excess'' of ECH index is bounded below by $\sum_{i,j}(m_{i,j}-1)(n_{i,j} +1) $. 
The excess of comparing ECH index of reduced curves $C_{i,j}$ to their Fredholm index coming from writhe inequality is given by $\sum_{i,j} (n_{i,j}-l_{i,j})$, and the excess in the index inequality of various levels due to contributions to $l_{fixed,pos,+\infty,i}$ coming from $\gamma$ is precisely $\sum_{i,j} l_{i,j}$. And on the last level the excess is given simply by $|T'_{fixed,n-1,\gamma}|$. Hence the excess due to $\gamma$ is bounded below by \[ \sum_{i,j}(m_{i,j}-1)(n_{i,j} +1) + \sum_{i,j} (n_{i,j}-l_{i,j}) + \sum_{i,j} l_{i,j} + |T'_{fixed,n-1,\gamma}| \] Using the fact $N-\sum_{i,j} m_{i,j} n_{i,j} \leq |T'_{fixed,n-1,\gamma}|$, we see the excess outweighs the deficit at the top level, so fixed trivial cylinders at $\gamma$ will keep the overall index inequality intact. We can apply the same reasoning for every $\gamma$ at positive Morse-Bott tori. We next consider negative Morse-Bott tori. We assume $\gamma$ is a Reeb orbit on a negative Morse-Bott torus, and $\alpha_{n}$ assigns a fixed end of multiplicity $N$ to $\gamma$. We consider the overall inequality and show it still holds after we factor in the contributions from other terms. Let $|T_{fixed,1,\gamma}'|$ denote the number of fixed trivial cylinders located at $\gamma$ at the $u^1$ level. For $i =1,..,n-2$ we consider $(C_{i,j},m_{i,j})\subset u^i$ nontrivial curves that asymptote to $\gamma$ as $s\rightarrow -\infty$. We let $l_{i,j}$ denote the number of such ends at each level and $n_{i,j}$ denote the multiplicity. Then the same proof as before will show the inequality continues to hold. \end{proof} In fact, equality of the ECH index and the Fredholm index also enforces that the cascade is simple. We now take care of the case of height $k$ cascades.
\begin{proposition}\label{prop:height1} Consider a sequence of $J_{\delta_n}$-holomorphic ECH index one curves $u_n$ of bounded energy from $\alpha_1$ to $\alpha_{n}$ (as nondegenerate ECH generators) converging to a cascade $\cas{u}$ from $\alpha_1$ to $\alpha_n$ viewed as Morse-Bott ECH generators. Then $\cas{u}$ has height one. \end{proposition} \begin{proof} Suppose $\cas{u}$ is a height $k$ cascade, then it can be written as $k$ height $1$ cascades, which we write as $\cas{v_1},...,\cas{v_k}$. We recall that between cascades $\cas{v_i}$ and $\cas{v_{i+1}}$ their end asymptotics are connected by either infinite or semi-infinite gradient flows. We pass each to a cascade of currents, and to each cascade $\cas{\mathbf{v}_i}$ we assign two generalized ECH generators at its topmost and bottom-most level, which we write as $\alpha_i$ and $\alpha_{i+1}'$. For $\alpha_i$ we assign all the ends approaching the minimum of $f$ as fixed, and all others are free. For $\alpha_{i+1}'$ we consider all ends approaching the maximum of $f$ as fixed, and the rest are free. The exception to this rule is $\alpha_1$ and $\alpha_{k+1}'$ which we assign Morse-Bott ECH generators corresponding to the degenerating $J_\delta$-holomorphic curve. With this we can assign an ECH index to each cascade $I(\cas{v}_i)$. We can also assign a relative ECH index between the general ECH generators $\alpha_{i}$ and $\alpha_{i}'$, which we write as $I(\alpha_i',\alpha_i)$. This number is always $\geq 0$, and we illustrate it as follows. Let $\mathcal{T}$ be a Morse-Bott torus, and suppose coming from $\alpha_i$ there is multiplicity $n_1$ at the minimum of $f$ and $n_2$ away from minimum of $f$. From $\alpha_{i}'$ there is $n_1'$ multiplicity at the maximum of $f$, and $n_2'$ away from the maximum of $f$. Then we have the inequalities \[ n_1' \geq n_2 \] and \[ n_2'\leq n_1. \] Then we say the contribution to $I(\alpha_i',\alpha_i)$ from this Morse-Bott torus is $ (n_1-n_2')=n_1'-n_2 \geq 0$.
Then we add up this term for each Morse-Bott torus that appears in $\alpha_i$. Geometrically this is the total multiplicity of complete gradient trajectories flowing between $\cas{v_i}$ and $\cas{v_{i-1}}$ and has potentially nonzero contributions to the ECH index. Then the fact that the cascade came from an ECH index one curve implies \[ I(\cas{v}_1) + I(\alpha_{2}',\alpha_{2}) +...+ I(\cas{v}_k) =1 \] By the previous proposition each $I(\cas{v}_i)\geq 0$, with equality only if it consisted entirely of fixed trivial cylinders. Hence there is a unique $\cas{v_i}$ with ECH index 1, the rest have ECH index zero, and all $I(\alpha_i',\alpha_i)=0$. This means there can only be fixed trivial cylinders above and below $\cas{v_i}$ and there cannot be infinite gradient flows. This is equivalent to saying the cascade of currents is height one. \end{proof} The above gives a description of what ECH index one cascades look like from the perspective of currents; we now reverse the process, and use the above to understand all cascades of curves of ECH index one. We need to add back in the information that was lost from passing from curves to currents. We only care about the cascades of curves that resulted from degeneration of a nondegenerate connected ECH index one curve. Call this curve $C_\delta$. We observe the Fredholm index of $C_\delta$, which we denote by $\text{Fred Ind}(C_\delta)$, is equal to one. We assume as $\delta \rightarrow 0$, $C_\delta$ degenerates into a cascade of curves $\cas{u}$, and denote $\cas{\mathbf{u}}$ the resulting cascade of holomorphic currents. From the above we know $\cas{\mathbf{u}}$ is a cascade of currents of height one, however $\cas{u}$ could a priori be of arbitrary height, and the levels that are removed from $\cas{u}$ to form $\cas{\mathbf{u}}$ must all be branched covers of trivial cylinders occurring at critical points of $f$.
The first case we need to consider is if $\cas{\mathbf{u}}$ is empty, then this implies that $\cas{u}$ consists purely of branched covers of trivial cylinders. To be precise $\cas{u}$ may contain many levels that consists of branched covers of trivial cylinders, and levels that begin and end on critical point of $f$, however it may also contain levels where the trivial cylinders (branched covered or not) are away from critical points of $f$. Here we allow levels where there is only a single unbranched cylinder away from critical points of $f$. We assume $C_\delta$ is connected. If at level $i$ a trivial cylinder is at the critical point of $f$ corresponding to elliptic Reeb orbit (hyperbolic for negative Morse-Bott torus) then all levels above $i$ the trivial cylinders that connected to the original cylinder will be at the same Reeb orbit. Similarly if at level $i$ a trivial cylinder is at the hyperbolic orbit (resp elliptic orbit for negative Morse-Bott torus) then all the trivial cylinders below this level connecting to this original (potentially branched cover of) cylinder will also be at the same Reeb orbits. If all the levels of $\cas{u}$ are at the same Reeb orbit which is also a critical point, then $u$ came from a branched cover of trivial cylinder in the nondegenerate case. If this is not the case, then remove the top most and bottom most levels until none of the trivial cylinders in $\cas{u}$ begin/end on critical point of $f$. Then as currents we don't care where the branched points are, so we can think of $u'$ as a cascade of currents with only 1 level. Then the ECH index of $\cas{\mathbf{u}}$ is equal to one, which implies $\cas{\mathbf{u}}$ consists of a free trivial cylinder with multiplicity one. Hence the same must be true of $\cas{u}$ and there are no top/bottom branch covers. We now turn our attention to the case where $\cas{\mathbf{u}}$ is nonempty. 
We shall use the fact the Fredholm index of $C_\delta$ is one to rule out configurations of height $> 1$. We observe the trivial cylinders on levels above/below $\cas{\mathbf{u}}$ admit the following description: \begin{proposition} \begin{enumerate} \item Let $\mathcal{T}$ denote a positive Morse Bott torus contained in the top level of $\cas{\mathbf{u}}$. For curves on the top level of $\cas{\mathbf{u}}$, as $s\rightarrow +\infty$ all free ends have multiplicity one, and avoid critical point of $f$. The fixed end can only have multiplicity one. Hence all branched covers of trivial cylinders above this level can only happen at the critical point of $f$ corresponding to the elliptic orbit. Moreover, because $C_\delta$ obeys partition conditions, the top most level in $\cas{u}$ of the stack of branched trivial cylinders has partition conditions $(1,..,1)$. \item Let $\mathcal{T}$ denote a negative Morse Bott torus contained in the top level of $\cas{\mathbf{u}}$, as $s\rightarrow +\infty$. The positive free end of the top level of $\cas{\mathbf{u}}$ has multiplicity 1, so there cannot be branched cover of trivial cylinder at the critical point of $f$ corresponding to the hyperbolic orbit. The fixed end at the critical point of $f$ corresponding to the elliptic orbit can have a stack of branched cover of trivial cylinders on top of it on height levels above $u'$, and again by partition conditions on $C_\delta$ the top most level is hit by partition condition $(n)$. \item Let $\mathcal{T}$ denote a positive Morse Bott torus contained in the bottom level of $\cas{\mathbf{u}}$. The free end has multiplicity one, so there cannot be branched covers of trivial cylinders at the critical point of $f$ corresponding to the hyperbolic orbit. 
The fixed end at critical point of $f$ corresponding to elliptic end can have a stack of branched cover of trivial cylinders below it on height levels below $u'$, and again by partition conditions on $C_\delta$ the top most level is hit by partition condition $(n)$. \item Let $\mathcal{T}$ denote a negative Morse Bott torus contained in the bottom level of $\cas{\mathbf{u}}$. As $s\rightarrow -\infty$ all free ends have multiplicity one, and avoid the critical points of $f$. The fixed end can only have multiplicity one. Hence all branched covers of trivial cylinders above this level can only happen at the critical point of $f$ corresponding to the elliptic orbit. Moreover, because $C_\delta$ obeys partition conditions, the bottom most level (in terms of height) of the stack of branched trivial cylinders has partition conditions $(1,..,1)$. \end{enumerate} \end{proposition} In light of the above, we can compute the topological Fredholm index of $C_\delta$ via the following procedure: First consider the height level corresponding to $\cas{\mathbf{u}}$, we know all trivial cylinders connecting between nontrivial curves are simply covered, so all the possible branched covers that appear on this height level are chains of trivial branched covers of cylinders that connect to the top and bottom levels of $\cas{\mathbf{u}}$. We then create two additional height levels, one above $\cas{\mathbf{u}}$, denoted by $\overline{\cas{\mathbf{u}}}$ and one below $\cas{\mathbf{u}}$, denoted by $\underline{\cas{\mathbf{u}}}$, and push all branch points of trivial cylinders that appear in $\cas{\mathbf{u}}$ onto these 2 levels $\overline{\cas{\mathbf{u}}}$,$\underline{\cas{\mathbf{u}}}$, so that all trivial cylinders that appear in $\cas{\mathbf{u}}$ have no branch point (though they may be multiply covered), and hence are transversely cut out. 
We recall we assign $Ind(\cas{\mathbf{u}})$ as the dimension of the moduli space that $\cas{\mathbf{u}}$ lives in, viewed as a cascade of currents. Then the Fredholm index of $C_\delta$ is computed as: \begin{align*} &Ind(C_\delta) =\\ & Ind(\cas{\mathbf{u}})+1 -\chi (\overline{\cas{\mathbf{u}}}) -\chi (\underline{\cas{\mathbf{u}}}) \end{align*} Note by the ECH index assumption $Ind(\cas{\mathbf{u}})=0$, so it will enforce no branched cover of trivial cylinders appear. Hence we have proved the following proposition: \begin{proposition} \label{nice cascades} Suppose $J$ is chosen to be good. If $C_\delta$ is a sequence of connected nontrivial ECH index one curves of bounded energy that converges to a cascade of curves, $\cas{u}$, then either \begin{itemize} \item $\cas{u}$ is a free cylinder of multiplicity one \item $\cas{u}$ is the same as a height one cascade of currents of ECH index one, described above, and all trivial cylinders that appear in levels of $\cas{u}$ are either unbranched chains of fixed trivial cylinders, or trivial cylinders over a Reeb orbit of multiplicity one. \end{itemize} In the latter case, $\cas{u}$ does not contain a sequence of fixed trivial cylinders that do not connect to any nontrivial $J$-holomorphic curve. See Convention \ref{nontrivial}. \end{proposition} We call cascades of curves of ECH index one of the form stated in the above theorem \emph{good cascades of ECH index 1}. Then this is more or less a complete characterization of ECH index one cascades we should count in the Morse-Bott case provided we can achieve enough transversality. Assuming transversality conditions, we quote a theorem from \cite{Yaocas} to show ECH index one cascades can be glued uniquely (up to translation) to ECH index one curves.
\begin{theorem}[Theorem 3.5 in \cite{Yaocas}] \label{gluing theorem}Assuming transversality conditions \ref{assumption}, any given ECH index one cascade can be glued uniquely to ECH index one $J_\delta$-holomorphic curves for sufficiently small values of $\delta >0$ up to translation in the symplectization direction. \end{theorem} The key is to note that ECH index one and transversality imply all of the cascades above are transverse and rigid, as in Definition 3.4 of \cite{Yaocas}, and hence can be glued. The final ingredient we need is to show that assuming $J$ is good, the set of good ECH index one cascades is finite. To do this we need the notion of $J_0$ index for cascades. \section{Finiteness} \label{Finite} In order to prove the differential in Morse-Bott ECH is well defined we need to prove that for given generators $\alpha,\beta$ the set of good ECH index one cascades from $\alpha$ to $\beta$ is finite. For $J$ chosen to be good, we already know this set is a zero dimensional space, hence it suffices to prove that it is compact. To this end we develop the analogue of $J_0$ index in the Morse-Bott world. We start with 1-level cascades, then build upwards to $n$ level cascades. In this section we assume $J$ is good throughout. \subsection{Level 1 cascades} Consider a level 1 cascade of ECH index 1 from generator $\alpha$ to $\beta$. In anticipation of multiple level ECH index 1 cascades, here we relax some (but not all) of the conditions on $\alpha,\beta$ to remove conditions that require certain free/fixed ends (depending on whether we are on a positive/negative Morse-Bott torus) to only have multiplicity 1. This corresponds to relaxing the condition in the nondegenerate case to only allow hyperbolic orbits of multiplicity one (see Theorem \ref{mbgenerator}).
We recall the consequences of generic $J$: \begin{enumerate} \item For positive Morse-Bott tori, as $s\rightarrow \infty$, all free ends are disjoint and are asymptotic to Reeb orbits in the torus with multiplicity 1. Let $n^{pos,free}_+$ denote the number of such orbits. \item For positive Morse-Bott tori, the fixed ends at $s\rightarrow \infty$ are disjoint from the free ends. They are hit with partition condition $(1)$. Suppose there are $N^{pos,fix}_+$ such ends. \item For positive Morse-Bott tori, as $s\rightarrow -\infty$ all free ends are disjoint and cover the Reeb orbits in the torus with multiplicity 1. Let $n^{pos,free}_-$ denote the number of such orbits. \item For positive Morse-Bott tori, as $s\rightarrow -\infty$, all fixed ends have partition conditions $(n)$. Suppose there are $N^{pos,fix}_-$ such ends, each with multiplicity $n^{pos,fix}_{-,j}$ \item For negative Morse-Bott tori, as $s\rightarrow \infty$, all free ends are disjoint and cover the Reeb orbits in the torus with multiplicity 1. Let $n^{neg,free}_+$ denote the number of such orbits. \item For negative Morse-Bott tori, the fixed ends at $s\rightarrow \infty$ are disjoint from the free ends. They are hit with partition conditions $(n)$. Suppose there are $N^{neg,fix}_+$ such ends with multiplicity $n^{neg,fix}_{+,j}$ \item For negative Morse-Bott tori, as $s\rightarrow -\infty$ all free ends are disjoint and cover the Reeb orbits in the torus with multiplicity 1. Let $N^{neg,free}_-$ denote the number of such orbits. \item For negative Morse-Bott tori, as $s\rightarrow -\infty$ there is only 1 fixed end for each Morse-Bott tori, and has partition conditions $(1)$. 
Let there be $N^{neg,fix}_-$ such ends in total. \end{enumerate} \begin{definition} For a level 1 good ECH index 1 cascade $C$ connecting generator $\alpha$ to $\beta$, we define: \begin{equation} J_0(C,\alpha,\beta) := -c_\tau(C) + Q_\tau(C,C) - [\sum_j (n^{pos,fix}_{-,j} -1)]- [\sum_j (n_{+,j}^{neg,fix}-1)] \end{equation} \end{definition} We observe that $J_0(C,\alpha,\beta)$ can be computed from the knowledge of $\alpha,\beta$ and the relative homology class of $C$ alone. We also remark that the $J_0$ index can similarly be defined for nontrivial curves of higher ECH index, as long as they satisfy the long list of partition conditions we listed above, and the same genus bound below holds. We shall need this fact for the proof of finiteness below. Then we have the following genus bound: \begin{proposition} Let $g$ denote the genus of a holomorphic curve $C$. Then we have the upper bound \begin{equation} -\chi(C) \leq J_0(C,\alpha,\beta). \end{equation} \end{proposition} \begin{proof} We recall the adjunction formula in our case says \[ c_\tau(C)= \chi(C) +Q_\tau(C) +w_\tau(C)-2\delta(C) \] Plugging this into $J_0$ yields \[ J_0(C,\alpha,\beta) = -\chi(C) -w_\tau(C) -[\sum_j (n^{pos,fix}_{-,j} )-1]- [\sum_j (n_{+,j}^{neg,fix}-1)] +2\delta(C) \] hence it suffices to prove \[ -w_\tau -[\sum_j (n^{pos,fix}_{-,j} )-1]- [\sum_j (n_{+,j}^{neg,fix}-1)] \geq 0. \] We break this into cases. If $C$ is a trivial cylinder, then this is trivial. If $C$ has a nontrivial component along with fixed trivial cylinders, we only consider the nontrivial component, also denoted by $C$. All of the computations below follow from the computations of the writhe bound: \begin{itemize} \item At a positive Morse-Bott torus \begin{itemize} \item $s\rightarrow \infty$, free end. $-w_\tau \geq 0$ because the multiplicity is one. \item $s\rightarrow \infty$, fixed end $-w_\tau \geq 0$ because multiplicity is one. \item $s\rightarrow -\infty$, free end. $w_\tau \geq 0 $ by multiplicity.
\item $s\rightarrow -\infty$, for a given fixed end $j$, the writhe at this end satisfies $w_\tau \geq n_{-,j}^{pos,fix}-1$. \end{itemize} \item At a negative Morse-Bott torus \begin{itemize} \item $s\rightarrow \infty$, free end. $-w_\tau \geq 0 $ due to multiplicity constraints. \item $s\rightarrow \infty$, for a single fixed end $j$, the writhe satisfies $-w_\tau \geq n_{+,j}^{neg,fix}-1$. \item $s\rightarrow -\infty$, free end. $w_\tau \geq 0 $ due to multiplicity constraints. \item $s\rightarrow -\infty$, fixed end. $w_\tau \geq 0$ by multiplicity. \end{itemize} \end{itemize} Combining all of the above we conclude our inequality. \end{proof} \subsection{Multiple level cascades} We now explain how to generalize the definition of $J_0(C,\alpha,\beta)$ to good ECH index one cascades of arbitrary number of levels. Consider an $n$ level cascade $\cas{u}= \{u^1,..,u^n\}$ of ECH index one with input $\alpha$ and output $\beta$. Recall we have so called fixed chains of trivial cylinders, i.e.\ a chain of trivial cylinders that all begin/end on a fixed end orbit of either $\alpha$ or $\beta$ until this chain of trivial cylinders meets a nontrivial holomorphic curve in one of the intermediate levels (which has a fixed end at said Reeb orbit). After we remove all of these kinds of trivial cylinders, the number $J_0$ is defined for each of the intermediate cascade levels, which we denote by $J_0(u^i)$; we then define the $J_0$ of the entire cascade as \begin{definition} \begin{equation} J_0(\cas{u}) := \sum J_0(u^i) \end{equation} \end{definition} We observe this definition also only depends on the relative homology class and $\alpha,\beta$. Recall the Euler characteristic of the cascade $\chi (\cas{u})$ is the Euler characteristic of the surface obtained if we glued a cylinder between each matching end of $u^i$ and $u^{i+1}$; clearly then the Euler characteristic of the cascade is the sum of the Euler characteristics of each of its components.
Applying the proposition for level one cascades we get \begin{proposition} \[ -\chi (\cas{u})\leq J_0(\cas{u}). \] \end{proposition} \subsection{Finiteness} We finally prove \begin{theorem}\label{thm:finiteness} Given generators $\alpha, \beta$, the moduli space of good ECH index 1 cascades from $\alpha$ to $\beta$ is compact. \end{theorem} \begin{proof} Let $\{\cas{u}_m\}$ be a sequence of good ECH index one cascades from $\alpha$ to $\beta$. Each $\cas{u}_m$ is a cascade of the form $\{u_{m}^{n}\}_n$. We show $\{\cas{u}_m\}$ has a convergent subsequence. From the Morse-Bott assumption there is an upper bound on how many cascade levels there are, so we pass to a subsequence where they all have $N$ levels. For each $n=1,..,N$, we apply the compactness for holomorphic currents from \cite{bn} to each of $u^{m}_{n}$. To see this, note for fixed $n$, the energy constraint of $\{\cas{u}_m\}$ and Morse-Bott condition implies there are only finitely many possible choices for the positive and negative asymptotics of $u^{m}_n$, so we pick a subsequence (also denoted by $u^{m}_n$) where the positive and negative asymptotics of $u^{m}_n$ are independent of $m$. Here, by positive and negative asymptotics of $u^{m}_n$ we simply mean the Morse-Bott tori $\mathcal{T}$ that $u^{m}_n$ is asymptotic to at its positive/negative ends, and the total multiplicity of Reeb orbits at each such Morse-Bott torus. Then using the Gromov compactness for currents (see \cite{bn}) applied to $\{u^{m}_n\}$ we conclude we can pass to a further subsequence of $\{u^{m}_n\}$ (for all $n=1,..,N$) with the same relative homology class (our notion of relative homology class here is in $\mathcal{H}_2 (-,-,Y)$).
Now for each $u^{m}_n$ simply the knowledge of its asymptotics (which we can read off directly: by virtue of being part of an ECH index one cascade all the ends that avoid the critical points of $f$ are free, and those at critical points of $f$ are fixed) and its relative homology class provides an upper bound on its $J_0$ index. This upper bound on $J_0$ then provides a bound on the genus of each $u_n^m, n=1,...,N$. With the genus bound we can apply SFT compactness: for fixed $n$, we observe $u^{m}_{n}$ cannot break into a building, for that would yield (if we view $\cas{u}_m$ as cascade of currents) an ECH index 1 cascade of currents with $T_i=0$, which does not exist by genericity conditions. Similarly ruled out by genericity conditions are overlapping free ends and free ends migrating to fixed ends. The $u^n_m$ also cannot converge to a multiple cover of a nontrivial curve, for that would yield an ECH cascade of currents of index 1 with multiple covers of a nontrivial curve, which is ruled out by genericity. Hence we conclude that $\{\cas{u}_m\}$ has a subsequence that converges to an ECH index 1 cascade, and hence we have compactness. \end{proof} \section{Computing ECH in the Morse-Bott setting using cascades}\label{section:computing using cascades} We now define the Morse-Bott ECH chain complex (over $\mathbb{Z}_2$). We write the chain complex as \[ C_*^{MB}(\lambda,J) := \bigoplus_{\Theta_i} \mathbb{Z}_2\langle \Theta_i \rangle . \] Here $\Theta_i = \{(\mathcal{T}_j,\pm,m_j)\}$ denotes a collection of Morse-Bott ECH generators.
Suppose we can choose our $J$ to be good; the differential, which we write as $\partial_{MB}$, is defined as \begin{equation} \langle\partial_{MB} \Theta_1, \Theta_2 \rangle := \left\lbrace \begin{tabular}{@{}l@{}} $\mathbb{Z}_2$\, \textup{count of J-holomorphic cascades}\, $\mathcal{C}$\, \textup{of ECH index} \,$I=1$,\\ \textup{so that as} $s\rightarrow +\infty, \, \mathcal{C}$ \,\text{approaches}\, $\Theta_1$ \textup{and as} $s\rightarrow -\infty$, \\ $\mathcal{C}$\, \text{approaches} $\Theta_2$. \end{tabular} \right\rbrace \end{equation} We clarify that in the above definition the cascade $\mathcal{C}$ must be decomposable into $\mathcal{C}_0 \sqcup \mathcal{C}_1$, where $\mathcal{C}_0$ is a (potentially empty) collection of fixed trivial cylinders with multiplicity, and $\mathcal{C}_1$ is a good ECH index one cascade. We note that if $(T,n)$ is an element of $\mathcal{C}_0$ and it is positively asymptotic to the Morse-Bott ECH generator $(\mathcal{T},n,\pm)$, then it is also negatively asymptotic to the Morse-Bott ECH generator $(\mathcal{T},n,\pm)$ (thus far we only considered nontrivial cascades when we talked about their asymptotics). We note by Theorem \ref{thm:finiteness} the operator $\partial_{MB}$ is well defined. \begin{theorem}\label{theorem:cobordism in general} Assuming $J$ is good, the chain complex $(C_*^{MB},\partial_{MB})$ computes $ECH(Y,\xi)$. \end{theorem} Before we prove this theorem we choose a generic family of almost complex structures $J_\delta$. Recall that the traditional definition of ECH requires choosing a generic $J$ from a residual subset of almost complex structures. For fixed $\delta>0$, we say $J_\delta$ is ECH adapted if it is an almost complex structure with which the ECH chain complex is well defined.
\begin{definition} Consider $\delta \in (0,\delta_0]$, we say a path of almost complex structures $J_\delta$, each compatible with $\lambda_\delta$ for any $\delta \in (0,\delta_0]$, is generic if for any collection of Reeb orbits $\alpha,\beta$, the moduli space \begin{equation} \mathcal{M}(\alpha,\beta,\delta):=\{ (u,\delta) | \bar{\partial}_{J_\delta} u =0, u \, \textup{somewhere injective}, \lim_{s\rightarrow +\infty} u \, \textup{converges to} \,\,\alpha, \lim_{s\rightarrow -\infty} u\, \textup{converges to} \,\, \beta\} \end{equation} is cut out transversely. \end{definition} \begin{theorem}\label{generic path J} There is a small enough $\delta_0>0$ so that there is a generic path of almost complex structures $J_\delta$, $\delta \in (0,\delta_0]$ so that: \begin{itemize} \item $J_{\delta_0}$ is ECH adapted. \item $\lim_{\delta \rightarrow 0} J_\delta = J$, where $J$ is a generic almost complex structure we have chosen above to count ECH index one cascades. \item $|J-J_\delta| \leq C\delta$ in $C^k$ norm, $k>100$, and $J_\delta$ takes the prescribed form near a small fixed neighborhood of Morse-Bott tori described in Section \ref{degenerations}. \item For a residual subset $S \subset (0,\delta_0]$, for all $\delta \in S$, $J_\delta$ is ECH adapted. \end{itemize} \end{theorem} \begin{proof} This is a standard application of the Sard-Smale theorem. \end{proof} \begin{proof}[Proof of theorem \ref{theorem:cobordism in general}] We observe for fixed $L>0$, there are only finitely many ECH index 1 cascades of energy $<L$. We fix $\delta_0$ small enough so that for all $\delta \in (0,\delta_0]$ the cascades can be glued (uniquely in our sense specified) to ECH index 1 curves. We assume $\delta_0$ is such that $J_{\delta_0}$ is ECH adapted.
We recall we have chosen a generic family $J_\delta, \delta\in[0,\delta_0]$ so that the space: \[ \{ (u,J_{\delta}) \,| \, \delta \in (0,\delta_0]\, \, u \, J_\delta \, \text{holomorphic, somewhere injective ECH index 1}\} \] modulo translation is a 1-manifold (not necessarily compact). An SFT compactness theorem (\cite{Yaocas,BourPhd,SFT}) tells us the $\delta =0$ ends of this manifold are precisely the good ECH index one cascades. We recall there is a residual set $A \subset (0,\delta_0]$ so that for all $\delta \in A$, $J_\delta$ is ECH adapted and the ECH homology can be computed by counting ECH index one $J_\delta$ holomorphic curves for $\delta \in A$. We make the following observation: if $u_\delta$ and $v_\delta$ are $J_\delta$-holomorphic curves of ECH index one that converge to the same cascade as $\delta \rightarrow 0$, by the gluing theorem, for small enough $\delta$, $u_\delta$ and $v_\delta$ are in fact the same curve up to $\mathbb{R}$ translation. Then we claim we can find small enough $\delta' \in A$ so that the cobordism from $\delta =0 $ to $\delta'$ built by $\{ (u,J_{\delta}) \,| \, \delta \in (0,\delta']\, \, u \, J_\delta \, \text{holomorphic, somewhere injective ECH index 1}\}$ is the trivial cobordism. Suppose not, then for arbitrarily small $\delta$ we can find $u_\delta$, an ECH index one somewhere injective curve, that does not come from gluing; take $\delta \rightarrow 0$ and after taking a subsequence, $u_\delta$ degenerates into a good ECH index one cascade, but by our observation it must have come from a curve obtained by gluing together an ECH index one cascade, a contradiction. \end{proof} \section{ECH index one curves of genus zero} \label{section:ECH index one curves of genus zero} We showed in the previous section that when there is enough transversality for cascades, the cascades of ECH index one take a particularly nice form. However this is not always achievable, except in special circumstances.
In this section and the next we outline some special circumstances in which transversality can always be achieved. Here we consider the case where all ECH index one curves in the perturbed picture must have genus zero. This is the case for $T^3$ and some toric domains. We shall use a slightly different description of cascades that do not allow for the presence of trivial cylinders. We will call this description ``tree-like'' cascades and will be described below. The reason we can use this description is that if the curve has genus zero, we can do the gluing without requiring that between each adjacent cascade levels there is a \emph{single} flow time parameter; instead we can assign a different flow time between each pair of adjacent nontrivial curves. We use the following convention to represent our holomorphic curves. We use a vertex to represent a $J$ holomorphic curve of genus zero, and use directed edges to denote the positive and negative punctures of the curve. Edges directed away from the $J$-holomorphic curve correspond to positive punctures, and edges directed towards the vertex correspond to negative punctures. The figure below illustrates how we go from $J$-holomorphic curve to vertex with directed edges. \begin{figure} \caption{Passing from genus zero curve to vertex with edges} \end{figure} Then a \emph{height one cascade with tree-like compactifications} from Morse-Bott ECH generator consists of the following data: \begin{enumerate} \item A collection of vertices $\{v_1,..,v_n\}$ each equipped with the data of inward and outward pointing edges. Each vertex has at least one outgoing edge. Each edge is also equipped with the information of which Reeb orbit it lands on. 
\item Given two vertices $v_i$ and $v_j$, if we can find a Morse-Bott torus $\mathcal{T}$ so that a positive puncture of $v_i$ lands on $\gamma$, and if we follow the gradient flow for time $T_{i,j}\in [0,\infty)$ along $\gamma$ we arrive at a negative puncture of $v_j$ landing on the corresponding orbit, then we say it is possible to connect $v_i$ and $v_j$ via the given pair of edges. The data of a height one cascade in this compactification consists of choices of connections between the vertices of $\{v_1,..,v_n\}$, so that after we connect the edges, we obtain a connected tree. See figure below for an example. We call these connections internal connections. \item The positive punctures of $\{v_1,..,v_n\}$ that are not assigned internal connections are assigned free/fixed as per required by ECH generator $\alpha_1$, and likewise for negative punctures and $\alpha_n$. \begin{figure} \caption{Cascade with tree like compactification. The green arrow denote finite gradient flow lines.} \end{figure} \end{enumerate} For genus zero $J_\delta$-holomorphic curves degenerating into a cascade with our previous compactification, we can easily pass to a tree like compactification by removing all the trivial cylinders. Given a cascade of height one with tree like compactification, which we write as $\cas{u}=\{v_1,..,v_n\}$. We can compute its ECH index as follows: we treat all edges participating in internal connections as free, then the ECH index is simply given by \[ I(\cas{u}) = I(v_1)+....+I(v_n)-n+1. \] In order to talk about Fredholm index we also need to pass to the reduced cascade $\cas{\tilde{u}}$ consisting of curves $\{\tilde{v_1},..,\tilde{v_n}\}$. 
If in our tree like compactification all free ends assigned by $\alpha_1$ and $\alpha_n$ as well as all internal connections avoided critical points of $f$, then the reduced cascade lives in a transversely cut out moduli space of dimension \[ \sum_i Ind(\tilde{v_i})-1 \] since being tree like removes the condition of needing to have the same flow time between adjacent cascade levels. Hence to achieve the necessary transversality conditions to count ECH index one cascades, we choose a generic $J$ so that \begin{enumerate} \item For any punctured sphere that is the domain of a $J$-holomorphic curve, we endow it with an assignment of incoming and outgoing punctures, and for each end we assign a free/fixed end; and if an end is assigned fixed it must land on a Reeb orbit corresponding to a critical point of $f$ under the $J$-holomorphic map; and if an end is free it must avoid critical points of $f$. Then all moduli spaces of somewhere injective $J$ holomorphic curves with the above information are transversely cut out with dimension given by the index formula. \item For any two curves $v_1$ and $v_2$ satisfying the above condition and both rigid, if their free ends land on the same Morse-Bott torus from opposite sides (one as a positive puncture the other as a negative puncture), then they do not land on the same Reeb orbit in the Morse-Bott family (we only care about where they land on the Morse-Bott torus and ignore information of multiplicity, i.e. even if they cover the same Reeb orbit of different multiplicity on their free ends, this is prohibited). \end{enumerate} The above conditions are easily achieved by choosing a generic $J$ by classical transversality methods. We next consider cascades of height one. 
We observe we have the inequality (if we treat all internal connections as free for both ECH index and Fredholm index) \[ I(\cas{u}) -n \geq \sum Ind(\tilde{v_i}) -1 \geq 0 \] since each $\tilde{v}_i$, by virtue of it existing and transversality conditions, must have Fredholm index $\geq 0$. ECH index one implies $Ind(\tilde{v_i}) =1$, hence all these curves are rigid, and embedded. By the above genericity of $J$ all flow times are nonzero, and the cascade itself is already reduced. All free ends and ends coming from internal connections avoid critical points of $f$. Also observe that by the partition conditions derived in previous sections, between internal connections the participating edges can only cover Reeb orbits with multiplicity one. Then suppose a sequence of genus zero ECH index one $J_\delta$ holomorphic curves from $\alpha_1$ to $\alpha_n$ degenerates into a cascade with tree like compactification for arbitrary height. This just means we allow internal connections adjoined to each other with semi-infinite or infinite gradient trajectories. Then for each internal connection whose flow time is infinite, we separate them into two different cascades. Then we get a collection of height one cascades each of which is tree like. We write them as $\cas{u}_1,...,\cas{u}_k$. Then we can assign generalized ECH generators to ends of $\cas{u}_i$ as before, and the ECH index one condition imposes \[ I(\cas{u}_1) + I(\cas{u_2}) +\dots +I(\cas{u_k}) + \textup{relative difference between ECH generators} =1 \] By relative difference between ECH generators we mean the same construction as in Proposition \ref{prop:height1}. We have for all height one cascades that \[ I(\cas{u_i})-1 \geq Ind(\cas{\tilde{u_i}}) \geq 0 \] Hence there is either a unique cascade $\cas{u_i}$ of index zero, or the entire cascade is just one gradient flow line.
By considerations of topological Fredholm index we also rule out additional branched covers of trivial cylinders at the top/bottom level of the cascade with tree-like compactifications. Hence using the above description we have the following proposition. \begin{proposition} In the nondegenerate case, ECH index one curves of genus zero degenerate into ECH index one tree like cascades that are reduced and transversely cut out. \end{proposition} We call the type of cascades of the above proposition ``good ECH index one tree like cascades'', because we eliminated branched covers of trivial cylinders via topological Fredholm index. As in the previous section we choose $J_\delta$ to be a generic family of almost complex structures satisfying the same conditions as Theorem \ref{generic path J}. We then quote a gluing theorem from \cite{Yaocas}. \begin{theorem} Let $\cas{u}$ be a good ECH index one cascade of genus zero as per above, then for small enough $\delta>0$ there exists a unique (up to translation) $J_\delta$-holomorphic curve in an $\epsilon$ neighborhood of this cascade. \end{theorem} \begin{proof} The main difference is that because the whole curve is genus zero, we no longer need to make sure the pregluing is well defined by restricting our choice of asymptotic vectors to $\hat{\Delta}$, as in Proposition 8.28 in \cite{Yaocas}. \end{proof} We define a chain complex as before. We write the chain complex as \[ C_*^{MB,tree}(\lambda,J) := \bigoplus_{\Theta_i} \mathbb{Z}_2\langle \Theta_i \rangle . \] We use the superscript ``tree'' to denote the fact we are counting tree like cascades. As before $\Theta_i = \{(\mathcal{T}_j,\pm,m_j)\}$ denotes a collection of Morse-Bott ECH generators. After we choose a generic $J$, all good tree like cascades are transversely cut out.
Then we define the differential $\partial^{tree}_{MB}$ to be \begin{equation} \langle\partial^{tree}_{MB} \Theta_1, \Theta_2 \rangle := \left\lbrace \begin{tabular}{@{}l@{}} $\mathbb{Z}_2$\, \textup{count of tree like J-holomorphic cascades}\, $\mathcal{C}$\, \textup{of ECH index} \,$I=1$,\\ \textup{so that as} $s\rightarrow +\infty, \, \mathcal{C}$ \,\text{approaches}\, $\Theta_1$ \textup{and as} $s\rightarrow -\infty$, \\ $\mathcal{C}$\, \text{approaches} $\Theta_2$. \end{tabular} \right\rbrace \end{equation} As before, we clarify that the cascade $\mathcal{C}$ must be decomposable into $\mathcal{C}_0 \sqcup \mathcal{C}_1$, where $\mathcal{C}_0$ is a (potentially empty) collection of fixed trivial cylinders with multiplicity, and $\mathcal{C}_1$ is a good ECH index one tree like cascade. \begin{theorem}\label{theorem:cobordism for genus 0} Suppose $J$ is chosen to be generic so that all ECH index one good tree like cascades are transversely cut out, and we can choose a generic family of perturbations to $J$, which we write as $J_\delta$, that meets the conditions of Theorem \ref{generic path J}. We further assume that for small enough $\delta>0$, all $J_\delta$-holomorphic curves of ECH index one have genus zero. Then the chain complex $(C_*^{MB,tree}, \partial^{tree}_{MB})$ computes $ECH(Y,\xi)$. \end{theorem} \begin{proof}[Proof of Theorem \ref{theorem:cobordism for genus 0}] The same proof as in Theorem \ref{theorem:cobordism in general} works. \end{proof} \section{Applications to concave toric domains} \label{section concave} As an application of our methods we show that for concave toric domains, ECH can be computed via enumeration of ECH index one cascades. By what we proved above, it suffices to show all ECH index one holomorphic curves after the Morse-Bott perturbation have genus zero. We recall the definition of a concave toric domain. Consider $\mathbb{C}^2$ equipped with the standard product symplectic form.
Consider the standard $T^2$ action on $\mathbb{C}^2$, and the associated moment map $\mu: \mathbb{C}^2 \rightarrow \mathbb{R}^2$ given by \[ \mu(z_1,z_2) =(\pi|z_1|^2, \pi |z_2|^2). \] Let $\Omega \subset \mathbb{R}^2$ be a domain in the first quadrant of $\mathbb{R}^2$, we define the domain $X_\Omega$ to be \[ X_\Omega: = \{(z_1,z_2) | \mu (z_1,z_2) \in \Omega\}. \] Suppose $\Omega$ is a domain bounded by the horizontal segment from $(0,0)$ to $(a,0)$, the vertical segment from $(0,0)$ to $(0,b)$ and the graph of a convex function $f:[0,a] \rightarrow [0,b]$ so that $f(0)=b$ and $f(a)=0$. We further assume $f$ is smooth, $f'(0)$ and $f'(a)$ are irrational, $f'(x)$ is constant near $0$ and $a$, and $f''(x)>0$ whenever $f'(x)$ is rational, then we say $X_\Omega$ is a \textbf{concave toric domain}. Note our definition is slightly more restrictive than that of \cite{intoconcave}, because we are not interested in capacities; we need the boundary of $X_\Omega$ to be well behaved enough to define ECH. For a concave toric domain $X_\Omega$, its boundary $\partial X_\Omega$ is a contact 3-manifold diffeomorphic to $S^3$. We now describe the Reeb orbits that appear in $\partial X_\Omega$. We also note their Conley-Zehnder indices, having chosen the same trivializations as in \cite{intoconcave}. \begin{enumerate} \item $\gamma_1 = \{ (z_1,0) \in \partial X_\Omega \}$. The orbit $\gamma_1$ is elliptic with rotation angle $-1/f'(a)$, hence $CZ(\gamma_1^k) = 2\floor{-k/f'(a)}+1$ \item $\gamma_2 = \{ (0,z_2) \in \partial X_\Omega \}$. The orbit $\gamma_2$ has rotation angle $-f'(0)$, hence $CZ(\gamma_2^k)=2\floor{-kf'(0)}+1$. \item Let $x\in (0,a)$ be such that $f'(x)$ is rational. Then the torus described by $\{(z_1,z_2) | \mu(z_1,z_2) = (x,f(x))\}$ is a (negative) Morse-Bott torus. Each Reeb orbit has Robbin-Salamon index $-1/2$. \end{enumerate} We say a bit more about the Reeb dynamics for the third case.
Consider the point $(x,f(x))$ so that $f'(x)$ is rational. We set $f'(x) = \tan(\phi)$, $\phi \in (-\pi/2,0)$. Then the Reeb vector field is given by (see \cite{mihai}) \[ R = \frac{2\pi}{-x \sin(\phi) + f(x) \cos(\phi)}(-\sin (\phi) \partial_{\theta_1} + \cos(\phi) \partial_{\theta_2}). \] For large action $L>0$, we perturb each Morse-Bott torus to a pair of orbits, one elliptic, the other hyperbolic. Then an ECH generator $\alpha = \{(\alpha_i,m_i)\}$ is a collection of nondegenerate Reeb orbits with multiplicities. We associate to each ECH generator a \textbf{combinatorial generator}. \begin{definition}(see \cite{intoconcave}) A combinatorial generator is a quadruple $\tilde{\Lambda} = (\Lambda,\rho,m,n)$ where \begin{enumerate} \item $\Lambda$ is a concave integral path from $(0,B)$ to $(A,0)$ such that the slope of each edge is in the interval $[f'(0),f'(a)]$. \item $\rho$ is a labeling of each edge of $\Lambda$ by $e$ or $h$. \item $m$ and $n$ are nonnegative integers. \end{enumerate} \end{definition} Let $\Lambda_{m,n}$ denote the concatenation of the following sequence of paths: \begin{enumerate} \item The highest polygonal path with vertices at lattice points from $(0,B+n+\floor{-mf'(0)})$ to $(m,B+n)$ which is below the line through $(m,B+n)$ with slope $f'(0)$. \item The image of $\Lambda$ under the translation $(x,y)\mapsto (x+m,y+n)$. \item The highest polygonal path with vertices at lattice points from $(A+m,n)$ to $(A+m+\floor{-n/f'(a)},0)$ which is below the line through $(A+m,n)$ with slope $f'(a)$. \end{enumerate} Let $\mathcal{L}(\Lambda_{m,n})$ denote the number of lattice points bounded by the axes and $\Lambda_{m,n}$, not including the lattice points on the image of $\Lambda$ under the translation $(x,y) \mapsto (x+m,y+n)$. We then define \[ I^{comb}(\Lambda_{m,n}) =2 \mathcal{L}(\Lambda_{m,n}) + h(\Lambda) \] where $h(\Lambda)$ is the number of edges in $\Lambda$ labelled by $h$.
To each ECH generator $\alpha = \{(\alpha_i,m_i)\}$ we associate a combinatorial ECH generator $(\Lambda,\rho,m,n)$ as follows. The number $m$ is the multiplicity of $\gamma_2$ as it appears in $\alpha$, and the integer $n$ is the multiplicity of $\gamma_1$ as it appears in $\alpha$. For other (nondegenerate) Reeb orbits of $\alpha$, they all come from small perturbations of Morse-Bott tori. If $\gamma \in \alpha$ is a Reeb orbit that comes from breaking the degeneracy of a Morse-Bott torus at $(x,f(x))$, then let $v_1$ be the smallest positive integer so that $v_2=f'(x)v_1\in \mathbb{Z}$. Let $v$ denote the vector $v=(v_1,v_2)$. The path is obtained by taking each Reeb orbit $\gamma$ in $\alpha$ that comes from Morse-Bott tori, associating to it the vector that is $v$ multiplied by the multiplicity of $\gamma$ as it appears in $\alpha$, and concatenating these vectors in order of increasing slope. The labelling $\rho$ is obtained by labelling the vector associated to $\gamma$ with the letter $h$ if $\gamma$ is hyperbolic, and $e$ if $\gamma$ is elliptic. \begin{proposition}(\cite{intoconcave}) If $C$ is a current from $\alpha$ to $\beta$, its ECH index is given by $I^{comb}(\alpha) - I^{comb}(\beta)$. \end{proposition} For future usage, we also record how the Chern class is computed (see \cite{intoconcave}). Let $\alpha$ denote an ECH generator, we associate to it the combinatorial generator $(\Lambda,\rho,m,n)$, then we take \[ c_\tau(\alpha) = A+B+m+n. \] Then if we have a $J$-holomorphic curve from ECH generator $\alpha$ to $\beta$, its relative first Chern class is calculated by $c_\tau(\alpha)-c_\tau(\beta)$. We need a version of the local energy inequality, which we take up presently. Versions of this inequality have appeared in \cite{pfhdehn,ziwenyao,simplicityconjecture,choi2016combinatorial}. Consider the boundary of $\Omega$ with its intersections with the two coordinate axes removed, then its preimage under the moment map is an interval times a two torus.
We write the two torus as $(\theta_1,\theta_2) \in S^1_1 \times S^1_2$, where the first $S^1_1$ is the $S^1$ coming from rotation in the first complex plane $\mathbb{C}$, and the second $S^1$ comes from the second copy of $\mathbb{C}$. We use $\mathbb{Z}\oplus \mathbb{Z}$ to denote the lattice of first homology with $\mathbb{Z}$ coefficients. Consider a Morse-Bott torus at $(x,f(x))$ with $f'(x) = v_2/v_1$ as before, then the homology class of the Reeb orbit is given by the pair $(-v_2,v_1)\in \mathbb{Z}^2$ (this is true before or after the Morse-Bott perturbation). Consider $F_{[x_0,x_1]}$, by which we denote the preimage of the graph $\{(x,f(x)) | x\in [x_0,x_1]\}$ under the moment map. We similarly consider $F_x$, which is the preimage of $(x,f(x))$ under the moment map. Let $C$ be a somewhere injective $J$ holomorphic curve; we consider $C\cap F_{x_0}$ (we choose $x_0$ generically so this intersection is transverse). We orient this intersection using the boundary orientation of $C\cap F_{[x_0-\epsilon,x_0]}$. Its homology class in $\mathbb{Z}^2$ we write as $[C\cap F_{x_0}]$. \begin{proposition} Let $(p,q)\in \mathbb{Z}^2$ denote the homology of $C\cap F_{x_0}$, then we have the inequality \[ p +f'(x_0) q \geq 0. \] We further observe equality holds only if $C$ is a trivial cylinder. \end{proposition} \begin{proof} We consider $C\cap F_{[x_0,x_1]}$, and observe with our conventions $\partial (C\cap F_{[x_0,x_1]}) = C\cap F_{x_1} - C\cap F_{x_0}$. We next consider \begin{align*} \int_{C\cap F_{[x_0,x_1]}} d\lambda &= \int_{C\cap F_{x_1}} \lambda - \int_{C\cap F_{x_0}} \lambda\\ &= \int_{C\cap F_{x_1}} r_1d\theta_1 + r_2 d\theta_2 - \int_{C\cap F_{x_0}} r_1 d\theta_1 +r_2 d\theta_2\\ &= (x_1-x_0) p +(f(x_1)-f(x_0))q \geq 0. \end{align*} By taking the limit $x_1\rightarrow x_0$, we conclude the proof. \end{proof} Suppose the $J$-holomorphic current $C$ connects from $\alpha_+$ to $\alpha_-$ and has ECH index one.
Suppose $C$ does not contain trivial cylinder components, hence it is embedded. Let $\alpha_+$ contain $\gamma_1$ with multiplicity $n_+$, the orbit $\gamma_2$ with multiplicity $m_+$, and contains $e_+$ distinct elliptic orbits and $h_+$ hyperbolic orbits. Suppose further $C$ has $k_m^+$ ends at $\gamma_2$, with multiplicities $m_+^i$, and $C$ has $k_n^+$ ends at $\gamma_1$ with multiplicities $n_+^i$. Likewise we use $m_-,n_-,e_-,h_-$ and $k_m^-,m_-^i,k_n^-, n_-^i$ to denote the respective quantities in $\alpha_-$, except here $e_-$ denotes the number of elliptic Reeb orbits counted with multiplicity. Then the key is the following proposition (similar proofs have appeared in \cite{simplicityconjecture,pfhdehn,choi2016combinatorial}). \begin{proposition}\label{thm:concave genus zero} For the case of concave toric domains, after a small perturbation away from the Morse-Bott degeneracies, all ECH index one curves have genus zero. \end{proposition} \begin{proof} \textbf{Step 1} We know that the integers $m_\pm^i$ and $n_\pm^i$ satisfy partition conditions because $C$ has ECH index one. Recall that for an elliptic Reeb orbit of rotational angle $\theta$, suppose $C$ is asymptotic to this Reeb orbit at its positive ends with multiplicity $m$. Consider the line $y=\theta x$ on the $x-y$ plane, then draw the maximal concave polygonal path connecting lattice points beneath $y=\theta x$. This polygonal path $\mathcal{P}$ starts at the origin and connects to $(m,\floor{m\theta})$. The horizontal displacements of the edges in this path we will write as $(m_i)$ and take the convention that if $i<j$, then $m_i$ is the segment before $m_j$ if we count starting from the origin. This gives an integer partition of $m$, which is the partition conditions for positive ends of $C$ that are asymptotic to this Reeb orbit. We observe that $\sum_i \floor{m_i \theta} = \floor{\theta m}$.
To see this, first it follows from the properties of the floor function that \[ \sum_i \floor{m_i\theta} \leq \floor{m\theta}. \] For the converse inequality, consider the polygonal path $\mathcal{P}$ with vertices at $(\sum_{i}^km_i,\floor{\sum_{i}^km_i\theta })$. It suffices to show \[ \floor{m_k\theta} \geq \floor{\sum_i^k m_i \theta} - \floor{\sum_i^{k-1} m_i \theta}. \] This follows from the fact that \[ \theta \geq \frac{\floor{\sum_i^k m_i \theta} - \floor{\sum_i^{k-1} m_i \theta}}{m_k} \] which is a consequence of the fact that $\mathcal{P}$ is maximally concave. We next recall the partition conditions for negative ends of $C$ asymptotic to the Reeb orbit with rotation angle $\theta$. Consider the line $y=\theta x$, and the minimal convex path above $y=\theta x$ that connects between $(0,0)$ and $(m,\ceil{m\theta})$ through lattice points. The horizontal displacements of the edges of this path are labelled (in order) $m_i$, and form the partition conditions for negative ends of $C$. Using a very similar proof as before, we can show \[ \sum \ceil{m_i \theta} = \ceil {m\theta}. \] Then we can compute the Fredholm index of $C$ as \begin{align*} Ind(C) =& 2g-2 +(e_+ + h_+ + k_m^++k_n^+)+(e_- + h_- + k_m^-+k_n^-)\\ &+2(A_++B_+ +m_+ +n_+ -A_--B_--m_--n_-)\\ & -e_+ +e_- \\ & + (k_n^+ + k_m^+ +k_m^- + k_n^-) \\ & + \sum_{i=1}^{k_n^+} 2\floor{- n_+^i /f'(a)} + \sum_{i=1}^{k_m^+} 2\floor{-m_+^i f'(0)} - \sum_{i=1}^{k_n^-} 2\ceil{- n_-^i /f'(a)} - \sum_{i=1}^{k_m^-} 2\ceil{-m_-^i f'(0)}. \end{align*} \textbf{Step 2} To analyze the above equation further, we first note that \begin{equation} \label{Eq:yaxiscomparison} A_+ + n_+ +\sum_{i=1}^{k_m^+}\floor{-m_+^i f'(0)} -A_--n_- - \sum_{i=1}^{k_m^-}\ceil{-m_-^i f'(0)} \geq 0. \end{equation} This is accomplished by considering the interior intersections of $C$ with $\gamma_2 \times \mathbb{R}$. All such intersection points are positive, by positivity of intersections.
The count of interior intersections is given by (see \cite{mean_action_calabi}) \[ l_+(C,\gamma_2) - l_-(C,\gamma_2) \] where $l_+$ denotes the linking number of positive ends of $C$ with $\gamma_2$, and $l_-$ is the linking of negative ends of $C$ with $\gamma_2$. We note the linking numbers in a concave toric domain are calculated as follows (\cite{intoconcave}): \[ lk(\gamma_1,\gamma_2) =1,\quad lk (\gamma_1, o_v) = -v_2, \quad lk (\gamma_2,o_v) =v_1, \quad lk(o_v,o_w) = \min \{-v_1w_2,-v_2w_1\}. \] Here we use $o_v$ to denote nondegenerate orbits that come from perturbing a Morse-Bott torus at $(x,f(x))$, with $f'(x) =v_2/v_1$. From this we see that $l_+(C,\gamma_2) =A_+ + n_+ +\sum_{i=1}^{k_m^+}\floor{-m_+^i f'(0)} $, and $l_-(C,\gamma_2)= A_-+n_- + \sum_{i=1}^{k_m^-}\ceil{-m_-^i f'(0)}$. The $A_\pm$ terms come from ends of $C$ asymptotic to $o_v$, the $n_\pm$ term comes from ends of $C$ asymptotic to $\gamma_1$, and the floor and ceiling terms come from ends of $C$ asymptotic to $\gamma_2$ and the fact that $C$ has ECH index one. From the partition conditions we see that $\sum_{i=1}^{k_m^+}\floor{-m_+^i f'(0)} = \floor{-m_+ f'(0)}$. Likewise we can show \[ B_+ +m_+ +\sum_{i=1}^{k_n^+} \floor{- n_+^i /f'(a)} - B_--m_- -\sum_{i=1}^{k_n^-} \ceil{- n_-^i /f'(a)} \geq 0. \] Hence we conclude from the Fredholm index formula that if $C$ has ends at $\gamma_1$ or $\gamma_2$, then it must have genus 0. \textbf{Step 3} Next we consider the case where $C$ has no ends at $\gamma_1$ or $\gamma_2$. We assume $C$ has genus one. Then $A_+=A_-, B_+=B_-$ from Fredholm index considerations. Let $\Lambda_\pm$ denote the polygonal paths associated to generators $\alpha_\pm$. We first show $\Lambda_+$ lies outside $\Lambda_-$. By the above we already know they agree at end points. As a preamble, we consider the homology classes $F_x\cap C$. First for $x$ very close to zero, say equal to $\epsilon>0$, let $[F_\epsilon] = (p,q)$. Then we have $p+ f'(0)q\geq 0$.
Similarly consider $[F_{a-\epsilon}] = (-p,-q)$. We have $-p -f'(a)q\geq 0$. Adding these inequalities we get $(f'(0)-f'(a))q\geq 0 $ from which we deduce $q\leq 0$. Then we have $-f'(a)q \geq p \geq -f'(0)q$, which implies $p=q=0$. Incidentally this implies a kind of maximum principle for holomorphic curves. Note $p+f'(x)q=0$ only if the curve is a branched cover of a trivial cylinder. This implies for our curves they are confined to have $x\in (0,a)$. Next we compute $[F_x]$ for any $x$ irrational and $\epsilon>0$ sufficiently small. We have \begin{align*} &[F_x] - [F_\epsilon] + \text{homology class of Reeb orbits in $[\epsilon,x]$ approached by positive ends of $C$} \\ &-\text{homology class of Reeb orbits in $[\epsilon,x]$ approached by negative ends of $C$} = 0. \end{align*} Next we consider the no crossing of polygonal paths. Suppose the no crossing result does not hold, since we know $\Lambda_\pm$ have the same beginning and end points, there must exist two intersection points which we call $(a,b)$ and $(c,d)$, with $a<c$. Then on the interval $(a,c)$ the path $\Lambda_-$ is strictly above $\Lambda_+$ except at end points where they overlap. Forming the line connecting $(a,b)$ and $(c,d)$, we can find $x_0\in (a,c)$ such that $f'(x_0) = \frac{d-b}{c-a}$. We compute $[F_{x_0-\epsilon}]$ and apply the local energy inequality to it. We use $x_0-\epsilon$ to avoid the case where $x_0$ is the $x$ coordinate of lattice points in $\Lambda_\pm$, practically this will not make a difference. Let the lattice point $(p,q)$ have the following property: it is a vertex on $\Lambda_+$, the edge to the left of this lattice point has slope less than $f'(x_0)$, and the edge to the right of this vertex has slope greater than or equal to $f'(x_0)$. Then the contribution to $[F_{x_0-\epsilon}]$ from $\Lambda_+$ is simply $(-(B-q),-p)$. We also consider the contribution to $[F_{x_0-\epsilon}]$ from $\Lambda_-$, which takes the form $(B-q',p')$.
The lattice point $(p',q')$ on $\Lambda_-$ is chosen the same way as $(p,q)$. If no such vertex exists, then $\Lambda_-$ must overlap with the line segment connecting $(a,b)$ and $(c,d)$. Then the point $(p',q')$ is still the lattice point on $\Lambda_-$ which corresponds to the left most end point of where $\Lambda_-$ overlaps with the line connecting $(a,b)$ to $(c,d)$. In either case the local energy inequality says that \[ (q-q') + \frac{d-b}{c-a} (p'-p) \geq 0. \] We first assume $(p',q')$ is not on the line connecting $(a,b)$ and $(c,d)$, then this means that the point $(p,q)$ is further away from the line connecting $(a,b)$ to $(c,d)$ than $(p',q')$. Geometrically this is described by \[ (b-d)(p-p') +(c-a) (q-q') < 0, \] which contradicts the energy inequality. Now assume $(p',q')$ is on the line connecting $(a,b)$ to $(c,d)$, then since we have chosen $[F_{x_0-\epsilon}]$, we must have $p'<p$. The energy inequality implies \[ \frac{q-q'}{p-p'} > \frac{d-b}{c-a}, \] contradicting the geometric picture. \textbf{Step 4}. After we proved no-crossing in the previous step, we show there cannot be a genus one curve satisfying the assumptions of the previous step. The Fredholm index formula tells us that (recall we are assuming $g=1$) \[ 1 = h_+ + h_- + 2e_- \] which means $e_-=0$ and exactly one of $h_+$ and $h_-$ is one. If $h_+=1$, and $h_-=0$, then $\alpha_- = \emptyset$. By inspection $C$ cannot have ECH index one. On the other hand, if $h_+=0$ and $h_-=1$, then $\Lambda_-$ consists of a single line segment. $\Lambda_+$ has the same end points as $\Lambda_-$ and is concave, hence must also agree with $\Lambda_-$ as polygonal paths. One checks easily that in this case the ECH index cannot be one. This concludes the proof that all ECH index one curves have genus zero. \end{proof} After we have proved all ECH index one curves have genus zero, we can then use the tree-like compactification to describe the moduli space of cascades.
However there is the complication that there are two nondegenerate orbits, $\gamma_+$ and $\gamma_-$. So in the tree like compactification, we allow the ends of $J$-holomorphic curves to land on nondegenerate orbits. Furthermore, connecting between two nontrivial curves, instead of a gradient trajectory, it could be that adjacent ends of $J$-holomorphic curves land on the same non-degenerate orbits and no gradient trajectories connect between them. See figure \ref{fig:tree like with nondeg}. \begin{figure} \caption{Cascade with tree like compactification for concave toric domains. The unconnected ends of holomorphic curves can land on either Morse-Bott tori or nondegenerate Reeb orbits. The green arrow denotes a finite gradient flow line connecting between two adjacent ends that land on Morse-Bott tori. The dashed line is used to indicate the adjacent ends land on non-degenerate Reeb orbits, and there is no need for gradient trajectories to connect between them.} \end{figure} Given such a cascade of ECH index one, we can cut it into subtrees along each matching pair of nondegenerate orbits, see figure \ref{cut tree like cascade}. \begin{figure} \caption{We cut along the red dashed lines to sub trees of cascades. For this figure each subtree is circled by dashed blue lines. The ECH index is additive along concatenation of such sub trees.} \end{figure} The ECH index is additive with respect to concatenation of sub-trees. So the ECH index one conditions implies there are no matching along nondegenerate orbits, and we can use the correspondence theorem \ref{theorem:cobordism for genus 0} as before. \section{Convex Toric Domains}\langlebel{section convex} In this section we show we can compute the ECH chain complex of convex toric domains via enumeration of $J$-holomorphic cascades. As there are many similarities with the case of concave toric domains, we will be brief in its treatment. 
Suppose $\Omega$ is a domain bounded by the horizontal segment from $(0,0)$ to $(a,0)$, the vertical segment from $(0,0)$ to $(0,b)$ and the graph of a concave function $f:[0,a] \rightarrow [0,b]$ so that $f(0)=b$ and $f(a)=0$. We further assume $f$ is smooth, $f'(0)$ and $f'(a)$ are irrational, $f'(x)$ is constant near $0$ and $a$, and $f''(x)<0$ whenever $f'(x)$ is rational, then we say $X_\Omega$ is a \textbf{convex toric domain}. As in the case of a concave toric domain, the boundary of $X_\Omega$, written as $\partialartial X_\Omega$, is a contact 3-manifold diffeomorphic to $S^3$. We now describe the Reeb orbits that appear in $\partialartial X_\Omega$. We also note their Conley Zehnder indices, having chosen the same trivializations as in \cite{beyondech} \begin{enumerate} \item $\gamma_1 = \{ (z_1,0) \in \partialartial X_\Omega \}$. The orbit $\gamma_1$ is elliptic with rotation angle $-1/f'(a)$, hence $CZ(\gamma_1^k) = 2\floor{-k/f'(a)}+1$ \item $\gamma_2 = \{ (0,z_2) \in \partialartial X_\Omega \}$. The orbit $\gamma_2$ has rotation angle $-f'(0)$, hence $CZ(\gamma_2^k)=2\floor{-kf'(0)}+1$. \item Let $x\in (0,a)$ be such that $f'(x)$ is rational. Then the torus described by $\{(z_1,z_2) | \mu(z_1,z_2) = (x,f(x))\}$ is a (positive) Morse-Bott torus. Each Reeb orbit has Robbin-Salamon index $+1/2$. \end{enumerate} \begin{definition} A combinatorial generator is a quadruple $\tilde{\Lambda} = (\Lambda,\rho,m,n)$ where \begin{enumerate} \item $\Lambda$ is a convex integral path from $(0,B)$ to $(A,0)$ such that the slope of each edge is in the interval $[f'(0),f'(a)]$. \item $\rho$ is a labeling of each edge of $\Lambda$ by $e$ or $h$. \item $m$ and $n$ are nonnegative integers. 
\end{enumerate} \end{definition} Let $\Lambda_{m,n}$ denote the concatenation of the following sequence of paths: \begin{enumerate} \item The highest polygonal path with vertices at lattice points from $(0,B+n+\floor{-mf'(0)})$ to $(m,B+n)$ which is below the line through $(m,B+n)$ with slope $f'(0)$. \item The image of $\Lambda$ under the translation $(x,y)\mapsto (x+m,y+n)$. \item The highest polygonal path with vertices at lattice points from $(A+m,n)$ to $(A+m+\floor{-n/f'(a)},0)$ which is below the line through $(A+m,n)$ with slope $f'(a)$. \end{enumerate} Let $\mathcal{L}(\Lambda_{m,n})$ denote the number of lattice points bounded by the axes and $\Lambda_{m,n}$, including the lattice points on the edges of $\Lambda_{m,n}$. We then define \[ I^{comb}(\Lambda_{m,n}) =2( \mathcal{L}(\Lambda_{m,n})-1) - h(\Lambda) \] And the Chern class of $\Lambda_{m,n}$ is given by \[ c_\tau(\Lambda_{m,n}) = A+B+m+n. \] \begin{theorem} The ECH index of a holomorphic curve between two ECH generators is the difference of the $I^{comb}$ we associate to their corresponding combinatorial ECH generators. \end{theorem} \begin{proof} The proof is a generalization of the computation in \cite{beyondech,intoconcave}. We briefly summarize this below. Let $\alpha$ denote a ECH orbit set. We consider $I(\alpha,\emptyset,Z)$ where $Z$ is the unique relative homology class that is represented by discs with boundary $\alpha$. Let $m,n$ denote the multiplicity of $\gamma_2,\gamma_1$ respectively in $\alpha$, and let $\Lambda$ be the resulting convex integral path defined by associating Reeb orbit sets to integral paths as in \cite{beyondech}. Then it suffices to show $I(\alpha,\emptyset,Z) = I^{comb}(\Lambda_{m,n})$. The computation is the same as the one in \cite{beyondech}, except the Conley-Zehnder index terms arising from $\gamma_1$ and $\gamma_2$ may not just be $1$ due to the fact their rotation angles $\theta$ need not be very close to zero. 
This is accounted for by the polygonal paths we append to the image of $\Lambda$ under the translation $(x,y)\mapsto (x+m,y+n)$. \end{proof} \begin{theorem} A nontrivial $J_\delta$-holomorphic curve in a convex toric domain of ECH index one has genus zero. Here we use $J_\delta$ to mean we have perturbed away all Morse-Bott degeneracies. \end{theorem} \begin{proof} We borrow the notation of the previous section, except here $e_+$ denotes the total multiplicity of elliptic Reeb orbits in $\alpha_+$ arising from Morse-Bott tori and $e_-$ denotes the total number of distinct elliptic Reeb orbits in $\alpha_-$ arising from perturbations of Morse-Bott tori. The Fredholm index of a connected $J$-holomorphic curve $C$ between two orbit sets $\alpha_+$ and $\alpha_-$ is given by \begin{align*} Ind(C) =& 2g-2 +(e_+ + h_+ + k_m^++k_n^+)+(e_- + h_- + k_m^-+k_n^-)\\ &+2(A_++B_+ +m_+ +n_+ -A_--B_--m_--n_-)\\ & +e_+ -e_- \\ & + (k_n^+ + k_m^+ +k_m^- + k_n^-) \\ & + \sum_{i=1}^{k_n^+} 2\floor{- n_+^i /f'(a)} + \sum_{i=1}^{k_m^+} 2\floor{-m_+^i f'(0)} - \sum_{i=1}^{k_n^-} 2\ceil{- n_-^i /f'(a)} - \sum_{i=1}^{k_m^-} 2\ceil{-m_-^i f'(0)}. \end{align*} The same linking number relations as in Proposition~\ref{thm:concave genus zero} hold in the case of convex toric domains; so similarly by considering the intersections of $C$ with the trivial cylinders at $\gamma_1$ and $\gamma_2$, we conclude \begin{equation*} A_+ + n_+ +\sum_{i=1}^{k_m^+}\floor{-m_+^i f'(0)} -A_--n_- - \sum_{i=1}^{k_m^-}\ceil{-m_-^i f'(0)} \geq 0 \end{equation*} and \[ B_+ +m_+ +\sum_{i=1}^{k_n^+} \floor{- n_+^i /f'(a)} - B_--m_- -\sum_{i=1}^{k_n^-} \ceil{- n_-^i /f'(a)} \geq 0. \] Hence for $C$ to have genus nonzero it must not have any ends at $\gamma_1$ and $\gamma_2$. The local energy inequality holds as before. To prove the no-crossing lemma, we can associate two polygonal paths $\Lambda_+$ and $\Lambda_-$ to ECH generators $\alpha_+$ and $\alpha_-$ respectively.
As before from index considerations the $x$ and $y$ intercepts of $\Lambda_+$ and $\Lambda_-$ agree. Hence as before we can choose points $(a,b)$ and $(c,d)$ where $\Lambda_+$ and $\Lambda_-$ intersect, and between these two points $\Lambda_-$ is strictly above $\Lambda_+$. As before we may choose $x_0 \in (a,c)$ so that $f'(x_0) = \frac{d-b}{c-a}$. Let the lattice point $(p',q')$ have the following property: it is a vertex on $\Lambda_-$, the edge to the left of this lattice point has slope greater than or equal to $f'(x_0)$, and the edge to the right of this vertex has slope less than $f'(x_0)$. Let $(p,q)$ denote a vertex of $\Lambda_+$ with the same property. We assume such a vertex $(p,q)$ exists and leave the case where such a vertex does not exist to later. Then consider $[F_{x_0+\epsilon}] = (q-q',p'-p)$. Now again the energy inequality says \[ (q-q') + \frac{d-b}{c-a} (p'-p) \geq 0. \] In this case, the point $(p,q)$ is closer to the line connecting $(a,b)$ and $(c,d)$ than $(p',q')$, but this time on the other side of the line. This means that \[ (p-p')(b-d) + (c-a) (q-q') < 0. \] Comparing with the energy inequality we see a contradiction. Now if $(p,q)$ is in fact on the line connecting $(a,b)$ and $(c,d)$, then since we are computing $[F_{x_0+\epsilon}]$, we must have $p>p'$, from which we have \[ \frac{d-b}{c-a} > \frac{q-q'}{p-p'}, \] which is a contradiction. With the no-crossing result at hand, we turn to the index formula. If $C$ had genus one, then \[ 1 = 2e_+ + h_++h_-. \] As before we break this into cases. We must have $e_+=0$. If $h_+=1$ then $\Lambda_+ $ consists of a single edge; by no-crossing $\Lambda_-$ is either an identical edge or empty. We check either case cannot produce an ECH index 1 curve. $h_+$ cannot equal zero because then $\Lambda_+ =\emptyset$. \end{proof} Hence we conclude all ECH index one curves have genus zero; a similar description of tree-like cascades shows we can use them to compute the ECH chain complex.
\appendix \section{Appendix: Transversality Issues} In this Appendix we describe some the transversality difficulties in the moduli space of cascades, even if all the appearing curves are somewhere injective. Note we are not claiming transversality is impossible, we are simply saying there are issues with the standard universal moduli space approach of transversality. We give some simple examples below to illustrate this. Consider the universal moduli space of somewhere injective cascades, written as \[ \mathcal{B} : = \{ (\cas{u},J) | \, \cas{u} \, \textup{is a} \, J \textup{-holomorphic cascade, and that all curves appearing in $\cas{u}$ are simple} \}. \] We explain why the standard proof that $\mathcal{B}$ is a Banach manifold does not necessarily work. Given a cascade $\cas{u} \in \mathcal{B}$, there are two evaluation maps $EV^+$ and $EV^-$ that map into a product of $S^1$, as in Definition \ref{def:transversality conditions}. The usual procedure to show that $\mathcal{B}$ is a Banach manifold is to show the maps $EV^\partialm$ are transverse to each other. However in complicated enough cascades, the same curve can appear in multiple different levels. An illustration is given in the figure below. Here we have a cascade of 5 levels. The red curve is a map $u: \Sigma \rightarrow \mathbb{R} \times Y^3$, and the blue curve is a map $v: \Sigma' \rightarrow \mathbb{R} \times Y^3 $. Green horizontal arrows denote the upwards gradient flow, and the black horizontal lines denote Morse-Bott tori. Diamonds denote the critical points of $f$ on the Morse-Bott tori. For instance, one of the positive ends of the black curve ends on a critical point of $f$, and there is a chain of fixed trivial cylinders atop this end. \begin{figure} \caption{Cascade with 5 levels} \end{figure} This is an illustration of how the same curves can happen in the same cascade. 
To illustrate the transversality issue, we assume that the configuration consisting of the red and blue curves (which we labelled $u$ and $v$) in figure \ref{repeat_red_and_blue} happens $n$ times in a cascade $\cas{u}$. \begin{figure} \caption{A repetitive pattern that can appear multiple times in a cascade.} \end{figure} We assume both $u$ and $v$ are rigid (we are allowed to assume this since we are working in the universal moduli space, in general more complicated things can still happen but the principle is the same). We label the $n$ identical copies of $u$ and $v$ as $u_i,v_i$ with $ i=1,...,n$. The two negative ends of $u_i$ and the two positive ends of $v_i$ are labelled by $1,2$, as shown in the figure. The remaining end of $u_i$ and $v_i$ is labelled $3$. We denote their evaluation maps by $ev(u_i,k)$ and $ev(v_i,k)$ where $k=1,2,3$. As a necessary condition for the $EV^+$ and $EV^-$ to be transverse, we must have \begin{equation} \label{equation:transverse} \bigoplus (dev(u_i,1)+dev(v_i,1)+t_i, dev(u_i,2)+dev(v_i,2)+t_i): T\mathcal{W}_u \oplus T\mathcal{W}_v \oplus \bigoplus_{i=1,...,n} \mathbb{R} \longrightarrow \bigoplus_{i=1,...,n} (TS^1\oplus TS^1) \end{equation} is surjective. Note $(t_1,...,t_n)\in \bigoplus_{i=1,...,n} \mathbb{R}$. The vector space $T\mathcal{W}_u$ has the following description. Recall a neighborhood of (not necessarily $J$ holomorphic) curves near $u$ can be represented by $W^{2,p,d}(u^*TM) \oplus T\mathcal{J} \oplus V_1 \oplus V_2 \oplus V_3$. Here $W^{2,p,d}(u^*TM)$ is the Sobolev space of vector fields on $u$ with exponential weight $e^{d|s|}$ near the cylindrical ends. $T\mathcal{J}$ is a finite dimensional Teichm\"uller slice, and the vector spaces $V_i$ consist of asymptotically constant vectors near each of the cylindrical ends, which we labelled $1,2,3$ (see \cite{Yaocas,wendlauto}).
Recalling the coordinate choices of Section \ref{degenerations} near Morse-Bott tori, the $V_i$ is spanned by vector fields of the form \[ \beta \partial_z, \quad \beta \partial_a, \quad \beta \partial_x. \] $\beta$ here is a cutoff function that is one near a cylindrical neighborhood of a puncture and zero elsewhere. We denote a triple of these vector fields in $V_i$ as $(r,a,p)_i$. Then the vector space $T\mathcal{W}_u$ is given by \[ \{(\xi,(r,a,p)_i,Y) \in W^{2,p,d}(u^*TM) \oplus T\mathcal{J} \oplus V_1 \oplus V_2 \oplus V_3 \oplus T \mathcal{I} |D\bar{\partial}_J (\xi + \sum_i(r,a,p)_i) +Y\circ Tu \circ j =0\} \] $D\bar{\partial}_J$ is the linearization of the Cauchy--Riemann operator along $u$ that includes deformation of the domain complex structure of $u$. Here $T\mathcal{I}$ denotes the Sobolev space that is the tangent space of all $\lambda$-compatible almost complex structures (we should choose a Sobolev space for this but that is unimportant for now). A similar expression holds for $T\mathcal{W}_v$. We note the same $Y\in T\mathcal{I}$ appears in the definition of $T\mathcal{W}_v$ as well. Now since $u$ is rigid, for given $Y$ there exists a unique tuple $(\xi,(r,a,p)_i)$ (up to translation in the symplectization direction) so that $(\xi,(r,a,p)_i,Y)\in T\mathcal{W}_u$. A similar statement holds for $T\mathcal{W}_v$. Conversely, given two tuples $(p_1(u),p_2(u),p_3(u))$ and $(p_1(v),p_2(v),p_3(v))$ (we use brackets to denote whether the vector field is living over $u$ or $v$), we can find $Y \in T\mathcal{I}$ and $(\xi(u),(r(u),a(u))_i)$ and $(\xi(v),(r(v),a(v))_i)$ so that the tuples $(\xi(u),(r(u),a(u),p(u))_i,Y )\in T\mathcal{W}_u$, and similarly for $T\mathcal{W}_v$. Hence we can think of the map described in Equation~\eqref{equation:transverse} as the following. Its image is spanned by vector fields of the form \[ \bigoplus_i (x_1+y_1+t_i,x_2+y_2+t_i) \] where $(x_1,y_1)$ and $(x_2,y_2)$ are arbitrary real numbers.
We think of $x_1$ as $p_1(u)$, $x_2$ corresponding to $p_2(u)$, and likewise for $y$ and $p(v)$. For given $n$ the domain has $2+n$ independent variables, but the target is $2n$ dimensional. Hence for large values of $n$ this space cannot be transverse. \begin{proof}[Proof of Theorem \ref{thm:list of transversality conditions}] We note if the above situation does not happen, then the usual proof that $\mathcal{B}$ is a Banach manifold follows through. To be precise, if we let $\tilde{B}$ denote the universal moduli space so that \begin{equation} \tilde{B} : = \left\lbrace (\cas{u},J)\;\middle|\; \begin{tabular}{@{}l@{}} $\cas{u}$ \, \textup{is a reduced} $J$ \textup{-holomorphic cascade as in Definition \ref{def:transversality conditions}};\\ \textup{in addition, either all nontrivial curves}\\ \textup{ are distinct, or the cascade has less than or equal to 3 levels} \end{tabular} \right\rbrace \end{equation} Then $\tilde{B}$ is a Banach manifold, and for generic $J$, cascades satisfying the extra hypothesis of $\tilde{B}$ are transversely cut out living in moduli spaces given by the virtual dimension. In particular if we take as assumption after we perturb away the Morse-Bott degeneracy, all ECH index one curves degenerate (as reduced cascades) to reduced cascades of the form specified in $\tilde{B}$, then we can choose a $J$ so that the conditions \ref{def:transversality conditions} are satisfied for these cascades. A straightforward modification of the proofs in Sections \ref{Finite}, \ref{section:computing using cascades} shows the Morse-Bott chain complex $(C_*^{MB},\partial_{MB})$ when we further restrict the differential to only consider cascades whose reduced versions can appear in $\tilde{B}$ is well defined and computes $ECH(Y,\xi)$. The only different part is showing the cascades counted by $\partial_{MB}$ is finite. Consider the following. 
Suppose $\cas{u}_n$ is a sequence of cascades of the form allowed in $\tilde{B}$ and $\cas{u}_n \rightarrow \cas{u}$. Then for each $\cas{u}_n$ there is a sequence of $J_{\delta_n^m}$-holomorphic curves $v_n^m$ of ECH index one that converges to $\cas{u}_n$ as $m\rightarrow \infty$. We pass to a diagonal subsequence, which we denote by $v_n$, of ECH index one $J_{\delta_n}$-holomorphic curves that degenerate into $\cas{u}$. By assumption, the reduced version of $\cas{u}$ must be of the form allowed in $\tilde{B}$, and this concludes the proof of finiteness. \end{proof} \printbibliography \end{document}
\begin{document} \begin{titlepage} \title{Concentration Bounds for High Sensitivity Functions Through Differential Privacy\thanks{Research by K.N.\ and U.S.\ is supported by NSF grant No.\ 1565387.}} \author{ Kobbi Nissim\thanks{Dept.\ of Computer Science, Georgetown University {\em and} Center for Research on Computation and Society (CRCS), Harvard University. {\tt [email protected]}.} \and Uri Stemmer\thanks{Center for Research on Computation and Society (CRCS), Harvard University. {\tt [email protected]}.} } \date{\today} \maketitle \setcounter{page}{0} \thispagestyle{empty} \begin{abstract} A new line of work~\cite{DworkFHPRR15,HU14,SU15,BassilyNSSSU16} demonstrates how differential privacy~\cite{DMNS06} can be used as a mathematical tool for guaranteeing generalization in adaptive data analysis. Specifically, if a differentially private analysis is applied on a sample $S$ of i.i.d.\ examples to select a low-sensitivity function $f$, then w.h.p.\ $f(S)$ is close to its expectation, although $f$ is being chosen based on the data. Very recently, Steinke and Ullman~\cite{SU17} observed that these generalization guarantees can be used for proving concentration bounds in the non-adaptive setting, where the low-sensitivity function is fixed beforehand. In particular, they obtain alternative proofs for classical concentration bounds for low-sensitivity functions, such as the Chernoff bound and McDiarmid's Inequality. In this work, we set out to examine the situation for functions with {\em high}-sensitivity, for which differential privacy does not imply generalization guarantees under adaptive analysis. We show that differential privacy can be used to prove concentration bounds for such functions in the non-adaptive setting. 
\end{abstract} \paragraph{Keywords:} Differential privacy, concentration bounds, high sensitivity functions \end{titlepage} \section{Introduction} A new line of work~\cite{DworkFHPRR15,HU14,SU15,BassilyNSSSU16} demonstrates how differential privacy~\cite{DMNS06} can be used as a mathematical tool for guaranteeing statistical validity in data analysis. Specifically, if a differentially private analysis is applied on a sample $S$ of i.i.d.\ examples to select a low-sensitivity function $f$, then w.h.p.\ $f(S)$ is close to its expectation, even when $f$ is being chosen based on the data. Dwork et al.~\cite{DworkFHPRR15} showed how to utilize this connection for the task of answering {\em adaptively chosen} queries w.r.t.\ an unknown distribution using i.i.d.\ samples from it. To make the setting concrete, consider a data analyst interested in learning properties of an unknown distribution $\mathcal D$. The analyst interacts with the distribution $\mathcal D$ via a {\em data curator} $\mathcal A$ holding a database $S$ containing $n$ i.i.d.\ samples from $\mathcal D$. The interaction is adaptive, where at every round the analyst specifies a query $q:X^n\rightarrow\mathbb{R}$ and receives an answer $a_q(S)$ that (hopefully) approximates $q(\mathcal D^n) \triangleq \operatorname*{\mathbb{E}}_{S'\sim\mathcal D^n}[q(S')]$. As the analyst chooses its queries based on previous interactions with the data, we run the risk of overfitting if $\mathcal A$ simply answers every query with its empirical value on the sample $S$. However, if $\mathcal A$ is a differentially private algorithm then the interaction would not lead to overfitting: \begin{theorem}[\cite{DworkFHPRR15,BassilyNSSSU16}, informal]\label{thm:BNSSSU} A function $f:X^n\rightarrow\mathbb{R}$ has sensitivity $\lambda$ if $|f(S)-f(S')| \leq \lambda$ for every pair $S, S' \in X^n$ differing in only one entry. Define $f(\mathcal D^n) \triangleq \ex{S'\sim\mathcal D^n}{f(S')}$. 
Let $\mathcal A : X^{n} \rightarrow \mathcal F_\lambda$ be $(\varepsilon,\delta)$-differentially private where $\mathcal F_\lambda$ is the class of $\lambda$-sensitive functions, and $n\geq\frac{1}{\varepsilon^2}\log(\frac{4\varepsilon}{\delta})$. Then for every distribution $\mathcal D$ on $X$, $$ \Pr_{\substack{S\sim\mathcal D^n\\f\leftarrow\mathcal A(S)}}\left[ \left| f(S) - f(\mathcal D^n) \right| \geq 18\varepsilon\lambda n \right] < \frac{\delta}{\varepsilon}. $$ \end{theorem} In words, if $\mathcal A$ is a differentially private algorithm operating on a database containing $n$ i.i.d.\ samples from the distribution $\mathcal D$, then $\mathcal A$ cannot (with significant probability) identify a low-sensitivity function that behaves differently on the sample $S$ and on $\mathcal D^n$. Very recently, Steinke and Ullman~\cite{SU17} observed that Theorem~\ref{thm:BNSSSU} gives alternative proofs for classical concentration bounds for low-sensitivity functions, such as the Chernoff bound and McDiarmid's Inequality: Fix a function $f:X^n\rightarrow\mathbb{R}$ with sensitivity $\lambda$ and consider the trivial mechanism $\mathcal A_f$ that ignores its input and always outputs $f$. Such a mechanism is $(\varepsilon,\delta)$-differentially private for any choice of $\varepsilon,\delta\geq 0$ and hence Theorem~\ref{thm:BNSSSU} yields (up to constants) McDiarmid's Inequality: \begin{equation} \Pr_{S\sim\mathcal D^n}\left[ \left| f(S) - f(\mathcal D^n) \right| \geq 18\varepsilon\lambda n \right] < \frac{\delta}{\varepsilon}=2^{-\Omega(\varepsilon^2\cdot n)},\label{eq:SU17} \end{equation} where the last equality follows by setting $n=\frac{1}{\varepsilon^2}\log(\frac{4\varepsilon}{\delta})$. In light of this result it is natural to ask if similar techniques yield concentration bounds for more general families of queries, and in particular queries that are not low-sensitivity functions. In this work we derive conditions under which this is the case.
\subsection{Differential Privacy, Max-Information, and Typical Stability} Let $\mathcal D$ be a fixed distribution over a domain $X$, and consider a family of functions mapping databases in $X^n$ to the reals, such that for every function $f$ in the family we have that $|f(S)-f(\mathcal D^n)|$ is small w.h.p.\ over $S\sim\mathcal D^n$. Specifically, $$\mathcal F_{\alpha,\beta}(\mathcal D) = \left\{ \;\; f:X^n\rightarrow\mathbb{R} \;\;\; : \;\;\; \Pr_{S\sim\mathcal D^n}[|f(S)-f(\mathcal D^n)|>\alpha]\leq\beta \;\; \right\}.$$ That is, for every function $f\in \mathcal F_{\alpha,\beta}(\mathcal D)$ we have that its empirical value over a sample $S\sim\mathcal D^n$ is $\alpha$-close to its expected value w.p.\ $1-\beta$. Now consider a differentially private algorithm $\mathcal A:X^n\rightarrow \mathcal F_{\alpha,\beta}(\mathcal D)$ that takes a database and returns a function from $\mathcal F_{\alpha,\beta}(\mathcal D)$. What can we say about the difference $|f(S)-f(\mathcal D^n)|$ when $f$ is chosen by $\mathcal A(S)$ based on the sample $S$ itself? Using the notion of {\em max-information}, Dwork et al.~\cite{DworkFHPRR-nips-2015} showed that if $\beta$ is small enough, then w.h.p.\ the difference remains small. Informally, they showed that if $\mathcal A$ is differentially private, then $$ \Pr_{\substack{S\sim\mathcal D^n \\ f\leftarrow\mathcal A(S)}}[|f(S)-f(\mathcal D^n)|>\alpha]\leq\beta\cdot e^{\varepsilon^2\cdot n}. $$ So, if $\mathcal A$ is a differentially private algorithm that ranges over functions which are very concentrated around their expected value (i.e., $\beta<e^{-\varepsilon^2 n}$), then $|f(S)-f(\mathcal D^n)|$ remains small (w.h.p.)\ even when $f$ is chosen by $\mathcal A(S)$ based on the sample $S$. 
When $\beta>e^{-\varepsilon^2 n}$ it is easy to construct examples where a differentially private algorithm identifies a function $f\in\mathcal F_{\alpha,\beta}(\mathcal D)$ such that $|f(S)-f(\mathcal D^n)|$ is arbitrarily large with high probability. So, in general, differential privacy {\em does not} guarantee generalization for adaptively chosen functions of this sort. However, a stronger notion than differential privacy -- typical stability -- presented by Bassily and Freund~\cite{BassilyF16} does guarantee generalization in this setting. Informally, they showed that if a typically stable algorithm $\mathcal B$ outputs a function $f\in\mathcal F_{\alpha,\beta}(\mathcal D)$, then $|f(S)-f(\mathcal D^n)|$ remains small.\footnote{A similar notion -- perfect generalization -- was presented in ~\cite{CummingsLNRW16}.} The results of this article provide another piece of this puzzle, as we show that (a variant of) differential privacy can in some cases be used to prove that a function $f$ is in $\mathcal F_{\alpha,\beta}(\mathcal D)$. \subsection{Our Results} \paragraph{Notation.} Throughout this article we use the convention that $f(\mathcal D^n)$ is the expected value of the function $f$ over a sample containing $n$ i.i.d.\ elements drawn according to the distribution $\mathcal D$. That is, $f(\mathcal D^n)\triangleq\ex{S\sim\mathcal D^n}{f(S)}$. Fix a function $f:X^n\rightarrow\mathbb{R}$, let $\mathcal D$ be a distribution over $X$, and let $S\sim\mathcal D^n$. Our goal is to bound the probability that $|f(S)-f(\mathcal D^n)|$ is large by some (hopefully) easy-to-analyze quantity. To intuit our result, consider for example what we get by a simple application of Markov's Inequality: \begin{equation} \Pr_{S\sim\mathcal D^n}[|f(S)-f(\mathcal D^n)|>\lambda]\leq \frac{1}{\lambda}\cdot \ex{S\sim\mathcal D^n}{ \mathbbm{1}_{|f(S)-f(\mathcal D^n)|>\lambda} \cdot |f(S)-f(\mathcal D^n)| }. 
\label{eq:markov} \end{equation} We show that using differential privacy we can replace the term $|f(S)-f(\mathcal D^n)|$ in the expectation with $|f(S\cup\{x\})-f(S\cup\{y\})|$, which can sometimes be easier to analyze. Specifically, we show the following. \begin{theorem}[part 1] \label{thm:dpGeneralizationIntro} Let $\mathcal D$ be a distribution over a domain $X$, let $f:X^n\rightarrow\mathbb{R}$, and let $\Delta,\lambda\in\mathbb{R}^{\geq0}$ be s.t.\ for every $1\leq i\leq n$ it holds that \begin{eqnarray} \ex{\substack{S\sim\mathcal D^{n}\\z\sim\mathcal D}}{\mathbbm{1}_{\left|f(S) - f\left(S^{(i\leftarrow z)}\right)\right|>\lambda} \cdot\left|f(S) - f\left(S^{(i\leftarrow z)}\right)\right| }\leq\Delta, \label{eq:dpGeneralizationIntro_part1} \end{eqnarray} where $S^{(i\leftarrow z)}$ is the same as $S$ except that the $i^{\text{th}}$ element is replaced with $z$. Then for every $\varepsilon>0$ we have that $$ \Pr_{S\sim\mathcal D^n}\left[ | f(S) - f(\mathcal D^n) | \geq 18\varepsilon\lambda n \right] < \frac{14\Delta}{\varepsilon\lambda}, $$ provided that $n\geq O\left(\frac{1}{\varepsilon\cdot\min\{1,\varepsilon\}}\log(\frac{ \lambda \cdot \min\{1,\varepsilon\} }{\Delta})\right)$. \end{theorem} Observe that for a $\lambda$-sensitive function $f$, we have that the expectation in Equation~(\ref{eq:dpGeneralizationIntro_part1}) is zero, so the statement holds for every choice of $\beta>0$ and $n\geq O\left(\frac{1}{\varepsilon^2}\log(\frac{1}{\beta})\right)$, resulting in McDiarmid's Inequality (Equation~(\ref{eq:SU17})). Intuitively, Theorem~\ref{thm:dpGeneralizationIntro} states that in order to obtain a high probability bound on $| f(S) - f(\mathcal D^n) |$ it suffices to analyze the ``expectation of the tail'' of $\left|f(S) - f\left(S^{(i\leftarrow z)}\right)\right|$, as a function of the starting point $\lambda$. 
We also show that the above bound can be improved whenever the ``expectation of the head'' of $\left|f(S) - f\left(S^{(i\leftarrow z)}\right)\right|$ is smaller than $\lambda$. Specifically, { \renewcommand{\thetheorem}{\ref{thm:dpGeneralizationIntro}} \begin{theorem}[part 2] If, in addition to~(\ref{eq:dpGeneralizationIntro_part1}), $\exists \tau\leq\lambda$ s.t.\ for every $S\in X^n$ and every $1\leq i\leq n$ we have \begin{eqnarray} \ex{\substack{y,z\sim\mathcal D}}{\mathbbm{1}_{\left|f(S^{(i\leftarrow y)}) - f\left(S^{(i\leftarrow z)}\right)\right|\leq\lambda} \cdot\left|f(S^{(i\leftarrow y)}) - f\left(S^{(i\leftarrow z)}\right)\right| }\leq\tau, \label{eq:dpGeneralizationIntro_part2} \end{eqnarray} then for every $\varepsilon>0$ we have that $$ \Pr_{S\sim\mathcal D^n}\left[ | f(S) - f(\mathcal D^n) | \geq 18\varepsilon\tau n \right] < \frac{14\Delta}{\varepsilon\tau}, $$ provided that $n\geq O\left(\frac{\lambda}{\varepsilon\cdot\min\{1,\varepsilon\} \tau}\log(\frac{\tau \cdot \min\{1,\varepsilon\} }{\Delta})\right)$. \end{theorem} \addtocounter{theorem}{-1} } Observe that while the expectation in~(\ref{eq:dpGeneralizationIntro_part1}) is over the entire sample $S$ (as well as the replacement point), in requirement~(\ref{eq:dpGeneralizationIntro_part2}) the sample $S$ is fixed. We do not know if this ``worst-case'' restriction is necessary. In Section~\ref{sec:applications} we demonstrate how Theorem~\ref{thm:dpGeneralizationIntro} can be used in proving a variety of concentration bounds, such as a high probability bound on $|f(S)-f(\mathcal D^n)|$ for Lipschitz functions. In addition we show that Theorem~\ref{thm:dpGeneralizationIntro} can be used to bound the probability that the number of triangles in a random graph significantly exceeds the expectation. \section{Preliminaries} \subsection{Differential Privacy} Our results rely on a number of basic facts about differential privacy. 
An algorithm operating on databases is said to preserve differential privacy if a change of a single record of the database does not significantly change the output distribution of the algorithm. Formally: \begin{definition} Databases $S\in X^n$ and $S'\in X^n$ over a domain $X$ are called \emph{neighboring} if they differ in exactly one entry. \end{definition} \begin{definition}[Differential Privacy~\cite{DMNS06,DKMMN06}] A randomized algorithm $\mathcal A : X^n\rightarrow Y$ is {\em $(\varepsilon,\delta)$-differentially private} if for all neighboring databases $S,S'\in X^n$, and for every set of outputs $T\subseteq Y$, we have $$\Pr[\mathcal A(S)\in T]\leq e^{\varepsilon}\cdot \Pr[\mathcal A(S')\in T]+\delta.$$ The probability is taken over the random coins of $\mathcal A$. \end{definition} \subsection{The Exponential Mechanism} We next describe the exponential mechanism of McSherry and Talwar~\cite{McSherryT07}. \begin{definition}[Sensitivity] The \emph{sensitivity} (or {\em global sensitivity}) of a function $f:X^n \rightarrow \mathbb{R}$ is the smallest $\lambda$ such that for every neighboring $S,S'\in X^n$, we have $|f(S)-f(S')|\leq \lambda$. We use the term ``$\lambda$-sensitive function'' to mean a function of sensitivity $\le \lambda$. \end{definition} Let $X$ be a domain and $H$ a set of solutions. Given a database $S\in X^*$, the exponential mechanism privately chooses a ``good'' solution $h$ out of the possible set of solutions $H$. This ``goodness'' is quantified using a \emph{quality function} that matches solutions to scores. \begin{definition}[Quality function] A \emph{quality function} is a function $q:X^*\times H \rightarrow\mathbb{R}$ that maps a database $S\in X^*$ and a solution $h\in H$ to a real number, identified as the score of the solution $h$ w.r.t.\ the database $S$. \end{definition} Given a quality function $q$ and a database $S$, the goal is to choose a solution $h$ approximately maximizing $q(S,h)$. 
The exponential mechanism chooses a solution probabilistically, where the probability mass that is assigned to each solution $h$ increases exponentially with its quality $q(S,h)$: \begin{center} \noindent\fbox{ \parbox{.97\columnwidth}{ The Exponential Mechanism\\ {\bf Input:} privacy parameter $\varepsilon>0$, finite solution set $H$, database $S\in X^n$, and a $\lambda$-sensitive quality function $q$. \begin{enumerate} \item Randomly choose $h \in H$ with probability $\frac{\exp\left(\frac{\varepsilon}{2\lambda} \cdot q(S,h) \right)}{\sum_{h'\in H}\exp\left(\frac{\varepsilon}{2\lambda} \cdot q(S,h') \right)}.$ \item Output $h$. \end{enumerate} }} \end{center} \begin{theorem}[Properties of the exponential mechanism]\label{prop:expMech} (i) The exponential mechanism is $(\varepsilon,0)$-differentially private. (ii) Let $Opt(S)\triangleq\max_{f\in H}\{q(S,f)\}$ and $\Delta>0$. The exponential mechanism outputs a solution $h$ such that $q(S,h)\leq(Opt(S) - \Delta)$ with probability at most $|H| \cdot \exp\left(-\frac{\varepsilon \Delta}{ 2 \lambda}\right)$. \end{theorem} \subsection{Concentration Bounds} Let $X_1,\dots,X_n$ be independent random variables where $\Pr[X_i=1]=p$ and $\Pr[X_i=0]=1-p$ for some $0<p<1$. Clearly, $\operatorname*{\mathbb{E}}[\sum_{i=1}^n{X_i}]=pn$. Chernoff and Hoeffding bounds show that the sum is concentrated around this expected value: \begin{align*} &\Pr\left[\sum_{i=1}^n{X_i}>(1+\delta)pn\right]\leq \exp\left(-pn\delta^2/3\right) \;\;\text{ for } 0<\delta\leq 1,\\ &\Pr\left[\sum_{i=1}^n{X_i}<(1-\delta)pn\right]\leq \exp\left(-pn\delta^2/2\right) \;\;\text{ for } 0<\delta<1,\\ &\Pr\left[\left|\sum_{i=1}^n{X_i}-pn\right|>\delta\right]\leq 2\exp\left(-2\delta^2/n\right) \;\,\;\;\text{ for } \delta\geq0. \end{align*} The first two inequalities are known as the multiplicative Chernoff bounds~\cite{chern}, and the last inequality is known as the Hoeffding bound~\cite{hoeff}. 
The next theorem states that the Chernoff bound above is tight up to constant factors in the exponent. \begin{theorem}[Tightness of Chernoff bound~\cite{KleinY15}]\label{thm:chernoffTight} Let $0<p,\delta\leq\frac{1}{2}$, and let $n\geq\frac{3}{\delta^2 p}$. Let $X_1,\dots,X_n$ be independent random variables where $\Pr[X_i=1]=p$ and $\Pr[X_i=0]=1-p$. Then, \begin{align*} &\Pr\left[\sum_{i=1}^n{X_i}\leq(1-\delta)pn\right]\geq\exp(-9\delta^2pn),\\ &\Pr\left[\sum_{i=1}^n{X_i}\geq(1+\delta)pn\right]\geq\exp(-9\delta^2pn). \end{align*} \end{theorem} \section{Concentration Bounds via Differential Privacy} In this section we show how the concept of differential privacy can be used to derive conditions under which a function $f$ and a distribution $\mathcal D$ satisfy that $|f(S)-f(\mathcal D^n)|$ is small w.h.p.\ when $S\sim\mathcal D^n$. Our proof technique builds on the proof of Bassily et al.~\cite{BassilyNSSSU16} for the generalization properties of a differentially private algorithm that outputs a low-sensitivity function. The proof consists of two steps: \begin{enumerate} \item Let $S_1,\dots,S_T$ be $T$ independent samples from $\mathcal D^n$ (each containing $n$ i.i.d.\ samples from $\mathcal D$). Let $\mathcal A$ be a selection procedure that, given $S_1,\dots,S_T$, chooses an index $t\in[T]$ with the goal of maximizing $|f(S_t)-f(\mathcal D^n)|$. We show that if $\mathcal A$ satisfies (a variant of) differential privacy then, under some conditions on the function $f$ and the distribution $\mathcal D$, the expectation of $|f(S_t)-f(\mathcal D^n)|$ is bounded. That is, if $\mathcal A$ is differentially private, then its ability to identify a ``bad'' index $t$ with large $|f(S_t)-f(\mathcal D^n)|$ is limited. \item We show that if $|f(S)-f(\mathcal D^n)|$ is large w.h.p.\ over $S\sim\mathcal D^n$, then it is possible to construct an algorithm $\mathcal A$ satisfying (a variant of) differential privacy that contradicts our expectation bound. 
\end{enumerate} We begin with a few definitions. \subsection{Definitions} \paragraph{Notations.} We use $\vec{S}\in (X^n)^T$ to denote a {\em multi}-database consisting of $T$ databases of size $n$ over $X$. Given a distribution $\mathcal D$ over a domain $X$ we write $\vec{S}\sim\mathcal D^{nT}$ to denote a multi-database sampled i.i.d.\ from $\mathcal D$. \begin{definition} Fix a function $f:X^n\rightarrow\mathbb{R}$ mapping databases of size $n$ over a domain $X$ to the reals. We say that two multi-databases $\vec{S}=(S_1,\dots,S_T)\in(X^n)^T$ and $\vec{S}'=(S'_1,\dots,S'_T)\in(X^n)^T$ are {\em $(f,\lambda)$-neighboring} if for all $1\leq i\leq T$ we have that $$|f(S_i)- f(S'_i)|\leq\lambda.$$ \end{definition} \begin{definition}[$(\varepsilon,(f,\lambda))$-differential privacy] Let $M:(X^n)^T\rightarrow Y$ be a randomized algorithm that operates on $T$ databases of size $n$ from $X$. For a function $f:X^n\rightarrow\mathbb{R}$ and parameters $\varepsilon,\lambda\geq0$, we say that $M$ is {\em $(\varepsilon,(f,\lambda))$-differentially private} if for every set of outputs $F\subseteq Y$ and for every $(f,\lambda)$-neighboring $\vec{S},\vec{S}'\in(X^n)^T$ it holds that $$ \Pr[M(\vec{S})\in F]\leq e^\varepsilon \cdot \Pr[M(\vec{S}')\in F]. $$ \end{definition} \begin{claim}\label{claim:dpExpectation} Fix a function $f:X^n\rightarrow\mathbb{R}$ and parameters $\varepsilon\leq1$ and $\lambda\geq0$. If $M:(X^n)^T\rightarrow Y$ is $(\varepsilon,(f,\lambda))$-differentially private then for every $(f,\lambda)$-neighboring databases $\vec{S},\vec{S}'\in(X^n)^T$ and every function $h:Y\rightarrow\mathbb{R}$ we have that $$ \ex{y\leftarrow M(\vec{S})}{h(y)} \leq \ex{y\leftarrow M(\vec{S'})}{h(y)} \;\;+\;\; 4\varepsilon\cdot \ex{y\leftarrow M(\vec{S'})}{|h(y)|}. $$ \end{claim} Claim~\ref{claim:dpExpectation} follows from basic arguments in differential privacy. The proof appears in the appendix for completeness. 
\subsection{Multi Sample Expectation Bound} The proof of Theorem~\ref{thm:dpGeneralizationIntro} contains somewhat unwieldy notation. For readability, we present here a restricted version of the theorem, tailored to the case where the function $f$ computes the sample sum, which highlights most of the ideas in the proof. The full proof of Theorem~\ref{thm:dpGeneralizationIntro} is included in the appendix. \paragraph{Notation.} Given a sample $S\in X^n$, we use $\bar f(S)$ to denote the sample sum, i.e., $\bar f(S)=\sum_{x\in S}x$. \begin{lem}[Simplified Expectation Bound] \label{lem:simpleExpectationBound} Let $\mathcal D$ be a distribution over a domain $X$ such that $\ex{x\sim\mathcal D}{x}=0$ and $\ex{x\sim\mathcal D}{\mathbbm{1}_{\left\{|x|>1\right\}} \cdot|x| }\leq\Delta$. Fix $0<\varepsilon\leq1$, and let $\mathcal A : (X^{n})^{T} \to [T]$ be an $(\varepsilon,(\bar f,1))$-differentially private algorithm that operates on $T$ databases of size $n$ from $X$, and outputs an index $1\leq t \leq T$. Then $$ \left|\ex{\substack{\vec{S}\sim\mathcal{D}^{nT} \\ t \leftarrow \mathcal A(\vec{S})}}{ \bar f(S_t) } \right| \leq 4\varepsilon n + 2 n T \Delta. $$ \end{lem} \begin{proof} We denote $\vec{S}=(S_1,\dots,S_T)$, where every $S_t$ is itself a vector $S_t=(x_{t,1},\dots,x_{t,n})$. We have: \begin{align} \ex{\substack{\vec{S}\sim\mathcal{D}^{nT} \\ t \leftarrow \mathcal A(\vec{S})}}{ \bar f(S_t) } & = \sum_{i\in[n]}\ex{\vec{S}\sim\mathcal D^{nT}}{\ex{t\leftarrow\mathcal A(\vec{S})}{x_{t,i}}} \nonumber\\ &= \sum_{i\in[n]}\ex{\vec{S}\sim\mathcal D^{nT}}{\mathbbm{1}\left\{ \max_{m\in[T]}|x_{m,i}|\leq1 \right\}\cdot\ex{t\leftarrow\mathcal A(\vec{S})}{x_{t,i}} + \mathbbm{1}\left\{ \max_{m\in[T]}|x_{m,i}|>1 \right\}\cdot\ex{t\leftarrow\mathcal A(\vec{S})}{x_{t,i}}}. \quad \label{eq:warmup1} \end{align} In the case where $\max_{m\in[T]}|x_{m,i}|>1$ we replace the expectation over $t\leftarrow\mathcal A(\vec{S})$ with the deterministic choice for the maximal $t$ (this makes the expression larger). 
When $\max_{m\in[T]}|x_{m,i}|\leq1$ we can use the privacy guarantees of algorithm $\mathcal A$. Given a multi-sample $\vec{S}\in(X^n)^T$ we use $\vec{S}_{-i}$ to denote a multi-sample identical to $\vec{S}$, except that the $i^{\text{th}}$ element of {\em every} sub-sample is replaced with 0. Using Claim~\ref{claim:dpExpectation} we get \begin{align} (\ref{eq:warmup1}) \; &\leq \sum_{i\in[n]}\ex{\vec{S}\sim\mathcal D^{nT}}{\mathbbm{1}\left\{ \max_{m\in[T]}|x_{m,i}|\leq1 \right\}\cdot\left(\ex{t\leftarrow\mathcal A(\vec{S}_{-i})}{x_{t,i}} +4\varepsilon \ex{t\leftarrow\mathcal A(\vec{S}_{-i})}{|x_{t,i}|} \right) + \mathbbm{1}\left\{ \max_{m\in[T]}|x_{m,i}|>1 \right\}\cdot\max_{m\in[T]}|x_{m,i}|}\nonumber\\ &\leq 4\varepsilon n\;+\; \sum_{i\in[n]}\ex{\vec{S}\sim\mathcal D^{nT}}{\mathbbm{1}\left\{ \max_{m\in[T]}|x_{m,i}|\leq1 \right\}\cdot\ex{t\leftarrow\mathcal A(\vec{S}_{-i})}{x_{t,i}} + \mathbbm{1}\left\{ \max_{m\in[T]}|x_{m,i}|>1 \right\}\cdot\max_{m\in[T]}|x_{m,i}|} \label{eq:warmup2} \end{align} We next want to remove the first indicator function. This is useful as without it, the expectation of a fresh example from $\mathcal D$ is zero. To that end we add and subtract the expression $\mathbbm{1}\left\{ \max_{m\in[T]}|x_{m,i}|>1 \right\}\cdot\ex{t\leftarrow\mathcal A(\vec{S}_{-i})}{x_{t,i}}$ to get (after replacing again $\operatorname*{\mathbb{E}}_t$ with $\max_t$) \begin{align*} (\ref{eq:warmup2})\; &\leq 4\varepsilon n\;+\; \sum_{i\in[n]}\ex{\vec{S}\sim\mathcal D^{nT}}{\ex{t\leftarrow\mathcal A(\vec{S}_{-i})}{x_{t,i}} \;+\; 2\cdot \mathbbm{1}\left\{ \max_{m\in[T]}|x_{m,i}|>1 \right\}\cdot\max_{m\in[T]}|x_{m,i}|}\\ &\leq 4\varepsilon n\;+\;2 \sum_{i\in[n]} \sum_{m\in[T]} \ex{\vec{S}\sim\mathcal D^{nT}}{ \mathbbm{1}\left\{ |x_{m,i}|>1 \right\}\cdot|x_{m,i}|}\\ &\leq 4\varepsilon n\;+\;2nT\Delta. 
\end{align*} \end{proof} \subsection{Multi Sample Amplification} \begin{theorem}[Simplified High Probability Bound] \label{thm:simplifiedDpGeneralization} Let $\mathcal D$ be a distribution over a domain $X$ such that $\ex{x\sim\mathcal D}{x}=0$. Let $\Delta\geq0$ be such that $\ex{x\sim\mathcal D}{\mathbbm{1}_{\left\{|x|>1\right\}} \cdot|x| }\leq\Delta$. Fix $1\geq \varepsilon \geq \sqrt{\frac{1}{n}\ln(2/\Delta)}$. We have that $$ \Pr_{S\sim\mathcal D^n}\left[ | \bar f(S) | \geq 30\varepsilon n \right] < \frac{\Delta}{\varepsilon}. $$ \end{theorem} We present the proof idea of the theorem. Any informalities made hereafter are removed in Section~\ref{sec:fullProof}. \begin{proof}[Proof sketch] We only analyze the probability that $\bar f(S)$ is large. The analysis is symmetric for when $\bar f(S)$ is small. Assume towards contradiction that with probability at least $\frac{\Delta}{2\varepsilon}$ we have that $ \bar f(S) \geq 30\varepsilon n$. We now construct the following algorithm $\mathcal B$ that contradicts our expectation bound. \begin{algorithm}[H] \caption{$\mathcal B$}\addcontentsline{lof}{figure}{Algorithm $\mathcal B$} {\bf Input:} $T$ databases of size $n$ each: $\vec{S}=(S_1,\dots,S_T)$, where $T\triangleq\left\lfloor 2\varepsilon/\Delta \right\rfloor$. \begin{enumerate}[rightmargin=10pt,itemsep=1pt,topsep=4pt] \item For $i\in[T]$, define $q(\vec{S},i) = \bar f(S_i) $. \item Sample $t^*\in [T]$ with probability proportional to $\exp\left(\frac{\varepsilon}{2} q(\vec{S},t^*)\right)$. \end{enumerate} \textbf{Output:} $t^*.$ \end{algorithm} The fact that algorithm $\mathcal B$ is $(\varepsilon,(\bar f,1))$-differentially private follows from the standard analysis of the Exponential Mechanism of McSherry and Talwar~\cite{McSherryT07}. The analysis appears in the full version of this proof (Section~\ref{sec:fullProof}) for completeness. Now consider applying $\mathcal B$ on databases $\vec{S} = (S_1,\dots,S_T)$ containing i.i.d.\ samples from $\mathcal D$. 
By our assumption on $\mathcal D$, for every $t$ we have that $\bar f(S_t) \geq 30\varepsilon n$ with probability at least $\frac{\Delta}{2\varepsilon}$. By our choice of $T = \left\lfloor 2\varepsilon/\Delta \right\rfloor$, we therefore get $$\Pr_{\vec{S}\sim\mathcal D^{nT}}\left[{\max_{t \in [T]} \left\{ \bar f(S_t) \right\} \geq 30\varepsilon n }\right] \geq 1 - \left( 1 - \frac{\Delta}{2\varepsilon} \right)^T \geq \frac12.$$ The probability is taken over the random choice of the examples in $\vec{S}$ according to $\mathcal D$. Had it been the case that the random variable $\max_{t \in [T]} \left\{ \bar f(S_t) \right\}$ is non-negative, we could have used Markov's inequality to get \begin{equation}\label{eq:warmupLargeError} \operatorname*{\mathbb{E}}_{\vec{S}\sim\mathcal D^{nT}}\left[\max_{t \in [T]} \left\{ q(\vec{S},t) \right\}\right] = \operatorname*{\mathbb{E}}_{\vec{S}\sim\mathcal D^{nT}}\left[\max_{t \in [T]} \left\{ \bar f(S_t) \right\}\right] \geq 15\varepsilon n. \end{equation} Even though it is not the case that $\max_{t \in [T]} \left\{ \bar f(S_t) \right\}$ is non-negative, we now proceed as if Equation~(\ref{eq:warmupLargeError}) holds. As described in the full version of this proof (Section~\ref{sec:fullProof}), this technical issue has an easy fix. So, in expectation, $\max_{t \in [T]} \left(q(\vec{S},t)\right)$ is large. In order to contradict the expectation bound of Theorem~\ref{thm:dpGeneralization}, we need to show that this is also the case for the index $t^*$ that is sampled on Step~2. To that end, we now use the following technical claim, stating that the expected quality of a solution sampled as in Step~2 is high. \begin{claim}[e.g.,~\cite{BassilyNSSSU16}] \label{claim:EMutility} Let $H$ be a finite set, $h : H \to \mathbb{R}$ a function, and $\eta >0$. Define a random variable $Y$ on $H$ by $\Pr[Y=y] = \exp(\eta h(y))/C$, where $C= \sum_{y \in H} \exp(\eta h(y))$. Then $\ex{}{h(Y)} \geq \max_{y \in H} h(y) - \frac{1}{\eta}\ln |H|$. 
\end{claim} For every fixed $\vec{S}$, we can apply Claim~\ref{claim:EMutility} with $h(t) = q(\vec{S},t)$ and $\eta = \frac{\varepsilon}{2}$ to get \begin{equation*} \operatorname*{\mathbb{E}}_{t^*\in_R [T]}[q(\vec{S},t^*)] =\operatorname*{\mathbb{E}}_{t^*\in_R [T]}\Big[ \bar f(S_{t^*}) \Big] \geq \max_{t \in [T]} \left\{ \bar f(S_t) \right\} - \frac{2}{\varepsilon} \ln(T). \end{equation*} Taking the expectation also over $\vec{S}\sim\mathcal D^{nT}$ we get that \begin{eqnarray*} \operatorname*{\mathbb{E}}_{\substack{\vec{S}\sim\mathcal D^{nT} \\ t^*\leftarrow\mathcal B\left(\vec{S}\right)}}\Big[\bar f(S_{t^*})\Big] &\geq& \operatorname*{\mathbb{E}}_{\vec{S}\sim\mathcal D^{nT}}\left[\max_{t \in [T]} \left\{ \bar f(S_t) \right\}\right] - \frac{2}{\varepsilon} \ln(T)\\ &\geq& 15\varepsilon n - \frac{2}{\varepsilon} \ln(T). \end{eqnarray*} This contradicts Theorem~\ref{thm:dpGeneralization} whenever $\varepsilon>\sqrt{\frac{1}{n}\ln(T)}=\sqrt{\frac{1}{n}\ln(2\varepsilon/\Delta)}$. \end{proof} \section{Applications}\label{sec:applications} In this section we demonstrate how Theorem~\ref{thm:dpGeneralizationIntro} can be used in proving a variety of concentration bounds. \subsection{Example: Subgaussian Diameter and Beyond} Recall that for a low-sensitivity function $f$, one could use McDiarmid's Inequality to obtain a high probability bound on the difference $|f(S)-f(\mathcal D^n)|$, and this bound is {\em distribution-independent}. That is, the bound does not depend on $\mathcal D$. Over the last few years, there has been some work on providing distribution-dependent refinements to McDiarmid's Inequality, that hold even for functions with high worst-case sensitivity, but with low ``average-case'' sensitivity, where ``average'' is with respect to the underlying distribution $\mathcal D$. The following is one such refinement, by Kontorovich~\cite{Kontorovich14}. 
\begin{definition}[\cite{Kontorovich14}] Let $\mathcal D$ be a distribution over a domain $X$, and let $\rho:X^2\rightarrow\mathbb{R}^{\geq0}$. The {\em symmetrized distance} of $(X,\rho,\mathcal D)$ is the random variable $\Xi= \xi\cdot \rho(x,x')$ where $x,x'\sim\mathcal D$ are independent and $\xi$ is uniform on $\{\pm1\}$ independent of $x,x'$. The {\em subgaussian diameter} of $(X,\rho,\mathcal D)$, denoted $\Delta_{\rm{SG}}(X,\rho,\mathcal D)$, is the smallest $\sigma\in\mathbb{R}^{\geq0}$ such that $$ \operatorname*{\mathbb{E}}\left[ e^{\lambda \Xi} \right] \leq e^{\sigma^2\lambda^2/2}, \;\;\; \forall\lambda\in\mathbb{R}. $$ \end{definition} In~\cite{Kontorovich14}, Kontorovich showed the following theorem: \begin{theorem}[\cite{Kontorovich14}, informal]\label{thm:Kontorovich} Let $f:X^n\rightarrow\mathbb{R}$ be a function mapping databases of size $n$ over a domain $X$ to the reals. Assume that there exists a function $\rho:X^2\rightarrow\mathbb{R}^{\geq0}$ s.t.\ for every $i\in[n]$, every $S\in X^n$, and every $y,z\in X$ we have that $$ \left|f\left(S^{(i\leftarrow y)}\right) - f\left(S^{(i\leftarrow z)}\right)\right|\leq\rho(y,z), $$ where $S^{(i\leftarrow x)}$ is the same as $S$ except that the $i^{\text{th}}$ element is replaced with $x$. Then, $$ \Pr_{S\sim\mathcal D^n}[|f(S)-f(\mathcal D^n)|\geq t]\leq 2\exp\left(- \frac{t^2}{2 n\cdot \Delta_{\rm{SG}}^2(X,\rho,\mathcal D) } \right). $$ \end{theorem} Informally, using the above theorem it is possible to obtain concentration bounds for functions with unbounded sensitivity (in worst case), provided that the sensitivity (as a random variable) is subgaussian. In this section we show that our result implies a similar version of this theorem. 
While the bound we obtain is weaker than Theorem~\ref{thm:Kontorovich}, our techniques can be extended to obtain concentration bounds even in cases where the sensitivity is {\em not} subgaussian (that is, in cases where the subgaussian diameter is unbounded, and hence, Theorem~\ref{thm:Kontorovich} could not be applied). Let us denote $\sigma=\Delta_{\rm{SG}}(X,\rho,\mathcal D)$. Now for $t\geq0$, \begin{align} \Pr_{x,y\sim\mathcal D}[\rho(x,y)\geq t] &\leq 2 \Pr_{\substack{x,y\sim\mathcal D\\\xi\in\{\pm1\}}}[\xi\cdot\rho(x,y)\geq t] = 2 \Pr[\Xi\geq t] = 2 \Pr[e^{\frac{t}{\sigma^2}\cdot\Xi}\geq e^{\frac{t}{\sigma^2}\cdot t}] \nonumber\\ &\leq 2 e^{-\frac{t^2}{\sigma^2}} \cdot \operatorname*{\mathbb{E}}\left[ e^{\frac{t}{\sigma^2}\cdot\Xi} \right] \leq 2 e^{-\frac{t^2}{\sigma^2}} \cdot e^{\frac{\sigma^2}{2}\cdot \frac{t^2}{\sigma^4}} = 2\exp\left( -\frac{t^2}{2\sigma^2} \right). \end{align} So, \begin{align} & \ex{\substack{S\sim\mathcal D^{n}\\x'\sim\mathcal D}}{\mathbbm{1}\left\{\left|f(S) - f\left(S^{(i\leftarrow x')}\right)\right|>\lambda\right\} \cdot\left|f(S) - f\left(S^{(i\leftarrow x')}\right)\right| } \nonumber\\ &\qquad \leq\ex{x,y\sim\mathcal D}{\mathbbm{1}\left\{\rho(x,y)>\lambda\right\} \cdot \rho(x,y) } \nonumber\\ &\qquad = \int_0^\lambda \Pr_{x,y\sim\mathcal D}\left[\mathbbm{1}\left\{\rho(x,y)>\lambda\right\} \cdot \rho(x,y)\geq t\right] {\rm{d}}t \;+\; \int_\lambda^\infty \Pr_{x,y\sim\mathcal D}\left[\mathbbm{1}\left\{\rho(x,y)>\lambda\right\} \cdot \rho(x,y)\geq t\right] {\rm{d}}t \nonumber\\ &\qquad = \int_0^\lambda \Pr_{x,y\sim\mathcal D}\left[ \rho(x,y)\geq \lambda \right] {\rm{d}}t \;+\; \int_\lambda^\infty \Pr_{x,y\sim\mathcal D}\left[ \rho(x,y)\geq t\right] {\rm{d}}t \nonumber\\ &\qquad = \lambda \cdot \Pr_{x,y\sim\mathcal D}\left[ \rho(x,y)\geq \lambda \right] \;+\; \int_\lambda^\infty \Pr_{x,y\sim\mathcal D}\left[ \rho(x,y)\geq t\right] {\rm{d}}t \nonumber\\ &\qquad \leq \lambda\cdot 2\exp\left( -\frac{\lambda^2}{2\sigma^2} \right) \;+\; 
\int_\lambda^\infty 2\exp\left( -\frac{t^2}{2\sigma^2} \right) {\rm{d}}t \nonumber\\ &\qquad = \lambda\cdot 2\exp\left( -\frac{\lambda^2}{2\sigma^2} \right) \;+\; \sqrt{2\pi} \sigma \cdot {\rm{erfc}}\left( \frac{\lambda}{\sqrt{2}\sigma} \right) \nonumber\\ &\qquad \leq \lambda\cdot 2\exp\left( -\frac{\lambda^2}{2\sigma^2} \right) \;+\; \sqrt{2\pi} \sigma \cdot \exp\left(- \frac{\lambda^2}{2\sigma^2} \right) \leq 3(\lambda+\sigma)\cdot \exp\left(- \frac{\lambda^2}{2\sigma^2} \right) \triangleq \Delta. \nonumber \end{align} In order to apply Theorem~\ref{thm:dpGeneralizationIntro} we need to ensure that $n\geq O\left(\frac{1}{\varepsilon\cdot\min\{1,\varepsilon\}}\ln\left(\frac{\lambda\cdot\min\{1,\varepsilon\}}{\Delta}\right)\right)$. For our choice of $\Delta$, it suffices to set $\varepsilon_0= \Theta \left(\frac{\lambda}{\sqrt{n}\sigma}\right)$, assuming that $\frac{\lambda}{\sqrt{n}\sigma}\leq1$. Otherwise, if $\frac{\lambda}{\sqrt{n}\sigma}>1$, we will choose $\varepsilon_1=\Theta\left(\frac{\lambda^2}{n\sigma^2}\right)$. Plugging $(\varepsilon_0,\Delta)$ or $(\varepsilon_1,\Delta)$ into Theorem~\ref{thm:dpGeneralizationIntro}, and simplifying, we get \begin{eqnarray} \Pr_{S\sim\mathcal D}\left[ |f(S)-f(\mathcal D^n)| \geq t \right] \leq \left\{ \begin{array}{ccl} e^{-\Omega\left(\frac{t}{\sqrt{n}\sigma}\right)} & ,& t \leq \sigma\cdot n^{1.5}\\[0.5em] e^{-\Omega\left(\frac{t^{2/3}}{\sigma^{2/3}}\right)} & ,& t > \sigma\cdot n^{1.5} \\ \end{array} \right. \label{eq:KontorovichSimilar} \end{eqnarray} Clearly, the bound of Theorem~\ref{thm:Kontorovich} is stronger. Note, however, that the only assumption we used here is that $\int_\lambda^\infty \Pr_{x,y\sim\mathcal D}[\rho(x,y)\geq t] {\rm{d}}t$ is small. Hence, as the following section shows, this argument could be extended to obtain concentration bounds even when $\Delta_{\rm{SG}}(X,\rho,\mathcal D)$ is unbounded. 
We remark that Inequality~\ref{eq:KontorovichSimilar} can be slightly improved by using part~2 of Theorem~\ref{thm:dpGeneralizationIntro}. This will be illustrated in the following section. \subsection{Example: Concentration Under Infinite Variance} Let $f:X^n\rightarrow\mathbb{R}$ be a function mapping databases of size $n$ over a domain $X$ to the reals. Assume that there exists a function $\rho:X^2\rightarrow\mathbb{R}^{\geq0}$ s.t.\ for every $i\in[n]$, every $S\in X^n$, and every $y,z\in X$ we have that $$ \left|f\left(S^{(i\leftarrow y)}\right) - f\left(S^{(i\leftarrow z)}\right)\right|\leq\rho(y,z), $$ where $S^{(i\leftarrow x)}$ is the same as $S$ except that the $i^{\text{th}}$ element is replaced with $x$.\\ As stated in the previous section, the results of~\cite{Kontorovich14} can be used to obtain a high probability bound on $|f(S)-f\left(\mathcal D^n\right)|$ whenever $\Pr_{x,y\sim\mathcal D}[\rho(x,y)\geq t]\leq\exp\left( -t^2/\sigma^2 \right)$ for some $\sigma>0$. In contrast, our bound can be used whenever $\int_\lambda^\infty \Pr_{x,y\sim\mathcal D}[\rho(x,y)\geq t] {\rm{d}}t$ is finite. In particular, we now use it to obtain a concentration bound for a case where the probability distribution of $\rho(x,y)$ is heavy tailed, and in fact, has infinite variance. Specifically, assume that all we know on $\rho(x,y)$ is that $\Pr[\rho(x,y)\geq t]\leq 1/t^2$ for every $t\geq 1$ (this is a special case of the {\em Pareto distribution}, with infinite variance). Let $\lambda\geq1$. 
We calculate: \begin{align*} & \ex{\substack{S\sim\mathcal D^{n}\\x'\sim\mathcal D}}{\mathbbm{1}\left\{\left|f(S) - f\left(S^{(i\leftarrow x')}\right)\right|>\lambda\right\} \cdot\left|f(S) - f\left(S^{(i\leftarrow x')}\right)\right| } \\ &\qquad \leq\ex{x,y\sim\mathcal D}{\mathbbm{1}\left\{\rho(x,y)>\lambda\right\} \cdot \rho(x,y) } \\ &\qquad = \int_0^\lambda \Pr_{x,y\sim\mathcal D}\left[\mathbbm{1}\left\{\rho(x,y)>\lambda\right\} \cdot \rho(x,y)\geq t\right] {\rm{d}}t \;+\; \int_\lambda^\infty \Pr_{x,y\sim\mathcal D}\left[\mathbbm{1}\left\{\rho(x,y)>\lambda\right\} \cdot \rho(x,y)\geq t\right] {\rm{d}}t \\ &\qquad = \int_0^\lambda \Pr_{x,y\sim\mathcal D}\left[ \rho(x,y)\geq \lambda \right] {\rm{d}}t \;+\; \int_\lambda^\infty \Pr_{x,y\sim\mathcal D}\left[ \rho(x,y)\geq t\right] {\rm{d}}t \\ &\qquad = \lambda \cdot \Pr_{x,y\sim\mathcal D}\left[ \rho(x,y)\geq \lambda \right] \;+\; \int_\lambda^\infty \Pr_{x,y\sim\mathcal D}\left[ \rho(x,y)\geq t\right] {\rm{d}}t \\ &\qquad \leq \lambda\frac{1}{\lambda^2} \;+\; \int_\lambda^\infty \frac{1}{t^2} {\rm{d}}t =\frac{2}{\lambda} \triangleq \Delta. \end{align*} In order to apply Theorem~\ref{thm:dpGeneralizationIntro} we need to ensure that $n\geq O\left( \frac{1}{\varepsilon\cdot\min\{1,\varepsilon\}}\ln\left(\frac{\lambda\cdot\min\{1,\varepsilon\}}{\Delta}+1\right)\right)$. Assuming that $n\geq \ln(\lambda)$, with our choice of $\Delta$ it suffices to set $\varepsilon=\Theta\left(\sqrt{\frac{1}{n}\ln(\lambda)}\right)$. Plugging $\varepsilon$ and $\Delta$ into Theorem~\ref{thm:dpGeneralizationIntro}, and simplifying, we get \begin{eqnarray} \Pr_{S\sim\mathcal D}\left[ |f(S)-f(\mathcal D^n)| \geq t \right] \leq \tilde{O}\left(\frac{n^{3/2}}{t^2}\right). \label{eq:ParetoTailBound} \end{eqnarray} Observe that the above bound decays as $1/t^2$. This should be contrasted with Markov's Inequality, which would decay as $1/t$. Recall the assumption that the variance of $\rho(x,y)$ is unbounded. 
Hence, the variance of $f(S)$ can also be unbounded, and Chebyshev's inequality could not be applied.\\ As we now explain, Inequality~\ref{eq:ParetoTailBound} can be improved using part~2 of Theorem~\ref{thm:dpGeneralizationIntro}. To that end, for a fixed database $S\in X^n$, we calculate: \begin{align*} &\ex{y,z\sim\mathcal D}{\mathbbm{1}\left\{\left|f(S^{(i\leftarrow y)}) - f\left(S^{(i\leftarrow z)}\right)\right|\leq\lambda\right\} \cdot\left|f(S^{(i\leftarrow y)}) - f\left(S^{(i\leftarrow z)}\right)\right| }\\ &\leq{} \ex{y,z\sim\mathcal D}{\rho(y,z) } \leq{} \int_0^1 1{\rm{d}}t + \int_1^\infty \frac{1}{t^2} {\rm{d}}t =2\triangleq\tau. \end{align*} In order to apply part~2 of Theorem~\ref{thm:dpGeneralizationIntro} we need to ensure that $n\geq O\left( \frac{\lambda}{\varepsilon\cdot\min\{1,\varepsilon\}\tau}\ln\left(\frac{\varepsilon\tau}{\Delta}\right)\right)$. For our choice of $\Delta$ and $\tau$, if $n\geq\lambda\ln(\lambda)$ then it suffices to set $\varepsilon_0=\Theta\left( \sqrt{\frac{\lambda}{n}\ln(\lambda) } \right)$. Otherwise, if $n<\lambda\ln(\lambda)$ then it suffices to set $\varepsilon_1=\Theta\left( \frac{\lambda}{n}\ln(\lambda) \right)$. Plugging $(\varepsilon_0,\Delta)$ or $(\varepsilon_1,\Delta)$ into Theorem~\ref{thm:dpGeneralizationIntro}, and simplifying, we get \begin{eqnarray*} \Pr_{S\sim\mathcal D}\left[ |f(S)-f(\mathcal D^n)| \geq t \right] \leq \left\{ \begin{array}{ccl} \tilde{O}\left(\frac{n^{2}}{t^3}\right) & ,& t \leq n\\[0.5em] \tilde{O}\left(\frac{n}{t^2}\right) & ,& t > n \\ \end{array} \right. \end{eqnarray*} \subsection{Example: Triangles in Random Graphs} A random graph $G(N,p)$ on $N$ vertices $1,2,\dots,N$ is defined by drawing an edge between each pair $1\leq i<j\leq N$ independently with probability $p$. There are $n={{N}\choose{2}}$ i.i.d.\ random variables $x_{\{i,j\}}$ representing the choices: $x_{\{i,j\}}=x_{\{j,i\}}=1$ if the edge $\{i,j\}$ is drawn, and 0 otherwise. 
We will use $\mathcal D$ to denote the distribution with $\Pr_{x\sim\mathcal D}[x=1]=p$ and $\Pr_{x\sim\mathcal D}[x=0]=1-p$, and let $S=\left(x_{\{1,2\}},\dots,x_{\{N-1,N\}}\right)\sim\mathcal D^n$. We say that three vertices $i,j,\ell$ form a triangle if there is an edge between every pair of them.
We will denote $\blacktriangle_{i,j}(S)$ as the number of triangles that are created (or deleted) by adding (or removing) the edge $\{i,j\}$. That is, $$ \blacktriangle_{i,j}(S) = \left|\left\{ \ell\neq i,j \;:\; x_{\{i,\ell\}}=1 \text{ and } x_{\{\ell,j\}}=1 \right\}\right|. $$ Observe that $\blacktriangle_{i,j}(S)$ does not depend on $x_{\{i,j\}}$. Moreover, observe that for every fixture of $i<j$ we have that $\blacktriangle_{i,j}(S)$ is the sum of $(N-2)$ i.i.d.\ indicators, each equals to 1 with probability $p^2$. Fix $S=\left(x_{\{1,2\}},\dots,x_{\{n-1,n\}}\right)\in\{0,1\}^n$ and $x'\in\{0,1\}$. We have that $$ \left|f_{K_3}(S) - f_{K_3}\left(S^{(\{i,j\}\leftarrow x')}\right)\right| = \left\{ \begin{array}{ccl} 0 & ,& x_{\{i,j\}}=x'\\ \blacktriangle_{i,j}(S) & ,& x_{\{i,j\}} \neq x' \\ \end{array} \right. $$ where $S^{(\{i,j\}\leftarrow x')}$ is the same as $S$ except with $x_{\{i,j\}}$ replaced with $x'$. Fix $i<j$. We can now calculate \begin{align} &\ex{\substack{S\sim\mathcal D^{n}\\x'\sim\mathcal D}}{\mathbbm{1}\left\{\left|f_{K_3}(S) - f_{K_3}\left(S^{(\{i,j\}\leftarrow x')}\right)\right|>\lambda\right\} \cdot\left|f_{K_3}(S) - f_{K_3}\left(S^{(\{i,j\}\leftarrow x')}\right)\right| } \nonumber\\ &={} \ex{\substack{S\sim\mathcal D^{n}\\x'\sim\mathcal D}}{\mathbbm{1}\left\{x_{\{i,j\}\neq x'}\right\}\cdot\mathbbm{1}\left\{\blacktriangle_{i,j}(S)>\lambda\right\} \cdot \blacktriangle_{i,j}(S) } \nonumber\\ &={} \Pr_{x_{\{i,j\}},x'\sim\mathcal D}\left[x_{\{i,j\}}\neq x'\right] \cdot \ex{S\sim\mathcal D^{n}}{\mathbbm{1}\left\{\blacktriangle_{i,j}(S)>\lambda\right\} \cdot \blacktriangle_{i,j}(S) } \nonumber\\ &={} 2p(1-p)\cdot \left( \lambda\cdot\Pr_{S\sim\mathcal D^{n}}[\blacktriangle_{i,j}(S) \geq \lambda] + \int_{\lambda}^N \Pr_{S\sim\mathcal D^{n}}[\blacktriangle_{i,j}(S) \geq t] {\rm{d}}t \right) \nonumber\\ &\leq{} 2pN \cdot \Pr_{S\sim\mathcal D^{n}}[\blacktriangle_{i,j}(S) \geq \lambda]. 
\label{eq:app13} \end{align} Recall that $\blacktriangle_{i,j}(S)$ is the sum of $(N-2)$ i.i.d.\ indicators, each equals to 1 with probability $p^2$. We can upper bound the probability that $\blacktriangle_{i,j}(S)\geq\lambda$ with the probability that a sum of $N$ such random variables is at least $\lambda$. We will use the following variant of the Chernoff bound, known as the Chernoff-Hoeffding theorem: \begin{theorem}[\cite{hoeff}]\label{thm:Chernoff_entroty} Let $X_1,\dots,X_n$ be independent random variables where $\Pr[X_i=1]=p$ and $\Pr[X_i=0]=1-p$ for some $0<p<1$. Let $k$ be s.t.\ $p<\frac{k}{n}<1$. Then, $$ Pr\left[\sum_{i=1}^n{X_i}\geq k\right]\geq\exp\left(- n \cdot D\left(\left.\frac{k}{n}\right\|p\right)\right), $$ where $D(a\|b)$ is the relative entropy between an $a$-coin and a $p$-coin (i.e. between the Bernoulli($a$) and Bernoulli($p$) distribution): $$ D(a\|p) = a\cdot \log\left(\frac{a}{p}\right) + (1-a)\cdot \log\left(\frac{1-a}{1-p}\right). $$ \end{theorem} Using the Chernoff-Hoeffding theorem, for $p^2 N<\lambda<N$, we have \begin{align} (\ref{eq:app13})&\leq{} 2pN\cdot \exp\left(- N \cdot D\left( \left. \frac{\lambda}{N} \right\| p^2 \right) \right). \label{eq:app14} \end{align} Recall that we fixed $p=N^{-3/4}$. Choosing $\lambda=N^{1/13}$, we get: \begin{align} (\ref{eq:app14})&={} 2pN\cdot \exp\left(- N \cdot D\left( \left. N^{-12/13} \right\| N^{-6/4} \right) \right).\label{eq:app15} \end{align} We will use the following claim to bound $D\left( \left. N^{-12/13} \right\| N^{-6/4} \right)$: \begin{claim}\label{claim:relativeEntropy} Fix constants $c>b>0$. For $N\geq \max\{ 2^{1/b} , 2^{8/(c-b)} \}$ we have that $D\left( \left. N^{-b} \right\| N^{-c} \right) \geq \frac{c-b}{2}\cdot N^{-b} \cdot \log(N)$. \end{claim} Using Claim~\ref{claim:relativeEntropy}, for large enough $N$, we have that \begin{align} (\ref{eq:app15}) &\leq{} 2pN\cdot \exp\left(- N^{1/13} \right). 
\label{eq:app17} \end{align} So, denoting $\Delta= 2pN\cdot \exp\left(- N^{1/13} \right)$, we get that $$ \ex{\substack{S\sim\mathcal D^{n}\\x'\sim\mathcal D}}{\mathbbm{1}\left\{\left|f_{K_3}(S) - f_{K_3}\left(S^{(\{i,j\}\leftarrow x')}\right)\right|>\lambda\right\} \cdot\left|f_{K_3}(S) - f_{K_3}\left(S^{(\{i,j\}\leftarrow x')}\right)\right| } \leq \Delta. $$ In order to obtain a meaningful bound, we will need to use part~2 of Theorem~\ref{thm:dpGeneralizationIntro}. To that end, for every fixture of $S\in X^n$ and $i<j$ we can compute \begin{align*} \ex{y,z\sim\mathcal D}{\mathbbm{1}\left\{\left|f_{K_3}(S^{(\{i,j\}\leftarrow y)}) - f_{K_3}\left(S^{(\{i,j\}\leftarrow z)}\right)\right|\leq\lambda\right\} \cdot\left|f_{K_3}(S^{(\{i,j\}\leftarrow y)}) - f_{K_3}\left(S^{(\{i,j\}\leftarrow z)}\right)\right| } &\leq\ex{y,z\sim\mathcal D}{\mathbbm{1}\left\{ y\neq z \right\}\cdot\lambda}\\ &= 2p(1-p)\lambda \leq 2p\lambda \triangleq \tau. \end{align*} Finally, in order to apply Theorem~\ref{thm:dpGeneralizationIntro}, we need to ensure that $n\geq O\left( \frac{\lambda}{\varepsilon\min\{1,\varepsilon\}\tau}\ln\left(\frac{\min\{1,\varepsilon\}\tau}{\Delta}\right) \right)$. With our choices for $\Delta$ and $\tau$, it suffices to set $\varepsilon = \Theta\left( \sqrt{\frac{\lambda}{n p} } \right)$. Plugging $\varepsilon$, $\Delta$ and $\tau$ into Theorem~\ref{thm:dpGeneralizationIntro}, and simplifying, we get that $$ \Pr_{S\sim\mathcal D^n}\left[ | f_{K_3}(S) - f_{K_3}(\mathcal D^n) | \geq o\left( f_{K_3}(\mathcal D^n) \right) \right] < \exp\left(- N^{1/13}\right). $$ It remains to prove Claim~\ref{claim:relativeEntropy}: { \renewcommand{\ref{claim:dpExpectation}}{\ref{claim:relativeEntropy}} \begin{claim} Fix constants $c>b>0$. For $N\geq \max\{ 2^{1/b} , 2^{8/(c-b)} \}$ we have that $D\left( \left. N^{-b} \right\| N^{-c} \right) \geq \frac{c-b}{2}\cdot N^{-b} \cdot \log(N)$. 
\end{claim} \addtocounter{theorem}{-1} } \begin{proof}[Proof of Claim~\ref{claim:relativeEntropy}] \begin{align} D\left( \left. N^{-b} \right\| N^{-c} \right) &={} N^{-b} \cdot \log\left( N^{c-b} \right) + \left( 1 - N^{-b}\right) \cdot \log\left( \frac{1 - N^{-b}}{1 - N^{-c}} \right) \nonumber\\ &={} N^{-b} \cdot \log\left( N^{c-b} \right) + \left( 1 - N^{-b}\right) \cdot \log\left( \frac{N^{c}-N^{c-b}}{N^{c}-1} \right) \nonumber\\ &={} N^{-b} \cdot \log\left( N^{c-b} \right) + \left( 1 - N^{-b}\right) \cdot \log\left( 1- \frac{N^{c-b}-1}{N^{c}-1} \right) \label{eq:appendix1} \end{align} Using the fact that $\log(1-x)\geq -2x$ for every $0\leq x\leq\frac{1}{2}$, and assuming that $N\geq 2^{1/b}$, we have that \begin{align} (\ref{eq:appendix1}){} & \geq {} N^{-b} \cdot \log\left( N^{c-b} \right) -2 \left( 1 - N^{-b}\right) \cdot \frac{N^{c-b}-1}{N^{c}-1} \nonumber\\ &={} N^{-b} \cdot \log\left( N^{c-b} \right) - 2 \cdot \frac{N^{c-b}-1}{N^{c}-1} + 2 N^{-b} \cdot \frac{N^{c-b}-1}{N^{c}-1} \nonumber\\ &\geq{} N^{-b} \cdot \log\left( N^{c-b} \right) - 2 \cdot \frac{N^{c-b}-1}{N^{c}-1} \nonumber\\ &\geq{} N^{-b} \cdot \log\left( N^{c-b} \right) - 2 \cdot \frac{N^{c-b}}{\frac{1}{2}N^{c}} \nonumber\\ &\geq{} N^{-b} \cdot \log\left( N^{c-b} \right) - 4 N^{-b} \label{eq:appendix2} \end{align} Assuming that $N\geq 2^{8/(c-b)}$ we get \begin{align*} (\ref{eq:appendix2}){} & \geq {} \frac{1}{2}\cdot N^{-b} \cdot \log\left( N^{c-b} \right) \\ & \geq {} \frac{c-b}{2}\cdot N^{-b} \cdot \log\left( N \right). \end{align*} \end{proof} \section{Privately Identifying a High-Sensitivity Function} Let $S$ be a sample of $n$ i.i.d.\ elements from some distribution $\mathcal D$. Recall that if a low-sensitivity function $f$ is identified by a differentially private algorithm operating on $S$, then w.h.p.\ $f(S)\approx f(\mathcal D^n)\triangleq\ex{S'\sim\mathcal D^n}{f(S')}$. 
In this section we present a simple example showing that, in general, this is not the case for {\em high}-sensitivity functions. Specifically, we show that a differentially private algorithm operating on $S$ can identify a high-sensitivity function $f$ s.t.\ $|f(S)-f(\mathcal D^n)|$ is arbitrarily large, even though $|f(S')-f(\mathcal D^n)|$ is small for a fresh sample $S'\sim\mathcal D^n$. \begin{theorem} Fix $\beta,\varepsilon,B>0$, let $\mathcal U$ be the uniform distribution over $X=\{\pm1\}^d$ where $d=\mathrm{poly}(1/\beta)$, and let $n\geq O(\frac{1}{\varepsilon^2}\ln(1/\beta))$. There exists an $(\varepsilon,0)$-differentially private algorithm $\mathcal A$ that operates on a database $S\in (\{\pm1\}^d)^n$ and returns a function mapping $(\{\pm1\}^d)^n$ to $\mathbb{R}$, s.t.\ the following hold. \begin{enumerate} \item For every $f$ in the range of $\mathcal A$ it holds that $\Pr_{S'\sim \mathcal U^n}[f(S')\neq f(\mathcal U^n)]\leq\beta$. \item $\Pr_{\substack{S\sim \mathcal U^n\\f\leftarrow\mathcal A(S)}}[|f(S)-f(\mathcal U^n)|\geq B]\geq1/2$. \end{enumerate} \end{theorem} \begin{proof} For $t\in[d]$, define $f_t:(\{\pm1\}^d)^n\rightarrow\mathbb{R}$ as $$ f_t(x_1,\dots,x_n) = \left\{ \begin{array}{ccl} 0 & ,& \left|\sum_{i\in[n]} x_{i,t}\right| \leq \sqrt{2n\ln(2/\beta)}\\ B & ,& \sum_{i\in[n]} x_{i,t} > \sqrt{2n\ln(2/\beta)} \\ -B & ,& \sum_{i\in[n]} x_{i,t} < -\sqrt{2n\ln(2/\beta)} \\ \end{array} \right. $$ That is, given a database $S$ of $n$ rows from $\{\pm1\}^d$, we define $f_t(S)$ as $0$ if the sum of column $t$ (in absolute value) is less than some threshold, and otherwise set $f_t(S)$ to be $\pm B$ (depending on the sign of the sum). Observe that the global sensitivity of $f_t$ is $B$, and that $f_t(\mathcal U^n) \triangleq \ex{S'\sim \mathcal U^n}{f_t(S')}=0$. Also, by the Hoeffding bound, we have that $$ \Pr_{S\sim \mathcal U^n}\left[f_t(S)\neq 0\right]\leq \beta. 
$$ So, for every fixed $t$, with high probability over sampling $S\sim \mathcal U^n$ we have that $f_t(S)=0=f_t(\mathcal U^n)$. Nevertheless, as we now explain, if $d$ is large enough, then an $(\varepsilon,0)$-differentially private algorithm can easily identify a ``bad'' index $t^*$ such that $|f_{t^*}(S)|=B$. Consider the algorithm that on input $S=(x_1,x_2,\dots,x_n)$ samples an index $t\in[d]$ with probability proportional to $\exp\left( \frac{\varepsilon}{4} \left|\sum_{i\in[n]} x_{i,t}\right| \right)$. We will call it algorithm \texttt{BadIndex}. By the properties of the exponential mechanism, algorithm \texttt{BadIndex} is $(\varepsilon,0)$-differentially private. Moreover, with probability at least $3/4$, the output $t^*$ satisfies \begin{align} \left|\sum_{i\in[n]} x_{i,t^*}\right| \;\;\geq\;\; \max_{t\in[d]}\left\{ \left|\sum_{i\in[n]} x_{i,t}\right| \right\} \;-\; \frac{4}{\varepsilon}\ln\left(4d\right).\label{eq:overfit1} \end{align} In addition, by Theorem~\ref{thm:chernoffTight} (tightness of Chernoff bound), for every fixed $t$ it holds that \begin{align*} \Pr\left[ \sum_{i\in[n]} x_{i,t} \geq 1.11\cdot \sqrt{2n\ln(2/\beta)} \right]\geq \left(\frac{\beta}{2}\right)^{45}. \end{align*} As the columns are independent, taking $d=2\left(\frac{2}{\beta}\right)^{45}$, we get that \begin{align} \Pr\left[ \max_{t\in[d]}\left\{\sum_{i\in[n]} x_{i,t} \right\}\geq 1.11\cdot \sqrt{2n\ln(2/\beta)} \right]\geq 3/4.\label{eq:overfit2} \end{align} Combining~(\ref{eq:overfit1}) and~(\ref{eq:overfit2}) we get that with probability at least $1/2$ algorithm \texttt{BadIndex} identifies an index $t^*$ such that $$ \left|\sum_{i\in[n]} x_{i,t^*}\right| \;\;\geq\;\; 1.11\cdot \sqrt{2n\ln(2/\beta)} \;-\; \frac{4}{\varepsilon}\ln\left(4d\right). $$ Assuming that $n\geq O(\frac{1}{\varepsilon^2}\ln(1/\beta))$ we get that with probability at least $1/2$ algorithm \texttt{BadIndex} outputs an index $t^*$ s.t. $f_{t^*}(S)=B$. 
\end{proof} \subsection{Max-Information} In this section we show that algorithm \texttt{BadIndex} has relatively high {\em max-information}: Given two (correlated) random variables $Y$, $Z$, we use $Y\otimes Z$ denote the random variable obtained by drawing independent copies of $Y$ and $Z$ from their respective marginal distributions. \begin{definition}[Max-Information~\cite{DworkFHPRR-nips-2015}]\label{def:maxinfo} Let $Y$ and $Z$ be jointly distributed random variables over the domain $(\mathcal{Y},\mathcal{Z})$. The $\beta$-approximate max-information between $Y$ and $Z$ is defined as $$I_\infty^\beta (Y;Z) = \log \sup\limits_{\substack{\mathcal{O} \subseteq (\mathcal{Y} \times \mathcal{Z}),\\ \Pr[{ (Y,Z) \in \mathcal{O} }] > \beta}} \dfrac{\Pr[{(Y,Z) \in \mathcal{O}}] - \beta}{\Pr[{Y\otimes Z \in \mathcal{O}}] }.$$ An algorithm $\mathcal A: X^n \to F$ has $\beta$-approximate max-information of $k$ over product distributions, written $I^\beta_{\infty,P}(\mathcal A, n) \leq k$, if for every distribution $\mathcal D$ over $X$, we have $I^{\beta}_\infty(S; \mathcal A(S)) \leq k$ when $S\sim \mathcal D^n$. \label{defn:maxinfo} \end{definition} It follows immediately from the definition that approximate max-information controls the probability of ``bad events'' that can happen as a result of the dependence of $\mathcal A(S)$ on $S$: for every event $\mathcal{O}$, we have $\Pr[(S, \mathcal A(S)) \in \mathcal{O}] \leq 2^k\Pr[S \otimes \mathcal A(S) \in \mathcal{O}]+\beta$.\\ Consider again algorithm $\texttt{BadIndex}:(\{\pm1\})^n\rightarrow F$ that operates on database $S$ of size $n=O(\frac{1}{\varepsilon^2}\ln(1/\beta))$ and identifies, with probability 1/2, a function $f$ s.t.\ $f(S)\neq 0$, even though $f(S')=0$ w.p.\ $1-\beta$ for a fresh sample $S'$. Let us define $\mathcal{O}$ as the set of all pairs $(S,f)$, where $S$ is a database and $f$ is a function in the range of algorithm \texttt{BadIndex} such that $f(S)\neq0$. 
That is, $$ \mathcal{O} = \left\{ (S,f)\in(\{\pm1\})^n \times F \; : \; f(S)\neq0 \right\}. $$ If we assume that $I^{1/4}_{\infty,P}(\texttt{BadIndex}, n) \leq k$, then by Definition~\ref{def:maxinfo} we have: $$ \frac{1}{2}\leq\Pr_{\substack{S\sim\mathcal U^n\\f\leftarrow\texttt{BadIndex}(S)}}[(S,f)\in\mathcal{O}]\leq e^k\cdot\Pr_{\substack{S\sim\mathcal U^n\\T\sim\mathcal U^n\\f\leftarrow\texttt{BadIndex}(T)}}[(S,f)\in\mathcal{O}] +\frac{1}{4} \leq e^k\cdot\beta+\frac{1}{4}. $$ So $k\geq\ln(\frac{1}{4\beta})=\Omega(\varepsilon^2 n)$. \begin{thebibliography}{10} \bibitem{BassilyF16} Raef Bassily and Yoav Freund. \newblock Typicality-based stability and privacy. \newblock {\em CoRR}, abs/1604.03336, 2016. \bibitem{BassilyNSSSU16} Raef Bassily, Kobbi Nissim, Adam~D. Smith, Thomas Steinke, Uri Stemmer, and Jonathan Ullman. \newblock Algorithmic stability for adaptive data analysis. \newblock In {\em Proceedings of the 48th Annual {ACM} {SIGACT} Symposium on Theory of Computing, {STOC} 2016, Cambridge, MA, USA, June 18-21, 2016}, pages 1046--1059, 2016. \bibitem{chern} Herman Chernoff. \newblock A measure of asymptotic efficiency for tests of a hypothesis based on the sum of observations. \newblock {\em Ann. Math. Statist.}, 23:493--507, 1952. \bibitem{CummingsLNRW16} Rachel Cummings, Katrina Ligett, Kobbi Nissim, Aaron Roth, and Zhiwei~Steven Wu. \newblock Adaptive learning with robust generalization guarantees. \newblock In {\em Proceedings of the 29th Conference on Learning Theory, {COLT} 2016, New York, USA, June 23-26, 2016}, pages 772--814, 2016. \bibitem{DworkFHPRR-nips-2015} Cynthia Dwork, Vitaly Feldman, Moritz Hardt, Toniann Pitassi, Omer Reingold, and Aaron Roth. \newblock Generalization in adaptive data analysis and holdout reuse. \newblock In {\em Advances in Neural Information Processing Systems (NIPS)}, Montreal, December 2015. \bibitem{DworkFHPRR15} Cynthia Dwork, Vitaly Feldman, Moritz Hardt, Toniann Pitassi, Omer Reingold, and Aaron Roth. 
\newblock Preserving statistical validity in adaptive data analysis. \newblock In {\em ACM Symposium on the Theory of Computing (STOC)}. {ACM}, June 2015. \bibitem{DKMMN06} Cynthia Dwork, Krishnaram Kenthapadi, Frank McSherry, Ilya Mironov, and Moni Naor. \newblock Our data, ourselves: Privacy via distributed noise generation. \newblock In Serge Vaudenay, editor, {\em EUROCRYPT}, volume 4004 of {\em Lecture Notes in Computer Science}, pages 486--503. Springer, 2006. \bibitem{DMNS06} Cynthia Dwork, Frank McSherry, Kobbi Nissim, and Adam Smith. \newblock Calibrating noise to sensitivity in private data analysis. \newblock In {\em TCC}, volume 3876 of {\em Lecture Notes in Computer Science}, pages 265--284. Springer, 2006. \bibitem{HU14} Moritz Hardt and Jonathan Ullman. \newblock Preventing false discovery in interactive data analysis is hard. \newblock In {\em {FOCS}}, pages 454--463, 2014. \bibitem{hoeff} Wassily Hoeffding. \newblock Probability inequalities for sums of bounded random variables. \newblock {\em Journal of the American Statistical Association}, 58(301):13--30, 1963. \bibitem{KimVu2004} J.~H. Kim and V.~H. Vu. \newblock Divide and conquer martingales and the number of triangles in a random graph. \newblock {\em Random Structures and Algorithms}, 24(2):166--174, 2004. \bibitem{KleinY15} Philip~N. Klein and Neal~E. Young. \newblock On the number of iterations for dantzig-wolfe optimization and packing-covering approximation algorithms. \newblock {\em {SIAM} J. Comput.}, 44(4):1154--1172, 2015. \bibitem{Kontorovich14} Aryeh Kontorovich. \newblock Concentration in unbounded metric spaces and algorithmic stability. \newblock In {\em Proceedings of the 31th International Conference on Machine Learning, {ICML} 2014, Beijing, China, 21-26 June 2014}, pages 28--36, 2014. \bibitem{McSherryT07} Frank McSherry and Kunal Talwar. \newblock Mechanism design via differential privacy. \newblock In {\em {FOCS}}, pages 94--103. {IEEE}, Oct 20--23 2007. 
\bibitem{SU15} Thomas Steinke and Jonathan Ullman. \newblock Interactive fingerprinting codes and the hardness of preventing false discovery. \newblock In {\em {COLT}}, pages 1588--1628, 2015. \bibitem{SU17} Thomas Steinke and Jonathan Ullman. \newblock Subgaussian tail bounds via stability arguments. \newblock {\em ArXiv.org}, (arXiv:1701.03493 [cs.DM]), 2017. \bibitem{Vu2002} Van~H. Vu. \newblock Concentration of non-lipschitz functions and applications. \newblock {\em Random Structures and Algorithms}, 20(3):262--316, 2002. \end{thebibliography} \appendix \section{Concentration Bounds Through Differential Privacy -- Missing Details}\label{sec:fullProof} { \renewcommand{\ref{claim:dpExpectation}}{\ref{claim:dpExpectation}} \begin{claim} Fix a function $f:X^n\rightarrow\mathbb{R}$ and parameters $\varepsilon,\lambda\geq0$. If $M:(X^n)^T\rightarrow Y$ is $(\varepsilon,(f,\lambda))$-differentially private then for every $(f,\lambda)$-neighboring databases $xles,xles'\in(X^n)^T$ and every function $h:Y\rightarrow\mathbb{R}$ we have that $$ \ex{y\leftarrow M(\vec{S})}{h(y)} \leq e^{-\varepsilon}\cdot\ex{y\leftarrow M(\vec{S'})}{h(y)} \;\;+\;\; (e^\varepsilon - e^{-\varepsilon})\cdot \ex{y\leftarrow M(\vec{S'})}{|h(y)|}. 
$$\end{claim} \addtocounter{theorem}{-1} } \begin{proof} \begin{align*} \ex{y\leftarrow M(\vec{S})}{h(y)} &=\int_0^\infty \Pr_{y\leftarrow M(\vec{S})}[h(y)\geq z]{\rm d}z \;\;-\;\; \int_{-\infty}^0 \Pr_{y\leftarrow M(\vec{S})}[h(y)\leq z]{\rm d}z\\ &\leq e^\varepsilon\cdot \int_0^\infty \Pr_{y\leftarrow M(\vec{S'})}[h(y)\geq z]{\rm d}z \;\;-\;\; e^{-\varepsilon}\cdot\int_{-\infty}^0 \Pr_{y\leftarrow M(\vec{S'})}[h(y)\leq z]{\rm d}z\\ &= e^{-\varepsilon}\left[ \int_0^\infty \Pr_{y\leftarrow M(\vec{S'})}[h(y)\geq z]{\rm d}z \;\;-\;\; \int_{-\infty}^0 \Pr_{y\leftarrow M(\vec{S'})}[h(y)\leq z]{\rm d}z\right]\\ &+ (e^\varepsilon - e^{-\varepsilon})\cdot \int_0^\infty \Pr_{y\leftarrow M(\vec{S'})}[h(y)\geq z]{\rm d}z\\ &= e^{-\varepsilon}\cdot\ex{y\leftarrow M(\vec{S'})}{h(y)} \;\;+\;\; (e^\varepsilon - e^{-\varepsilon})\cdot \int_0^\infty \Pr_{y\leftarrow M(\vec{S'})}[h(y)\geq z]{\rm d}z\\ &\leq e^{-\varepsilon}\cdot\ex{y\leftarrow M(\vec{S'})}{h(y)} \;\;+\;\; (e^\varepsilon - e^{-\varepsilon})\cdot \int_0^\infty \Pr_{y\leftarrow M(\vec{S'})}[|h(y)|\geq z]{\rm d}z\\ &= e^{-\varepsilon}\cdot\ex{y\leftarrow M(\vec{S'})}{h(y)} \;\;+\;\; (e^\varepsilon - e^{-\varepsilon})\cdot \ex{y\leftarrow M(\vec{S'})}{|h(y)|} \end{align*} \end{proof} \subsection{Multi Sample Expectation Bound} \begin{lem}[Expectation Bound] \label{lem:MKLCondExp} Let $\mathcal D$ be a distribution over a domain $X$, let $f:X^n\rightarrow\mathbb{R}$ , and let $\Delta,\lambda$ be s.t.\ for every $1\leq i\leq n$ it holds that \begin{equation} \ex{\substack{S\sim\mathcal D^{n}\\z\sim\mathcal D}}{\mathbbm{1}\left\{\left|f(S) - f\left(S^{(i\leftarrow z)}\right)\right|>\lambda\right\} \cdot\left|f(S) - f\left(S^{(i\leftarrow z)}\right)\right| }\leq\Delta, \label{eq:expectationMainCondition} \end{equation} where $S^{(i\leftarrow z)}$ is the same as $S$ except that the $i^{\text{th}}$ element is replaced with $z$. 
Let $\mathcal A : (X^{n})^{T} \to ([T]\cup\bot)$ be an $(\varepsilon,(f,\lambda))$-differentially private algorithm that operates on $T$ databases of size $n$ from $X$, and outputs an index $1\leq t \leq T$ or $\bot$. Then $$ \left|\ex{\substack{xles\sim\mathcal{D}^{nT} \\ t \leftarrow \mathcal A(xles)}}{\mathbbm{1}\{t\neq\bot\}\cdot(f(\mathcal{D}^n) - f(xle_{t}))} \right| \leq (e^\varepsilon - e^{-\varepsilon})\cdot \lambda n + 6\Delta n T. $$ If, in addition to~(\ref{eq:expectationMainCondition}), there exists a number $0\leq \tau \leq \lambda$ s.t.\ for every $1\leq i\leq n$ and every fixture of $S\in X^n$ we have that \begin{equation} \ex{\substack{y,z\sim\mathcal D}}{\mathbbm{1}\left\{\left|f(S^{(i\leftarrow y)}) - f\left(S^{(i\leftarrow z)}\right)\right|\leq\lambda\right\} \cdot\left|f(S^{(i\leftarrow y)}) - f\left(S^{(i\leftarrow z)}\right)\right| }\leq\tau, \label{eq:expectationSecondCondition} \end{equation} Then, $$ \left|\ex{\substack{xles\sim\mathcal{D}^{nT} \\ t \leftarrow \mathcal A(xles)}}{\mathbbm{1}\{t\neq\bot\}\cdot(f(\mathcal{D}^n) - f(xle_{t}))} \right| \leq (e^\varepsilon - e^{-\varepsilon})\cdot \tau n + 6\Delta n T. $$ \end{lem} We now present the proof assuming that~(\ref{eq:expectationSecondCondition}) holds for some $0\leq\tau\leq\lambda$. This is without loss of generality, as trivially it holds for $\tau=\lambda$. \begin{proof}[Proof of Lemma~\ref{lem:MKLCondExp}] Let $xles' = (xle'_1,\dots,xle'_{T}) \sim \mathcal{D}^{nT}$ be independent of $xles$. Recall that each element $xle_{t}$ of $xles$ is itself a vector $(x_{t, 1},\dots,x_{t, n}),$ and the same is true for each element $xle'_{t}$ of $xles'.$ We will sometimes refer to the vectors $xle_{1},\dots,xle_{T}$ as the \emph{subsamples of $xles$.} We define a sequence of intermediate samples that allow us to interpolate between $xles$ and $xles'$. 
Formally, for $\ell \in \{0,1,\dots,n\}$ define $xles^{\ell} = (xle^{\ell}_{1},\dots,xle^{\ell}_{T}) \in (X^{n})^{T}$ where $xle^{\ell}_{t}=(x^{\ell}_{t, 1},\dots,x^{\ell}_{t, n})$ and $$ x^{\ell}_{t, i} = \left\{ \begin{array}{ccl} x_{t, i} & ,& i > \ell\\ x'_{t, i} & ,& i \leq \ell \\ \end{array} \right. $$ That is, every subsample $S^\ell_t$ of $\vec{S}^\ell$ is identical to $S'_t$ on the first $\ell$ elements, and identical to $S_t$ thereafter. By construction we have $xles^0=xles$ and $xles^{n} = xles'$. Moreover, for every $t$ we have that $S^\ell_t$ and $S^{\ell-1}_t$ differ in exactly one element. In terms of these intermediate samples we can write: \begin{align} &\left|\exx{xles\sim\mathcal{D}^{nT}}{\ex{t \leftarrow \mathcal A(xles)}{\mathbbm{1}\{t\neq\bot\}\cdot(f(\mathcal{D}^n) - f(xle_{t}))}} \right| \nonumber \\ &={}\left|\exx{xles\sim\mathcal{D}^{nT}}{\ex{t \leftarrow \mathcal A(xles)}{\mathbbm{1}\{t\neq\bot\}\cdot\left(\ex{xles'\sim\mathcal{D}^{nT}}{f(xle'_t)} - f(xle_{t})\right)}} \right| \nonumber \\ &={}\left|\exx{xles\sim\mathcal{D}^{nT}}{\exx{t \leftarrow \mathcal A(xles)}\ex{xles'\sim\mathcal{D}^{nT}}{\mathbbm{1}\{t\neq\bot\}\cdot\left(f(xle'_t) - f(xle_{t})\right)}} \right| \nonumber \\ &={} \left| \sum_{\ell \in [n]} \exx{xles,xles'\sim\mathcal{D}^{nT}}{\ex{t \leftarrow \mathcal A(xles)}{ \mathbbm{1}\{t\neq\bot\}\cdot\left(f(xle^{\ell}_{t})- f(xle^{\ell-1}_{t}) \right)}} \right| \nonumber \\ &\leq{} \sum_{\ell \in [n]} \left| \exx{xles,xles'\sim\mathcal{D}^{nT}}{\ex{t \leftarrow \mathcal A(xles)}{ \mathbbm{1}\{t\neq\bot\}\cdot\left(f(xle^{\ell}_{t})- f(xle^{\ell-1}_{t}) \right)}} \right| \nonumber\\ &={} \sum_{\ell \in [n]} \left| \exx{xles,xles'\sim\mathcal{D}^{nT}}\exx{Z\sim\mathcal{D}^T}{\ex{t \leftarrow \mathcal A(xles)}{ \mathbbm{1}\{t\neq\bot\}\cdot\left(f(xle^{\ell}_{t})- f(xle^{\ell-1}_{t}) \right)}} \right| \label{eq:1} \end{align} Given a multisample $xles=(S_1,\dots,S_T)\in(X^n)^T$, a vector $Z=(z_1\dots,z_T)\inX^T$, and an index 
$1 \leq k \leq n$, we define $xles^{(k\leftarrow Z)}$ to be the same as $xles$ except that the $k^{\text{th}}$ element of {\em every} subsample $S_i$ is replaced with $z_i$. Observe that by construction, for every $\ell,Z$ we have $xles^{\ell,(\ell\leftarrow Z)} = xles^{\ell-1,(\ell\leftarrow Z)}$. Thus, \begin{align} (\ref{eq:1}) &={} \sum_{\ell \in [n]} \left| \exx{xles,xles'\sim\mathcal{D}^{nT}}\exx{Z\sim\mathcal{D}^T}{\ex{t \leftarrow \mathcal A(xles)}{ \mathbbm{1}\{t\neq\bot\}\cdot\Bigg(f(xle^{\ell}_{t})- f\left(xle^{\ell,(\ell\leftarrow Z)}_{t}\right) \Bigg) - \mathbbm{1}\{t\neq\bot\}\cdot\Bigg(f(xle^{\ell-1}_{t})- f\left(xle^{\ell-1,(\ell\leftarrow Z)}_{t}\right) \Bigg) }} \right|. \label{eq:2} \end{align} Observer that the pairs $(xles,xles^{\ell})$ and $\left(xles,xles^{\ell,(\ell\leftarrow Z)}\right)$ are identically distributed. Namely, both $xles^{\ell}$ and $xles^{\ell,(\ell\leftarrow Z)}$ agree with $xles$ on the last $(n-\ell)$ entries of every subsample, and otherwise contain i.i.d.\ samples from $\mathcal{D}$. Hence, the expectation of $\left(f(xle^{\ell}_{t})- f\left(xle^{\ell,(\ell\leftarrow Z)}_{t}\right)\right)$ is zero, and we get \begin{align} (\ref{eq:2}) &={} \sum_{\ell \in [n]} \left| \exx{xles,xles'\sim\mathcal{D}^{nT}}\exx{Z\sim\mathcal{D}^T}{\ex{t \leftarrow \mathcal A(xles)}{ \mathbbm{1}\{t\neq\bot\}\cdot\Bigg( f\left(xle^{\ell-1,(\ell\leftarrow Z)}_{t}\right) - f(xle^{\ell-1}_{t}) \Bigg) }} \right|. \label{eq:3} \end{align} Observer that the pair $(xles^{\ell-1}, xles)$ has the same distribution as $(xles, xles^{\ell-1}).$ Specifically, the first component is $n T$ independent samples from $\mathcal{D}$ and the second component is equal to the first component with a subset of the entries replaced by fresh independent samples from $\mathcal{D}$. 
Thus, \begin{align} (\ref{eq:3})&={} \sum_{\ell \in [n]} \left| \exx{xles,xles'\sim\mathcal{D}^{nT}}\exx{Z\sim\mathcal{D}^T}{\ex{t \leftarrow \mathcal A(xles^{\ell-1})}{ \mathbbm{1}\{t\neq\bot\}\cdot\Bigg( f\left(xle^{(\ell\leftarrow Z)}_{t}\right) - f(xle_{t}) \Bigg) }} \right| \nonumber\\[1em] &\leq{} \sum_{\ell \in [n]} \left| \exx{xles,xles'\sim\mathcal{D}^{nT}}{\ex{Z\sim\mathcal{D}^T}{ \mathbbm{1}\left\{ \begin{array}{c} \max_{m\in[T]}|f(xle^{\ell-1}_{m}) - f(xle^{\ell}_{m})|\leq\lambda\\[0.5em] {\rm{and}}\\[0.5em] \max_{m\in[T]}|f\left(xle^{(\ell\leftarrow Z)}_{m}\right) - f(xle_{m})|\leq\lambda \end{array} \right\} \cdot \ex{t \leftarrow \mathcal A(xles^{\ell-1})}{ \mathbbm{1}\{t\neq\bot\}\cdot\Bigg( f\left(xle^{(\ell\leftarrow Z)}_{t}\right) - f(xle_{t}) \Bigg) } }} \right| \nonumber\\[1em] &+{}\sum_{\ell \in [n]} \left| \exx{xles,xles'\sim\mathcal{D}^{nT}}{\ex{Z\sim\mathcal{D}^T}{ \mathbbm{1}\left\{ \begin{array}{c} \max_{m\in[T]}|f(xle^{\ell-1}_{m}) - f(xle^{\ell}_{m})|>\lambda\\[0.5em] {\rm{or}}\\[0.5em] \max_{m\in[T]}|f\left(xle^{(\ell\leftarrow Z)}_{m}\right) - f(xle_{m})|>\lambda \end{array} \right\} \cdot \max_{m\in[T]}\left|f\left(xle^{(\ell\leftarrow Z)}_{m}\right) - f(xle_{m})\right| }} \right| \label{eq:4} \end{align} When $\max_{m\in[T]}|f(xle^{\ell-1}_{m}) - f(xle^{\ell}_{m})|\leq\lambda$ we now use the properties of algorithm $\mathcal A$ to argue that $\mathcal A(xles^{\ell-1}) \approx \mathcal A(xles^{\ell})$. 
By Claim~\ref{claim:dpExpectation} we get that
\max_{m\in[T]}|f\left(xle^{(\ell\leftarrow Z)}_{m}\right) - f(xle_{m})|\leq\lambda \end{array} \right\} \cdot \ex{t \leftarrow \mathcal A(xles^{\ell})}{ \mathbbm{1}\{t\neq\bot\}\cdot\Bigg( f\left(xle^{(\ell\leftarrow Z)}_{t}\right) - f(xle_{t}) \Bigg) } }} \right| \nonumber\\[1em] &+{} (e^\varepsilon - e^{-\varepsilon})\cdot \sum_{\ell \in [n]} \left| \exx{xles,xles'\sim\mathcal{D}^{nT}}{\exx{Z\sim\mathcal{D}^T}{ \ex{t \leftarrow \mathcal A(xles^{\ell})}{ \mathbbm{1}\left\{\max_{m\in[T]}|f\left(xle^{(\ell\leftarrow Z)}_{m}\right) - f(xle_{m})|\leq\lambda \right\} \cdot \mathbbm{1}\{t\neq\bot\}\cdot\left| f\left(xle^{(\ell\leftarrow Z)}_{t}\right) - f(xle_{t}) \right| } }} \right| \nonumber\\[1em] &+{}\sum_{\ell \in [n]} \left| \exx{xles,xles'\sim\mathcal{D}^{nT}}{\ex{Z\sim\mathcal{D}^T}{ \mathbbm{1}\left\{ \begin{array}{c} \max_{m\in[T]}|f(xle^{\ell-1}_{m}) - f(xle^{\ell}_{m})|>\lambda\\[0.5em] {\rm{or}}\\[0.5em] \max_{m\in[T]}|f\left(xle^{(\ell\leftarrow Z)}_{m}\right) - f(xle_{m})|>\lambda \end{array} \right\} \cdot \max_{m\in[T]}\left|f\left(xle^{(\ell\leftarrow Z)}_{m}\right) - f(xle_{m})\right| }} \right| \label{eq:5b} \end{align} Furthermore, we can replace $\mathbbm{1}\left\{\max_{m\in[T]}|f\left(xle^{(\ell\leftarrow Z)}_{m}\right) - f(xle_{m})|\leq\lambda\right\}$ in the middle row with the weaker requirement -- just for the specific $t$ that was selected by algorithm $\mathcal A$. 
This yields: \begin{align} &(\ref{eq:5b})\nonumber\\ &\leq{} \sum_{\ell \in [n]} \left| \exx{xles,xles'\sim\mathcal{D}^{nT}}{\ex{Z\sim\mathcal{D}^T}{ \mathbbm{1}\left\{ \begin{array}{c} \max_{m\in[T]}|f(xle^{\ell-1}_{m}) - f(xle^{\ell}_{m})|\leq\lambda\\[0.5em] {\rm{and}}\\[0.5em] \max_{m\in[T]}|f\left(xle^{(\ell\leftarrow Z)}_{m}\right) - f(xle_{m})|\leq\lambda \end{array} \right\} \cdot \ex{t \leftarrow \mathcal A(xles^{\ell})}{ \mathbbm{1}\{t\neq\bot\}\cdot\Bigg( f\left(xle^{(\ell\leftarrow Z)}_{t}\right) - f(xle_{t}) \Bigg) } }} \right| \nonumber\\[1em] &+{} (e^\varepsilon - e^{-\varepsilon})\cdot \sum_{\ell \in [n]} \left| \exx{xles,xles'\sim\mathcal{D}^{nT}}{\exx{Z\sim\mathcal{D}^T}{ \ex{t \leftarrow \mathcal A(xles^{\ell})}{ \mathbbm{1}\left\{|f\left(xle^{(\ell\leftarrow Z)}_{t}\right) - f(xle_{t})|\leq\lambda \right\} \cdot \mathbbm{1}\{t\neq\bot\}\cdot\left| f\left(xle^{(\ell\leftarrow Z)}_{t}\right) - f(xle_{t}) \right| } }} \right| \nonumber\\[1em] &+{}\sum_{\ell \in [n]} \left| \exx{xles,xles'\sim\mathcal{D}^{nT}}{\ex{Z\sim\mathcal{D}^T}{ \mathbbm{1}\left\{ \begin{array}{c} \max_{m\in[T]}|f(xle^{\ell-1}_{m}) - f(xle^{\ell}_{m})|>\lambda\\[0.5em] {\rm{or}}\\[0.5em] \max_{m\in[T]}|f\left(xle^{(\ell\leftarrow Z)}_{m}\right) - f(xle_{m})|>\lambda \end{array} \right\} \cdot \max_{m\in[T]}\left|f\left(xle^{(\ell\leftarrow Z)}_{m}\right) - f(xle_{m})\right| }} \right|\label{eq:5c} \end{align} Using the fact that the pairs $(\vec{S},\vec{S}^\ell)$ and $(\vec{S}^\ell,\vec{S})$ are identically distributed, we can switch them in the middle row, to get \begin{align} &(\ref{eq:5c})\nonumber\\ &\leq{} \sum_{\ell \in [n]} \left| \exx{xles,xles'\sim\mathcal{D}^{nT}}{\ex{Z\sim\mathcal{D}^T}{ \mathbbm{1}\left\{ \begin{array}{c} \max_{m\in[T]}|f(xle^{\ell-1}_{m}) - f(xle^{\ell}_{m})|\leq\lambda\\[0.5em] {\rm{and}}\\[0.5em] \max_{m\in[T]}|f\left(xle^{(\ell\leftarrow Z)}_{m}\right) - f(xle_{m})|\leq\lambda \end{array} \right\} \cdot \ex{t \leftarrow \mathcal 
A(xles^{\ell})}{ \mathbbm{1}\{t\neq\bot\}\cdot\Bigg( f\left(xle^{(\ell\leftarrow Z)}_{t}\right) - f(xle_{t}) \Bigg) } }} \right| \nonumber\\[1em] &+{} (e^\varepsilon - e^{-\varepsilon})\cdot \sum_{\ell \in [n]} \left| \exx{xles\sim\mathcal{D}^{nT}}{\exx{t\leftarrow\mathcal A(xles)}{ \ex{ \substack{ xles'\sim\mathcal D^{nT}\\ Z\sim\mathcal{D}^T } }{ \mathbbm{1}\left\{|f\left(xle^{\ell,(\ell\leftarrow Z)}_{t}\right) - f(xle^{\ell}_{t})|\leq\lambda \right\} \cdot \mathbbm{1}\{t\neq\bot\}\cdot\left| f\left(xle^{\ell,(\ell\leftarrow Z)}_{t}\right) - f(xle^{\ell}_{t}) \right| } }} \right| \nonumber\\[1em] &+{}\sum_{\ell \in [n]} \left| \exx{xles,xles'\sim\mathcal{D}^{nT}}{\ex{Z\sim\mathcal{D}^T}{ \mathbbm{1}\left\{ \begin{array}{c} \max_{m\in[T]}|f(xle^{\ell-1}_{m}) - f(xle^{\ell}_{m})|>\lambda\\[0.5em] {\rm{or}}\\[0.5em] \max_{m\in[T]}|f\left(xle^{(\ell\leftarrow Z)}_{m}\right) - f(xle_{m})|>\lambda \end{array} \right\} \cdot \max_{m\in[T]}\left|f\left(xle^{(\ell\leftarrow Z)}_{m}\right) - f(xle_{m})\right| }} \right|\label{eq:5d} \end{align} Using our assumptions on the function $f$ and the distribution $\mathcal D$ (for the middle row), brings us to: \begin{align} &(\ref{eq:5d})\nonumber\\ &\leq{} \sum_{\ell \in [n]} \left| \exx{xles,xles'\sim\mathcal{D}^{nT}}{\ex{Z\sim\mathcal{D}^T}{ \mathbbm{1}\left\{ \begin{array}{c} \max_{m\in[T]}|f(xle^{\ell-1}_{m}) - f(xle^{\ell}_{m})|\leq\lambda\\[0.5em] {\rm{and}}\\[0.5em] \max_{m\in[T]}|f\left(xle^{(\ell\leftarrow Z)}_{m}\right) - f(xle_{m})|\leq\lambda \end{array} \right\} \cdot \ex{t \leftarrow \mathcal A(xles^{\ell})}{ \mathbbm{1}\{t\neq\bot\}\cdot\Bigg( f\left(xle^{(\ell\leftarrow Z)}_{t}\right) - f(xle_{t}) \Bigg) } }} \right| \nonumber\\ &+{} (e^\varepsilon - e^{-\varepsilon})n\tau\nonumber\\ &+{}\sum_{\ell \in [n]} \left| \exx{xles,xles'\sim\mathcal{D}^{nT}}{\ex{Z\sim\mathcal{D}^T}{ \mathbbm{1}\left\{ \begin{array}{c} \max_{m\in[T]}|f(xle^{\ell-1}_{m}) - f(xle^{\ell}_{m})|>\lambda\\[0.5em] {\rm{or}}\\[0.5em] 
\max_{m\in[T]}|f\left(xle^{(\ell\leftarrow Z)}_{m}\right) - f(xle_{m})|>\lambda \end{array} \right\} \cdot \max_{m\in[T]}\left|f\left(xle^{(\ell\leftarrow Z)}_{m}\right) - f(xle_{m})\right| }} \right|\label{eq:5e} \end{align} Our next task is to remove the indicator function in the first row. This is useful as the pairs $\left(xles^{\ell},xles^{(\ell\leftarrow Z)}\right)$ and $(xles^{\ell},xles)$ are identically distributed, and hence, if we were to remove the indicator function, the first row would be equal to zero. To that end we add and subtract the first row with the complementary indicator function (this amounts to multiplying the third row by 2). We get \begin{align} (\ref{eq:5e})&\leq{} \sum_{\ell \in [n]} \left| \exx{xles,xles'\sim\mathcal{D}^{nT}}{\ex{Z\sim\mathcal{D}^T}{ \ex{t \leftarrow \mathcal A(xles^{\ell})}{ \mathbbm{1}\{t\neq\bot\}\cdot\Bigg( f\left(xle^{(\ell\leftarrow Z)}_{t}\right) - f(xle_{t}) \Bigg) } }} \right| \nonumber\\ &+{} (e^\varepsilon - e^{-\varepsilon})n\tau\nonumber\\ &+{}2\cdot\sum_{\ell \in [n]} \left| \exx{xles,xles'\sim\mathcal{D}^{nT}}{\ex{Z\sim\mathcal{D}^T}{ \mathbbm{1}\left\{ \begin{array}{c} \max_{m\in[T]}|f(xle^{\ell-1}_{m}) - f(xle^{\ell}_{m})|>\lambda\\[0.5em] {\rm{or}}\\[0.5em] \max_{m\in[T]}|f\left(xle^{(\ell\leftarrow Z)}_{m}\right) - f(xle_{m})|>\lambda \end{array} \right\} \cdot \max_{m\in[T]}\left|f\left(xle^{(\ell\leftarrow Z)}_{m}\right) - f(xle_{m})\right| }} \right|\label{eq:5f} \end{align} Now the first row is 0, so \begin{align} (\ref{eq:5f})&={} (e^\varepsilon - e^{-\varepsilon})n\tau\nonumber\\ &+{}2\cdot\sum_{\ell \in [n]} \left| \exx{xles,xles'\sim\mathcal{D}^{nT}}{\ex{Z\sim\mathcal{D}^T}{ \mathbbm{1}\left\{ \begin{array}{c} \max_{m\in[T]}|f(xle^{\ell-1}_{m}) - f(xle^{\ell}_{m})|>\lambda\\[0.5em] {\rm{or}}\\[0.5em] \max_{m\in[T]}|f\left(xle^{(\ell\leftarrow Z)}_{m}\right) - f(xle_{m})|>\lambda \end{array} \right\} \cdot \max_{m\in[T]}\left|f\left(xle^{(\ell\leftarrow Z)}_{m}\right) - f(xle_{m})\right| }} 
\right| \label{eq:5g} \end{align} We can replace the {\em or} condition in the indicator function with the sum of the two conditions: \begin{align} (\ref{eq:5g})&\leq{} (e^\varepsilon - e^{-\varepsilon})n\tau\nonumber\\ &+{}2\cdot\sum_{\ell \in [n]} \left| \exx{xles,xles'\sim\mathcal{D}^{nT}}{\ex{Z\sim\mathcal{D}^T}{ \mathbbm{1}\left\{ \max_{m\in[T]}|f(xle^{\ell-1}_{m}) - f(xle^{\ell}_{m})|>\lambda \right\} \cdot \max_{m\in[T]}\left|f\left(xle^{(\ell\leftarrow Z)}_{m}\right) - f(xle_{m})\right| }} \right|\nonumber\\ &+{}2\cdot\sum_{\ell \in [n]} \left| \exx{xles,xles'\sim\mathcal{D}^{nT}}{\ex{Z\sim\mathcal{D}^T}{ \mathbbm{1}\left\{ \max_{m\in[T]}|f\left(xle^{(\ell\leftarrow Z)}_{m}\right) - f(xle_{m})|>\lambda \right\} \cdot \max_{m\in[T]}\left|f\left(xle^{(\ell\leftarrow Z)}_{m}\right) - f(xle_{m})\right| }} \right| \label{eq:5h} \end{align} In the third row, we can replace $\max_{m\in[T]}$ with $\sum_{m\in[T]}$, to get \begin{align} (\ref{eq:5h})&\leq{} (e^\varepsilon - e^{-\varepsilon})n\tau\nonumber\\ &+{}2\cdot\sum_{\ell \in [n]} \left| \exx{xles,xles'\sim\mathcal{D}^{nT}}{\ex{Z\sim\mathcal{D}^T}{ \mathbbm{1}\left\{ \max_{m\in[T]}|f(xle^{\ell-1}_{m}) - f(xle^{\ell}_{m})|>\lambda \right\} \cdot \max_{m\in[T]}\left|f\left(xle^{(\ell\leftarrow Z)}_{m}\right) - f(xle_{m})\right| }} \right|\nonumber\\ &+{}2\cdot\sum_{\ell \in [n]} \sum_{m\in[T]}\left| \exx{xles,xles'\sim\mathcal{D}^{nT}}{\ex{Z\sim\mathcal{D}^T}{ \mathbbm{1}\left\{ |f\left(xle^{(\ell\leftarrow Z)}_{m}\right) - f(xle_{m})|>\lambda \right\} \cdot \left|f\left(xle^{(\ell\leftarrow Z)}_{m}\right) - f(xle_{m})\right| }} \right| \label{eq:5i} \end{align} Applying our assumptions on $f$ and $\mathcal D$ to the third row brings us to \begin{align} (\ref{eq:5i})&\leq{} (e^\varepsilon - e^{-\varepsilon})n\tau\nonumber + 2nT\Delta\\ &+{}2\cdot\sum_{\ell \in [n]} \left| \exx{xles,xles'\sim\mathcal{D}^{nT}}{\ex{Z\sim\mathcal{D}^T}{ \mathbbm{1}\left\{ \max_{m\in[T]}|f(xle^{\ell-1}_{m}) - 
f(xle^{\ell}_{m})|>\lambda \right\} \cdot \max_{m\in[T]}\left|f\left(xle^{(\ell\leftarrow Z)}_{m}\right) - f(xle_{m})\right| }} \right|\nonumber\\ \label{eq:5j} \end{align} The issue now is that the expression inside the indicator function is different from the expression outside of it. To that end, we split the indicator function as follows: \begin{align} (\ref{eq:5j})&\leq{} (e^\varepsilon - e^{-\varepsilon})n\tau\nonumber + 2nT\Delta\\ &+{}2\cdot\sum_{\ell \in [n]} \left| \exx{xles,xles'\sim\mathcal{D}^{nT}}{\ex{Z\sim\mathcal{D}^T}{ \mathbbm{1}\left\{ \begin{array}{c} \max_{m\in[T]}|f(xle^{\ell-1}_{m}) - f(xle^{\ell}_{m})|>\lambda\\[0.5em] {\rm{and}}\\[0.5em] \max_{m\in[T]}\left|f\left(xle^{(\ell\leftarrow Z)}_{m}\right) - f(xle_{m})\right|>\lambda \end{array} \right\} \cdot \max_{m\in[T]}\left|f\left(xle^{(\ell\leftarrow Z)}_{m}\right) - f(xle_{m})\right| }} \right|\nonumber\\[2em] &+{}2\cdot\sum_{\ell \in [n]} \left| \exx{xles,xles'\sim\mathcal{D}^{nT}}{\ex{Z\sim\mathcal{D}^T}{ \mathbbm{1}\left\{ \begin{array}{c} \max_{m\in[T]}|f(xle^{\ell-1}_{m}) - f(xle^{\ell}_{m})|>\lambda\\[0.5em] {\rm{and}}\\[0.5em] \max_{m\in[T]}\left|f\left(xle^{(\ell\leftarrow Z)}_{m}\right) - f(xle_{m})\right|\leq\lambda \end{array} \right\} \cdot \max_{m\in[T]}\left|f\left(xle^{(\ell\leftarrow Z)}_{m}\right) - f(xle_{m})\right| }} \right|\nonumber\\[2em] &\leq{} (e^\varepsilon - e^{-\varepsilon})n\tau\nonumber + 2nT\Delta\\ &+{}2\cdot\sum_{\ell \in [n]} \left| \exx{xles,xles'\sim\mathcal{D}^{nT}}{\ex{Z\sim\mathcal{D}^T}{ \mathbbm{1}\left\{ \max_{m\in[T]}\left|f\left(xle^{(\ell\leftarrow Z)}_{m}\right) - f(xle_{m})\right|>\lambda \right\} \cdot \max_{m\in[T]}\left|f\left(xle^{(\ell\leftarrow Z)}_{m}\right) - f(xle_{m})\right| }} \right|\nonumber\\[1em] &+{}2\cdot\sum_{\ell \in [n]} \left| \exx{xles,xles'\sim\mathcal{D}^{nT}}{\ex{Z\sim\mathcal{D}^T}{ \mathbbm{1}\left\{ \max_{m\in[T]}|f(xle^{\ell-1}_{m}) - f(xle^{\ell}_{m})|>\lambda \right\} \cdot \max_{m\in[T]}|f(xle^{\ell-1}_{m}) - 
f(xle^{\ell}_{m})| }} \right|\nonumber\\ &\leq{} (e^\varepsilon - e^{-\varepsilon})n\tau\nonumber + 6nT\Delta.\nonumber \end{align} \end{proof} \subsection{Multi Sample Amplification} \begin{theorem}[High Probability Bound] \label{thm:dpGeneralization} Let $\mathcal D$ be a distribution over a domain $X$, let $f:X^n\rightarrow\mathbb{R}$ , and let $\Delta,\lambda,\tau$ be s.t.\ for every $1\leq i\leq n$ it holds that $$ \ex{\substack{S\sim\mathcal D^{n}\\z\sim\mathcal D}}{\mathbbm{1}\left\{\left|f(S) - f\left(S^{(i\leftarrow z)}\right)\right|>\lambda\right\} \cdot\left|f(S) - f\left(S^{(i\leftarrow z)}\right)\right| }\leq\Delta, $$ and, furthermore, $\forall S\in X^n$ and $\forall 1\leq i\leq n$ we have $$ \ex{\substack{y,z\sim\mathcal D}}{\mathbbm{1}\left\{\left|f(S^{(i\leftarrow y)}) - f\left(S^{(i\leftarrow z)}\right)\right|\leq\lambda\right\} \cdot\left|f(S^{(i\leftarrow y)}) - f\left(S^{(i\leftarrow z)}\right)\right| }\leq\tau, $$ where $S^{(i\leftarrow z)}$ is the same as $S$ except that the $i^{\text{th}}$ element is replaced with $z$. Then for every $\varepsilon>0$ we have that $$ \Pr_{S\sim\mathcal D^n}\left[ | f(S) - f(\mathcal D^n) | \geq 6(e^\varepsilon - e^{-\varepsilon})\tau n \right] < \frac{14\Delta}{(e^\varepsilon - e^{-\varepsilon})\tau}, $$ provided that $n\geq O\left(\frac{\lambda}{\varepsilon(e^\varepsilon - e^{-\varepsilon})\tau}\log\left(\frac{(e^\varepsilon - e^{-\varepsilon}) \tau}{\Delta}\right)\right)$ \end{theorem} \begin{proof} We only analyze the probability that $(f(S)-f(\mathcal D^n))$ is large. The analysis for $(f(\mathcal D^n)-f(S))$ is symmetric. Assume towards contradiction that with probability at least $\frac{7\Delta}{(e^\varepsilon - e^{-\varepsilon})\tau}$ we have that $ f(S) - f(\mathcal D^n) \geq 6(e^\varepsilon - e^{-\varepsilon})\tau n$. We now construct the following algorithm $\mathcal B$ that contradicts our expectation bound. 
\begin{algorithm}[H] \caption{$\mathcal B$}\addcontentsline{lof}{figure}{Algorithm $\mathcal B$} {\bf Input:} $T$ databases of size $n$ each: $\vec{S}=(S_1,\dots,S_T)$, where $T\triangleq\left\lfloor \frac{(e^\varepsilon - e^{-\varepsilon})\tau}{7\Delta }\right\rfloor$. \begin{enumerate}[rightmargin=10pt,itemsep=1pt,topsep=4pt] \item Set $H=\{\bot,1,2,\dots,T\}$. \item For $i=1,...,T$, define $q(\vec{S},i) = f(S_i) - f(\mathcal D^n)$. Also set $q(\vec{S},\bot)=0$. \item Sample $t^*\in H$ with probability proportional to $\exp\left(\frac{\varepsilon}{2\lambda} q(\vec{S},t)\right)$. \end{enumerate} \textbf{Output:} $t.$ \end{algorithm} The fact that algorithm $\mathcal B$ is $(\varepsilon,(f,\lambda))$-differentially private follows from the standard analysis of the Exponential Mechanism of McSherry and Talwar~\cite{McSherryT07}. The proof appears in Claim~\ref{claim:ExpMechPrivacy} for completeness. Now consider applying $\mathcal B$ on databases $\vec{S} = (S_1,\dots,S_T)$ containing i.i.d.\ samples from $\mathcal D$. By our assumption on $\mathcal D$ and $f$, for every $t$ we have that $f(S_t) - f(\mathcal D^n) \geq 6(e^\varepsilon - e^{-\varepsilon})\tau n$ with probability at least $\frac{7\Delta}{(e^\varepsilon - e^{-\varepsilon})\tau}$. By our choice of $T = \left\lfloor \frac{(e^\varepsilon - e^{-\varepsilon})\tau}{7\Delta} \right\rfloor$, we therefore get $$\Pr_{\vec{S}\sim\mathcal D^{nT}}\left[{\max_{t \in [T]} \left\{ f(S_t)- f(\mathcal D^n) \right\} \geq 6(e^\varepsilon - e^{-\varepsilon})\tau n }\right] \geq 1 - \left( 1 - \frac{7\Delta}{(e^\varepsilon - e^{-\varepsilon})\tau} \right)^T \geq \frac12.$$ The probability is taken over the random choice of the examples in $\vec{S}$ according to $\mathcal D$. 
Thus, by Markov's inequality, \begin{equation}\label{eq:LargeError} \operatorname*{\mathbb{E}}_{\vec{S}\sim\mathcal D^{nT}}\left[\max_{t \in H} \left\{ q(\vec{S},t) \right\}\right] = \operatorname*{\mathbb{E}}_{\vec{S}\sim\mathcal D^{nT}}\left[\max\left\{0\;,\; \max_{t \in [T]} \left(f(S_t)- f(\mathcal D)\right) \right\}\right] \geq 3(e^\varepsilon - e^{-\varepsilon})\tau n. \end{equation} So, in expectation, $\max_{t \in H} \left(q(\vec{S},t)\right)$ is large. In order to contradict the expectation bound of Theorem~\ref{thm:dpGeneralization}, we need to show that this is also the case for the index $t^*$ that is sampled on Step~3. To that end, we now use the following technical claim, stating that the expected quality of a solution sampled as in Step~3 is high. \begin{claim}[e.g.,~\cite{BassilyNSSSU16}] \label{claim:EMutility_duplicate} Let $H$ be a finite set, $h : H \to \mathbb{R}$ a function, and $\eta >0$. Define a random variable $Y$ on $H$ by $\Pr[Y=y] = \exp(\eta h(y))/C$, where $C= \sum_{y \in H} \exp(\eta h(y))$. Then $\ex{}{h(Y)} \geq \max_{y \in H} h(y) - \frac{1}{\eta}\ln |H|$. \end{claim} For every fixture of $\vec{S}$, we can apply Claim~\ref{claim:EMutility_duplicate} with $h(t) = q(\vec{S},t)$ and $\eta = \frac{\varepsilon}{2\lambda}$ to get \begin{equation*} \operatorname*{\mathbb{E}}_{t^*\in_R H}[q(\vec{S},t^*)] =\operatorname*{\mathbb{E}}_{t^*\in_R H}\Big[\mathbbm{1}{\{t^*\neq\bot\}}\cdot\left(f(S_{t^*})-f(\mathcal D^n)\right)\}\Big] \geq \max\{0\;,\;\max_{t\in [T]}(f(S_t)-f(\mathcal D^n))\} - \frac{2\lambda}{\varepsilon} \ln(T+1). 
\end{equation*} Taking the expectation also over $\vec{S}\sim\mathcal D^{nT}$ we get that \begin{eqnarray*} \operatorname*{\mathbb{E}}_{\substack{\vec{S}\sim\mathcal D^{nT} \\ t^*\leftarrow\mathcal B\left(\vec{S}\right)}}\Big[\mathbbm{1}{\{t^*\neq\bot\}}\cdot\left(f(S_{t^*})-f(\mathcal D^n)\right)\}\Big] &\geq& \operatorname*{\mathbb{E}}_{\vec{S}\sim\mathcal D^{nT}}\left[\max\left\{0\;,\; \max_{t \in [T]} \left(f(S_t)- f(\mathcal D^n)\right) \right\}\right] - \frac{2\lambda}{\varepsilon} \ln(T+1)\\ &\geq& 3(e^\varepsilon - e^{-\varepsilon})\tau n - \frac{2\lambda}{\varepsilon} \ln(T+1). \end{eqnarray*} This contradicts Theorem~\ref{thm:dpGeneralization} whenever $n>\frac{2\lambda}{\varepsilon(e^\varepsilon - e^{-\varepsilon}) \tau}\ln(T+1)=\frac{2\lambda}{\varepsilon(e^\varepsilon - e^{-\varepsilon}) \tau}\ln(\frac{(e^\varepsilon - e^{-\varepsilon})\tau}{7\Delta}+1)$. \end{proof} \begin{claim}\label{claim:ExpMechPrivacy} Algorithm $\mathcal B$ is $(\varepsilon,(f,\lambda))$-differentially private. \end{claim} \begin{proof} Fix two $(f,\lambda)$-neighboring databases $\vec{S}$ and $\vec{S'}$, and let $b\in\{\bot,1,2,\dots,T\}$ be a possible output. We have that \begin{align} \Pr[\mathcal B(\vec{S})=b]&={}\frac{\exp(\frac{\varepsilon}{2\lambda}\cdot q(\vec{S},b))}{\sum_{a\in H}\exp(\frac{\varepsilon}{2\lambda}\cdot q(\vec{S},a))}\label{eq:11} \end{align} Using the fact that $\vec{S}$ and $\vec{S'}$ are $(f,\lambda)$-neighboring, for every $a\in H$ we get that $q(\vec{S'},a)-\lambda\leq q(\vec{S},a)\leq q(\vec{S'},a)+\lambda$. Hence, \begin{align*} (\ref{eq:11})&\leq{} \frac{\exp(\frac{\varepsilon}{2\lambda}\cdot [q(\vec{S'},b)+\lambda])}{\sum_{a\in H}\exp(\frac{\varepsilon}{2\lambda}\cdot [q(\vec{S'},a)-\lambda])}\\ &={} \frac{e^{\varepsilon/2}\cdot\exp(\frac{\varepsilon}{2\lambda}\cdot q(\vec{S'},b))}{e^{-\varepsilon/2}\sum_{a\in H}\exp(\frac{\varepsilon}{2\lambda}\cdot q(\vec{S'},a))}\\ &={} e^{\varepsilon}\cdot\Pr[\mathcal B(\vec{S'})=b]. 
\end{align*} For any possible {\em set} of outputs $B\subseteq\{\bot,1,2,\dots,T\}$ we now have that $$ \Pr[\mathcal B(\vec{S})\in B]=\sum_{b\in B}\Pr[\mathcal B(\vec{S})=b]\leq \sum_{b\in B}e^{\varepsilon}\cdot\Pr[\mathcal B(\vec{S'})=b] = \Pr[\mathcal B(\vec{S'})\in B]. $$ \end{proof} \end{document}
\begin{document} \title{Isotropic cuspidal functions in the Hall algebra of a quiver} \author{Lucien Hennecart} \address{Laboratoire de Math\'ematiques d'Orsay, Univ. Paris-Sud, CNRS, Universit\'e Paris-Saclay, 91405 Orsay, France} \email{[email protected]} \date{March 11, 2019} \keywords{Hall algebras, Cuspidal functions} \begin{abstract} From the structure of the category of representations of an affine cycle-free quiver, we determine an explicit linear form on the space of regular cuspidal functions over a finite field: its kernel is exactly the space of cuspidal functions. Moreover, we show that any isotropic cuspidal dimension has an affine support. Brought together, this two results give an explicit description of isotropic cuspidal functions of any quiver. The main theorem together with an appropriate action of some permutation group on the Hall algebra provides a new elementary proof of two conjectures of Berenstein and Greenstein previously proved by Deng and Ruan. We also prove a statement giving non-obvious constraints on the support of the comultiplication of a cuspidal regular function allowing us to connect both mentioned conjectures of Berenstein and Greenstein. Our results imply the positivity conjecture of Bozec and Schiffmann concerning absolutely cuspidal polynomials in isotropic dimensions. \end{abstract} \maketitle \tableofcontents \section{Introduction} Primitive elements of Hopf algebras or more generally of bialgebras are of primary importance in their study. A striking result is the Milnor-Moore theorem (\cite{MR0174052}) asserting that a graded connected cocommutative Hopf algebra with finite dimensional graded parts is isomorphic to the enveloping algebra of the Lie algebra of its primitive elements. Primitive elements of quantum groups (\cite{LusztigQuantum}) have no mystery: these are the Chevalley generators. 
The situation for generalized Borcherds-Kac-Moody algebras (\cite{Borcherds, MR1341758}) is analogous: these behave in fact very much like quantum groups associated to Kac-Moody algebras, although an infinite number of generators and imaginary simple roots are allowed. The Hall algebra of a quiver gives a way to construct quantum groups. Namely, given a quiver $Q$, one can consider the category $\Rep_Q(\F_q)$ of finite dimensional representations of $Q$ over some finite field $\F_q$. It can be used to built the so-called Hall algebra of $Q$ over $\F_q$, which is a Hopf algebra object in some braided monoidal category\footnote{More precisely, in the category of $\Z^I$-graded $\C$-vector spaces with finite dimensional graded components with braiding $X\otimes Y\rightarrow Y\otimes X$, $x\otimes y\mapsto \nu^{(x,y)}y\otimes x$ for any objects $X$ and $Y$ and homogeneous $x,y$, where $I$ is the set of vertices of $Q$ and $(-,-)$ is the symmetrized Euler form defined in Section \ref{2}.}. There is a natural subalgebra of the Hall algebra. It is the subalgebra generated by the classes of simple representations $[S_i]$ at each vertex $i$ of $Q$. This is the so-called composition algebra. By a theorem of Ringel (\cite{MR1062796}), it is isomorphic to the positive part of the quantum group $\U_{\nu}(\mathfrak{g}_A)$ specialized at $\nu=q^{1/2}$. The work of Sevenhant and Van den Bergh (\cite{SevenhantVdB}) identifies the whole Hall algebra $\HH_{Q,\F_q}$ of $Q$ with the quantization of the enveloping algebra of a generalized Kac-Moody algebra. The isomorphism is constructed using primitive elements of $\HH_{Q,\F_q}$ and depends on such a choice. This is not completely satisfactory since we would like to determine natural generators of the Hall algebra. This work is the beginning of this project as we provide a way to compute explicitly primitive elements of the Hall algebra in isotropic dimensions. 
This solves the problem of the calculation of primitive elements of the Hall algebra for affine quivers, but the ambiguity still remains. Here is a brief overview of what is done in this paper. In Section \ref{2}, we introduce notations and known facts of the representation theory of quivers. We focuse in particular on the category of representations of affine quivers and recall their decomposition in blocks. In Section \ref{3}, we recall the definition of the constructible Hall algebra of a quiver. We provide several formulas for the comultiplication and recall the theorem of Sevenhant and Van den Bergh, which will only be used in Section \ref{7} to prove Conjecture \ref{conj2}. A major role is played by the Kronecker quiver for which the classification of representations is explicit. In Section \ref{4}, we write the formulas for the number of indecomposable and absolutely indecomposable representations, and for the dimensions of cuspidal functions for affine quivers. In Section \ref{5}, we calculate explicitly all cuspidal functions of the Jordan quiver. We do not know any formula for nilpotent cuspidal functions of cyclic quivers but we provide sufficient informations on the value they take on indecomposable representations to deal with them. In Section \ref{6}, we determine regular cuspidal functions of affine quivers. Regular cuspidality is a weaker condition than cuspidality. As a numerical coincidence, in imaginary dimensions, cuspidal functions form a codimension one subspace of regular cuspidal functions. We determine an explicit linear form defining this hyperplane. In Section \ref{7}, we use our results to prove two conjectures made by Berenstein and Greenstein in \cite{MR3463039} concerning the symmetry of the Hall algebra. The last Section \ref{8} is devoted to show that an isotropic cuspidal dimension of any quiver has affine support. This immediately implies a conjecture of Bozec and Schiffmann in isotropic dimensions. 
The letter $I$ is used for both the set of vertices of a quiver and an indecomposable representation of a quiver. It should be clear from the context how to distinguish them. \subsection{The main results} We state here our main contributions. Let $Q$ be an affine quiver and $\F_q$ a finite field. As in Section \ref{3}, let $\HH_{Q,\F_q}$ be the Hall algebra of $Q$ over $\F_q$. \subsubsection{Cuspidal functions as the kernel of a linear form} In Section \ref{6}, we consider the subalgebra $\HH_{Q,\F_q,\RC}$ of $\HH_{Q,\F_q}$ generated by classes $[M]$ for $M$ a regular representation. It is a Hopf algebra for a corestriction of the comultiplication of $\HH_{Q,\F_q}$\footnote{see the introduction for precisions on the bialgebra structure.} endowed with a nondegenerate hermitian product $(-,-)$. The algebra $\HH_{Q,\F_q}$ has a well-understood structure. Indeed, we have a bialgebra graded isomorphism \[ \HH_{Q,\F_q,\RC}\simeq \underset{a\in \lvert\PP^1_{\F_q}\rvert}{\bigotimes^{}{}^{\prime}}\HH_{a} \] where $\HH_{a}$ is isomorphic to Macdonald's ring of symmetric function or to the nilpotent Hall algebra of a cyclic quiver for some finite number of $a$, and the degree of elements of $\HH_a$ for $a\in\lvert\PP^1_{\F_q}\rvert$ is multiplied by $\deg(a)$ to obtain the degree in $\HH_{Q,\F_q,\RC}$. This decomposition allows us to give an expression of primitive elements of this algebra, called regular cuspidal functions. For $r\geq 1$ and $\delta$ the indecomposable imaginary root of $Q$, let $\HH_{Q,\F_q,\RC}^{\cusp}[r\delta]$ be the subspace of regular cuspidal functions of dimension $r\delta$. We also let \[ \chi_{r\delta}=\sum_{\substack{[M]\text{ regular}\\ \dim M=r\delta}}[M]. \] \begin{theorem} The kernel of the linear form \[ \begin{matrix} L :& \HH_{Q,\F_q,\RC}^{\cusp}&\rightarrow &\C\\ &f&\mapsto&(f,\chi_{r\delta}) \end{matrix} \] is $\HH_{Q,\F_q}^{\cusp}[r\delta]$. 
\end{theorem} \subsubsection{An action of a permutation group on the Hall algebra} To prove Conjecture \ref{conj2}, we use the following action of a product of permutation groups on the Hall algebra which deserves to be considered separately. As in Section \ref{2}, $D$ denote the set of closed points of $\PP^1_{\F_q}$ parametrizing non-homogeneous tubes. For $e\geq 2$, we let $N(e)$ be the number of closed points of $\PP^1_{\F_q}$ of degree $e$ and $N(1)=q+1-|D|$. Let $\mathfrak{S}$ be the group of degree preserving permutations of $\lvert\PP^1_{\F_q}\rvert\setminus D$. The group $\mathfrak{S}$ is isomorphic to \[ \prod_{e\geq 1}\mathfrak{S}_{N(e)} \] where for a positive integer $N$, $\mathfrak{S}_N$ is the symmetric group on $N$ letters. We define an action \[ \mathfrak{S}\rightarrow \Aut(\HH_{Q,\F_q}) \] as follows. For $M,N$ two representations, $\lambda$ a partition, $x\in\lvert\PP^1_{\F_q}\rvert\setminus D$ and $\sigma\in \mathfrak{S}$, \begin{gather*} \sigma\cdot [M]=[M] \quad \text{ if $[M]$ is preprojective, preinjective or in a non-homogeneous tube}\\ \sigma\cdot [I_{\lambda}(x)]=[I_{\lambda}(\sigma(x))]\\ \sigma\cdot ([M]\oplus [N])=\sigma\cdot [M]\oplus \sigma\cdot[N] \end{gather*} where for notational reasons, we define here $[M]\oplus[N]=[M\oplus N]$. It is easily seen that $\sigma$ acts as a graded linear isomorphism on $\HH_{Q,\F_q}$. We prove in Section \ref{7} the following facts. \begin{enumerate} \item $\sigma$ acts as an isometry of $\HH_{Q,\F_q}$, \item The action of $\sigma$ leaves $\HH_{Q,\F_q,\RC}$ and $\HH_{Q,\F_q,\RC}^{\cusp}$ stable, \item $\sigma$ commutes with the linear form $L$. In particular, it preserves $\HH_{Q,\F_q}^{\cusp}[\dd]$ for any dimension $\dd$. \end{enumerate} This yields the following result. \begin{theorem} The group $\mathfrak{S}$ acts on $\HH_{Q,\F_q}$ by degree preserving Hopf algebra automorphisms. \end{theorem} Usually, quantum groups have very few degree preserving Hopf algebra automorphisms. 
The only ones are obtained by rescaling the Chevalley generators. Here, there is multiplicities for imaginary roots, from which we obtain non-trivial automorphisms. \section{Structure of the category of representations of affine quivers over a finite field}\label{2} \subsection{Notation and recollections on quiver representations} In this section, let $Q=(I,\Omega)$ be an arbitrary quiver with set of vertices $I$ and set of arrows $\Omega$ and $k$ a field. We denote by $\Rep_Q(k)$ the category of finite dimensional representations of $Q$ over $k$. Equivalently, this is the category of finite dimensional modules over the path algebra $kQ$ of $Q$. This category is known to be a $k$-linear abelian category of homological dimension one. \subsubsection{The Euler form}For $\mathcal{A}$ an abelian category, $K_0(\mathcal{A})$ is its Grothendieck group. For $M$ an object of $\mathcal{A}$, $\xoverline{M}\in K_{0}(\mathcal{A})$ is its class in the Grothendieck group. The bilinear (usually non-symmetric) form \[ \langle -,-\rangle : K_0(\mathcal{A})\times K_0(\mathcal{A})\rightarrow \Z,\quad\quad \langle \xoverline{M},\xoverline{N}\rangle=\homm(M,N)-\ext^1(M,N) \] where by definition $\homm(M,N)=\dim_k \Hom_{kQ}(M,N)$ and $\ext^1(M,N)=\dim_k\Ext^1_{kQ}(M,N)$ is called the Euler form of the quiver. It factorizes through the (surjective) morphism of abelian groups $\dim : K_0(\mathcal{A})\rightarrow \Z^I$ given by the dimension $\xoverline{M}\mapsto \dim M$. In fact, if $\dd=\dim M\in \N^I$ and $\dd'=\dim N\in \N^I$, we have the explicit formula : \[ \langle\xoverline{M},\xoverline{N}\rangle=\sum_{i\in I}\dd_i\dd'_i-\sum_{\alpha : i\rightarrow j}\dd_i\dd'_j. \] We use the same notation \[ \langle -,-\rangle : \Z^I\times \Z^I\rightarrow \Z \] for the induced bilinear form. We will also consider its symmetrized version : \[ (-,-) : \Z^I\times \Z^I\rightarrow \Z, \quad\quad (\dd,\dd')=\langle\dd,\dd'\rangle+\langle\dd',\dd\rangle \quad \text{for any $\dd,\dd'\in\Z^I$}. 
\] In case of an affine quiver, the symmetrized Euler form is nonnegative with one dimensional kernel generated by an indecomposable integer valued positive vector, called the indecomposable imaginary root, denoted by the letter $\delta$ (see \cite[Theorem 8.6]{SchifflerQuiver}). \subsubsection{Dualization}\label{dualisation}Let $Q$ be a quiver and $Q^*$ the quiver in which we change the orientation of all arrows, which deserves the name of dual quiver. A representation $V$ of $Q$ gives a representation $V^*$ of $Q^*$ obtained by dualizing the vector spaces at the vertices of $Q$ and replacing the linear maps between them by their transpose. Explicitly, if $V=((V_i)_{i\in I}, (f_{\alpha})_{\alpha\in\Omega})$ is a representation of $Q$, its dual is $V^*=((V_i^*)_{i\in I}, (^t\!f_{\alpha})_{\alpha\in\Omega})$. We obtain in this way an equivalence of categories \[ \tilde{D} : \Rep_Q(k)^{\op}\rightarrow\Rep_{Q^*}(k) \] which associates to a representation its dual. In particular, there is an identification $\Hom_{kQ^*}(M^*,N^*)\simeq \Hom_{kQ}(N,M)$ and $\Ext^1_{kQ^*}(M^*,N^*)\simeq \Ext_{kQ}^1(N,M)$. \subsection{The structure of the category of representations of an affine quiver} \subsubsection{Cyclic and Jordan quivers} Let $k$ be an arbitrary field. Cyclic and Jordan quivers are the non-acyclic quivers of affine type. This section introduces notations which will be later used. Let $J$ be the Jordan quiver. We let $J_n$ be the $n\times n$ indecomposable nilpotent matrix with ones on the superdiagonal and zeros everywhere else: \[ \begin{pmatrix} 0&1&0&\hdots&0\\ 0&\ddots&\ddots& &0\\ \vdots&\ddots&\ddots&\ddots&\vdots\\ &&\ddots&\ddots&1\\ 0&\hdots&\hdots&0&0 \end{pmatrix}. \] We also see $J_n$ as an $n$-dimensional representation of the Jordan quiver. For a partition $\lambda=(\lambda_1,\hdots)$, we write $J_{\lambda}=\bigoplus_{j\geq 1}J_{\lambda_j}$. 
Any nilpotent representation of $J$ over $k$ is isomorphic to exactly one representation of the following set: \[ \{J_{\lambda} : \lambda\text{ partition}\}. \] All other isomorphism classes of representations of the Jordan quiver also have an explicit representative which will not be used. See \cite[Section 3.]{HuaCounting}. Let $n\geq 1$ be a integer and $C_n$ be the cyclic quiver of length $n$. What follows also applies to $C_1$ which is the Jordan quiver. We distinguish two types of representations for cyclic quivers: invertible representations, for which every arrow is invertible, and nilpotent representations, for which the composition of arrows along any sufficiently long path is zero. Before describing them, we introduce some notations. We suppose that the vertices are indexed by $\Z/n\Z$ with exactly one arrow $i\rightarrow i+1$ for $i\in \Z/n\Z$. A representation of $C_n$ is a $n$-tuple $(a_{0},\hdots,a_{n-1})$ where $a_i : V_i\rightarrow V_{i+1}$ for $i\in\Z/n\Z$ and some $k$-vector spaces $V_i$. We let $\Rep_{C_n}^{\inv}(k)$ be the full subcategory of invertible representations of $C_n$ over $k$ and $\Rep_{C_n}^{\nil}(k)$ be the full subcategory of nilpotent representations. \begin{enumerate} \item $\Rep_{C_n}(k)=\Rep_{C_n}^{\nil}(k)\sqcup\Rep_{C_n}^{\inv}(k)$ is a decomposition in blocks of $\Rep_{C_n}(k)$. \item A full set of representatives of nilpotent indecomposable representations is built as follows. Let $l$ be a nonnegative integer and for $0\leq m\leq l$, $V_m=ke_m$ a one-dimensional vector space generated by $e_m$. The $\Z/n\Z$-graded vector space $V_{i,l}=\bigoplus_{\bar{x}\in\Z/n\Z}\bigoplus_{m\equiv x-i\pmod{n}}V_m$ with the endomorphism sending $e_m$ to $e_{m+1}$ if $0\leq m<l$ and $e_l$ to zero defines an indecomposable representation of $C_n$ again denoted by $V_{i,l}$. The set \[ \{V_{i,l} : i\in\Z/n\Z, l\geq 0\} \] contains exactly one representative of each isomorphism class of indecomposable nilpotent representations. 
Define also $S_i=V_{i,0}$ for $i\in\Z/n\Z$. \item The following functor is an equivalence of categories: \begin{equation}\label{equivJC} \begin{matrix} G_n :& \Rep_{J}^{\inv}(k)&\rightarrow &\Rep_{C_n}^{\inv}(k)\\ &(V,a)&\mapsto&(\id,\hdots,\id,a). \end{matrix} \end{equation} \end{enumerate} \begin{proof}The statement $(3)$ is straightforward and $(2)$ is proved in \cite[Section 3.5]{SchiffmannHall}. We prove $(1)$. Since $\Rep_{C_n}^{\nil}(k)$ and $\Rep_{C_n}^{\inv}(k)$ are stable under taking subobjects and quotients in $\Rep_{C_n}(k)$, any morphism from a nilpotent representation $M$ to an invertible representation $N$ is zero, and conversely. The extension spaces $\Ext^{1}(M,N)$ and $\Ext^{1}(N,M)$ also vanish, since using the Euler form and that $\dim N$ is some multiple $r\delta=(r,\hdots,r)$ of the indecomposable imaginary root $\delta=(1,\hdots,1)$, we have: \[ 0=(\xoverline{M},\xoverline{N})=-\ext^1(M,N)-\ext^1(N,M)=0. \] \end{proof} \subsubsection{Decomposition in blocks of $\Rep_{C_n}(k)$}\label{blockCn} Let $k$ be a field. We give here a decomposition of $\Rep_{C_n}(k)$ in blocks. Let $V$ be a $k$-vector space of dimension $d$. The group $\GL(V)$ acts algebraically on $\Hom(V,V)$ by conjugation with quotient $\Hom(V,V)//\GL(V)\simeq S^d\AAAA^1_k$. Denote by $\pi_V$ the projection $\pi_V : \Hom(V,V)\rightarrow S^d\AAAA^1_k$. \begin{theorem} We have a decomposition in blocks \[ \Rep_{J}(k)\simeq \bigsqcup_{a\in\mid\AAAA^1_k\mid}\Rep^a_J(k), \] where $(V,x)\in\Rep^a_J(k)$ if and only if $\pi_V(x)=(a,\hdots,a)$. \end{theorem} \begin{theorem} We have a decomposition in blocks \[ \Rep_{C_n}(k)\simeq\bigsqcup_{a\in\mid\AAAA^1_k\mid}\Rep_{C_n}^a(k) \] where $\Rep_{C_n}^0(k)=\Rep_{C_n}^{\nil}(k)$ and $\Rep_{C_n}^a(k)$ is the full subcategory $G_n(\Rep_J^{a}(k))$ of $\Rep_{C_n}^{\inv}(k)$. 
\end{theorem} \subsubsection{Acyclic affine quivers} We recollect known facts on the representation theory of acyclic affine quivers (see \cite{MR774589}) over a finite field (some results may hold in greater generality). The exposition here follows and can be completed by \cite[Section 3.6]{SchiffmannHall} and \cite[\S 8]{CBlec}. Throughout this section, $Q=(I,\Omega)$ is an acyclic affine quiver. This condition is equivalent to the finite dimensionality of the path algebra of $Q$ over any field and excludes the Jordan quiver and cyclic quivers which have been studied above. \begin{theorem} Let $k$ be an arbitrary field. Then, there exists an adjunction \[ \tau^- : \Rep_{Q}(k) \rightleftarrows \Rep_Q(k) : \tau \] with bi-natural isomorphisms\footnote{We say that $(\tau^-,\tau)$ is a Serre adjunction.} (the star means the dual with respect to the $k$-vector space structure): \[ \Ext^1(M,N)^*\simeq \Hom(N,\tau M),\quad\quad \Ext^1(M,N)^*\simeq \Hom(\tau^-N,M). \] \end{theorem} The functors $\tau$ and $\tau^-$ are known as \emph{Auslander-Reiten translates}. From the properties of $\tau^-$ and $\tau$, it is immediate that a representation $M$ of $Q$ over $k$ is projective if and only if $\tau(M)=0$ and injective if and only if $\tau^-(M)=0$. We call an indecomposable representation $M$ of $Q$ over $k$ \begin{enumerate} \item preprojective if $\tau^nM=0$ for $n\gg 0$, \item preinjective if $\tau^{-n}M=0$ for $n\gg0$, \item regular if $\tau^nM\neq 0$ for all $n\in\Z$. \end{enumerate} Furthermore, we call a representation $M$ of $Q$ over $k$ preprojective if all its indecomposable direct summands are preprojective, and we adopt similar terminology for preinjective and regular representations. The full subcategory of $\Rep_Q(k)$ of preprojective (resp. preinjective, resp. regular) representations is denoted by $\Rep_Q^{\PC}(k)$ (resp. $\Rep_Q^{\IC}(k)$, resp. $\Rep_Q^{\RC}(k)$). These are extension closed subcategories of $\Rep_Q(k)$, hence exact categories. 
Moreover, $\Rep_Q^{\RC}(k)$ is an abelian category (though not stable under taking subobjects in the bigger category $\Rep_Q(k)$). The three categories $\Rep_Q^{\RC}(k), \Rep_Q^{\PC}(k)$ and $\Rep_Q^{\IC}(k)$ are disjoint and the category to which an indecomposable $M$ belongs is given by the sign of its defect defined by $\partial M=\langle\delta,\dim M\rangle$. An indecomposable representation $M$ is preprojective if and only if $\partial M<0$, preinjective if and only if $\partial M>0$ and regular if and only if $\partial M=0$. The following proposition gives the interactions between these three subcategories. \begin{proposition}\label{extensions} For $M\in \Rep_Q^{\PC}(k)$, $N\in\Rep_Q^{\IC}(k)$, $L\in\Rep_Q^{\RC}(k)$, we have \[ \Hom(N,M)=\Hom(N,L)=\Hom(L,M)=0, \] \[ \Ext^1(M,N)=\Ext^1(L,N)=\Ext^1(M,L)=0. \] \end{proposition} The simple objects of the abelian category $\Rep_Q^{\RC}(k)$ are called simple regular. A simple regular representation $M$ is called homogeneous if $\tau M\simeq M$. \begin{theorem}[Ringel, \cite{MR774589}]\label{ringelth} Let $Q$ be an affine acyclic quiver and $k$ an arbitrary field. Let $d$ and $p_1,\hdots,p_d$ be attached to $Q$ as in the table below. 
Then \begin{enumerate} \item There is a degree preserving bijection $M_a\leftrightarrow a$ between the set of homogeneous regular simple modules and $\mid\PP^1_k\mid\setminus D$ where $D$ consists of $d$ closed points of degree one\footnote{in the sequel for $X$ a scheme, we denote by $\mid X\mid$ the set of its closed points.}, \item There are $d$ $\tau$-orbits $\OO_1,\hdots,\OO_d$ of non-homogeneous regular simple modules of size $p_1,\hdots,p_d$\footnote{\emph{i.e.} the set of isomorphism classes of simple objects in $\OO_j$, $1\leq j\leq d$ is of cardinality $p_j$ and the Auslander-Reiten translates $\tau$ and $\tau^-$ act as inverse cycles on it.}, \item The category $\Rep_Q^{\RC}(k)$ decomposes as a direct sum of blocks\footnote{There are no morphisms or extensions between the objects of different categories}: \[ \Rep_Q^{\RC}(k)=\bigsqcup_{a\in\mid\PP^1\mid\setminus D}\CC_{M_a}\sqcup\bigsqcup_{l=1}^d\CC_{\OO_l} \] where $\CC_{M_a}$ is the full subcategory of objects which are extensions of $M_a$ and $\CC_{\OO}$ is the full subcategory of $\Rep_Q^{\RC}(k)$ of objects whose regular simple factors lie in $\OO$. \end{enumerate} \begin{figure} \caption{Non-homogeneous tubes of affine quivers and their period \cite[(3.18)]{SchiffmannHall}}\label{tubes} \end{figure} \end{theorem} In Theorem \ref{ringelth}, the subcategories $\CC_{\OO_l}$ are called the non-homogeneous tubes while the subcategories $\CC_{M_a}$ are the homogeneous tubes. The number of non-homogeneous tubes is $d$ (see however Remark \ref{rem}) and the integers $p_1,\hdots,p_d$ are the periods. They do not depend on the chosen field. For $a\in\mid\PP_Q^1\mid$, $\CC_{Q}^a$ also denotes the corresponding tube. \begin{remark}\label{rem} In type $A_n^{(1)}$, in the case where all arrows except one go in the same direction, we have in fact $d=1$, \emph{i.e.} there is only one non-homogeneous tube. 
\end{remark} We can furthermore precisely identify the tubes $\CC_{M_a}$ and $\CC_{\OO}$ with the help of the Jordan quiver and of cyclic quivers respectively. \begin{theorem} Let $a\in \mid\PP^1_k\mid\setminus D$ be a closed point of degree $d$. Let $K=\End(M_a)$ be the $k$-algebra of endomorphisms of the simple regular $M_a$ of the tube $\CC_{M_a}$. This is a finite field extension of $k$ of degree $d$. There exists a unique equivalence of categories \[ \begin{matrix} F_a : &\Rep_J(K)&\rightarrow &\CC_{M_a}\\ &I_{(1)}&\rightarrow &M_a. \end{matrix} \] We set $I_{\lambda}^Q(a):=F_a(J_{\lambda})$. Let $a\in D$ be a closed point corresponding to a non-homogeneous tube. Let $p$ be the corresponding period of the non-homogeneous tube $\OO_a$ and $S$ a simple regular of $\OO_a$. There is a unique equivalence of categories \[ \begin{matrix} F_a : &\Rep_{C_p}(k)&\rightarrow&\CC_{\OO_a}\\ &S_i&\rightarrow&\tau^{i}S. \end{matrix} \] \end{theorem} In the case of a non-homogeneous tube, there is a reasonable way to choose the simple representation $S$. For this, we may first choose an extending vertex $i_0$ of $Q$. Then, since $\sum_{s=0}^{p-1}\dim\tau^sS=\delta$ and $\delta_{i_0}=1$, there is a unique simple representation $S$ in the non-homogeneous tube which is nonzero at vertex $i_0$. The isomorphism class of the representation $S$ may however depend on the extending vertex we choose. \subsection{Identification of the tubes with the help of the Kronecker quiver} \subsubsection{Representations of the Kronecker quiver}\label{Kronecker} We recall here the classification of indecomposables of the Kronecker quiver over a finite field $\F_q$ for the sake of completeness. It can be obtained from the classification over the algebraic closure $\xoverline{\F}_q$ using Galois arguments. For complements, see \cite[Section 7.7]{KirQuiv}. 
The Kronecker quiver has two vertices connected by two arrows going in the same direction : \[ K_2 : \quad\begin{tikzcd} 1 \arrow[r,shift left,"\alpha"] \arrow[r,shift right,swap,"\beta"] & 2, \end{tikzcd} \] We fix a finite field with $q$ elements $\F_q$ and an algebraic closure $\xoverline{\F}_q$ of $\F_q$. \begin{theorem}[{\cite[Theorem 7.30]{KirQuiv}}]\ \begin{enumerate} \item The set of real roots is $\{(n,n+1) : n\in \N\}\cup \{(n+1,n) : n\in\N\}$. For $n\in \N$, the indecomposable representation of $K_2$ over $\F_q$ of dimension $(n,n+1)$ is preprojective whereas the indecomposable representation of $K_2$ over $\F_q$ of dimension $(n+1,n)$ is preinjective. They have the following form: \[ \begin{tikzcd} \F_q^n \arrow[r, shift left,"{\begin{pmatrix} I_n\\ 0 \end{pmatrix} }"] \arrow[r,shift right,swap,"\begin{pmatrix} 0\\ I_n \end{pmatrix} "] & \F_q^{n+1}, \end{tikzcd} \] for the preprojective representations and \[ \begin{tikzcd} \F_q^{n+1} \arrow[r,shift left,"\begin{pmatrix} I_n \quad0 \end{pmatrix} "] \arrow[r,shift right,swap,"\begin{pmatrix} 0\quad I_n \end{pmatrix} "] & \F_q^{n}, \end{tikzcd} \] for the preinjective representations, where $I_n$ is the $n\times n$ identity matrix. \item The regular indecomposable representations of $K_2$ over $\F_q$ are parametrized by the set $\lvert\PP^1_{\F_q}\rvert\times \Z_{\geq 1}$. For $([x:y],n)\in \PP^1_{\F_q}(\xoverline{\F}_q)\times \Z_{\geq 1}$, the corresponding representation of $K_2$ is \[ \begin{tikzcd} \F_{q^{\deg(x)}}^{n} \arrow[r,shift left,"\begin{matrix} xI_n+J_n \end{matrix} "] \arrow[r,shift right,swap,"\begin{matrix} y I_n+J_n \end{matrix} "] & \F_{q^{\deg(x)}}^{n}, \end{tikzcd} \] where we consider $\F_{q^{\deg(x)}}$ as a $\F_q$-vector space of dimension $\deg(x)$. The isomorphism class of this representation only depends on the Galois orbit of $[x:y]$. 
We denote by $I_{t,n}$ where $t=[x:y]$ this representation and when $n=1$, we use the notation $S_t=I_{t,1}$ for $t\in\PP^1_{\F_q}(\xoverline{\F}_q)$. \end{enumerate} \end{theorem} For $t\in\PP^1_{\F_q}(\xoverline{\F}_q)$ and $\lambda=(\lambda_1,\lambda_2,\hdots)$ a partition, define $I_{t,\lambda}=\oplus_{i=1}^{l(\lambda)}I_{t,\lambda_i}$. The isomorphism class $[I_{t,\lambda}]$ only depends on the image $a$ of $t : \Spec\xoverline{\F}_q\rightarrow \PP^1_{\F_q}$. For any $a\in\lvert\PP^1_{\F_q}\rvert$, we fix $t_a\in\PP^1_{\F_q}(\xoverline{\F}_q)$ a geometric point such that the image of $t_a : \Spec\xoverline{\F}_q\rightarrow \PP^1_{\F_q}$ is $a$. The set of isomorphism classes of regular representations of $K_2$ over $\F_q$ is \[ \M^{K_2}_{\F_q}=\{[I_{t_a,\lambda}] : a\in\lvert\PP^1_{\F_q}\rvert, \lambda \text{ a partition}\}. \] For $a\in\lvert\PP^1_{\F_q}\rvert$, write $I_{a,\lambda}=I_{t_a,\lambda}$ and $S_a=I_{a,(1)}$. For any $r\geq 1$, the number $I_{K_2,(r,r)}(q)$ of indecomposable representations of the Kronecker quiver of dimension $(r,r)$ can be determined from this: \[ I_{K_2,(r,r)}(q)=\text{number of closed points of $\PP^1_{\F_q}$ of degree dividing $r$}. \] This description also gives a natural way to identify the tubes of the Kronecker quiver : \begin{equation}\label{Ktubes} \begin{matrix} C : &\lvert\PP^1_{\F_q}\rvert&\rightarrow &\{\text{tubes of $K_2$ over $\F_q$}\}\\ &a&\mapsto&C_a=\{I_{t_a,\lambda} : \lambda \text{ partition}\} \end{matrix}. \end{equation} \subsubsection{The number of automorphisms of regular representations of the Kronecker quiver} For $q$ a power of a prime and $\lambda=(1^{l_1},2^{l_2},\hdots)$ a partition, define: \[ a_{\lambda}(q)=q^{|\lambda|+2n(\lambda)}\prod_i\prod_{j=1}^{l_i}(1-q^{-j}). \] This is the number of automorphisms of the nilpotent representation $J_{\lambda}$ of type $\lambda$ of the Jordan quiver over $\F_q$ (see \cite[Lemma 2.8]{SchiffmannHall} or \cite[Chapter II, (1.6)]{macdonald}). 
The identification of the tubes of the Kronecker quiver with nilpotent representations of the Jordan quiver gives the following. \begin{proposition}\label{automorphisms} Let $a\in\lvert\PP^1_{\F_q}\rvert$ and $\lambda$ a partition. Then the number of automorphisms of $I^{K_2}_{t_a,\lambda}$ is $a_{\lambda}(q^{\deg(a)})$. \end{proposition} \subsubsection{Regular representations of an affine quiver} Let $Q$ be an arbitrary acyclic affine quiver. Let $i_0$ be an extending vertex of $Q$ and $\delta$ the indivisible imaginary root of $Q$. We can restrict ourselves to the case where $i_0$ is a sink, since it is always possible to choose the extending vertex to be a sink or a source, and the dualization is a way to pass from the second case to the first. Without referring to the dualization process, it is possible to adapt this section in the case where the extending vertex $i_0$ is a source. By the classification above, all tubes are homogeneous for the Kronecker quiver $K_2$, \emph{i.e.} in type $A_1^{(1)}$ with the noncyclic orientation. Let $Q'=(I',\Omega')$ be the finite type quiver associated to $Q$ by erasing the vertex $i_0$ and all arrows adjacent to it. The element $\theta=\delta-e_{i_0}$ is an element of $\N^{I'}$. We denote by $I_{\theta}$ an indecomposable representation of $Q$ of dimension $\theta$. Thanks to the classification of affine quivers there is either one vertex $i_1$ adjacent to $i_0$ (in types $D_n^{(1)}$ ($n\geq 4$) and $E_n^{(1)}, n=6,7,8$), in which case $\delta_{i_1}=2$, or two vertices $i_1, i_2$ adjacent to $i_0$ (in types $A_n^{(1)}$, $n>1$), in which case, $\delta_{i_1}=\delta_{i_2}=1$. In the first case, we choose an arbitrary identification $I_{i_1}\simeq k^2$. We have a functor \begin{equation}\label{FunctorF} \begin{matrix} F : &\Rep_{K_2}(k)&\rightarrow& \Rep_{Q}(k)\\ &(V_0,V_1,\alpha,\beta)&\mapsto&V \end{matrix} \end{equation} where $V$ is defined as follows. 
The restriction to the subquiver $Q'$ is $V_{Q'}=I_{\theta}\otimes V_0$, $V_{i_0}=V_1$, and if $i_0$ is connected by two arrows $i_1\rightarrow i_0$ and $i_2\rightarrow i_0$ to $Q'$, then we choose $\alpha$ for the map $V_{i_1}=V_0\rightarrow V_{i_0}=V_1$ and $\beta$ for the map $V_{i_2}=V_0\rightarrow V_{i_0}=V_1$. If $i_0$ is connected to $Q'$ by a single arrow $i_1\rightarrow i_0$, then the map $V_{i_1}\simeq V_0\oplus V_0\rightarrow V_{i_0}$ is chosen to be $\alpha\oplus \beta$. Of course this functor depends in the first case on the chosen order $i_1, i_2$ of the vertices connected to $i_0$ and in the second case on the identification $V_{i_1}\simeq k^2$. We implicitly fix such a choice. The action of $F$ on the morphisms is as follows. Let $V=(V_0,V_1,\alpha,\beta)$ and $V'=(V'_0,V'_1,\alpha',\beta')$ be two representations of $K_2$ and $f : V\rightarrow V'$ a morphism between them. The linear map $f_0 : V_0\rightarrow V'_0$ induces a morphism of representations of $Q'$, $\id\otimes f_0 : I_{\theta}\otimes V_0\rightarrow I_{\theta}\otimes V'_0$ and considering $f_1 : V_1=F(V)_{i_0}\rightarrow V'_1=F(V')_{i_0}$ at the vertex $i_0$, we obtain a morphism of representations $F(f) : F(V)\rightarrow F(V')$. Note that $F(S_1)=I_{\theta}$ and $F(S_2)=S_{i_0}$. \begin{proposition}[{\cite[Theorem 7.34]{KirQuiv}}] The functor $F$ is exact and fully faithful. \end{proposition} \begin{proof} For quiver representations, exactness of a sequence can be checked pointwise (\emph{i.e.} at each vertex). Here, tensor products are over a field so exactness of $F$ is immediate from its definition. For the full faithfulness, we use that $I_{\theta}$, being an indecomposable of $Q'$ which is a finite type quiver, is a brick, \emph{i.e.} $\End_{kQ'}(I_{\theta})\simeq k$. Let $V=(V_0,V_1,\alpha,\beta)$ and $V'=(V'_0,V'_1,\alpha',\beta')$ be two representations of $K_2$ and $g : F(V)\rightarrow F(V')$ a morphism of representations. 
We obtain for free a linear map $f_1=g_{i_0} : V_1\rightarrow V'_1$. Because $I_{\theta}$ is a brick, the restricted morphism of representations $g_{Q'} : I_{\theta}\otimes V_0\rightarrow I_{\theta}\otimes V'_0$ is induced by a unique linear map $f_0 : V_0\rightarrow V'_0$. The datum $(f_0,f_1) : V\rightarrow V'$ is a morphism of representations such that $F(f_0,f_1)=g$. Faithfulness is immediate. This concludes the proof. \end{proof} \begin{proposition}\label{ess} The image of the functor $F$ is the full subcategory of $\Rep_{Q}(\F_q)$ whose objects are extensions of $I_{\theta}^{\oplus d_1}$ by $S_{i_0}^{\oplus d_2}$ for some $d_1, d_2\geq 0$. \end{proposition} \begin{proof} This is a straightforward consequence of the definition of $F$. By definition, if $V=(V_0,V_1,\alpha,\beta)$ is a representation of $K_2$, then $F(V)$ is an extension of $I_{\theta}\otimes V_0$ by $S_{i_0}\otimes V_1$. Conversely let $(V', (f_{\alpha})_{\alpha\in\Omega})$ be a representation of $Q$, extension of $I_{\theta}^{\oplus d_1}$ by $S_{i_0}^{\oplus d_2}$ for some $d_1,d_2\geq 0$. We argue separately according to whether $Q=A_n^{(1)}$ or $Q \in\{D_n^{(1)}, E_n^{(1)}\}$. In the first case, we define the representation $V=(V_0,V_1,\alpha,\beta)$ of $K_2$ by setting $V_1=V'_{i_0}$, $V_0=V'_{i_1}$. An isomorphism of the restriction of $V'$ to $Q'$ with $I_{\theta}^{\oplus d_1}$, $\psi : V'_{Q'}\rightarrow I_{\theta}^{\oplus d_1}$ induces an isomorphism $\psi : V'_{i_1}\rightarrow V'_{i_2}$. We define $\alpha = f_{i_1\rightarrow i_0}$ and $\beta = f_{i_2\rightarrow i_0}\circ \psi$. It is easily checked that the isomorphism class of the so defined representation $V$ does not depend on the various choices made and that $F(V)\simeq V'$. In the second case, an isomorphism $V'/S_{i_0}^{d_2}\simeq I_{\theta}^{\oplus d_1}$ gives an identification $\varphi : V'_{i_1}\rightarrow k^2\otimes k^{d_1}$. 
We then define $V$ by setting $V_0=k^{d_1}$, $V_1=V'_{i_0}$ and we define $\alpha=f_{i_1\rightarrow i_0}\circ\varphi^{-1}((1,0)\otimes -)$ and $\beta =f_{i_1\rightarrow i_0}\circ\varphi^{-1}((0,1)\otimes -)$. This defines a representation $V=(V_0,V_1,\alpha,\beta)$ of $K_2$ such that $F(V)\simeq V'$. \end{proof} Following the notations of Kirillov in \cite{KirQuiv}, we define $S_a^{Q}=F(S_a)$ for $a\in \lvert\PP^1_{\F_q}\rvert$. \begin{theorem}[Identification of the simple regular representations, {\cite[Theorem 7.37]{KirQuiv}}]\label{identif}\ \begin{enumerate} \item Let $X$ be a simple regular representation of $Q$ over $\F_q$ of period $1$. Then $\dim X=d\delta$ where $d$ is the degree of the tube containing $X$ and $X\simeq S_a^Q$ for some closed point $a\in\lvert\PP^1_{\F_q}\rvert$. \item Let $\OO$ be the $\tau$-orbit of a simple regular representation of period $l>1$. Then \[ \sum_{X\in\OO}\dim X=\delta. \] Since at the extending vertex $i_0$, $\delta_{i_0}=1$, $\OO$ contains a unique simple regular representation $X$ such that $X_{i_0}\neq 0$. Moreover, if $X^{(l)}\in\CC_{\OO}$ is the unique indecomposable representation of dimension $\delta$ having $X$ as quotient, then $X^{(l)}\simeq S_a^Q$ for some $a\in\lvert\PP^1_{\F_q}\rvert$. \end{enumerate} \end{theorem} \subsection{Some examples} We treat here the examples of types $D_4^{(1)}$ and $A_4^{(1)}$ with some particular orientations in dimension $\delta$ to illustrate the previous procedure in the two different cases for which (1) the extending vertex is connected to one vertex and (2) the extending vertex is connected to two vertices. \subsubsection{The type $D_4^{(1)}$} Let $Q$ be the quiver \[ \begin{tikzcd} 1\arrow[leftarrow,rd] & & 1\\ &2 \arrow[rightarrow,rd] \arrow[ru]&\\ 1\arrow[leftarrow,ru] & &1 \end{tikzcd}. \] The labels of the vertices are given by the indecomposable imaginary root $\delta$ of $Q$. The extending vertex can be one of the four vertices with a $1$. 
To fix the notations, we choose the bottom right vertex and call it $i_0$ as before. An indecomposable representation of $Q$ of dimension $\theta=\delta-e_{i_0}$ is \[ \begin{tikzcd} \F_q\arrow[leftarrow,rd]{}{(1,0)} & & \F_q\\ &\F_q^2 \arrow[rightarrow,rd]{}{} \arrow[ru,swap]{}{(0,1)}&\\ \F_q\arrow[leftarrow,ru]{}{(1,1)} & &0 \end{tikzcd}. \] Up to isomorphism, it is unique. Then, isomorphism classes of $(1,1)$-dimensional representations of $K_2$ are parametrized by $\PP^1_{\F_q}(\F_q)$. The functor $F$ induces on isomorphism classes of regular representations of dimension $\delta$ the following map : \[ \begin{matrix} F : &\M_{\F_q}^{K_2}[\delta]\simeq \PP^1_{\F_q}(\F_q)&\rightarrow &\M_{\F_q}^Q[\delta]\\ &[\lambda : \mu]&\mapsto& [S_{(\lambda, \mu)}^Q] \end{matrix} \] where $S_{(\lambda, \mu)}^Q$ is the following representation of Q : \[ \begin{tikzcd} \F_q\arrow[leftarrow,rd]{}{(1,0)} & & \F_q\\ &\F_q^2 \arrow[rightarrow,rd]{}{(\lambda,\mu)} \arrow[ru,swap]{}{(0,1)}&\\ \F_q\arrow[leftarrow,ru]{}{(1,1)} & &\F_q \end{tikzcd}. \] There are three non-homogeneous tubes associated to the parameters $[1:0], [0:1], [1:1]\in\PP^1_{\F_q}(\F_q)$. The other parameters give simple regular representations of dimension $\delta$. We now study one non-homogeneous tube. By symmetry of the quiver, this suffices to determine the three non-homogeneous tubes. Let us choose the non-homogeneous tube associated to $[1:0]$. Thanks to Theorem \ref{identif}, the regular simple representations of the tube are the indecomposables \[ Y=\begin{tikzcd} 0\arrow[leftarrow,rd]{}{} & & \F_q\\ &\F_q \arrow[rightarrow,rd]{}{} \arrow[ru,swap]{}{}&\\ \F_q\arrow[leftarrow,ru]{}{} & &0 \end{tikzcd} \quad \text{ and } \quad X=\begin{tikzcd} \F_q\arrow[leftarrow,rd]{}{} & & 0\\ &\F_q \arrow[rightarrow,rd]{}{} \arrow[ru,swap]{}{}&\\ 0\arrow[leftarrow,ru]{}{} & &\F_q \end{tikzcd}. 
\] In this non-homogeneous tube, there are two indecomposables of dimension $\delta$ up to isomorphism: the representation $S_{(1,0)}^Q$ and the unique (up to isomorphism) extension of $Y$ by $X$: \[ \begin{tikzcd} \F_q\arrow[leftarrow,rd]{}{(1,1)} & & \F_q\\ &\F_q^2 \arrow[rightarrow,rd]{}{(1,0)} \arrow[ru,swap]{}{(0,1)}&\\ \F_q\arrow[leftarrow,ru]{}{(0,1)} & &\F_q \end{tikzcd}. \] \subsubsection{The type $A_4^{(1)}$} Let $Q$ be the quiver \[ \begin{tikzcd} 1\arrow[r] \arrow[d]&1\arrow[d]\\ 1\arrow[r]&1 \end{tikzcd}. \] The extending vertex is the bottom right vertex, we call it $i_0$ and the indivisible imaginary root $\delta$ is given in the graph above. The indecomposable representation of dimension $\theta=\delta-e_{i_0}$ is \[ \begin{tikzcd} \F_q\arrow[r]{}{1} \arrow[d,swap]{}{1}&\F_q\arrow[d]\\ \F_q\arrow[r]& 0 \end{tikzcd}. \] The functor $F$ induces on isomorphism classes the following map \[ \begin{matrix} F : &\M_{\F_q}^{K_2}[(1,1)]\simeq \PP^1_{\F_q}(\F_q)&\rightarrow &\M_{\F_q}^Q[\delta]\\ &[\lambda : \mu]&\mapsto& [S_{(\lambda, \mu)}^Q] \end{matrix} \] where $S_{(\lambda, \mu)}^Q$ is the following representation of $Q$: \[ \begin{tikzcd} \F_q\arrow[r]{}{1} \arrow[d,swap]{}{1}&\F_q\arrow[d]{}{\mu}\\ \F_q\arrow[r,swap]{}{\lambda}& \F_q \end{tikzcd}. \] The two non-homogeneous tubes correspond to the parameters $[\lambda:\mu]=[0:1]$ and $[\lambda:\mu]=[1:0]$ and both are of period two. Because of the symmetry of the quiver, we study only the case $[\lambda:\mu]=[1:0]$. The two regular simple representations of this tube are \[ Y=\begin{tikzcd} 0\arrow[r]{}{} \arrow[d]{}{}&\F_q\arrow[d]{}{}\\ 0\arrow[r]{}{}& 0 \end{tikzcd} \quad \text{ and } \quad X=\begin{tikzcd} \F_q\arrow[r,swap]{}{} \arrow[d,swap]{}{1}&0\arrow[d]{}{}\\ \F_q\arrow[r,swap]{}{1}& \F_q \end{tikzcd}. 
\] The two indecomposable of dimension $\delta$ in this tube are \[ \begin{tikzcd} \F_q\arrow[r]{}{1} \arrow[d,swap]{}{1}&\F_q\arrow[d]{}{0}\\ \F_q\arrow[r,swap]{}{1}& \F_q \end{tikzcd} \quad \text{and} \quad \begin{tikzcd} \F_q\arrow[r]{}{1} \arrow[d,swap]{}{1}&\F_q\arrow[d]{}{1}\\ \F_q\arrow[r,swap]{}{0}& \F_q \end{tikzcd}. \] \subsubsection{Convention}In the sequel, we assume that a functor as in \eqref{FunctorF}: \[ F : \Rep_{K_2}(\F_q)\rightarrow \Rep_{Q}(\F_q) \] is fixed. The bijection \eqref{Ktubes} gives an explicit bijection \[ \begin{matrix} C_Q : &\lvert\PP^1_{\F_q}\rvert&\rightarrow &\{\text{tubes of $Q$}\}\\ &a&\mapsto&\text{tube of Q containing $F(C_a)$}. \end{matrix} \] where $F(C_a)$ is the essential image of the tube $C_a$ of the Kronecker quiver by the functor $F$. We will sometimes also write $C_{Q}^a$ the $a$-tube of $Q$. \subsubsection{The regular indecomposable representations of an affine quiver} In the previous theorem, we identified the simple regular representations in the homogeneous tubes. We give now an easy consequence concerning indecomposables in the tubes. We keep the notations of the previous sections. \begin{proposition}\label{indtubes} Let $a\in\lvert\PP^1_{\F_q}\rvert$ be a closed point. If the $a$-tube of $Q$ is homogeneous, then for $n\geq 1$, $F(I_{a,n})$ is the unique $n\deg(x)\delta$-dimensional indecomposable representation of this tube. Suppose the $a$-tube is non-homogeneous. Let $X_a$ be the simple regular representation of $Q$ in this tube which is nonzero at the extending vertex. Then $F(I_{a,n})$ is the indecomposable representation of this tube of dimension $n\delta$ having $X_a$ as quotient. \end{proposition} \begin{proof} This result is a consequence of the full faithfulness of $F$ and the properties of the representations of a tube given by the identification of a tube with the category of nilpotent representations of the Jordan or cyclic quiver. 
\end{proof} Let $a\in \lvert\PP^1_{\F_q}\rvert$ corresponding to a non-homogeneous tube $\OO_a$ and $S_a=F(I_{a,1})$ as in the Proposition \ref{indtubes}. Then the subcategory of $\OO_a$ generated by $S_a$ is isomorphic to the category of nilpotent representations of the Jordan quiver. When $a$ is associated to a homogeneous tube, the restriction of $F$ induces an equivalence of categories between the corresponding tubes of $K_2$ and $Q$. \section{The Hall algebra of a quiver}\label{3} The Hall algebra of a quiver now has a long history beginning with the paper of Ringel, \cite{MR1062796}. \subsection{Definition of the Hall algebra of a quiver} We refer the reader to \cite{SchiffmannHall} for the proofs and the general theory of constructible Hall algebras. These can be defined for any abelian finitary category. We will consider it for the category $\Rep_Q(k)$ with $k=\F_q$. Let $Q$ be a finite quiver and $k=\F_q$ a finite field. As a vector space, the Hall algebra of $Q$ over $k$ is defined as \[ \HH_{Q,k}=\bigoplus_{[M]\in \Ob(\Rep_Q(k))/\sim}\C[M] \] where $\Ob(\Rep_Q(k))/\sim$ is the set of representations of $Q$ over $k$ up to isomorphism. It is convenient to see the Hall algebra as an algebra of functions endowed with some sort of convolution product. For this, let $\M_k=\Rep_Q(k)/\sim$ be the set of isomorphism classes of representations of $Q$ over $k$ and \[ \M_k=\bigsqcup_{\dd\in\N^I}\M_k[\dd] \] its decomposition with respect to the dimension. Thus, \[ \HH_{Q,k}=\bigoplus_{\dd\in\N^I}\HH_{Q,k}[\dd],\quad\quad \HH_{Q,k}[\dd]=\Fun(\M_k[\dd],\C)=\text{functions $\M_k[\dd]\rightarrow\C$}. \] We now define the operations. Let $\nu=|k|^{1/2}$ and for $M$ a representation of $Q$, $a_M=|\Aut(M)|$. \begin{enumerate} \item(Multiplication) For $f, g\in \HH_{Q,k}$, \[ f\star g=m(f,g) : [M]\mapsto \sum_{N\subseteq M}\nu^{\langle M/N,N\rangle}f([M/N])g([N]) \] where the sum is over subrepresentations $N$ of $M$. 
\item(Comultiplication) For $f\in \HH_{Q,k}$ and $[M], [N]\in\M_k$, \[ \Delta(f)([M],[N])=\frac{\nu^{-\langle \dim(M),\dim(N)\rangle}}{|\Ext^1(M,N)|}\sum_{\xi\in\Ext^1(M,N)}f([X_{\xi}]), \] where $X_{\xi}$ is the middle term of an exact sequence representing the extension of $M$ by $N$ given by $\xi$. \item (Green's scalar product) For $[M], [N]\in\HH_{Q,k}$, define \[ ([M],[N])=\frac{\delta_{[M],[N]}}{a_M}. \] \end{enumerate} These operations endow $\HH_{Q,\F_q}$ with a twisted bialgebra structure (see the Introduction). The multiplication on $\HH_{Q,\F_q}\otimes \HH_{Q,\F_q}$ is defined for homogeneous $x,y,z,w$ by $(x\otimes y)(z\otimes w)=\nu^{(y,z)}(xz\otimes yw)$. The comultiplication lies at the center of this paper and it is in duality with the multiplication by \eqref{hopfpairing}: it is thus necessary to give other formulas for both the multiplication and the comultiplication, more adapted for explicit computations. For this purpose, we introduce the following notations. For $M,N,R\in\Rep_Q(\F_q)$ three quiver representations, let us define \[ F_{M,N}^{'R}=|\{(\alpha,\beta)\in\Hom(N,R)\times\Hom(R,M)\mid 0\rightarrow N\xrightarrow{\alpha}R\xrightarrow{\beta}M\rightarrow 0 \text{ is exact}\}|, \] \[ F_{M,N}^R=|\{X\subset R\mid X\simeq N\text{ and } R/X\simeq M\}|, \] \[ F^{M,N}_R=F_{M,N}^R\frac{a_Ma_N}{a_R}. \] The free action of $\Aut(N)\times \Aut(M)$ on $F^{'R}_{M,N}$ given by $(a,b)\cdot (\alpha,\beta)=(\alpha a^{-1},b\beta)$ gives the equality \[ F^{'R}_{M,N}=a_Ma_NF^R_{M,N}. \] We have also Riedtmann's formula : \[ F^{M,N}_R=\frac{|\Ext^1(M,N)_R|}{|\Hom(M,N)|} \] where $\Ext^1(M,N)_R$ is the subset of $\Ext^1(M,N)$ of extensions represented by an exact sequence with middle term isomorphic to $R$. 
We now have the following formulas for the multiplication and comultiplication: for $M, N, R\in\Rep_{Q}(k)$ three representations of $Q$ over $k$, \[ [M]\star[N]=\nu^{\langle M,N\rangle}\sum_{[S]\in\M_k}F_{M,N}^S[S], \] \[ \Delta([R])=\sum_{[U], [V]\in\M_k}\nu^{\langle U,V\rangle} F_R^{U,V}[U]\otimes [V]. \] The Green scalar product is a Hopf pairing, meaning that for any $f,g,h\in \HH_{Q,k}$, \begin{equation}\label{hopfpairing} (fg,h)=(f\otimes g,\Delta(h)). \end{equation} In this formula, we have implicitly naturally defined the scalar product on $\HH_{Q,k}\otimes\HH_{Q,k}$ by the formula: \[ ([M]\otimes[N], [R]\otimes [S])=([M],[R])([N], [S]) \] for any $[M], [N], [R], [S]\in \HH_{Q,k}$. \subsection{The dualization process and the Hall algebra} We saw in the subsection \ref{dualisation} how to to dualize representations of quivers to obtain from a representation $M$ of $Q$ a representation $M^*$ of the dual quiver $Q^*$. This process induces a linear map between the Hall algebras \[ \begin{matrix} D : &\HH_{Q,k}&\rightarrow&\HH_{Q^*,k}\\ & [M]&\mapsto &[M^*] \end{matrix} \] which is a Hopf algebra graded anti-isomorphism. In particular, $D$ induces a linear isomorphism between the spaces of primitive elements of a quiver and its dual. \subsection{A PBW basis for the Hall algebra} The Hall algebra construction can be extended to any finitary exact category, see \cite{SchiffmannHall} and references therein. Let $\HH_{\mathcal{A}}$\footnote{It is an algebra and a coalgebra. This is a bialgebra if $\mathcal{A}$ is hereditary.} be the Hall algebra of the finitary exact category $\mathcal{A}$. \begin{theorem}[Guo-Peng, Berenstein-Greenstein]\label{Guo} Let $\mathcal{A}$ be a finitary exact category. Then for any order on the set $\ind \mathcal{A}$ of isomorphism classes of indecomposable objects in $\mathcal{A}$, $\HH_{\mathcal{\mathcal{A}}}$ is spanned, as a $\C$-vector space, by ordered monomials on $\ind \mathcal{A}$. 
Moreover, if $\mathcal{A}$ is Krull-Schmidt, then such monomials form a basis of $\HH_{\mathcal{A}}$. \end{theorem} \begin{proof} See \cite[Theorem 2.4]{MR3463039}. See \cite[Theorem 3.1]{GuoPeng} for a quiver version. \end{proof} \subsection{Cuspidal functions and the theorem of Sevenhant and Van den Bergh} In this section, we let $Q$ be an arbitrary quiver (we allow multiple arrows, oriented cycles and edge loops) and $\F_q$ a finite field. Let $\HH_{Q,\F_q}$ be the Hall algebra of $Q$ over $\F_q$. \subsubsection{Cuspidal functions} We define here the objects of main interest in this paper. \begin{definition} An element $f\in\HH_{Q,\F_q}$ is called a \emph{cuspidal function} if it is primitive \emph{i.e.} if \[ \Delta{f}=f\otimes 1+1\otimes f. \] \end{definition} We let $\HH_{Q,\F_q}^{\cusp}$ be the space of cuspidal functions. It decomposes as a direct sum \[ \HH_{Q,\F_q}^{\cusp}=\bigoplus_{\dd\in\N^I}\HH_{Q,\F_q}^{\cusp}[\dd]. \] A key fact in the proof of Theorem \ref{SVthm} below is the following. \begin{lemma}\label{orthog} Let $\dd\in\N^I$. Then $\HH_{Q,\F_q}^{\cusp}[\dd]$ is the $\dd$-graded component of the orthogonal with respect to Green's scalar product of the subspace \[ \sum_{\dd',\dd''>0}\HH_{Q,\F_q}[\dd']\HH_{Q,\F_q}[\dd'']. \] \end{lemma} \begin{proof} See \cite[3.1]{SevenhantVdB}. \end{proof} \subsubsection{The theorem of Sevenhant and Van den Bergh} This theorem motivates the study of cuspidal functions, as they were used by Sevenhant and Van den Bergh in their article \cite{SevenhantVdB} to identify the whole Hall algebra of a quiver with the specialization at $\nu=\sqrt{q}$ of the positive part of the quantized enveloping algebra of a generalized Kac-Moody algebra associated to that quiver. Let $(f_j)_{j\in J}$ be a graded orthonormal basis of $\HH_{Q,\F_q}^{\cusp}$ with respect to Green's scalar product, so that in particular, for $j\in J$, $\dim f_j\in\N^I$ is well-defined. We define \[ a_{i,j}=(\dim f_i,\dim f_j). \] for $i,j\in J$. 
By \cite[Proposition 3.2]{SevenhantVdB}, $a_{i,j}\leq 0$ for $i\neq j$ and if $a_{i,i}>0$, then $a_{i,i}=2$. The infinite matrix $(a_{i,j})$ is a generalized Cartan matrix in the sense of \cite{Borcherds}. \begin{theorem}[Sevenhant-Van den Bergh, \cite{SevenhantVdB}]\label{SVthm} The Hall algebra $\HH_{Q,\F_q}$ is the bialgebra generated by the primitive elements $(f_j)_{j\in J}$ subject to the following relations: \begin{enumerate} \item For all $i,j\in J$, if $a_{i,j}=0$, then $f_if_j=f_jf_i$, \item For all $i,j\in J$, if $a_{i,i}=2$, then \[ \sum_{l=0}^{1-a_{i,j}}(-1)^l\left\{\begin{matrix} 1-a_{i,j}\\ l \end{matrix}\right\}f_i^lf_jf_i^{1-a_{i,j}+l}=0 \] where, for any integers $r$ and $s$, $\left\{ \begin{matrix} s\\ r \end{matrix} \right\}$ is the $\nu$-binomial coefficient defined by : \[ \left\{ \begin{matrix} s\\ r \end{matrix} \right\}=\prod_{u=1}^{r}\frac{\nu^{u+s-r}-\nu^{-(u+s-r)}}{\nu^u-\nu^{-u}} \] and $\nu=\sqrt{q}$. \end{enumerate} \end{theorem} \begin{remark} From what we know about the Hall algebra, there is no natural choice for the basis $(f_j)_{j\in J}$. We hope to tackle this question in the future. \end{remark} \section{Kac and cuspidal polynomials of an affine quiver over a finite field}\label{4} \subsection{Indecomposable and absolutely indecomposable representations count} For a fundamental contribution on the count of representations of quivers over finite fields, see \cite{HuaCounting}. Let $Q$ be an arbitrary quiver. We denote by $A_{Q,\dd}(q)$, $\dd\in\N^I$ the Kac polynomial of $Q$ counting absolutely indecomposables representations of dimension $\dd$ over $\F_q$ and $I_{Q,\dd}(q)$ the polynomial counting indecomposable representations of $Q$ of dimension $\dd$ over $\F_q$. 
The following formula is well-known and is a consequence of Galois descent for quiver representations: for $\dd\in\N^I$ indivisible and $r\geq 1$, \[ I_{Q,r\dd}(q)=\sum_{l\mid r}\frac{1}{l}\sum_{m\mid l}\mu(m)A_{Q,\frac{r}{l}\dd}(q^{\frac{l}{m}}), \] where $\mu$ is the M\"obius function. See also \cite[Theorem 4.1]{HuaCounting}. Sometimes, this formula is presented using plethystic operations (see \cite{BozecCounting} for basics on plethystic notation): \[ \Exp_{z}\left(\sum_{\dd>0}I_{Q,\dd}(q)z^{\dd}\right)=\Exp_{t,z}\left(\sum_{\dd>0}A_{Q,\dd}(t)z^{\dd}\right). \] These polynomials do not depend on the orientation of the graph $Q$. \subsection{Dimension count of cuspidal functions} Let $Q$ be an arbitrary quiver. For complements on this section, see \cite{BozecCounting}. For $\dd\in\N^I$, we let \[ C_{Q,\dd}(q)=\dim_{\C}\HH_{Q,\F_q}^{\cusp}[\dd]. \] We will not use the following in the sequel. \begin{theorem}[Bozec-Schiffmann, \cite{BozecCounting}] The function $C_{Q,\dd}(q)$ is a polynomial in $\Q[q]$. \end{theorem} In \emph{loc. cit.}, Bozec and Schiffmann defined the absolutely cuspidal polynomials of a quiver $Q$. They are characterized as follows. \begin{proposition}[Bozec-Schiffmann, \cite{BozecCounting}]\label{characterization} The absolutely cuspidal polynomials of $Q$ form the unique family of polynomials $(C_{Q,\dd}^{abs}(t))_{\dd\in\N^I}$ satisfying the following conditions. \begin{enumerate} \item If $\dd\in\N^I$ is hyperbolic, $C_{Q,\dd}^{abs}(t)=C_{Q,\dd}(t)$, \item If $\dd\in\N^I$ is isotropic and indivisible, then \[ \Exp_{z}\left(\sum_{r>0}C_{Q,\dd}(t)z^{\dd}\right)=\Exp_{t,z}\left(\sum_{r>0}C_{Q,\dd}^{abs}(t)z^{\dd}\right). \] \end{enumerate} \end{proposition} \begin{conj}[Bozec-Schiffmann, {\cite[Conjecture 1.3]{BozecCounting}}]\label{conjBS}For any $Q$ and $\dd\in\N^I$, $C_{Q,\dd}^{abs}(t)\in\N[t]$. \end{conj} We prove this conjecture for $\dd$ isotropic, see Corollary \ref{fin}. 
\subsection{Kac and cuspidal polynomials of affine quivers} For affine quivers, explicit expressions of the considered polynomials are known. \subsubsection{Kac polynomials of affine quivers} \begin{theorem} Let $Q$ be an affine quiver, $\delta\in\N^I$ its indecomposable imaginary root, and $k=\F_q$ a finite field with $q$ elements. \begin{enumerate} \item If $\dd\in\N^I$ is a real root (\emph{i.e.} if $\langle \dd,\dd\rangle=1$), there is a unique indecomposable representation over $\F_q$, and it is absolutely indecomposable: $A_{Q,\dd}(q)=1=I_{Q,\dd}(q)$, \item If $\dd\in\N^I$ is an imaginary root, then it is a positive multiple $r\delta$ of the indecomposable imaginary root and $A_{Q,\dd}(t)=t+n_0$, \emph{i.e.} there are $q+n_0$ absolutely indecomposable representations over $\F_q$ where $n_0$ is the number of vertices of $Q$ minus one (the number of vertices of the finite type quiver associated to $Q$). \end{enumerate} \end{theorem} \begin{proof} See \cite[5.1]{HuaXiao}. \end{proof} \subsubsection{Cuspidal polynomials of affine quivers} For acyclic affine quivers, \cite{HuaXiao} countains all the formulas we need. Let $Q=(I,\Omega)$ be an affine quiver. \begin{proposition}[Hua-Xiao, {\cite[5.2]{HuaXiao}}] For $r\geq 1$, we have: \begin{equation}\label{cuspindec} C_{Q,r\delta}(t)=I_{Q,r\delta}(t)-n_0 \end{equation} and \[ C_{Q,r\delta}^{abs}(t)=t. \] For $e_i=(0,\hdots,1,\hdots,0)\in\N^I$, we have \[ C_{Q,e_i}=C_{Q,e_i}^{abs}=1. \] For $\dd\not\in\{e_i : i\in I\}\cup\{r\delta : r\geq 1\}$, \[ C_{Q,\dd}=0. \] \end{proposition} In fact, this formulas remains true for any affine quiver, \emph{i.e.} for the Jordan and cyclic quivers, as it is clear from the formulas of \cite[1.2]{BozecCounting}. \section{Cuspidal functions of the Jordan and cyclic quivers}\label{5} Let $Q$ be a quiver. The category of nilpotent representations of $Q$, $\Rep_Q^{\nil}(\F_q)$ is a sub-category of $\Rep_Q(\F_q)$ stable under extensions and taking subobjects. 
Therefore, denoting by $\M_{\F_q}^{Q,\nil}$ the set of isomorphism classes of nilpotent representations of $Q$, the subspace $\HH_{Q,\F_q}^{\nil}=\bigoplus_{[M]\in\M_{\F_q}^{Q,\nil}}\C[M]$ is in fact a sub-Hopf-algebra. Its primitive elements are called nilpotent cuspidal functions. In this section, we put the emphasis on nilpotent cuspidal functions of the Jordan and cyclic quivers. We will also see that arbitrary cuspidal functions can be expressed in terms of nilpotent cuspidal functions. \subsection{Nilpotent cuspidal functions of the Jordan quiver}\label{nilpotentsJordan} All in this section has been known for almost a century, as Steinitz introduced an algebra structure on the complex vector space generated by isomorphism classes of finite length modules over a discrete valuation ring with finite residue field. The link with Macdonald's ring of symmetric functions is explained in \cite{macdonald}. We recall here what we need for the convenience of the reader -- who is asked to look at the references for the proofs. Let $\Lambda$ be Macdonald's ring of symmetric functions. It is constructed as the direct limit in the category of graded rings of the system \[ (\Z[x_1,\hdots,x_n]^{\mathfrak{S}_n}, f(x_1\hdots,x_{n+1})\in\Z[x_1,\hdots, x_{n+1}]^{\mathfrak{S}_{n+1}}\rightarrow f(x_1,\hdots, x_n,0)\in\Z[x_1,\hdots,x_n]^{\mathfrak{S}_n}). \] The $\Z$-algebra $\Lambda$ is endowed with a structure of Hopf algebra with comultiplication $\Delta : \Lambda\longrightarrow \Lambda\otimes\Lambda$ by considering the direct limit of the system of applications \begin{equation} \Delta_n : \Z[x_1,\hdots,x_{2n}]^{\mathfrak{S}_{2n}}\longrightarrow \Z[x_1,\hdots,x_{n}]^{\mathfrak{S}_{n}}\otimes \Z[x_1,\hdots,x_{n}]^{\mathfrak{S}_{n}} \end{equation} where $\Delta_n(x_i)=x_{i/2}\otimes 1$ if $i$ is even, and $\Delta_n(x_i)=1\otimes x_{\frac{i+1}{2}}$ if $i$ is odd. As usual, we let \[ e_r=\sum_{i_1<\hdots<i_r}x_{i_1}\hdots x_{i_r}, \] \[ p_r=\sum_i x_i^r \] for $r\geq 1$. 
We do not recall the definition of Hall-Littlewood symmetric functions $P_{\lambda}(x;t)$ for a partition $\lambda$: see \cite[Chapter III]{macdonald}. The elements $p_r$ are the normalized primitive elements of $\Lambda$. We now state the main theorem of this section. \begin{theorem} We have an isomorphism of Hopf algebras : \begin{equation} \begin{matrix} \varphi_q :& \HH_{Q_0}^{\nil}&\longrightarrow &\Lambda\otimes \C\\ &[I_{(1^r)}]&\longmapsto &q^{\frac{-r(r-1)}{2}}e_r. \end{matrix} \end{equation} Moreover, for any partition $\lambda$: \begin{equation} \varphi_q([I_{\lambda}])=q^{-n(\lambda)}P_{\lambda}(x ;q^{-1}) \end{equation} where $P_{\lambda}$ is the Hall-Littlewood symmetric function associated to the partition $\lambda$ and $n(\lambda)=\sum_{i}(i-1)\lambda_i$. \end{theorem} \begin{proof} See \cite{macdonald}, 3.4 page 217. \end{proof} Define \[ \widetilde{p}_r=\varphi_q^{-1}(p_r) \] the cuspidal function of dimension $r$ of the Jordan quiver. For $m\geq 0$, define $\phi_m(t)=\prod_{i=1}^m(1-t^i)$ and let $l(\lambda)$ be the length of the partition $\lambda$. We have the following closed formula for the nilpotent cuspidal functions of the Jordan quiver. \begin{theorem}\label{cuspJord} The $r$-dimensional cuspidal function over $\F_q$ of $Q_0$ is: \begin{equation}\label{cusphom} \widetilde{p}_r=\sum_{|\lambda|=r}\phi_{l(\lambda)-1}(q)[I_{\lambda}]. \end{equation} \end{theorem} \begin{proof} This formula already appears in \cite{SchiffmannNoncommutative}, formula $4.1$ or in \cite[3.2]{MR3463039}. See references therein. \end{proof} \subsection{Cuspidal functions for the Jordan quiver} The following theorem provides a basis for cuspidal functions for the Jordan quiver $J$. \begin{theorem}\label{cuspidauxJordan} Let $a\in\lvert\AAAA^1_{\F_q}\rvert$ a point of degree $d$. Fix $t_a\in\AAAA^1_{\F_q}(\F_q)$ such that the image of $t_a : \Spec \F_{q^d}\rightarrow \AAAA^1_{\F_{q^d}}$ is $a$. 
We define $\widetilde{p}_{r,t_a}\in\HH_{J,\F_{q^d}}$ associated to $t_a$ as \begin{equation} \widetilde{p}_{r,t_a}=\sum_{|\lambda|=r}\phi_{l(\lambda)-1}(q^d)[t_aI_d+I_{\lambda}]. \end{equation} This is a cuspidal function of $Q$ over $\F_{q^d}$. We have a linear map : \[ F_d : \HH_{J,\F_{q^d}}\rightarrow \HH_{J,\F_q} \] induced by the forgetfull functor $F'_d : \Rep_J(\F_{q^d})\rightarrow \Rep_J(\F_q)$. The set \[ \{F_d(\widetilde{p}_{r,t_a})\mid r\geq 1, |a|\in \lvert\AAAA^1_{\F_q}\rvert\} \] is a basis of cuspidal functions of $J$ over $\F_q$. \end{theorem} \begin{proof} The functions $F_d(\widetilde{p}_{r,t_a})$ for $d\geq 1$, $r\geq 1$ and $a\in\lvert\AAAA^1_{\F_q}\rvert$ of degree $d$ are clearly linearly independent, since they have disjoint support. For $e\geq 1$, the number of such functions of dimension $e$ is the number of closed points of $\AAAA^1_{\F_q}$ of degree less or equal to $e$. This is also the number of irreducible monic polynomials in $\F_q[T]$ with degree less than or equal to $e$. This number is the dimension of $\HH_{J,\F_q}^{\cusp}[e]$, see \cite[Examples 1.2 and Proposition 4.1]{BozecCounting}. Therefore, it remains to show that for $d\geq 1$, $r\geq 1$ and $a\in\AAAA^1_{\F_q}$ a closed point of degree $d$, $F_d(\widetilde{p}_{r,t_a})$ is indeed a cuspidal function in $\HH_{J,\F_q}$. Let $t\in\lvert\AAAA^1_{\F_q}\rvert$ be a closed point of degree $d$, $r$ an integer and $\lambda$ a partition of $r$. It suffices to show that the number of automorphisms of $F'_d(tI_r+J_{\lambda})$ and that of $tI_r+J_{\lambda}$ coincide, and that all subrepresentations of $F'_d(tI_r+J_{\lambda})$ are of the form $F'_d(N)$ for $N$ a subrepresentation of $tI_r+J_{\lambda}$. \emph{Step 1: The number of automorphisms.} To prove that the number of automorphisms of $F'_d(tI_r+J_{\lambda})$ and that of $tI_r+J_{\lambda}$ coincide, it suffices to show that an automorphism $\psi$ of $F'_d(tI_r+J_{\lambda})$ is $\F_{q^d}$-linear. 
But -- by definition of morphisms of quiver representations -- $\psi$ has to commute with $tI_r+J_{\lambda}$ hence with its semisimple part, which is $tI_r$. Therefore, $\psi$ commutes with the multiplication by $t$. Since $t$ is of degree $d$ over $\F_q$, $\F_q[t]=\F_{q^d}$ and $\psi$ is $\F_{q^d}$-linear, that is exactly what we wanted to prove. \emph{Step 2: The subrepresentations.} Let $N\subset F'_d(tI_r+J_{\lambda})$ be a subrepresentation, \emph{i.e.} $N$ is a $\F_q$-subspace of $\F_{q^d}^r$ stable under the linear map $tI_r+J_{\lambda}$. Since the semisimple part $tI_r$ of $tI_r+J_{\lambda}$ is a polynomial with coefficients in $\F_q$ without constant term of $tI_r+J_{\lambda}$, $N$ is stable under $tI_r$. Therefore, the multiplication by $t$ leaves $N$ invariant: $N$ is a $\F_{q^d}$-subspace of $\F_{q^d}^r$. \end{proof} \subsection{Nilpotent cuspidal functions of cyclic quivers} Let $n\geq 2$ and $C_n$ be the cyclic quiver of length $n$. \begin{theorem}\label{cuspCycl} For any $r\geq 1$, $\dim_{\C}\HH_{C_n,\F_q}^{\nil\cusp}[r\delta]=1$. \end{theorem} \begin{proof} In fact, Schiffmann proved the following result, see \cite[Proposition 3.25]{SchiffmannHall}. Let $U$ be the two sided ideal of $\HH_{C_n,\F_q}^{\nil}$ generated by the classes $[S_i]$ of simple representations of dimension $e_i$ for $0\leq i\leq n$ and let $\R=U^{\perp}$ be its orthogonal with respect to Green's scalar product. Then $\R$ is a sub-Hopf algebra of $\HH_{C_n,\F_q} ^{\nil}$ isomorphic to a graded polynomial ring with countably many variables \begin{equation}\label{anneauR} \R\simeq\C[x_j : j\geq 1] \end{equation} with $\deg(x_j)=j\delta$ and $\Delta(x_j)=x_j\otimes 1+1\otimes x_j$. This immediately implies our result. Indeed, if $f$ is a cuspidal function of dimension $r\delta$, then it is orthogonal to $[S_i]$ for $i\in I$ and to any nontrivial products. Therefore, $f\in \R$. By \eqref{anneauR}, a cuspidal function in $\R$ is a linear combination of $x_j$, $j\geq 1$. 
This proves Theorem \ref{cuspCycl}. \end{proof} \begin{lemma}\label{indcyclic} Let $d\geq 1$. Let $f\in \HH_{C_n,\F_q}^{\nil\cusp}[d\delta]$ be non-zero. Then for $I$ a nilpotent indecomposable representation of $C_n$ of dimension $d\delta$, $f(I)\neq 0$. Furthermore, $f(I)$ does not depend on the chosen indecomposable $I$. \end{lemma} \begin{proof} By Theorem \ref{Guo}, $\{[I] : \quad $I$ \text{ nilpotent indecomposable}\}$ generates $\HH_{Q,\F_q}^{\nil}$ as an algebra. For $d\geq 1$, consider the orthogonal projection with respect to Green's scalar product: \[ \pi : \HH_{C_n,\F_q}^{\nil}[d\delta]\rightarrow \HH_{C_n,\F_q}^{\nil\cusp}[d\delta]. \] By Lemma \ref{orthog}, $\pi$ restricts to a surjective linear map: \[ \pi : \HH_{C_n,\F_q}^{\nil,\indec}[d\delta]\rightarrow \HH_{C_n,\F_q}^{\nil\cusp}[d\delta] \] where $\HH_{C_n,\F_q}^{\nil\indec}$ is the subspace of $\HH_{C_n,\F_q}$ generated by the basis elements $[M]$ for $M$ indecomposable. Being surjective, this morphism is nonzero and there exists an indecomposable nilpotent representation $M$ of dimension $d\delta$ of $C_n$ such that $\pi([M])\neq 0$. This precisely means that $(f,[M])\neq 0$ since $\HH_{Q,\F_q}^{\nil\cusp}[d\delta]$ is one-dimensional, and therefore $f([M])\neq 0$. The group $\Z/n\Z$ acts by rotations on the quiver $C_n$, inducing an action of $\Z/n\Z$ by Hopf algebra automorphisms on $\HH_{C_n,\F_q}^{\nil}$ preserving $\HH_{C_n}^{\nil\cusp}[r\delta]$. Since the latter space is one-dimensional, $\Z/n\Z$ acts on it through a character $\Z/n\Z\rightarrow \C^*$, $i\mapsto\zeta^i$ for some $n$-th root of unity $\zeta$. We show that $\zeta =1$. For $M$ a representation of $C_n$ of dimension $d\delta$, $\Delta([M])(V_{0,dn-1},S_{-1})\neq 0$ if and only if $M\simeq V_{0,dn}$ or $M\simeq V_{0,dn-1}\oplus S_{-1}$ and $\Delta([M])(S_{-1},V_{0,dn-1})\neq 0$ if and only if $M\simeq V_{-1,dn}$ or $M\simeq V_{0,dn-1}\oplus S_{-1}$. 
Thus, we may write $f=[V_{0,nd}]+\zeta [V_{-1,nd}]+c[V_{0,nd-1}\oplus S_{-1}]+g$ where $\Delta(g)(S_{-1},V_{0,dn-1})=\Delta(g)(V_{0,dn-1},S_{-1})=0$ and $c\in\C$ is some complex number. Computing the comultiplications, we obtain : \[ 0=\Delta(f)(V_{0,nd-1},S_{-1})=\nu^{\langle V_{0,nd-1},S_{-1}\rangle}\left(\frac{|\Aut(S_{-1})||\Aut(V_{0,nd-1})|}{|\Aut(V_{0,nd})|}+c\frac{|\Aut(S_{-1})||\Aut(V_{0,nd-1})|}{|\Aut(V_{0,nd-1}\oplus S_{-1})|}\right) \] and \[ 0=\Delta(f)(S_{-1},V_{0,nd-1})=\nu^{\langle S_{-1},V_{0,nd-1}\rangle}\left(\zeta\frac{|\Aut(S_{-1})||\Aut(V_{0,nd-1})|}{|\Aut(V_{-1,nd})|}+c\frac{|\Aut(S_{-1})||\Aut(V_{0,nd-1})|}{|\Aut(I_{-1,nd-1}\oplus S_{-1})|}\right). \] From the first equation, it follows that $c\in\Q_{<0}$ and from the second equation, $\zeta\in\Q c$. So $\zeta\in\Q$ and $\zeta\in\R_{>0}$. This concludes the proof. \end{proof} \begin{remark} There is no known closed formula for nilpotent cuspidal functions of the Hall algebra of cyclic quivers. \end{remark} \subsection{Cuspidal functions of cyclic quivers} Let $C_n$ be the cyclic quiver of length $n$. As the functor $G_n$ (see \ref{equivJC}) is an equivalence of categories, we can give an explicit formula for invertible cuspidal functions of cyclic quivers. For $a\in\lvert\AAAA^1_{\F_q}\rvert$ of degree $d$, let \[ I_{a,\lambda}=G_n(F'_d(t_aI+J_{\lambda})), \] where $t_a\in\AAAA^1_{\F_q}(\F_q)$ represents $a$ (see Theorem \ref{cuspidauxJordan}). \begin{proposition} A basis of $\HH_{C_n,\F_q}^{\cusp\inv}$ is given by the functions \[ f_{a,s}=\sum_{|\lambda|=s}\left(\prod_{j=1}^{l(\lambda)-1}(1-q^j)\right)[I_{a,\lambda}] \] for $a\in\lvert\AAAA^1_{\F_q}\rvert\setminus \{0\}$ and $s\geq 1$. \end{proposition} \begin{proof} This is a consequence of Theorem \ref{cuspidauxJordan}. 
\end{proof} \section{Cuspidal functions of affine quivers}\label{6} \subsection{Decomposition of cuspidal functions} \begin{proposition}\label{decompcusp} A cuspidal function $f\in \HH_{Q,k}$ in the Hall algebra of an affine quiver over a finite field decomposes as $f=f_{\PC}+f_{\IC}+f_{\RC}$ where $f_{\RC}$ (resp. $f_{\IC}$, resp. $f_{\PC}$) is a cuspidal function whose support consists of regular (resp. preinjective, resp. preprojective) representations. \end{proposition} \begin{proof} Let $f\in\HH_{Q,k}^{\cusp}$ be a cuspidal function. Let $M=P\oplus R\oplus I$ be a representation of $Q$ over $k$ written as a direct sum of its preprojective, preinjective and regular summands, that is $P$ is the direct sum of the indecomposable preprojective direct summands of $M$, $I$ is the direct sum of the indecomposable preinjective direct summands of $M$ and $R$ is the direct sum of the indecomposable regular direct summands of $M$. Suppose moreover that at least two of the representations $P,R,I$ are nonzero. Then the proposition is equivalent to $f([M])=0$ for all such $M$. But, because of the properties of extensions and morphisms (see Proposition \ref{extensions}), in $\HH_{Q,k}$, we have $[M]=[P][R][I]$. This is a non trivial product, and therefore, $f$ is orthogonal to $[M]$ with respect to Green's scalar product. But this precisely means that $f([M])=0$ and implies the decomposition $f=f_{\PC}+f_{\IC}+f_{\RC}$. It remains to show that $f_{\PC}, f_{\RC}$ and $f_{\IC}$ are cuspidal. This comes from the fact that $f$ is cuspidal and $\Delta(f_{\PC})$ is supported on $\{(M,N) : \partial M+\partial N<0\}$ while $\Delta(f_{\RC})$ is supported on $\{(M,N) : \partial M+\partial N=0\}$ and $\Delta(f_{\IC})$ is supported on $\{(M,N) : \partial M+\partial N>0\}$. \end{proof} \begin{proposition}\label{supportreg} Let $r\geq 1$ and $f\in\HH_{Q,\F_q}^{\cusp}[r\delta]$. Then $f$ is supported on regular representations. 
\end{proposition} \begin{proof} Let $f=f_{\PC}+f_{\IC}+f_{\RC}$ be the decomposition given by Proposition \ref{decompcusp}. By Theorem \ref{Guo}, $\{[I] :\quad $I$ \text{ indecomposable}\}$ generates $\HH_{Q,\F_q}$ as an algebra. For $d\geq 1$, consider the orthogonal projection with respect to Green's scalar product: \[ \pi : \HH_{C_n,\F_q}[r\delta]\rightarrow \HH_{C_n,\F_q}^{\cusp}[r\delta]. \] By Lemma \ref{orthog}, $\pi$ restricts to a surjective linear map: \[ \pi : \HH_{C_n,\F_q}^{\indec}[r\delta]\rightarrow \HH_{C_n,\F_q}^{\cusp}[r\delta]. \] Let $M$ be an indecomposable representation of dimension $r\delta$. It is regular. Therefore, $(f_{\PC},[M])=(f_{\IC},[M])=0$ and $f_{\IC}=f_{\PC}=0$. \end{proof} \subsection{Regular Hall algebra of affine quivers} In all this section, $Q$ is an acyclic affine quiver and $\F_q$ a finite field. We denote by $\HH_{Q,\F_q,\RC}$ the subspace of $\HH_{Q,\F_q}$ generated by classes of regular representations $[M]$, for $M\in\Rep_{Q}^{\RC}(\F_q)$. Since $\Rep_Q^{\RC}(\F_q)$ is stable under extensions, this is in fact a subalgebra. We denote by $m$ the induced multiplication. The comultiplication $\Delta : \HH_{Q,k}\rightarrow\HH_{Q,k}\otimes\HH_{Q,k}$ induces a linear map \[ \Delta_{\RC} : \HH_{Q,k,\RC}\rightarrow \HH_{Q,k,\RC}\otimes\HH_{Q,k,\RC} \] which is the composition \[ \HH_{Q,k,\RC}\rightarrow \HH_{Q,k}\rightarrow \HH_{Q,k}\otimes \HH_{Q,k}\rightarrow \HH_{Q,k\RC}\otimes\HH_{Q,k,\RC} \] where the first arrow is the inclusion, the second is the comultiplication $\Delta$ and the last the orthogonal projection with respect to Green's scalar product. When elements of $\HH_{Q,\F_q}$ are seen as functions on isoclasses of representations of $Q$ over $\F_q$, the last arrow is the restriction of functions to isoclasses of regular representations. The restriction of Green's scalar product to $\HH_{Q,\F_q,\RC}$ induces a hermitian scalar product $(-,-)$. 
\begin{proposition} The two operations $m$ and $\Delta_{\RC}$ endow $\HH_{Q,k,\RC}$ with a bialgebra structure and the multiplication is adjoint to the comultiplication for the restriction of Green's scalar product. \end{proposition} \begin{proof} We already noticed that $m$ is an associative bilinear map. We have to prove that $\Delta_{\RC}$ is coassociative, compatible with the multiplication $m$ and that $m$ and $\Delta_{\RC}$ are adjoint for Green's scalar product. The coassociativity will follow from the associativity and the adjunction property. \emph{Adjunction property.} Let $M, N, R\in\Rep_{Q}^{\RC}(\F_q)$ be three regular representations of $Q$. Then, \[ (\Delta([R]),[M]\otimes[N])=(\Delta_{\RC}([R]),[M]\otimes[N]). \] But by adjunction for $m$ and $\Delta$ in $\HH_{Q,\F_q}$, \[ (\Delta([R]),[M]\otimes[N])=([R],[M][N]). \] Therefore, \[ (\Delta_{\RC}([R],[M]\otimes[N])=([R],[M][N]). \] This proves the adjunction for $m$ and $\Delta_{\RC}$. \emph{Compatibility of $m$ and $\Delta_{\RC}$.} Let $M,N\in\Rep_{Q}^{\RC}(\F_q)$. In $\HH_{Q,\F_q}$, we have the equation \[ \Delta([M][N])=\Delta([M])\Delta([N]). \] Thanks to the properties of morphisms between preprojective, regular and preinjective representions (see Proposition \ref{extensions}), if $S\subset M$ is a subrepresentation, then its indecomposable summands can only be preprojective or regular. Moreover, $S$ is regular if and only if its defect $\partial S$ is zero, since preprojective representations have negative defect. Suppose $S$ is not regular. If $S'$ is an arbitrary subrepresentation of $N$, then for any regular representations $A$ and $B$, $([S]\otimes[M/S])([S']\otimes [N/S'])(A,B)=0$ since the support of $[S][S']$ contains only representations of defect $\partial S+\partial S'<0$. By reversing the role of $S$ and $S'$, we obtain the formula \[ \Delta_{\RC}([M][N])=\Delta_{\RC}([M])\Delta_{\RC}([N]). \] This is precisely what we wanted. 
\end{proof} \subsection{Regular cuspidal functions of affine quivers} A function $f\in\HH_{Q,k,\RC}$ is said to be \emph{cuspidal regular} if $\Delta_{\RC}(f)=f\otimes 1+1\otimes f$. From Proposition \ref{supportreg}, we have the inclusion \[ \HH_{Q,k}^{\cusp}[r\delta]\subset \HH_{Q,k,\RC}^{\cusp}[r\delta] \] for any $r\geq 1$. Recall the decomposition of $\Rep_Q^{\RC}(\F_q)$ into blocks $C_a^Q$ for $a\in\lvert\PP^1_{\F_q}\rvert$. As a consequence, we have the following proposition. \begin{proposition}\label{resten} There is an isomorphism of Hopf algebras : \[ \HH_{Q,\F_q,\RC}\simeq \bigotimes_{a\in\lvert\PP^1_{\F_q}\rvert}\HH_{C_{a}^Q} \] between the regular Hall algebra and the restricted tensor product of Hall algebras of tubes. \end{proposition} \begin{theorem}\label{unique} For any $a\in \lvert\PP^1_{\F_q}\rvert$ and any $n>0$, $\dim(\HH_{Q,\F_q,\RC}^{\cusp}\cap\HH_{C_a^Q}[n\deg(a)])=1$. \end{theorem} \begin{proof} This is a consequence of Proposition \ref{resten}. Indeed, if $C_a^Q$ is a homogeneous tube, $\HH_{C_a^Q}\simeq \HH_{J,\F_q^{\deg(a)}}$ and if $C_{a}^Q$ is a non-homogeneous tube of period $p$, $\HH_{C_a^Q}\simeq \HH_{C_p,\F_q}$. \end{proof} \subsubsection{Normalization of cuspidal functions} The space of cuspidal functions whose support is contained in a given tube is one dimensional. We give here a natural way to normalize them. Take $a\in \lvert\PP^1_{\F_q}\rvert$ a closed point and $n\geq 1$. If the corresponding tube is homogeneous, it contains exactly one indecomposable representation $I=I_{a,n}$ of dimension $n\deg(a)\delta$ and if $f$ is a cuspidal function whose support is contained in this tube, thanks to formula \eqref{cusphom}, $f(I)\neq 0$. We may normalize $f$ such that $f(I)=1$. If $a$ corresponds to a non-homogeneous tube of period $p$, then for $n\geq 1$, it contains $p$ indecomposables (up to isomorphism) $I_1,\hdots,I_p$ of dimension $n\delta$. 
By Proposition \ref{indcyclic}, if $f$ is a nonzero cuspidal function whose support is contained in this tube, then $f(I_1)=\hdots=f(I_p)\neq 0$. We may therefore normalize $f$ by fixing the value $f(I_1)=1$. \begin{definition} A dimension vector $\dd\in\N^I$ is said to be cuspidal if there exists a nonzero cuspidal function of dimension $\dd$. We say sometimes that a regular cuspidal function is in a given tube when its support is contained in this tube. \end{definition} We define an homogeneous basis $\B$ of $\HH_{Q,\F_q,\RC}^{\cusp}$. From Theorem \ref{unique}, for each tube indexed by an element $a\in\lvert\PP^1_{\F_q}\rvert$, the space of cuspidal functions whose support is contained in this tube is one dimensional. We use the previous normalization and for $d\geq 1$, we denote by $f_{a,d}$ the unique corresponding normalized cuspidal function. We define now \[ \mathcal{B}=\{f_{a,d} : a\in\lvert\PP^1_{\F_q}\rvert, d\geq 1\}. \] This is a homogeneous basis of regular cuspidal functions of $Q$. \subsection{Comparison of dimensions} For any $\dd=s\delta\in\N^I$, the previous theorem gives a basis of $\HH_{Q,\F_q,\RC}$ containing \[ I_{Q,\dd}(q)-\sum_{i=1}^d(p_i-1)=\text{ number of tubes of degree $\leq s$} \] elements, where we recall that $d$ is the number of non-homogeneous tubes and $p_i$, $1\leq i\leq d$ denote their respective periods. Moreover, the table in Theorem \ref{ringelth} gives \[ \sum_{i=1}^dp-i-d=n_0-1 \] where $n_0$ is the number of vertices of $Q$ minus one. Therefore, \[ \dim_{\C}\HH_{Q,\F_q,\RC}^{\cusp}[s\delta]=I_{Q,s\delta}(q)-n_0+1 \] and, by Equation \eqref{cuspindec}, \[ \dim_{\C}\HH_{Q,\F_q}^{\RC}[s\delta]=C_{Q,s\delta}(q)+1 \] which means that the subspace $\HH_{Q,\F_q}^{\cusp}[s\delta]\subset \HH_{Q,\F_q,\RC}[s\delta]$ is of codimension $1$. In Theorem \ref{noyau}, we will construct an explicit linear form on $\HH_{Q,\F_q,\RC}^{\cusp}[s\delta]$ whose kernel is $\HH_{Q,\F_q}^{\cusp}[s\delta]$ for any $s\geq 1$. 
\subsection{Link between regular cuspidal functions of the Kronecker quiver and affine quivers} Let $Q$ be an acyclic affine quiver. In Section \ref{Kronecker}, we defined an exact fully faithful functor \[ F : \Rep_{K_2}(k)\rightarrow \Rep_Q(k). \] This functor induces a linear map \[ \tilde{F} : \HH_{K_2,\F_q}\rightarrow \HH_{Q,\F_q} \] defined by $\tilde{F}[M]=[F(M)]$ for a representation $M$ of $K_2$ over $\F_q$. This map is an injective algebra morphism (since $F$ is fully faithful and since the essential image of $F$ is closed under extensions). It is usually not a coalgebra homomorphism, but it verifies the following property. For $f\in\HH_{Q,\F_q}$, denote by $f^{\perp}$ its orthogonal projection on $\im(\tilde{F})$ with respect to Green's scalar product. When viewing elements of $\HH_{Q,\F_q}$ as functions, $f^{\perp}$ is simply the restriction of $f$ to $\{[F(M)] : M\in\Rep_{K_2}(\F_q)\}$. Then, denoting by $\tilde{F}^{-1}$ the inverse of $\tilde{F} : \HH_{K_2,\F_q}\rightarrow \im(\tilde{F})$, we have the formula \begin{equation}\label{projection} \Delta(F^{-1}(f^{\perp}))=\tilde{F}^{-1}\otimes \tilde{F}^{-1}(\Delta(f)^{\perp}). \end{equation} By construction the functor $F$ preserves indecomposables. Regularity of an indecomposable representation is a property of its dimension. It follows that $F$ restricts to a functor between abelian categories \[ F_{\RC} : \Rep_{K_2}^{\RC}(\F_q)\rightarrow \Rep_{Q}^{\RC}(\F_q) \] which now induces an algebra morphism between regular Hall algebras : \[ \tilde{F}_{\RC} : \HH_{K_2,\F_q,\RC}\rightarrow\HH_{Q,\F_q,\RC}. \] For $a\in\lvert\PP^1_{\F_q}\rvert$ and $n\geq 1$, \[ f^{K_2}_{a,n}=\sum_{|\lambda|=n}\prod_{j=1}^{l(\lambda)-1}(1-q^{j\deg(a)})[I_{a,\lambda}] \] are the regular cuspidal functions of the Kronecker quiver. We first prove the following result giving the restriction of a regular cuspidal function of $\HH_{Q,\F_q,\RC}$. 
\begin{proposition}\label{orth} Let $r\geq 1$ and $f$ be the normalized regular cuspidal function of $Q$ in a given tube. Then $\tilde{F}^{-1}(f^{\perp})$ is the normalized cuspidal function of $K_2$ of a (homogeneous) tube. \end{proposition} \begin{proof} In case $f$ is in a homogeneous tube, $F$ induces an equivalence of abelian categories with the corresponding tube of $\Rep_{K_2}^{\RC}(\F_q)$. Therefore, $f^{\perp}=f$, $\Delta(f)^{\perp}=\Delta(f)$ and the result follows from Equation \eqref{projection}. In case $f$ is in a non-homogeneous tube, let $C$ be the corresponding tube of $K_2$. Let $[M]$ be the isomorphism class of indecomposable representations of dimension $(s,s)$ contained in the tube $C$. By Lemma \ref{indcyclic}, $f([F(M)])\neq 0$ (the full faithfulness of $F$ implies that $F(M)$ is indecomposable in the same tube as $f$) and moreover by normalization, $f([F(M)])=1$. By formula \eqref{projection}, $\tilde{F}^{-1}(f^{\perp})$ is cuspidal, and moreover $\tilde{F}^{-1}(f^{\perp})([M])=1$. Therefore, $\tilde{F}^{-1}(f^{\perp})$ is the normalized cuspidal function of a homogeneous tube of $K_2$. \end{proof} \begin{cor} Let $a\in\lvert\PP^1_{\F_q}\rvert$. If $F$ sends the $a$-tube of $K_2$ on a homogeneous tube of $Q$, then \[ \tilde{F}(f^{K_2}_{a,n})=f_{a,n} \] is the normalized cuspidal regular function of $Q$ in the $a$-tube of dimension $n\deg(a)$. If $F$ sends the $a$-tube of $K_2$ on a non-homogeneous tube of $Q$, then \[ \tilde{F}(f^{K_2}_{a,n})=f_{a,n}^{\perp}. \] \end{cor} \begin{proof} This is an immediate consequence of Proposition \ref{orth} since in particular, if $a\in\lvert\PP^1_{\F_q}\rvert$ corresponds to a homogeneous tube, for any $n\geq 1$, $f_{a,n}^{\perp}=f_{a,n}$. \end{proof} \subsection{Cuspidal functions of affine quivers} \subsubsection{Cuspidal functions of an affine quiver in terms of regular cuspidal functions} Let us introduce some notations concerning partitions.
For $(\lambda_1,\hdots,\lambda_l)$ a partition, we define the following quantities: \[ |\lambda|=\sum_{i=1}^l\lambda_i, \] \[ l(\lambda)=l \] and \[ n(\lambda)=\sum_{i=1}^l(i-1)\lambda_i. \] For $d\geq 1$ and $q\neq 0$, set \[ \xi(d,q)=\sum_{|\lambda|=d}\frac{\prod_{j=1}^{l(\lambda)-1}(1-q^{j})}{a_{\lambda}(q)}, \] and for $\dd\in\N^I$, define also \[ \chi_{\dd} = \sum_{[M]\in\M_Q(\F_q)[\dd]}[M]. \] The main theorem of this paper is the following. \begin{theorem}\label{noyau} Let $\mathcal{B}=\{f_{x,n} : x\in\lvert\PP^1_{\F_q}\rvert, n\geq 1\}$ be the basis of normalized regular cuspidal functions of $Q$. Then the kernel of the linear form \[ \begin{matrix} L :& \HH_{Q,\F_q,\RC}^{\cusp}&\rightarrow &\C&\\ &f_{x,n}&\mapsto&\xi(n,q^{\deg(x)})&=(f_{x,n},\chi_{n\deg(x)}) \end{matrix} \] is the space of cuspidal functions of $Q$ of imaginary dimension, $\bigcup_{r\geq 1}\HH_{Q,\F_q}^{\cusp}[r\delta]$. \end{theorem} \begin{proof} Let $r\geq 1$ be an integer and $\dd=r\delta$ a multiple of the indivisible imaginary root. We denote by $L[\dd] : \HH_{Q,\F_q,\RC}^{\cusp}[\dd]\rightarrow \C$ the restriction of $L$. We already know that $\HH_{Q,\F_q}^{\cusp}[\dd]\subset \HH_{Q,\F_q,\RC}^{\cusp}[\dd]$ is a hyperplane. To prove the theorem, it suffices to prove that $L[\dd]$ is a nonzero linear form whose kernel contains $\HH_{Q,\F_q}^{\cusp}[\dd]$. \emph{Step 1:} We prove that $\dd$-dimensional cuspidal functions of $Q$ are in the kernel of $L[\dd]$. We can suppose -- using the dualization process -- that the extending vertex $i_0$ is a sink. We use previously defined notations, in particular $I_{\theta}$ is an indecomposable of $Q$ of dimension $\theta=\delta-e_{i_0}$. We will show that for $x\in\lvert\PP^1_{\F_q}\rvert$, \begin{equation}\label{formxi} L(f_{x,n})=\frac{1}{\nu^{\langle I_{\theta}^{\oplus r},S_{i_0}^{\oplus r}\rangle} |\GL_n(\F_{q^{\deg(x)}})|^2}\Delta(f_{x,n})(I_{\theta}^{\oplus r},S_{i_0}^{\oplus r}).
\end{equation} This formula will imply our claim, since if $f$ is cuspidal of dimension $\dd$, then $\Delta(f)=f\otimes 1+1\otimes f$. Let us now prove Formula \eqref{formxi}. For a representation $M$ of $Q$, $\Delta([M])(I_{\theta}^{\oplus r},S_{i_0}^{\oplus r})=0$ if $M$ is not in the essential image of $F$ by Proposition \ref{ess}. Thus, since $F$ induces an equivalence of categories between $\Rep_{K_2}(\F_q)$ and the full subcategory of $\Rep_{Q}(\F_q)$ whose objects are extensions of $I_{\theta}^{\oplus d_1}$ by $S_{i_0}^{\oplus d_2}$ for some nonnegative integers $d_1$ and $d_2$, it suffices to prove \eqref{formxi} for $Q=K_2$ the Kronecker quiver, for which $I=S_1$ is the simple representation at the first vertex. For the Kronecker quiver, \[ f_{x,n}=\sum_{|\lambda|=n}\prod_{j=1}^{l(\lambda)-1}(1-q^{j\deg(x)})[I_{x,\lambda}^{K_2}]. \] Thus, \[ \Delta(f_{x,n})(S_1^{\oplus r},S_2^{\oplus r})=\nu^{\langle S_1^{\oplus r},S_2^{\oplus r}\rangle}\sum_{|\lambda|=n}\left(\prod_{j=1}^{l(\lambda)-1}(1-q^{j\deg(x)})\right)\frac{|\Aut(S_2^{\oplus r})||\Aut(S_1^{\oplus r})|}{a_{\lambda}(q^{\deg(x)})}. \] Now, we have $|\Aut(S_1^{\oplus r})|=|\Aut(S_2^{\oplus r})|=|\GL_r(\F_{q^{\deg(x)}})|$, giving \eqref{formxi}. \emph{Step 2:} We prove that $L[\dd]$ is a nonzero linear form. If $r>1$ or $r=1$ and $q\neq 2$, there is at least one homogeneous tube of degree $r$. Let us choose $f=[S]$ where $S$ is a regular simple in such a tube. By definition, it is an element of the basis $\mathcal{B}$ of dimension $r\delta$ and $L[\dd](f)=\xi(1,q^r)=\frac{1}{q^r-1}\neq 0$, so $L[\dd]\neq 0$ in these cases. If $r=1$ and $q=2$, then in types $D$ or $E$, there are only non-homogeneous tubes in dimension $\delta$, since $\lvert\PP^1_{\F_2}(\F_2)\rvert$ has three elements. Let $f$ be a regular cuspidal function of dimension $\delta$ in a non-homogeneous tube.
By Equation \eqref{projection} and because the essential image in dimension $\delta$ of the functor $F$ defined above is precisely the full subcategory of objects which are nontrivial extensions of $I$ by $S_{i_0}$, we have \[ \Delta(f)(I,S_{i_0})=\Delta(f^{\perp})(I,S_{i_0})=\Delta(\tilde{F}^{-1}(f^{\perp}))(S_1,S_2)\neq 0 \] since $\tilde{F}^{-1}(f^\perp)=[N]$ where $[N]$ is a regular cuspidal function of $K_2$ of dimension $(1,1)$, therefore $N$ is one of the following representations of $K_2$ over $\F_2$: \[ \begin{tikzcd} \F_2 \arrow[r,shift left,"1"] \arrow[r,shift right,swap,"0"] & \F_2 \end{tikzcd}, \quad \begin{tikzcd} \F_2 \arrow[r,shift left,"0"] \arrow[r,shift right,swap,"1"] & \F_2 \end{tikzcd} \quad \text{or} \quad \begin{tikzcd} \F_2 \arrow[r,shift left,"1"] \arrow[r,shift right,swap,"1"] & \F_2. \end{tikzcd} \] \end{proof} \begin{cor} The difference of two normalized regular cuspidal functions of the same dimension of two tubes of the same degree is a cuspidal function. \end{cor} \begin{proof} This is clear since for $x\in\lvert\PP^1_{\F_q}\rvert$ and $s\geq 1$, $L(f_{x,s})$ depends only on $s$ and $\deg(x)$. \end{proof} \section{Two conjectures of Berenstein and Greenstein}\label{7} Throughout this section, $Q$ is an acyclic affine quiver and $\F_q$ a finite field. \subsection{Fortuitous cancellation theorem} We prove in this section a result we use to relate Conjecture \ref{conj1} and Conjecture \ref{conj2} of Berenstein and Greenstein. We prove that for a cuspidal regular function $f\in \HH_{Q,\F_q,\RC}$, (\emph{i.e.} such that $\Delta_{\RC}(f)=f\otimes 1+1\otimes f$), the function $\Delta(f)-(f\otimes 1+1\otimes f)$ is supported on the subset $\IC\times \PC=\{([M],[N]) : \text{ $M$ preinjective and $N$ preprojective}\}$. Let $f\in \HH_{Q,\F_q}$ be a regular cuspidal function.
\emph{A priori}, $\Delta(f)$ is a sum of the primitive part and terms of the form: $[I]\otimes[P], [I]\otimes [R\oplus P], [R\oplus I]\otimes [P]$ and $[R\oplus I]\otimes [R'\oplus P]$ where $R$ (resp. $P$, resp. $I$) is any regular (resp. preprojective, resp. preinjective) representation. Indeed, if a term of the form $[M]\otimes[N]$ appears with nonzero coefficient in the comultiplication of $[R]\in\HH_{Q,\F_q}$, this means that $N$ is a sub-representation of $R$ and $M$ the quotient $R/N$. But a subrepresentation of a regular representation has only regular and preprojective indecomposable direct summand and the quotient of a regular representation by a subobject has only indecomposable regular and preinjective summands. We will show the following cancellation theorem. \begin{theorem} Let $f\in\HH_{Q,\F_q,\RC}$ be a regular cuspidal function. Then the comultiplication $\Delta(f)\in\HH_{Q,\F_q}\otimes\HH_{Q,\F_q}$ is the sum of its primitive part and terms of the form $[I]\otimes[P]$. \end{theorem} \begin{proof} Let $f\in\HH_{Q,\F_q,\RC}$ be a regular cuspidal function. The regular cuspidality precisely means that $\Delta_{\RC}(f)([M],[N])=0$ for any nonzero regular representations $M$ and $N$ of $Q$. We will use the coassociativity to prove the theorem. Suppose $\Delta(f)(R\oplus I,P)\neq 0$ for some representations $P,R$ and $I$ of $Q$ with $R$ regular, $I$ preinjective and $P$ preprojective. We first prove that for any $g\in \HH_{Q,\F_q,\RC}$, \begin{equation}\label{form1} (\Delta\otimes 1)\circ \Delta(g)(R,I,P)=(1\otimes \Delta)(\Delta_{\RC}(g))(R,I,P) \end{equation} which will imply our assertion for the terms of the form $(R\oplus I)\otimes P$. Let $M$ be a regular representation. Then consider a filtration \[ 0\subset A\subset M \] of $M$ with successive quotients $P, R\oplus I$. 
Because $\Ext^1(R,I)=0$ and $\Hom(I,R)=0$, the datum of such a filtration is equivalent to the datum of a filtration: \[ 0\subset A\subset B\subset M \] with successive quotients $P, I, R$ (the two sorts of filtrations of $M$ with the given quotients are in one-to-one correspondence). Since $M$ and $M/B$ are by assumption regular, so is $B$. This proves \eqref{form1} for $g=[M]$ and then for any function $g$ by linearity. Therefore, \[ (\Delta\otimes 1)\circ \Delta(f)(R,I,P)=(1\otimes \Delta)(f\otimes 1+1\otimes f)(R,I,P)=(f\otimes 1\otimes 1+1\otimes\Delta(f))(R,I,P)=0. \] But a term of the form $[R]\otimes [I]\otimes [P]$ in the decomposition of $(\Delta\otimes 1)\circ \Delta(f)$ can only come from the term $[R\oplus I]\otimes [P]$ of $\Delta(f)$, because $\Ext^1(R,I)=0$, yielding a contradiction if this one appears with nonzero coefficient in $\Delta(f)$. The case of $I\otimes R\oplus P$ is dual: the ingredients to handle this case are the formula \[ (1\otimes \Delta)\circ \Delta(f)(I,P,R)=(\Delta\otimes 1)(\Delta_{\RC}(f))(I,P,R) \] and the fact that a term of the form $[I]\otimes[P]\otimes[R]$ in $(1\otimes \Delta)\circ \Delta(f)$ can only come from the term $[I]\otimes[R\oplus P]$ of $\Delta(f)$. The case of $[R\oplus I]\otimes [R'\oplus P]$ is more subtle but is a consequence of the formula : \[ (\Delta\otimes1\otimes 1 )\circ (1\otimes \Delta)\circ \Delta(f)(R,I,P,R')=(1\otimes \Delta\otimes 1)(((1\otimes \Delta)\circ \Delta)_{\RC}(f))(R,I,P,R'), \] where for $f\in\HH_{Q,\F_q}$, $((1\otimes \Delta)\circ\Delta)_{\RC}(f)$ is the projection on $\HH_{Q,\F_q,\RC}^3$ of $((1\otimes \Delta)\circ\Delta)(f)$. This formula is proved using the fact that filtrations of a regular module $M$ \[ 0\subset A\subset M \] with successive quotients $P\oplus R'$ and $I\oplus R$ are in one-to-one correspondence with filtrations of $M$ of the form \[ 0\subset B\subset A\subset C\subset M \] with successive quotients $R', P, I, R$. 
As before, we also need the fact that a term of the form $[R]\otimes[I]\otimes[P]\otimes[R']$ in $(\Delta\otimes1\otimes 1)\circ (1\otimes \Delta)\circ \Delta(f)$ can only come from the term $[R\oplus I]\otimes [R'\oplus P]$ of $\Delta(f)$. \end{proof} \subsection{Two conjectures of Berenstein and Greenstein} In their paper \cite{MR3463039}, Berenstein and Greenstein gave the following conjectures proved by Deng and Ruan in \cite{MR3612468} using weighted projective lines and Hall polynomials. For $n>1$, let $N(n)=\text{number of closed points of $\PP^1_{\F_q}$ of degree $n$}$ and $N(1)=q+1-d$. This is the number of closed points of $\PP^1_{\F_q}$ of degree $n$ not in $D$. \begin{conj}\label{conj1} For $s\geq 1$, $d\geq 1$, and $x\in\PP_{\F_q}^1$ a closed point of degree $d$ not in $D$, \[ f_{x,s}-\frac{1}{N(d)}\sum_{\substack{y\in\lvert\PP^1_{\F_q}\rvert\setminus D\\ \deg(y)=d}}f_{y,s} \] is a cuspidal function. \end{conj} \begin{conj}\label{conj2} Let $P$ be a preprojective representation and $I$ a preinjective representation. Then, for any partition $\lambda$ and closed points $x,y\in\lvert\PP^1_{\F_q}\rvert\setminus D$, \[ F^{P,I}_{I_{\lambda}(x)}=F^{P,I}_{I_{\lambda}(y)}. \] \end{conj} Thanks to the fortuitous cancellation theorem, Conjecture \ref{conj2} implies Conjecture \ref{conj1}. We will prove both conjectures using Theorem \ref{noyau}. We provide a direct proof of Conjecture \ref{conj1}. \begin{theorem}\label{solconj} Conjecture \ref{conj1} holds. \end{theorem} \begin{proof} Let $x\in\lvert\PP^1_{\F_q}\rvert\setminus D$ be a closed point of degree $d$ and \[ f=f_{x,s}-\frac{1}{N(d)}\sum_{\substack{y\in\lvert\PP^1_{\F_q}\rvert\setminus D\\ \deg(y)=d}}f_{y,s}. \] Then $f$ is a function of dimension $s\deg(x)$. From Theorem \ref{noyau}, $f$ is cuspidal if and only if $L(f)=0$. But \[ L(f)=\xi(s,q^{\deg(x)})-\frac{1}{N(d)}\sum_{\substack{y\in\lvert\PP^1_{\F_q}\rvert\setminus D\\ \deg(y)=d}}\xi(s,q^{\deg(y)})=0.
\] \end{proof} To prove Conjecture \ref{conj2}, we first define an action of an infinite permutation group on the Hall algebra $\HH_{Q,\F_q}$. Let $\mathfrak{S}$ be the group of degree preserving permutations of $\lvert\PP^1_{\F_q}\rvert\setminus D$. The group $\mathfrak{S}$ is isomorphic to \[ \prod_{e\geq 1}\mathfrak{S}_{N(e)} \] where for a positive integer $N$, $\mathfrak{S}_N$ is the symmetric group on $N$ letters. The action \[ \mathfrak{S}\rightarrow \Aut(\HH_{Q,\F_q}) \] is defined as follows. For $M,N$ two representations, $\lambda$ a partition, $x\in\lvert\PP^1_{\F_q}\rvert\setminus D$ and $\sigma\in \mathfrak{S}$, \begin{gather*} \sigma\cdot [M]=[M] \quad \text{ if $[M]$ is preprojective, preinjective or in a non-homogeneous tube}\\ \sigma\cdot [I_{\lambda}(x)]=[I_{\lambda}(\sigma(x))]\\ \sigma\cdot ([M]\oplus [N])=(\sigma\cdot [M])\oplus (\sigma\cdot[N]) \end{gather*} where for notational reasons, we define here $[M]\oplus[N]=[M\oplus N]$. It is easily seen that $\sigma$ acts as a graded linear isomorphism on $\HH_{Q,\F_q}$. The following is the second important result of this paper. \begin{theorem} The group $\mathfrak{S}$ acts by Hopf-algebra automorphisms on $\HH_{Q,\F_q}$. \end{theorem} \begin{proof} It is a consequence of the following facts. \begin{enumerate} \item $\sigma$ acts as an isometry of $\HH_{Q,\F_q}$, \item The action of $\sigma$ leaves $\HH_{Q,\F_q,\RC}$ and $\HH_{Q,\F_q,\RC}^{\cusp}$ stable, \item $\sigma$ commutes with the linear form $L$. In particular, it preserves $\HH_{Q,\F_q}^{\cusp}[\dd]$ for any dimension $\dd$. \end{enumerate} Indeed, for $(1)$, we have to prove that for $M$ a representation of $Q$ over $\F_q$, the number of automorphisms of $M$ and $\sigma M$ are the same. We write $M\simeq I\oplus R\oplus P$ with $I$ preinjective, $R$ regular and $P$ preprojective. There is no morphisms from $I$ to $R$ or $P$ and no morphisms from $R$ to $P$. 
Therefore, an endomorphism of $M$ is upper triangular in this direct sum decomposition. Since for a partition $\lambda$ and a closed point $x\in\lvert\PP^1_{\F_q}\rvert\setminus D$ the number of automorphisms of $I_{\lambda}(x)$, $a_{\lambda}(q^{\deg(x)})$, only depends on the degree of $x$, we only have to prove that for a partition $\lambda$, $x\in\lvert\PP^1_{\F_q}\rvert\setminus D$, a preprojective representation $P$ and a preinjective representation $I$, both numbers \begin{gather*} \dim\Hom(P,I_{\lambda}(x)),\\ \dim\Hom(I_{\lambda}(x),I) \end{gather*} only depend on the degree of $x$. This is straightforward since using the Euler form, the first equals $\langle\dim P,|\lambda|\deg(x)\delta\rangle$ and the second equals $\langle|\lambda|\deg(x)\delta,\dim I\rangle$. For $(2)$, by definition $\sigma$ leaves $\HH_{Q,\F_q,\RC}$ stable and the element $f_{x,n}$ of $\HH_{Q,\F_q,\RC}$ satisfies $\sigma\cdot f_{x,n}=f_{\sigma(x),n}$, so $\HH_{Q,\F_q,\RC}^{\cusp}$ is stable under $\sigma$. For $(3)$, we notice that $L(f_{x,n})=\xi(n,q^{\deg(x)})=L(f_{\sigma(x),n})$. Therefore, $\sigma$ sends a system of orthogonal cuspidal generators of $\HH_{Q,\F_q}$ to another such system. Theorem \ref{SVthm} tells us that these systems of generators satisfy the same relations: $\sigma$ is an algebra morphism. Since $\sigma$ also preserves cuspidal functions, it is a Hopf algebra automorphism. \end{proof} \begin{cor} Conjecture \ref{conj2} holds. \end{cor} \begin{proof} As usual, $\Delta_{I,P}$ denotes the truncation of the comultiplication: for $f\in\HH_{Q,\F_q}$, we keep only the terms of the form $[I']\otimes[P']$ in $\Delta(f)$. A reformulation of Conjecture \ref{conj2} is \[ \Delta_{I,P}[I_{\lambda}(x)]=\Delta_{I,P}[I_{\lambda}(y)] \] for any partition $\lambda$ and closed points $x,y\in \lvert\PP^1_{\F_q}\rvert\setminus D$. For any $x,y\in\lvert\PP^{1}_{\F_q}\rvert\setminus D$, we choose $\sigma\in \mathfrak{S}$ such that $\sigma(x)=y$.
We have \[ \Delta_{I,P}([I_{\lambda}(y)])=\Delta_{I,P}(\sigma\cdot[I_{\lambda}(x)])=(\sigma\otimes\sigma)\cdot\Delta_{I,P}([I_{\lambda}(x)])=\Delta_{I,P}([I_{\lambda}(x)]) \] as $\sigma$ acts trivially on preinjective and preprojective representations. \end{proof} \section{Isotropic cuspidal dimensions of quivers}\label{8} Let $Q=(I,\Omega)$ be an arbitrary quiver and $\F_q$ a finite field. \subsection{The support of an isotropic cuspidal dimension of a quiver}\ Write $e_i=(0,\hdots,0,1,0,\hdots, 0)$ where all coordinates except the $i$-th are $0$. The following seems to be known and is not difficult to prove but we reproduce the proof for the sake of completeness. \begin{proposition}\label{connected} Let $\dd$ be a cuspidal dimension of $Q$ over $\F_q$. Then $\dd$ has a connected support. \end{proposition} \begin{proof} Let $\dd\in\N^I$ be a cuspidal dimension and $f$ a nonzero cuspidal function of dimension $\dd$. Suppose that $\supp\dd$ is not connected; then we can write $P=\supp\dd=P_1\sqcup P_2$ where there is no arrow between $P_1$ and $P_2$. For a representation $M$ of $Q$ over $\F_q$, we write $M_{P_j}$ for $j=1,2$ the representation of $Q$ which coincides with $M$ on $P_j$ and is zero at the other vertices. Since $P_1$ and $P_2$ are not connected, \[\Ext^1(M_{P_1},M_{P_2})=\Ext^1(M_{P_2},M_{P_1})=0 \] and \[\Hom(M_{P_1},M_{P_2})=\Hom(M_{P_2},M_{P_1})=0 .\] Therefore, in $\HH_{Q,\F_q}$, \[ [M_{P_1}][M_{P_2}]=[M_{P_1}\oplus M_{P_2}]=[M_{P_2}][M_{P_1}]. \] Now write \[ f=\sum_{[M], \dim M=\dd}c_{[M]}[M]=\sum_{[M], \dim M=\dd}c_{[M]}[M_{P_1}\oplus M_{P_2}]. \] Since from the characterization of cuspidal functions, $f$ is orthogonal to any nontrivial products, \[ c_{[M]}=|\Aut(M)|(f,[M])=|\Aut(M)|(f,[M_{P_1}][M_{P_2}])=0, \] giving $f=0$, a contradiction. \end{proof} The following Theorem \ref{isotropic} is a direct consequence of \cite[Proposition 5.7]{KacInfinite} and of the inequalities verified by cuspidal dimensions in case of quivers without loops.
We reproduce the proof here for arbitrary quivers for the convenience of the reader. \begin{theorem}\label{isotropic} Let $\dd$ be an isotropic cuspidal dimension of $Q$ over $\F_q$ (\emph{i.e.} $(\dd,\dd)=0$ where $(-,-)$ is the symmetrized Euler form of the quiver). Then the support $\supp \dd$ of $\dd$ is an affine quiver. \end{theorem} \begin{proof} From \cite[Proposition 3.2 1.(a)]{SevenhantVdB}, which can be extended to an arbitrary quiver with few modifications\footnote{Sevenhant and Van den Bergh restrict themselves to edge loop free quivers, but their proof can be slightly modified for any quiver.}, we have \[ (\dd,e_i)\leq 0 \] for any $i\in I$ and moreover $\supp \dd$ is connected by Proposition \ref{connected}. We set $P=\supp\dd$ and see $P$ as a full subquiver of $Q$. The condition $(\dd,\dd)=0$ then implies \begin{equation}\label{nul} (\dd, e_i)=0 \end{equation} for $i\in \supp \dd$: in fact, if $\dd=\sum_{i\in \supp\dd}d_ie_i$ with $d_i$ a positive integer, $(\dd,\dd)=\sum_{i\in\supp\dd}d_i(\dd,e_i)=0$ and each term of this sum is negative or zero, which implies the result. We show that if $P$ contains edge loops, $P$ is the Jordan quiver. Let $i\in \supp\dd$ be a vertex with $g$ loops. Then \[ (\dd,e_i)=2(1-g)d_i - \sum_{\alpha : i\rightarrow j}d_j-\sum_{\alpha : j\rightarrow i}d_j. \] This quantity must be zero. The only possibility is $g=1$ and $P$ contains no vertices adjacent to $i$. Thanks to the connectedness of $P$, we deduce that $P$ is the Jordan quiver. Suppose now $P$ has no edge loop. Then the Cartan matrix $A_{P}$ of the subquiver $P$ is a generalized Cartan matrix as defined in \cite[Chapter 4]{KacInfinite}, that is for any $i,j$ vertices of $P$, \[ a_{i,i}=2 \quad\text{ and }\quad a_{i,j}=0\implies a_{j,i}=0. \] The matrix $A_{P}$ is indecomposable because $P$ is connected.
Moreover, the vector $\dd$ has positive nonzero integer coefficients and verifies $A_{P}\dd=0$ because of equation \eqref{nul}, since by definition of the symmetrized Euler form of a quiver, $(\dd,e_i)={}^{t}e_iA\dd$. The classification of indecomposable generalized Cartan matrices given in \cite[Theorem 4.3]{KacInfinite} implies that $A_{P}$ is of affine type. From \cite[Lemma 4.5]{KacInfinite}, since $A_{P}$ is by definition symmetric, $A_{P}$ is positive semidefinite. Thanks to \cite[Theorem 8.6 2.]{SchifflerQuiver}, $\supp\dd$ is an affine quiver: this concludes the proof. \end{proof} \subsection{Consequences} \subsubsection{} Theorem \ref{isotropic} together with Theorem \ref{noyau} gives an explicit description of cuspidal isotropic functions of any quiver. The cuspidal functions of a quiver are of three different types. We have the real cuspidal functions: the $[S_i]$ for $i$ a real vertex of $Q$ and $S_i$ the simple representation of dimension $e_i$, the isotropic cuspidal functions, which are the homogeneous cuspidal functions of dimension $\dd$ with $(\dd,\dd)=0$ and finally the hyperbolic cuspidal functions, that is cuspidal functions of dimension $\dd$ such that $(\dd,\dd)<0$. \subsubsection{A conjecture of Bozec and Schiffmann for isotropic dimensions} In the paper, \cite{BozecCounting}, Bozec and Schiffmann give the formula: \begin{equation}\label{teq} C_{Q,\dd}^{abs}(t)=t \end{equation} if $\dd$ is an isotropic dimension of an affine quiver. By Theorem \ref{isotropic}, we can replace the condition $(2)$ of Proposition \ref{characterization} by the condition $(2)'$: if $\dd$ is isotropic, then $C_{Q,\dd}^{abs}(t)=t$. We can also partially obtain Conjecture \ref{conjBS}. \begin{cor}\label{fin} Conjecture \ref{conjBS} holds for isotropic dimensions $\dd\in\N^I$. \end{cor} \end{document}
\begin{document} \author{\textbf{Samuel Deleplanque}\\ Ifsttar, COSYS, ESTAS, Universit\'e Lille Nord de France,\\ \textbf{Martine Labb\'e}\\ D\'epartament d'Informatique, Facult\'e des Sciences,\\ Universit\'e Libre de Bruxelles,\\ \textbf{Diego Ponce}\\ Instituto de Matem\'aticas de la Universidad de Sevilla (IMUS), \\ \textbf{ Justo Puerto}\\ Instituto de Matem\'aticas de la Universidad de Sevilla (IMUS). \\} \title{ An extended version of a Branch-Price-and-Cut Procedure for the Discrete Ordered Median Problem} \begin{abstract} The Discrete Ordered Median Problem (DOMP) is formulated as a set partitioning problem using an exponential number of variables. Each variable corresponds to a set of demand points allocated to the same facility with the information of the sorting position of their corresponding costs. We develop a column generation approach to solve the continuous relaxation of this model. Then, we apply a branch-price-and-cut algorithm to solve to optimality small to moderate size instances of DOMP in competitive computational time. \end{abstract} \section{Introduction \label{section:1}} Logistics is nowadays one of the most active fields in Operations Research, and Location Analysis is among its most important building blocks. Motivated by the need to apply more flexible models in Logistics, in the last years a new family of location models, namely the Ordered Median location Problem, has been proposed. An ordered median objective function computes ordered weighted averages of vectors (\cite{Nickel2005}) and when it is applied to location problems those vectors are distances or allocation costs from clients to service facilities. Ordered median location problems were first introduced in networks and continuous spaces by \cite{NP1999} and \cite{PF2000}, respectively. Later, they were extended to the discrete setting by \cite{Nickel2001,Boland2006}.
The Discrete Ordered Median Problem (DOMP) has been widely studied since the 90's and there is a number of different formulations, solution approaches and applications available in the literature (\citet{Boland2006,Dominguezmarin2003, Marin2009, Marin2010, Nickel2001, NP1999, Nickel2005, PP2013, Puerto2008, Puerto2009, Puerto2014}). Given a set of clients and a set of candidate locations and assuming that the allocation costs of clients to facilities are known, DOMP consists in choosing $p$ facility locations and assigning each client to a facility with smallest allocation cost in order to minimize the ordered weighted average of these costs. The ordered weighted average sorts the allocation costs in a non-decreasing sequence and then it performs the scalar product of this so-obtained sorted cost vector with a given vector of weights. There are several valid formulations for DOMP that exploit specific features of the problem (see e.g. \cite{Boland2006,Marin2009,Labbe2017} and the references therein). In \cite{Labbe2017} a new formulation for DOMP has been proposed, based on a set packing approach, that is valid for general cost coefficient. This formulation gives rise to rather tight integrality gaps and was shown to be reasonably efficient to solve medium size instances when embedded in a branch-and-cut (B\&C) scheme. In this paper we explore a different paradigm for solving DOMP based on an extended formulation using an exponential number of variables corresponding to a set partitioning model. Each variable represents a set of couples (client, position). These clients are served by the same facility and their position indicates the situation of this allocation cost in the sorted list of allocation costs in any feasible solution. To handle the exponential number of variables we use a column generation approach that is embedded in a branch-price-and-cut (\textbf{B\&P\&C\xspace}) algorithm. A recent similar approach can be seen in \cite{Hossein2016}. 
This scheme has never been applied to DOMP and it opens new avenues of research. Therefore, the contribution of this paper is to propose a new perspective in the resolution of DOMP based on formulations with an exponential number of variables and to develop an efficient \textbf{B\&P\&C\xspace} \; algorithm to handle them. This paper is organized as follows. After the introduction, Section \ref{section:2.2} introduces a new set partitioning formulation for DOMP. This formulation uses an exponential number of variables where each element of the partition is a set of clients together with their sorted positions that are assigned to the same server. This formulation is theoretically compared in Section \ref{section:2.3} with another valid formulation described in Section \ref{section:2.1} borrowed from \cite{Labbe2017}. Section \ref{section:2.4} describes the column generation algorithm that we have designed to overcome the large number of variables in the model. We prove that the pricing subproblem is solvable efficiently in polynomial time by using an \textit{ad hoc} dynamic programming algorithm. We devote Section \ref{section:3} to the implementation details of our \textbf{B\&P\&C\xspace} \; algorithm. We develop a GRASP heuristic, in Section \ref{section:3.1}, that is used both to generate a promising initial solution and a pool of variables to initialize the column generation routine. We also develop a stabilization routine, based on \cite{Pessoa2010}, that reduces considerably the number of iterations of the column generation approach in Section \ref{section:3.2}. In addition, Sections \ref{section:3.3} and \ref{section:3.4} are devoted to presenting two additional improvements, namely a pricer heuristic and a preprocessing.
The next two subsections, \ref{section:3.5} and \ref{section:3.6}, present our branching strategies and some families of valid inequalities that will be added to the branch-and-price algorithm. The next section, namely Section \ref{section:4}, is devoted to reporting on the final computational experiments of this paper. Here, we report on the performance of the solution approach. Besides, we also compare the performance of the \textbf{B\&P\&C\xspace}\; algorithm presented in this paper against the compact formulation in Section \ref{section:2.1}. The paper ends with a section devoted to concluding remarks. \section{Problem definition and formulations\label{section:2}} Let $I$ be a set of $n$ points which at the same time represent clients and potential facility locations which are assumed to be uncapacitated; and let $c_{ij}$ denote the cost for serving client $i$'s demand from facility $j$. \renewcommand{\labelenumi}{\arabic{enumi}.} Given a set $J$ of $p$ open facilities, let $c_i(J)$ represent the cost for allocating client $i$ to the cheapest facility in $J$ so that $c_i(J):=\displaystyle\min_{j \in J} c_{ij}$. Now let us sort the costs $c_i(J)$, $i\in I$, by non-decreasing order of their values. The elements of the resulting vector of ordered costs are denoted by $c^{(k)}(J)$ and satisfy $c^{(1)}(J)\leq \cdots \leq c^{(n)}(J).$ Given a vector $\lambda=(\lambda^k)^n_{k=1}$ satisfying $\lambda^k\geq 0, k=1,\dots,n$, the objective function of DOMP is defined as \begin{equation}\label{fo:def}z(J):=\sum_{k=1}^n\lambda^kc^{(k)}(J).\end{equation} Recall that this objective function provides a very general paradigm to encompass standard and new location models.
For instance, if $\lambda^1=\dots=\lambda^n=1$ we obtain the median objective, if $\lambda^1=\lambda^2=\dots=\lambda^{n-1}=0,\lambda^n=1$ we obtain the center objective, if $\lambda^1=\lambda^2=\dots=\lambda^{n-1}=\alpha,\lambda^n=1$, where $\alpha\in[0,1]$, we obtain a convex combination of median and center objectives (centdian), etc. The $p$-facility Discrete Ordered Median Problem looks for the subset $J$ of $p$ facilities to open in order to minimize the ordered median function: \begin{equation}\label{of:DOMPdefiniton} \tag{DOMP}\min_{J\subseteq I:|J|=p}z(J).\end{equation} There are several available formulations of DOMP in the literature using different spaces of variables. Among them we mention those based on some combinations of the $p$-median and permutation polytopes (\citep{Boland2006}) or on coverage approaches based on radius variables (\citep{Puerto2008}, \citep{Marin2009,Marin2010}). \subsection{An explicit formulation for DOMP: The Weak Order Constraints\label{section:2.1}} In the following, we recall the Weak Order Constraints formulation, that we will refer to as $WOC$, introduced in \citet{Labbe2017}, that will be the starting point for the developments presented in this paper. This formulation uses two types of binary variables. Variables $y_j$ assume value 1 if facility $j$ is open (i.e. $j \in J$) and 0 otherwise. Variables $x_{ij}^k$ are equal to 1 if client $i$ is allocated to facility $j$ and the corresponding cost occupies position $k$ in the allocation cost ranking (i.e. $c^{(k)}(J)=c_{ij}$). The choice of this formulation is motivated by its good performance in terms of integrality gap (see \citep{Labbe2017}). However, it requires a significant amount of memory since it needs $O(n^3)$ binary variables which may become prohibitive for moderate $n$. Let $R=(r_{ij}) \in \mathbb{N}^{n\times n}$ be a matrix such that $r_{ij}=\ell$ if $c_{ij}$ is the $\ell$-th element in the sorted list of the costs in $C=(c_{ij})$, where ties are broken arbitrarily.
In other words, $r_{ij}$ is the position in the above list of the allocation cost $c_{ij}$ of the problem. For the sake of readability the reader is referred to Example \ref{ex:firstsolution} in Section \ref{section:2.4}. Thus, the formulation is \begin{eqnarray} (WOC):\; \min&\displaystyle\sum_{i=1}^n\sum_{j=1}^n\sum_{k=1}^n\lambda^kc_{ij}x_{ij}^k\label{c3:ofdomp4}\\ \mbox{s.t.}&\displaystyle\sum_{j=1}^n\sum_{k=1}^nx_{ij}^k=1&i=1,\dots,n\label{eq:1fb}\\ &\displaystyle\sum_{i=1}^n\sum_{j=1}^nx_{ij}^k = 1&k=1,\dots,n\label{eq:2fb}\\ &\displaystyle\sum_{k=1}^nx_{ij}^k \leq y_j & {\small i,j=1,\dots,n} \quad \label{in:1fb}\\ &\displaystyle\sum_{j=1}^ny_{j} = p\label{eq:3fb}\\ &\hspace*{-4cm}\displaystyle \sum_i^n\sum_j^n\left(\sum_{i'=1}^n\sum_{\substack{j'=1:\\ r_{i'j'}\leq r_{ij}}}^nx_{i'j'}^k + \sum_{i'=1}^n\sum_{\substack{j'=1:\\ r_{i'j'}\geq r_{ij}}}^nx_{i'j'}^{k-1}\right) \le n^2, & k=2,\cdots,n\label{in:3f1}\\ &x_{ij}^k \in \{0,1\}& i,j,k=1,\dots,n \quad \label{binary:xf1}\\ &y_j \in \{0,1\}& j=1,\dots,n,\label{binary:yf1} \end{eqnarray} By means of (\ref{eq:1fb}) we ensure that each location is served by exactly one facility. In the same way, in each position there must be exactly one allocation cost (\ref{eq:2fb}). We know that a client can be allocated to a facility only if this facility is open, i.e. $x_{ij}^k\leq y_j$ for all $i,j,k$. Furthermore, each allocation cost of a client to a facility can be placed in at most one position. Hence, $x_{ij}^k\leq y_j$ can be strengthened yielding constraints (\ref{in:1fb}). The equality constraint (\ref{eq:3fb}) implies that there are exactly $p$ open facilities. The constraints (\ref{in:3f1}), called \textit{weak order constraints}, ensure that if client $i$, allocated to facility $j$, occupies the $k$-th position in the client ranking then in the $(k-1)$-th position there must be a more preferred allocation cost.
This property is enforced by the coefficients of each variable in the inequality. In each constraint there are two different positions, $k$ and $k-1$, so that, by (\ref{eq:2fb}), only two variables must take value one and all the others will be equal to zero. If we do not take into account the variables assuming the value zero and we assume that the variables with value one for positions $k$ and $k-1$ correspond to allocation pairs in sorted position $s$ and $t$, respectively, the inequality reduces to the following expression: $$(n^2-(s-1))x_{i_sj_s}^k+tx_{i_tj_t}^{k-1}\le n^2,$$ which is valid if and only if $t<s$. Finally, the variables are binary, see (\ref{binary:xf1}) and (\ref{binary:yf1}). $WOC$ can be reinforced by adding some valid inequalities \begin{equation} \sum_{i'=1}^n\sum_{\substack{j'=1:\\ r_{i'j'}\leq r_{ij}}}^nx_{i'j'}^k + \sum_{i'=1}^n\sum_{\substack{j'=1:\\ r_{i'j'}\geq r_{ij}}}^nx_{i'j'}^{k-1} \le 1, \; i,j=1,\cdots,n,\; k=2,\cdots,n. \label{in:3f1-des} \end{equation} Observe that constraints (\ref{in:3f1}) are the aggregation over $i,j$ of inequalities (\ref{in:3f1-des}). These inequalities are the so-called \textit{strong order constraints}, see \cite{Labbe2017} for a detailed explanation. \subsection{A set partitioning formulation\label{section:2.2}} From a linear programming relaxation point of view the above formulation is not the strongest one but it provides a good compromise between the number of required constraints and the quality of its linear relaxation bound, see \cite{Labbe2017}. Further, it allows solving to optimality problems of moderate size. One of its drawbacks is the use of a cubic number of variables, which can be prohibitive for large $n$. A second important problem of most known formulations for DOMP is the high degree of symmetry in case of allocation costs ($C$) or weighted ordered vector ($\lambda$) with many ties.
The reasons above motivate the introduction of a new formulation based on a different rationale. We observe that a solution for DOMP is a partition of the clients together with their positions in the sorted vector of costs so that each subset of clients in the partition is allocated to the same facility. Let us consider sets of couples $(i,k)$ where the first component refers to client $i$ and the second to position $k$, namely $S=\{(i,k): \text{ for some } i,k=1,\dots,n\}$. Associated with each set $S$ and facility $j$, we define variables \begin{eqnarray*}y_{S}^j&=&\left\{\begin{array}{cl}1&\text{if set $S$ is part of a feasible solution, i.e. } (i,k)\in S \text{ iff } x_{ij}^k=1\\0&\text{otherwise.}\end{array}\right.\end{eqnarray*} We observe that in any feasible solution each client $i$ must occupy a unique sorted position $k$ and must be allocated to a unique facility $j$, thus the following relationship holds $x_{ij}^k=\sum_{S\ni(i,k)}y_S^j$, for all $i,j,k$. Next, assuming that all clients in $S$ are allocated to facility $j$ and that the positions that appear in the second entry of the couples $(i,k)$ of the set $S$ satisfy the sorting among their allocation costs, i.e. $c_{ij}\le c_{i'j}$ whenever $(i,k)$, $(i',k')\in S$ and $k < k'$, we can evaluate the cost $c_S^j$ induced by the set $S$ provided that its clients are assigned to facility $j$ in a feasible solution: \begin{equation}c_S^j=\sum_{(i,k)\in S}\lambda^kc_{ij}.\label{eq:relation}\end{equation} To simplify the presentation in the following we denote by $(i,\cdot)$ the couples whose first entry is $i$ regardless of the value of the second entry. Analogously, $(\cdot,k)$ denotes the couples whose second entry is $k$ regardless of the value of the first entry. We give next a valid formulation for DOMP using the set of variables $y_S^j$. This will be our Master Problem ($MP$) in Section \ref{section:2.2}.
\begin{eqnarray} \textbf{(MP) }\min&\displaystyle\sum_{j=1}^n\sum_Sc_S^jy_S^j&\label{of:ma}\\ s.t.&\displaystyle\sum_{j=1}^n\sum_{S\ni(i,\cdot)}y_S^j&=1,\forall\,i\label{eq:1ma}\\ &\displaystyle\sum_{j=1}^n\sum_{S\ni(\cdot,k)}y_S^j&=1,\forall\,k\label{eq:2ma}\\ &\displaystyle\sum_{S}y_S^j&\le1,\forall\,j\label{eq:3ma}\\ &\displaystyle\sum_{j=1}^n\sum_{S}y_S^j&\le p,\label{eq:4ma}\\ &\hspace*{-1cm}\displaystyle\sum_{i=1}^n\sum_{j=1}^n\left(\sum_{\substack{S\ni(i',k)\\:r_{i' j'}\le r_{ij}}}y_S^{ j'}+\sum_{\substack{S\ni(i',k-1)\\:r_{i' j'}\ge r_{ij}}}y_S^{ j'}\right)&\le n^2,k=2,\dots,n\label{eq:5ma}\\ &y_S^j&\in\{0,1\},\forall\,S,j, \end{eqnarray} The objective function (\ref{of:ma}) accounts for the sorted weighted cost of any feasible solution. Constraints (\ref{eq:1ma}) ensure that each client appears exactly once in a set $S$. Constraints (\ref{eq:2ma}) ensure that each position is taken exactly once by a client in a set $S$. Constraints (\ref{eq:3ma}) guarantee that each facility $j$ serves at most one set $S$ of clients. Inequality (\ref{eq:4ma}) states that at most $p$ facilities will be opened. By the following family of inequalities (\ref{eq:5ma}) we enforce the correct sorting of the costs in any feasible solution. Finally, the variables are binary. We note in passing that this formulation is not a Dantzig-Wolfe reformulation of $WOC$ but a new formulation based on the properties of the problem. Indeed, the definition of a column $y_S^j$ includes conditions on the position of the clients in $S$. Hence partial order constraints are transferred to the pricing problem. The above formulation can be strengthened by adding valid inequalities borrowed from $WOC$. Indeed, one can translate valid inequalities (\ref{in:3f1-des}) in terms of the $y_S^j$ variables so that they can be used in the set partition formulation of DOMP.
The translation of (\ref{in:3f1-des}) results in: \begin{equation} \displaystyle\sum_{\substack{S\ni(i',k)\\:r_{i' j'}\le r_{ij}}}y_S^{ j'}+\sum_{\substack{S\ni(i',k-1)\\:r_{i' j'}\ge r_{ij}}}y_S^{ j'}\le 1,\; i,j=1,\dots,n,k=2,\dots,n.\label{cuts} \end{equation} \subsection{Theoretical comparison of formulations \label{section:2.3}} One can prove that the linear relaxation of $MP$, from now on $LRMP$, is tighter than that of $WOC$. Let $P_{MP}$ and $P_{WOC} $, denote, respectively, the polyhedra defined by the feasible domains of $MP$ and $WOC$ relaxing the integrality constraints. Moreover, let $N$ be the dimension of the space of variables $y_S^j$ defined above and consider the following mapping $$\begin{array}{rcl} f:[0,1]^N&\longrightarrow& [0,1]^{n^3}\times[0,1]^n\\ (y_s^j)&\longmapsto&(x_{ij}^k,y_j) \end{array}$$ defined by the following two equations \begin{equation} x_{ij}^k=\sum_{S\ni (i,k)}y_S^j\quad i,j,k=1,\dots,n\label{c3r:1yx} \end{equation} and \begin{equation} y_j=\sum_{S}y_S^j\quad j=1,\dots,n\label{c3r:2yy}. \end{equation} \begin{pro} \label{c3-pro-inclusionMP-DOMP} Let $p=(y_S^j)$. If $p\in P_{MP} $, then $f(p)\in P_{WOC}$. \end{pro} \begin{proof} Let us assume that $p\in P_{MP} $. We prove that $f(p)$ satisfies (\ref{eq:1fb})-(\ref{in:3f1}). To prove $(\ref{eq:1fb})$, observe that, according to the definition of $x_{ij}^k$ in (\ref{c3r:1yx}), $\sum_{S\ni (i,.)} y_S^j=\sum_{k=1}^n x_{ij}^k$. Therefore, substituting in (\ref{eq:1ma}) we get the desired result. Checking the validity of (\ref{eq:2fb}) is analogous. Now, we prove (\ref{in:1fb}). Observe that by (\ref{c3r:1yx}) $\sum_{k=1}^n x_{ij}^k = \sum_{k=1}^n \sum_{S\ni (i,k)} y_S^j=\sum_{S\ni (i,\cdot)} y_S^j$ and then $$ \sum_{S\ni (i,\cdot)} y_S^j \le \sum_{S} y_S^j\le 1.$$ This last inequality holds by (\ref{eq:3ma}) which proves (\ref{in:1fb}).
To check (\ref{eq:3fb}) we replace (\ref{c3r:2yy}) in (\ref{eq:4ma}) to obtain $\sum_{j=1}^n y_j\le p$. The equality follows because setting extra $y_j$ variables to 1 does not worsen the objective function since all $y_j$ variables have null cost. Finally, (\ref{eq:5ma}) follows analogously substituting (\ref{c3r:1yx}) in (\ref{in:3f1}). \end{proof} Hence, it is clear that the bound obtained by LRMP is at least as good as the bound provided by the linear relaxation of $WOC$. There are instances where the inclusion is strict as shown by the integrality gap results reported in Table \ref{ResultsLP20and30}. Let $P_{SOC}$ be the polyhedron defined by the constraints (\ref{eq:1fb})-(\ref{eq:3fb}) and (\ref{in:3f1-des}) assuming the variables $(x,y)\in [0,1]^{n^3}\times [0,1]^n$. Observe that this is the polyhedron that results from $P_{WOC}$ by replacing (\ref{in:3f1}) by (\ref{in:3f1-des}). Analogously, let $P_{SMP}$ be the convex polyhedron defined by the constraints (\ref{eq:1ma})-(\ref{eq:4ma}) and (\ref{cuts}), that results from $P_{MP}$ replacing (\ref{eq:5ma}) by (\ref{cuts}). We assume variables $y\in [0,1]^{N}$. The following result relates the feasible solutions of the linear relaxations of $MP$ and $WOC$ whenever all the cuts coming from the \textit{strong order constraints} are added to both formulations. \begin{coro} Let $p=(y_S^j)$. If $p\in P_{SMP}$, then $f(p)\in P_{SOC}$. \end{coro} The proof is similar to that of Proposition \ref{c3-pro-inclusionMP-DOMP}. \subsection{Column generation to solve LRMP}\label{section:2.4} Due to the fact that $MP$ can have a number of variables too large to be handled directly, in this section we describe a column generation approach to solve it. We begin by obtaining the dual of LRMP.
In order to do that let ($\alpha, \beta, \gamma,\delta,\epsilon$) be the dual variables associated, respectively, to constraints (\ref{eq:1ma}), (\ref{eq:2ma}), (\ref{eq:3ma}), (\ref{eq:4ma}) and (\ref{eq:5ma}). Then, DP, the dual problem of LRMP is \begin{align} \textbf{(DP)}\max&\displaystyle\sum_{i=1}^n\alpha_i+\sum_{k=1}^n\beta_k-\sum_{j=1}^n\gamma_j-p\delta-\sum_{k=2}^nn^2\epsilon_k&&\\ s.t.&\displaystyle\sum_{\substack{i=1\\:(i,\cdot)\in S}}^n\alpha_i+\sum_{\substack{k=1\\:(\cdot,k)\in S}}^n\beta_k-\gamma_j-\delta\nonumber\\ &\displaystyle-\sum_{k=2}^n\sum_{i'=1}^n\sum_{ j'=1}^n\left(\sum_{\substack{(i,k)\in S\\:r_{i' j'}\ge r_{ij}}}\epsilon_k+\sum_{\substack{(i,k-1)\in S\\:r_{i' j'}\le r_{ij}}}\epsilon_{k}\right)\le c_S^{j},\quad \forall\,j,S\\ &\gamma_j\ge0,\quad \forall\,j\nonumber\\ &\delta\ge0,\quad \nonumber\\ &\epsilon_k\ge0\quad \forall\,k.\nonumber \end{align} In order to apply the column generation procedure let us assume that we are given a set of columns that defines a restricted linear relaxation of the Master Problem, from now on $ReLRMP$. This problem is solved to optimality and we get its dual optimal variables ($\alpha^*, \beta^*, \gamma^*,\delta^*,\epsilon^*$). See Example \ref{ex:firstsolution}. The reduced cost, $\overline c_S^j$, of the column $y_S^j$, namely $\overline c_S^j=c_S^j-z_S^j$ is given as: $$\overline c_S^j=c_S^j+\gamma_j^*+\delta^*+\sum_{k=2}^n\sum_{i'=1}^n\sum_{ j'=1}^n\left(\sum_{\substack{(i,k)\in S\\:r_{i' j'}\ge r_{ij}}}\epsilon_k^*+\sum_{\substack{(i,k-1)\in S\\:r_{i' j'}\le r_{ij}}}\epsilon_{k}^*\right)-\sum_{\substack{i=1\\:(i,\cdot)\in S}}^n\alpha_i^*-\sum_{\substack{k=1\\:(\cdot,k)\in S}}^n\beta_k^*.$$ If $\overline c_S^j\ge0$ for all $S,j$ the current solution of ReLRMP is also optimal for the LRMP and the column generation procedure is finished.
Otherwise, one has identified one (some) new column(s) to be added to the current reduced master problem to proceed further. In each iteration, the ReLRMP and its reduced costs provide lower and upper bounds for the LRMP. Indeed it holds (\citet{Desrosiers2005}) \begin{eqnarray} z_{ReLRMP}+ p\cdot\min_{j,S}\overline c_S^j\le z_{LRMP}\le z_{ReLRMP},\label{lb1}\\ z_{ReLRMP}+ \sum_{j=1}^n\min_{S}\overline c_S^j\le z_{LRMP}\le z_{ReLRMP}. \label{lb2} \end{eqnarray} where $z_{ReLRMP}$ and $z_{LRMP}$ denote the optimal value of $ReLRMP$ and $LRMP$ respectively. \begin{ex}\label{ex:firstsolution} Consider the following cost matrix: $$C=\left(\begin{array}{ccc}1&3&6\\3&1&8\\6&8&1\end{array}\right)$$ and the vector $\lambda=(4,2,1)$. The precedence matrix is the following $$R=\left(\begin{array}{ccc}1&4&6\\5&2&8\\7&9&3\end{array}\right).$$ For $n=3$, there are 33 different sets of couples $(i,k)$. \begin{eqnarray*} \begin{array}{l} S_1=\{(1,1)\}\\ S_2=\{(1,2)\}\\ S_3=\{(1,3)\}\\ S_4=\{(2,1)\}\\ S_5=\{(2,2)\}\\ S_6=\{(2,3)\}\\ S_7=\{(3,1)\}\\ S_8=\{(3,2)\}\\ S_9=\{(3,3)\}\\ S_{10}=\{(1,1),(2,2)\}\\ S_{11}=\{(1,1),(2,3)\} \end{array} & \begin{array}{l} S_{12}=\{(1,1),(3,2)\}\\ S_{13}=\{(1,1),(3,3)\}\\ S_{14}=\{(1,2),(2,1)\}\\ S_{15}=\{(1,2),(2,3)\}\\ S_{16}=\{(1,2),(3,1)\}\\ S_{17}=\{(1,2),(3,3)\}\\ S_{18}=\{(1,3),(2,1)\}\\ S_{19}=\{(1,3),(2,2)\}\\ S_{20}=\{(1,3),(3,1)\}\\ S_{21}=\{(1,3),(3,2)\}\\ S_{22}=\{(2,1),(3,2)\} \end{array} & \begin{array}{l} S_{23}=\{(2,1),(3,3)\}\\ S_{24}=\{(2,2),(3,1)\}\\ S_{25}=\{(2,2),(3,3)\}\\ S_{26}=\{(2,3),(3,1)\}\\ S_{27}=\{(2,3),(3,2)\}\\ S_{28}=\{(1,1),(2,2),(3,3)\}\\ S_{29}=\{(1,1),(2,3),(3,2)\}\\ S_{30}=\{(1,2),(2,1),(3,3)\}\\ S_{31}=\{(1,2),(2,3),(3,1)\}\\ S_{32}=\{(1,3),(2,1),(3,2)\}\\ S_{33}=\{(1,3),(2,2),(3,1)\}.\\ \end{array} \end{eqnarray*} We consider as initial pool of columns the variables $y_{13}^1$ and $y_{5}^2$.
With this set of variables, the ReLRMP is $$ \begin{array}{rrrll} \textbf{(ReLRMP)}\min&+2y_{5}^2&+10y_{13}^1\\ s.t.&&+y_{13}^1&\ge1&i=1\\ &+y_{5}^2&&\ge1&i=2\\ &&+y_{13}^1&\ge1&i=3\\ &&+y_{13}^1&\ge1&k=1\\ &+y_{5}^2&&\ge1&k=2\\ &&+y_{13}^1&\ge1&k=3\\ &&-y_{13}^1&\ge-1&j=1\\ &-y_5^2&&\ge-1&j=2\\ &&&\ge-1&j=3\\ &-y_5^2&-y_{13}^1&\ge-2&\\ &-8y_5^2&-y_{13}^1&\ge-9&k=2\\ &-2y_5^2&-3y_{13}^1&\ge-9&k=3\\ &&y&\ge0& \end{array} $$ Actually, we are interested in its dual problem: $${\scriptsize \begin{array}{rrrrrrrrrrrrrll} \textbf{(DP)}\max&+\alpha_1&+\alpha_2&+\alpha_3&+\beta_1&+\beta_2&+\beta_3&-\gamma_1&-\gamma_2&-\gamma_3&-2\delta&-9\epsilon_2&-9\epsilon_3\\ s.t.&&+\alpha_2&&&+\beta_2&&&-\gamma_2&&-\delta&-8\epsilon_2&-2\epsilon_3&\le2&(y_5^2)\\ &+\alpha_1&&+\alpha_3&+\beta_1&&+\beta_3&-\gamma_1&&&-\delta&-\epsilon_2&-3\epsilon_3&\le10&(y_{13}^1)\\ \end{array} } $$ $$\alpha,\beta,\gamma,\delta,\epsilon\ge0$$ Solving (DP) the solution is $\alpha_2=2,\beta_3=10$ and the value of the objective function is $f=12$. \end{ex} \subsection{Solving the pricing subproblem\label{section:2.5}} Although any column $y_S^j$ with negative reduced cost may be added to ReLRMP, we will follow a strategy that identifies the most negative reduced cost for each facility $j$. This approach may give rise to several candidate columns (multiple pricing, see \citet{Chvatal1983}), which is advantageous for this procedure. In order to do that, we solve for each facility $j$ a subproblem to find the column with minimum reduced cost associated with a feasible set $S$, namely a solution that satisfies that there is at most one pair $(i,\cdot)$ for each client $i$ and one pair $(\cdot,k)$ for each position $k$. Furthermore, the set $S$ must enjoy that the allocation costs of its couples are ranked accordingly. We solve this problem by the following dynamic programming algorithm. 
The reader may gain some intuition interpreting the algorithm as a shortest path in a graph built upon the matrix $D_j$ defined in (\ref{matrizdj}). Let $d_{ij}^k$ be the contribution of the pair $(i,k)$ to the reduced cost of any column $y_S^j$ such that $(i,k)\in S$. Depending on the values of $k$, $d_{ij}^k$ is given by $$d_{ij}^k=\left\{\begin{array}{ll}\displaystyle\lambda^kc_{ij}+\sum_{i'=1}^n\sum_{\substack{ j'=1\\:r_{i' j'}\le r_{ij}}}\epsilon_{k+1}-\alpha_i-\beta_k&\text{if }k=1,\\ \displaystyle\lambda^kc_{ij}+\sum_{i'=1}^n\sum_{\substack{ j'=1\\:r_{i' j'}\ge r_{ij}}}^n\epsilon_k+\sum_{i'=1}^n\sum_{\substack{ j'=1\\:r_{i' j'}\le r_{ij}}}^n\epsilon_{k+1}-\alpha_i-\beta_k&\text{if }k=2,\dots,n-1,\\ \displaystyle\lambda^kc_{ij}+\sum_{i'=1}^n\sum_{\substack{ j'=1\\:r_{i' j'}\ge r_{ij}}}^n\epsilon_k-\alpha_i-\beta_k,&\text{if }k=n.\\\end{array}\right.$$ Now for each facility $j$, we define the matrix $D_j$, namely \begin{equation}\label{matrizdj}D_j=\left(\begin{array}{c c c c} d_{i_1j}^{1}&d_{i_1j}^{2}&\cdots&d_{i_1j}^{n}\\ d_{i_2j}^{1}&&&\\ \vdots&&\ddots&\\ d_{i_nj}^{1}&&&d_{i_nj}^{n} \end{array}\right) \end{equation} where $i_1,i_2,\dots,i_n$ is a permutation of the indices $i=1,\dots,n$ which ensures $c_{i_1j}\le c_{i_2j}\le\cdots\le c_{i_nj}$. \begin{ex}[continues=ex:firstsolution] Next, we show the procedure that computes the elements $d_{ij}^{k}$ for all $i,k=1,\ldots,n$ of the matrix $D_1$.
(\textbf{j=1}) \begin{eqnarray*} &&d_{11}^1=\lambda^1c_{11}+r_{11}\epsilon_{2}-\alpha_1-\beta_1=4\\ &&d_{11}^2=\lambda^2c_{11}+(n^2-r_{11}+1)\epsilon_{2}+r_{11}\epsilon_{3}-\alpha_1-\beta_2=2\\ &&d_{11}^3=\lambda^3c_{11}+(n^2-r_{11}+1)\epsilon_{3}-\alpha_1-\beta_3=-9\\ &&d_{21}^1=\lambda^1c_{21}+r_{21}\epsilon_{2}-\alpha_2-\beta_1=10\\ &&d_{21}^2=\lambda^2c_{21}+(n^2-r_{21}+1)\epsilon_{2}+r_{21}\epsilon_{3}-\alpha_2-\beta_2=4\\ &&d_{21}^3=\lambda^3c_{21}+(n^2-r_{21}+1)\epsilon_{3}-\alpha_2-\beta_3=-9\\ &&d_{31}^1=\lambda^1c_{31}+r_{31}\epsilon_{2}-\alpha_3-\beta_1=24\\ &&d_{31}^2=\lambda^2c_{31}+(n^2-r_{31}+1)\epsilon_{2}+r_{31}\epsilon_{3}-\alpha_3-\beta_2=12\\ &&d_{31}^3=\lambda^3c_{31}+(n^2-r_{31}+1)\epsilon_{3}-\alpha_3-\beta_3=-4 \end{eqnarray*} Since $r_{11}<r_{21}<r_{31}$ the valid permutation is $(1,2,3)$. This implies that $$D_1=\left(\begin{array}{rrr}4&2&-9\\10&4&-9\\24&12&-4\end{array}\right)\begin{array}{l}i=1\\i=2\\i=3\end{array} $$ \end{ex} We now present a dynamic programming algorithm to obtain the minimum reduced cost $\min_S\overline c_S^j$ for each $j=1,\ldots,n$. For each couple $(i_l,k)$, we use two functions $g^j(i_l,k)$ and $S^j(i_l,k)$ representing the minimum reduced cost and the corresponding set of couples of the smaller pricing problem limited to the $l$ first rows and $k$ first columns respectively. Our recursive procedure computes $g^j(i_l,k)$ and $S^j(i_l,k)$ for increasing values of $l$ and $k$ so that, at the end, $g^j({i_n},n)+\delta+\gamma_j=\min\limits_S\overline c_{S_j}^j$ and $S^j(i_n,n)=\text{arg}\,\min\limits_Sc_{S_j}$. Further, the procedure exploits the following feasibility conditions on $S$: \begin{enumerate}[(i)] \item at most one couple per row and column belong to $S$. \item if $(i_{\hat l},\hat k)$ and $(i_{\tilde l},\tilde k)\in S$ and $\hat k < \tilde k$ then $r_{i_{\hat l}j} < r_{i_{\tilde l}j}$.
\end{enumerate} {\bf{Algorithm Pricing Subproblem}} \begin{itemize} \item \textbf{Step 0} Set $g^j(i_1,1)=\min\{0,d_{i_1j}^1\}$ If $g^j(i_1,1)=d_{i_1j}^1<0$ , set $S^j(i_1,1)=\{(i_1,1)\}$. Otherwise set $S^j(i_1,1)=\emptyset$. \item \textbf{Step 1}. For $k=2,\dots,n$. Set $g^j(i_1,k)=\min\{d_{i_1j}^k,g^j(i_1,k-1)\}$ If $g^j(i_1,k)=g^j(i_1,k-1)$ , set $S^j(i_1,k)=S^j(i_1,k-1)$. Otherwise set $S^j(i_1,k)=\{(i_1,k)\}$. \item \textbf{Step 2}. For $l=2,\dots,n$. Set $g^j(i_l,1)=\min\{d_{i_lj}^1,g^j(i_{l-1},1)\}$ If $g^j(i_l,1)=g^j(i_{l-1},1)$ , set $S^j(i_l,1)=S^j(i_{l-1},1)$. Otherwise set $S^j(i_l,1)=\{(i_l,1)\}$. \item \textbf{Step 3}. For $k,l=2,\dots,n$. Set $g^j(i_l,k)=\min\{g^j(i_{l-1},k-1)+d_{i_lj}^k,g^j(i_{l-1},k-1),g^j(i_{l},k-1),g^j(i_{l-1},k)\}$ If $g^j(i_l,k)=g^j(i_{l-1},k-1)$ , set $S^j(i_l,k)=S^j(i_{l-1},k-1)$. Else, if $g^j(i_l,k)=g^j(i_{l},k-1)$ , set $S^j(i_l,k)=S^j(i_{l},k-1)$. Else, if $g^j(i_l,k)=g^j(i_{l-1},k)$ , set $S^j(i_l,k)=S^j(i_{l-1},k)$. Otherwise set $S^j(i_l,k)=S^j(i_{l-1},k-1)\cup\{(i_l,k)\}$. \end{itemize} Obviously, if $g^j(i_n,n)+\delta+\gamma_j$ is negative the variable $y_{S^j(i_n,n)}^j$ is a good candidate to be chosen in the next iteration of the column generation scheme. If we solve this problem for all $j$, we get $\overline c_{R}^j=\displaystyle \min_S\overline c_{S}^j$ and if $\overline c_{R}^j<0$, we can activate (at least) $y_{R}^j$. Next, we solve a new reduced master problem ReLRMP with this (these) new activated variable(s). \begin{ex}[continues=ex:firstsolution] We show the computation of the $g^j(i_n,n)$ and $S^j(i_n,n)$ for $j=1$. $g^1(i_1,1)=\min\{0,4\}=0,S^1(i_1,1)=\emptyset$. $g^1(i_1,2)=\min\{2,0\}=0,S^1(i_1,2)=\emptyset$. $g^1(i_1,3)=\min\{-9,0\}=-9,S^1(i_1,3)=\{(1,3)\}$. $g^1(i_2,1)=\min\{10,0\}=0,S^1(i_2,1)=\emptyset$. $g^1(i_3,1)=\min\{24,0\}=0,S^1(i_3,1)=\emptyset$. $g^1(i_2,2)=\min\{0+4,0,0,0\}=0,S^1(i_2,2)=\emptyset$. $g^1(i_3,2)=\min\{0+12,0,0,0\}=0,S^1(i_3,2)=\emptyset$.
$g^1(i_2,3)=\min\{0-9,0,-9,0\}=-9,S^1(i_2,3)=\{(1,3)\}$. $g^1(i_3,3)=\min\{0-4,0,-9,0\}=-9,S^1(i_3,3)=\{(1,3)\}$. We have obtained $g^1(i_3,3)$ and $S^1(i_3,3)=S_3$ being the potential set to be used, if the reduced cost is negative. Next, the corresponding reduced cost $\overline c_{3}^1=g^1(i_3,3)+\delta+\gamma_1=-9+0+0=-9<0$. Hence, we activate variable $y_{3}^1$. Next, the process continues with the following facilities, i.e. $j=2,3$. In this example the optimal solution can be certified after four complete iterations of the above process. The following table shows the objective function values and the negative reduced costs per facility obtained in each iteration. \begin{center} \begin{tabular}{r|c|rrr} &&\multicolumn{3}{c}{$\displaystyle\min_Sc_S^j$}\\ &f&j=1&j=2&j=3\\ \hline Iteration 0&12.00&-9.00&-11.00&-9.00\\ Iteration 1&12.00&-5.00&-4.00&-3.00\\ Iteration 2&12.00&-3.00&-3.00&-0.29\\ Iteration 3&9.00&0.00&0.00&0.00\\ \end{tabular} \end{center} \end{ex} \subsection{Dealing with infeasibility} One important issue when implementing a column generation procedure to solve a linear optimization problem is how to deal with infeasibility. This is especially crucial if the procedure is used within a branch-and-bound scheme to solve the linear relaxation of the problem in every node. In order to handle it, we resort to the so-called Farkas pricing. According to Farkas' Lemma, a reduced master problem is infeasible if and only if its associated dual problem is unbounded. Thus, to recover feasibility in the ReLRMP we have to revoke the certificate of unboundedness in the dual problem, which can be done by adding constraints to it. Since we are only interested in recovering feasibility in ReLRMP, one can proceed in the same way as the usual pricing, but with null coefficients in the objective function of the primal.
In this way, the Farkas dual problem is $$ \begin{array}{rrll} \max&\displaystyle\sum_{i=1}^n\alpha_i+\sum_{k=1}^n\beta_k-\sum_{j=1}^n\gamma_j-p\delta-\sum_{k=2}^nn^2\epsilon_k&&\\ s.t.&\displaystyle\sum_{\substack{i=1\\:(i,\cdot)\in S}}^n\alpha_i+\sum_{\substack{k=1\\:(\cdot,k)\in S}}^n\beta_k-\gamma_j-\delta\\ &\displaystyle-\sum_{k=2}^n\sum_{i'=1}^n\sum_{ j'=1}^n\left(\sum_{\substack{(i,k)\in S\\:r_{i' j'}\ge r_{ij}}}\epsilon_k+\sum_{\substack{(i,k-1)\in S\\:r_{i' j'}\le r_{ij}}}\epsilon_{k}\right)&\le 0&\forall\,j,S\\ &\gamma_j&\ge0&\forall\,j\\ &\delta&\ge0&\\ &\epsilon_k&\ge0&\forall\,k.\\ \end{array} $$ We proceed to identify new variables that make the reduced master problem feasible using the dynamic programming approach replacing $c_S^j$ by zeros. Farkas pricing is an important element in our approach because it allows starting the column generation algorithm with an empty pool of columns, although this is not advisable. Furthermore, Farkas pricing will be crucial in the branching phase to recover feasibility (whenever possible) in those nodes of the branching tree where it is lost after fixing variables. \section{A branch-price-and-cut implementation\label{section:3}} In this section, we detail several components of the implementation of our set partitioning formulation based on a column generation approach. \textbf{B\&P\&C\xspace}\; is a branch-and-cut scheme that solves the linear relaxation of each node of the branching tree with the column generation algorithm previously described and may apply cuts to improve the obtained lower bound. (The reader is referred to \cite{Hossein2016} for another recent implementation of a \textbf{B\&P\&C\xspace}.) To calibrate the best choice of the different parameters used in our \textbf{B\&P\&C\xspace}, we have performed, in all tests in this section, a preliminary computational study based on a set of 60 instances with sizes $n=20,30$ and with a time limit of 1800 sec.
Those are the smallest instances that we will eventually use in Section \ref{section:4}. \subsection{Upper bound for the Master Problem: A GRASP heuristic and an initialization stage}\label{section:3.1} We now present a heuristic algorithm to generate a feasible solution for $MP$. This feasible solution will provide a promising pool of initial columns as well as a good upper bound. GRASP (\citet{Feo1989}, \citet{Feo1995}) is a well-known heuristic technique that usually exhibits good performance in short computing time. In our case, it consists in a multistart greedy algorithm to construct a set of $p$ facilities from a randomly generated set of facilities with smaller cardinality. Following \citet{Puerto2014} we have chosen, in a greedy manner, an initial set of $\lfloor p/2\rfloor$ facilities. Next, we improve this initial solution by performing a fixed number of iterations of a local search procedure. The greedy algorithm adds iteratively a new facility to the current set of open facilities, choosing the one with the maximum improvement of the objective value. The local search consists in an interchange heuristic between open and closed facilities. The pseudocode of the GRASP used to solve the problem is described in Algorithm~\ref{c3:al GRASP}. \begin{algorithm}[H] \begin{algorithmic}[1] \STATE Input($n,p,C,\lambda, n_1, n_2,q$); \FOR {$n_1$ replications} \STATE PartialSolution $\leftarrow$ ConstructRandomizedPartialSolution($q$); \STATE Solution $\leftarrow$ ConstructGreedySolution(PartialSolution); \FOR {$n_2$ iterations} \STATE Solution $\leftarrow$ LocalSearch(Solution); \STATE BestSolution $\leftarrow$ UpdateSolution(Solution, BestSolution); \ENDFOR \ENDFOR \end{algorithmic} \caption{GRASP for DOMP.\label{c3:al GRASP}} \end{algorithm} First of all, we would like to point out the remarkable behavior of the GRASP heuristic for this problem.
In order to illustrate the appropriateness of our heuristic we have solved to optimality a number of instances of the problem (using the MIP formulation) to be compared with those given by our GRASP. In all instances, up to a size of $n=100$, the solution provided by GRASP is always as good as the one obtained by any of our MIP formulations with a CPU time limit of 7200 seconds, see Section \ref{section:4}. Moreover, it is not only advisable to use the GRASP heuristic because it provides a very good upper bound thus helping the exploration of the searching tree by pruning many branches of the branch-and-bound tree, but in addition, the construction phase of the heuristic also provides a very promising pool of initial columns for the \textbf{B\&P\&C\xspace}, in combination with the technique described in the following. Since we are solving the linear relaxation of our master problem, $LRMP$, without generating its entire set of variables, using the primal simplex algorithm, the goal of the initialization phase is to find an initial set of columns that allows solving the $MP$ by performing a small number of iterations in the column generation routine. We create variables using a modification of the local search routine of the GRASP algorithm. Every time that we find a promising feasible solution in the heuristic, we create the variables that define that solution (CreateSetVariables(J)). Algorithm \ref{GRASP_initial} presents the pseudocode of this process. Function CreateSetVariables(J) determines the costs involved in the solution, i.e. the minimum for each client among the open facilities. Then those costs are ordered to determine the position of each client. Once we know the couples $(i,k)$ assigned for each open facility, the corresponding variables are added to the pool. \begin{ex}[continues=ex:firstsolution] We illustrate the use of the function CreateSetVariables(J) with the following set $J=\{1,3\}$ (open facilities).
The allocation costs for this set $J$ of open facilities are $c_{11}=1,c_{21}=3,c_{33}=1$. According to $R$, the ranks of these costs are $r_{11}=1<r_{33}=3<r_{21}=5$. Thus, we get the couples $(1,1), (3,2)$ and $(2,3)$. This means that client $1$ goes to facility $1$ in position $1$, client $3$ goes to facility $3$ in position $2$ and client $2$ goes to facility $1$ in position $3$. Therefore, the variables $y_{\{(1,1),(2,3)\}}^1$ and $y_{\{(3,2)\}}^3$ are added to the pool. \end{ex} \begin{algorithm}[H] \begin{algorithmic}[1] \STATE Input($|J|=p$); \STATE $\bar z= z(J)$; CreateSetVariables(J); \FOR {$n_2$ iterations,$j_1\in J$,$j_2\in \bar J$} \IF {$z((J\setminus\{j_1\})\cup \{j_2\})<\bar z$} \STATE $\bar z = z((J\setminus\{j_1\})\cup \{j_2\})$; $J=(J\setminus\{j_1\})\cup \{j_2\}$; CreateSetVariables(J); \ENDIF \ENDFOR \end{algorithmic} \caption{Initial columns.\label{GRASP_initial}} \end{algorithm} In order to test the helpfulness of GRASP in solving problem instances, Table \ref{c3:tableGRASP} reports results of the 60 instances of sizes $n=20,30$ enabling or not the use of the GRASP. It shows average results of CPU time (Time(s)), gap at termination, i.e. $100(z_{UB}-z_{LB})/z_{UB}$ (GAP(\%)), and number of unsolved problems (in parentheses), number of nodes (\#nodes) and number of variables ($|Vars|$). {\centering \begin{table}[H] \begin{center} {\small \begin{tabular}{rrrrr} \toprule \textbf{GRASP}&\textbf{Time(s)}&{\textbf{GAP(\%)}}&\textbf{\#nodes}&\textbf{$|Vars|$}\\ \midrule Disabled&1350.47& -- \; (40)&33&9710\\ Enabled&1200.03&2.33(35)&19&7167\\ \bottomrule \end{tabular} \caption{CPU-Time, Number of nodes and Number of variables with and without GRASP heuristic for $n=20,30$.} \label{c3:tableGRASP} } \end{center} \end{table}} According to Table \ref{c3:tableGRASP} it is clearly advisable to use the upper bound provided by the GRASP heuristic: it reduces the number of nodes, thus reducing the size of the branch-and-bound tree.
In Table \ref{t3:tableGRASP}, using the same notation as in Table \ref{c3:tableGRASP}, we report Time(s), $\#nodes$ and $|Vars|$ of all solved instances with sizes $n=20,30$. As one can observe from this table enabling the use of GRASP reduces the CPU time and number of nodes of the B\&B tree and at the same time reduces the overall number of variables required by the \textbf{B\&P\&C\xspace}. In addition, we would like to remark that by using the GRASP heuristic, \textbf{B\&P\&C\xspace}\; is able to solve 5 more instances. Moreover, for those instances for which \textbf{B\&P\&C\xspace}\; does not certify optimality, GRASP provides an upper bound that leads to an average gap of 2.33 \%. Here, we also would like to point out that without the use of GRASP, in many cases, no feasible solutions are found within the time limit and thus, no \% gap (``--'') can be reported. {\centering \begin{table}[H] \begin{center} {\small \begin{tabular}{cccc} \toprule \textbf{GRASP}&\textbf{Time(s)}&\textbf{\#nodes}&\textbf{$|Vars|$}\\ \midrule Disabled&450.80&56&7664\\ Enabled&216.29&38&4500\\ \bottomrule \end{tabular} \caption{CPU-Time, Number of nodes and Number of variables with and without GRASP heuristic for $n=20,30$. Summary of solved instances} \label{t3:tableGRASP} } \end{center} \end{table}} From our results, we have obtained that using GRASP heuristic one gets, on average, 4.91\% of the final number of variables applying Algorithm \ref{GRASP_initial}. The combination of the incumbent solution (given by GRASP) and that initial pool of variables leads to solving the considered instances faster, requiring a smaller number of nodes and variables to certify optimality. Figure \ref{performanceProfilesGRASP} reports the performance profile of GAP versus number of solved instances within a time limit of 1800 seconds, for the 60 instances with sizes $n=20,30$. The blue line reports results using GRASP and the orange one without it.
It is interesting to point out that when GRASP is enabled the \textbf{B\&P\&C\xspace}\; is able to optimally solve 25 instances and the GAP of the remaining never goes beyond 10.72\%. On the other hand, if GRASP is disabled then \textbf{B\&P\&C\xspace}\; only solves 20 instances but in addition, it is capable of obtaining a feasible solution for only 4 more instances whereas in the remaining 36 instances the gap is greater than 100\% (no feasible solution is found). \begin{figure} \caption{Performance profile graph with GRASP enabled or disabled after 1800 seconds, GAP / \# Instances } \label{performanceProfilesGRASP} \end{figure} \subsection{Stabilization\label{section:3.2}} When using a column generation procedure, the vector of dual variables may be quite different from an iteration to the next resulting in a slow convergence. For this reason, sometimes the stabilization is a critical step in order to reduce the number of variables and iterations needed to solve each reduced master problem \citep{duMerle1999}. In our approach, to perform the stabilization we follow the procedure in \cite{Pessoa2010} which depends on only one parameter. The idea consists in using a vector of dual variables which is a convex combination of the previous vector and the current solution of the dual problem. Let $\pi=(\alpha, \beta, \gamma,\delta,\epsilon,\zeta)$ be a generic vector of dual multipliers, $\overline \pi$ be the best known vector of dual multipliers (found so far) and $\pi_{ReMP}$ be the current solution of the dual problem. Let $\overline c_S^j(\pi)$ be the reduced cost of $y_S^j$ computed with the dual variable $\pi$ and $LB(\pi)$ the lower bound provided by the same vector of dual multipliers, namely $\pi$. Finally, let $z_D(\pi)$ be the value of the dual objective function of ReLRMP for the dual vector $\pi$, see (\ref{lb1}).
The stabilization algorithm that we have implemented is described by the following pseudocode: \begin{algorithm}[H] \begin{algorithmic}[1] \STATE $\Delta=\Delta_{init}$; $\overline \pi = 0$; $LB(\overline\pi)=0$; $GAP=1$; \WHILE {$GAP>\epsilon$} \STATE Solve ReLRMP, obtaining $z_{ReLRMP}$ and $\pi_{ReLRMP}$; $\pi_{st}=\Delta \pi_{ReLRMP}+(1-\Delta)\overline \pi$; \FOR {$j=1,\dots,n$} \STATE Solve the pricing using $\pi_{st}$, obtaining $S$; \STATE \textbf{if} $\overline c_S^j (\pi_{ReLRMP})< 0$ \textbf{then} Add variable $y_S^j$; \textbf{end if} \ENDFOR \STATE{$LB(\pi_{st})=z(\pi_{st}^t)+\displaystyle\sum_{\substack{S,j:y_S^j added}}\overline c_S^j(\pi_{st})$}; \IF{At least one variable was added} \IF{$LB(\pi_{st})>LB(\overline\pi)$} \STATE $\overline\pi=\pi_{st}$; $LB(\overline\pi)=LB(\pi_{st})$; \ENDIF \ELSE \STATE $\overline\pi=\pi_{st}$; $LB(\overline\pi)=LB(\pi_{st})$; \ENDIF \STATE $GAP=\frac{z_{ReLRMP}-LB(\overline\pi)}{z_{ReLRMP}}$; \STATE \textbf{if} $GAP<1-\Delta$ \textbf{then} $\Delta=1-GAP$; \textbf{end if} \ENDWHILE \end{algorithmic} \caption{Stabilization in $ReLRMP$.\label{c3: al stabilization}} \end{algorithm} In words, the algorithm performs a while loop where in each iteration it makes a convex combination of the current vector of dual multipliers and the best vector of multipliers found so far. This loop ends whenever both vectors of multipliers are close enough based on the gap between the incumbent lower bound and the actual value of the reduced master problem. It is important to realize that the coefficient (importance), $\Delta$, given in the convex combination to $\pi_{ReLRMP}$ (the current solution of ReLRMP) increases with the number of iterations of the algorithm since $\Delta=1-GAP$ and $GAP$ decreases with the number of iterations. Eventually in the very last iterations of the stabilization algorithm we will use the actual vector of dual multipliers since $\pi_{st}\approx \pi_{ReLRMP}$. 
In our implementation, we have chosen $\Delta=0.6$ based on the computational study shown in Figure \ref{performanceProfilesStabilization}. As one can observe in this figure, the best performance profile is obtained by $\Delta=0.6$ (green dashed line) because it is the configuration that solves the largest number of problems within the time limit. \begin{figure} \caption{Performance profile graph with different combinations of $\Delta_{init}$.} \label{performanceProfilesStabilization} \end{figure} In order to show the performance of the stabilization algorithm (Algorithm \ref{c3: al stabilization}), we report in Figure \ref{fig:dd} the evolution of the lower and upper bounds with respect to the number of iterations. Results reported here correspond to a single example. Stabilization generally results in a better behavior. One can realize that the dual bound is not infinity at iteration 0 and that it does not improve for some iterations. The reason is that we start with a feasible solution of the problem. \begin{figure} \caption{Stabilization disabled} \label{fig:sfig1} \caption{Stabilization enabled} \label{fig:sfig2} \caption{Bound's behavior at the root node in a particular instance on successive iterations.} \label{fig:dd} \end{figure} The control over the dual variables significantly improves the necessary number of iterations and the number of variables used to certify optimality. Note that this improvement becomes more important when $MP$ is solved using a branch-and-bound procedure because the number of variables should be small in every node. \subsection{HurryPricer: the Pricer heuristic\label{section:3.3}} The pricing subproblem can be solved optimally by the dynamic programming algorithm described in Section \ref{section:2.5} with a worst case complexity of $O(n^3)$. However, this complexity may be excessive if the number of calls to that routine is large.
For that reason, we have developed an alternative pricer heuristic that looks, in a greedy manner, for new variables in the pricing process with much less computational burden. Of course, if the heuristic does not find any variable to be added we need to resort to the exact pricer either to certify optimality or to find alternative variables that were not found in the heuristic phase. A brief pseudocode description of the heuristic pricer is given in the Appendix. \begin{algorithm} \caption{HurryPricer} \begin{algorithmic}[1] \STATE Input($\alpha, \beta, \gamma, \epsilon,\delta, \zeta$); $S = \emptyset$; \FOR{a set of selected $j$} \STATE $\bar{c}^j_S = 0$; $k'= 0$; $l = 1$; \WHILE{($k' \neq n$) and ($l < n + 1 $)} \STATE Continue = True; $k = k' + 1$; \WHILE{(Continue is True) and ($k < n +1$)} \STATE $d_{i_lj}^k = \lambda^k c_{i_lj} + r_{i_lj} \epsilon_k + (n^2 - r_{i_lj} + 1 ) \epsilon_{k-1} - \alpha_{i_l} - \beta_k $; \IF{$d_{i_lj}^k < 0$} \IF {we consider cuts} \STATE $d_{i_lj}^k = d_{i_lj}^k + \sum^n_{ i'=1} \sum^n_{\substack{ j'=1: \\ r_{ i' j'} \leq r_{i_lj}}} \zeta_{ i' j'}^k + \sum^n_{ i'=1} \sum^n_{\substack{ j'=1: \\ r_{ i' j'} \geq r_{i_lj}}} \zeta_{ i' j'}^{k-1}$; \IF{$d_{i_lj}^k < 0$} \STATE $\bar{c}^j_S = \bar{c}^j_S + d_{i_lj}^k$; $S_j = S_j \cup \{ (i_l,j) \}$; \STATE Continue = False; $k' = k$; \ENDIF \ELSE \STATE $\bar{c}^j_S = \bar{c}^j_S + d_{i_lj}^k$; $S_j = S_j \cup \{ (i_l,j) \}$; \STATE Continue = False; $k' = k$; \ENDIF \ENDIF \STATE $k = k + 1$; \ENDWHILE \STATE $l = l + 1$; \ENDWHILE \IF{$\bar{c}^j_S + \delta + \gamma_j < 0$} \STATE $S = S \cup S_j$; \ENDIF \ENDFOR \RETURN $S$; \end{algorithmic}\label{hurryPricer} \end{algorithm} In the following we analyze whether it is advisable to combine stabilization techniques and pricing heuristics in the pricing subproblem. We show in Figure \ref{performanceProfilesHP} the performance profiles of time versus number of solved instances.
From this figure one can observe that combining stabilization and Hurry Pricer seems to have a slightly better behavior than the remaining options. This conclusion is reinforced by the data shown in Table \ref{table:stabHP} based on computing time, number of variables and nodes required by the different combinations. \begin{figure} \caption{Performance profile graph of \#solved instances with different combinations of Hurry Pricer (HP) and stabilization (Stab). } \label{performanceProfilesHP} \end{figure} {\centering \begin{table}[H] \begin{center} {\small \begin{tabular}{crrrr} \toprule HP& Stab & Time (s) & Variables & Nodes\\ \midrule No&Yes&422.62&6023&38\\ Yes&No&358.41&5437&37\\ Yes&Yes&333.75&5128&33\\ \bottomrule \end{tabular} \caption{Average CPU-Time, number of variables and number of nodes with different strategies of stabilization for the 25 solved instances in 1800 seconds.} \label{table:stabHP} } \end{center} \end{table}} \subsection{Preprocessing\label{section:3.4}} In order to improve the performance of the algorithm we use two different preprocessings to set some variables to zero. Our approach is based on Claims 1 and 2 in \cite{Labbe2017}. The reader may observe that although those results fix to zero $x_{ij}^k$ variables, this variable-fixing can be translated to the new setting by the relation $x_{ij}^k=\sum_{S\ni(i,k)}y_S^j$ between the variables in $WOC$ and $MP$ formulations. Therefore, the above results imply that those variables $y_S^j$ such that $(i,k)\in S$ and $x_{ij}^k=0$ will not be considered to be added to the ReLRMP. This can be simply enforced by setting the corresponding $d_{ij}^k=0$ in every pricing subproblem. {\subsection{Branching strategies\label{section:3.5}} Branching on original variables is a common option on Mixed Integer Master Problems where some set partition constraints are involved. See for instance \citet{Johnson1989}.
In spite of that, we have also considered other branching strategies such as using the set partitioning variables or the Ryan and Foster branching, \citep{Ryan1981,Barnhart1998}. However, these two alternatives were discarded because when branching on original variables our pricing subproblem is polynomially solvable whereas using any of the other branching strategies mentioned above, makes it NP-hard. Recall that $x_{ij}^k=\sum_{S\ni(i,k)}y_S^j$, thus, a way to branch on a fractional solution can be derived directly from satisfying integrality conditions of original variables. \begin{pro} If $x_{ij}^k\in\{0,1\}$ for $i,j,k=1,\dots, n$, then $y_S^j\in\{0,1\}$. \begin{proof} Suppose on the contrary there exists a variable with fractional value $y_{S^\prime}^{j^\prime}$. Since $x_{ij}^k$ are binary for all $i,j,k$ (in particular for $i_1,j^{\prime},k_1$ where $(i_1,k_1)$ is a pair of $S^\prime$), there must be another fractional variable $y_{S^{\prime\prime}}^{j^\prime}$ such that $(i_1,k_1)\in S^{\prime\prime}$. Note that $S^{\prime\prime}\neq S^{\prime}$ since the column generation procedure never generates duplicate variables. Hence, there is a pair $(i_2,k_2)$ such that either $(i_2,k_2) \in S^{\prime}$ or $(i_2,k_2)\in S^{\prime\prime}$ but not both. Therefore, we obtain the following relationship $$1\ge\sum_{S\ni(i_1,k_1)}y_S^{j^\prime}>\sum_{S\ni(i_2,k_2)}y_S^{j^\prime}>0.$$ The first inequality comes directly from the formulation. The second inequality is strict because the term $\sum_{S\ni(i_2,k_2)}y_S^{j^\prime}$ has at least one fractional variable less than the term $\sum_{S\ni(i_1,k_1)}y_S^{j^\prime}$. The third inequality is strict because of the choice of $(i_2,k_2)$. Finally, a contradiction is found because $x_{i_2k_2}^{j^\prime}$ is not binary. \end{proof} \end{pro} The reader may note that this branching can be seen as a SOS1 branching \citep{Beale1970} since at most one of the above $y_S^j$ variables can assume the value 1.
The way to implement this branching in the pricing subproblem is to set locally (in the current node) to zero the $y_S^j$ variables which are in conflict with the condition implied by the branch $x_{ij}^k=0$ or $x_{ij}^k=1$. In the case $x_{ij}^k=0$ we set $y_S^j=0$ for all sets $S$ containing couples $(i,k)\in S$. Analogously, in the case $x_{ij}^k=1$ we set $y_S^{j'}=0$ for all sets $S$ containing $(i,k)\in S$ such that $j\ne j'$, $(i',k)\in S$ such that $i\ne i'$ or $(i,k')\in S$ such that $k\ne k'$. This condition can be transferred to the pricing subproblem modifying the $d_{ij}^k$ coefficients accordingly. Specifically, this transformation is done as follows: \begin{itemize} \item If $x_{ij}^k=0$ then $ d_{ij}^k=0.$ \item If $x_{ij}^k=1 $ then $ \left\{\begin{array}{l} d_{i j'}^k=0,\quad \forall j'\neq j.\\ d_{i' j'}^k=0,\quad \forall j'\forall i'\neq i.\\ d_{i j'}^{ k'}=0,\quad \forall j'\forall k'\neq k.\\ \end{array}\right.$ \end{itemize} Moreover, it is also well-known that branching on SOS constraints (original variables) gives rise to more balanced branching trees (see e.g. Chapter 7 of \citet{Wolsey1998}) than branching on the variables of $MP$. Among the fractional original variables one has to decide which will be the next variable to branch on. One of the easiest techniques for this choice is to consider the \emph{most fractional variable}. This is not difficult to implement but it is not better than choosing randomly \citep{Achterberg2005}. Alternative techniques are \emph{pseudocost branching} \citep{Benichou1971} or \emph{strong branching} \citep{Applegate1995} although they are rather costly. This issue has motivated us to propose another rule to select the variable to branch on, based on the improvement of the bounds in each of the new created nodes.
We use the following indices corresponding to the down and up branches of the variable $x_{ij}^k$: \begin{equation} \varsigma_{ij}^{k,-}=\frac{\lambda^kc_{ij}}{x_{ij}^k}\text{ and }\varsigma_{ij}^{k,+}=\frac{\lambda^kc_{ij}}{1-x_{ij}^k}. \label{branch-n1} \end{equation} They account, respectively, for the unitary contribution to the objective function due to fixing the variable $x_{ij}^k$ either to zero (down branching) or to one (up branching). Branching down stimulates the improvement of the lower bound, whereas branching up helps the problem to find integer solutions. We have tested several strategies that make use of the indices, $\varsigma$, defined above. \begin{description} \item[ Strategy 1: ] $\arg\min\{\theta\varsigma_{ij}^{k,-}+(1-\theta)\varsigma_{ij}^{k,+}:0<x_{ij}^k<1\}$ \item[ Strategy 2: ] $\arg\min\{\min\{\varsigma_{ij}^{k,-},\varsigma_{ij}^{k,+}\}:0<x_{ij}^k<1\}$ \item[ Strategy 3: ] $\arg\min\{\max\{\varsigma_{ij}^{k,-},\varsigma_{ij}^{k,+}\}:0<x_{ij}^k<1\}$. \end{description} Based on our computational experience (see Figure \ref{performanceProfilesBranching}), we have concluded that the best strategy to choose the next variable to branch on corresponds to strategy 1 with $\theta=0.5$. \begin{figure} \caption{Performance profile graph of \#solved instances using different branching strategies.} \label{performanceProfilesBranching} \end{figure} Each node of the branching tree can be fathomed before it is fully processed comparing lower bounds, as given by (\ref{lb1}) and (\ref{lb2}), with the current incumbent solution. This strategy implies reducing the number of calls to the pricing subproblem and as a result savings in the number of variables added to the restricted master problem. \subsection{Valid inequalities\label{section:3.6}} Clearly, the addition of valid inequalities (\ref{cuts}) to $MP$ modifies the structure of the master problem and thus the pricing must be modified accordingly.
Let us denote by $\zeta_{ij}^k$ the dual variable associated with valid inequality (\ref{cuts}) for indices $i,j,k$. After some calculation, one obtains the following expression of the reduced costs of variable $y_S^j$: \scriptsize $$\overline c_S^j=c_S^j+\gamma_j^*+\delta^*+\sum_{k=2}^n\sum_{i'=1}^n\sum_{ j'=1}^n\left(\sum_{\substack{(i,k)\in S\\:r_{i' j'}\ge r_{ij}}}(\epsilon_k^*+\zeta_{i' j'}^{k*})+\sum_{\substack{(i,k-1)\in S\\:r_{i' j'}\le r_{ij}}}(\epsilon_{k}^*+\zeta_{i' j'}^{k*})\right)-\sum_{\substack{i=1\\:(i,\cdot)\in S}}^n\alpha_i^*-\sum_{\substack{k=1\\:(\cdot,k)\in S}}^n\beta_k^*.$$ \normalsize Furthermore, solving the pricing subproblem to find a new column or to certify optimality of the column generation algorithm requires to adapt the dynamic programming algorithm that computes the $g(i_l,k)$ terms using the new dual multipliers. This implies modifying the $D_j$ matrices. Once again, after some calculations the modified $d_{ij}^k$ elements are now given by: \scriptsize $$d_{ij}^k=\left\{\begin{array}{ll}\displaystyle\lambda^kc_{ij}+\sum_{i'=1}^n\sum_{\substack{ j'=1\\:r_{i' j'}\le r_{ij}}}(\epsilon_{k+1}+\zeta_{i' j'}^{k+1})-\alpha_i-\beta_k&\text{if }k=1\\ \displaystyle\lambda^kc_{ij}+\sum_{i'=1}^n\sum_{\substack{ j'=1\\:r_{i' j'}\ge r_{ij}}}^n(\epsilon_k+\zeta_{i' j'}^{k})+\sum_{i'=1}^n\sum_{\substack{ j'=1\\:r_{i' j'}\le r_{ij}}}(\epsilon_{k+1}+\zeta_{i' j'}^{k+1})-\alpha_i-\beta_k&\text{if }k=2,\dots,n-1\\ \displaystyle\lambda^kc_{ij}+\sum_{i'=1}^n\sum_{\substack{ j'=1\\:r_{i' j'}\ge r_{ij}}}^n(\epsilon_k+\zeta_{i' j'}^{k})-\alpha_i-\beta_k,&\text{if }k=n.\\\end{array}\right.$$ \normalsize These new elements allow us to apply the adapted column generation algorithm to solve LRMP, reinforced with valid inequalities (\ref{cuts}). The implementation details of how to adapt these new elements within the pricer and the hurry pricer can be found in Appendix \ref{ap:hp}.
To justify the use of the mentioned cuts we have done some preliminary computational experiments with instances of sizes $n=50$ and $60$. Table \ref{ResultsCuts} compares the behavior of the standard branch-and-price without cuts, ($\textbf{B\&P}(MP)$), against the strategy with cuts, $\textbf{B\&P\&C}(MP)$. \begin{table}[!ht] \begin{center} \scriptsize \begin{tabular}{ll|rrr|rrr} & & \multicolumn{3}{c|}{$n=50$} & \multicolumn{3}{c}{$n=60$} \\ & & $p=12$ & $p=16$ & $p=25$& $p=15$ & $p=20$ & $p=30$ \\ \hline \textbf{B\&P} & $Time(s)$&7200.00&7200.00&7200.00&7200.00&7200.00&7200.00 \\ (MP) & $|Vars|$&30277&24410&16617&28443&24146&19996 \\ & $|Nodes|$&1016&2728&6149&1091&2013&3736 \\ & $\# unsolved$&10&10&10&10&10&10 \\ & $Gap(\%)$ &6.44&7.60&9.45&8.20&8.83&11.59 \\\hline \textbf{\textbf{B\&P\&C\xspace}} & $Time(s)$&7200.00&7200.00&6697.44&7200.00&6864.94&7200.00\\ (MP) & $|Vars|$&14971&13627 &13725 &21094&16077&17634 \\ & $|Nodes|$&55&1 &1&599&535&512 \\ & $|Cuts|$&7807&7907 &9183&12999&16061&13342 \\ & $\# unsolved$&10&10 &9 &10&9&10 \\ & $Gap(\%)$&3.96&5.06 &3.87&7.04&6.83&7.48 \\\hline \end{tabular} \caption{Numerical results with and without cuts } \label{ResultsCuts} \end{center} \end{table} From Table \ref{ResultsCuts}, we conclude that it is always better to add cuts because the final gap is always smaller with this strategy. This solution scheme has been implemented and the results are reported in the next section. \section{Computational Experiments\label{section:4}} The \textbf{B\&P\&C\xspace}\; implementation of the formulation $MP$ has been experimentally compared with the B\&C implementation of the formulation $WOC$ on the instances detailed below. The \textbf{B\&P\&C\xspace} \; algorithm considered in these experiments is based on the description in the previous section. The computer used for these tests has an Intel Core i7 CPU clocked at 2.8GHz with 8Gb of RAM.
Each implementation has a maximum of 7,200 seconds (2 hours) to solve each individual instance. Both implementations use SCIP 4.0's API (see \cite{Maher2017}) and both are calling the LP solver of IBM ILOG Cplex 12.6.1. \subsection{Instances\label{section:4.1}} Since no standard libraries of instances for DOMP are available in public repositories we generate our own instances with the \textit{pseudorandom} number generator from the C random library. We consider 9 sets of 30 instances. Each set has a different number of clients such that $n \in \{20,30,40,50,60,70,80,90,100\}$. For a given $n$, we generate one subset of 10 instances for each value of $p$, where $p \in \{ \left \lfloor{(n/4)} \right \rfloor, \left \lfloor{(n/3)} \right \rfloor, \left \lfloor{(n/2)} \right \rfloor \}$. For a given $n$, we first randomly generate the Cartesian coordinates of the potential servers in the square $[0,400]^2$. Then, we calculate the cost for each pair of clients with the Euclidean distance between the two related nodes in the square. We round each distance to the nearest integer to build the cost matrices. We also fix the values of the matrix diagonal to the smallest admissible cost to avoid free self service. Finally, we randomly generate the weighted ordered vector $\lambda$ such that, for each potential server $i=1,\dots, n$, $\lambda_i \in [n/4,n]$. The parameters for the generation process are given in Table \ref{ParamGenInst}.
\begin{table}[!ht] \begin{center} \begin{tabular}{l||ccc|ccc|ccc|ccc|ccc} $n$ & \multicolumn{3}{c|}{20} & \multicolumn{3}{c|}{30} & \multicolumn{3}{c|}{40} & \multicolumn{3}{c|}{50} & \multicolumn{3}{c}{60} \\ \hline $p$ & 5 & 6 & 10 & 7 & 10 & 15 & 10 & 13 & 20 & 12 & 16 & 25 & 15 & 20 & 30 \\ \hline $\lambda$ & \multicolumn{3}{c|}{$[5,20]^n$} & \multicolumn{3}{c|}{$[7,30]^n$} & \multicolumn{3}{c|}{$[10,40]^n$} & \multicolumn{3}{c|}{$[12,50]^n$} & \multicolumn{3}{c}{$[15,60]^n$} \\ \hline\hline $n$ & \multicolumn{3}{c|}{70} & \multicolumn{3}{c|}{80} & \multicolumn{3}{c|}{90} & \multicolumn{3}{c|}{100} \\ \hline $p$ & 17 & 23 & 35 & 20 & 26 & 40 & 22 & 30 & 45 & 25 & 33 & 50 \\ \hline $\lambda$ & \multicolumn{3}{c|}{$[17,60]^n$} & \multicolumn{3}{c|}{$[20,80]^n$} & \multicolumn{3}{c|}{$[22,90]^n$} & \multicolumn{3}{c|}{$[25,100]^n$} \\ \end{tabular} \caption{Parameters for the generation of the instances.} \label{ParamGenInst} \end{center} \end{table} All these instances, with $n$ up to 100, are available at \url{http://gom.ulb.ac.be/domp_repo/}. \subsection{$MP$ vs $WOC$ linear relaxations \label{section:4.2}} We assess experimentally the linear relaxation of $MP$ by comparing with $WOC$ on all the instances generated. For these experiments, no cuts have been applied. In Table \ref{ResultsLP20and30}, we report averages of the numerical results of the linear relaxation for both formulations. We report the values $GapLP(\%)$ which are the percentage gaps between the optimal integer values $z^*$ (alternatively the best known solution) and the linear relaxation optimal values $z^*_{LP}$ such that $GapLP(\%) = 100 (z^* - z^*_{LP})/ z^*$. We also report the computational times (in seconds). Table \ref{ResultsLP20and30} also includes average number of variables ($|Vars|$) and required memory ($Memory(MB)$).
The reader can see that, in terms of time, $MP$ has some room for improvement as compared with the professional implementation of Cplex used for solving $WOC$. On the contrary, we highlight the small number of variables that are used to certify optimality with this column generation approach $MP$. \begin{table}[!ht] \begin{center} \scriptsize \begin{tabular}{ll|rrr|rrr} & & \multicolumn{3}{c|}{$n=20$} & \multicolumn{3}{c}{$n=30$} \\ & & $p=5$ & $p=6$ & $p=10$ & $p=7$ & $p=10$ & $p=15$ \\ \hline \textbf{B\&C} & $GapLP(\%)$ & 8.64 & 8.66 & 13.13 & 9.45 & 10.38 & 14.28 \\ (WOC) & $Time(s)$ & 0.14 &0.14& 0.12 & 0.70 &0.68 & 0.62 \\ & $|Vars|$ & 8020 &8020&8020 & 27030 &27030 & 27030 \\ & $Memory(MB)$ & 35 &35&35 & 101 &101 & 101 \\\hline \textbf{\textbf{B\&P\&C\xspace}} & $GapLP(\%)$ & 7.87 & 8.03 & 12.70 & 8.46 & 9.81 & 13.83 \\ (MP) & $Time(s)$ & 1.19 & 0.89 & 0.63 & 6.04 & 3.98 & 3.74 \\ & $|Vars|$ & 724 &656&537 & 1754 &1570 & 1484 \\ & $Memory(MB)$ & 7 &6&4 & 20 &17 & 14 \\\hline & & \multicolumn{3}{c|}{$n=40$} & \multicolumn{3}{c}{$n=50$} \\ & & $p=10$ & $p=13$ & $p=20$ & $p=12$ & $p=16$ & $p=25$ \\ \hline \textbf{B\&C} & $GapLP(\%)$ & 9.43 & 11.00 & 15.35 & 7.34 & 8.77 & 12.97 \\ (WOC) & $Time(s)$ & 2.51 &2.34& 2.09 & 7.35 &6.37 & 6.25 \\ & $|Vars|$ & 64040 &64040&64040 & 125050 &125050 & 125050 \\ & $Memory(MB)$ & 235 &235&235 & 451 &451 & 451 \\\hline \textbf{\textbf{B\&P\&C\xspace}} & $GapLP(\%)$ & 9.11 & 10.75 & 15.18& 6.98 & 8.51 & 12.76 \\ (MP) & $Time(s)$ & 17.61 & 13.85 & 12.17 & 40.75 & 33.72 & 33.13 \\ & $|Vars|$ & 3370 &3149&3111 & 5355 &5182 & 5175 \\ & $Memory(MB)$ & 46 &39&35 & 82 &72 & 68 \\\hline & & \multicolumn{3}{c|}{$n=60$} & \multicolumn{3}{c}{$n=70$} \\ & & $p=15$ & $p=20$ & $p=30$ & $p=17$ & $p=23$ & $p=35$ \\ \hline \textbf{B\&C} & $GapLP(\%)$ & 8.84 & 9.95 & 14.43 & 8.04 & 9.19 & 13.73 \\ (WOC) & $Time(s)$ & 15.98 &13.30& 12.27 & 40.78 &35.65 & 29.40 \\ & $|Vars|$ & 216060 &216060&216060 & 125050 &343070 & 343070 \\ & $Memory(MB)$ & 764 
&764&764 & 1214 &1214 & 1214 \\\hline \textbf{\textbf{B\&P\&C\xspace}} & $GapLP(\%)$ & 8.56 & 9.71 & 14.25 & 7.79 & 9.04 & 13.62 \\ (MP) & $Time(s)$ & 94.79 & 72.47 & 92.70 & 176.21 & 157.19 & 212.97 \\ & $|Vars|$ & 8146 &7592&9069 & 11112 &11250 & 13648 \\ & $Memory(MB)$ & 139 &120&142 & 211 &202 & 244 \\\hline & & \multicolumn{3}{c|}{$n=80$} & \multicolumn{3}{c}{$n=90$} \\ & & $p=20$ & $p=26$ & $p=40$ & $p=22$ & $p=30$ & $p=45$ \\ \hline \textbf{B\&C} & $GapLP(\%)$ & 8.65 & 7.65 & 7.12 & 8.70 & 6.60 & 6.69 \\ (WOC) & $Time(s)$ & 67.42 &58.14& 47.63 & 128.70 &96.74 & 82.19 \\ & $|Vars|$ & 512080 &512080&512080 & 729090 &729090 & 729090 \\ & $Memory(MB)$ & 1830 &1830&1830 & 2561 &2561 & 2561 \\\hline \textbf{\textbf{B\&P\&C\xspace}} & $GapLP(\%)$ & 8.53 & 7.48 & 7.08 & 8.55 & 6.55 & 6.66 \\ (MP) & $Time(s)$ & 352.75 & 264.60 & 210.92 & 713.28 & 459.07 & 404.18 \\ & $|Vars|$ & 15704 &14163&11851 & 21566 &18451 & 16205 \\ & $Memory(MB)$ & 330 &280&214 & 513 &404 & 336 \\\hline & & \multicolumn{3}{c|}{$n=100$} \\ & & $p=25$ & $p=33$ & $p=50$ \\ \hline \textbf{B\&C} & $GapLP(\%)$ & -- & -- & -- \\ (WOC) & $Time(s)$ & -- &--& -- \\ & $|Vars|$ & 1000100 &1000100&1000100 \\ & $Memory(MB)$ & $>$4096 &$>$4096&$>$4096 \\\hline \textbf{\textbf{B\&P\&C\xspace}} & $GapLP(\%)$ & 7.94 & 7.40 & 6.59 \\ (MP) & $Time(s)$ & 1417.65 & 939.40 & 667.86 \\ & $|Vars|$ & 30202 &26068&21101 \\ & $Memory(MB)$ & 809 &656&482 \\\hline \end{tabular} \caption{Numerical results on linear relaxation for $WOC$ and $MP$ } \label{ResultsLP20and30} \end{center} \end{table} As expected, according to Proposition \hat{\rho}ef{c3-pro-inclusionMP-DOMP}, the integrality gap of formulation $MP$ outperforms the one by $WOC$. Moreover, formulation $MP$ also outperforms $WOC$ in number of required variables (see Figure \hat{\rho}ef{performanceProfiles1}) which results in much smaller memory requirements (see Figure \hat{\rho}ef{performanceProfiles2}). 
Indeed, the implementation of $WOC$ fails to solve, already for sizes of $n=100$, the linear relaxation of all instances by lack of RAM memory; whereas with the same parameter configuration, formulation $MP$ does not experience that problem. Figure \ref{performanceProfiles2} shows the performance profile of the memory requirement of both formulations. As one can see \textbf{B\&P\&C}(MP) outperforms $WOC$ with respect to this factor for all instance sizes. \begin{figure} \caption{\small Graph of Number of Variables versus size $n$ for \textbf{B\&C}(WOC) and \textbf{B\&P\&C\xspace}(MP).} \label{performanceProfiles1} \caption{\small Graph of Memory usage (Mb) versus size $n$ for \textbf{B\&C}(WOC) and \textbf{B\&P\&C\xspace}(MP).} \label{performanceProfiles2} \end{figure} \subsection{\textbf{B\&P\&C\xspace}(MP) vs B\&C (WOC)\label{section:4.3}} We now compare the \textbf{B\&P\&C\xspace}\; implementation of $MP$ with the B\&C implementation of $WOC$. The former is a branch-price-and-cut algorithm and the latter a branch-and-cut. \begin{table}[!ht] \begin{center} \scriptsize \begin{tabular}{ll|rrr|rrr} & & \multicolumn{3}{c|}{$n=20$} & \multicolumn{3}{c}{$n=30$} \\ & & $p=5$ & $p=6$ & $p=10$ & $p=7$ & $p=10$ & $p=15$ \\ \hline \textbf{B\&C} & $Time(s)$&16.54&11.50&4.48&1807.41&1578.21 &131.89 \\ (WOC) & $|Vars|$&6054&5706&4211&20643&18245&13952\\ & $|Nodes|$&1215&440&38&198424&305595&19197 \\ & $|Cuts|$&1537&1249&689&4789&3056 &2519 \\ & $\# unsolved (T/M)$&0/0&0/0&0/0&1/1&1/0 &0/0 \\ & $Gap(\%)$&0.00&0.00&0.00&0.63&0.12 &0.00 \\\hline \textbf{\textbf{B\&P\&C\xspace}} & $Time(s)$&3425.38&2220.55&159.35&6011.22&6298.75&4849.08\\ (MP) & $|Vars|$&13477&9054&4451&9493&11427 &11464 \\ & $|Nodes|$&24&21&54&2&15 &26 \\ & $|Cuts|$&1289&1028&543&3945&2520 &2162 \\ & $\# unsolved$&2&1&0&8&8 &6 \\ & $Gap(\%)$&0.45&0.14&0.00&1.38&1.18 &0.90 \\\hline & & \multicolumn{3}{c|}{$n=40$} & \multicolumn{3}{c}{$n=50$} \\ & & $p=10$ & $p=13$ & $p=20$ & $p=12$ & $p=16$ & $p=25$ \\ \hline \textbf{B\&C} & $Time(s)$&7050.93&7061.36&6202.85&7200.00&7116.54 &6575.59
\\ (WOC) & $|Vars|$&48065&43664&32820&94784&85630 &63776 \\ & $|Nodes|$&602685&628962&605812&270959&284028 &355560 \\ & $|Cuts|$&7939&6559&4727&12579&10423 &10131 \\ & $\# unsolved (T/M)$&7/3&8/2&8/0&10/0&9/1 &9/0 \\ & $Gap(\%)$&1.65&2.30&2.45&0.90&1.13 &1.32 \\\hline \textbf{\textbf{B\&P\&C\xspace}} & $Time(s)$&7200.00&6572.81&6709.53&7200.00&7200.00&6697.44\\ (MP) & $|Vars|$&10278&10170&12096&14971&13627 &13725 \\ & $|Nodes|$&1&1&2&55&1 &1 \\ & $|Cuts|$&5436&5073&4734&7807&7907 &9183 \\ & $\# unsolved$&10&9&9&10&10 &9 \\ & $Gap(\%)$&5.54&4.36&3.72&3.96&5.06 &3.87 \\\hline & & \multicolumn{3}{c|}{$n=60$} & \multicolumn{3}{c}{$n=70$} \\ & & $p=15$ & $p=20$ & $p=30$ & $p=17$ & $p=23$ & $p=35$ \\ \hline \textbf{B\&C} & $Time(s)$&2768.88&3306.54&6707.38&1842.00&2119.13&2474.98\\ (WOC) & $|Vars|$&161807&144983&109804&259406&231680 &173955 \\ & $|Nodes|$&1&20330&85723&1&1 &835 \\ & $|Cuts|$&18081&19887&15676&16115&23603 &19238 \\ & $\# unsolved (T/M)$&0/8&2/8&8/2&0/10&0/10 &2/8 \\ & $Gap(\%)$&2.74&2.86&1.78&5.67&5.77 &7.12 \\\hline \textbf{\textbf{B\&P\&C\xspace}} & $Time(s)$&7200.00&6864.94&7200.00&7200.00&7200.00&7200.00\\ (MP) & $|Vars|$&21094&16077&17634&31949&32345&22175 \\ & $|Cuts|$&12999&16061&13342&14722&20532 &19240 \\ & $|Cuts|$&8917&13099&12406&5252&2058 &17238 \\ & $\# unsolved$&10&9&10&10&10 &10 \\ & $Gap(\%)$&7.04&6.83&7.48&6.95&8.14 &8.35 \\\hline & & \multicolumn{3}{c|}{$n=80$} & \multicolumn{3}{c}{$n=90$} \\ & & $p=20$ & $p=26$ & $p=40$ & $p=22$ & $p=30$ & $p=45$ \\ \hline \textbf{B\&C} & $Time(s)$&2902.00&2886.25&3428.13&5999.16&5214.89 &6243.49 \\ (WOC) & $|Vars|$&383199&346926&259186&549561&488316 &368560 \\ & $|Nodes|$&1&1&1&1&1 &1 \\ & $|Cuts|$&27129&25187&12406&46216&32406 &12157 \\ & $\# unsolved (T/M)$&0/10&0/10&0/10&0/10&0/10 &7/3 \\ & $Gap(\%)$&6.50&5.28&3.26&6.37&4.42 &4.06 \\\hline \textbf{\textbf{B\&P\&C\xspace}} & $Time(s)$&7200.00&7200.00&7200.00&7200.00&7200.00&7200.00\\ (MP) & $|Vars|$&41971&34634&17640&41239&36230 &23826 \\ & 
$|Nodes|$&384&1196&1&294&625 &314 \\ & $|Cuts|$ &27360&24059&13884&43721&31810&11061 \\ & $\# unsolved$&10&10&10&10&10 &10 \\ & $Gap(\%)$&8.33&7.09&3.16&8.37&6.14 &4.56 \\\hline & & \multicolumn{3}{c|}{$n=100$} \\ & & $p=25$ & $p=33$ & $p=50$ \\ \hline \textbf{B\&C} & $Time(s)$&--&--&--& \\ (WOC) & $|Vars|$&&&&& & \\ & $|Nodes|$&--&--&--& \\ & $|Cuts|$&--&--&--& \\ & $\# unsolved (T/M)$&--&--&--& \\ & $Gap(\%)$&--&--&--& \\\hline \textbf{\textbf{B\&P\&C\xspace}} & $Time(s)$&7200.00&7200.00&7200.00&&&\\ (MP) & $|Vars|$&40905&40552&31199&& & \\ & $|Nodes|$&319&389&68&& & \\ & $|Cuts|$&77889&54296&15408&& & \\ & $\# unsolved$&10&10&10&& & \\ & $Gap(\%)$&7.77&7.12&5.49&& & \\\hline \end{tabular} \caption{Numerical results for \textbf{B\&C}(WOC) and \textbf{B\&P\&C\xspace}(MP) } \label{Results20to100} \end{center} \end{table} The results are reported in Table~\ref{Results20to100}. In that table, we denote by $Time(s)$ the average computational time (in seconds) required by each method to obtain an optimal solution for a given set of 10 instances defined by number of clients ($n$) and number of open facilities ($p$). We report 7200 s. in those cases where the optimal solution is not obtained in 2 hours. With $|Vars|$ we refer to the average of the numbers of variables used by $MP$ or $WOC$. We also denote by $|Nodes|$ and $|Cuts|$ the average of the number of nodes explored and the average of the number of cuts used, respectively, in the corresponding methodology. The row $\#unsolved(T/M)$ in the case of \textbf{B\&C}(WOC) reports the number of unsolved instances out of the 10 in each group. It distinguishes between those instances not solved by exceeding the maximum running time ($T$) or the memory limits ($M$). Observe that in the similar row within the blocks \textbf{B\&P\&C}(MP) no distinction is shown since the memory limit is never reached and instances not solved are only due to time limitations. 
Finally, we also include in our report the gap at termination ($GAP(\%)$). Analyzing further the results in Table~\ref{Results20to100}, we conclude that on average \textbf{B\&C}(WOC) is faster than \textbf{B\&P\&C}(MP). This behavior can be explained by the professional implementation of Cplex to handle the branching tree and its sophisticated branching strategies that we cannot reproduce in our implementation. On the other hand, note the much smaller number of variables and, thus, memory requirements used by \textbf{B\&P\&C\xspace} (MP) as compared with \textbf{B\&C}(WOC). Actually, one of the most important features of our $MP$ formulation is that it needs far fewer variables than $WOC$, allowing us to solve larger instances with $MP$ that were not affordable for the original $WOC$. We also observe that the number of required cuts for \textbf{B\&P\&C\xspace} (MP) is smaller than for \textbf{B\&C}(WOC). This could be explained by the tightness of \textbf{B\&P\&C\xspace} (MP) with respect to \textbf{B\&C}(WOC). After adding cuts, \textbf{B\&P\&C\xspace} (MP) is able to solve the problem in many of the cases at the root node. This behavior does not occur for \textbf{B\&C}(WOC). The number of instances solved to optimality, for small size instances up to $n=40$, is slightly better for \textbf{B\&C}(WOC). As the size increases this number is similar in both cases. Gaps at termination, after 7200 seconds, are always smaller than $8\%$ for \textbf{B\&P\&C\xspace} (MP) and smaller than $7.15\%$ for \textbf{B\&C}(WOC), with the latter being slightly better. For the larger instances of $n=80,90$ gaps are similar. Finally, \textbf{B\&C}(WOC) was not able to handle any instance with $n=100$ (reporting out of memory flags) whereas \textbf{B\&P\&C\xspace} (MP) reports the same performance as for the previous sizes. 
To conclude, despite the promising better root node gap, and the features developed for \textbf{B\&P\&C\xspace}(MP), such as the stabilization, hurry pricer, cuts, etc., the overall performance of this framework in solving DOMP is not systematically better than the branch-and-cut formulation \textbf{B\&C}(WOC). In small to medium size instances \textbf{B\&C}(WOC) is faster and achieves slightly smaller gaps. Nevertheless, in larger size instances performance is similar. Moreover, as expected, we were able to handle the largest considered sizes $(n=100)$ only with \textbf{B\&P\&C\xspace} (MP) and not with \textbf{B\&C}(WOC). \section{Conclusions\label{section:5}} This paper presents a first branch-price-and-cut, \textbf{B\&P\&C\xspace}(MP), algorithm for solving DOMP. This approach is based on an extended formulation using an exponential number of variables coming from a set partitioning model. Elements in the partitions are couples containing information about a client and its sorted position in the sorted sequence of allocation costs. To address the solution of this formulation we develop a column generation algorithm and we prove that the pricing routine is polynomially solvable by a dynamic programming algorithm. We embed the column generation algorithm within a branch-and-price framework. Furthermore, we adapt preprocessing and incorporate families of valid inequalities that improve its performance. Extensive computational results compare the performance of our \textbf{B\&P\&C\xspace}(MP) against the most recent algorithm in the literature for DOMP, \textbf{B\&C}(WOC), showing that for the largest considered instances \textbf{B\&P\&C\xspace}(MP) performs better and it requires less memory to upload and run the models. \appendix \section{Appendix} \subsection{GRASP\label{c3:ss35}} In the following we report the detailed implementation of the functions \textit{ConstructGreedySolution} and \textit{LocalSearch} in the GRASP Algorithm~\ref{c3:al GRASP}. 
\begin{algorithm}[H] \begin{algorithmic}[1] \small \STATE Input($|J|=q\le p$); \WHILE {$|J|< p$} \STATE $j^*=\emptyset$; \STATE $value=M$; \FOR {$j\in \bar J$} \IF {$z(J\cup \{j\})<value$} \STATE $value=z(J\cup \{j\})$; \STATE$j^*=\{j\}$; \ENDIF \ENDFOR \STATE$J=J\cup\{j^*\}$; \ENDWHILE \end{algorithmic} \caption{{ConstructGreedySolution}.\label{c3:al GRASP}} \end{algorithm} \begin{algorithm}[H] \begin{algorithmic}[1] \small \STATE Input($|J|=p$); \STATE $\bar z= z(J)$; \FOR {$n_2$ iterations} \FOR {$j_1\in J$} \FOR {$j_2\in \bar J$} \IF {$z((J\setminus\{j_1\})\cup \{j_2\})<\bar z$} \STATE $\bar z = z((J\setminus\{j_1\})\cup \{j_2\})$ \STATE$J=(J\setminus\{j_1\})\cup \{j_2\}$ \ENDIF \ENDFOR \ENDFOR \ENDFOR \end{algorithmic} \caption{{LocalSearch(Solution)}.\label{c3:al LocalSearch}} \end{algorithm} \subsection{Handling cuts within the Hurry pricer\label{ap:hp}} The following algorithms try to avoid useless calculations in Algorithm~\ref{hurryPricer} while we handle the $\zeta$ values (dual multipliers of the cuts). The idea is that, because the cuts are relatively rare, the $\zeta$ are often equal to zero. For example, in one of our experiments, we activated only 58 cuts among a maximum of 64 000, solving an $n=40$ instance. We need to save the index for each new cut added. We note $ListOfBiIndex$ the sorted 3-tuple list of indices ($c_i$,$c_j$,$c_k$) for each cut $c$. It is sorted by $k$ and then according to the costs. This list is updated after each separator has been called. Then, we can have several pricings using the same $ListOfBiIndex$, while the duals $\zeta$ are changing at each iteration. We note $VVP$ the vector of vectors of pairs which saves the increasing and decreasing sums of $\zeta$. The increasing sums are accessible by $first$ and the decreasing sums by $second$. First, we fill out a data structure $VVP$ with the right sum for each individual tuple of indices from $ListOfBiIndex$ and for all $k=1..n$ (cf. 
Algorithm~\ref{FastSumsDualCutsValues}). Second, we finish filling out $VVP$ for the other indices using the existing sums. \begin{algorithm} \caption{FastSumsDualCutsValues} \begin{algorithmic}[1] \STATE Take the list of tuples $ListOfBiIndex$ from the last call of the Separator ; \STATE Take the duals $\zeta$ from the last restricted MP resolution ; \STATE $kPrevious = 0$ ; Initialize all $VVP$ with 0 ; \FOR{the 3-tuple ($(index = (i,j))$,$k$) in the normal order of $ListOfBiIndex$} \IF{$kPrevious \neq k$} \STATE $Previous = 0$ ; \STATE $kPrevious = k$ ; \ENDIF \STATE $VVP[index][k].first = Previous + \zeta_{ij}^k$ ; \STATE $Previous = VVP[index][k].first$ ; \ENDFOR \STATE $kPrevious = 0$ ; \FOR{the 3-tuple ($(index_r = (i_r,j_r))$,$k_r$) in the reverse order of $ListOfBiIndex$} \IF{$kPrevious \neq k_r$} \STATE $Previous = 0$ ; \STATE $kPrevious = k_r$ ; \ENDIF \STATE $VVP[index_r][k_r].second = Previous + \zeta_{i_rj_r}^{k_r}$ ; \STATE $Previous = VVP[index_r][k_r].second$ ; \ENDFOR \STATE \RETURN $VVP$; \end{algorithmic}\label{FastSumsDualCutsValues} \end{algorithm} This first algorithm will fill out the structure $VVP$ with the sums of the dual $\zeta$. $first$ gives the dimension saving the sums in the increasing order, in order to have directly the value $ \sum^n_{ i'=1} \sum^n_{\substack{ j'=1: \\ C_{ i' j'} \leq C_{i_lj}}} \zeta_{ i' j'}^k $ and $second$ determines the reverse order to obtain $ \sum^n_{ i'=1} \sum^n_{\substack{ j'=1: \\ C_{ i' j'} \geq C_{i_lj}}} \zeta_{ i' j'}^{k-1}$ faster. Algorithm~\ref{SpreadSums} takes as input the $VVP$ updated from the last call of Algorithm~\ref{FastSumsDualCutsValues}. It will copy the non-zero sums (i.e.\ from the indices for which we added a cut) to the other cells such that the value of the current cell (so with a ``non-cut'' index) is equal, for the same $k$, to the last sum in the increasing or decreasing order (resp. for $first$ and $second$ dimensions). 
\begin{algorithm} \caption{SpreadSums} \begin{algorithmic}[1] \FOR{$k=1..n$} \STATE $Current = 0$ ; $Current_r = 0$ ; \FOR{$index=1..n^2$} \IF{$VVP[index][k].first \neq 0$} \STATE $Current = VVP[index][k].first$ ; \ELSE \STATE $VVP[index][k].first = Current$ ; \ENDIF \STATE $index_r = 1 + n^2 - index$ ; \IF{$VVP[index_r][k].second \neq 0$} \STATE $Current_r = VVP[index_r][k].second$ ; \ELSE \STATE $VVP[index_r][k].second = Current_r$ ; \ENDIF \ENDFOR \ENDFOR \STATE \RETURN $VVP$; \end{algorithmic}\label{SpreadSums} \end{algorithm} We can now replace the time-consuming instruction of Algorithm~\ref{hurryPricer}: ``$d_{i_lj}^k = d_{i_lj}^k + \sum^n_{ i'=1} \sum^n_{\substack{ j'=1: \\ C_{ i' j'} \leq C_{i_lj}}} \zeta_{ i' j'}^k + \sum^n_{ i'=1} \sum^n_{\substack{ j'=1: \\ C_{ i' j'} \geq C_{i_lj}}} \zeta_{ i' j'}^{k-1}$ ;'' with the following instruction: ``$d_{i_lj}^k = d_{i_lj}^k + VVP[index=(i,j)][k].first + VVP[index=(i,j)][k-1].second$ ;'' \end{document}
\begin{document} \title{Completeness of the ZH-calculus} \date{} \author{Miriam Backens} \email{[email protected]} \affiliation{School of Computer Science, University of Birmingham, Edgbaston, Birmingham B15 2TT, UK} \author{Aleks Kissinger} \email{[email protected]} \affiliation{Department of Computer Science, University of Oxford, Wolfson Building, Parks Road, Oxford OX1 3QD, UK} \author{Hector Miller-Bakewell} \email{[email protected]} \homepage{https://hjmb.co.uk/} \affiliation{Department of Computer Science, University of Oxford, Wolfson Building, Parks Road, Oxford OX1 3QD, UK} \author{John van de Wetering} \email{[email protected]} \homepage{http://vdwetering.name} \affiliation{Institute for Computing and Information Sciences, Radboud Universiteit, Toernooiveld 212, 6525 EC Nijmegen, NL} \affiliation{Department of Computer Science, University of Oxford, Wolfson Building, Parks Road, Oxford OX1 3QD, UK} \author{Sal Wolffs} \email{[email protected]} \affiliation{Institute for Computing and Information Sciences, Radboud Universiteit, Toernooiveld 212, 6525 EC Nijmegen, NL} \begin{abstract} There are various gate sets used for describing quantum computation. A particularly popular one consists of Clifford gates and arbitrary single-qubit phase gates. Computations in this gate set can be elegantly described by the \emph{ZX-calculus}, a graphical language for a class of string diagrams describing linear maps between qubits. The ZX-calculus has proven useful in a variety of areas of quantum information, but is less suitable for reasoning about operations outside its natural gate set such as multi-linear Boolean operations like the Toffoli gate. In this paper we study the \emph{ZH-calculus}, an alternative graphical language of string diagrams that does allow straightforward encoding of Toffoli gates and other more complicated Boolean logic circuits. 
We find a set of simple rewrite rules for this calculus and show it is complete with respect to matrices over $\mathbb Z[\frac12]$, which correspond to the approximately universal Toffoli+Hadamard gateset. Furthermore, we construct an extended version of the ZH-calculus that is complete with respect to matrices over any ring $R$ where $1+1$ is not a zero-divisor. \end{abstract} \tableofcontents \maketitle \section{Introduction} Graphical calculi give us a compact way to express and reason about complex, interacting processes using \textit{string diagrams}. String diagrams represent processes that can compose in sequence or parallel using a notation consisting of boxes or nodes connected by wires. Notably, wires can be left `open' at one or both ends to indicate inputs and outputs of the composed process. For example, depicting sequential composition as $\circ$ and parallel composition as $\otimes$, we can translate expressions such as the following one into a string diagram: \[ h \circ (f \otimes g) \qquad \qquad \leadsto \qquad \qquad \tikzfig{string-diag-example} \] Such notation has proven convenient for expressing compositions and tensor products of linear maps (as used extensively in quantum theory), and more generally for expressing morphisms in a generic symmetric monoidal category. A \textit{graphical calculus} is both a collection of graphical building-blocks and a collection of equations between string diagrams that we can use to transform one diagram into another, typically equivalent, one. 
One of the best-studied graphical calculi is the \textit{ZX-calculus}~\cite{CD1,CD2,CKbook,vandewetering2020zxcalculus}, which has been applied extensively in the study of quantum circuits and related structures in quantum computation (e.g.\ measurement-based quantum computing~\cite{DP2,Backens2020extraction,kissinger2017MBQC}, fault-tolerant quantum computations~\cite{horsman2017surgery,hanks2019effective,magicFactories}, error-correcting codes~\cite{chancellor2016graphical}, circuit optimisation~\cite{FaganDuncan,cliff-simp,optimisation-paper}, and classical simulation~\cite{kissinger2021simulating,kissinger2022classical}). ZX-diagrams consist of \emph{spiders}, a type of well-behaved linear map that is depicted by coloured dots: \ctikzfig{example-ZX-diagram} Quantum circuits lie at the heart of the \textit{circuit model} of quantum computation, which represents the quantum part of the computation as a large unitary operator arising from compositions and tensor products of basic gates~\cite{NielsenChuang}. There are many choices of basic gates which are \textit{universal}, in the sense that they can construct or approximate arbitrary $2^n \times 2^n$ unitary matrices. A popular choice is the \textit{Clifford+phase} set of gates: \[ \text{CNOT} \ :=\ \begin{pmatrix} 1 & 0 & 0 & 0 \\ 0 & 1 & 0 & 0 \\ 0 & 0 & 0 & 1 \\ 0 & 0 & 1 & 0 \end{pmatrix} \qquad\qquad H \ := \ \frac{1}{\sqrt 2} \begin{pmatrix} 1 & 1 \\ 1 & -1 \end{pmatrix} \qquad\qquad Z_{\alpha} \ := \ \begin{pmatrix} 1 & 0 \\ 0 & e^{i \alpha} \end{pmatrix} \] This set of gates is universal, here in the sense that it can exactly express any unitary matrix. A common finite restriction of this infinite set is the \textit{Clifford+T} gate set, which replaces $Z_\alpha$ with $T := Z_{\pi/4}$. This set of gates is able to approximate any unitary up to arbitrary finite precision. 
A further restriction to the \textit{Clifford} gate set $(\text{CNOT}, H, S := T^2)$ is no longer universal, and in fact yields only circuits that can be efficiently simulated on a classical computer~\cite{aaronsongottesman2004}. Nevertheless, Clifford circuits play an important role in quantum computation~\cite{NielsenChuang}, quantum error correction~\cite{fowler2012surface,gottesman2010introduction,BB84}, and many quantum communication protocols. Notably, these three important families of circuits map straightforwardly to three fragments of the ZX-calculus: where parameters are restricted to integer multiples of $\frac\pi 2$ (Clifford), restricted to integer multiples of $\frac \pi 4$ (Clifford+T), or are unrestricted (Clifford+phase). Furthermore, each of these fragments of the ZX-calculus has been proven \textit{complete} in the sense that two ZX-diagrams which evaluate to the same linear operator are provably equal using just diagram rewrite rules~\cite{Backens1,SimonCompleteness,ng2017universal}. However, the ZX-calculus remains closely tied to the structure of the Clifford+phase family of circuits. The further one gets from this family, the more awkward it becomes to work with computations using the ZX-calculus. For instance, the CNOT gate (`controlled-NOT') can be expressed quite simply in the ZX-calculus: \[ \left\llbracket \ \tikzfig{cnot-intro}\ \right\rrbracket \ =\ \textit{CNOT} \] In contrast, the `controlled-controlled-NOT', commonly referred to as the Toffoli gate, is considerably more complicated, with typical presentations requiring many more generators which cannot be easily manipulated using the ZX-calculus rules~\cite[Ex.~12.10]{CKbook}. The problem with the Toffoli gate is that it acts on basis states in a manner that is not $\mathbb Z_2$-affine. For example, the Boolean CNOT is the map $\text{CNOT}:\{0,1\}^2\rightarrow \{0,1\}^2$ given by $\text{CNOT}(x,y) = (x,x\oplus y)$, where $\oplus$ denotes XOR. 
Such a Boolean function is called \emph{affine}, in analogy to the theory of polynomials, since it only contains terms of multiplicative degree at most 1 that are then added together (XOR acts as addition on the field $\mathbb Z_2$). Similarly, we say the NOT gate $NOT(x) = x\oplus 1$ is affine, since again it is a sum of terms of degree at most 1. In the ZX-calculus there are two main types of generators: Z-spiders and X-spiders. Both of these are closely related to affine Boolean functions: the Z-spider is based on the COPY Boolean map $\text{COPY}(x) = (x,x)$, while the X-spider is based on XOR. As being affine is baked into the generators, some tricks are required to represent non-affine Boolean functions in the ZX-calculus, such as the Toffoli gate $\text{TOF}(x,y,z) = (x,y,(x\cdot y)\oplus z)$ that contains the degree-2 term $x\cdot y$. The way we solve this issue in this paper is by introducing a third diagrammatic type of generator beyond the Z and X spiders: the H-box. An $n$-input $0$-output H-box is defined by $\ket{x_1\cdots x_n} \mapsto (-1)^{x_1\cdot \ldots \cdot x_n}$. This can then be used in combination with Z-spiders (i.e.\ copy maps) to represent the `controlled-controlled-Z' gate, and thus the Toffoli gate. Another way to view the H-box generator is as a generalisation of the Hadamard gate to a linear map with an arbitrary number of inputs and outputs. With this new generator comes a new set of graphical rewrite rules that allow us to reason more efficiently about quantum computation involving non-affine Boolean functions. The resulting graphical language is called the \emph{ZH-calculus}, since its main generators are Z-spiders and H-boxes. The main topic of this paper is to show that various fragments of the ZH-calculus are complete. Specifically, we find a set of particularly simple and compelling rewrite rules (see Figure~\ref{fig:phasefree-rules}) that are complete for the approximately universal Toffoli-Hadamard gate set. 
\footnote{The Toffoli-Hadamard gate set is only approximately universal for `real' quantum computing, where all unitaries contain only real numbers. However, such real unitaries can simulate with constant overhead unitaries with complex entries~\cite{shi2003toffoli}. The notion of approximate universality is here hence different than that of Clifford+$T$, and might better be called `computationally universal'. We will however not make this technical distinction in the remainder of the paper.} Arguably, this is the smallest complete rule set for an approximately universal fragment of quantum computing (we discuss this claim in more detail in Section~\ref{sec:motivation}). Building on this work, we show that for any ring where $2:=1+1$ is not a zero divisor, we can generalise the ZH-calculus in order to express all matrices over this ring, and we exhibit a complete set of rules for this generalisation as well. This article is based on a conference paper by Backens and Kissinger~\cite{backens2018zhcalculus} which first introduced the ZH-calculus and proved completeness for the ZH-calculus with complex parameters, an unpublished preprint of van de Wetering and Wolffs~\cite{PhasefreeZH2019} which proved completeness for the phase-free fragment of the calculus, and the DPhil thesis of Miller-Bakewell~\cite{millerbakewell2020thesis} which showed how the ZH-calculus and its completeness could be extended to arbitrary rings. The formal developments in those papers have been expanded and unified, and the technical content has been extended significantly in three ways: we find a simpler complete rule set for the phase-free fragment, we give a completely self-contained proof of phase-free completeness based on an encoding of arithmetic into ZH, and we extend the ZH-calculus to any ring where $2 := 1 + 1$ is not a zero divisor. 
\subsection{Related work} In the earliest complete version of the ZX-calculus, a new generator, the `triangle', was introduced in order to represent non-affine Boolean functions~\cite{ng2017universal}. This has later also been used to prove completeness of the fragment corresponding to the Toffoli+Hadamard gate set that we also study~\cite{vilmart2018zxtriangle}, as well as to find an axiomatisation of the ZX-calculus over arbitrary rings~\cite{wang2020algebraic}. In~\cite{ZXand}, a complete ruleset for classical circuits, i.e.\ natural number matrices, was found which uses almost the same ruleset as we do in the phase-free fragment. The difference is that~\cite{ZXand} treats the AND gate as an atomic non-symmetric generator, while we decompose it into H-boxes.\footnote{In fact, the rules of~\cite{ZXand} were inspired by the rules of the phase-free ZH-calculus presented in an earlier preprint~\cite{PhasefreeZH2019} (personal communication, Comfort).} The seminal complete graphical language for the related fragment of integer matrices is the \emph{ZW-calculus}~\cite{hadzihasanovic2015diagrammatic} which has generators based on the two different types of entanglement that are possible in tripartite qubit systems~\cite{CK}. The related language \RING{R}\ \cite{millerbakewell2020thesis} was designed to unify the study of qubit graphical calculi parameterised by phase rings, and indeed both parameterised ZH (Section~\ref{sec:zh-ring}) and the interpretation of ZH as operations on Boolean functions (Section~\ref{sec:motivation}) fit into this scheme. Therefore there are natural maps from \RING{\mathbb{B}}\ to ZH and from \RING{R}\ to \ensuremath{\text{ZH}_R}\xspace, the second of which is exhibited in~\cite{millerbakewell2020thesis}. Note that \cite{carette2020recipe} shows that, under certain assumptions, there are only three basic graphical calculi for qubits: ZX, ZW, and ZH. 
The results presented in the current paper prove that the youngest of these calculi, ZH, is complete, concluding the program of finding complete graphical calculi for qubits. Using the ZH-calculus, an efficient description of \emph{hypergraph states} can be given~\cite{Lemonnier2020hypergraph}, which allows for a description of ZH-diagrams in terms of \emph{path-sums}~\cite{AmyVerification,pathsRenaud,Vilmart2022Completeness}. There is also a close connection between ZH-diagrams and quantum multiple-valued decision diagrams~\cite{vilmart2021quantum}. The ZH-calculus has been used to give a graphical description of \emph{AKLT states}, a particularly canonical type of condensed matter system~\cite{east2020akltstates}, and of spin-networks~\cite{d.p.east2021spinnetworks}. \subsection{Overview of the paper and structure of the main proof}\label{sec:overview} In Section~\ref{sec:phase-free-ZH} we introduce the ZH-calculus. We present and motivate the rewrite rules and we introduce the notation we will use throughout the paper (the rules can be found in Figure~\ref{fig:phasefree-rules}, their Boolean counterparts that motivate them can be found in Figure~\ref{fig:boolean-functions}). Note that in particular, in subsection~\ref{sec:labelledHboxes}, we define `labelled H-boxes', which are derived generators that form an essential ingredient to our proof of completeness of the ZH-calculus. The majority of the paper, Sections~\ref{sec:avg-intro-mult}--\ref{sec:completeness}, is devoted to this proof of completeness. First, in Section~\ref{sec:avg-intro-mult}, we prove several special cases of three families of equations that lie at the core of our completeness proof. These families are called `multiply', `intro', and `average' (see, respectively, \eqref{eq:mult-def}, \eqref{eq:intro-def} and \eqref{eq:average-def}) and apply to labelled H-boxes. Then, in Section~\ref{sec:normal-forms}, we introduce normal form diagrams. 
A normal form diagram corresponds in a precise way to the matrix the diagram represents, and hence is unique for a given underlying linear map. Given two diagrams representing the same linear map, the ability to reduce both of them to normal form then shows that they can be transformed into the same diagram, and hence that the calculus is complete. In this section, we give a proof that the reduction to normal form can be done \emph{conditional} on having proved all instances of multiply, intro and average. This conditional proof works by showing that each generator can be transformed into normal form (Lemmas~\ref{lem:H-box-nf} and~\ref{lem:Z-spider-nf}), that a tensor product of normal forms can be reduced to normal form (Corollary~\ref{cor:tensor-product}), and that connecting any of the wires in a normal form results in a diagram that can again be brought to normal form (Corollary~\ref{cor:cap-nf}). With these steps and the condition, any diagram can be brought into normal form (Proposition~\ref{prop:completeness-conditional}). It remains to prove all instances of multiply, intro and average. In Section~\ref{sec:arithmetic}, we prove that we can do basic arithmetic on the labels of H-boxes: addition of H-box labels in Proposition~\ref{prop:addition}, and multiplication of H-box labels (which is exactly the general multiply rule) in Proposition~\ref{prop:mult-rule-rational}. Then, in Section~\ref{sec:completeness}, we give proofs of the general average rule (Proposition~\ref{prop:average-integer}) and general intro rule (Proposition~\ref{prop:intro-rational}). Together with the conditional reduction to normal form, this finishes our proof of completeness (Theorem~\ref{thm:ZH-completeness}). 
The reason we structured the proof like this, starting with a conditional proof of the reduction to normal form, is because some of the proofs in Sections~\ref{sec:arithmetic} and~\ref{sec:completeness} use this reduction to normal form (with a careful analysis that shows the required instances of average, intro and multiply were all proved in Section~\ref{sec:avg-intro-mult}), since we could not find a more direct way to prove them. In Section~\ref{sec:zh-ring}, we shift to studying the ZH-calculus over arbitrary rings. We prove its completeness by assuming the multiply, intro, and average rules as axioms. Then in Section~\ref{sec:alternative-rules}, we study a couple of modifications to the calculus and its rules. We end with some concluding remarks in Section~\ref{sec:conclusion}. \section{The ZH-calculus}\label{sec:phase-free-ZH} The ZH-calculus is a graphical language for expressing maps with zero or more inputs and outputs as certain string diagrams called ZH-diagrams. As a convention, we will draw inputs as wires entering the bottom of the diagram and outputs as wires exiting the top. Throughout this paper, we will distinguish formal ZH-diagrams $D : m \to n$ with $m$ input wires and $n$ output wires, from their standard interpretation as linear maps $\llbracket D \rrbracket : (\mathbb C^2)^{\otimes m} \to (\mathbb C^2)^{\otimes n}$, where $(\mathbb C^2)^{\otimes m} \cong \mathbb{C}^{2^m}$ is the $m$-fold tensor product of the 2D vector space $\mathbb C^2$. \begin{remark} In categorical terms, ZH-diagrams are morphisms in the free PROP $\mathcal{ZH}$ presented by the ZH-calculus, considered as a symmetric monoidal category, and $\llbracket - \rrbracket$ is a strong monoidal functor from $\mathcal{ZH}$ to $(\textbf{Vect}_{\mathbb C}, \otimes)$, the category of finite-dimensional complex vector spaces, with the monoidal product given by the tensor product. 
Though familiarity with symmetric monoidal categories and PROPs is not required to read this paper, the interested reader can find an accessible introduction in Chapter 2 of~\cite{zanasithesis}. \end{remark} \subsection{The generators}\label{sec:ZH-generators} ZH-diagrams have three types of generators: \emph{Z-spiders} represented by white dots, \emph{H-boxes} represented by white boxes, and a \emph{star} generator represented by \tikzfig{star}\!. These generators are interpreted as linear maps on copies of $\mathbb{C}^2$. We denote the standard (also called \emph{computational}) basis of $\mathbb{C}^2$ in Dirac notation as: \[ \ket 0 := \begin{pmatrix} 1 \\ 0 \end{pmatrix} \qquad \ket 1 := \begin{pmatrix} 0 \\ 1 \end{pmatrix} \] We will be using this Dirac `ket' notation $\ket{\psi}$ to denote a state in a vector space. We will also denote the `bra' $\bra{\psi}$ for the linear map that calculates the inner product with $\ket{\psi}$. That is, inputting a state $\ket{\phi}$ gives $\bra{\psi}(\ket{\phi}) = \braket{\psi}{\phi}$. The computational basis states $\ket{0}$ and $\ket{1}$ extend naturally to a basis for $(\mathbb{C}^2)^{\otimes n}$ by interpreting bitstrings as tensor products: \[ \ket{x_1\ldots x_n} := \ket{x_1}\otimes \cdots \otimes \ket{x_n} \] We can define the interpretation of Z-spiders and H-boxes in terms of these basis states. Z-spiders and H-boxes can have any number of inputs and outputs, and are then interpreted as the following linear maps: \begin{equation*} \intf{\tikzfig{Z-spider}} := \ket{0}^{\otimes n}\bra{0}^{\otimes m} + \ket{1}^{\otimes n}\bra{1}^{\otimes m} \qquad\quad \intf{\tikzfig{H-spider-free}} := \sum (-1)^{i_1\ldots i_m j_1\ldots j_n} \ket{j_1\ldots j_n}\bra{i_1\ldots i_m} \end{equation*} where $\intf{\cdot}$ denotes the map from diagrams to matrices. 
The sum in the second equation is over all $i_1,\ldots, i_m, j_1,\ldots, j_n\in\{0,1\}$ so that an H-box represents a matrix with all entries equal to 1, except the bottom right element, which is $-1$. Finally we have the generator \emph{star}, which has zero inputs and outputs. Its interpretation is: \begin{equation*} \intf{\tikzfig{star}} := \frac{1}{2} \end{equation*} Straight and curved wires have the following interpretations: \begin{equation*} \intf{\;|\;} := \ketbra{0}{0}+\ketbra{1}{1} \qquad\qquad\qquad \intf{\tikzfig{wire-cup}} := \ket{00}+\ket{11} \qquad\qquad\qquad \intf{\tikzfig{wire-cap}} := \bra{00}+\bra{11}. \end{equation*} When two diagrams are juxtaposed, the corresponding linear map is the tensor product (a.k.a. Kronecker product) of the matrices corresponding to the individual diagrams. A sequential composition of two diagrams is interpreted as the matrix product of the matrices corresponding to the individual diagrams: \[ \intf{\gendiagram{$D_1$}\;\gendiagram{$D_2$}} := \intf{\gendiagram{$D_1$}}\otimes\intf{\gendiagram{$D_2$}} \qquad\qquad \intf{\tikzfig{sequential-composition}} := \intf{\gendiagram{$D_2$}}\circ\intf{\gendiagram{$D_1$}} \] Some diagram motifs appear so often that we create shorthands for them, called \emph{derived} generators. We define the derived grey spider and the grey spider with a NOT as: \begin{equation}\label{eq:defx} (X) \quad\ \ \tikzfig{X-spider-dfn-free}\qquad\qquad\qquad (NOT)\quad\ \ \tikzfig{negate-dfn-free} \end{equation} The generator \dotmult{gray dot}\ acts as XOR on the computational basis while \grayphase{\neg} acts as NOT: \begin{equation*} \intf{\dotmult{gray dot}} = \ketbra{0}{00}+\ketbra{0}{11}+\ketbra{1}{01}+\ketbra{1}{10} \qquad\qquad\qquad \intf{\grayphase{\neg}}=\ketbra{0}{1}+\ketbra{1}{0}. \end{equation*} In some of the later sections there will be diagrams with many NOTs. 
To avoid overcrowding, we will draw a red dashed edge for an edge between Z-spiders with a NOT on it: \begin{equation}\label{eq:not-edge-def} \tikzfig{not-edge-def} \end{equation} We also introduce the derived \emph{negate} spider (a white NOT): \begin{equation}\label{eq:Z-triangle-dfn} (Z) \quad\ \ \tikzfig{negate-white-dfn} \end{equation} The negate white spider with one input and output acts like the Z gate: \begin{equation*} \intf{\phase{\neg}} = \ketbra{0}{0} - \ketbra{1}{1} \end{equation*} \subsection{The rules} \label{sec:ZH-rules} The ZH-calculus comes with a set of rewrite rules shown in Figure~\ref{fig:phasefree-rules}. Of the 8 rules in Figure~\ref{fig:phasefree-rules}, 7 are `obvious' in the sense that they express simple properties of the structure of the underlying Boolean functions (see Section~\ref{sec:motivation}). The only outlier is \OrthoRule which seems more arbitrary. In Section~\ref{sec:o-rule} we will see that we can replace \OrthoRule with two smaller rules. Additionally, the calculus has the meta-rule that \emph{only topology matters}. This means that two diagrams are considered equal when one can be topologically deformed into the other, while respecting the order of the inputs and outputs. Finally, the two generators are considered to be symmetric and undirected, so that the following equations also hold: \ctikzfig{generator-symmetries-free} These symmetry properties also hold for the derived grey spider and NOT gate. \begin{figure} \caption{The rules of the ZH-calculus. Throughout, $m,n$ are nonnegative integers. The right-hand sides of both \textit{bialgebra} \label{fig:phasefree-rules} \end{figure} \begin{proposition} \label{prop:phasefree-sound} The ZH-calculus is sound. \end{proposition} \begin{proof} It is straightforward to check that the symmetry properties for each generator and the finite rules of Figure~\ref{fig:ZH-rules} are sound by concrete calculation. 
\StrongCompRule and \HCompRule can both be reduced to a finite set of equations for which soundness is easily checked and from which the general infinite family of rules can be derived using induction (see for instance~\cite[Theorem~9.71]{CKbook}). The rules \SpiderRule and \HFuseRule express equations related to Frobenius algebras for which soundness can also be checked in a standard manner, see for instance~\cite{coecke2013new}. Soundness of the meta rule `only topology matters' follows by considering the string diagrams as morphisms in a compact closed category~\cite{SelingerCPM}. \end{proof} The converse to soundness is completeness. Determining that the ZH-calculus is complete will be the main objective of this paper. A third important property of diagrammatic languages is \emph{universality}. It can be seen from the definitions of the generators that their interpretations as linear maps are all matrices over $\mathbb{Z}[\half]$, and therefore all $\ensuremath{\otimes}\xspace$- and $\circ$-products of the generators also have interpretations as matrices over $\mathbb{Z}[\half]$. Universality means that we can represent \emph{any} matrix over $\mathbb{Z}[\half]$ using the ZH-calculus. We cannot prove this property yet, but it will follow immediately from Theorem~\ref{thm:nf-unique}. These matrices, and hence the ZH-calculus, are closely related to the approximately universal gate set Toffoli+Hadamard~\cite{Amy2020numbertheoretic}. We discuss the precise connection in more detail in Section~\ref{sec:tof-had}. \subsection{Motivation for the rewrite rules}\label{sec:motivation} Let us give some motivation for the rewrite rules of Figure~\ref{fig:phasefree-rules}. Each of the three main (derived) generators of the ZH-calculus, the Z-spiders, X-spiders and H-boxes, corresponds to a family of Boolean functions. 
Using this correspondence, each of the rules of Figure~\ref{fig:phasefree-rules} can be seen to be equivalent to an equation that holds between Boolean functions. In order to make this correspondence, we first recall that we can lift a Boolean function $f:\{0,1\}^n \rightarrow \{0,1\}^m$ to a linear map $\hat{f}:\mathbb C^{2^n} \to \mathbb C^{2^m}$ by its action on the computational basis states: \begin{equation}\label{eq:induced-linear-map} \hat{f}\ket{x_1\ldots x_n} = \ket{f(x_1\ldots x_n)}. \end{equation} \begin{figure} \caption{The rules of the ZH-calculus presented as equations between Boolean functions. } \label{fig:boolean-functions} \end{figure} Define the COPY family of Boolean functions COPY$^n: \{0,1\} \rightarrow \{0,1\}^n$ by $\text{COPY}(x) = (x,\ldots, x)$. Then $\widehat{\text{COPY}^n}$ is equal as a linear map to the Z-spider with a single input and $n$ outputs. Similarly, the X-spider with $n$ inputs and a single output is equal to $\widehat{\text{XOR}^n}$, while the single input, single output NOT generator is, unsurprisingly, equal to $\widehat{\text{NOT}}$. H-boxes are not induced by Boolean functions, as the linear map of an H-box contains negative numbers. Yet composing an H-box with a further Hadamard gives the family of Boolean AND functions. In summary: \begin{equation}\label{eq:classical-interpretation} \tikzfig{classical-interpretation} \end{equation} Seeing this, one might wonder why we simply did not choose some other generator that can represent the AND directly. The reason for this is that it is in fact useful to represent the inputs and outputs of AND asymmetrically. 
Indeed, the COPY and XOR maps are symmetric under interchange of the inputs and outputs, a property also known as \emph{flexsymmetry}~\cite{carette2021when}: \begin{equation}\label{eq:spider-unbend-wire} \tikzfig{spider-unbend-wire} \end{equation} This property is not true for AND: \begin{equation}\label{eq:and-unbend-wire} \tikzfig{and-unbend-wire} \end{equation} We can now re-express the rules of Figure~\ref{fig:phasefree-rules} with all the generators presented as Boolean functions, as shown in Figure~\ref{fig:boolean-functions}. The first five rules in this figure are exactly equal to the rules of Figure~\ref{fig:phasefree-rules} when we translate the Boolean functions back into the generators according to~\eqref{eq:classical-interpretation}. The other three are not equal, but can be proven to be equivalent using the first five equations (which we will do below), and hence the two rulesets are equivalent. For the representation of \OrthoRule we introduce two new Boolean `predicates': \begin{equation}\label{eq:boolean-predicates} \tikzfig{is-equal} \ \ := \ \ \tikzfig{wire-cap} \qquad \qquad\tikzfig{is-zero} \ \ := \ \ \tikzfig{gray-effect} \end{equation} Recall that the linear map corresponding to the cap is $\bra{00} + \bra{11}$, and hence we can interpret IS-EQUAL as selecting for the case where both inputs are equal to one another. Similarly IS-0 is just $\bra{0}$ and selects for its input to be $0$. Let us demonstrate how the rules of Figure~\ref{fig:boolean-functions} suffice to prove the rules of Figure~\ref{fig:phasefree-rules}. We will ignore scalar factors for these derivations (correct scalars can be introduced by repeated use of Lemma~\ref{lem:scalarcancelstars}). First, let us derive \StrongCompRule from its Boolean counterpart. We denote the usage of the `Boolean version' of \StrongCompRule by $(*)$. \ctikzfig{ZH-bialgebra-boolean-pf} Next, we prove \MultRule from the cancellation of two NOT gates. 
Here $(*)$ denotes the application of the `Boolean version' of \MultRule: \ctikzfig{multiply-rule-boolean-pf} Finally, we derive \OrthoRule from the corresponding relation between Boolean functions, where here $(*)$ denotes the Boolean version of \OrthoRule: \ctikzfig{ortho-rule-boolean-pf} The converse proofs that the rules of Figure~\ref{fig:phasefree-rules} imply those of Figure~\ref{fig:boolean-functions} are similar (and will also implicitly follow from completeness of the ZH-calculus). Looking at Figure~\ref{fig:boolean-functions} we see that \SpiderRule and \HFuseRule express the associativity of COPY, respectively AND, and that \IDRule and \HHRule express the triviality of COPY and AND when they have only a single input. We see that the only difference between \StrongCompRule and \HCompRule is which Boolean function comes before the COPY. These rules are in fact special cases of the rule that any Boolean function copies through COPY~\cite[Prop.~8.19]{CKbook}. The rule \MultRule expresses that NOT is self-inverse, and finally there is \OrthoRule, which requires some more explanation. On both sides of the equation, the middle input is copied and sent to both AND gates, but one of the copies is negated. Hence the output of at least one of the AND gates will be zero, so that the only way for the outputs of these AND gates to be equal is if they are both zero. This is the property that \OrthoRule expresses. As this is still not a particularly concise motivation, we present in Section~\ref{sec:o-rule} an alternative pair of rules that can replace \OrthoRule and which do have a more concise motivation. It might be surprising that this rule set expressing identities about Boolean functions suffices to reason about quantum computation. However, what is not apparent from this rule set is that the AND gate is `broken up' into two parts, given by the H-boxes and the Hadamard. 
The Hadamard gives the `Fourier transform' between the Z and X spiders and relates the COPY and XOR maps in a way that does not follow easily from their definition as Boolean functions. In fact, in~\cite{ZXand}, a calculus for integer matrices is presented using XOR, COPY and AND as generators, and which uses a rule set that is essentially the same as that of Figure~\ref{fig:boolean-functions}, but with \OrthoRule replaced by the rules we present in Section~\ref{sec:o-rule}. Hence, the difference between this `classical' calculus and our `quantum' calculus is the usage of the Hadamard and the necessity of negative numbers this requires. Beyond having a nice interpretation in terms of Boolean identities, our rule set is furthermore arguably preferable to that of other (approximately) universal complete graphical calculi for qubits in terms of simplicity. This is because: \begin{enumerate} \item It only requires a small number of rules (8, of which one is actually superfluous, cf.~Section~\ref{sec:hh-rule-necessity}). \item Each of these rules only involve a few (derived) generators. \item A priori, the set of generators does not involve any further information, such as a label taken from a ring or group. \item As a result, no rule involves a `side condition' on the elements of the ring or group. \end{enumerate} While there are other calculi that satisfy items 1) and 2) --- notably ZX$_{\pi/4}$~\cite{SimonCompleteness}, universal ZX~\cite{euler-zx}, \textsc{ring}~\cite{millerbakewell2020thesis}, and ZQ~\cite{MillerBakewell2020ZQ} also only require a small number of rules --- each of these requires either a complicated side condition on at least one the rules, or directly encodes properties of the underlying ring in its generators. Other calculi like ZW~\cite{hadzihasanovic2015diagrammatic} and $\Delta$ZX~\cite{vilmart2018zxtriangle} also do not have a lot of rules, but many of them lack a clear interpretation. 
This is of course not to say that those calculi are `worse', but just that the ZH-calculus has a particularly elegant set of rewrite rules, which somehow neatly capture the complexity of quantum computing. \subsection{Basic derived rules} \label{s:basic-derived} In this section we will prove some basic but useful rewrite rules that we will use throughout the paper. Since the statements and proofs are often very short we will state all the lemmas first and then all the proofs. Lemmas~\ref{lem:scalarcancelstars}--\ref{lem:xnot-h-reduce} are different ways to cancel or simplify scalar diagrams. Lemmas~\ref{lem:negate-direct}--\ref{lem:znots-cancel} are various ways to simplify the derived generators. Lemmas~\ref{lem:h-z-commute}--\ref{lem:z-commute} and \ref{lem:x-z-commute} give ways to commute Z's, NOTs and Hadamards through the other generators. Lemmas~\ref{lem:copy-x-z}--\ref{lem:copy-znot-h} govern the interaction of states with generators (mostly showing that many states can copy through a generator), and finally Lemma~\ref{lem:hopf-rule} is the Hopf rule that holds between the Z and X spider. While some of these lemmas are direct special cases of the rules, in particular of \StrongCompRule and \HCompRule, we include them here for convenience of reference, and to give a more complete set of `state commutation' rules. Note that for Lemma~\ref{lem:h-z-commute}, if $m=0$, then instead of the \tikzfig{star}\!'s, there is a single \tikzfig{dot}; similarly for Lemma~\ref{lem:h-x-commute}, if $m=0$, then instead of the \tikzfig{dot}\!'s, there is a single \tikzfig{star}. 
\begin{multicols}{3} \begin{lemma}\label{lem:scalarcancelstars} \begin{equation*} \tikzfig{scalar-rule-stars} \end{equation*} \end{lemma} \begin{lemma}\label{lem:scalarcancelzx} \begin{equation*} \tikzfig{scalar-rule-ZX} \end{equation*} \end{lemma} \begin{lemma}\label{lem:scalarcancelxh} \begin{equation*} \tikzfig{scalar-rule-XH} \end{equation*} \end{lemma} \begin{lemma}\label{lem:scalarcancelhh} \begin{equation*} \tikzfig{scalar-rule-HH} \end{equation*} \end{lemma} \begin{lemma}\label{lem:scalarcancelznot} \begin{equation*} \tikzfig{scalar-rule-ZNOT} \end{equation*} \end{lemma} \begin{lemma}\label{lem:xnot-h-reduce} \begin{equation*} \tikzfig{xnot-h-reduce} \end{equation*} \end{lemma} \begin{lemma}\label{lem:negate-direct} \begin{equation*} \tikzfig{negate-direct} \end{equation*} \end{lemma} \begin{lemma}\label{lem:x-spider} \begin{equation*} \tikzfig{X-spider-rule} \end{equation*} \end{lemma} \begin{lemma}\label{lem:x-special} \begin{equation*} \tikzfig{X-special} \end{equation*} \end{lemma} \begin{lemma}\label{lem:xnots-cancel} \begin{equation*} \tikzfig{XNOT-spider-rule} \end{equation*} \end{lemma} \begin{lemma}\label{lem:x-with-xnot} \begin{equation*} \tikzfig{X-with-XNOT} \end{equation*} \end{lemma} \begin{lemma}\label{lem:znots-cancel} \begin{equation*} \tikzfig{ZNots-cancel} \end{equation*} \end{lemma} \begin{lemma}\label{lem:h-z-commute} \begin{equation*} \tikzfig{H-Z-commute} \end{equation*} \end{lemma} \begin{lemma}\label{lem:h-x-commute} \begin{equation*} \tikzfig{H-X-commute} \end{equation*} \end{lemma} \begin{lemma}\label{lem:h-not-commute} \begin{equation*} \tikzfig{H-NOT-commute} \end{equation*} \end{lemma} \begin{lemma}\label{lem:cz-correct} \begin{equation*} \tikzfig{CZ-correct} \end{equation*} \end{lemma} \begin{lemma}\label{lem:not-commute} \begin{equation*} \tikzfig{NOT-commute} \end{equation*} \end{lemma} \begin{lemma}\label{lem:z-commute} \begin{equation*} \tikzfig{Z-commute} \end{equation*} \end{lemma} \begin{lemma}\label{lem:copy-x-z} 
\begin{equation*} \tikzfig{copy-x-z} \end{equation*} \end{lemma} \begin{lemma}\label{lem:copy-xnot-z} \begin{equation*} \tikzfig{copy-xnot-z} \end{equation*} \end{lemma} \begin{lemma}\label{lem:copy-z-x} \begin{equation*} \tikzfig{copy-z-x} \end{equation*} \end{lemma} \begin{lemma}\label{lem:copy-znot-x} \begin{equation*} \tikzfig{copy-znot-x} \end{equation*} \end{lemma} \begin{lemma}\label{lem:white-not-cancel} \begin{equation*} \tikzfig{white-not-cancel} \end{equation*} \end{lemma} \begin{lemma}\label{lem:copy-x-h} \begin{equation*} \tikzfig{copy-x-h} \end{equation*} \end{lemma} \begin{lemma}\label{lem:copy-xnot-h} \begin{equation*} \tikzfig{copy-xnot-h} \end{equation*} \end{lemma} \begin{lemma}\label{lem:copy-znot-h} \begin{equation*} \tikzfig{copy-znot-h} \end{equation*} \end{lemma} \begin{lemma}\label{lem:x-z-commute} \begin{equation*} \tikzfig{X-Z-commute} \end{equation*} \end{lemma} \begin{lemma}\label{lem:hopf-rule} \begin{equation*} \tikzfig{hopf-rule} \end{equation*} \end{lemma} \end{multicols} We now prove all the above lemmas. The proofs are all quite basic and straightforward, except that of Lemma~\ref{lem:copy-znot-h} which is the only one to require \OrthoRule. 
\begin{proof}[Proof of Lemmas \ref{lem:scalarcancelstars} and \ref{lem:scalarcancelzx}] \[\tikzfig{scalar-rule-proof} \qedhere\] \end{proof} \begin{proof}[Proof of Lemmas \ref{lem:scalarcancelxh} and \ref{lem:scalarcancelhh}] \[\tikzfig{HH-scalar-cancel-proof}\qedhere\] \end{proof} \begin{proof}[Proof of Lemma \ref{lem:scalarcancelznot}] \[\tikzfig{scalar-rule-ZNOT-proof}\qedhere\] \end{proof} \begin{proof}[Proof of Lemma \ref{lem:xnot-h-reduce}] \[\tikzfig{xnot-h-reduce-proof} \qedhere\] \end{proof} \begin{proof}[Proof of Lemma \ref{lem:negate-direct}] \[\tikzfig{negate-direct-proof}\qedhere\] \end{proof} \begin{proof}[Proof of Lemma \ref{lem:x-spider}] \[\tikzfig{X-spider-proof} \qedhere\] \end{proof} \begin{proof}[Proof of Lemma \ref{lem:x-special}] \[\tikzfig{X-special-proof} \qedhere\] \end{proof} \begin{proof}[Proof of Lemma \ref{lem:xnots-cancel}] \[\tikzfig{XX-cancel-proof} \qedhere\] \end{proof} \begin{proof}[Proof of Lemma \ref{lem:x-with-xnot}] \[\tikzfig{X-with-XNOT-proof} \qedhere\] \end{proof} \begin{proof}[Proof of Lemma \ref{lem:znots-cancel}] \[\tikzfig{ZZ-cancel-proof} \qedhere\] \end{proof} \begin{proof}[Proof of Lemma \ref{lem:h-z-commute}] \[\tikzfig{H-Z-commute-proof} \qedhere\] \end{proof} \begin{proof}[Proof of Lemma \ref{lem:h-x-commute}] \[\tikzfig{H-X-commute-proof} \qedhere\] \end{proof} \begin{proof}[Proof of Lemma \ref{lem:h-not-commute}] \[\tikzfig{H-NOT-commute-proof} \qedhere \] \end{proof} \begin{proof}[Proof of Lemma \ref{lem:cz-correct}] \[\tikzfig{CZ-correct-proof} \qedhere\] \end{proof} \begin{proof}[Proof of Lemma \ref{lem:not-commute}] \[\tikzfig{NOT-commute-proof} \qedhere \] \end{proof} \begin{proof}[Proof of Lemma \ref{lem:z-commute}] \[\tikzfig{Z-commute-proof} \qedhere \] \end{proof} \begin{proof}[Proof of Lemmas \ref{lem:copy-x-z}, \ref{lem:copy-z-x}, and \ref{lem:copy-x-h}] These are just applications of \StrongCompRule and \HCompRule. 
\end{proof} \begin{proof}[Proof of Lemma \ref{lem:copy-xnot-z}] \[\tikzfig{copy-xnot-z-proof} \qedhere \] \end{proof} \begin{proof}[Proof of Lemma \ref{lem:copy-znot-x}] \[\tikzfig{copy-znot-x-proof} \qedhere \] \end{proof} \begin{proof}[Proof of Lemma \ref{lem:white-not-cancel}] \[\tikzfig{white-not-cancel-proof} \qedhere\] \end{proof} \begin{proof}[Proof of Lemma \ref{lem:copy-xnot-h}] \[\tikzfig{copy-xnot-h-proof} \qedhere \] \end{proof} \begin{proof}[Proof of Lemma \ref{lem:copy-znot-h}] \[\tikzfig{H-copy-proof-1}\] \[\tikzfig{H-copy-proof-2}\] \[\tikzfig{H-copy-proof-3} \qedhere\] \end{proof} \begin{proof}[Proof of Lemma \ref{lem:x-z-commute}] \[\tikzfig{X-Z-commute-proof} \qedhere\] \end{proof} \begin{proof}[Proof of Lemma \ref{lem:hopf-rule}] \[\tikzfig{hopf-rule-proof} \qedhere\] \end{proof} \subsection{Labelled H-boxes}\label{sec:labelledHboxes} The matrix corresponding to an H-box is filled with $1$'s, except for the bottom right position where there is a $-1$. Calculating the matrix of some other diagrams in the ZH-calculus, we see that their matrix is similar, but instead of a $-1$ in the bottom right corner there is some other number $a\in\mathbb{Z}$. In order to make this connection clearer, we introduce some new notation, where we write an H-box with a label of $a$ inside it, to denote it is equal to a vector of $1$'s with an $a$ in the bottom position. When talking about labelled H-boxes in text, we will often write $H(a)$ to denote we are referring to an H-box labelled by $a$ (leaving the arity implicit, which should be clear from context). We begin with H-boxes of arity 1, corresponding to vectors $\left(\begin{smallmatrix}1\\a\end{smallmatrix}\right)$, which we will define inductively for non-negative integers using a `successor' gadget; H-boxes labelled by negative integers will then be defined from the positive ones using a `negate' gadget. 
\begin{definition} Let \begin{equation}\label{eq:H-box-minus1} \tikzfig{H-box-minus1} \end{equation} and for any $a\in\mathbb{Z}$ such that $a\geq 0$, define: \begin{equation}\label{eq:succesor} \tikzfig{H-box-successor} \end{equation} \end{definition} The `successor gadget' used in this definition might appear a bit strange. As a matrix, it is in fact equal to the previously considered \emph{triangle generator} in the ZX-calculus~\cite{vilmart2018zxtriangle,ng2017universal}: \ctikzfig{triangle-dfn} This triangle has the following interpretation as a matrix: \[\intf{\tikzfig{triangle-white}} = \ketbra{0}{0}+\ketbra{1}{0} + \ketbra{1}{1}\] We could have introduced the triangle as an additional derived generator. However, as can be seen from its matrix, it is not self-adjoint and hence the orientation of the node matters. To avoid the complications that come from that, we elect not to use the triangle and to simply write out its form as a ZH-diagram as in~\eqref{eq:succesor}. $H(-1)$ is not the only H-box with a particularly simple representation. Indeed, we have \begin{equation}\label{eq:H-box-0} \tikzfig{H-box-0} \end{equation} \begin{equation}\label{eq:H-box-1} \tikzfig{H-box-1} \end{equation} \begin{definition} For any $a\in\mathbb{Z}$ such that $a<-1$, we define the corresponding degree-1 H-box by applying a negate spider: \begin{equation}\label{eq:def-negative-numbers} \tikzfig{H-box-negation} \end{equation} \end{definition} The negation gadget plays nicely with the H-box labels. In particular by Lemma~\ref{lem:znots-cancel}, H-box labels satisfy $-(-a) = a$. 
We also have consistency with the simpler representations of $H(a)$ for $a\in\{-1,0,1\}$: \ctikzfig{negation-consistency} \begin{definition} Labelled H-boxes of arbitrary arity are defined as: \begin{equation} \label{eq:labelledHboxhigherarity} \tikzfig{labeledHbox-higharity} \end{equation} \end{definition} This is consistent with \HHRule for arity-1 H-boxes of arbitrary label (using Lemma~\ref{lem:scalarcancelstars}). Certain higher-arity H-boxes have relatively simple label-free forms, in particular, for the $-1$-labelled H-box we have: \begin{equation}\label{eq:minus-one-high-arity} \tikzfig{minus-1-high-arity} \end{equation} for the $0$-labelled H-box we have: \begin{equation}\label{eq:zero-high-arity} \tikzfig{zero-high-arity} \end{equation} for the $1$-labelled H-box we have: \begin{equation}\label{eq:unit} \tikzfig{one-high-arity} \end{equation} and for the $2$-labelled H-box we have: \begin{equation}\label{eq:H-box-2-def} \tikzfig{H-box-2} \end{equation} There are some scalar cancellation rules associated to these labelled H-boxes: \begin{multicols}{3} \begin{lemma}\label{lem:scalarcancelxh-gen} For any $a\in \mathbb{Z}$: \ctikzfig{scalarcancelxh-gen} \end{lemma} \null \columnbreak \begin{lemma}\label{lem:scalar-2}~ \ctikzfig{scalar-2} \end{lemma} \null \columnbreak \begin{lemma}\label{lem:scalar-cancel-2}~ \ctikzfig{scalar-cancel-2} \end{lemma} \end{multicols} \begin{proof}[Proof of Lemma~\ref{lem:scalarcancelxh-gen}] First, note that if $a<-1$, we can change the sign: \ctikzfig{scalarcancelxh-gen-neg} It thus suffices to consider $-1\leq a$. For these values, we prove the result by induction, with the base case $a=-1$ being Lemma~\ref{lem:scalarcancelxh}. 
The inductive step is \[ \tikzfig{scalarcancelxh-gen-ind} \qedhere \] \end{proof} \begin{proof}[Proof of Lemma~\ref{lem:scalar-2}] \[\tikzfig{scalar-2-proof} \qedhere\] \end{proof} \begin{proof}[Proof of Lemma~\ref{lem:scalar-cancel-2}] \[\tikzfig{scalar-cancel-2-proof} \qedhere\] \end{proof} \subsection{!-box notation}\label{sec:bang-boxes} Many of the calculations in the remainder of the paper are greatly simplified by the use of \textit{!-box notation}~\cite{kissinger2014pattern}. A !-box (pronounced `bang box') in a string diagram represents a part of the diagram that is able to fan out arbitrarily. That is, the contents of a !-box, along with any wires into or out of the !-box, can be copied $n$ times for any non-negative integer $n$. For example, the !-box diagram below represents the following family of (concrete) string diagrams, one for each $n$: \[ \tikzfig{bang-box-example} \quad \longleftrightarrow \quad \left\{ \ \ \tikzfig{bang-box-example0}\ \ ,\quad \ \ \tikzfig{bang-box-example1}\ \ ,\quad \ \ \tikzfig{bang-box-example2}\ \ ,\quad \ \ \tikzfig{bang-box-example3}\ \ ,\quad \ \ \ldots\ \ \right\} \] All of the resulting string diagrams are well-defined because all of our generators can have arbitrary arities. We can also use !-boxes in diagram equations, as long as each !-box on the LHS has a corresponding !-box on the RHS, and the inputs/outputs in each !-box match. Such a rule represents a family of equations where each \textit{pair} of corresponding !-boxes is replicated $n$ times, e.g.\ we can re-express \eqref{eq:unit} as: \[ \tikzfig{unit-bangboxed} \quad \longleftrightarrow \quad \left\{ \ \ \tikzfig{unit-bb0}\ \ ,\quad \ \ \tikzfig{unit-bb1}\ \ ,\quad \ \ \tikzfig{unit-bb2}\ \ ,\quad \ \ \ldots\ \ \right\} \] Note the dashed box on the right-hand side of the first equation denotes an empty diagram. 
With this notation, the definition of grey spiders \eqref{eq:defx} becomes \begin{equation} \label{eq:grey-spider-dfn} \tikzfig{X-spider-dfn-bb} \end{equation} Additionally, the rules \SpiderRule, \HFuseRule, \StrongCompRule, and \HCompRule from Figure~\ref{fig:phasefree-rules} become: \[ \text{(zs)}\quad \tikzfig{Z-spider-rule-bb} \qquad \text{(hs)}\quad \tikzfig{H-spider-rule-bb} \qquad\! \text{(ba$_1$)}\quad \tikzfig{ZX-bialgebra-bb} \qquad \text{(ba$_2$)}\quad \tikzfig{ZH-bialgebra-bb} \] Note that the red dashed NOT-edges defined in \eqref{eq:not-edge-def} behave well when crossing !-box borders. \[ \tikzfig{rededge-bangboxed} \quad \longleftrightarrow \quad \left\{ \ \ \tikzfig{rededge-0}\ \ ,\quad \ \ \tikzfig{rededge-1}\ \ ,\quad \ \ \tikzfig{rededge-2}\ \ ,\quad \ \ \tikzfig{rededge-3}\ \ ,\quad \ \ \ldots\ \ \right\} \] \subsection{Annotated !-boxes}\label{sec:annotated-bb} Many of our diagrams -- in particular the normal forms that will be defined in Section~\ref{sec:normal-form} -- have a repeating structure involving multiple H-boxes with different labels. To write such diagrams more concisely, we introduce a limited extension of the usual !-box notation, which allows !-boxes to be indexed by the elements of a totally ordered finite set. Standard !-boxes allow infinite families of diagrams to be represented in a single diagram, whereas annotated !-boxes simply offer a more concise representation of a single diagram with repeated structure, instantiated once for each element of the finite set. \begin{definition}\label{def:annotated-bb} An annotated !-box is a !-box with a label of the form `$x\in X$', where $X$ is a totally ordered finite set and $x$ is a variable that may appear in labels inside the !-box. In the following, we will use a large box labelled $D_x$ to denote an arbitrary ZH-diagram whose labels may contain the parameter $x$. 
Given a diagram containing an annotated !-box, we recursively define an equivalent diagram without the annotated !-box. The base case is that of a !-box indexed over the empty set, which is instantiated zero times: \ctikzfig{annotated-bb-base} For the recursive case, $X$ is non-empty. Thus it has a maximum element, which we denote $m:=\max X$. We construct an equivalent diagram where the !-box is indexed by the smaller set $X\setminus\{m\}$. This is done by copying the contents of the !-box (including all external wires) once, immediately to the right of the !-box, and replacing each occurrence of $x$ inside the new subdiagram by $m$: \begin{equation}\label{eq:annotated-bb-recursive} \tikzfig{annotated-bb-recursive} \end{equation} \end{definition} \begin{remark} This definition straightforwardly extends to diagrams containing multiple disjoint annotated !-boxes, which is all we need in this paper. It can also be applied in reverse: any time a diagram contains multiple copies of the same subdiagram, up to changes in labels, these can be combined into an annotated !-box. \end{remark} We will often use bit strings for indexing annotated !-boxes; the motivation for this will become clear with the introduction of normal-form diagrams in Section~\ref{sec:normal-form}. As an example, indexing over the finite set $\mathbb B^2 := \{ 00, 01, 10, 11 \}$, we can write expressions such as: \begin{equation}\label{eq:indexed-ex} \tikzfig{indexed-example} \ \ :=\ \ \ \tikzfig{index-example-rhs} \end{equation} Since annotated !-boxes correspond to unique diagrams (rather than infinite families of diagrams), their appearance in equations is less constrained than that of unlabelled !-boxes. Nevertheless, some care is needed if the annotated !-boxes contain inputs or outputs of the diagram as a whole. 
The simplest such equations correspond directly to allowed equations for unlabelled !-boxes: they have corresponding annotated !-boxes on the LHS and RHS, which are both indexed by the \textit{same} finite set. Inputs and outputs coming out of a labelled !-box are matched to those of the same index on the other side of the equality. For example: \[ \left(\ \tikzfig{index-example-rule}\ \right) \ \ := \ \ \left( \ \tikzfig{index-example-rule-inst}\ \right) \] If an annotated !-box contains diagram inputs or outputs, it must either have a corresponding annotated !-box on the other side of the equality, or it must match a !-box labelled by a sub- or superset (possibly the empty set) as in \eqref{eq:annotated-bb-recursive}. This is to avoid complications caused by the need to keep the order of external wires consistent. For annotated !-boxes that do not contain any diagram inputs or outputs, such ordering issues do not arise because of the meta rule `only topology matters'. Lemmas~\ref{lem:annotated-expansion} and~\ref{lem:annotated-split} below give examples of diagram equations where the annotated !-boxes on the two sides do not match in the same strict way. Note that we can recover the behaviour of normal, un-labelled !-boxes by interpreting a !-box without a label as being indexed by an \textit{arbitrary} totally ordered finite set, e.g. \[ \tikzfig{Z-spider-rule-bb} \qquad \longleftrightarrow \qquad \tikzfig{Z-spider-rule-bb-index} \quad \textrm{(for any totally ordered finite sets $X$ and $Y$)} \] In particular, this means that any derived rewrite rule involving un-labelled !-boxes can also be applied to annotated !-boxes with arbitrary labels. \begin{lemma}\label{lem:annotated-expansion} An annotated !-box indexed by a bit string can be `expanded' according to one of the bits, as long as it does not contain any inputs or outputs of the diagram as a whole. Here, the box labelled $D_{\vec{b}}$ denotes an arbitrary diagram parameterised by $\vec{b}$. 
\ctikzfig{annotated-expansion-prime} \end{lemma} \begin{proof} Note that the set $\mathbb B^{n}$ can be split into two pieces, based on whether the most significant bit is $0$ or $1$: \[ \mathbb B^{n} = \{ 0 \vec{c} \ |\ \vec{c} \in \mathbb B^{n-1}\} \uplus \{ 1 \vec{c} \ |\ \vec{c} \in \mathbb B^{n-1} \} \] The result therefore follows by completely expanding the annotated !-boxes on each side according to Definition~\ref{def:annotated-bb} and applying the meta-rule `only topology matters'. \end{proof} \begin{lemma}\label{lem:annotated-split} An annotated !-box containing two disconnected diagram components can be split into two boxes indexed over the same set, as long as the original annotated !-box does not contain any inputs or outputs of the diagram as a whole. Here, $X$ is an arbitrary totally ordered finite set and the boxes labelled $D_x$ and $D_x'$ denote arbitrary diagrams parameterised by $x$. \ctikzfig{annotated-split-prime} \end{lemma} \begin{proof} Note that for any set $X$, \[ \biguplus_{x\in X} \{D_x,D_x'\} = \{D_x\mid x\in X\} \uplus \{D_x'\mid x\in X\} \] The result therefore follows follows from Definition~\ref{def:annotated-bb} and the meta-rule `only topology matters'. \end{proof} The condition about annotated !-boxes in Lemmas~\ref{lem:annotated-expansion} and~\ref{lem:annotated-split} not containing any inputs or outputs of the diagram as a whole is to avoid issues caused by the need to keep the order of external wires consistent. We have drawn the contents of the annotated !-boxes as being connected only to Z-spiders, since this covers all our applications. Nevertheless, the result generalises straightforwardly to X-spiders and H-boxes as well. The following notation will be useful when working with annotated !-boxes. \begin{definition}\label{def:indexing-map} Let $\mathbb B^n$ be the set of all $n$-bitstrings. 
For any $\vec{b} := b_1\ldots b_n \in \mathbb B^n$, define the \textit{indexing map} $\iota_{\vec{b}}$ as follows: \[ \iota_{\vec{b}} \; = \; \tikzfig{indexing-box} \; = \; \left(\grayphase{\neg}\right)^{1 - b_1} \ldots \left(\grayphase{\neg}\right)^{1 - b_n}, \] where $\left(\grayphase{\neg}\right)^{1} = \grayphase{\neg}$ and $\left(\grayphase{\neg}\right)^{0} = \;\tikzfig{identity}\;$, analogous to how $\begin{pmatrix}0&1\\1&0\end{pmatrix}^0 = \begin{pmatrix}1&0\\0&1\end{pmatrix}$. \end{definition} \begin{lemma}\label{lem:iota-copy} The $\iota_{\vec{b}}$ operator copies through white spiders, i.e.\ for any $\vec{b}\in\mathbb B^n$: \ctikzfig{iota-copy} \end{lemma} \begin{proof} This follows immediately from Lemma~\ref{lem:not-commute} via Definition~\ref{def:indexing-map}. \end{proof} \section{Special cases of multiply, intro and average}\label{sec:avg-intro-mult} Now that we have all the necessary definitions and basic rewrite rules, we can start our proof of completeness. We will proceed as outlined in Section~\ref{sec:overview}, first introducing three families of rewrite rules that use the labelled H-boxes of Section~\ref{sec:labelledHboxes}, which will play an important part in proving completeness. Proving that these rewrite rules hold in full generality is difficult however. In this section we will only prove these rewrite rules for certain small integers. The general proofs are postponed until Section~\ref{sec:completeness}, after we have introduced and studied the normal form diagrams in Section~\ref{sec:normal-forms}. Note that starting from here, we will no longer necessarily write all the lemmas used in a rewrite step in a proof. In particular, we will often suppress uses of Lemma~\ref{lem:scalarcancelstars} and just treat \tikzfig{star} and \tikzfig{dot} as each other's inverses. We will often also let uses of spider fusion \spiderrule be implicit. 
\subsection{The multiply rule}\label{sec:mult-rule} We claim that the following identity holds in the ZH-calculus for all integers $a$ and $b$: \begin{equation*}\label{eq:mult-def} \tag{$M_{a,b}$}\tikzfig{multiply-rule-phased} \end{equation*} We refer to this as the \emph{multiply} rule. Proving this is quite involved and will be postponed until Section~\ref{sec:arithmetic}. The usefulness of this rule comes from the following generalisation to arbitrary arity. \begin{lemma}\label{prop:multiply-bb} If the ZH-calculus proves $M_{a,b}$, then it also proves the following: \ctikzfig{multiply-rule-bb} \end{lemma} \begin{proof} \ctikzfig{multiply-rule-bb-proof1} \[ \tikzfig{multiply-rule-bb-proof2} \qedhere \] \end{proof} This states that if two labelled H-boxes are connected to exactly the same set of white spiders, then we can fuse the H-boxes together by multiplying their labels. For certain small values of $b$ we can easily prove this rule. \begin{lemma}\label{lem:mult-simple-values} Let $b\in \{0,1,-1\}$. Then the ZH-calculus proves \eqref{eq:mult-def} for all $a\in \mathbb{Z}$. \ctikzfig{multiply-rule-bb} \end{lemma} \begin{proof} By Lemma~\ref{prop:multiply-bb} it suffices to show the case where the !-box is expanded a single time. For $b=1$, this follows from \eqref{eq:unit} and \SpiderRule. For $b=-1$, this follows from \ZDef and \eqref{eq:def-negative-numbers}. For $b=0$ we calculate: \[\tikzfig{mult-0-a-proof} \qedhere\] \end{proof} \subsection{The intro rule} \label{s:intro} A second family of rules that is particularly useful is the following: \begin{equation*}\label{eq:intro-def} \tag{$I_a$}\tikzfig{intro-rule} \end{equation*} We refer to this as the \emph{intro} rule as it allows us to introduce new wires to an H-box. As is the case for the multiply rule, this rule has a generalisation to arbitrary arity. But unlike the multiply rule, it will often be useful to apply the rule multiple times in succession to connect it to a larger set of wires. 
\begin{lemma}\label{lem:intro-bangboxed} If the ZH-calculus proves $I_a$, then it also proves the following !-boxed version\footnote{We use a mixed notation without !-boxes for the inputs and outputs for easier intelligibility, but will occasionally use the more formal notation with further !-boxes where the extra rigour can be achieved without confusing the notation.}. The natural numbers $m$ and $n$ are arbitrary, but note that the annotated !-box depends on $n$. \ctikzfig{intro-rule-bangboxed-prime} \end{lemma} \begin{proof} First, let us prove the following equation: \begin{equation}\label{eq:intro-rule-bb-lemma} \tikzfig{intro-rule-bb-lemma} \end{equation} Now, we prove the claim by induction on $n$, beginning with the base case where $n = 1$. Note that $n = 0$ is valid but trivial. In the derivation below we use a !-box to show that $m$ is arbitrary. \[\tikzfig{intro-rule-bb-proof} \] For the inductive step, assume the lemma holds for $n$ copies of the lower white node. In the calculation below, (*) denotes the induction step. \ctikzfig{intro-rule-bb-inductive-step-1-prime} \ctikzfig{intro-rule-bb-inductive-step-2-prime} \[\tikzfig{intro-rule-bb-inductive-step-3-prime} \qedhere \] \end{proof} \begin{example} When applying the !-boxed intro rule to an H-box and $n$ white spiders, the H-box is copied $2^n$ times, with each copy being connected to the $n$ white spiders via a different combination of NOT edges and plain edges. Every wire originally incident on the H-box is replaced by a white spider, which connects to all the new H-boxes. 
For example, with $n=2$ and a single wire incident on the original H-box: \ctikzfig{intro-bb-example} This corresponds to the outer product \[ \begin{pmatrix}1\\a\end{pmatrix} \left( \begin{pmatrix}1&1\end{pmatrix} \otimes \begin{pmatrix}1&1\end{pmatrix} \right) = \begin{pmatrix}1\\a\end{pmatrix} \begin{pmatrix}1&1&1&1\end{pmatrix} = \begin{pmatrix}1&1&1&1\\a&a&a&a\end{pmatrix}, \] as will become clear from the normal form definition in Section~\ref{sec:normal-form}. \end{example} We will prove that the intro rule holds for all integers in Section~\ref{sec:completeness}. But first we will directly prove the intro rule for certain simple integers. To do this we will need two lemmas. \begin{multicols}{2} \begin{lemma}\label{lem:intro-half}~ \ctikzfig{intro-half} \end{lemma} \begin{lemma}\label{lem:cancel-two-half}~ \ctikzfig{cancel-two-half} \end{lemma} \end{multicols} \begin{proof}[Proof of Lemma~\ref{lem:intro-half}]Going from right to left: \ctikzfig{intro-half-proof-1} \[\tikzfig{intro-half-proof-2}\qedhere\] \end{proof} \begin{proof}[Proof of Lemma~\ref{lem:cancel-two-half}] We prove the !-box-free version, with the generalisation following the same proof as for Lemma~\ref{prop:multiply-bb}. \[\tikzfig{cancel-two-half-proof} \qedhere\] \end{proof} \begin{lemma}\label{lem:intro-phasefree} Let $a \in \{-1,0,1,2\}$. Then the ZH-calculus proves $I_a$ and its !-boxed generalisation: \begin{equation*} \tikzfig{intro-rule-bangboxed-prime} \end{equation*} \end{lemma} \begin{proof} By Lemma~\ref{lem:intro-bangboxed} it suffices to show Eq.~\eqref{eq:intro-def}. We prove by case distinction on $a\in\{-1,0,1,2\}$: \ctikzfig{intro-minone-proof} \ctikzfig{intro-one-proof} \[\scalebox{0.95}{\tikzfig{intro-zero-proof}}\] \[\tikzfig{intro-2-pf} \qedhere\] \end{proof} \subsection{The average rule} \label{s:average} The final family of rewrite rules we will need is the following. 
\begin{equation*}\label{eq:average-def} \tag{$A_{a,b}$}\qquad \tikzfig{average-rule-prime} \end{equation*} We call this rule the \emph{average} rule, because on the right-hand side it adds the numbers together, but also multiplies the result with a `NOT 2' H-box, which acts like multiplying by a half; cf.~Lemma~\ref{lem:average-true-form}. We will prove that this rule holds for all integers in Section~\ref{sec:completeness}. For now, we will prove it for certain combinations of integers. \begin{lemma}\label{prop:avg-bb} If the ZH-calculus proves \eqref{eq:average-def}, then it also proves the following: \ctikzfig{avg-lemma-prime} \end{lemma} \begin{proof} \[ \tikzfig{avg-lemma-prime-pf} \qedhere\] \end{proof} \begin{lemma}\label{lem:average-with-0} The ZH-calculus proves $(A_{a,0})$ for all $a\in \mathbb{Z}$. \ctikzfig{average-rule-prime-0} \end{lemma} \begin{proof} \[\tikzfig{average-rule-prime-0-proof} \qedhere\] \end{proof} \begin{lemma}\label{lem:avg-neg} Let $a$ be an integer for which we have proven~\eqref{eq:intro-def}. Then we can prove $(A_{a,-a})$. \ctikzfig{avg-neg} \end{lemma} \begin{proof} The first diagram is transformed into the last one as follows: \ctikzfig{average-rule-a-minus-a} The equality to the middle diagram follows by applying Eq.~\eqref{eq:H-box-0} instead of Lemma~\ref{lem:scalar-cancel-2} in the sixth rewrite step. \end{proof} \begin{lemma}\label{lem:avg-equal} Let $a$ be an integer for which we have proven~\eqref{eq:intro-def}. Then we can simplify the left-hand side of $(A_{a,a})$ to the diagram in the middle. If we also have proven $(M_{2,a})$, then we can prove $(A_{a,a})$ outright. \ctikzfig{avg-equal} \end{lemma} \begin{proof}~ \[ \tikzfig{avg-equal-proof} \qedhere \] \end{proof} \begin{lemma}\label{lem:average-phasefree} Let $a,b\in\{0,1,-1\}$. Then the ZH-calculus proves~\eqref{eq:average-def}. 
\begin{equation*} \tikzfig{average-rule-prime} \end{equation*} \end{lemma} \begin{proof} If one of $a$ and $b$ is $0$ this follows from Lemma~\ref{lem:average-with-0}. The remaining cases satisfy $a,b\in\{1,-1\}$, in particular either $a=b$ or $a=-b$. We have $I_1$ and $I_{-1}$ by Lemma~\ref{lem:intro-phasefree}, and we have $M_{2,1}$ and $M_{2,-1}$ by Lemma~\ref{lem:mult-simple-values}. Hence the result follows from Lemmas~\ref{lem:avg-neg} and~\ref{lem:avg-equal}. \end{proof} \section{Normal forms}\label{sec:normal-forms} Our completeness proof relies on showing that every diagram can be reduced to a certain unique normal form. In this section we will introduce this normal form, and establish a strategy for reducing diagrams to normal form. This strategy requires that we have proven the multiply, intro, and average rule for all integers. Hence, this section results in a completeness proof that is `conditional' on the multiply, intro, and average rule being provable in the ZH-calculus for all integers. \subsection{Normal form diagrams} \label{sec:normal-form} Our normal form diagrams will be `state-like' in that all of the wires will be connected to the top of the diagram. This is because we can convert between states and operators by simply bending inputs to become outputs and vice versa. This transforming of an operator into a state is known as map-state duality or the Choi-Jamio\l{}kowski isomorphism. A key part of this normal form is that we can easily represent the \emph{Schur product} of two states, i.e.\ the pointwise product, in the ZH-calculus. For states $\psi,\phi$, write $\psi * \phi$ for their Schur product. Then representing states by large white boxes we have the following identity: \ctikzfig{schur} Here the $i$-th output of $\psi$ and $\phi$ is plugged into a \dotmult{white dot} for each $i$. 
It follows from \SpiderRule that $*$ is associative and commutative, so we can write $k$-fold Schur products $\psi_1 * \psi_2 * \ldots * \psi_k$ without ambiguity. For any finite set $J$ with $|J| = k$, let $\prod_{j\in J} \psi_j$ be the $k$-fold Schur product. As shown in the next lemma, the indexing maps $\iota_{\vec{b}}$ from Definition~\ref{def:indexing-map} interact nicely with the Schur product. \begin{lemma}\label{lem:convolution-iota} The ZH-calculus enables the computation of the Schur product of two maps of the form $\iota_{\vec{b}}\circ H_n(x)$ and $\iota_{\vec{b}}\circ H_n(y)$ for any $\vec{b}\in\mathbb B^n$ and $x,y$ for which we have proven~$(M_{x,y})$: \ctikzfig{convolution-iota} \end{lemma} \begin{proof} Apply Lemma~\ref{lem:iota-copy}, followed by Lemma~\ref{prop:multiply-bb}. \end{proof} \begin{definition} A \emph{normal-form diagram} in the ZH-calculus is a diagram of the form \begin{equation}\label{eq:nf-formula} \tikzfig{star}^k\prod_{\vec{b} \in \mathbb B^n} \big( \iota_{\vec{b}} \circ H_n(a_{\vec{b}}) \big) \end{equation} where $H_n(a_{\vec{b}})$ is the arity-$n$ H-box (considered as a state) labelled by some values $a_{\vec{b}}$ for all $\vec{b}\in \mathbb B^n$, and $k\in \mathbb{N}$. We say the normal-form is \emph{reduced} when either $k=0$ or at least one of the labels $a_{\vec{b}}$ cannot be factored by two (i.e.\ is odd). \end{definition} A normal form diagram can be seen as a collection of $n$ spiders, fanning out to $2^n$ H-boxes, each with a distinct configuration of NOT's corresponding to the $2^n$ bitstrings in $\mathbb B^n$, together with a global scalar. Diagrammatically, normal forms are: \begin{equation} \label{eq:normal-form} \tikzfig{nf-bbox-star}\ \ :=\ \ \tikzfig{nf-picture-star} \end{equation} \begin{remark} Note that the number of outputs in a normal form diagram is the same as the exponent of $\mathbb{B}$ in the !-box annotation. 
To avoid cluttering the notation too much, we will only rarely make this explicit in diagrams. \end{remark} The additional condition for being reduced ensures uniqueness of normal form diagrams. \begin{theorem}\label{thm:nf-unique} Reduced normal forms are unique. In particular: \begin{equation}\label{eq:nf-concrete} \intf{ \, \tikzfig{star}^k \prod_{\vec{b} \in \mathbb B^n} \big( \iota_{\vec{b}} \circ H_n(a_{\vec{b}}) \big) } = \sum_{\vec{b} \in \mathbb B^n} \frac{1}{2^k}a_{\vec{b}} \ket{\vec{b}}. \end{equation} \end{theorem} \begin{proof} The map $\iota_{\vec b}$ is a permutation that acts on computational basis elements as $\ket{\vec c} \mapsto \ket{\vec c \oplus \vec b \oplus \vec 1}$. In particular, it sends the basis element $\ket{\vec 1}$ to $\ket{\vec b}$. Hence $\iota_{\vec b} \circ H_n(a_{\vec b})$ is a vector with $a_{\vec b}$ in the $\vec b$-th component and $1$ everywhere else. The Schur product of all such vectors indeed gives the RHS of~\eqref{eq:nf-concrete} up to the global scalar of $\frac{1}{2^k}$ that is added by $\tikzfig{star}^k$. So now suppose two different normal form diagrams have equal interpretation~\eqref{eq:nf-concrete}. We need to show that the diagrams are then equal, which boils down to showing $k=k'$ and $a_{\vec{b}}=a_{\vec{b}}'$ for all $\vec{b}\in \mathbb B^n$. Suppose without loss of generality that $k\leq k'$. Then $a_{\vec{b}} = \frac{2^k}{2^{k'}}a_{\vec{b}}'$. The left-hand side is an integer, and so the right-hand side must be so as well. Hence, if $k<k'$, then all the $a_{\vec{b}}'$ must be divisible by 2, which contradicts the assumption of being a reduced normal form (since in this case $k'\neq 0$). Hence, $k=k'$, and hence $a_{\vec{b}}=a_{\vec{b}}'$ as required. \end{proof} Because reduced normal forms are unique, two diagrams in reduced normal form are equal if and only if the linear maps the diagrams represent are equal, and hence, the calculus is complete if we can bring all diagrams to reduced normal form. 
Some simple diagrams can readily be brought to reduced normal form: \begin{lemma}\label{lem:H-box-nf} Any H-box can be brought into normal form using the rules of the ZH-calculus. \end{lemma} \begin{proof} The matrix of an H-box $H_n(a)$ has 1's in every entry but the very last one. Hence, to bring an H-box into normal form, we just need to introduce `dummy' 1's for every other matrix entry. We demonstrate the principle using a binary H-box but the argument is analogous for any other arity: \[ \tikzfig{H-nf-example} \qedhere \] \end{proof} \begin{lemma}\label{lem:cup-zeroes}~ \ctikzfig{cup-zeroes} \end{lemma} \begin{proof} \[\tikzfig{cup-zeroes-proof} \qedhere\] \end{proof} \begin{lemma}\label{lem:cup-nf} The diagram of a single cup can be brought into normal form: \[ \tikzfig{cup-nf} \] \end{lemma} \begin{proof} Starting from the normal form we work our way back: \[\tikzfig{normal-cup-john} \qedhere\] \end{proof} Before we continue to the more general procedure to reduce diagrams to normal form we have to prove some results about how to deal with scalars and factors of 2 in normal forms in order to reduce the diagram. \begin{lemma}\label{lem:reduce-two-from-nf} Given integers $a_{\vec{b}}$ for which we have proven $(M_{2, a_{\vec{b}}})$ we can prove: \ctikzfig{reduce-two-from-nf} \end{lemma} \begin{proof} \[\tikzfig{reduce-two-from-nf-pf} \qedhere\] \end{proof} \begin{proposition}\label{prop:normal-to-reduced} Suppose we have proven $(M_{2,a})$ for all integers $a\in\mathbb{Z}$. Given a diagram in normal form, we can bring it to reduced normal form. \end{proposition} \begin{proof} If there are no $\tikzfig{star}$'s present we are done. Furthermore, if any of the $a_{\vec{b}}$ is odd, we are also done. So suppose there is at least one $\tikzfig{star}$ and that all the $a_{\vec{b}}$ are even. 
Then we can factor them all as $2\frac{a_{\vec{b}}}{2}$ and apply Lemma~\ref{lem:reduce-two-from-nf} in reverse to introduce a scalar $2$ to cancel a star (using Lemma~\ref{lem:scalar-2} and Lemma~\ref{lem:scalarcancelstars}). We keep repeating this procedure until either the $\tikzfig{star}$'s run out, or one of the $a_{\vec{b}}$ becomes odd. \end{proof} \subsection{Conditional reduction to normal form}\label{sec:normal-form-conditional} For bigger diagrams we will show that if a part of the diagram is in normal form, we can `consume' the rest of the diagram generator by generator to write the entire diagram into normal form. This procedure requires that we have proven the multiply, average and intro rule for all integers. Since this is difficult to do, we will first show the general procedure for bringing a diagram to normal form \emph{assuming} we have proven these identities. We will then be able to `bootstrap' the proof for the general multiply, average and intro rule using the simple cases of these rules we have already proven. We need to show that any generator of the ZH-calculus --- H-boxes, Z-spiders, and any type of wiring --- can be brought to normal form, that tensor products of normal forms can be brought to normal form, and that any diagram consisting of a normal form composed with a generator in some way can also be brought to normal form. Once we have this we know that any ZH-diagram can be brought to normal form, and hence, if two ZH-diagrams represent the same linear map they must then be equal to the same ZH-diagram. \begin{proposition}\label{prop:extension} Let $D$ be a diagram consisting of a normal form diagram for which we have proven~\eqref{eq:intro-def} for every $a$ in the normal form, juxtaposed with \dotunit{white dot}. 
Then $D$ can be brought into normal form using the rules of the ZH-calculus: \ctikzfig{extension} \end{proposition} \begin{proof} Starting from the left-hand side, which we expand using the indexed !-box notation, we calculate: \ctikzfig{extension-proof} The last diagram is a normal form diagram with $n+1$ outputs, i.e.\ the desired result. \end{proof} \begin{proposition}\label{prop:convolution} The Schur product of two normal form diagrams can be brought into normal form using the rules of the ZH-calculus, when we have proven~\eqref{eq:mult-def} for every pair $a$ and $b$ occurring in the normal forms. \ctikzfig{convolution-nf} \end{proposition} \begin{proof} This follows from \SpiderRule and Lemma~\ref{lem:convolution-iota}. \end{proof} \begin{corollary}\label{cor:tensor-product} The tensor product of two normal form diagrams can be brought into normal form using the rules of the ZH-calculus, when we have proven~\eqref{eq:intro-def} for all $a$ occurring in the normal forms, and~\eqref{eq:mult-def} for all $a$ and $b$ in the normal forms. \end{corollary} \begin{proof} A tensor product can be expressed as \ctikzfig{tensor-product} The diagram NF$_1$ and the leftmost $m$ copies of \dotunit{white dot}\ can be combined into one normal-form diagram with $(n+m)$ outputs by successive applications of Proposition~\ref{prop:extension}. Similarly, the rightmost $n$ copies of \dotunit{white dot}\ and NF$_2$ can be combined into one normal-form diagram with $(n+m)$ outputs. The desired result then follows by Proposition~\ref{prop:convolution}. \end{proof} The most difficult step is to show that contraction with a white dot preserves normal forms. This requires a couple of lemmas. First, we present a more general form of the \OrthoRule{} rule that allows us to disconnect wires in a more general way. 
\begin{lemma}\label{lem:splitting} Instead of just wires, white spiders between an arbitrary number of H-boxes can be split using \OrthoRule: \ctikzfig{splitting-rule} \end{lemma} \begin{proof}~ \ctikzfig{splitting-proof-1} \ctikzfig{splitting-proof-2} \ctikzfig{splitting-proof-3} \[\tikzfig{splitting-proof-4} \qedhere\] \end{proof} In practice, Lemma~\ref{lem:splitting} is usually applied in diagrams that are close to normal form, so that the H-boxes are connected to multiple white spiders. This allows the lemma to be applied multiple times in succession. \begin{example} Consider the following derivation, which will arise in the proof of Proposition~\ref{prop:average-integer}. For each rewrite step, the parts of the diagram that are uninvolved have been greyed out to clarify the process. In the first application of Lemma~\ref{lem:splitting}, the top leftmost spider is the important one. The rewrite step splits the bottom spider to separate the first H-box from the others, as the first H-box is the only one that is connected to the leftmost spider via an X node: \ctikzfig{splitting-ex1} Now, applying the lemma to the middle spider would have no effect as all H-boxes are connected to the middle spider via the same kind of wire (namely, simple ones). Thus, it remains to apply Lemma~\ref{lem:splitting} according to the top rightmost spider. This step splits the bottom spider to separate the second H-box from the others because the second H-box is connected to the rightmost spider via an X node while the others are connected by simple wires: \ctikzfig{splitting-ex2} \end{example} \begin{lemma}\label{lem:big-disconnect} Disconnect lemma:\footnote{A similar but incorrectly scaled result is given as Lemma~B.3 in \cite{backens2018zhcalculus}. Since any application of the lemma in that paper is always combined with an also incorrectly-scaled application of the average rule, the scalars work out there overall anyway. 
We give the correct scalar factors here.} \begin{equation}\label{eq:contraction-sep} \tikzfig{contraction-sep} \quad\ =\ \tikzfig{contraction-sep-rhs} \end{equation} \end{lemma} \begin{proof} If $n=1$, the !-boxes on both sides are indexed over the one-element set, so the result follows straightforwardly from the definition of annotated !-boxes in Section~\ref{sec:annotated-bb} and removal of the white spider via \IDRule. Note that there are zero stars in this case. For $n>1$, by Lemma~\ref{lem:annotated-expansion}, we can equivalently express the LHS as \[ \scalebox{0.83}{\tikzfig{contraction-msb-index1}} \ \namedeq{\eqref{eq:labelledHboxhigherarity}} \scalebox{0.83}{\tikzfig{contraction-msb-index2}} \] We can then apply Lemma~\ref{lem:splitting} to split the bottom spider in two: \[ \cdots \ \ \namedeq{\ref{lem:splitting}}\ \scalebox{0.8}{\tikzfig{contraction-msb-index3}} \ \namedeq{\eqref{eq:labelledHboxhigherarity}}\ \scalebox{0.8}{\tikzfig{contraction-msb-index3p}} \] By Lemma~\ref{lem:annotated-split}, we can then split the !-box into two parts, indexed over the same set to obtain: \[ \scalebox{0.9}{\tikzfig{contraction-msb-index4}} \] We now have two copies of a graph which is very similar to the LHS of \eqref{eq:contraction-sep}, but for one fewer bit. We can thus repeat the process above to split each of the two spiders using the second bit of the bitstring, then the third, and so on, until $\dotonly{white dot}\xspace$-spiders only connect pairs of $H$-spiders that disagree on the least significant bit. Each application of Lemma~\ref{lem:splitting} introduces a star, so we need to count how many times that lemma is used in total. For the first bit of $\mathbb{B}^{n-1}$, the lemma is applied once. Afterwards, the diagram has separated into two indexed !-boxes and Lemma~\ref{lem:splitting} needs to be applied to each part. Continuing in this way, the number of !-boxes which need to be split doubles in each step. 
Hence we introduce $1+2+\ldots+2^{n-2} = 2^{n-1}-1$ stars. Note that at the end of this process, each !-box will be indexed over a one-element set, so according to the definition of annotated !-boxes we can simply drop them. By replacing 2-legged $\dotonly{white dot}\xspace$-spiders with cups using \IDRule{}, and applying the definition of annotated !-boxes to re-introduce indexing over $\mathbb{B}^{n-1}$, we obtain the RHS of \eqref{eq:contraction-sep}. \end{proof} \begin{lemma}\label{lem:nf-avg} For any $a_{\vec{b}}$ we have \ctikzfig{nf-avg-lemma} \end{lemma} \begin{proof} First, we introduce a scalar $H(2)$ and get this in the !-box using Lemma~\ref{lem:intro-bangboxed} (the !-boxed form of the Intro rule): \[\scalebox{0.9}{\tikzfig{nf-avg-lemma-proof-1}}\] And then we prepare the diagram to apply Lemma~\ref{lem:cancel-two-half}: \[\tikzfig{nf-avg-lemma-proof-2}\] Note that, in the last step, \eqref{eq:labelledHboxhigherarity} introduces a scalar white dot in each !-box, resulting in $2^n$ white dots, one of which is cancelled by the $\tikzfig{star}$ outside the !-box. \end{proof} \begin{proposition}\label{prop:contraction} The diagram resulting from applying \dotcounit{white dot}\ to an output of a normal form diagram can be brought into normal form, when we have proven~\eqref{eq:average-def} for all $a$ and $b$ in the normal form: \ctikzfig{whitecounit-nf} \end{proposition} \begin{proof} Starting from an arbitrary normal form, with a \dotcounit{white dot} plugged into the right most output, we first expand the annotated !-box: \[ \tikzfig{contraction-thm-pf-prime} \] Now the diagram is ready for application of Lemma~\ref{lem:big-disconnect}, followed by the average rule: \[\scalebox{0.9}{\tikzfig{contraction-thm-pf2-prime}}\] This diagram can now be brought to normal form by application of Lemma~\ref{lem:nf-avg}. Note the stars exactly cancel the white dots introduced by that lemma. 
\end{proof} Our strategy will now be to show that any diagram can be decomposed into H-boxes, combined via the operations of extension, convolution, and contraction. This will give our completeness proof. To simplify the decomposition of diagrams into H-boxes, we prove a few corollaries. \begin{corollary}\label{cor:whitemult-nf} The diagram resulting from applying \dotmult{white dot}\ to a pair of outputs of a normal form diagram can be brought into normal form, when we have proven~\eqref{eq:intro-def} and~\eqref{eq:average-def} for all $a$ and $b$ in the normal form. \begin{equation}\label{eq:whitemult-nf} \tikzfig{whitemult-nf} \end{equation} \end{corollary} \begin{proof} Applying a \dotmult{white dot}\ to a pair of outputs has the same result as convolving with a cup, then contracting one of the outputs. That is, we can decompose \eqref{eq:whitemult-nf} as follows: \ctikzfig{whitemult-decomp} then apply Lemma~\ref{lem:cup-nf} and Propositions \ref{prop:extension}, \ref{prop:convolution}, and \ref{prop:contraction}. \end{proof} \begin{corollary}\label{cor:cap-nf} A diagram consisting of a cap applied to a normal form diagram can be transformed into a normal form diagram, when we have proven~\eqref{eq:intro-def}, \eqref{eq:average-def} and $(A_{a', b'})$ for all $a$ and $b$ in the normal form and for every $a'$ and $b'$ that are sums of labels in the normal form: \ctikzfig{cap-nf} \end{corollary} \begin{proof} Since the cap can be decomposed as $\dotcounit{white dot} \circ \dotmult{white dot}$, the result follows immediately from Corollary~\ref{cor:whitemult-nf} and Proposition~\ref{prop:contraction}. \end{proof} Thanks to Corollaries~\ref{cor:tensor-product} and \ref{cor:cap-nf}, we are able to turn any diagram of normal forms into a normal form. It only remains to show that the generators of the ZH-calculus can themselves be made into normal forms. 
We have already shown the result for H-boxes (Lemma~\ref{lem:H-box-nf}), so the following will be sufficient: \begin{lemma}\label{lem:Z-spider-nf} Any Z-spider can be brought into normal form using the rules of the ZH-calculus. \end{lemma} \begin{proof} As shown in Eq.~\eqref{eq:H-box-1}, \dotunit{white dot}{} is already a labelled H-box and thus is in normal form. By \spiderrule, $\dotonly{white dot}\xspace = \tikzfig{dot-nf}$, and hence it can be brought into normal form using Corollaries~\ref{cor:tensor-product} and \ref{cor:cap-nf}. This covers the cases of Z-spiders with 0 or 1 incident wires. We can decompose any Z-spider with $n\geq 2$ incident wires as a tensor product of $(n-1)$ cups, with each cup \dotmult{white dot}-ed with its neighbours: \ctikzfig{n-ary-Z-decomposition} If $n=2$, no \dotmult{white dot} are needed and the equality is by \IDRule instead of \SpiderRule. In either case, the diagram can be brought into normal form by applying Lemma~\ref{lem:cup-nf} and Corollaries~\ref{cor:tensor-product} and \ref{cor:whitemult-nf}. The intermediate normal forms only involve H-boxes labelled by a $0$ and $1$ and hence we have already proven the necessary cases of multiply, intro and average to prove this. \end{proof} \begin{proposition}\label{prop:completeness-conditional} If the ZH-calculus proves \eqref{eq:mult-def}, \eqref{eq:intro-def}, and \eqref{eq:average-def} for all integers $a$ and $b$, then the ZH-calculus is complete, i.e.\ for any two ZH-diagrams $D_1$ and $D_2$, if $\llbracket D_1 \rrbracket = \llbracket D_2 \rrbracket$ then $D_1$ is transformable into $D_2$ using the rules of the ZH-calculus. \end{proposition} \begin{proof} By Theorem~\ref{thm:nf-unique}, it suffices to show that any ZH diagram can be brought into reduced normal form. Lemmas~\ref{lem:H-box-nf} and~\ref{lem:Z-spider-nf} suffice to turn any generator into normal form. 
Then using Corollary~\ref{cor:tensor-product} we can turn any tensor product of generators into a normal form and with Corollary~\ref{cor:cap-nf} we can then apply any sort of wiring and reduce it to normal form. Finally, Proposition~\ref{prop:normal-to-reduced} allows us to bring the normal form to reduced normal form. \end{proof} To prove completeness, it now remains to prove \eqref{eq:mult-def}, \eqref{eq:intro-def}, and \eqref{eq:average-def} for all integers $a$ and $b$. To assist in this, let us prove a few lemmas that will help reduce diagrams to normal form when we have these rules for certain values of $a$ and $b$. As we already have many individual cases of those rules --- see Lemma~\ref{lem:mult-simple-values} for~\eqref{eq:mult-def} and Lemma~\ref{lem:intro-phasefree} for~\eqref{eq:intro-def}, as well as Section~\ref{s:average} for~\eqref{eq:average-def} --- these lemmas can already be applied to many diagrams. \begin{lemma}\label{lem:Hadamard-nf} Suppose we have $(I_{a_{b\vec{c}}})$ for all $b\in\mathbb{B}$, $\vec{c}\in\mathbb{B}^{n-1}$. Then we can consume a Hadamard gate into the normal form: \ctikzfig{hadamard-lemma-prime} If we also have $A_{a_{0\vec{c}},a_{1\vec{c}}}$ and $A_{a_{0\vec{c}},-a_{1\vec{c}}}$ for all $\vec{c}\in\mathbb{B}^{n-1}$, then in fact: \ctikzfig{hadamard-lemma} \end{lemma} \begin{proof} We start by manipulating the !-boxes and layout, and then perform the rewriting: \ctikzfig{hadamard-lemma-proof1} \ctikzfig{hadamard-lemma-proof2} \ctikzfig{hadamard-lemma-proof3} \[\tikzfig{hadamard-lemma-proof4} \qedhere\] \end{proof} The following lemma allows us to (dis)connect integer-labelled H-boxes that are connected to a superset of a given $0$-labelled H-box. 
\begin{lemma}\label{lem:AND-1} For any integer $a$, \ctikzfig{AND-lemma-1-ext} \end{lemma} \begin{proof}~ \[\scalebox{0.9}{\tikzfig{AND-lemma-1-ext-proof}} \qedhere\] \end{proof} \begin{lemma}\label{lem:zero-box-nf} Suppose we have proven $(I_{a_{b\vec{c}}})$ for all $b\in\mathbb{B}$, $\vec{c}\in\mathbb{B}^{n-1}$ and $(A_{a_{0\vec{c}},a_{1\vec{c}}})$ for all $\vec{c}\in\mathbb{B}^{n-1}$, then we can consume an $H(0)$ into the normal form: \ctikzfig{zero-box-nf2} \end{lemma} \begin{proof} The proof is analogous to that of Lemma~\ref{lem:Hadamard-nf}, using Lemma~\ref{lem:AND-1} instead of $(M_{-1,a_{1\vec{c}}})$. \end{proof} \section{Arithmetic}\label{sec:arithmetic} In this section we show we can do simple arithmetic with labelled H-boxes. Namely we will show first that the following equation holds for all integers $a$ and $b$. \begin{equation}\label{eq:addition-gadget} \tikzfig{H-box-addition} \end{equation} Hence, we can add H-box labels together. This will be crucial for when we will prove the general average rule in Section~\ref{sec:completeness}. Secondly, we will prove the multiply rule for all integers. \subsection{Proving addition for natural numbers} First we will prove \eqref{eq:addition-gadget} for natural numbers, which requires showing that \eqref{eq:addition-gadget} acts as expected when $b=1$, and that furthermore the `addition gadget' of \eqref{eq:addition-gadget} is associative. For this we need a couple of lemmas. The next lemma, while looking deceptively simple, has a quite involved proof, requiring repeated use of Lemma~\ref{lem:splitting}. It was found by translating both sides to normal form, and then simplifying the intermediate steps. 
\begin{lemma}\label{lem:dedup}~ \ctikzfig{had-Z-cancel} \end{lemma} \begin{proof} \[\tikzfig{dedup-proof-1}\] \[\tikzfig{dedup-proof-2}\] \[\tikzfig{dedup-proof-3}\] \[\tikzfig{dedup-proof-4}\] \[\tikzfig{dedup-proof-5} \qedhere\] \end{proof} \begin{multicols}{3} \begin{lemma}\label{lem:zero-double-connection}~ \ctikzfig{zero-double-connection} \end{lemma} \begin{lemma}\label{lem:zero-gray-push}~ \ctikzfig{zero-gray-push} \end{lemma} \begin{lemma}\label{lem:zero-triangle-disconnect}~ \ctikzfig{zero-triangle-disconnect} \end{lemma} \end{multicols} \begin{proof}[Proof of Lemma~\ref{lem:zero-double-connection}] \[\tikzfig{zero-double-connection-pf} \qedhere\] \end{proof} \begin{proof}[Proof of Lemma~\ref{lem:zero-gray-push}] \[\scalebox{0.9}{\tikzfig{zero-gray-push-proof}}\] \end{proof} \begin{proof}[Proof of Lemma~\ref{lem:zero-triangle-disconnect}] \[\tikzfig{zero-triangle-disconnect-proof} \qedhere\] \end{proof} \begin{lemma}\label{lem:addition-1-is-successor} Adding 1 is the same as applying the successor. \ctikzfig{addition-1-is-successor} \end{lemma} \begin{proof} \[\tikzfig{addition-1-is-successor-pf} \qedhere\] \end{proof} \begin{lemma}\label{lem:addition-associative} Addition is associative. \ctikzfig{addition-associative} \end{lemma} \begin{proof} We can reduce the LHS to a diagram that is symmetric in the three inputs: \[\scalebox{0.9}{\tikzfig{addition-associative-pf}}\] By symmetry we can also bring the RHS to this last diagram, and hence the LHS and RHS are equal. \end{proof} \begin{lemma}\label{lem:addition-natural} Let $a,b\in \mathbb{N}$. Then the following holds: \ctikzfig{H-box-addition} \end{lemma} \begin{proof} We prove this by induction on $b$. For any $a$, the case $b=0$ is straightforward: \ctikzfig{H-box-add-0} Furthermore, for any $a$, Lemma~\ref{lem:addition-1-is-successor} shows the case $b=1$. Now, suppose there is some $b$ such that the desired result holds for any $a$, and consider $b+1$. 
Then \ctikzfig{H-box-add-proof} where the step marked (*) is by `only topology matters', and the step marked (**) is the inductive hypothesis. Thus, the result follows by commutativity of addition. \end{proof} \subsection{Proving addition for all integers} To generalise addition to arbitrary integers, we need two more lemmas. \begin{lemma}\label{lem:triangle-Z}~ \ctikzfig{triangle-Z} \end{lemma} \begin{proof}~ \ctikzfig{triangle-Z-proof-1} \[\tikzfig{triangle-Z-proof-2}\qedhere\] \end{proof} \begin{lemma}\label{lem:triangle-inverse}The successor has an inverse. \ctikzfig{triangle-inverse} \end{lemma} \begin{proof}~ \ctikzfig{triangle-inverse-proof-1} \ctikzfig{triangle-inverse-proof-2} \ctikzfig{triangle-inverse-proof-3} \[\tikzfig{triangle-inverse-proof-4} \qedhere \] \end{proof} This lemma actually only shows that the successor operation has a one-sided inverse. But by conjugating the top and bottom using a negate spider, we see that this is in fact a two-sided inverse. \begin{proposition}\label{prop:addition} Let $a,b\in \mathbb{Z}$. Then the following holds: \ctikzfig{H-box-addition} \end{proposition} \begin{proof} If $a,b\geq 0$ this is just Lemma~\ref{lem:addition-natural}. If both $a,b\leq 0$ we simply do: \ctikzfig{H-box-addition-negative} Now suppose $a>0$ and $b\leq 0$, i.e.\ $-b\geq 0$. We prove by induction on $-b\in\mathbb{N}$. If $-b=0$ this is trivial. Suppose it holds for $-b$. In the calculation below we denote the induction step by (*): \ctikzfig{H-box-addition-negative2} \end{proof} \subsection{Proving multiplication} In this section we will show that the multiply rule introduced in Section~\ref{sec:mult-rule} holds for all integers: \begin{equation}\label{eq:mult-gadget} \tikzfig{multiply-rule-phased} \end{equation} It is in any case clear that this operation is commutative, associative and the unit is the $1$-labelled H-box. 
To prove that this indeed acts as multiplication it then suffices to prove it distributes over addition, which requires a bit of set-up to prove. \begin{multicols}{3} \begin{lemma}\label{lem:zero-projector}~ \ctikzfig{zero-projector} \end{lemma} \begin{lemma}\label{lem:zero-becomes-two}~ \ctikzfig{zero-becomes-two} \end{lemma} \begin{lemma}\label{lem:zero-push-addition}~ \ctikzfig{zero-push-addition} \end{lemma} \end{multicols} \begin{proof}[Proof of Lemma~\ref{lem:zero-projector}] Starting from the RHS, we apply \introzero to the bottom $H(0)$-box. The newly-introduced $H(0)$-boxes can then be removed using Lemma~\ref{lem:AND-1}: first with the top binary $H(0)$-box, then with the one on the right, and finally with the one on the left. \ctikzfig{zero-projector-proof} After replacing the red dashed lines with NOT gates and applying \idrule, this is equal to the LHS. \end{proof} \begin{proof}[Proof of Lemma~\ref{lem:zero-becomes-two}] First: \ctikzfig{zero-becomes-two-new-pf1} Note that -- up to one bent wire and the introduction of trivial $H(1)$-boxes -- the resulting diagram consists of an $H(0)$-box on the left output, applied to a normal form diagram. The H-box parameters in the normal-form diagram are \[ a_{b\vec{c}} = \begin{cases}0&\text{if } b=0,\,\vec{c}=11 \text{ or } b=1,\,\vec{c}=10 \\ 1 &\text{otherwise.}\end{cases} \] By Lemma~\ref{lem:intro-phasefree}, we have both $I_0$ and $I_1$, and by Lemma~\ref{lem:average-phasefree}, we have $A_{x,y}$ for any $x,y\in\{0,1\}$. 
Thus we may apply Lemma~\ref{lem:zero-box-nf} (where we have left out the bent wire for simplicity): \ctikzfig{zero-becomes-two-new-pf2} For the step denoted (*), note that \[ \begin{pmatrix}1&1\\1&0\end{pmatrix} \begin{pmatrix}1&1&1&0\\1&1&0&1\end{pmatrix} = \begin{pmatrix}2&2&1&1\\1&1&1&0\end{pmatrix} \] so \[ a_{0\vec{c}}+(1-b)a_{1\vec{c}} = \begin{cases} 2 &\text{if } b=0 \text{ and } \vec{c}\in\{00,01\} \\ 0 &\text{if } b=1, \vec{c}=11 \\ 1 &\text{otherwise,}\end{cases} \] and that $H(1)$-boxes can be left out of the diagram by \eqref{eq:unit} and Lemma~\ref{lem:white-not-cancel}. Hence, re-introducing the bent wire, we find: \[ \tikzfig{zero-becomes-two-new-pf3} \qedhere \] \end{proof} \begin{proof}[Proof of Lemma~\ref{lem:zero-push-addition}] \[\tikzfig{zero-push-addition-pf-1} \qedhere\] \end{proof} \begin{lemma}\label{lem:n-copy} For any natural number $n$: \ctikzfig{n-copy-lemma} \end{lemma} \begin{proof} For $n=0$ this is straightforward. We prove by induction, so assume it holds for some $n$. In the calculation below we denote the induction step by (*). \[\scalebox{0.95}{\tikzfig{n-copy-lemma-proof}}\] \end{proof} \begin{lemma}\label{lem:distributivity-natural} The multiplication gadget distributes over addition, i.e.\ for any natural number $n\in \mathbb{N}$: \ctikzfig{mult-distributes-addition} \end{lemma} \begin{proof} \[\tikzfig{mult-distributes-addition-proof}\qedhere\] \end{proof} \begin{proposition}\label{prop:mult-rule-rational} The multiply rule holds for any integers $a$ and $b$: \ctikzfig{multiply-rule-bb} \end{proposition} \begin{proof} By Proposition~\ref{prop:multiply-bb} it suffices to show the non--!-boxed version, i.e.\ \eqref{eq:mult-def}. First we prove \eqref{eq:mult-def} for $a,b\in \mathbb{N}$. The proof is by induction on $b$ for any $a\in\mathbb{N}$. The base cases of $b=0$ and $b=1$ have already been shown in Lemma~\ref{lem:mult-simple-values}. 
For the induction step, assume there exists some $b$ such that \eqref{eq:mult-def} holds for any $a$. Then: \ctikzfig{mult-rule-inductive} where the step labelled (*) uses the base case $b=1$ and the inductive hypothesis. Now for negative numbers, we have: \ctikzfig{mult-rule-neg} and similarly for $b$, so the multiplication reduces to that of non-negative numbers. If there are two copies of \whitephase{\neg}, they cancel by Lemma~\ref{lem:znots-cancel}. \end{proof} \section{Completeness}\label{sec:completeness} We write $\text{ZH}\xspace \vdash D_1 = D_2$ when $D_1$ and $D_2$ can be proven to be equal using the rules of the ZH-calculus. The ZH-calculus is \emph{complete} when $\intf{D_1}=\intf{D_2}$ implies that ${\text{ZH}\xspace \vdash D_1=D_2}$. In Proposition~\ref{prop:completeness-conditional}, a `conditional' completeness result was proven, that shows that if the ZH-calculus can prove the average, intro and multiply rule for all integers, then the ZH-calculus is complete. Proposition~\ref{prop:mult-rule-rational} shows that the multiply rule holds for all integers, so that it remains to prove the intro and average rule. That is what we will do in this section. First, we will establish the average rule. \begin{lemma}\label{lem:grey-dot-nf} The ternary X-spider can be brought to normal form. \end{lemma} \begin{proof} \begin{equation*} \tikzfig{grey-dot-nf-proof} \end{equation*} where there are three applications of Lemma~\ref{lem:splitting} in the second-to-last step to break up the white spider. This last diagram is in normal form if we introduce the necessary trivial $H(1)$ boxes. \end{proof} \begin{proposition}\label{prop:average-integer} The ZH-calculus proves \eqref{eq:average-def} for all integers $a$ and $b$. \end{proposition} \begin{proof} It suffices to show that: \begin{equation}\label{eq:avg-as-addition} \tikzfig{avg-as-addition} \end{equation} As then \ctikzfig{avg-as-addition-2} as required. 
We show this by bringing both sides of Eq.~\eqref{eq:avg-as-addition} to normal form. For the RHS of \eqref{eq:avg-as-addition}, first recall that we can bring the X-spider to normal form using Lemma~\ref{lem:grey-dot-nf}, and hence: \ctikzfig{avg-proof-rhs} For the LHS of \eqref{eq:avg-as-addition}, bending the wires up for simplicity, we first have \ctikzfig{avg-proof-lhs1-prime} Ignoring the white dot, this is a normal form diagram with $a_{011}=a_{110}=0$, $a_{111}=-1$ and $a_{\vec{b}}=1$ otherwise. Lemma~\ref{lem:average-phasefree} proves $(A_{a,b})$ for all combinations of those values, thus we can apply the second part of Lemma~\ref{lem:Hadamard-nf} to show \ctikzfig{avg-proof-lhs2-prime} Up to a partial transpose (for easier legibility of the matrices), this corresponds to \[ \begin{pmatrix}1&1\\1&-1\end{pmatrix} \begin{pmatrix}1&1&1&0\\1&1&0&-1\end{pmatrix} = \begin{pmatrix}1+1&1+1&1+0&0-1\\1-1&1-1&1+0&0+1\end{pmatrix} = \begin{pmatrix}2&2&1&-1\\0&0&1&1\end{pmatrix}. \] Again, this is a normal form diagram, this time with $a_{000}=a_{001}=2$, $a_{011}=-1$, $a_{100}=a_{101}=0$ and $a_{010}=a_{110}=a_{111}=1$. Now we apply the first part of Lemma~\ref{lem:Hadamard-nf} (note the changes due to the H-box being on the third output): \ctikzfig{avg-proof-lhs3-prime} We have $a_{\vec{c}0} = \pm a_{\vec{c}1}$ for all $\vec{c}\in\mathbb B^2$, so the two H-boxes in each copy of the !-box contain either equal or opposite integers. By Lemma~\ref{lem:intro-phasefree} we have $(I_{a})$ for all $a\in\{-1,0,1,2\}$, so we can use Lemma~\ref{lem:avg-neg}. 
Thus, \ctikzfig{avg-proof-lhs4-prime} Up to a partial transpose, this sequence of rewriting steps corresponds to \[ \begin{pmatrix}2&2\\1&-1\\0&0\\1&1\end{pmatrix} \begin{pmatrix}1&1\\1&-1\end{pmatrix} = \begin{pmatrix}2+2&2-2\\1-1&1+1\\0&0\\1+1&1-1\end{pmatrix} = 2 \begin{pmatrix}2&0\\0&1\\0&0\\1&0\end{pmatrix} \] Combining everything and bending back the legs, we find \ctikzfig{avg-proof-lhs5-prime} This is the same as the RHS. \end{proof} It now remains to prove~\eqref{eq:intro-def} for all integers $a$. We can reduce this problem to proving it for natural numbers using the following lemma. \begin{lemma}\label{lem:intro-mult} Suppose we have proven $(I_a)$ and $(I_b)$ for some integers $a$ and $b$. Then we can also prove $(I_{a\cdot b})$. In particular, if we have $(I_a)$, then we also get $(I_{-a})$. \end{lemma} \begin{proof} We bend all the wires up for an easier presentation. The proof is then straightforward: \ctikzfig{intro-for-mult-pf} Because we have $(I_{-1})$ (Lemma~\ref{lem:intro-phasefree}), we get $(I_{-a})$ if we have $(I_a)$. \end{proof} \begin{proposition}\label{prop:intro-rational} The ZH-calculus proves \eqref{eq:intro-def} for all integers $a$. \end{proposition} \begin{proof} By the previous lemma it suffices to prove $(I_n)$ for all natural numbers. Note that we have \introzero and \introone so that by induction it suffices to show that we can get $(I_{n+1})$ out of $(I_n)$. We will show that: \begin{equation}\label{eq:intro-induction} \tikzfig{intro-induction-step} \end{equation} This is sufficient because then: \ctikzfig{intro-induction-step-2} We prove Eq.~\eqref{eq:intro-induction} by reducing both sides of the equation to normal form, beginning with the part that is the same on both sides. To start, we bring the top part of the diagram into normal form and then apply the first part of Lemma~\ref{lem:Hadamard-nf} to the H-box on the bottom left wire. 
\ctikzfig{intro-ind-nf-proof1} For the final step, note that the coefficients $a_{\vec{c}bd}$ of the normal-form diagram are $\pm 1$, indeed $a_{0101}=a_{0111}=a_{1101}=a_{1111}=-1$ and $a_{\vec{c}bd}=1$ otherwise. We have $(I_1)$ and $(I_{-1})$ by Lemma~\ref{lem:intro-phasefree}. Thus we can use the first equalities from Lemmas~\ref{lem:avg-neg} and~\ref{lem:avg-equal}. This transformation corresponds to \[ \begin{pmatrix}1&1&1&1\\1&1&-1&-1\\1&1&1&1\\1&-1&1&-1\end{pmatrix} \begin{pmatrix}1&0&1&0\\0&1&0&1\\1&0&-1&0\\0&1&0&-1\end{pmatrix} = \begin{pmatrix}1+1&1+1&1-1&1-1\\1-1&1-1&1+1&1+1\\1+1&1+1&1-1&1-1\\1+1&-1-1&1-1&-1+1\end{pmatrix} = 2\begin{pmatrix}1&1&0&0\\0&0&1&1\\1&1&0&0\\1&-1&0&0\end{pmatrix} \] Let $z_{\vec{c}bd}:=\frac{1}{2}\left(a_{\vec{c}0d}+(-1)^b a_{\vec{c}1d}\right)$, then \[ z_{\vec{c}bd} = \begin{cases} -1 & \text{if } \vec{c}bd=1101 \\ 0 & \text{if } (\vec{c}\in\{00,10,11\}\wedge b=1) \vee (\vec{c}=01\wedge b=0) \\ 1 &\text{otherwise.} \end{cases} \] For each $\vec{c}$ and $b$, we have $z_{\vec{c}b0} = \pm z_{\vec{c}b1}$, and by Lemma~\ref{lem:intro-phasefree} we have $(I_0)$, $(I_1)$ and $(I_{-1})$. Hence we can again apply the first part of Lemma~\ref{lem:Hadamard-nf}, followed by the first equalities of Lemmas~\ref{lem:avg-neg} and \ref{lem:avg-equal}. 
\ctikzfig{intro-ind-nf-proof2} This step corresponds to: \[ 2 \begin{pmatrix}1&1&0&0\\0&0&1&1\\1&1&0&0\\1&-1&0&0\end{pmatrix} \begin{pmatrix}1&1&0&0\\1&-1&0&0\\0&0&1&1\\0&0&1&-1\end{pmatrix} = 2 \begin{pmatrix}1+1&1-1&0+0&0+0\\0+0&0+0&1+1&1-1\\1+1&1-1&0+0&0+0\\1-1&1+1&0+0&0+0\end{pmatrix} = 4 \begin{pmatrix}1&0&0&0\\0&0&1&0\\1&0&0&0\\0&1&0&0\end{pmatrix} \] Combining everything, we have \begin{equation}\label{eq:intro-ind-middle} \tikzfig{intro-ind-nf} \end{equation} where \[ w_{\vec{c}bd} := \frac{1}{2}\left( z_{\vec{c}b0} + (-1)^d z_{\vec{c}b1} \right) = \begin{cases} 1 & \text{if } \vec{c}bd \in\{0000, 0110, 1000, 1101\} \\ 0 & \text{otherwise.} \end{cases} \] Now, for the left-hand side of Eq.~\eqref{eq:intro-induction}, let $y_{\vec{c}bd} := w_{\vec{c}(1-b)(1-d)}$, so that \[ y_{\vec{c}bd} = \begin{cases} 1 & \text{if } \vec{c}bd \in\{0011, 0101, 1011, 1110\} \\ 0 & \text{otherwise.} \end{cases} \] Note that $y_{\vec{c}0d}y_{\vec{c}1d} = 0$ for all $\vec{c},d$, so we will be able to apply Lemma~\ref{lem:zero-box-nf} because we have already proved $(A_{y_{\vec{c}0d},y_{\vec{c}1d}})$ for all $\vec{c},d$. By absorbing the two NOTs on the bottom wires and relabelling, we find: \ctikzfig{intro-ind-lhs1} This corresponds to \[ 4 \begin{pmatrix}0&0&0&1\\0&1&0&0\\0&0&0&1\\0&0&1&0\end{pmatrix} \begin{pmatrix}1&0&1&0\\0&1&0&1\\1&0&0&0\\0&1&0&0\end{pmatrix} = 4 \begin{pmatrix}0&1&0&0\\0&1&0&1\\0&1&0&0\\1&0&0&0\end{pmatrix} \] Finally, let $x_{\vec{c}bd} := y_{\vec{c}0d}+(1-b)y_{\vec{c}1d}$, then \[ x_{\vec{c}bd} = \begin{cases} 1 &\text{if } \vec{c}bd \in\{0001,0101,0111,1001,1100\} \\ 0 &\text{otherwise.} \end{cases} \] Note that $x_{\vec{c}b0}x_{\vec{c}b1}=0$ for all $\vec{c}b$, so we use the same process for the final 0-labelled H-box. 
\ctikzfig{intro-ind-lhs2} This rewrite step corresponds to: \[ 4 \begin{pmatrix}0&1&0&0\\0&1&0&1\\0&1&0&0\\1&0&0&0\end{pmatrix} \begin{pmatrix}1&1&0&0\\1&0&0&0\\0&0&1&1\\0&0&1&0\end{pmatrix} = 4 \begin{pmatrix}1&0&0&0\\1&0&1&0\\1&0&0&0\\1&1&0&0\end{pmatrix} \] Apart from the two white dots, the left-hand side diagram of Eq.~\eqref{eq:intro-induction} has now been brought into normal form. For the right-hand side of Eq.~\eqref{eq:intro-induction}, note that $w_{b0\vec{c}}w_{b1\vec{c}}=0$ for all $b,\vec{c}$, so we can again use Lemma~\ref{lem:zero-box-nf}, before absorbing the NOT and relabelling: \ctikzfig{intro-ind-rhs} This corresponds to \begin{align*} 4 \begin{pmatrix}0&1&0&0\\1&0&0&0\\0&0&0&1\\0&0&1&0\end{pmatrix} \begin{pmatrix}1&1&0&0\\1&0&0&0\\0&0&1&1\\0&0&1&0\end{pmatrix} \begin{pmatrix}1&0&0&0\\0&0&1&0\\1&0&0&0\\0&1&0&0\end{pmatrix} &= 4 \begin{pmatrix}0&1&0&0\\1&0&0&0\\0&0&0&1\\0&0&1&0\end{pmatrix} \begin{pmatrix}1&0&1&0\\1&0&0&0\\1&1&0&0\\1&0&0&0\end{pmatrix} \\ &= 4 \begin{pmatrix}1&0&0&0\\1&0&1&0\\1&0&0&0\\1&1&0&0\end{pmatrix} \end{align*} which is equal to the LHS, so \eqref{eq:intro-induction} is proved. \end{proof} \begin{theorem}\label{thm:ZH-completeness} The ZH-calculus is complete. \end{theorem} \begin{proof} Propositions~\ref{prop:mult-rule-rational}, \ref{prop:average-integer} and \ref{prop:intro-rational} show that~\eqref{eq:mult-def}, \eqref{eq:average-def} and~\eqref{eq:intro-def} are provable in the ZH-calculus for all integers $a$ and $b$. Hence Proposition~\ref{prop:completeness-conditional} shows that the ZH-calculus is complete. \end{proof} \section{The ZH-calculus over arbitrary rings}\label{sec:zh-ring} The ZH-calculus as defined above represents matrices over $\mathbb{Z}[\frac{1}{2}]$. To handle these efficiently, we introduced integer labels on H-boxes. In this section, we extend this idea: we allow H-boxes to be labelled \emph{a priori} by elements of some ring $R$. 
As a result, diagrams can express matrices in $R$-\textbf{bit}, the full subcategory of $R$-modules where the objects are of the form $(R\oplus R)^{\ensuremath{\otimes}\xspace n}$. This means our diagrams are interpreted as matrices of shape $2^n\times 2^m$ with entries in $R$. The conditions on $R$ and the ways in which we show completeness will be the core of this section. We will dub this new calculus \ensuremath{\text{ZH}_R}\xspace, or `ZH over $R$', with the original phase-free calculus simply called `ZH' without a subscript. The ruleset of \ensuremath{\text{ZH}_R}\xspace\ will be a strict superset of the rules of Figure~\ref{fig:phasefree-rules}, and hence anything we have already proven for ZH will remain true in \ensuremath{\text{ZH}_R}\xspace. The original ZH-calculus as introduced in \cite{backens2018zhcalculus} represented matrices over the complex numbers, and hence in our notation is called \ensuremath{\text{ZH}_{\mathbb{C}}}\xspace. The utility of \ensuremath{\text{ZH}_{\mathbb{C}}}\xspace\ is that it can describe evolutions of qubits. This does not need to be the only useful choice of ring: the ZW-calculus is similarly parametrised by a commutative ring \cite{hadzihasanovic2017algebra}, and this flexibility is what allowed for the proof of completeness for Clifford+T ZX via ZW$_{\mathbb{Z}[\half]}$ \cite{SimonCompleteness}\footnote{ Due to these ideas emerging concurrently the authors of \cite{SimonCompleteness} do not explicitly use ZW$_{\mathbb{Z}[\half]}$ but something very similar. }. We will show that we can make \ensuremath{\text{ZH}_R}\xspace\ sound, complete, and universal into $R$-\textbf{bit} provided that $R$ is commutative with $1 \neq 0$, and the element $2$ is not a zero-divisor. Unitality of the ring is required so that we can preserve the interpretation of the $\dotmult{white dot}$ generator, which uses $0$ and $1$. 
Commutativity of $R$ is required because we require the following isometry of diagrams to be sound: \[ \scalar{hadamard}{a}\ \scalar{hadamard}{b} \simeq \scalar{hadamard}{b}\ \scalar{hadamard}{a} \implies \intf{\scalar{hadamard}{a}\ \scalar{hadamard}{b}} = \intf{\scalar{hadamard}{b}\ \scalar{hadamard}{a}} \implies a \times b = b \times a \] The condition on the element 2 is needed since otherwise we would no longer have a well-defined relationship allowing us to go between white spiders and grey spiders as in \NotDef and \GreyDef. Likewise we would no longer be able to freely introduce pairs of Hadamard gates via the \HHRule rule. These relationships are so important that we exclude the case where 2 is a zero-divisor from this paper and leave it to future work. We will first deal with the case where $\half \in R$ in Section~\ref{sec:ring-with-half}, and afterwards we will adapt the results to work when $\half \not\in R$ in Section~\ref{sec:ring-without-half}. When $\half\in R$ we will show that $\dstar$ is equal to the $H(\half)$ box with no inputs or outputs, and hence the $\dstar$ generator is superfluous. The completeness result for when $\half \in R$ uses the rules given in Figure~\ref{fig:ZH-rules}, which are a superset of the rules given in Figure~\ref{fig:phasefree-rules}. If the ring $R$ does not contain $\half$ but does contain $2$, which is not a zero-divisor, then the ring $R[\half]$ is well defined. We can then use the completeness of \ensuremath{\text{ZH}_{R[\half]}}\xspace\ and the introduction of a meta-rule to eliminate all instances of the $\dstar$ generator to give completeness of \ensuremath{\text{ZH}_R}\xspace\ for $\half \notin R$. Note that the ZH-calculus as introduced in Section~\ref{sec:phase-free-ZH} is not the same as $\ensuremath{\text{ZH}}\xspace_R$ for any $R$, since in the ZH-calculus the labels are from $\mathbb{Z}$ but the interpretation is into $\mathbb{Z}[\half]$-\textbf{bit}. 
\subsection{ZH-calculus over rings with a half}\label{sec:ring-with-half} In this section we will take $R$ to be a commutative unital ring that contains $\half$ (and so $2$ is not a zero-divisor). We generalise the labelled H-boxes of Section~\ref{sec:labelledHboxes} by letting the H-boxes in \ensuremath{\text{ZH}_R}\xspace\ be labelled by any element $r\in R$: \[ \intf{\bracedSpider{hadamard}{r}} := \sum r^{i_1\ldots i_m j_1\ldots j_n} \ket{j_1\ldots j_n}\bra{i_1\ldots i_m} \] As in Section~\ref{sec:ZH-generators}, the sum in the second equation is over all $i_1,\ldots, i_m, j_1,\ldots, j_n\in\{0,1\}$ so that the H-box above represents a matrix with all entries equal to 1, except the bottom right element, which is $r$. If the label is $-1$ (so that it matches the usual H-box of Section~\ref{sec:ZH-generators}) we will usually leave out the label. Besides these more general H-boxes, the other generators are identical to those found in Section~\ref{sec:ZH-generators}. Note that because $\half \in R$, the star generator is redundant: \[ \intf{\dstar} = \half = \intf{\scalar{hadamard}{\half}} \] We will derive the corresponding diagrammatic equality later in Lemma~\ref{lem:star-is-half}. We could therefore use just the following generators when $\half \in R$: \[ \spider{white dot}{} \ ,\ \spider{hadamard}{r} \] But in order to retain compatibility with the previous results, we will continue to write $\!\dstar\!$ to represent the scalar $\half$. As before, we define the grey spiders and NOT gate of \eqref{eq:defx} as derived generators. With these generators and derived generators established we can state our theorem of universality. \begin{theorem}\label{thm:ZHR-universality} The calculus \ensuremath{\text{ZH}_R}\xspace\ is universal over $R$-\textbf{bit}, shown using the unique normal form diagram: \begin{equation} \intf{ \, \prod_{\vec{b} \in \mathbb B^n} \big( \iota_{\vec{b}} \circ H_n(a_{\vec{b}}) \big) } = \sum_{\vec{b} \in \mathbb B^n} a_{\vec{b}} \ket{\vec{b}}. 
\end{equation} \end{theorem} \begin{proof} \label{prf:thm:ZHR-universality} The equation above was demonstrated in the proof of Theorem~\ref{thm:nf-unique}. The lack of stars in this version comes from noting that every element in $R$ is divisible by $2$ (as $\half \in R$) and so stars are always subsumed into the ring elements $a_{\vec{b}}$ in the reduced normal form. \end{proof} Now let us establish our ruleset for the ZH-calculus over $R$. The rules for \ensuremath{\text{ZH}_R}\xspace\ are given in Figure~\ref{fig:ZH-rules} along with the same `only topology matters' meta rule and all the same symmetry conditions as before: \ctikzfig{generator-symmetries} Here, $a\in R$ is arbitrary. \begin{figure} \caption{The set of rules for the ZH$_R$-calculus. Throughout, $m,n$ are nonnegative integers and $a,b$ are arbitrary elements of the commutative unital ring $R$ that contains a half. The lowercased rules are the same as those in Figure~\ref{fig:phasefree-rules}.} \label{fig:ZH-rules} \end{figure} In comparison to the rules of Figure~\ref{fig:phasefree-rules}, \HPhaseRule has been generalised from \HFuseRule to allow arbitrary labelled H-boxes and \UnitRule has been added to relate the white dot to an H-box labelled by the $1$ element of the ring. The rules \MultPhaseRule, \AvgRule and \IntroRule already appeared in Section~\ref{sec:avg-intro-mult} as \eqref{eq:mult-def}, \eqref{eq:average-def} and \eqref{eq:intro-def}, but there they were derived rules that applied to integer labelled H-boxes, while here they are axioms that relate the ring elements of $R$. Note that it is not immediately obvious that the $\mathbb{Z}$-labelled H-boxes of Section~\ref{sec:labelledHboxes} correspond to the $R$-labelled H-boxes of the \ensuremath{\text{ZH}_R}\xspace-calculus. We will prove this imminently, though first we observe the soundness of \ensuremath{\text{ZH}_R}\xspace. \begin{proposition} \label{prop:ZHR-sound} The rules of Figure~\ref{fig:ZH-rules} are sound. 
\end{proposition} \begin{proof} \label{prf:prop:ZHR-sound} Because of Proposition~\ref{prop:phasefree-sound}, we simply need to check that the rules not already present in ZH are sound; these are \HPhaseRule, \MultPhaseRule, \AvgRule, \IntroRule, and \UnitRule. This is done by straightforward calculation. \end{proof} To show how the labelled H-boxes of ZH and \ensuremath{\text{ZH}_R}\xspace\ relate, we first show that $H(1)$ indeed satisfies \eqref{eq:unit}: \begin{lemma}\label{lem:unit-bb} The phase-1 H-box of any arity decomposes: \ctikzfig{unit-bangboxed} \end{lemma} \begin{proof} \[ \tikzfig{unit-bb-proof} \qedhere \] \end{proof} \begin{lemma}\label{lem:two-cancel}~ \ctikzfig{two-cancel} \end{lemma} \begin{proof} \[\tikzfig{two-cancel-pf} \qedhere\] \end{proof} Now we can also establish that $H(0)$ and $H(2)$ are as in Section~\ref{sec:labelledHboxes}. \begin{multicols}{3} \begin{lemma}\label{lem:zero-is-grey}~ \ctikzfig{zero-is-grey} \end{lemma} \null \columnbreak \begin{lemma}\label{lem:two-as-diagram}~ \ctikzfig{two-as-diagram} \end{lemma} \null \columnbreak \begin{lemma}\label{lem:two-scalar}~ \ctikzfig{scalar-2} \end{lemma} \end{multicols} \begin{proof}[Proof of Lemma~\ref{lem:zero-is-grey}] \[\tikzfig{zero-is-grey-pf} \qedhere\] \end{proof} \begin{proof}[Proof of Lemma~\ref{lem:two-as-diagram}] \[\tikzfig{two-as-diagram-pf} \qedhere\] \end{proof} \begin{proof}[Proof of Lemma~\ref{lem:two-scalar}] Identical to the proof of Lemma~\ref{lem:scalar-2}. \end{proof} With these equivalences established, we will now use induction to show that the labelled H-boxes in \ensuremath{\text{ZH}}\xspace\ are equivalent (using the rules of \ensuremath{\text{ZH}_R}\xspace) to the corresponding H-boxes in \ensuremath{\text{ZH}_R}\xspace. 
\begin{proposition} \label{prop:ZH-and-ZHR-same-labels} All the integer labelled H-boxes of the \ensuremath{\text{ZH}}\xspace-calculus (Section~\ref{sec:labelledHboxes}) are provably equal in \ensuremath{\text{ZH}_R}\xspace\ to the corresponding labelled H-box in the ZH$_R$-calculus. \end{proposition} \begin{proof} \label{prf:prop:ZH-and-ZHR-same-labels} We will denote a labelled \ensuremath{\text{ZH}}\xspace\ H-box of Section~\ref{sec:labelledHboxes} by $H(n')$ to distinguish it from a generator of the ZH$_R$-calculus. We have for every $n \in \{ 1, 2, 3, \dots \} \subset R$: \ctikzfig{n-induction} Here the induction step is denoted by (*). Note that we first used the average rule as an axiom of \ensuremath{\text{ZH}_R}\xspace\ acting on the generators (written $\avgrule$), and then as a derived rule from the original ZH-calculus (Proposition~\ref{prop:average-integer}). For negative integers we simply note that the definition of negation \eqref{eq:def-negative-numbers} corresponds precisely to multiplying by $-1$ in \MultPhaseRule. \end{proof} As in Section~\ref{sec:avg-intro-mult}, we can prove !-boxed versions of the axioms \MultPhaseRule, \AvgRule and \IntroRule. In particular, the !-boxed version of \MultPhaseRule and \UnitRule (i.e.\ Lemma~\ref{lem:unit-bb}) where the !-box is expanded $0$ times give: \begin{equation}\label{eq:scalar-cancel} \tikzfig{scalar-mult} \qquad\qquad\qquad\qquad\qquad\qquad \tikzfig{one-cancellation} \end{equation} These rules enable us to multiply scalars at will, and in particular eliminate scalars by multiplying by the inverse (when the inverse exists in $R$). Finally we will prove that the star generator is equal to the scalar $\half$ in \ensuremath{\text{ZH}_R}\xspace. \begin{lemma} \label{lem:star-is-half} If $\half \in R$, then $\dstar = \scalar{hadamard}{\half}$. 
\end{lemma} \begin{proof} \label{prfLem:star-is-half} \[ \scalar{hadamard}{\half} \namedeq{\ref{lem:scalarcancelstars}} \dstar \scalar{white dot}{} \scalar{hadamard}{\half} \namedeq{\ref{lem:two-scalar}} \dstar \scalar{hadamard}{2}\scalar{hadamard}{\half} \namedeq{\eqref{eq:scalar-cancel}} \dstar \scalar{hadamard}{1} \namedeq{\eqref{eq:scalar-cancel}} \dstar \qedhere \] \end{proof} With the correspondence to the standard ZH-calculus now firmly established, we can easily adapt our previous results to prove that \ensuremath{\text{ZH}_R}\xspace\ is complete. \begin{theorem}\label{thm:ZHRHalf-complete} Let $R$ be a commutative unital ring with $\half \in R$. Then the ZH$_R$-calculus is complete, i.e.\ for any ZH$_R$-diagrams $D_1$ and $D_2$, if $\llbracket D_1 \rrbracket = \llbracket D_2 \rrbracket$, then $D_1$ can be transformed into $D_2$ using the rules of Figure~\ref{fig:ZH-rules}. \end{theorem} \begin{proof} As the rules of Figure~\ref{fig:ZH-rules} are a superset of those of Figure~\ref{fig:phasefree-rules}, anything we have proven for ZH remains provable in \ensuremath{\text{ZH}_R}\xspace. We have also shown in Proposition~\ref{prop:ZH-and-ZHR-same-labels} that all the labelled (\ensuremath{\text{ZH}}\xspace) H-boxes used in the completeness proof are equivalent to their \ensuremath{\text{ZH}_R}\xspace-calculus counterparts. In particular, the reduction to normal form conditional on having proved \eqref{eq:mult-def}, \eqref{eq:intro-def} and \eqref{eq:average-def} of Section~\ref{sec:normal-form-conditional} remains valid. But now, instead of having to prove the multiplication, introduction and average rules, we have them assumed as axioms. Thus, we can transform any \ensuremath{\text{ZH}_R}\xspace-diagram into reduced normal form, and hence the calculus is complete. 
\end{proof} \subsection{The ZH-calculus over rings without a half} \label{sec:ring-without-half} The \ensuremath{\text{ZH}_R}\xspace-calculus when $R$ does not contain a half is slightly trickier to deal with. As before, we will assume that $R$ is a commutative unital ring. But instead of assuming that $\half\in R$, we will merely assume that $2$ is not a zero-divisor. With these conditions, $R$ embeds faithfully into $R[\half]$, and so \ensuremath{\text{ZH}_R}\xspace\ as defined in Section~\ref{sec:ring-with-half} has a non-universal interpretation into $R[\half]$-\textbf{bit}. We could then add the $\dstar$ generator, creating a universal interpretation into $R[\half]$-\textbf{bit}. This is essentially the process one follows to go from $\ensuremath{\text{ZH}}\xspace_\mathbb{Z}$ to our `phase-free' $\ensuremath{\text{ZH}}\xspace$, but also raises the question of `why not just start with \ensuremath{\text{ZH}_{R[\half]}}\xspace\ instead?' We present in this subsection a definition of \ensuremath{\text{ZH}_R}\xspace, with $\half \notin R$, that is universal for $R$-\textbf{bit} and is complete using the same rules as \ensuremath{\text{ZH}_R}\xspace\ when $\half \in R$, with the exception of an additional `scalar cancellation' meta-rule. Since we want to have an interpretation into $R$-\textbf{bit} we will no longer be able to use the $\dstar$ generator, which in turn makes the definition of the derived generators \GreyDef and \NotDef invalid. Instead we will promote the grey spiders from being \emph{derived} generators to, simply, generators. Hence, our list of generators becomes: \[ \spider{white dot}{}\ ,\ \spider{hadamard}{r} \ ,\ \spider{gray dot}{} \ ,\ \spider{gray dot}{\neg} \] The ruleset of the \ensuremath{\text{ZH}_R}\xspace-calculus when $\half\not\in R$ consists of the rules of Figure~\ref{fig:ZH-rules}, where now the grey spiders are considered to be actual generators, and the usual symmetries of the generators. 
In addition, as the grey spiders are now no longer defined in terms of the other generators, we introduce the following rules to relate the generators to one another, which are scaled versions of the definitions of the grey spiders \eqref{eq:defx}: \begin{equation}\label{eq:def2x} \tikzfig{X-spider-dfn-free-doubled} \tag{2X} \end{equation} \begin{equation}\label{eq:not2x} \tikzfig{negate-dfn-free-doubled} \tag{2NOT} \end{equation} Finally, we will also need the following cancellation meta-rule to get completeness when $\half \notin R$: \begin{equation}\label{eq:2cancel} \dotonly{white dot}\xspace D_1 = \dotonly{white dot}\xspace D_2 \implies D_1 = D_2 \tag{2Cancel} \end{equation} In words, this rule says that if two diagrams are provably equal in \ensuremath{\text{ZH}_R}\xspace\ and they both contain a scalar white dot, then we can cancel the white dot on both sides and retain an equality. (When $\dstar$ is in the calculus, this follows easily by an application of Lemma~\ref{lem:scalarcancelstars}.) We call this implication a meta-rule because it is a statement about admissible deductive logic rather than diagram equality. Note that since 2 is not a zero-divisor in $R$ this rule is sound. A similar meta-rule was also used for proving completeness of rational-angle fragments of the ZX calculus \cite{ZXNormalForm}. The plan for showing completeness of \ensuremath{\text{ZH}_R}\xspace\ when $\half \notin R$ is to show that the \ensuremath{\text{ZH}_R}\xspacehalf-derivation from a \ensuremath{\text{ZH}_R}\xspace-diagram $D$ to its normal form can be used as a \ensuremath{\text{ZH}_R}\xspace-derivation, up to rescaling. While this idea is straightforward, formalising it takes some care. \begin{remark} \label{rem:grey-spider-unpacking-explicit} Every \ensuremath{\text{ZH}_R}\xspace\ diagram can be seen as a \ensuremath{\text{ZH}_R}\xspacehalf\ diagram by translating each diagram component to their obvious counterpart. 
For grey spiders we need to be careful about this translation, since they are generators in \ensuremath{\text{ZH}_R}\xspace, but derived generators in \ensuremath{\text{ZH}_R}\xspacehalf. We will be using this embedding in the lemmas that follow, noting that we will still explicitly `unpack' grey spiders using the rules \NotDef, \GreyDef, \TwoNot and \TwoX when we need to (and not leave such unpacking implicit). We are treating the $\dstar$ element as syntactic sugar to represent the 0-arity $H(\half)$ box when it arises. \end{remark} \begin{lemma} \label{lem:rescaled-derived-generators} Suppose $D_1$ and $D_2$ are \ensuremath{\text{ZH}_R}\xspace-diagrams. Let $*$ denote either of the \ensuremath{\text{ZH}_R}\xspacehalf-rules \NotDef or \GreyDef, with $2*$ denoting the corresponding rescaled \ensuremath{\text{ZH}_R}\xspace\ version \TwoNot or \TwoX. Then if the \ensuremath{\text{ZH}_R}\xspacehalf\ rule application \[D_1 \namedeq{*} \dstar D_2\] is a valid diagrammatic rewrite, so is the \ensuremath{\text{ZH}_R}\xspace\ rule application \[ \dotonly{white dot}\xspace D_1 \namedeq{$2*$} D_2 \] \end{lemma} \begin{proof} \label{prf:lem:rescaled-derived-generators} This follows immediately from the definitions. \end{proof} \begin{definition} \label{def:ZHR-with-stars} We say that a \ensuremath{\text{ZH}_R}\xspacehalf\ diagram is a \emph{$\ensuremath{\text{ZH}_R}\xspacestar$} diagram (pronounced `ZHR with stars' diagram) if it is of the form $D \ensuremath{\otimes}\xspace (\dstar)^{\ensuremath{\otimes}\xspace n}$, where $D$ is a \ensuremath{\text{ZH}_R}\xspace\ diagram. \end{definition} Recall that the rules of \ensuremath{\text{ZH}_R}\xspacehalf\ are the rules given in Figure~\ref{fig:ZH-rules} and the rules relating the generators to derived generators: \GreyDef, \NotDef, and \ZDef. 
\begin{lemma} \label{lem:leaving-ring-R-for-Rhalf} The only \ensuremath{\text{ZH}_R}\xspacehalf\ rules that can be applied to a \ensuremath{\text{ZH}_R}\xspacestars\ diagram to give a diagram that is no longer a \ensuremath{\text{ZH}_R}\xspacestars\ diagram are \MultPhaseRule, \AvgRule, and \HPhaseRule. \end{lemma} \begin{proof} The \MultPhaseRule and \AvgRule rules (applied right-to-left) can both result in H-box phases that are no longer in $R$. Furthermore, recall from Remark~\ref{rem:grey-spider-unpacking-explicit} that the star element is used here as short-hand for the 0-arity H-box labelled by $\half$. Hence $\half$-labelled H-boxes of arity 0 are allowed in \ensuremath{\text{ZH}_R}\xspacestar\ diagrams, while $\half$-labelled H-boxes of higher arity are not allowed. Yet the \HPhaseRule rule, when applied right-to-left to a $\half$-labelled H-box of arity 0, yields a $\half$-labelled H-box with arity greater than 0. All other rules preserve \ensuremath{\text{ZH}_R}\xspacestar\ diagrams: the only remaining rules that affect H-box labels are \UnitRule, which can only introduce the integer label 1, and \IntroRule, which does not change any labels and does not match H-boxes of arity 0. The other rules do not involve H-box labels at all. \end{proof} \begin{lemma} \label{lem:all-star-boxes-are-arity-zero} The $\ensuremath{\text{ZH}_R}\xspacehalf$ derivation from a \ensuremath{\text{ZH}_R}\xspace\ diagram $D$ to its normal form $N$ of Theorem~\ref{thm:ZHRHalf-complete} is a \ensuremath{\text{ZH}_R}\xspacestar\ diagram at every step of the derivation. \end{lemma} \begin{proof} \label{prf:lem:all-h0-boxes-are-arity-zero} The starting diagram $D$ is a \ensuremath{\text{ZH}_R}\xspace\ diagram (and therefore also a \ensuremath{\text{ZH}_R}\xspacestar\ diagram). Each step of the derivation to its normal form $N$ is a rule application, and we will show that every rule application used preserves the condition of being a \ensuremath{\text{ZH}_R}\xspacestars\ diagram. 
Intuitively, this is because the process for rewriting to normal form was derived in the phase-free ZH-calculus, which differs from $\ensuremath{\text{ZH}}\xspace_\mathbb{Z}^{\dstar}$ only in that grey spiders are syntactic sugar rather than generators. More formally: by Lemma~\ref{lem:leaving-ring-R-for-Rhalf} the only rule applications that could result in a diagram that is no longer in \ensuremath{\text{ZH}_R}\xspacestars\ form are \MultPhaseRule, \AvgRule, and \HPhaseRule. As can be verified by going through the steps of the derivation in Section~\ref{sec:normal-forms}, the derivation from $D$ to $N$ only uses \MultPhaseRule and \AvgRule with phases that are elements of $R$, and hence they preserve the \ensuremath{\text{ZH}_R}\xspacestars\ form. Also note that at no point in the derivation is the rule \HPhaseRule applied to a star, since in ZH that would result in an invalid $\half$-phased H-box. \end{proof} The previous lemma shows that the derivation from a \ensuremath{\text{ZH}_R}\xspace-diagram $D$ to its normal form in \ensuremath{\text{ZH}_R}\xspacehalf\ consists of diagrams that are close to being \ensuremath{\text{ZH}_R}\xspace-diagrams, except for the star generators. We shall next show that the only rules that interact with $\!\dstar\!$ are \NotDef and \GreyDef, which are precisely the rules we replace when using $\ensuremath{\text{ZH}_R}\xspace$ instead of $\ensuremath{\text{ZH}_R}\xspacehalf$. \begin{lemma} \label{lem:only-use-stars-as-stars} In the $\ensuremath{\text{ZH}_R}\xspacehalf$ derivation from a \ensuremath{\text{ZH}_R}\xspace\ diagram $D$ to its normal form $N$ of Theorem~\ref{thm:ZHRHalf-complete}, the element $\dstar$ is only ever involved in a rewrite when the rule being applied is \NotDef or \GreyDef. 
\end{lemma} \begin{proof} \label{prf:lem:only-use-stars-as-stars} The only \ensuremath{\text{ZH}_R}\xspacehalf\ rules that interact with the $\dstar$ element are \NotDef, \GreyDef, and \HPhaseRule (viewing the star as a 0-arity H-box). As noted in the proof of Lemma~\ref{lem:all-star-boxes-are-arity-zero} the \HPhaseRule rule is never applied to a star in the course of the derivation, since the derivation was designed to be applied to ZH diagrams where such a rewrite would be invalid. \end{proof} \begin{lemma} \label{lem:ZHRhalf-nf-derivation-to-ZHR-nf-derivation} If the \ensuremath{\text{ZH}_R}\xspace\ diagram $D$ has normal form $N$, then there is an $m\in \mathbb{N}$ such that the equation $D \ensuremath{\otimes}\xspace (\dotonly{white dot}\xspace)^{\ensuremath{\otimes}\xspace m} = N \ensuremath{\otimes}\xspace (\dotonly{white dot}\xspace)^{\ensuremath{\otimes}\xspace m}$ is derivable in \ensuremath{\text{ZH}_R}\xspace. \end{lemma} \begin{proof} \label{prf:lem:ZHRhalf-nf-derivation-to-ZHR-nf-derivation} By Lemma~\ref{lem:all-star-boxes-are-arity-zero} there is a \ensuremath{\text{ZH}_R}\xspacehalf-derivation from $D$ to $N$ such that every diagram in the chain is a \ensuremath{\text{ZH}_R}\xspacestar\ diagram. As noted in Remark~\ref{rem:grey-spider-unpacking-explicit}, we are keeping the unpacking of grey spiders explicit, and so applications of the \ensuremath{\text{ZH}_R}\xspacehalf{}-rules \NotDef and \GreyDef are explicit steps in this derivation. We then simultaneously compose every diagram in the derivation by the same number of $\dotonly{white dot}\xspace$ generators such that by Lemma~\ref{lem:rescaled-derived-generators} we can replace all applications of \NotDef with \TwoNot and all applications of \GreyDef with \TwoX, and then cancel all remaining instances of a $\dstar$ with a $\dotonly{white dot}\xspace$. 
This leaves us with a sound \ensuremath{\text{ZH}_R}\xspace-derivation of \ensuremath{\text{ZH}_R}\xspace\ diagrams from $D \ensuremath{\otimes}\xspace (\dotonly{white dot}\xspace)^{\ensuremath{\otimes}\xspace m}$ to $N \ensuremath{\otimes}\xspace (\dotonly{white dot}\xspace)^{\ensuremath{\otimes}\xspace m}$ for some $m \geq 0$. \qedhere \end{proof} \begin{theorem} Let $R$ be a commutative unital ring where $2$ is not a zero-divisor and $\half\not\in R$. Then the \ensuremath{\text{ZH}_R}\xspace-calculus is complete. In other words, for any \ensuremath{\text{ZH}_R}\xspace-diagrams $D_1$ and $D_2$, if $\llbracket D_1 \rrbracket = \llbracket D_2 \rrbracket$ then $D_1$ can be transformed into $D_2$ using the rules of Figure~\ref{fig:ZH-rules}, the rules \TwoNot and \TwoX, and the meta-rule \TwoCancel. \end{theorem} \begin{proof} The completeness of \ensuremath{\text{ZH}_R}\xspacehalf\ gives us the \ensuremath{\text{ZH}_R}\xspacehalf-derivations $D_1 = N$ and $N = D_2$. By Lemma~\ref{lem:ZHRhalf-nf-derivation-to-ZHR-nf-derivation} there are \ensuremath{\text{ZH}_R}\xspace-derivations $(\dotonly{white dot}\xspace)^{\ensuremath{\otimes}\xspace a} \ensuremath{\otimes}\xspace D_1 = (\dotonly{white dot}\xspace)^{\ensuremath{\otimes}\xspace a} \ensuremath{\otimes}\xspace N$ and $(\dotonly{white dot}\xspace)^{\ensuremath{\otimes}\xspace b} \ensuremath{\otimes}\xspace N = (\dotonly{white dot}\xspace)^{\ensuremath{\otimes}\xspace b} \ensuremath{\otimes}\xspace D_2$ for some $a,b\in \mathbb{N}$. We can therefore derive $(\dotonly{white dot}\xspace)^{\ensuremath{\otimes}\xspace a+b} \ensuremath{\otimes}\xspace D_1 = (\dotonly{white dot}\xspace)^{\ensuremath{\otimes}\xspace a+b} \ensuremath{\otimes}\xspace N = (\dotonly{white dot}\xspace)^{\ensuremath{\otimes}\xspace a+b} \ensuremath{\otimes}\xspace D_2$ in \ensuremath{\text{ZH}_R}\xspace\ by multiplying every diagram in the derivations by a suitable number of $\ensuremath{\otimes}\xspace$-products of \dotonly{white dot}\xspace. 
By the \TwoCancel meta-rule we can then assert that $D_1 = D_2$ is provable in ZH$_R$. \end{proof} We have now shown that \ensuremath{\text{ZH}_R}\xspace\ is universal, sound, and complete whenever $R$ is a commutative, unital ring where $2$ is not a zero-divisor. In the case $\half \in R$, the calculus and ruleset is very similar to our basic ZH. When $\half \notin R$ we needed to tweak the generators to avoid using the $\dstar$ generator, and we needed to introduce a meta-rule for cancelling $\dotonly{white dot}\xspace$ generators. \section{Modifications to the ZH-calculus}\label{sec:alternative-rules} In this section, we will discuss a couple of variations on the ZH-calculus. In particular, we present an alternative to \OrthoRule in Section~\ref{sec:o-rule}, and an alternative to \AvgRule and \IntroRule in Section~\ref{sec:alternative-intro-avg}. We show that \HHRule can actually be proved from the other rules in Section~\ref{sec:hh-rule-necessity}. In Section~\ref{sec:tof-had} we present an alternative parameter-free ZH-calculus which is complete over a subring of matrices over $\mathbb{Z}[\frac{1}{\sqrt{2}}]$ which exactly corresponds to those that can be implemented by the Toffoli+Hadamard gate set. \subsection{Replacing the (o) rule}\label{sec:o-rule} Of all the basic rules of the ZH-calculus in Figure~\ref{fig:phasefree-rules} the ortho rule \OrthoRule seems a bit out of place. In this section we will see that we can replace it with two other rules, namely Lemmas~\ref{lem:copy-znot-h} and~\ref{lem:dedup}. These two rules can be rephrased as statements about the AND gate. 
Namely, up to some simple rewriting, Lemma~\ref{lem:copy-znot-h} says that if we post-select the AND gate with $\bra{1}$, then this post-selection copies through: \ctikzfig{AND-postselect} Lemma~\ref{lem:dedup} can be rephrased as stating that a COPY gate followed by an AND gate applied to its outputs is just the identity: \ctikzfig{AND-COPY-ID} Lemmas~\ref{lem:copy-znot-h} and~\ref{lem:dedup} show that these two rules can be proved by the basic ZH-calculus rules including \OrthoRule. In this section we will show the converse: that assuming just the rules of Figure~\ref{fig:phasefree-rules} except for \OrthoRule, together with Lemmas~\ref{lem:copy-znot-h} and~\ref{lem:dedup}, we can prove \OrthoRule. Note that all the basic lemmas of Section~\ref{s:basic-derived} are proven without the usage of \OrthoRule (except for Lemma~\ref{lem:copy-znot-h}), and hence can be used in this section. Before proving \OrthoRule, we need to prove a number of lemmas that are essentially statements in Boolean logic. 
\begin{multicols}{2} \begin{lemma}\label{lem:o-rule-alt1}$(x\oplus y)\wedge z = (x\wedge z)\oplus (y\wedge z)$: \ctikzfig{lem-o-rule-1} \end{lemma} \begin{lemma}\label{lem:o-rule-alt2}$(\neg x)\wedge y = (x\wedge y)\oplus y$: \ctikzfig{lem-o-rule-2} \end{lemma} \null \columnbreak \begin{lemma}\label{lem:o-rule-alt3}$(\neg x)\wedge x = 0$: \ctikzfig{lem-o-rule-3} \end{lemma} \begin{lemma}\label{lem:o-rule-alt4}$x\vee y = x\oplus (x\wedge y) \oplus y$: \ctikzfig{lem-o-rule-4} \end{lemma} \end{multicols} \begin{proof}[Proof of Lemma~\ref{lem:o-rule-alt1}] \[\tikzfig{lem-o-rule-1-pf} \qedhere\] \end{proof} \begin{proof}[Proof of Lemma~\ref{lem:o-rule-alt2}]~ \[\tikzfig{lem-o-rule-2-pf} \qedhere\] \end{proof} \begin{proof}[Proof of Lemma~\ref{lem:o-rule-alt3}]~ \[\tikzfig{lem-o-rule-3-pf} \qedhere\] \end{proof} \begin{proof}[Proof of Lemma~\ref{lem:o-rule-alt4}]~ \[\tikzfig{lem-o-rule-4-pf} \qedhere\] \end{proof} \begin{lemma}\label{lem:o-rule-alt5}We have $(x\wedge (\neg y))\vee (y \wedge z) = (x\wedge (\neg y))\oplus (y\wedge z)$: \ctikzfig{lem-o-rule-5} \end{lemma} \begin{proof}~ \ctikzfig{lem-o-rule-5-pf} \end{proof} \begin{theorem}\label{thm:o-alt} In the presence of the other rules of the ZH-calculus, \OrthoRule is equivalent to the union of Lemmas~\ref{lem:copy-znot-h} and~\ref{lem:dedup}. \end{theorem} \begin{proof} The proof of Lemmas~\ref{lem:copy-znot-h} and~\ref{lem:dedup} shows the forward direction: that the standard ruleset of the ZH-calculus, including \OrthoRule, implies the two lemmas. For the converse direction, recall that the only one of the basic derived rules in Section~\ref{s:basic-derived} whose proof required \OrthoRule was Lemma~\ref{lem:copy-znot-h}, which is now one of our axioms. Note that we have proven Lemma~\ref{lem:o-rule-alt5} using just Lemma~\ref{lem:dedup}, ZH-calculus axioms other than \OrthoRule, and the basic derived rules. 
Proving \OrthoRule from Lemma~\ref{lem:o-rule-alt5}, other ZH axioms, and basic derived rules will therefore give the desired result. To make the application of intermediate rules clearer, we re-arrange the diagrams of \OrthoRule so the wire which is usually an output is now the middle input instead: \[\tikzfig{ortho-proof} \qedhere\] \end{proof} \subsection{The (hh) rule is not necessary}\label{sec:hh-rule-necessity} It turns out that \HHRule can actually be derived from the other rules. Before proving this, since the proof of Lemma~\ref{lem:scalarcancelstars} uses \HHRule, we will need to find a different way to cancel scalars first. The following turns out to suffice: \begin{equation}\label{eq:hhh-scalars} \tikzfig{hhh-scalars} \end{equation} We can now prove \HHRule: \begin{equation}\label{eq:hh-proof} \tikzfig{hh-proof} \end{equation} The fact that we can derive \HHRule from the other rules raises the question whether any other rules are also not necessary. We hypothesise that they are in fact all needed. It is at least the case that \SpiderRule and \HHRule are necessary: they are the only rules that relate higher-arity spiders, respectively H-boxes, to lower arity ones (this argument can be formalised by presenting alternative interpretations into linear maps that change what higher-arity spiders/H-boxes correspond to). \IDRule is also necessary as it is the only rule that relates a generator to the identity wire. At least one of \StrongCompRule and \HCompRule is necessary as they are the only ones that relate an empty diagram to a non-empty diagram (the $n=m=0$ case). We do not know of any argument for the necessity of the other rules \MultRule and \OrthoRule, although it seems likely that they are both necessary. \subsection{Merging the intro and average rule}\label{sec:alternative-intro-avg} The intro rule \IntroRule and the average rule \AvgRule can be subsumed by a single larger rule. 
Before we present this rule, let us first present an alternative to \AvgRule that holds when the ring contains a half. \begin{lemma}\label{lem:average-true-form} Let $R$ be a ring with a half. In the presence of the other rules in Figure~\ref{fig:ZH-rules}, \AvgRule is equivalent in \ensuremath{\text{ZH}_R}\xspace\ to the following rule: \begin{equation}\label{eq:average-rule-orig} \tikzfig{average-rule} \end{equation} \end{lemma} \begin{proof} As the LHS of \AvgRule agrees with the LHS of \eqref{eq:average-rule-orig}, it suffices to show that the RHS of both are equal. Note first that: \begin{equation}\label{eq:not-two-is-half} \tikzfig{not-two-is-half} \end{equation} Hence: \ctikzfig{average-two-forms} Note that this only shows that \AvgRule implies \eqref{eq:average-rule-orig}, as we proved Lemma~\ref{lem:two-cancel} using \AvgRule. For the converse direction, we have \ctikzfig{average-prf-rev} which implicitly uses a straightforward generalisation of Lemma~\ref{lem:copy-xnot-h} to H-boxes with arities other than 3 and with phase labels. None of the lemmas invoked in this proof, nor their antecedents, rely on \AvgRule. \end{proof} \begin{theorem}[\cite{renaud}]\label{thm:renaud} Let $R$ be a ring with a half. In the presence of the other rules of Figure~\ref{fig:ZH-rules}, the combination of the rules \IntroRule and \AvgRule is equivalent to the following rule: \begin{equation}\label{eq:average-renaud} \tikzfig{average-renaud} \end{equation} \end{theorem} \begin{proof} As the \ensuremath{\text{ZH}_R}\xspace-calculus is complete with the rules presented in Figure~\ref{fig:ZH-rules}, and \eqref{eq:average-renaud} is easily shown to be sound for any $a$ and $b$, the equation \eqref{eq:average-renaud} can be proven using \IntroRule and \AvgRule for any particular choice of $a$ and $b$. Alternatively, by using the ZH-calculus over the polynomial ring in two variables $R[a,b]$, there is also a proof of it using the variables $a$ and $b$ directly. 
It hence remains to show that \eqref{eq:average-renaud} together with (Figure~\ref{fig:ZH-rules})${}\setminus\{\AvgRule,\IntroRule\}$ implies both \IntroRule and \AvgRule. We will first prove~\eqref{eq:average-rule-orig}. By plugging a white dot into the top wire of \eqref{eq:average-renaud} we get this, except for a scalar factor that will be shown to cancel afterwards: \begin{equation}\label{eq:renaud-to-average} \scalebox{0.9}{\tikzfig{renaud-to-average}} \end{equation} Bending the wires up, and taking $a:=\frac{a'+b'}{2}$ and $b:=\frac{a'-b'}{2}$ gives \eqref{eq:average-rule-orig} for $a'$ and $b'$ (up to scalar factors). Using this we note that: \begin{equation}\label{eq:zero-to-grey} \tikzfig{zero-to-grey} \end{equation} We can use this to show that the scalar involving $H(0)$ cancels: \begin{equation}\label{eq:zero-scalar-cancel} \tikzfig{zero-scalar-cancel} \end{equation} Hence, \eqref{eq:renaud-to-average} and \eqref{eq:zero-to-grey} reduce to what we would expect. We still need to show that a scalar white dot is equal to $H(2)$. First we need to know how to decompose $H(\frac12)$: \begin{equation}\label{eq:half-to-hboxes} \tikzfig{half-to-hboxes} \end{equation} Now proving the equivalent of Lemma~\ref{lem:two-scalar} is straightforward: \ctikzfig{two-scalar-cancel-reprise} Combining~\eqref{eq:renaud-to-average} with this now gives us~\eqref{eq:average-rule-orig}. Finally, by taking $b=0$ in \eqref{eq:average-renaud}, we can derive \IntroRule: \[\scalebox{0.9}{\tikzfig{renaud-to-intro}}\] We can now apply the argument of Lemma~\ref{lem:average-true-form} to get \AvgRule. \end{proof} \subsection{ZH-calculus as a complete language for Toffoli+Hadamard circuits}\label{sec:tof-had} The ZH-calculus presented in Section~\ref{sec:phase-free-ZH} is universal and complete for matrices over the ring $\mathbb{Z}[\half]$. 
As shown in~\cite{Amy2020numbertheoretic}, the unitary matrices over $\mathbb{Z}[\half]$ correspond to circuits generated by Toffoli and $H\otimes H$, or in other words, circuits consisting of Toffoli gates and an even number of Hadamard gates. The reason these matrices, and thus the ZH-calculus, are restricted to an even number of Hadamard gates is because of the normalisation of the individual binary H-boxes: we have $\intf{\,\tikzfig{hada-phase}\,} = \left(\begin{smallmatrix}1&1\\1&-1\end{smallmatrix}\right)$, which differs from the unitary Hadamard gate by a factor of $\frac{1}{\sqrt{2}}$. With all generators being interpreted as matrices over $\mathbb{Z}[\half]$, a diagram containing an odd number of binary H-boxes can hence not be normalised to be unitary. We chose to define the interpretation of the \tikzfig{star} generator to be $\half$, because it led to the simplest possible calculus. By redefining the interpretation of this generator, we can make ZH-diagrams represent matrices $M=(\frac{1}{\sqrt{2}})^n A$ where $A$ is a matrix over $\mathbb{Z}$, which correspond precisely to the matrices generated by the Toffoli+Hadamard gate set~\cite{Amy2020numbertheoretic}. We do this by redefining the \tikzfig{star} generator to represent $\frac{1}{\sqrt{2}}$ instead of $\frac{1}{2}$. By replacing every occurrence of \tikzfig{star} by two stars in the definitions of Section~\ref{sec:phase-free-ZH}, and in all the derivations of the preceding sections, all the derivations remain sound under this new interpretation. In particular, the derivation that each diagram can be reduced to normal form remains true. For almost any diagram we can also bring the diagram to reduced normal form, with one class of exceptions: if the diagram represents the zero matrix then the reduced normal form should contain no stars. 
However, when we reinterpret the star (and thus double the number of occurrences in the rewrite rules) we see that all rewrite rules preserve the parity of the number of stars in the diagram. Hence, the following equality that says that $\frac{1}{\sqrt{2}}\cdot 0 = 0$ is not derivable with those rules: \begin{equation} \text{Using $\intf{\tikzfig{star}} = \frac{1}{\sqrt{2}}$} \qquad \tikzfig{star-zero-rule} \end{equation} Adding this as another rewrite rule solves this edge case and we still get a complete calculus, but now for matrices of the form $(\frac{1}{\sqrt{2}})^n A$ where $A$ only contains integers. \section{Conclusion}\label{sec:conclusion} We studied the ZH-calculus, a graphical language for reasoning about qubit quantum computing. We found a small set of rewrite rules motivated by basic identities between Boolean functions that allowed us to prove completeness of the fragment that corresponds to the Toffoli-Hadamard gateset. We then extended the ZH-calculus so that it can represent matrices over arbitrary rings where $2$ is not a zero divisor. We found an extended ruleset that is complete for any such ring. We have argued that our calculus, both the parameter-free one, as well as the one complete over any ring, is the simplest complete graphical calculus for an approximately universal fragment of quantum computing found so far. Of the rules of the ZH-calculus as presented in Figure~\ref{fig:phasefree-rules}, we showed that \HHRule is actually not necessary, as it can be proven from the others. We believe that the rest are all necessary for completeness. In Section~\ref{sec:o-rule} we showed that \OrthoRule can be replaced by two smaller and simpler to understand rules. It might perhaps be possible to show that only one of these suffices, which would give us a simpler axiomatisation. For the \ensuremath{\text{ZH}_R}\xspace-calculus we required a meta-rule to cancel scalars when $\half\not\in R$. 
It is not clear at the moment whether the calculus is complete without this meta-rule. The phase-free calculus with its translation to Boolean functions elucidates the relationship between classical computation and universal quantum computation using the Toffoli-Hadamard gate set. Correspondingly, the ZH-calculus over arbitrary rings should be useful for analysing universal quantum computation with multiply-controlled gates as well as quantum computation or error-correction schemes involving hypergraph states. The Fourier-transform relationship between the ZH-calculus over $\mathbb{C}$ and the ZX-calculus~\cite{GraphicalFourier2019} also points towards the usefulness of combining the ZX- and ZH-calculus, and translating between them. Indeed, \cite{east2020akltstates} already exhibits a combined calculus called the ZXH-calculus. Whether on its own or in combination with other graphical languages, the ZH-calculus has applications in a broad range of areas of quantum computing. \paragraph{Acknowledgements} The authors wish to thank Renaud Vilmart for pointing out \eqref{eq:average-renaud} as an alternative to the intro and average rule, and Patrick Roy for pointing out the derivation~\eqref{eq:hh-proof} that showed that \HHRule is superfluous. The authors would also like to extend their gratitude to the anonymous reviewers that read the paper closely and found several small errors and oversights that we could subsequently fix. JvdW and AK are supported in part by AFOSR grant FA2386-18-1-4028. For the majority of the work in this article, JvdW is supported by a Rubicon fellowship financed by the Dutch Research Council (NWO). Additionally, JvdW acknowledges that this project has received funding from the European Union's Horizon 2020 research and innovation programme under the Marie Sklodowska-Curie grant agreement No 101018390. \end{document}
\begin{document} \renewcommand{\thefootnote}{\fnsymbol{footnote}} \begin{center} {\LARGE Inversion sequences avoiding the pattern 010} \\[15pt] {\large Benjamin Testart\footnote[1]{Université de Lorraine, CNRS, Inria, LORIA, F 54000 Nancy, France}} \\ \end{center} \renewcommand{\thefootnote}{\arabic{footnote}} \abstract{Inversion sequences are integer sequences $(\sigma_1, \dots, \sigma_n)$ such that $0 \leqslant \sigma_i < i$ for all $1 \leqslant i \leqslant n$. The study of pattern-avoiding inversion sequences began in two independent articles by Mansour--Shattuck and Corteel--Martinez--Savage--Weselcouch in 2015 and 2016. These two initial articles solved the enumeration of inversion sequences avoiding a single pattern for every pattern of length 3 except the patterns 010 and 100. The case 100 was recently solved by Mansour and Yildirim. We solve the final case by making use of a decomposition of inversion sequences avoiding the pattern 010. Our decomposition needs to take into account the maximal value, and the number of distinct values occurring in the inversion sequence. We then expand our method to solve the enumeration of inversion sequences avoiding the pairs of patterns $\{010, 000\}, \{010, 110\}, \{010, 120\}$, and the Wilf-equivalent pairs $\{010, 201\} \sim \{010, 210\}$. For each family of pattern-avoiding inversion sequences considered, its enumeration requires the enumeration of some family of constrained words avoiding the same patterns, a question which we also solve. } \medbreak {\noindent \textbf{Keywords: }pattern avoidance, inversion sequences, enumeration, words, Stirling \break numbers} \section{Introduction} Let $\mathbb N$ be the set of natural numbers, including 0. Given a natural number $n \in \mathbb N$, we call \emph{integer sequences} of length $n$ the elements of $\mathbb N^n$. If $\sigma \in \mathbb N^n$ is an integer sequence, we write $\sigma = (\sigma_1, \dots, \sigma_n)$. 
We denote by $\textbf{I}_n$ the set of \emph{inversion sequences} of length $n$, that is sequences $\sigma = (\sigma_1, \dots, \sigma_n)$ with elements in $\mathbb N$, such that $\sigma_i < i$ for all $i \in \{1, \dots, n\}$. There is a natural bijection between $\textbf{I}_n$ and the set of permutations of $n$ elements, called the \emph{Lehmer code}, which explains the name "inversion sequence". If $\pi = (\pi_1, \dots, \pi_n)$ is a permutation, the inversion sequence $\sigma = (\sigma_1, \dots, \sigma_n)$ associated by the Lehmer code is defined by $\sigma_i = \#\{j \; | \; \pi_j > \pi_i \text{ and } j < i \}$ for all $i \in \{1, \dots, n\}$, i.e. $\sigma_i$ counts the number of inversions of $\pi$ whose second element is at position $i$. The study of pattern-avoiding inversion sequences (and many more types of sequences avoiding patterns) branched from pattern-avoiding permutations, a well-established field of research in enumerative combinatorics, see \cite{Kitaev_2011}, \cite{Vatter_2015}, or \cite{wikipedia_permutations}. Pattern-avoiding inversion sequences were first introduced in \cite{Mansour_Shattuck_2015} and \cite{Corteel_Martinez_Savage_Weselcouch_2016}. This study was extended using various definitions for 'patterns', see \cite{Martinez_Savage_2018} for patterns of relations, \cite{Yan_Lin_2020} for pairs of patterns, \cite{Auli_Elizalde_2019_1} for consecutive patterns, \cite{Auli_Elizalde_2019_2} for consecutive patterns of relations, \cite{Auli_Elizalde_2021} and \cite{Lin_Yan_2020} for vincular patterns, among other articles about the enumeration of pattern-avoiding inversion sequences. In this paper, we will only consider 'classical' patterns: given two integer sequences $\sigma = (\sigma_1, \dots, \sigma_n) \in \mathbb N^n$ and $p = (p_1, \dots, p_k) \in \mathbb N^k$, we say that $\sigma$ \emph{contains} the pattern $p$ if and only if there exists a subsequence of $\sigma$ which is order-isomorphic to $p$. 
For example, the sequence $(4,3,2,5,4)$ contains the pattern 021 since both subsequences $(3,5,4)$ and $(2,5,4)$ are order-isomorphic to 021. A sequence \emph{avoids} a pattern $p$ if it does not contain $p$, e.g. the inversion sequence $(0,0,2,3,2,0,1,5)$ avoids the pattern 101. \begin{table}[ht] \begin{center} \begin{tabular}{|c|c|c|c|} \hline Pattern $p$ & $\#\textbf{I}_n(p)$ for $n = 1, \dots, 7$ & Solved? & OEIS \cite{OEIS}\\ \hline 000 & 1, 2, 5, 16, 61, 272, 1385 & \cite{Corteel_Martinez_Savage_Weselcouch_2016} & A000111\\ 001 & 1, 2, 4, 8, 16, 32, 64 & \cite{Corteel_Martinez_Savage_Weselcouch_2016} & A000079\\ 011 & 1, 2, 5, 15, 52, 203, 877 & \cite{Corteel_Martinez_Savage_Weselcouch_2016} & A000110\\ 012 & 1, 2, 5, 13, 34, 89, 233 & \cite{Corteel_Martinez_Savage_Weselcouch_2016} and \cite{Mansour_Shattuck_2015} & A001519\\ 021 & 1, 2, 6, 22, 90, 394, 1806 & \cite{Corteel_Martinez_Savage_Weselcouch_2016} and \cite{Mansour_Shattuck_2015} & A006318\\ 101 or 110 & 1, 2, 6, 23, 105, 549, 3207 & \cite{Corteel_Martinez_Savage_Weselcouch_2016} & A113227\\ 102 & 1, 2, 6, 22, 89, 381, 1694 & \cite{Mansour_Shattuck_2015} & A200753\\ 120 & 1, 2, 6, 23, 103, 515, 2803 & \cite{Mansour_Shattuck_2015} & A263778\\ 201 or 210 & 1, 2, 6, 24, 118, 674, 4306 & \cite{Mansour_Shattuck_2015} & A263777\\ 100 & 1, 2, 6, 23, 106, 565, 3399 & \cite{Mansour_Yildirim_2022} & A263780\\ 010 & 1, 2, 5, 15, 53, 215, 979 & Theorem \ref{thm010} & A263779\\ \hline \end{tabular} \caption{Enumeration of inversion sequences avoiding a single pattern of length 3.} \label{table1} \end{center} \end{table} If $P$ is a set of patterns, we denote by $\textbf{I}_n(P)$ the set of inversion sequences of length $n$ avoiding all patterns in $P$. The enumeration of inversion sequences avoiding a single pattern of length 3 was already solved for all patterns except $010$ (see Table \ref{table1}). 
We solve the pattern 010 in Section \ref{010} by using a decomposition of inversion sequences avoiding 010, which involves constrained words avoiding 010. We first enumerate these pattern-avoiding words, exhibiting a new instance of the Stirling numbers. Then, our decomposition results in a recursive formula for inversion sequences avoiding 010. \begin{table}[ht] \begin{center} \begin{tabular}{|c|c|c|c|} \hline Pattern $p$ & $\#\textbf{I}_n(010,p)$ for $n = 1, \dots, 7$ & Solved? & OEIS \cite{OEIS} \\ \hline 001 & 1, 2, 3, 4, 5, 6, 7 & \cite{Yan_Lin_2020} & A000027 \\ 011 & 1, 2, 4, 9, 23, 66, 210 & \cite{Yan_Lin_2020} & A026898 \\ 012 & 1, 2, 4, 8, 16, 32, 64 & \cite{Yan_Lin_2020} & A000079 \\ 021 & 1, 2, 5, 14, 42, 132, 429 & \cite{Yan_Lin_2020} & A000108 \\ 100 & 1, 2, 5, 15, 52, 203, 877 & \cite{Yan_Lin_2020} & A000110 \\ 101 & 1, 2, 5, 15, 52, 203, 877 & \cite{Martinez_Savage_2018} & A000110 \\ 000 & 1, 2, 4, 10, 29, 95, 345 & Theorem \ref{thm000} & A279552 \\ 120 & 1, 2, 5, 15, 52, 201, 845 & Theorem \ref{thm120} & A279559 \\ 201 or 210 & 1, 2, 5, 15, 53, 214, 958 & Theorem \ref{thm210} & A360052 \\ 110 & 1, 2, 5, 15, 52, 201, 847 & Theorem \ref{thm110} & A359191 \\ 102 & 1, 2, 5, 15, 51, 186, 707 & no & not yet \\ \hline \end{tabular} \caption{Enumeration of inversion sequences avoiding 010 and one other pattern of length 3.} \label{table2} \end{center} \end{table} The systematic study of inversion sequences avoiding pairs of patterns of length 3 was done by Lin and Yan in \cite{Yan_Lin_2020}. Among the pairs of patterns that contain 010, only five cases (up to Wilf-equivalence\footnote{We say that two patterns are \emph{Wilf-equivalent} if the corresponding avoidance classes of inversion sequences have the same enumeration sequence.}) are still unknown, see Table \ref{table2}. We solve four of those cases by adapting our approach for 010-avoiding inversion sequences, leaving only the pair $\{010, 102\}$ open. 
\section{The pattern 010} \label{010} The aim of this section is to enumerate inversion sequences avoiding the pattern 010. We solve this problem in Theorem \ref{thm010} by using a decomposition of 010-avoiding inversion sequences, in which a family of 010-avoiding words appears. We solve the enumeration of this family of words first, in Lemma \ref{lem010}. Let $\Ustirling{n}{k}$ be the unsigned Stirling number of the first kind, counting permutations of length $n$ with $k$ cycles (among other combinatorial interpretations). \begin{lem} \label{lem010} Let $\mathfrak{A}_{n,k}$ be the set of 010-avoiding words $\omega = (\omega_1, \dots, \omega_n)$ of length $n$ on the alphabet $\{1, \dots, k\}$ such that $\omega$ contains all letters $\{1, \dots, k\}$ and $\omega_1 = k$. Let $\mathfrak{a}_{n,k} = \#\mathfrak{A}_{n,k}$. For all $n,k \geqslant 1$, $$\mathfrak{a}_{n,k} = \Ustirling{n}{n+1-k}.$$ \end{lem} \begin{proof} Let $n \geqslant k \geqslant 2$, and $\omega \in \mathfrak{A}_{n,k}$. Since $\omega$ avoids the pattern $010$, all letters 1 (which is the smallest letter) in $\omega$ are consecutive. \begin{itemize} \item If $\omega$ contains several letters 1, then removing one of them yields a word $\omega' \in \mathfrak{A}_{n-1,k}$, and this is clearly a bijection. \item If $\omega$ contains a single letter 1, then removing it and subtracting $1$ from all other letters yields a word $\omega' \in \mathfrak{A}_{n-1,k-1}$. Since $\omega_1 = k > 1$, there are $n-1$ possible positions for the letter 1 in $\omega$, so exactly $n-1$ words $\omega \in \mathfrak{A}_{n,k}$ yield the same $\omega'$. \end{itemize} Hence the following recurrence relation holds for all $n \geqslant k \geqslant 2$: $$\mathfrak{a}_{n,k} = \mathfrak{a}_{n-1,k} + (n-1) \cdot \mathfrak{a}_{n-1,k-1}.$$ This recurrence relation is also satisfied by the Stirling numbers of the first kind $\Ustirling{n}{n+1-k}$, and we can easily verify that initial conditions also match. 
\end{proof} \begin{thm} \label{thm010} Let $\mathfrak{B}_{n,m,d}$ be the set of 010-avoiding inversion sequences that have length $n$, maximum $m$, and contain exactly $d$ distinct values. Let $\mathfrak{b}_{n,m,d} = \#\mathfrak{B}_{n,m,d}$, so that we have $$\# \emph{\textbf{I}}_n(010) = \sum_{m = 0}^{n-1} \sum_{d=0}^n \mathfrak{b}_{n,m,d}.$$ The numbers $\mathfrak{b}_{n,m,d}$ satisfy the following recurrence relation: \begin{equation} \mathfrak{b}_{n,m,d} = \sum_{i = 0}^{d-1} \binom{m-i}{d-i-1} \sum_{p = m+1}^n \Ustirling{n-p+1}{n-p-d+i+2} \sum_{j = 0}^{m-1} \mathfrak{b}_{p-1,j,i}. \label{eq010} \end{equation} \end{thm} \begin{proof} Let $\sigma \in \mathfrak{B}_{n,m,d}$. Let $p$ be the left-most position of the value $m$ in $\sigma$. Let $\alpha = (\sigma_i)_{i \in \{1, \dots, p-1\}}$ be the subsequence of $\sigma$ to the left of the left-most $m$, and $\beta = (\sigma_i)_{i \in \{p, \dots, n\}}$ be the subsequence of $\sigma$ starting at position $p$, so that $\alpha \cdot \beta = \sigma$. Since $\sigma$ avoids the pattern 010, $\alpha$ and $\beta$ avoid the pattern 010 and they do not share any common values. In particular, $\alpha \in \mathfrak{B}_{p-1,j,i}$ for some $j < m$ and $i < d$. Let us now assume we have already chosen $n,m,d,p,j,i$ and $\alpha$, and count how many sequences $\beta$ fit with those choices. The subsequence $\beta$ is a 010-avoiding word of length $n-p+1$, which contains exactly $d-i$ distinct values chosen from the remaining $m+1-i$ (that is, all values $\{0, \dots, m\}$ except for the $i$ values in $\alpha$), and such that $\beta_1 = m = \max(\beta)$. Since $m$ is always in $\beta$, it remains to choose the other $d-i-1$ values, there are therefore $\binom{m-i}{d-i-1}$ possible choices for the set of values of $\beta$. Once the values of $\beta$ are chosen, there are $\Ustirling{n-p+1}{n-p-d+i+2}$ ways to arrange them into a word avoiding 010 and starting with its maximum, according to Lemma \ref{lem010}. 
The recursive formula \eqref{eq010} is then obtained by summing over all possible values of $p, i$, and $j$ in this decomposition. \end{proof} Using Equation \eqref{eq010}, we generate the terms $\mathfrak{b}_{n,m,d}$ for $n \leqslant 140$ in 1 minute with a C++ program running on a personal computer. Summing over all $m$ and $d$ gives the number of inversion sequences of length $n$ avoiding 010. The first 14 terms of this sequence are $$1, 2, 5, 15, 53, 215, 979, 4922, 26992, 159958, 1016784, 6890723, 49534501, 376081602.$$ \section{The pair of patterns \{010,000\}} \label{000} We now enumerate inversion sequences avoiding both patterns 010 and 000. The addition of the pattern 000 does not change our decomposition, so this section is similar to Section \ref{010}. We begin by enumerating a family of words avoiding $\{010,000\}$, like we did for the pattern 010 in Section \ref{010}. \begin{lem} \label{lem000} Let $\mathfrak{C}_{n,k}$ be the set of $\{010, 000\}$-avoiding words $\omega = (\omega_1, \dots, \omega_n)$ of length $n$ on the alphabet $\{1, \dots, k\}$ such that $\omega$ contains all letters $\{1, \dots, k\}$ and $\omega_1 = k$. Let $\mathfrak{c}_{n,k} = \#\mathfrak{C}_{n,k}$. The sequence $\mathfrak{c}_{n,k}$ satisfies the following recurrence relation for all $n, k \geqslant 2$: \begin{equation} \mathfrak{c}_{n,k} = (n-1) \cdot \mathfrak{c}_{n-1,k-1} + (n-2) \cdot \mathfrak{c}_{n-2,k-1}. \label{eqlem000} \end{equation} \end{lem} \begin{rem} Because the words in $\mathfrak{C}_{n,k}$ avoid the pattern 000, we have $\mathfrak{c}_{n,k} = 0$ if $n > 2k$. \end{rem} \begin{proof}[Proof of Lemma \ref{lem000}] Let $n \geqslant k \geqslant 2$, and $\omega \in \mathfrak{C}_{n,k}$. Since $\omega$ avoids $010$, all letters 1 in $\omega$ are consecutive. Since $\omega$ avoids $000$, $\omega$ has at most two letters 1. 
\begin{itemize} \item If $\omega$ contains a single letter 1, then removing it and subtracting $1$ from all other letters yields a word $\omega' \in \mathfrak{C}_{n-1,k-1}$. There are $n-1$ possible positions for the letter 1 in $\omega$, so exactly $n-1$ words $\omega \in \mathfrak{C}_{n,k}$ yield the same $\omega'$. \item If $\omega$ contains two letters 1, then removing them and subtracting $1$ from all other letters yields a word $\omega' \in \mathfrak{C}_{n-2,k-1}$. There are $n-2$ possible positions for the two consecutive letters 1 in $\omega$, so exactly $n-2$ words $\omega \in \mathfrak{C}_{n,k}$ yield the same $\omega'$. \end{itemize} Hence the recurrence relation \eqref{eqlem000} holds for all $n \geqslant k \geqslant 2$. \end{proof} \begin{thm} \label{thm000} Let $\mathfrak{D}_{n,m,d}$ be the set of $\{010, 000\}$-avoiding inversion sequences that have length $n$, maximum $m$, and contain exactly $d$ distinct values. Let $\mathfrak{d}_{n,m,d} = \#\mathfrak{D}_{n,m,d}$. The numbers $\mathfrak{d}_{n,m,d}$ satisfy the following recurrence relation: \begin{equation} \mathfrak{d}_{n,m,d} = \sum_{i = 0}^{d-1} \binom{m-i}{d-i-1} \sum_{p = m+1}^n \mathfrak{c}_{n-p+1,d-i} \sum_{j = 0}^{m-1} \mathfrak{d}_{p-1,j,i}. \label{eq000} \end{equation} \end{thm} \noindent The proof of Theorem \ref{thm000} is identical to that of Theorem \ref{thm010}: the pattern 000 cannot spread over $\alpha$ and $\beta$ since they have no value in common. \section{The pair of patterns \{010,120\}} \label{120} In this section, we will enumerate inversion sequences avoiding the patterns 010 and 120. We will first solve the enumeration of `classical' words avoiding this pair of patterns (i.e.\ words defined by their length and the size of their alphabet). The proof still makes use of words that are required to contain all letters of their alphabet. However, unlike in Sections \ref{010} and \ref{000}, we no longer require the left-most value of our words to be their maximum.
\begin{lem} \label{lem120} Let $\mathfrak{E}_{n,k}$ be the set of $\{010, 120\}$-avoiding words of length $n$ over the alphabet $\{1, \dots, k\}$. Let $\mathfrak{e}_{n,k} = \#\mathfrak{E}_{n,k}$. Then \begin{equation} \mathfrak{e}_{n,k} = \sum_{d=0}^k \binom{k}{d} \frac{\binom{n-1}{d-1} \binom{n+d}{d-1}}{d}. \label{eqlem120} \end{equation} \end{lem} \begin{proof} Let $\mathfrak{F}_{n,k}$ be the subset of $\mathfrak{E}_{n,k}$ of words that contain all letters $\{1, \dots, k\}$, and $\mathfrak{f}_{n,k} = \#\mathfrak{F}_{n,k}$. By summing over the number of distinct letters $d$ in words in $\mathfrak{E}_{n,k}$, we have the formula \begin{equation} \mathfrak{e}_{n,k} = \sum_{d=0}^k \binom{k}{d} \mathfrak{f}_{n,d}. \label{eqWd120} \end{equation} We now decompose a word $\omega \in \mathfrak{F}_{n,k}$ around the left-most position of its maximum, similarly to what we did with inversion sequences earlier. Let $p$ be the left-most position of the letter $k$ in $\omega$. Let $\alpha = (\omega_i)_{i \in \{1, \dots, p-1\}}$, and $\gamma = (\omega_i)_{i \in \{p+1, \dots, n\}}$ be two subwords of $\omega$, so that $\alpha \cdot k \cdot \gamma = \omega$. Since $\omega$ avoids 010 and 120, $\alpha$ has smaller letters than $\gamma$. If $q$ is the maximum of $\alpha$, then the letters of $\alpha$ are exactly $\{1, \dots, q\}$, and those of $\gamma$ are either $\{q+1, \dots, k\}$ or $\{q+1, \dots, k-1\}$. We obtain the recurrence relation \begin{equation} \mathfrak{f}_{n,k} = \sum_{p=1}^{n} \sum_{q=0}^{k-1} \mathfrak{f}_{p-1,q} \cdot (\mathfrak{f}_{n-p,k-q} + \mathfrak{f}_{n-p,k-q-1}). \label{convo120} \end{equation} Let $$F(x,y) = \sum_{n,k \geqslant 0} x^n y^k \mathfrak{f}_{n,k}$$ be the bivariate ordinary generating function of $\mathfrak{f}_{n,k}$. The recurrence relation \eqref{convo120} translates into the following functional equation for $F$: \begin{equation} (x + xy) \cdot F(x,y)^2 - (x+1) \cdot F(x,y) + 1 = 0. 
\label{funeq120} \end{equation} From \eqref{funeq120}, we observe that the function $x^2 \cdot F(x,y)$ satisfies a functional equation for the ordinary generating function of the number of diagonal dissections of a convex $n$-gon into $k$ regions, found in \cite[Section 3.1]{Flajolet_Noy_1999}. Hence $\mathfrak{f}_{n,k}$ is the number of diagonal dissections of a convex $(n+2)$-gon into $k$ regions. A closed formula for the number of diagonal dissections is also obtained in \cite[Section 3.1]{Flajolet_Noy_1999}, which gives \begin{equation} \mathfrak{f}_{n,k} = \frac{\binom{n-1}{k-1} \binom{n+k}{k-1}}{k}. \label{eqbinom120} \end{equation} Finally, we obtain \eqref{eqlem120} from \eqref{eqWd120} and \eqref{eqbinom120}. \end{proof} The enumeration of inversion sequences avoiding 010 and 120 does not require us to refine sequences according to their number of distinct values. In this simpler situation, both our decomposition and the resulting formula are similar to what was done by Mansour and Shattuck in \cite{Mansour_Shattuck_2015} for the single pattern 120. \begin{thm} \label{thm120} Let $\mathfrak{G}_{n,m}$ be the set of $\{010, 120\}$-avoiding inversion sequences that have length $n$ and maximum $m$. Let $\mathfrak{g}_{n,m} = \#\mathfrak{G}_{n,m}$. The numbers $\mathfrak{g}_{n,m}$ satisfy the following recurrence relation: \begin{equation} \mathfrak{g}_{n,m} = \sum_{p = m+1}^n \sum_{j = 0}^{m-1} \mathfrak{g}_{p-1,j} \cdot \mathfrak{e}_{n-p, m-j}. \label{eq120} \end{equation} \end{thm} \begin{proof} We proceed similarly to the proof of Theorem \ref{thm010}. Let $\sigma \in \mathfrak{G}_{n,m}$. Let $p$ be the left-most position of the value $m$ in $\sigma$. Let $\alpha = (\sigma_i)_{i \in \{1, \dots, p-1\}}$ be the subsequence of $\sigma$ to the left of the left-most $m$, and $\gamma = (\sigma_i)_{i \in \{p+1, \dots, n\}}$ be the subsequence of $\sigma$ to the right of the left-most $m$, so that $\alpha \cdot m \cdot \gamma = \sigma$. 
Since $\sigma$ avoids the patterns 010 and 120, the subsequences $\alpha$ and $\gamma$ avoid these two patterns as well, and all values of $\gamma$ are greater than all values of $\alpha$. We have $\alpha \in \mathfrak{G}_{p-1,j}$ for some $j < m$, and $\gamma$ is a $\{010, 120\}$-avoiding word of length $n-p$ over the alphabet $\{j+1, \dots, m\}$. The recursive formula \eqref{eq120} is obtained by summing over all possible $p, j, \alpha$, and $\gamma$. \end{proof} \section{The pairs \{010,201\} and \{010,210\}} \label{210} \indent Wilf-equivalence between the pairs of patterns $\{010,201\}$ and $\{010,210\}$ was proved by Yan and Lin in \cite{Yan_Lin_2020} through the use of a bijection. In this section we will work with the pair of patterns $\{010,210\}$, although our method can also be applied to the pair $\{010,201\}$, resulting in the same formula. Earlier, we refined the enumeration of pattern-avoiding inversion sequences $\sigma$ according to two parameters (in addition to their size). The first one is the maximum $m$ of $\sigma$, and is needed in order to decompose sequences at the left-most position of their maximum. The second one records the number of values that are forbidden to the right of the left-most maximum, provided $\sigma$ is the left part in our usual decomposition around the left-most occurrence of the maximum (denoted $\alpha$ earlier). If $\sigma$ is a sequence avoiding a set of patterns $P$, we say that a value $v \in \{0, \dots, \max(\sigma)\}$ is \emph{forbidden} by $\sigma$ and $P$ if $\sigma \cdot m v$ contains a pattern in $P$ when $m > \max(\sigma)$. We denote by $\text{forb}(\sigma, P)$ the number of values forbidden by $\sigma$ and $P$, or simply $\text{forb}(\sigma)$ when there is no ambiguity. 
For example, for the patterns 010 and $\{010,000\}$, all values occurring in $\sigma$ were forbidden, so $\text{forb}(\sigma)$ was simply the number of distinct values in $\sigma$, which was recorded by the parameter $d$ in Theorems \ref{thm010} and \ref{thm000}. For the pair $\{010,120\}$, all values from 0 to $\max(\sigma)$ were forbidden, so the number $\max(\sigma)+1$ of forbidden values was redundant with recording the maximum of $\sigma$, and therefore omitted. \begin{rem} \label{rem210} If $\sigma$ is a $\{010, 210\}$-avoiding sequence with values in $\mathbb N$, then $\text{forb}(\sigma) = q+r$, where $q$ is the largest value of $\sigma$ such that a larger value appears to its left, or $q = 0$ if there is no such value (i.e. if $\sigma$ is nondecreasing), and $r$ is the number of values in $\sigma$ that are greater than or equal to $q$ (counted without multiplicity). \end{rem} \begin{proof} All values smaller than $q$ are forbidden by the pattern 210 (regardless of whether or not they occur in $\sigma$). In addition, the value $q$ itself and any values in $\sigma$ greater than $q$ (counted by $r$) are forbidden by the pattern 010. \end{proof} \begin{lem} \label{lem210} Let $\mathfrak{H}_{n,k}$ be the set of $010$-avoiding words $\omega$ of length $n$ over the alphabet $\{1, \dots, k\} \sqcup \{\infty\}$ (where $\infty$ is the largest letter) such that the subword $\omega'$ defined by removing all letters $\infty$ from $\omega$ is nondecreasing, and $k$ is the maximum of $\omega'$ if $\omega'$ is nonempty, or $k = 0$ otherwise. Consider the bipartition of $\mathfrak{H}_{n,k}$ into $\mathfrak{H}^{(1)}_{n,k} \sqcup \mathfrak{H}^{(2)}_{n,k}$, where $\mathfrak{H}^{(1)}_{n,k} = \{\omega \in \mathfrak{H}_{n,k} \; | \; \omega_n = \infty\}$, and $\mathfrak{H}^{(2)}_{n,k} = \{\omega \in \mathfrak{H}_{n,k} \; | \; \omega_n \neq \infty\}$. 
Let $\mathfrak{h}_{n,k} = \#\mathfrak{H}_{n,k}$, $\mathfrak{h}^{(1)}_{n,k} = \#\mathfrak{H}^{(1)}_{n,k}$, and $\mathfrak{h}^{(2)}_{n,k} = \#\mathfrak{H}^{(2)}_{n,k}$. These numbers satisfy the following equations: \begin{equation} \begin{cases} \mathfrak{h}_{n,k} = \mathfrak{h}^{(1)}_{n,k} + \mathfrak{h}^{(2)}_{n,k} \\[5pt] \mathfrak{h}^{(1)}_{n,k} = \mathfrak{h}_{n-1,k} \\[5pt] \mathfrak{h}^{(2)}_{n,k} = \mathfrak{h}^{(2)}_{n-1,k} + \sum_{i=0}^{k-1} \mathfrak{h}_{n-1,i}. \end{cases} \label{eqlem210} \end{equation} \end{lem} \begin{proof} By definition, $\mathfrak{H}_{n,k} = \mathfrak{H}^{(1)}_{n,k} \sqcup \mathfrak{H}^{(2)}_{n,k}$, therefore $\mathfrak{h}_{n,k} = \mathfrak{h}^{(1)}_{n,k} + \mathfrak{h}^{(2)}_{n,k}$. Let $\omega \in \mathfrak{H}_{n,k}$. \begin{itemize} \item If $\omega \in \mathfrak{H}^{(1)}_{n,k}$, then $\omega_n = \infty$. Removing $\omega_n$ yields a word in $\mathfrak{H}_{n-1,k}$. \item If $\omega \in \mathfrak{H}^{(2)}_{n,k}$, then by the nondecreasing property, $\omega_n = k$. \begin{itemize} \item If $\omega_{n-1} = k$, removing $\omega_n$ yields a word in $\mathfrak{H}^{(2)}_{n-1,k}$. \item If $\omega_{n-1} = \infty$, the avoidance of the pattern 010 ensures $\omega$ cannot contain another letter $k$, therefore removing $\omega_n$ yields a word in $\mathfrak{H}^{(1)}_{n-1,i}$ for some $i < k$. \item Otherwise, $\omega_{n-1} = i$ for some $i < k$, and removing $\omega_n$ yields a word in $\mathfrak{H}^{(2)}_{n-1,i}$. \end{itemize} \end{itemize} All maps above are bijections, so the last two formulas in \eqref{eqlem210} naturally follow, observing that $\mathfrak{h}^{(1)}_{n-1,i} + \mathfrak{h}^{(2)}_{n-1,i} = \mathfrak{h}_{n-1,i}$. \end{proof} \begin{thm} \label{thm210} Let $\mathfrak{I}_{n,m,f}$ be the set of $\{010, 210\}$-avoiding inversion sequences $\sigma$ that have length $n$, maximum $m$, and such that $\text{forb}(\sigma) = f$. Let $\mathfrak{i}_{n,m,f} = \#\mathfrak{I}_{n,m,f}$. 
The numbers $\mathfrak{i}_{n,m,f}$ satisfy the following recurrence relation: \begin{equation} \mathfrak{i}_{n,m,f} = \sum_{p = m+1}^n \sum_{i=0}^{f-1} \sum_{j = 0}^{m-1} \mathfrak{i}_{p-1,j,i} \cdot \mathfrak{h}_{n-p,f-i-1}. \label{eq210} \end{equation} \end{thm} \begin{proof} We proceed the same way as before. Let $\sigma \in \mathfrak{I}_{n,m,f}$. Let $p$ be the left-most position of the value $m$ in $\sigma$. Let $\alpha = (\sigma_i)_{i \in \{1, \dots, p-1\}}$ be the subsequence of $\sigma$ to the left of the left-most $m$, and $\gamma = (\sigma_i)_{i \in \{p+1, \dots, n\}}$ be the subsequence of $\sigma$ to the right of the left-most $m$, so that $\alpha \cdot m \cdot \gamma = \sigma$. Let $\gamma'$ be the subsequence of $\gamma$ obtained by removing all values $m$ from $\gamma$ (if $\gamma$ does not contain $m$, then $\gamma' = \gamma$). Since $\sigma$ avoids the pattern 010, the subsequences $\alpha$ and $\gamma$ avoid 010, and they do not share any common values. Since $\sigma$ avoids the pattern 210, $\alpha$ avoids 210, and $\gamma'$ is nondecreasing. The subsequence $\alpha$ is in $\mathfrak{I}_{p-1,j,i}$ for some $j < m$ and $i \leqslant f$. Notice that we actually have $i < f$ since $m$ is a forbidden value for $\sigma$ (because of the avoidance of 010), but not for $\alpha$ (since $m > \max(\alpha))$. Let $s$ be the largest value of $\alpha$ such that a larger value appears to its left, or $s = 0$ if there is no such value. Let $t$ be the number of values in $\alpha$ that are greater than or equal to $s$, counted without multiplicity. By Remark \ref{rem210}, we have $s+t = \text{forb}(\alpha) = i$. The subsequence $\gamma$ is a 010-avoiding word of length $n-p$ over an alphabet $\Sigma$ of size $m-i+1$ (that is, all values in $\{s, \dots, m\}$ except the $t$ values in $\{s, \dots, m\}$ forbidden by $\alpha$), and such that $\gamma'$ is nondecreasing. 
Since $f$ values are forbidden by $\sigma$, including $i$ values previously forbidden by $\alpha$, there are $f-i$ new values forbidden by $m \cdot \gamma$. The new values forbidden by $m \cdot \gamma$ are precisely: \begin{itemize} \item Every value in $\Sigma$ lower than the maximum of $\gamma'$, because of the avoidance of 210 (with $m$ playing the role of the 2, and the maximum of $\gamma'$ playing the role of the 1). \item The value $m$, and the maximum of $\gamma'$, because of the avoidance of 010. \end{itemize} It follows that the maximum of $\gamma'$ is the $(f-i-1)$-th smallest letter of $\Sigma$, therefore the number of possible choices for $\gamma$ is counted by $\mathfrak{h}_{n-p,f-i-1}$ (with $m$ taking the role of the letter denoted $\infty$ in Lemma \ref{lem210}). As before, the recursive formula \eqref{eq210} is obtained by summing over all possible $p, i, j, \alpha$, and $\gamma$. \end{proof} \section{The pair of patterns \{010,110\}} \label{110} In the case of the pair $\{010,110\}$, we will apply our decomposition of 010-avoiding inversion sequences to words as well. This can also be done with the words studied in Sections \ref{010}, \ref{000}, and \ref{120}, however the methods used in those sections resulted in simpler, more efficient formulas. We begin with a characterization of forbidden values similar to Remark \ref{rem210}. \begin{rem} \label{rem110} If $\sigma$ is a $\{010, 110\}$-avoiding sequence with values in $\mathbb N$, then $\text{forb}(\sigma) = q+r$, where $q$ is the largest value which appears twice in $\sigma$, or $q = 0$ if there is no such value, and $r$ is the number of values in $\sigma$ that are greater than or equal to $q$ (counted without multiplicity). \end{rem} \begin{proof} All values smaller than $q$ are forbidden by the pattern 110 (regardless of whether or not they occur in $\sigma$). In addition, the value $q$ itself and any values in $\sigma$ greater than $q$ (counted by $r$) are forbidden by the pattern 010. 
\end{proof} \noindent Let $\delta_{a,b} = \left \{ \begin{array}{lcl} 1 & \text{if} & a = b \\ 0 & \text{if} & a \neq b \end{array} \right.$ be the Kronecker delta function. \begin{lem} \label{lem110} Let $\mathfrak{J}_{n,k,f}$ be the set of $\{010,110\}$-avoiding words $\omega$ of length $n$ over the alphabet\footnote{Our alphabet now starts at 0 so that our characterization of forbidden values in Remark \ref{rem110} remains the same for both words and inversion sequences.} $\{0, \dots, k-1\}$ such that $\text{forb}(\omega) = f$. Let $\mathfrak{K}_{n,k}$ be the set of $\{010,110\}$-avoiding words $\omega$ of length $n$ over the alphabet $\{0, \dots, k-1\}$. Let $\mathfrak{j}_{n,k,f} = \#\mathfrak{J}_{n,k,f}$, and $\mathfrak{k}_{n,k} = \#\mathfrak{K}_{n,k}$, so that we have $$\mathfrak{k}_{n,k} = \sum_{f=0}^k \mathfrak{j}_{n,k,f}.$$ The numbers $\mathfrak{j}_{n,k,f}$ satisfy the following recurrence relation: \begin{equation} \mathfrak{j}_{n,k,f} = \sum_{p=1}^{n} \sum_{i=0}^{f-1} \sum_{m=0}^{k-1} \mathfrak{j}_{p-1,m,i} \cdot (\mathfrak{j}_{n-p, m-i, f-i-1} + \delta_{f, m+1} \cdot \sum_{\ell=0}^{n-p-1} \mathfrak{k}_{\ell, m-i}). \label{eqlem110} \end{equation} \end{lem} \begin{proof} Let $\omega \in \mathfrak{J}_{n,k,f}$. Let $m$ be the maximum of $\omega$ and $p$ the position of the left-most $m$ in $\omega$. Let $\alpha = (\omega_i)_{i \in \{1, \dots, p-1\}}$, and $\gamma = (\omega_i)_{i \in \{p+1, \dots, n\}}$, so that $\alpha \cdot m \cdot \gamma = \omega$. Since $\omega$ avoids the pattern 010, the subwords $\alpha$ and $\gamma$ avoid 010, and they do not share any common letters. Since $\sigma$ avoids the pattern 110, $\alpha$ and $\gamma$ avoid 110, and the letters of $\gamma$ are greater than any letters which appear twice in $\alpha$. Additionally, if a letter $m$ appears in $\gamma$, then all letters to its right must be $m$. If $i$ letters are forbidden by $\alpha$, i.e. 
$\alpha \in \mathfrak{J}_{p-1,m,i}$ then $\gamma$ is a $\{010,110\}$-avoiding word of length $n-p$ on an alphabet of size $m+1-i$ (that is, all letters $\{0, \dots, m\}$ except the $i$ letters forbidden by $\alpha$), and such that all letters to the right of a $m$ are also $m$. We distinguish two cases for $\gamma$: \begin{itemize} \item If $\gamma$ does not contain the letter $m$, then $\gamma$ is a $\{010,110\}$-avoiding word of length $n-p$ on an alphabet of size $m-i$. Since $f$ letters are forbidden by $\omega$, including $i$ letters forbidden by $\alpha$ and the letter $m$, the remaining $f-i-1$ must be forbidden by $\gamma$, therefore the number of possible choices for $\gamma$ is $\mathfrak{j}_{n-p, m-i, f-i-1}$. \item If $\gamma$ contains the letter $m$, let $\ell$ be the number of letters different from $m$ in $\gamma$ (counted with multiplicity). No letters other than $m$ may appear to the right of a $m$ in $\gamma$, therefore $\gamma$ can be seen as a $\{010,110\}$-avoiding word of length $\ell$ on an alphabet of size $m-i$ to which we append $n-p-\ell$ letters $m$. In this case, $m$ is the largest letter which appears twice in $\omega$, and $\omega$ does not contain any letters greater than $m$, therefore $f = m+1$. \end{itemize} The recursive formula \eqref{eqlem110} is obtained by summing over all $p, i, m, \ell, \alpha$, and $\gamma$. \end{proof} \begin{thm} \label{thm110} Let $\mathfrak{L}_{n,m,f}$ be the set of $\{010,110\}$-avoiding inversion sequences $\sigma$ of length $n$, maximum $m$, and such that $\text{forb}(\sigma) = f$. Let $\mathfrak{l}_{n,m,f} = \#\mathfrak{L}_{n,m,f}$. The numbers $\mathfrak{l}_{n,k,f}$ satisfy the following recurrence relation: \begin{equation} \mathfrak{l}_{n,m,f} = \sum_{p=m+1}^{n} \sum_{i=0}^{f-1} \sum_{j=0}^{m-1} \mathfrak{l}_{p-1,j,i} \cdot (\mathfrak{j}_{n-p, m-i, f-i-1} + \delta_{f,m+1} \cdot \sum_{\ell=0}^{n-p-1} \mathfrak{k}_{\ell, m-i}). 
\label{eq110} \end{equation} \end{thm} \noindent The proof of Theorem \ref{thm110} is identical to that of Lemma \ref{lem110}, except the left part of our decomposition is now an inversion sequence instead of a word. \section{The pair of patterns \{010,102\}} \label{102} The enumeration of inversion sequences avoiding the pair of patterns $\{010, 102\}$ remains unsolved, as our usual method does not apply to it. Indeed, the value 1 in the pattern 102 may appear in the left part of our decomposition, and the values 02 in the right part, hence recording the number of forbidden values is not sufficient in this case. \end{document}
\begin{document} \title{Differentiability of the speed of biased random walks on Galton-Watson trees} \author{Adam Bowditch\thanks{National University of Singapore, Department of Mathematics, Singapore {\tt [email protected]}} \and Yuki Tokushige\thanks{Kyoto University, RIMS, Kyoto 606-8502, Japan. {\tt [email protected]}}} \maketitle \begin{abstract} We prove that the speed of a $\lambda$-biased random walk on a supercritical Galton-Watson tree is differentiable for $\lambda$ such that the walk is ballistic and obeys a central limit theorem, and give an expression of the derivative using a certain $2$-dimensional Gaussian random variable. The proof heavily uses the renewal structure of Galton-Watson trees that was introduced in \cite{LPP3}. \end{abstract} \blfootnote{2010 {\itshape Mathematics Subject Classification}. 60J80, 60K05, 60K37, 60F17.} \blfootnote{{\itshape Key words and phrases}. Galton-Watson tree, biased random walks, renewal structure.} \section{Introduction}\label{s:int} In this paper, we investigate the speed of biased random walks on supercritical Galton-Watson trees. Specifically, we show that the speed is differentiable within a certain range of bias and obtain an expression for the derivative in terms of the covariance of a 2-dimensional Gaussian random variable. Random walks on GW-trees are a natural setting for studying trapping phenomena as dead-ends, caused by leaves in the trees, trap the walk. Even without leaves, the randomness in the environment slows the walk and several properties that seem obvious turn out to be non-trivial and interesting problems. These models can be used to approach related problems concerning biased random walks on percolation clusters (as studied in \cite{frha14}) and random walk in random environment (see for example \cite{S}) which experience similar phenomena.
For a recent review of trapping phenomena we direct the reader to \cite{arce06}, \cite{arfr16} and \cite{foma14} which detail the history of trapping models including their motivation via spin-glasses and cover recent developments in a range of models of random walks on underlying graphs including supercritical GW-trees. We now briefly describe the supercritical GW-tree conditioned on survival via the Harris decomposition; for more detail see \cite{atne04, J}. Let $\{p_k\}_{k\geq 0}$ denote the offspring distribution of a GW-process $W_n$ with a single progenitor, mean $\mu >1$ and probability generating function $f$. The process $W_n$ gives rise to a random tree $\mathbf{T}_f$ where individuals are represented by vertices and edges connect individuals with their offspring. Let $q$ denote the extinction probability of $W_n$ which is strictly less than $1$ since $\mu>1$ and non-zero only when $p_0>0$. In this case we then define \[g(s):=\frac{f((1-q)s+q)-q}{1-q} \qquad \text{ and } \qquad h(s):=\frac{f(qs)}{q}\] which are generating functions of a GW-process without deaths and a subcritical GW-process respectively (cf.\ Chapter I.12 of \cite{atne04}). An $f$-GW-tree conditioned on nonextinction $\mathbf{T}$ can be constructed by first generating a $g$-GW-tree $\mathbf{T}_g$ and then, to each vertex $x$ of $\mathbf{T}_g$, appending a random number of independent $h$-GW-trees (see Figure~\ref{treediag}). We refer to $\mathbf{T}_g$ as the backbone of $\mathbf{T}$, the finite trees appended to $\mathbf{T}_g$ as the traps and the vertices in the first generation of the traps as the buds. \begin{figure} \caption{A sample section of a supercritical GW-tree conditioned to survive $\mathbf{T}$.} \label{treediag} \end{figure} We now introduce the biased random walk on a fixed tree $\mathscr{T}$. We denote by $e(\mathscr{T})$ the root, which is the vertex representing the unique progenitor.
For $x\in \mathscr{T}$, let $\pi(x)$ denote the parent of $x$ and $\nu(x)$ the number of children of $x$. A $\lambda$-biased random walk on $\mathscr{T}$ is a random walk $(Z_n)_{n \geq 0}$ on the vertices of $\mathscr{T}$ started from $e(\mathscr{T})$ with transition probabilities \begin{align*} \mathit{P}^\mathscr{T}_\lambda(Z_{n+1}=y|Z_n=x)=A_{\lambda}(x,y):=\begin{cases} \frac{\lambda}{\lambda+\nu(x)}, & \text{if } y=\pi(x), \\ \frac{1}{\lambda +\nu(x)}, & \text{if } x=\pi(y)\neq e(\mathscr{T}), \\ \frac{1}{\nu(x)}, & \text{if } x=\pi(y)= e(\mathscr{T}), \\ 0, & \text{otherwise.} \\ \end{cases} \end{align*} We use $\mathit{P}_\lambda(\cdot):=\int \mathit{P}^\mathbf{T}_\lambda(\cdot)\mathbb{P}(\text{d}\mathbf{T})$ for the \emph{annealed law} obtained by averaging the \emph{quenched law} $\mathit{P}^\mathbf{T}_\lambda$ over the law $\mathbb{P}$ on $f$-GW-trees conditioned to survive. For $x \in \mathbf{T}$, let $d(x)$ denote the distance between $x$ and the root of the tree and write $\lambda_c:=f'(q)$ where we note that $\lambda_c=0$ when $p_0=0$. The behaviour of $\lambda$-biased random walks on the GW-tree $\mathbf{T}$ has been extensively studied since \cite{LPP3} showed that if $\lambda\in(\lambda_c,\mu)$ then the walk is \emph{ballistic}; that is, $d(Z_n)n^{-1}$ converges $\mathit{P}_\lambda$-a.s.\ to a deterministic constant $\upsilon_\lambda>0$ called the \emph{speed} of the walk. When $\lambda>\mu$ the walk is recurrent and $d(Z_n)n^{-1}$ converges $\mathit{P}_\lambda$-a.s.\ to $0$. When $\lambda$ is small and $p_0>0$, the walk is transient but slowed by having to make long sequences of movements against the drift in order to escape the traps; in particular, if $\lambda\leq \lambda_c$ then the slowing effect is strong enough to cause $d(Z_n)n^{-1}$ to converge $\mathit{P}_\lambda$-a.s.\ to $0$. This regime has been studied further in \cite{BFGH} and \cite{B2} where polynomial scaling results are shown.
The aim of this paper is to study how the value of $\upsilon_\lambda$ depends on the parameter of bias $\lambda$; specifically, our main result is the following. \begin{thm}\label{dif} Suppose that there exists $\beta>1$ such that $\sum_{k=1}^{\infty}p_k\beta^k<\infty$. Then, the function $\lambda\mapsto \upsilon_{\lambda}$ is differentiable on $(\lambda_c^{1/2},\mu)$. Moreover, the derivative of the speed $\upsilon'_{\lambda}$ can be expressed as the covariance of a two dimensional Gaussian random variable $(X,Y)$. Namely, we have that $\upsilon'_{\lambda}=E_{\lambda}[XY]$. \end{thm} We remark here that $0\leq\lambda_c\leq\lambda_c^{1/2}<1$ since $0\leq\lambda_c<1$ and note that the covariance matrix of $(X,Y)$ is given in \eqref{var}. In the unpublished note \cite{A2}, the differentiability of the function $\lambda\mapsto \upsilon_\lambda$ was shown for $0<\lambda<1$ in the case $p_0=0$, and an expression of the derivative was given which is based on the description of invariant measures for the environment seen from the particle obtained in \cite{A1}. A fluctuation-dissipation theorem FDT (see \cite{dede10,ku66}) suggests that the internal fluctuations of a system at equilibrium should be related to the response of the system to an external disturbance. In the context of a random walk, this would suggest that the fluctuations of the walk should be related to the response of imposing a drift. A widely held conjecture is that an FDT should hold in many random walk models (e.g.\ \cite{gamapi12,lero94}); however, it has been shown in \cite{cuku93} that this is violated by several mean-field spin glass models at low temperature due to slow dynamics and aging. This is of particular interest due to the connections between spin-glasses and models of random walks in random trapping environments. 
Some progress towards proving an FDT for a random walk on a supercritical GW-tree without leaves was made in \cite{arhuolze13} where it was shown that the diffusivity is equal to the mobility (the derivative of the speed with respect to the exterior force $\alpha_\lambda=\log(\mu/\lambda)$) at the diffusive point $\lambda=\mu$. Understanding the relation between the diffusivity and the mobility for $\lambda$ in the ballistic regime remains open. It has been shown in \cite{B3} and \cite{PZ} that, under the conditions of Theorem \ref{dif}, there exists a constant $\varsigma\in(0,\infty)$ such that, for $\mathbb{P}$-a.e.\ $\mathbf{T}$, \[B_t^n:=\frac{d(Z_{nt})-nt\upsilon_\lambda}{\varsigma\sqrt{n}}\] converges in $\mathit{P}^{\mathbf{T}}_\lambda$-distribution to a Brownian motion. In particular, the range of bias $(\lambda_c^{1/2},\mu)$ is precisely the range in which the walk is ballistic and a central limit theorem holds. We expect that the differentiability should extend to $(\lambda_c,\mu)$ however, our proof relies heavily on second moment bounds of regeneration times which only hold in the smaller range of bias. The key ingredients of the proof are the \emph{renewal structure}, the \emph{discrete Girsanov formula} and suitable moment bounds on excursion times of random walks in GW-trees. The renewal structure allows paths of a random walk to be decomposed into \emph{i.i.d.\ } components. This technique is frequently used to analyse random walks in random environments as well as various other models in probability and statistical mechanics. \cite{LPP3} constructed the renewal structure for supercritical GW-trees, which we will heavily utilise in this paper. See \cite{BFS,BGN,DGPZ} for applications of this method to the analysis of the speed of random walks in random environments. In particular, we refer to the paper \cite{BGN}, where the authors study the speed of biased random walks on a random conductance model, since our strategy resembles theirs. 
See \cite{M} also for a study of a similar problem in the context of random walks on word-hyperbolic groups. We now describe the discrete Girsanov formula which allows us to relate the walk for different values of the bias. Let $\mathscr{T}$ be a rooted infinite tree and $\bigl(\mathcal{F}_n(\mathscr{T})\bigr)_{n\geq0}$ be the filtration on the probability space $(\tilde{\Omega}(\mathscr{T}),\mathcal{F}(\mathscr{T}),P_{\lambda}^{\mathscr{T}})$ generated by the $\lambda$-biased random walk $(Z_n)$ on $\mathscr{T}$. Then for an $\bigl(\mathcal{F}_n(\mathscr{T})\bigr)$-stopping time $S$, an $\mathcal{F}_S(\mathscr{T})$-measurable function $F:\tilde{\Omega}(\mathscr{T})\rightarrow\mathbb{R}$ and $h\geq -\lambda$, we have that \begin{align}\label{Gir} E^{\mathscr{T}}_{\lambda+h}\left[F\bigl((Z_k)_{k\geq0}\bigr)\right] =E^{\mathscr{T}}_{\lambda}\left[F\bigl((Z_k)_{k\geq0}\bigr)\prod_{i=1}^S\frac{A_{\lambda+h}(Z_{i-1},Z_i)}{A_{\lambda}(Z_{i-1},Z_i)}\right]. \end{align} We remark here that regeneration times are not stopping times, thus the formula \eqref{Gir} does not apply directly to them. Moreover, we will mostly work with the annealed measure conditioned on {\it non-backtracking} in order to avoid bad behaviour of the first regeneration time. (See Remark \ref{rem:tau1} for details.) The presence of the non-backtracking condition is also an obstacle to apply the Girsanov formula. We will solve this problem using Lemma \ref{lem:replace}. In order to study the change in $\upsilon_\lambda$ as we vary the bias $\lambda$, we require control on the walk that is uniform in the bias. Specifically, due to the regeneration structure, it will suffice to control the variation of the walk within a single regeneration block. To this end, an important role is played by Proposition \ref{p:UniMom}, which gives a moment estimate of regeneration times that is uniform in the bias. 
Its proof is the main technical contribution of this paper and Sections \ref{s:uni}, \ref{s:mom} and \ref{s:reg} are entirely devoted to the fairly intricate arguments involved in it. The organisation of this paper is as follows: in Section \ref{s:ren}, we first introduce several basic facts on the renewal structure of GW-trees. In Section \ref{s:exp}, we will prove Theorem \ref{dif} using the formula \eqref{Gir}. We defer the more technical aspects concerning moments of regeneration times to Sections \ref{s:uni}, \ref{s:mom} and \ref{s:reg}. Specifically, in Section \ref{s:uni} we show that the uniform moment estimates for regeneration times hold for GW-trees without leaves, in Section \ref{s:mom} we prove a moment bound on the generation sizes of GW-trees and, finally, in Section \ref{s:reg} we combine these estimates to prove that the uniform moment estimates for regeneration times extend to the case with leaves. \if0 \begin{comYuki} I will add a couple of sentences to the introduction according to the following my opinions. Please let me know if you don't agree with me on some of them. \begin{itemize} \item I believe it's very important to emphasize that our uniform moment estimate is the main technical contribution of this article. What I proved is somewhat common sense for experts of this subject. \end{itemize} \end{comYuki} \fi \if0 Let $\mathbf{T}$ be a Galton-Watson tree with offspring distribution $\{p_k\}_{k\geq0}$ such that $p_n\neq1$ for any $n\in\bn$. We will denote by $\mathbb{P}$ the distribution of the Galton-Watson tree. In this paper, we always assume that $\mathbf{T}$ is supercritical ({\it i.e.,} $m:=\sum_{k\geq1}kp_k>1$) and has no leaves ({\it i.e.,} $p_0=0$). Fix $\lambda>0$. 
For a given infinite rooted tree $T$ without leaves, we consider the $\lambda$-{\it biased random walk} $(Z_n)_{n\geq0}$ defined on a probability space $(\tilde{\Omega}(T),\mathcal{F},P_{\lambda}^T)$ starting at the root whose transition probabilities $\{A_{\lambda}(x,y)\}_{x,y\in T}$ are given as follows: denote the root of $T$ by $e(T)$, and for $x\in T$, the number of its offspring by $\nu(x)$. From the root $e(T)$, the random walk moves to one of its children equally likely, and from $x\neq e$ which has children $x_1,...,x_{\nu(x)}$, the random walk moves to one of neighbors of $x$ according to the following formula. \begin{align*} A_{\lambda}(x,\pi(x)):=\frac{\lambda}{\lambda+\nu(x)},\ \ {\rm and}\ \ A_{\lambda}(x,x_i):=\frac{1}{\lambda+\nu(x)},\ \ {\rm for}\ 1\leq i\leq\nu(x), \end{align*} where $\pi(x)$ is the parent of $x$. For $x,y\in T$, denote by $d(x,y)$ the distance between $x$ and $y$, and by $d(x)$ the distance between $e(T)$ and $x$. Behaviors of $\lambda$-biased random walks on the Galton-Watson tree $\mathbf{T}$ have been extensively studied over decades. For instance, it is proved in \cite{L} that when $\lambda\in(0,m)$, the $\lambda$-biased random walk on the Galton-Watson tree $\mathbf{T}$ is transient $\mathbb{P}$-a.s. Later, it is shown in \cite{LPP1, LPP2} that when $\lambda\in(0,m)$ and $p_0=0$, the sequence $n^{-1}d(Z_n)$ converges $P_{\lambda}$-almost surely and in $L^1(P_{\lambda})$, where $P_{\lambda}$ is the so-called {\it annealed measure} and defined by \begin{align*} P_{\lambda}(\cdot):=\int \mathbb{P}({\rm d}\mathbf{T})P_{\lambda}^{\mathbf{T}}(\cdot). \end{align*} Moreover, it is proved in \cite{LPP1,LPP2} that the limit of $n^{-1}d(Z_n)$ is a deterministic positive constant, and usually called the {\it speed} of the $\lambda$-biased random walk on the Galton-Watson tree, and we will denote it by $\upsilon_\lambda>0$. 
\fi \section{Renewal structure of Galton-Watson trees}\label{s:ren} \if0 \subsection{Notation} Throughout this paper, we regard a rooted tree as a subset of $\bn^{\infty}:=\{e\}\cup\bigcup_{k=1}^{\infty}\bn^k$ in the following way: a rooted tree $\mathscr{T}$ is defined as a subset of $\bn^{\infty}$ with the following properties. \begin{itemize} \item $e\in \mathscr{T}$, \item whenever $x\in \mathscr{T}\setminus\{e\}$, we have $\pi(x)\in \mathscr{T}$, where $\pi(x)=x_1...x_{n-1}$ for $x=x_1...x_{n-1}x_n$, \item if $x=x_1...x_n$, there exists $\nu(x)\in\bn$ such that $x_1...x_nj\in \mathscr{T}$ for any $1\leq j\leq\nu(x)$. \end{itemize} For $x=x_1...x_n$ and $y=x_1...x_ny_1...y_m$, define $x^{-1}y:=y_1...y_m$. \fi In this section, we introduce regeneration times and state their moment estimates, which will be very important for this study. \begin{dfn} For a rooted tree $\mathscr{T}$ and $x\in \mathscr{T}$, define $P_{\lambda,x}^{\mathscr{T}}(\cdot):=P_{\lambda}^{\mathscr{T}}(\cdot|Z_0=x)$. (Thus, $P^{\mathscr{T}}_{\lambda}=P^{\mathscr{T}}_{\lambda,e(\mathscr{T})}$.) We will denote the expectation with respect to $P_{\lambda}$ (resp. $P_{\lambda}^{\mathscr{T}}$) by $E_{\lambda}$ (resp.\ $E_{\lambda}^{\mathscr{T}}$). \end{dfn} \begin{dfn}\label{d:re} Let $(Z_n)_{n\geq0}$ be the $\lambda$-biased random walk on a rooted tree $\mathscr{T}$. \begin{description} \item[1] A time $n\in\mathbb{N}$ is called a regeneration time if $d(Z_n)> d(Z_k)$ for all $k<n$ and $d(Z_l)> d(Z_{n-1})$ for all $l>n$. \item[2] For $x\in \mathscr{T}$, define the first return time $\sigma_x$ by $\sigma_x:=\inf\{n\geq1\ ;\ Z_n=x\}$. \end{description} \end{dfn} \begin{rem} Regeneration times defined above are called {\it level-regeneration times} in \cite{DGPZ}, and are different from what are defined in \cite{LPP3}. \end{rem} \begin{dfn}Let $\mathscr{T}$ be a rooted tree. 
\begin{description} \item[1] For $x\in \mathscr{T}$, define $\mathscr{T}(x)$ as the subtree of $\mathscr{T}$ which consists of $x$ and its descendants. The vertex $x$ is naturally regarded as the root of $\mathscr{T}(x)$. \item[2] We will denote by $\mathscr{T}^*$ a new tree obtained by adding to the graph $\mathscr{T}$ an edge connecting $e(\mathscr{T})$ and a new vertex $e^*(\mathscr{T})$. The vertex $e^*(\mathscr{T})$ is considered as the root of $\mathscr{T}^*$ and the parent of $e(\mathscr{T})$. We often write $e$ and $e^*$ for $e(\mathscr{T})$ and $e^*(\mathscr{T})$ when the tree is clear from context. \end{description} \end{dfn} The usefulness of renewal structure and regeneration times is that they provide a way to decompose sample paths of random walks into \emph{i.i.d.\ } pieces. When we deal with random walks on graphs carrying good renewal structures, approximations using regeneration times often enable us to reduce the analysis of the statistical behaviour of random walks to that of \emph{i.i.d.\ } random variables. We note that a different sequence of regeneration times (called {\it super-regeneration times}) has been introduced in \cite{BFGH} which decouples the event of a regeneration from the structure of the tree. These are particularly useful in decomposing the walk; however, this definition of regeneration times is only suitable when $\lambda<1$ because it relies on comparison with a biased random walk on $\mathbb{Z}$ with this bias. An important property is that, by Lemma 3.3 and Proposition 3.4 of \cite{LPP3}, for $\lambda\in(0,\mu)$ there exist, $P_{\lambda}$-a.s., infinitely many regeneration times $0=:\tau_0<\tau_1<\tau_2<\dots$ and the sequence $\{(\tau_{i+1}-\tau_i,d(Z_{\tau_{i+1}})-d(Z_{\tau_i}))\}_{i\geq0}$ consists of \emph{i.i.d.\ } random vectors under $P_{\lambda}$. 
A useful fact is that the law of $\left(\tau_2-\tau_1,d(Z_{\tau_2})-d(Z_{\tau_1})\right)$ under the probability measure $P_{\lambda}$ is identical to the law of $\left(\tau_1,d(Z_{\tau_1})\right)$ under the probability measure $P_{\lambda}^{\tt NB}$, where \begin{align*} P_{\lambda}^{\tt NB}(A):= \int\mathbb{P}({\rm d}\mathbf{T})P_{\lambda,e(\mathbf{T})}^{\mathbf{T}^*}(A\cap\sigma_{e^*(\mathbf{T})}=\infty)\cdot \left(\int\mathbb{P}({\rm d}\mathbf{T})P_{\lambda,e(\mathbf{T})}^{\mathbf{T}^*}(\sigma_{e^*(\mathbf{T})}=\infty)\right)^{-1}. \end{align*} Therefore, with respect to $P_{\lambda}^{\tt NB}$, the distribution of $\left(\tau_2-\tau_1,d(Z_{\tau_2})-d(Z_{\tau_1})\right)$ coincides with that of $\left(\tau_1,d(Z_{\tau_1})\right)$. We will denote by $E_{\lambda}^{\tt NB}$ the expectation with respect to $P_{\lambda}^{\tt NB}$. \if0 \begin{prp}\label{re} \begin{description} Assume that $\lambda\in(\lambda_c,\mu)$. Then the following claims hold. \item[(1)] There exist infinitely many regeneration times $0=:\tau_0<\tau_1<\tau_2<....$ and infinitely many level regeneration times $0=:r_0<r_1<r_2<...$ $P_{\lambda}$-a.s. \item[(2)] The sequence $\{(Z_{\tau_i}^{-1}Z_{\tau_i+k})_{0\leq k\leq\tau_{i+1}-\tau_i}\}_{i\geq0}$ are independent $\mathbb{N}^{\infty}$-random variables under $P_{\lambda}$. Moreover, $\{(Z_{\tau_i}^{-1}Z_{\tau_i+k})_{0\leq k\leq\tau_{i+1}-\tau_i}\}_{i\geq1}$ are identically distributed. \item[(3)] The sequences $\{\tau_{i+1}-\tau_i,d(Z_{\tau_{i+1}})-d(Z_{\tau_i})\}_{i\geq0}$ and are \emph{i.i.d.\ } random vectors under $P_{\lambda}$. 
\item[(4)] The law of $\left(\tau_2-\tau_1,d(Z_{\tau_2})-d(Z_{\tau_1})\right)$ under the probability measure $P_{\lambda}$ is identical to the law of $\left(\tau_1,d(Z_{\tau_1})\right)$ under the probability measure $P_{\lambda}^{\tt NB}$, where \begin{align*} P_{\lambda}^{\tt NB}(A):= \int\mathbb{P}({\rm d}\mathbf{T})P_{\lambda,e(\mathbf{T})}^{\mathbf{T}^*}(A\cap\sigma_{e^*(\mathbf{T})}=\infty)\cdot \left(\int\mathbb{P}({\rm d}\mathbf{T})P_{\lambda,e(\mathbf{T})}^{\mathbf{T}^*}(\sigma_{e^*(\mathbf{T})}=\infty)\right)^{-1}. \end{align*} \end{description} \end{prp} \begin{proof} The first claim and the third claim are shown in Lemma 3.3 and Proposition 3.4 of \cite{LPP3}. The second claim is clearly stated in the proof of Proposition 3.4 in \cite{LPP3}. Therefore, we only need to prove the fourth claim. We first introduce several stopping times, Define $S_1$ and $U_1$ by \begin{align*} &S_1:=1,\ \ U_1:=\inf\{n\geq 1\ ;\ Z_n\notin\mathbf{T}(Z_1)\}\}. \intertext{We further define $S_k$ and $U_k$ recursively by} &S_k:=\inf\{n\geq U_{k-1}\ ;\ Z_n\notin\{Z_0,Z_1,...,Z_{U_{k-1}}\}\}, \ \ U_{k}:=\inf\{n\geq S_{k-1}\ ;\ Z_n\notin\mathbf{T}(Z_{S_{k}})\}. \end{align*} Then we have $\tau_1=S_J$, where $J$ is the unique integer such that $S_{J}<\infty$ and $U_{J}=\infty$. Note that conditionally on $\mathbf{T}$, $(S_k)$ and $(U_k)$ are $(\mathcal{F}_n(\mathbf{T}))$-stopping times although $\tau_1$ is not. Now we get \begin{align*} P_{\lambda}\left[(Z_{\tau_1}^{-1}Z_{\tau_1+k})_{k\geq0}\in A\right] &=\sum_{j\geq1}P_{\lambda}\left[(Z_{S_j}^{-1}Z_{S_j+k})_{k\geq0}\in A,J=j\right]\\ &=\sum_{j\geq1}\int\mathbb{P}({\rm d}\mathbf{T})P^{\mathbf{T}}_{\lambda}\left[ (Z_{S_j}^{-1}Z_{S_j+k})_{k\geq0}\in A, S_j<\infty,U_{j}=\infty\right]. 
\end{align*} In the proof of Proposition 3.4 in \cite{LPP3}, it is shown that the pair $<\mathbf{T}(Z_{\tau_1}), (Z_{\tau_1+k})_{k\geq1}>$ is independent of $<\mathbf{T}\setminus\mathbf{T}(Z_{\tau_1})\cup\{X_{\tau_1}\},(Z_k)_{k\leq\tau_1}>$ under $P_{\lambda}$. Moreover, Lemma 3.2 in \cite{LPP3} implies that the law of $\mathbf{T}(Z_{S_j})$ is identical to that of $\mathbf{T}$ under $P_{\lambda}$. Thus, we obtain \begin{align*} &\sum_{j\geq1}\int\mathbb{P}({\rm d}\mathbf{T})P^{\mathbf{T}}_{\lambda}\left[ (Z_{S_j}^{-1}Z_{S_j+k})_{k\geq0}\in A,S_j<\infty,U_{j}=\infty\right]\\ =&\sum_{j\geq1}P_{\lambda}(S_j<\infty)P_{\lambda}\left[ P_{\lambda,Z_{S_j}}^{(\mathbf{T}(Z_{S_j}))^*}[(Z_{S_j}^{-1}Z_{S_j+k})_{k\geq0}\in A,\sigma_{\pi(Z_{S_j})}=\infty]\right]\\ =&\int\mathbb{P}({\rm d}\mathbf{T})P_{\lambda,e(\mathbf{T})}^{\mathbf{T}^*}[(Z_{k})_{k\geq0}\in A,\sigma_{e^*(\mathbf{T})}=\infty] \sum_{j\geq1}P_{\lambda}(S_j<\infty). \end{align*} By substituting the whole space for $A$, we get \begin{align*} \sum_{j\geq1}P_{\lambda}(S_j<\infty)=\left(\int\mathbb{P}({\rm d}\mathbf{T})P_{\lambda,e(\mathbf{T})}^{\mathbf{T}^*}[\sigma_{e^*(\mathbf{T})}=\infty]\right)^{-1}. \end{align*} This implies the conclusion. \end{proof} \fi The following moment estimate of regeneration times which is uniform in $\lambda$ will play an important role in this paper. We note that $\lambda_c<1$ thus the condition $a<1$ is to ensure that $\log(\lambda_c)/\log(a)>0$. Since the proof is quite technical, we postpone it to Sections \ref{s:uni}, \ref{s:mom} and \ref{s:reg}. \begin{prp}\label{p:UniMom} Suppose $[a,b] \subset(0,\mu)$ with $a<1$ and that there exists $\beta>1$ such that $\sum_{k=1}^{\infty}p_k\beta^k<\infty$. 
Then, for any $\alpha<\log(\lambda_c)/\log(a)$ we have \[ \sup_{\lambda\in [a,b]}E_{\lambda}^{\tt NB}[\tau_1^{\alpha}]=\sup_{\lambda\in [a,b]}E_{\lambda}^{\tt NB}[(\tau_2-\tau_1)^{\alpha}]= \sup_{\lambda\in [a,b]}\mathit{E}_\lambda[(\tau_2-\tau_1)^{\alpha}]<\infty.\] \end{prp} If $a>\lambda_c^{1/2}$ then $\log(\lambda_c)/\log(a)>2$; therefore, we immediately have the following corollary. \begin{cly}\label{c:2pe} Suppose $[a,b] \subset(\lambda_c^{1/2},\mu)$ and that there exists $\beta>1$ such that $\sum_{k=1}^{\infty}p_k\beta^k<\infty$. Then, for some $\varepsilon>0$ we have \[ \sup_{\lambda\in [a,b]}E_{\lambda}^{\tt NB}[\tau_1^{2+\varepsilon}]=\sup_{\lambda\in [a,b]}E_{\lambda}^{\tt NB}[(\tau_2-\tau_1)^{2+\varepsilon}]= \sup_{\lambda\in [a,b]}\mathit{E}_\lambda[(\tau_2-\tau_1)^{2+\varepsilon}]<\infty.\] \end{cly} \begin{rem}\label{rem:tau1} In general, $\tau_1$ does not satisfy these good moment estimates under the law $\mathit{P}_\lambda$ which is one of the reasons that we use instead the law $\mathit{P}_\lambda^{\tt NB}$. This problem arises from the number of excursions of the random walk from the root to itself until the walk escapes. Denoting by ${\sf RE}$ the number of visits of the walk to the root, we have that $\tau_1\geq{\sf RE}\ P_{\lambda}\mathchar`-a.s.$ Moreover, ${\sf RE}$ under $P_{\lambda}^{\mathbf{T}}$ is distributed as the geometric random variable whose termination probability is the quenched escape probability $P_{\lambda}^{\mathbf{T}}(\sigma_{e^*(\mathbf{T})}=\infty)$. Suppose that $p_1>0$ and $\lambda>1$. We denote by $R^{\lambda}(\mathbf{T})$ the effective resistance from $e(\mathbf{T})$ to $\infty$ of $\mathbf{T}$ where $\mathbf{T}$ is regarded as the electric network corresponding to the $\lambda$-biased random walk. Let us consider the event where every individual up to the $n^{\text{th}}$ generation gives birth to only one child, which occurs with probability $p_1^n$. On this event, $R^{\lambda}(\mathbf{T})$ is of order $\lambda^n$. 
This observation implies that $$\mathbb{P}(R^{\lambda}(\mathbf{T})>n)\geq cn^{\log p_1/\log\lambda}$$ for some constant $c>0$. (Note that $\log p_1<0$ since $0<p_1<1$.) In the light of a well-known fact in the theory of electric networks and reversible random walks (see Theorem 2.11 of \cite{Ba17} for instance), the above estimate implies that ${\sf RE}$ (therefore $\tau_1$ also) does not even satisfy a finite first moment in general. \end{rem} We also need the following estimates in what follows. Their proofs will also be given in Sections \ref{s:uni}, \ref{s:mom} and \ref{s:reg}. \begin{prp}\label{est:sigma} Suppose that there exists $\beta>1$ such that $\sum_{k=1}^{\infty}p_k\beta^k<\infty$. Then for any $\lambda\in(\lambda_c,\mu)$, we have that \[\mathbb{E}\left[E_{\lambda}^{\mathbf{T}^*}\left[\sigma_{e^*(\mathbf{T})}\mathbf{1}_{\{\sigma_{e^*(\mathbf{T})}<\infty\}}\right]\right]<\infty.\] \end{prp} \begin{prp}\label{p:esc-conti} The function \[\lambda\mapsto\mathbb{E}\left[P_{\lambda}^{\mathbf{T}^*}\left(\sigma_{e^*(\mathbf{T})}=\infty\right)\right]\] is continuous on $(\lambda_c,\mu)$. \end{prp} \if0 \begin{comAdam} I might include a short note on the proof or include it in the appendix if we choose to have one. \end{comAdam} \fi \section{Expressions of derivatives of the speed}\label{s:exp} In this section we prove Theorem \ref{dif} assuming Proposition \ref{p:UniMom}. The following result gives the finite approximation of the derivative. Notice that we do not use expectations $E_{\lambda}[d(Z_n)]$ and $E_{\lambda+h}[d(Z_n)]$, but $E_{\lambda}^{\tt NB}[d(Z_n)]$ and $E_{\lambda+h}^{\tt NB}[d(Z_n)]$, to approximate the derivative, which is a non-negligible difference from the approach of \cite{BGN}. This is necessary because of the lack of good moment estimates of the first regeneration time $\tau_1$, which arises from a special role played by the root. See Remark \ref{rem:tau1} for details. 
\begin{prp}\label{appro} Suppose $\lambda\in(\lambda_c^{1/2},\mu)$ and that there exists $\beta>1$ such that $\sum_{k=1}^{\infty}p_k\beta^k<\infty$. Let $h$ tend to $0$ and $n$ tend to $\infty$ in such a way that $h^2n$ tends to $1$ (i.e.\ $hn\sim n^{1/2}$). Then \begin{align*} \frac{\upsilon_{\lambda+h}-\upsilon_\lambda}{h}-\frac{E_{\lambda+h}^{\tt NB}[d(Z_n)]-E_{\lambda}^{\tt NB}[d(Z_n)]}{hn} \end{align*} tends to $0$. \end{prp} \begin{proof} Define $\eta_n:=\inf\{k:\tau_k\geq n\}$ for $n\in\mathbb{N}$, then $\eta_n$ is a stopping time with respect to the filtration generated by random variables $\tau_1$, and $\{\tau_{i+1}-\tau_i\}_{i\geq1}$. By the definition of $\eta_n$, we have \begin{align}\label{e:Sdw} n\leq\tau_{\eta_n}\leq n+\max_{0\leq i\leq n}(\tau_{i+1}-\tau_i),\ P_{\lambda}^{\tt NB}\mathchar`-a.s. \end{align} Combining this with Wald's identity we then have \begin{align}\label{a} n\leq E_{\lambda}^{\tt NB}[\tau_{\eta_n}]=E_{\lambda}^{\tt NB}[\eta_n]E_{\lambda}^{\tt NB}[\tau_1]\leq n +E_{\lambda}^{\tt NB}\left[\max_{0\leq i\leq n}(\tau_{i+1}-\tau_i)\right]. \end{align} Using that $0\leq |d(Z_n)-d(Z_{\tau_{\eta_n}})|\leq \tau_{\eta_n}-n$, \eqref{e:Sdw} then implies that \begin{align*} |E_{\lambda}^{\tt NB}[d(Z_n)]-E_{\lambda}^{\tt NB}[d(Z_{\tau_{\eta_n}})]| \leq E_{\lambda}^{\tt NB}\left[\max_{0\leq i\leq n}(\tau_{i+1}-\tau_i)\right]. \end{align*} Wald's identity then gives \begin{align*} E_{\lambda}^{\tt NB}[d(Z_{\tau_{\eta_n}})]=E_{\lambda}^{\tt NB}[\eta_n]E_{\lambda}^{\tt NB}[d(Z_{\tau_1})]. 
\end{align*} Hence, we get \begin{align}\label{aa} \Bigl|E^{\tt NB}_{\lambda}[d(Z_n)]-n\upsilon_\lambda \Bigr| &\leq \left|E^{\tt NB}_{\lambda}[d(Z_n)]-E^{\tt NB}_{\lambda}[d(Z_{\tau_{\eta_n}})] \right|+ \left|E^{\tt NB}_{\lambda}[d(Z_{\tau_{\eta_n}})]-n\upsilon_\lambda \right| \nonumber\\ & \leq E_{\lambda}^{\tt NB}\left[\max_{0\leq i\leq n}(\tau_{i+1}-\tau_i)\right]+\left|E_{\lambda}^{\tt NB}[\eta_n] E_{\lambda}^{\tt NB}[d(Z_{\tau_1})]-n\upsilon_\lambda \right|. \end{align} By (6.4) in \cite{LPP3} we have that \begin{align}\label{speed} \upsilon_\lambda=\frac{E_{\lambda}[d(Z_{\tau_2})-d(Z_{\tau_1})]}{E_{\lambda}[\tau_2-\tau_1]} =\frac{E_{\lambda}^{\tt NB}[d(Z_{\tau_1})]}{E_{\lambda}^{\tt NB}[\tau_1]}, \end{align} therefore, using \eqref{a} and that $\upsilon_\lambda\leq 1$, we have \begin{align}\label{aaa} \left|E_{\lambda}^{\tt NB}[\eta_n] E_{\lambda}^{\tt NB}[d(Z_{\tau_1})]-n\upsilon_\lambda \right|\leq\left|E_{\lambda}^{\tt NB}[\eta_n]E_{\lambda}^{\tt NB}[\tau_1]-n\right|\leq E_{\lambda}^{\tt NB}\left[\max_{0\leq i\leq n}(\tau_{i+1}-\tau_i)\right]. \end{align} By combining \eqref{aa} and \eqref{aaa}, we get \begin{align*} \left|E_{\lambda}^{\tt NB}[d(Z_n)]-n\upsilon_\lambda \right|\leq 2E_{\lambda}^{\tt NB}\left[\max_{0\leq i\leq n}(\tau_{i+1}-\tau_i)\right]. \end{align*} In order to complete the proof of Proposition \ref{appro}, it suffices to show that there exist constants $0<t_{\lambda}<\min\{\mu-\lambda,\lambda-\lambda_c\}$, $0<\kappa<1/2$ and $c_{\lambda}>0$ such that \begin{align}\label{max} E_{\lambda'}^{\tt NB}\left[\max_{0\leq i\leq n}(\tau_{i+1}-\tau_i)\right]\leq c_{\lambda} n^{\kappa} \end{align} for any $\lambda'\in(\lambda-t_{\lambda},\lambda+t_{\lambda})$. The estimate \eqref{max} can be proved as follows: for $\kappa>0$, we have \begin{align*} &E_{\lambda'}^{\tt NB}\left[\max_{0\leq i\leq n}(\tau_{i+1}-\tau_i)\right]\leq n^{\kappa}+\sum_{k\geq[n^{\kappa}]}P_{\lambda'}^{\tt NB}\left(\max_{0\leq i\leq n}(\tau_{i+1}-\tau_i)\geq k\right). 
\end{align*} By Corollary \ref{c:2pe}, there exists a constant $c_{\lambda}>0$ such that for $\lambda'\in(\lambda-t_{\lambda},\lambda+t_{\lambda})$ and sufficiently large $k$, we have \begin{align*} P_{\lambda'}^{\tt NB}\left(\max_{0\leq i\leq n}(\tau_{i+1}-\tau_i)\geq k\right)&= 1-\{1-P_{\lambda'}\left(\tau_{2}-\tau_1\geq k\right)\}^n\\ &\leq 1-(1-c_{\lambda}k^{-(2+\varepsilon)})^n\leq 2c_{\lambda}nk^{-(2+\varepsilon)}. \end{align*} Thus, for sufficiently large $n$, we get \begin{align*} E_{\lambda'}^{\tt NB}\left[\max_{0\leq i\leq n}(\tau_{i+1}-\tau_i)\right]\leq n^{\kappa}+4c_{\lambda}n^{-\kappa(1+\varepsilon)+1}. \end{align*} Since $\max\{\kappa,-\kappa(1+\varepsilon)+1\}\leq\frac{1}{2+\varepsilon}$, we obtain the estimate \eqref{max}. Therefore, we have shown that \begin{align*} \frac{E_{\lambda+h}^{\tt NB}[d(Z_n)-n\upsilon_{\lambda+h}]-E_{\lambda}^{\tt NB}[d(Z_n)-n\upsilon_\lambda]}{hn} \end{align*} tends to $0$ when $h$ tends to $0$ and $n$ tends to $\infty$ in such a way that $h^2n$ tends to $1$. \end{proof} By Proposition \ref{appro}, in order to show the differentiability of the function $\lambda\mapsto \upsilon_\lambda$, it suffices to prove the existence of the limit \begin{align*} \lim_{h,n}\frac{E_{\lambda+h}^{\tt NB}[d(Z_n)]-E_{\lambda}^{\tt NB}[d(Z_n)]}{hn}, \end{align*} where $h$ tends to $0$ and $n$ tends to $\infty$ in such a way that $h^2n$ tends to $1$. We will need the following estimates. \begin{lem}\label{ui} Suppose that $\lambda\in(\lambda_c^{1/2},\mu)$ and that there exists $\beta>1$ such that $\sum_{k=1}^{\infty}p_k\beta^k<\infty$. Then, we have \begin{align}\label{est:ui1} \sup_n\frac{1}{n}E_{\lambda}^{\tt NB}[(d(Z_n)-n\upsilon_\lambda)^2]<\infty \end{align} and \begin{align}\label{est:ui2} \sup_n\frac{1}{n}\mathbb{E}\left[E^{\mathbf{T}^*}_{\lambda}\left[(d(Z_n)-n\upsilon_\lambda)^{2}\mathbf{1}_{\{\sigma_{e^*(\mathbf{T})}>n\}}\right]\right]<\infty. \end{align} \end{lem} \begin{proof} We first prove \eqref{est:ui1}. 
Using \eqref{e:Sdw} and the arguments of Proposition \ref{appro} we have that \begin{align*} \left|d(Z_n)-n\upsilon_\lambda\right|\leq \left|d(Z_{\tau_{\eta_n}})-n\upsilon_\lambda\right|+\max_{0\leq i\leq n}(\tau_{i+1}-\tau_i)\ \ P^{\tt NB}_{\lambda}\mathchar`-a.s., \end{align*} and, $P^{\tt NB}_{\lambda}$-a.s., \begin{align*} d(Z_{\tau_{\eta_n}})-n\upsilon_\lambda =\sum_{i=0}^{\eta_n-1}\left(d(Z_{\tau_{i+1}})-d(Z_{\tau_i})-E_{\lambda}^{\tt NB}[d(Z_{\tau_1})]\right)+\bigl(\eta_n\cdot E_{\lambda}^{\tt NB}[d(Z_{\tau_1})]-n\upsilon_\lambda\bigr) \end{align*} Hence, we get \begin{align*} & E^{\tt NB}_{\lambda}[(d(Z_n)-n\upsilon_\lambda)^2]\\ &\leq 4\Biggl\{E_{\lambda}^{\tt NB}\left[\left(\max_{0\leq i\leq n}(\tau_{i+1}-\tau_i)\right)^2\right] +E_{\lambda}^{\tt NB}\left[\left(\eta_n\cdot E^{\tt NB}_{\lambda}[d(Z_{\tau_1})]-n\upsilon_\lambda\right)^2\right]\\ &\ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ +E_{\lambda}^{\tt NB}\left[\left\{\sum_{i=0}^{\eta_n-1}\left(d(Z_{\tau_{i+1}})-d(Z_{\tau_i})-E_{\lambda}^{\tt NB}[d(Z_{\tau_1})]\right)\right\}^2\right]\Biggr\}. \end{align*} Recall that $d(Z_{\tau_{i+1}})-d(Z_{\tau_i})$ are \emph{i.i.d.\ } then Wald's second identity implies \begin{align*} & E_{\lambda}^{\tt NB}\left[\left(\sum_{i=0}^{\eta_n-1}\left(d(Z_{\tau_{i+1}})-d(Z_{\tau_i})-E_{\lambda}^{\tt NB}[d(Z_{\tau_1})]\right)\right)^2\right]\\ &=E_{\lambda}^{\tt NB}\left[\left(d(Z_{\tau_{2}})-d(Z_{\tau_1})-E_{\lambda}^{\tt NB}[d(Z_{\tau_1})]\right)^2\right]E_{\lambda}[\eta_n] \end{align*} thus by Corollary \ref{c:2pe} and the estimate \eqref{a} along with \eqref{max}, we have \begin{align*} \sup_{n}\frac{1}{n}E_{\lambda}^{\tt NB}\left[\left(\sum_{i=0}^{\eta_n-1}\left(d(Z_{\tau_{i+1}})-d(Z_{\tau_i})-E_{\lambda}^{\tt NB}[d(Z_{\tau_1})]\right)\right)^2\right]<\infty. \end{align*} It is not difficult to see that Corollary \ref{c:2pe} implies $n^{-1}E_{\lambda}^{\tt NB}[(\max_{0\leq i\leq n}(\tau_{i+1}-\tau_i))^2]$ is also bounded in $n$. 
Hence, we get the conclusion if we show \begin{align*} \sup_{n\geq1}\frac{1}{n}E_{\lambda}^{\tt NB}\left[\Bigl(\eta_n\cdot E_{\lambda}^{\tt NB}[d(Z_{\tau_1})]-n\upsilon_\lambda\Bigr)^2\right]<\infty. \end{align*} It is shown in Chapter 4 of \cite{C} that \begin{align*} E_{\lambda}^{\tt NB}[\eta_n^2]=E_{\lambda}^{\tt NB}[\eta_n]^2+O(n)=\frac{n^2}{E_{\lambda}^{\tt NB}[\tau_1]^2}+O(n). \end{align*} By using the formula \eqref{speed} and the estimate \eqref{a}, we get \begin{align*} &E_{\lambda}^{\tt NB}\left[\Bigl(\eta_n\cdot E_{\lambda}^{\tt NB}[d(Z_{\tau_1})]-n\upsilon_\lambda\Bigr)^2\right] \\ &=E_{\lambda}^{\tt NB}[\eta_n]^2E_{\lambda}^{\tt NB}[d(Z_{\tau_1})]^2-\frac{E_{\lambda}^{\tt NB}[d(Z_{\tau_1})]^2} {E_{\lambda}^{\tt NB}[\tau_1]^2}\cdot n^2+O(n) \end{align*} which is at most of order $n$; therefore this implies \eqref{est:ui1}. We next prove \eqref{est:ui2}. In order to deduce \eqref{est:ui2} from \eqref{est:ui1}, it suffices to show that \begin{align*} \sup_{n}\frac{1}{n}\mathbb{E}\left[E^{\mathbf{T}^*}_{\lambda}\left[(d(Z_n)-n\upsilon_\lambda)^{2}\mathbf{1}_{\{n<\sigma_{e^*(\mathbf{T})}<\infty\}}\right]\right]<\infty. \end{align*} This immediately follows from Proposition \ref{est:sigma} and the obvious bound $|d(Z_n)-n\upsilon_\lambda|\leq (1+\upsilon_{\lambda})n.$ \end{proof} Lemma \ref{ui} implies uniform integrability of the sequence $\{(d(Z_n)-n\upsilon_\lambda)/\sqrt{n}\}_{n\geq1}$ under the conditioned annealed measure $P_{\lambda}^{\tt NB}$ when $\lambda\in(\lambda_c^{1/2},\mu)$ and the offspring distribution $\{p_k\}_{k\geq0}$ has finite exponential moment. On the other hand, by Corollary \ref{c:2pe} and a standard argument in renewal theory, we get that the sequence $\{(d(Z_n)-n\upsilon_\lambda)/\sqrt{n}\}_{n\geq1}$ satisfies the annealed CLT under the same assumptions. Hence, we have \begin{align*} \lim_{n\to\infty}\frac{E_{\lambda}^{\tt NB}[d(Z_n)-n\upsilon_\lambda]}{\sqrt{n}}=0, \end{align*} for any $\lambda\in(\lambda_c^{1/2},\mu)$. 
Thus, in order to prove the differentiability of the speed, we only need to show the existence of the limit \begin{align}\label{aim} \frac{1}{hn}E_{\lambda+h}^{\tt NB}[d(Z_n)-n\upsilon_\lambda], \end{align} for any sequences $h$ and $n$ such that $h\rightarrow0$ and $n\rightarrow\infty$ in such a way that $h^2n\rightarrow1$. To do so, we wish to relate the measures $E_{\lambda}^{\tt NB}$ and $E_{\lambda+h}^{\tt NB}$ by using the Girsanov formula \eqref{Gir}. However, the formula \eqref{Gir} does not directly apply to $E_{\lambda+h}^{\tt NB}[d(Z_n)-n\upsilon_\lambda]$ because of the presence of the non-backtracking condition $\{\sigma_{e^*(\mathbf{T})}=\infty\}$. In order to overcome this problem, we need the following lemma. \begin{lem}\label{lem:replace} Suppose that $\lambda\in(\lambda_c^{1/2},\mu)$ and that there exists $\beta>1$ such that $\sum_{k=1}^{\infty}p_k\beta^k<\infty$. Then, we have that \begin{align}\label{eq:replace} &\frac{1}{\sqrt{n}}\Biggl( \mathbb{E}\left[E^{\mathbf{T}^*}_{\lambda+h}\left[\left(d(Z_n)-n\upsilon_\lambda\right)\mathbf{1}_{\{\sigma_{e^*(\mathbf{T})}=\infty\}}\right]\right]\notag\\ &\ \ \ \ \ \ \ \ \ \ \ -\mathbb{E}\left[E^{\mathbf{T}^*}_{\lambda}\left[\left(d(Z_n)-n\upsilon_\lambda\right)\prod_{i=1}^{n}\frac{A_{\lambda+h}(Z_{i-1},Z_i)}{A_{\lambda}(Z_{i-1},Z_i)}\mathbf{1}_{\{\sigma_{e^*(\mathbf{T})}=\infty\}}\right]\right]\Biggr) \end{align} converges to $0$ as $h$ tends to $0$ and $n$ tends to $\infty$ in such a way that $h^2n\to1$. \end{lem} Since the proof of Lemma \ref{lem:replace} requires a careful analysis of the Girsanov weight, we will defer it until the end of the next subsection. \subsection{The discrete Girsanov formula} In this subsection, we will analyse the Girsanov weight $\prod_{i=1}^n\frac{A_{\lambda+h}(Z_{i-1},Z_i)}{A_{\lambda}(Z_{i-1},Z_i)}$. 
\ By the Taylor expansion, there exists $s=s(x,y)\in[0,1]$ such that \begin{align*} \log\frac{A_{\lambda+h}(x,y)}{A_{\lambda}(x,y)} =hB_{\lambda}(x,y)&+\frac{h^2}{2}C_{\lambda}(x,y)+\frac{h^3}{6}D_{\lambda+sh}(x,y), \end{align*} where \begin{align*} B_{\lambda}(x,y)=\frac{\mathrm{d}}{\mathrm{d}\lambda}\log A_{\lambda}(x,y)&=\begin{cases} 0 &{\rm when}\ x=e,\\ \frac{1}{\lambda}-\frac{1}{\lambda+\nu(x)} &{\rm when}\ y=\pi(x),\\ -\frac{1}{\lambda+\nu(x)} &{\rm when}\ x=\pi(y), \end{cases}\\ C_{\lambda}(x,y)=\frac{\mathrm{d}}{\mathrm{d}\lambda}B_{\lambda}(x,y)&=\begin{cases} 0 &{\rm when}\ x=e,\\ -\frac{1}{\lambda^2}+\frac{1}{(\lambda+\nu(x))^2} &{\rm when}\ y=\pi(x),\\ \frac{1}{(\lambda+\nu(x))^2} &{\rm when}\ x=\pi(y), \end{cases} \intertext{and} D_{\lambda}(x,y)=\frac{\mathrm{d}}{\mathrm{d}\lambda}C_{\lambda}(x,y)&=\begin{cases} 0 &{\rm when}\ x=e,\\ \frac{2}{\lambda^3}-\frac{2}{(\lambda+\nu(x))^3} &{\rm when}\ y=\pi(x),\\ -\frac{2}{(\lambda+\nu(x))^3} &{\rm when}\ x=\pi(y). \end{cases} \end{align*} By using these expressions, we have \begin{align}\label{AAA} \prod_{i=1}^{n}\frac{A_{\lambda+h}(Z_{i-1},Z_i)}{A_{\lambda}(Z_{i-1},Z_i)}=\exp(hP_n-h^2Q_n+R_{n,h})\ \ P_{\lambda}\mathchar`-a.s., \end{align} where \begin{align*} P_n&:=\sum_{j=0}^{n-1}B_{\lambda}(Z_j,Z_{j+1}),\\ Q_n&:=\sum_{j=0}^{n-1}\frac{1}{2}B^2_{\lambda}(Z_j,Z_{j+1}),\\ R_{n,h}&:=\sum_{j=0}^{n-1}\left\{h^2\left(\frac{1}{2}B_{\lambda}^2(Z_j,Z_{j+1})+\frac{1}{2}C_{\lambda}(Z_j,Z_{j+1})\right) +\frac{h^3}{6}D_{\lambda+sh}(Z_j,Z_{j+1})\right\}. 
\end{align*} Since \begin{align}\label{Ubound} |B_{\lambda}(x,y)|\leq\frac{1}{\lambda}+1,\ |C_{\lambda}(x,y)|\leq\frac{1}{\lambda^2}+1,\ |D_{\lambda}(x,y)|\leq\frac{2}{\lambda^3}+2, \end{align} we get \begin{align*} 1&=\sum_{y}A_{\lambda+h}(x,y)=\sum_{y}A_{\lambda}(x,y)\exp\Bigl(hB_{\lambda}(x,y)+\frac{h^2}{2}C_{\lambda}(x,y) +\frac{h^3}{6}D_{\lambda+sh}(x,y)\Bigr)\\ &=\sum_{y}A_{\lambda}(x,y)\Bigl(1+hB_{\lambda}(x,y)+\frac{h^2}{2}B^2_{\lambda}(x,y)+\frac{h^2}{2}C_{\lambda}(x,y)+O(h^3)\Bigr). \end{align*} This implies that for any $x\in\mathbf{T}$, \begin{align}\label{0} \sum_{y}A_{\lambda}(x,y)B_{\lambda}(x,y)&=0,\ \ P_{\lambda}\mathchar`-a.s.,\\ \sum_{y}A_{\lambda}(x,y)\Bigl(B^2_{\lambda}(x,y)+C_{\lambda}(x,y)\Bigr)&=0,\ \ P_{\lambda}\mathchar`-a.s. \notag \end{align} By using the Markov property and the equality \eqref{0}, we obtain \begin{align*} E_{\lambda}^{\mathbf{T}}[B_{\lambda}(Z_j,Z_{j+1})\ |\ Z_j]=\sum_{y}B(Z_j,y)A_{\lambda}(Z_j,y)=0,\ \ P^{\mathbf{T}}_{\lambda}\mathchar`-a.s. \end{align*} This implies \begin{align}\label{B} E^{\mathbf{T}}_{\lambda}[B_{\lambda}(Z_j,Z_{j+1})]=E_{\lambda}[B_{\lambda}(Z_j,Z_{j+1})]=0,\ \ P_{\lambda}\mathchar`-a.s. \end{align} Similarly, we have \begin{align}\label{BC} E^{\mathbf{T}}_{\lambda}[B^2_{\lambda}(Z_j,Z_{j+1})+C_{\lambda}(Z_j,Z_{j+1})]=0,\ \ P_{\lambda}\mathchar`-a.s. \end{align} We now let $h$ tend to $0$ and $n$ tend to $\infty$ in such a way that $h^2n$ tends to $1$. We show that the limits of $hP_n$ and $h^2Q_n$ are described by a CLT and a LLN respectively and the limit of $R_{n,h}$ is negligible. \begin{enumerate}[align=left, leftmargin=0pt, labelindent=\parindent, listparindent=\parindent, labelwidth=0pt, itemindent=!] 
\item[{\bf 1) The CLT for $P_n$:}] By the renewal structure of GW-trees, we know that the collection $\{\sum_{j=\tau_i}^{\tau_{i+1}-1}B_{\lambda}(Z_j,Z_{j+1})\}_{i\geq1}$ are \emph{i.i.d.\ } random variables under $P_{\lambda}$, and are distributed as $\sum_{j=0}^{\tau_1-1}B_{\lambda}(Z_j,Z_{j+1})$ under $P_{\lambda}^{\tt NB}$. Recall that by \eqref{B} we have that \begin{align}\label{eq:pn} E_{\lambda}[P_n]=0. \end{align} On the other hand, noticing that $\tau_1$ is finite a.s. by the renewal structure we have the following LLN: \begin{align}\label{e:lln} \lim_{n\to\infty}n^{-1}P_n=\dfrac{E_{\lambda}\left[\sum_{j=\tau_1}^{\tau_2-1}B_{\lambda}(Z_j,Z_{j+1})\right]}{E_{\lambda}[\tau_2-\tau_1]}=\dfrac{E_{\lambda}^{\tt NB}\left[\sum_{j=0}^{\tau_1-1}B_{\lambda}(Z_j,Z_{j+1})\right]}{E^{\tt NB}_{\lambda}[\tau_1]}\ \ P_{\lambda}\mathchar`-a.s. \end{align} Noticing that $n^{-1}P_n\leq 1+\lambda^{-1}$ by \eqref{Ubound}, the dominated convergence theorem implies that the same convergence as \eqref{e:lln} holds in $L^1(P_{\lambda})$. This fact together with \eqref{eq:pn} implies that \begin{align}\label{e:BTT} E_{\lambda}\left[\sum_{j=\tau_1}^{\tau_2-1}B_{\lambda}(Z_j,Z_{j+1})\right]=E_{\lambda}^{\tt NB}\left[\sum_{j=0}^{\tau_1-1}B_{\lambda}(Z_j,Z_{j+1})\right]=0. \end{align} By \eqref{Ubound} and Corollary \ref{c:2pe} we also have that \begin{align*} E_{\lambda}\left[\left(\sum_{j=\tau_1}^{\tau_2-1}B_{\lambda}(Z_j,Z_{j+1})\right)^2\right] &\leq E_{\lambda}^{\tt NB}\left[\left(\sum_{j=0}^{\tau_1-1}B_{\lambda}(Z_j,Z_{j+1})\right)^2\right]\\ &\leq \left(1+\frac{1}{\lambda}\right)^2E_{\lambda}^{\tt NB}\left[\tau_1^2\right]<\infty. \end{align*} Moreover, we have that \[n^{-1/2}\left(\sum_{j=0}^{\tau_{\eta_n}-1}B_{\lambda}(Z_j,Z_{j+1})-P_n\right)\leq n^{-1/2}\left(1+\frac{1}{\lambda}\right)\max_{0\leq i\leq n}(\tau_{i+1}-\tau_{i}),\] which converges to $0$ in probability by \eqref{max}. 
It therefore follows that $n^{-1/2}P_n$ converges in distribution to a centred Gaussian. \item[{\bf 2) The LLN for $Q_n$:}] Recalling that $\tau_1$ is $P_{\lambda}$-a.s.\ finite and from \eqref{Ubound} that $B_{\lambda}^2(Z_j,Z_{j+1})$ is bounded above, by the law of large numbers we know that, $P_{\lambda}$-a.s. \if0 \begin{align*} \lim_{k\to\infty}\frac{1}{k}\sum_{j=0}^{\tau_k-1}B_{\lambda}^2(Z_j,Z_{j+1})= E_{\lambda}\left[\sum_{j=\tau_1}^{\tau_2-1}B_{\lambda}^2(Z_j,Z_{j+1})\right], \ \ P_{\lambda}\mathchar`-a.s. \end{align*} Hence, since $\eta_n\rightarrow \infty$ as $n \rightarrow \infty$, \begin{align*} \lim_{n\to\infty}\frac{1}{\eta_n}\sum_{j=0}^{\tau_{\eta_n}-1}B_{\lambda}^2(Z_j,Z_{j+1})=E_{\lambda}\left[\sum_{j=\tau_1}^{\tau_2-1}B_{\lambda}^2(Z_j,Z_{j+1})\right], \ \ P_{\lambda}\mathchar`-a.s. \end{align*} Now we have \begin{align} \left|\frac{1}{n}Q_n-\frac{\eta_n}{n}\cdot\frac{1}{\eta_n}\sum_{j=0}^{\tau_{\eta_n}-1}\frac{1}{2}B_{\lambda}^2(Z_j,Z_{j+1})\right| &\leq\frac{1}{2n}\left|\sum_{j=n}^{\tau_{\eta_n}-1}B_{\lambda}^2(Z_j,Z_{j+1})\right| \notag\\ &\leq (\lambda^{-1}+1)^2\frac{\max_{0\leq i\leq n}(\tau_{i+1}-\tau_i)}{n} \label{e:UpQ} \end{align} where the estimates \eqref{e:Sdw}, \eqref{Ubound} are used in the last step. Moreover, by using arguments in the proof of Proposition \ref{appro} and the Borel-Cantelli lemma, it is easy to show that \begin{align*} \lim_{n\to\infty}\frac{\max_{0\leq i\leq n}(\tau_{i+1}-\tau_i)}{n}=0,\ P_{\lambda}\mathchar`-a.s., \end{align*} thus \eqref{e:UpQ} converges to $0$ $P_{\lambda}$-a.s. It is a standard result in the renewal theory (see \cite{C} for instance) that \begin{align*} \lim_{n\to\infty}\frac{\eta_n}{n}=\frac{1}{E_{\lambda}[\tau_2-\tau_1]}=\frac{1}{E_{\lambda}^{\tt NB}[\tau_1]},\ P_{\lambda}\mathchar`-a.s. 
\end{align*} therefore we get \fi \begin{align*} \lim_{n\to\infty}\frac{1}{n}Q_n &=\frac{1}{2E_{\lambda}[\tau_2-\tau_1]}E_{\lambda}\left[\sum_{j=\tau_1}^{\tau_2-1}B_{\lambda}^2(Z_j,Z_{j+1})\right] \\ &=\frac{1}{2E_{\lambda}^{\tt NB}[\tau_1]}E^{\tt NB}_{\lambda}\left[\sum_{j=0}^{\tau_1-1}B_{\lambda}^2(Z_j,Z_{j+1})\right]. \end{align*} \item[{\bf 3) The estimate for $R_{n,h}$:}] For some constant $c<\infty$, we have $n\leq ch^{-2}$. Using this and \eqref{Ubound}, we have \begin{align*} \left|\sum_{j=0}^{n-1}\frac{h^3}{6}D_{\lambda+sh}(Z_j,Z_{j+1})\right|\leq \frac{ch}{3}\left(\frac{1}{\lambda^3}+1\right). \end{align*} By \eqref{BC} we have that \begin{align*} E_{\lambda}^{\mathbf{T}}&\left[\sum_{j=0}^{n-1}\left(B_{\lambda}^2(Z_j,Z_{j+1})+C_{\lambda}(Z_j,Z_{j+1})\right)\right]=0,\ P_{\lambda}\mathchar`-a.s. \end{align*} hence, by using the similar argument to the above one, we see that \begin{align*} \lim_{n\to\infty}R_{n,h}=0,\ P_{\lambda}\mathchar`-a.s. \end{align*} Note also that $R_{n,h}$ satisfies the following uniform estimate for $h$ sufficiently small. \begin{align}\label{R_n} |R_{n,h}|&\leq h^2n\left(\frac{1}{\lambda}+1+\frac{1}{2}\left(\frac{1}{\lambda^2}+1\right)\right)+\frac{ch}{3}\left(\frac{1}{\lambda^3}+1\right) \nonumber\\ &\leq 2c\left(\frac{1}{\lambda}+1+\frac{1}{2}\left(\frac{1}{\lambda^2}+1\right)\right)+\frac{1}{\lambda^3}+1. \end{align} \item[{\bf 4) The joint CLT for}] $\Bigl(n^{-1/2}(d(Z_n)-n\upsilon_\lambda),n^{-1/2}P_n\Bigr)_{n\geq1}${\bf :} We have given a proof of the annealed CLT for the sequences of random variables $\{n^{-1/2}(d(Z_n)-n\upsilon_\lambda)\}_{n\geq1}$ and $\{n^{-1/2}P_n\}_{n\geq1}$, but in what follows, we need the joint CLT for the sequence of random vectors $\bigl(n^{-1/2}(d(Z_n)-n\upsilon_\lambda),n^{-1/2}P_n\bigr)$. 
Note that for any $\lambda\in(\lambda_c^{1/2},\mu)$, \begin{align*} \left(d(Z_{\tau_{l+1}})-d(Z_{\tau_l})-\upsilon_\lambda(\tau_{l+1}-\tau_l),\sum_{j=\tau_l}^{\tau_{l+1}-1}B_{\lambda}(Z_j,Z_{j+1})\right)_{l\geq1} \end{align*} are \emph{i.i.d.\ } $\mathbb{R}^2$-valued random variables under $P_{\lambda}$. \end{enumerate} This fact together with the moment estimate of regeneration times immediately implies the following result. Note that for $\sigma_{10}(\lambda)$, we use $E_{\lambda}\left[\sum_{j=\tau_1}^{\tau_2-1}B_{\lambda}(Z_j,Z_{j+1})\right]=0$ from \eqref{e:BTT} and that $\sigma_{00}(\lambda)$ coincides with the diffusion constant in the central limit theorems proved in \cite{B3}. \begin{prp}\label{joint} Suppose $\lambda\in(\lambda_c^{1/2},\mu)$ and that there exists $\beta>1$ such that $\sum_{k=1}^{\infty}p_k\beta^k<\infty$. Then, the sequence $\bigl\{(n^{-1/2}(d(Z_n)-n\upsilon_\lambda),n^{-1/2}P_n)\bigr\}_{n\geq1}$ under $P_{\lambda}$ converges weakly to the two dimensional Gaussian random variable $(X,Y)$ with the covariance matrix $\Sigma_{\lambda}:=(\sigma_{ij}(\lambda))_{0\leq i,j\leq1}$ given by \begin{align}\label{var} \sigma_{00}(\lambda)&:=\frac{1}{E_{\lambda}[\tau_2-\tau_1]} E_{\lambda}\left[\Bigl(\bigl(d(Z_{\tau_2})-d(Z_{\tau_1})\bigr)-E_{\lambda}[d(Z_{\tau_2})-d(Z_{\tau_1})]\Bigr)^2\right],\nonumber\\ \sigma_{11}(\lambda)&:=\frac{1}{E_{\lambda}[\tau_2-\tau_1]}E_{\lambda}\left[\sum_{j=\tau_1}^{\tau_2-1}B_{\lambda}^2(Z_j,Z_{j+1})\right],\\ \sigma_{10}(\lambda)=\sigma_{01}(\lambda)&:=\frac{1}{E_{\lambda}[\tau_2-\tau_1]} E_{\lambda}\left[\Bigl(d(Z_{\tau_2})-d(Z_{\tau_1})\Bigr)\sum_{j=\tau_1}^{\tau_2-1}B_{\lambda}(Z_j,Z_{j+1})\right]\nonumber. \end{align} Moreover, under $P^{\tt NB}_{\lambda}$ the sequence $\bigl\{(n^{-1/2}(d(Z_n)-n\upsilon_\lambda),n^{-1/2}P_n)\bigr\}_{n\geq1}$ converges weakly to the same two dimensional Gaussian random variable $(X,Y)$. \end{prp} \begin{proof} We have already proved the first claim. 
The second claim is immediate from the fact that the distribution of $$\left(d(Z_{\tau_{2}})-d(Z_{\tau_{1}})-\upsilon_\lambda(\tau_{2}-\tau_1),\sum_{j=\tau_1}^{\tau_{2}-1}B_{\lambda}(Z_j,Z_{j+1})\right)\ \ {\rm under}\ P_{\lambda}$$ is the same as that of $$\left(d(Z_{\tau_{1}})-\upsilon_\lambda\tau_1,\sum_{j=0}^{\tau_{1}-1}B_{\lambda}(Z_j,Z_{j+1})\right)\ \ {\rm under}\ P_{\lambda}^{\tt NB}.$$ \end{proof} We now prove Lemma \ref{lem:replace} by using discussions given in this subsection. \begin{proof}[Proof of Lemma \ref{lem:replace}.] By the Markov property, we have that \begin{align*} &\mathbb{E}\left[E^{\mathbf{T}^*}_{\lambda+h}\left[\left(d(Z_n)-n\upsilon_\lambda\right)\mathbf{1}_{\{\sigma_{e^*}=\infty\}}\right]\right]\\ &=\mathbb{E}\left[E^{\mathbf{T}^*}_{\lambda}\!\left[\left(d(Z_n)-n\upsilon_\lambda\right)\prod_{i=1}^{n}\frac{A_{\lambda+h}(Z_{i-1},Z_i)}{A_{\lambda}(Z_{i-1},Z_i)}\mathbf{1}_{\{\sigma_{e^*}>n\}} \cdot E^{\mathbf{T}^*}_{\lambda+h,Z_n}\left[\mathbf{1}_{\{\sigma_{e^*}=\infty\}}\right]\right]\right] \end{align*} and \begin{align*} &\mathbb{E}\left[E^{\mathbf{T}^*}_{\lambda}\left[\left(d(Z_n)-n\upsilon_\lambda\right)\prod_{i=1}^{n}\frac{A_{\lambda+h}(Z_{i-1},Z_i)}{A_{\lambda}(Z_{i-1},Z_i)}\cdot\mathbf{1}_{\{\sigma_{e^*}=\infty\}}\right]\right]\\ &=\mathbb{E}\left[E^{\mathbf{T}^*}_{\lambda}\left[\left(d(Z_n)-n\upsilon_\lambda\right)\prod_{i=1}^{n}\frac{A_{\lambda+h}(Z_{i-1},Z_i)}{A_{\lambda}(Z_{i-1},Z_i)}\mathbf{1}_{\{\sigma_{e^*}>n\}} \cdot E^{\mathbf{T}^*}_{\lambda,Z_n}\left[\mathbf{1}_{\{\sigma_{e^*}=\infty\}}\right]\right]\right]. 
\end{align*} Thus, by H\"{o}lder's inequality and Jensen's inequality we have that \eqref{eq:replace} is equal to \begin{align*} &\Biggl|\mathbb{E}\Biggl[E^{\mathbf{T}^*}_{\lambda}\Biggl[\left(\dfrac{d(Z_n)-n\upsilon_\lambda}{\sqrt{n}}\right)\mathbf{1}_{\{\sigma_{e^*}>n\}}\cdot\prod_{i=1}^{n}\frac{A_{\lambda+h}(Z_{i-1},Z_i)}{A_{\lambda}(Z_{i-1},Z_i)}\nonumber\\ &\ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \cdot\left(E^{\mathbf{T}^*}_{\lambda+h,Z_n}\left[\mathbf{1}_{\{\sigma_{e^*}=\infty\}}\right]-E^{\mathbf{T}^*}_{\lambda,Z_n}\left[\mathbf{1}_{\{\sigma_{e^*}=\infty\}}\right]\right)\Biggr]\Biggr]\Biggr| \end{align*} which is bounded above by $\mathcal{E}^1_{n}\cdot\mathcal{E}^2_{h,n}\cdot\mathcal{E}^3_{h,n}$ where \begin{align*} \mathcal{E}^1_{n}&:= \mathbb{E}\left[E^{\mathbf{T}^*}_{\lambda}\left[\left(\dfrac{d(Z_n)-n\upsilon_\lambda}{\sqrt{n}}\right)^2\mathbf{1}_{\{\sigma_{e^*}>n\}}\right]\right]^{1/2},\\ \mathcal{E}^2_{h,n}&:=\mathbb{E}\left[E^{\mathbf{T}^*}_{\lambda}\left[\left(\prod_{i=1}^{n}\frac{A_{\lambda+h}(Z_{i-1},Z_i)}{A_{\lambda}(Z_{i-1},Z_i)}\right)^4\right]\right]^{1/4},\\ \mathcal{E}^3_{h,n}&:=E_{\lambda}\left[\left(E^{\mathbf{T}^*}_{\lambda+h,Z_n}\left[\mathbf{1}_{\{\sigma_{e^*}=\infty\}}\right]-E^{\mathbf{T}^*}_{\lambda,Z_n}\left[\mathbf{1}_{\{\sigma_{e^*}=\infty\}}\right]\right)^{4}\right]^{1/4}. \end{align*} It suffices to show that \begin{align} \sup_{n}\mathcal{E}^1_{n}&<\infty,\label{eq:e1}\\ \sup_{h,n:h^2n\sim1}\mathcal{E}^2_{h,n}&<\infty,\ {\rm and}\label{eq:e2}\\ \lim_{h,n:h^2n\sim1}\mathcal{E}^3_{h,n}&=0.\label{eq:e3} \end{align} The estimate \eqref{eq:e1} follows from \eqref{est:ui2}. We now prove the estimate \eqref{eq:e2}. 
Notice that \begin{align*} \left(\prod_{i=1}^n\frac{A_{\lambda+h}(Z_{i-1},Z_i)}{A_{\lambda}(Z_{i-1},Z_i)}\right)^4 &=\exp(4hP_n-4h^2Q_n+4R_{n,h}) \intertext{and} \prod_{i=1}^n\frac{A_{\lambda+4h}(Z_{i-1},Z_i)}{A_{\lambda}(Z_{i-1},Z_i)} &=\exp(4hP_n-16h^2Q_n+R_{n,4h}). \end{align*} By the estimate \eqref{R_n}, there exists a constant $C_{\lambda}>0$ such that $|R_{n,h}|\leq C_{\lambda}$ and $|R_{n,4h}|\leq C_{\lambda}$. Since $h^2\sim n^{-1}$, there exists a constant $C'_{\lambda}$ such that $|h^2Q_n|\leq C'_{\lambda}$. Thus, there exists a constant $C''_{\lambda}>0$ such that \begin{align*} \left(\prod_{i=1}^n\frac{A_{\lambda+h}(Z_{i-1},Z_i)}{A_{\lambda}(Z_{i-1},Z_i)}\right)^4 \leq C''_{\lambda}\left(\prod_{i=1}^n\frac{A_{\lambda+4h}(Z_{i-1},Z_i)}{A_{\lambda}(Z_{i-1},Z_i)}\right). \end{align*} Noticing that \begin{align*} E^{\mathbf{T}^*}_{\lambda}\left[\prod_{i=1}^n\frac{A_{\lambda+4h}(Z_{i-1},Z_i)}{A_{\lambda}(Z_{i-1},Z_i)}\right] =E^{\mathbf{T}^*}_{\lambda+4h}[1]=1, \end{align*} we get the conclusion.\par Finally, we show \eqref{eq:e3}. Note that \begin{equation*} Q(\lambda):=E^{\mathbf{T}^*}_{\lambda,Z_n}\left[\mathbf{1}_{\{\sigma_{e^*}=\infty\}}\right] =P^{\mathbf{T}^*}_{\lambda,Z_n}\left(\sigma_{e^*}=\infty\right) \end{equation*} is bounded above by $1$ and monotonically decreasing in $\lambda$. Furthermore, since $d(Z_n)$ converges $P_\lambda$-a.s.\ to $\infty$ as $n \rightarrow \infty$, we have that $Q(\tilde{\lambda})$ converges $P_\lambda$-a.s.\ to $1$ for any $\tilde{\lambda}\in(\lambda_c,\mu)$. Fix $t>0$ such that $[\lambda-t,\lambda+t]\subset (\lambda_c,\mu)$; then for $|h|\leq t$, by the bounded convergence theorem we have that \begin{equation*} \mathcal{E}^3_{h,n} \leq \sqrt{2}E_{\lambda}\left[P^{\mathbf{T}^*}_{\lambda-t,Z_n}\left(\sigma_{e^*}=\infty\right)-P^{\mathbf{T}^*}_{\lambda+t,Z_n}\left(\sigma_{e^*}=\infty\right)\right]^{1/4} \end{equation*} which converges to $0$ as $n \rightarrow \infty$. 
\end{proof} \subsection{The proof of the differentiability of the speed} In this subsection, we will prove Theorem \ref{dif}. \begin{proof}[Proof of Theorem \ref{dif}] By Proposition \ref{joint} it is now sufficient to prove that $\upsilon_{\lambda}'=E^{\tt NB}_{\lambda}[XY].$ By \eqref{AAA}, we have that \begin{align}\label{BBB} & E^{\tt NB}_{\lambda}\left[\left(d(Z_n)-n\upsilon_\lambda\right)\prod_{i=1}^{n}\frac{A_{\lambda+h}(Z_{i-1},Z_i)}{A_{\lambda}(Z_{i-1},Z_i)} \right]\nonumber\\ &=E^{\tt NB}_{\lambda}\left[\left(d(Z_n)-n\upsilon_\lambda\right)\exp(hP_n-h^2Q_n+R_{n,h}) \right]. \end{align} Therefore, once we justify that we can pass to the limit in \eqref{BBB}, by using Lemma \ref{lem:replace} and Proposition \ref{joint} we will get \begin{align}\label{conv} &\frac{1}{hn}E^{\tt NB}_{\lambda+h}\left[d(Z_n)-n\upsilon_\lambda \right]\nonumber\\ &\rightarrow E^{\tt NB}_{\lambda}\left[X\exp\left(Y-\frac{1}{2E_{\lambda}[\tau_2-\tau_1]}E_{\lambda}\left[\sum_{j=\tau_1}^{\tau_2-1}B_{\lambda}^2(Z_j,Z_{j+1})\right]\right) \right] \end{align} where $(X,Y)$ is the two dimensional Gaussian random variable with the covariance matrix $\Sigma_{\lambda}$. Notice that we have shown the continuity of the escape probability in Lemma \ref{p:esc-conti}. Since it is shown in \eqref{var} that \begin{align*} {\rm Var}(Y)=\frac{1}{E^{\tt NB}_{\lambda}[\tau_1]}E^{\tt NB}_{\lambda}\left[\sum_{j=0}^{\tau_1-1}B_{\lambda}^2(Z_j,Z_{j+1})\right], \end{align*} the above convergence and the integration by parts formula for Gaussian laws implies \begin{align*} \frac{\upsilon_{\lambda+h}-\upsilon_\lambda}{h} \;\rightarrow\; E_{\lambda}^{\tt NB}\left[X\exp\left(Y-\frac{1}{2}{\rm Var}(Y)\right)\right] \;=\;E_{\lambda}^{\tt NB}[XY]=E_{\lambda}[XY]. 
\end{align*} In order to justify the step \eqref{conv}, it suffices to show the uniform integrability of \begin{align*} \left\{\frac{1}{hn}\Bigl(d(Z_n)-n\upsilon_\lambda\Bigr)\cdot \prod_{i=1}^n\frac{A_{\lambda+h}(Z_{i-1},Z_i)}{A_{\lambda}(Z_{i-1},Z_i)}\right\}_{n\geq1}. \end{align*} under $P_{\lambda}^{\tt NB}$. By H\"{o}lder's inequality, we have \begin{align*} &E_{\lambda}^{\tt NB}\left[\left(\frac{1}{hn}\Bigl(d(Z_n)-n\upsilon_\lambda\Bigr)\cdot \prod_{i=1}^n\frac{A_{\lambda+h}(Z_{i-1},Z_i)}{A_{\lambda}(Z_{i-1},Z_i)}\right)^{6/5}\right]\\ &\leq E_{\lambda}^{\tt NB}\left[\frac{1}{(hn)^2}\Bigl(d(Z_n)-n\upsilon_\lambda\Bigr)^2\right]^{3/5} E_{\lambda}^{\tt NB}\left[\left(\prod_{i=1}^n\frac{A_{\lambda+h}(Z_{i-1},Z_i)}{A_{\lambda}(Z_{i-1},Z_i)}\right)^3\right]^{2/5} \end{align*} In Lemma \ref{ui}, we have already seen that $E_{\lambda}^{\tt NB}\left[\frac{1}{(hn)^2}\bigl(d(Z_n)-n\upsilon_\lambda\bigr)^2\right]$ is bounded in $n$. That $E_{\lambda}^{\tt NB}\left[\left(\prod_{i=1}^n\frac{A_{\lambda+h}(Z_{i-1},Z_i)}{A_{\lambda}(Z_{i-1},Z_i)}\right)^3\right]$ is also bounded in $n$ follows from the estimate \eqref{eq:e2}. \end{proof} \section{Uniform moment bounds on regeneration times}\label{s:uni} In this section we study regeneration and return times for biased random walks on supercritical GW-trees whose offspring law has exponential moments and no deaths (i.e.\ $p_0=0$). First, we prove that for any $u\in\mathbb{N}$ and $[a,b]\subset (0,\mu)$ we have \begin{equation}\label{e:uMom} \sup_{\lambda\in[a,b]}E_{\lambda}^{\tt NB}\left[\tau_1^u\right]=\sup_{\lambda\in[a,b]}\mathit{E}_\lambda\left[\left(\tau_2-\tau_1\right)^u\right]<\infty. \end{equation} This will be used in the proof of Proposition \ref{p:UniMom} where we consider the case with leaves. Following this, we show that the escape probability is continuous in $\lambda$ thus proving Proposition \ref{p:esc-conti}. 
Towards proving \eqref{e:uMom}, we note that, since the interval $[a,b]$ is compact, it suffices to show that for any $\lambda\in(0,\mu)$ and $u\in\mathbb{N}$ there exists $\varepsilon>0$ such that \[\sup_{|h|\leq \varepsilon}E_{\lambda+h}^{\tt NB}\left[\tau_1^u\right]=\sup_{|h|\leq \varepsilon}\mathit{E}_{\lambda+h}\left[\left(\tau_2-\tau_1\right)^u\right]<\infty.\] For $\lambda<1$ this follows trivially by choosing $\varepsilon<1-\lambda$ and comparing with a biased random walk on $\mathbb{Z}$ (e.g.\ Lemma 5.1 of \cite{depeze96}). We consider the case $\lambda\geq 1$ and proceed similarly to Proposition 3 in \cite{PZ} in which it is shown that $E_{\lambda}^{\tt NB}[\tau_1^u]=\mathit{E}_\lambda\left[\left(\tau_2-\tau_1\right)^u\right]<\infty$ for any $\lambda\in(0,\mu)$ and $u\in\mathbb{N}$. Our main contribution here is that we show that this bound is uniform in the bias $\lambda$ in compact intervals, for which Remark \ref{r:Rayleigh} will play an important role. \begin{rem}\label{r:Rayleigh} By Rayleigh's monotonicity principle we have that for any infinite tree $\mathscr{T}$ and any $v\in\mathscr{T}$, \[P^{\mathscr{T}}_{\lambda,v}(\sigma_e=\infty)\] is monotonically decreasing in $\lambda$. This follows using the relationship between electrical networks and reversible Markov chains (see \cite{LP} for further detail). \end{rem} We now show that the speed $\upsilon_\lambda$ is bounded away from $0$ uniformly in $\lambda$ in compact subsets of $[1,\mu)$. \begin{lem}\label{l:SpBnd} Suppose $p_0=0$. For any $b\in[1,\mu)$ there exists a constant $c_b>0$ such that \[\inf_{\lambda\in[1,b]}\upsilon_\lambda\geq c_b.\] \end{lem} \begin{proof} By Theorem 3.1 of \cite{LPP3}, for $\lambda \in (1,\mu)$ we have that $\upsilon_\lambda\geq (1-\lambda^{-1})^3(1-q_\lambda)^2/12$ where $q_\lambda$ is the smallest non-negative solution to $f(1-\lambda^{-1}(1-q_\lambda))=q_\lambda$. 
It is immediate from this that for any $a>1$ there exists $c_{a,b}>0$ such that \[\inf_{\lambda\in[a,b]}\upsilon_\lambda\geq c_{a,b}.\] It therefore remains to consider $\lambda$ arbitrarily close to $1$. Let $\xi$ be a random variable with the offspring distribution. By Theorem 1.1 of \cite{A1} we have that \[\upsilon_\lambda=\mathbb{E}\left[\frac{(\xi-\lambda)\tilde{p}_\lambda^{(0)}}{\lambda-1+\sum_{i=0}^\xi\tilde{p}^{(i)}_\lambda}\right]\bigg/\mathbb{E}\left[\frac{(\xi+\lambda)\tilde{p}_\lambda^{(0)}}{\lambda-1+\sum_{i=0}^\xi\tilde{p}^{(i)}_\lambda}\right]\] where $\tilde{p}^{(i)}_\lambda$ are independent copies of $\mathit{P}_\lambda^\mathbf{T}(\sigma_e=\infty)$ (which are also independent of $\xi$). Since $\tilde{p}^{(i)}_\lambda$ are independent of $\xi$ we have that, for $\lambda \in[1,3/2]$, \begin{align} &\mathbb{E}\left[\frac{(\xi-\lambda)\tilde{p}_\lambda^{(0)}}{\lambda-1+\sum_{i=0}^\xi\tilde{p}^{(i)}_\lambda}\right]\notag\\ & =\sum_{k=1}^\infty \mathbb{P}(\xi=k)\mathbb{E}\left[\frac{(k-\lambda)\tilde{p}_\lambda^{(0)}}{\lambda-1+\sum_{i=0}^k\tilde{p}^{(i)}_\lambda}\right] \label{e:SpLo}\\ & \geq p_1(1-\lambda)\mathbb{E}\left[\frac{\tilde{p}_\lambda^{(0)}}{\lambda-1+\sum_{i=0}^1\tilde{p}^{(i)}_\lambda}\right]+\frac{1}{4}\sum_{k=2}^\infty p_k\mathbb{E}\left[\frac{k\tilde{p}_\lambda^{(0)}}{\lambda-1+\sum_{i=0}^k\tilde{p}^{(i)}_\lambda}\right] \notag \end{align} since $\xi-\lambda\geq \xi/4$ for $\xi\geq 2$ and $\lambda \leq 3/2$. Similarly, \begin{align}\label{e:SpUp} \mathbb{E}\left[\frac{(\xi+\lambda)\tilde{p}_\lambda^{(0)}}{\lambda-1+\sum_{i=0}^\xi\tilde{p}^{(i)}_\lambda}\right] & \leq 2\sum_{k=1}^\infty p_k\mathbb{E}\left[\frac{k\tilde{p}_\lambda^{(0)}}{\lambda-1+\sum_{i=0}^k\tilde{p}^{(i)}_\lambda}\right]. \end{align} By Remark \ref{r:Rayleigh}, for any tree $\tilde{p}^{(i)}_\lambda$ is decreasing in $\lambda$. Moreover, $\mathbb{P}(\tilde{p}^{(i)}_{1+\varepsilon}>0)>0$ for any $\varepsilon\in(0,\mu-1)$. 
It follows that there exists $c>0$ such that for any $k\geq 1$ and $\lambda\in [1,1+\varepsilon]$ for $\varepsilon>0$ suitably small we have that \begin{align} \frac{k}{k+1} & \geq \mathbb{E}\left[\frac{k\tilde{p}_\lambda^{(0)}}{\lambda-1+\sum_{i=0}^k\tilde{p}^{(i)}_\lambda}\right] \notag\\ & =\frac{k}{k+1}\left(1-\mathbb{E}\left[\frac{\lambda-1}{\lambda-1+\sum_{i=0}^k\tilde{p}^{(i)}_\lambda}\right]\right) \notag\\ & \geq \frac{k}{k+1}\left(1-\mathbb{E}\left[\frac{\varepsilon}{\varepsilon+\sum_{i=0}^k\tilde{p}^{(i)}_{1+\varepsilon}}\right]\right) \notag\\ & \geq \frac{ck}{k+1}. \label{e:kkp1} \end{align} In particular, we can choose $\varepsilon>0$ sufficiently small such that \[p_1(\lambda-1)\mathbb{E}\left[\frac{\tilde{p}_\lambda^{(0)}}{\lambda-1+\sum_{i=0}^1\tilde{p}^{(i)}_\lambda}\right]\leq \frac{1}{8}\sum_{k=2}^\infty p_k\mathbb{E}\left[\frac{k\tilde{p}_\lambda^{(0)}}{\lambda-1+\sum_{i=0}^k\tilde{p}^{(i)}_\lambda}\right]\] uniformly over $\lambda \in[1,1+\varepsilon]$. Combining this with \eqref{e:SpLo} and \eqref{e:SpUp} we have \begin{align*} \upsilon_\lambda & \geq \frac{1}{16} \sum_{k=2}^\infty p_k\mathbb{E}\left[\frac{k\tilde{p}_\lambda^{(0)}}{\lambda-1+\sum_{i=0}^k\tilde{p}^{(i)}_\lambda}\right]\bigg/ \sum_{k=1}^\infty p_k\mathbb{E}\left[\frac{k\tilde{p}_\lambda^{(0)}}{\lambda-1+\sum_{i=0}^k\tilde{p}^{(i)}_\lambda}\right] \end{align*} which is bounded below for $\lambda \in[1,1+\varepsilon]$ for $\varepsilon>0$ suitably small using \eqref{e:kkp1} \end{proof} We now use the Girsanov formula \eqref{Gir} to obtain a useful bound relating the laws for different values of $\lambda$. Let $\Delta_n:=\inf\{m\geq 0: d(Z_m)=n\}$ be the first time the walk reaches distance $n$ from the root. 
\begin{lem}\label{l:Gir} For any tree $\mathscr{T}$ of height at least $n$, $\lambda\in(0,1]$ and $h\in(0,\lambda)$ we have that \[\mathit{P}_{\lambda-h}^{\mathscr{T}}(\Delta_n> m,\sigma_e>m)\leq e^{nh}\mathit{P}_{\lambda}^{\mathscr{T}}(\Delta_n> m, \sigma_e>m).\] \end{lem} \begin{proof} First note that the function $F((Z_{k})_{k\geq 0})=\mathbf{1}_{\{\Delta_n> m,\sigma_e>m\}}$ is measurable with respect to $\mathcal{F}_m(\mathscr{T})$ therefore, by the Girsanov formula \eqref{Gir} we have that \begin{align}\label{e:Gir} \mathit{P}_{\lambda-h}^{\mathscr{T}}(\Delta_n> m,\sigma_e>m)=\mathit{E}_{\lambda}^{\mathscr{T}}\left[\mathbf{1}_{\{\Delta_n> m,\sigma_e>m\}}\prod_{i=1}^m\frac{A_{\lambda-h}(Z_{i-1},Z_i)}{A_{\lambda}(Z_{i-1},Z_i)}\right]. \end{align} For a walk started from the root, every time the walk takes a step back towards the root it crosses an edge that has previously been crossed. In particular, there is a most recent time that edge was crossed and, due to the tree structure, it must have been crossed directed away from the root. It follows that, for any path $(z_k)_{k=0}^m$ in $\mathscr{T}$, every pair $(z_{i-1},z_i)$ either corresponds to a unique pair $(z_{j-1},z_j)$ using this coupling or belongs to the unique self avoiding path starting from the root and ending at $z_m$. Denote by $\gamma$ this unique path of length $d(z_m)$. For a neighbouring pair of vertices $x,y\in\mathscr{T}$ it is straightforward to show that \[\frac{A_{\lambda-h}(x,y)A_{\lambda-h}(y,x)}{A_{\lambda}(x,y)A_{\lambda}(y,x)}\leq 1\] for $\lambda\in(0,1]$ and $h\in(0,\lambda)$. It follows that, \begin{align}\label{e:Canc} \prod_{i=1}^{d(z_m)}\frac{A_{\lambda-h}(z_{i-1},z_i)}{A_{\lambda}(z_{i-1},z_i)}=\prod_{x\in\gamma\setminus\{z_0,z_m\}}\frac{\lambda+\nu(x)}{\lambda-h+\nu(x)} \leq e^{h(d(z_m)-1)}. \end{align} Noting that $\{\Delta_n>m\}\subset\{d(Z_m)< n\}$, combining \eqref{e:Gir} and \eqref{e:Canc} completes the proof. 
\end{proof} An important result that we will use in the following proof is that the distance between regenerations has exponential moments. That is, by Lemma 4.2 of \cite{DGPZ} we have that for any $\lambda\in(0,\mu)$ there exists $\theta(\lambda)=:\theta>0$ such that $\mathit{E}_\lambda[e^{\theta d(Z_{\tau_1})}]<\infty$. In fact, we require the stronger uniform moment bound Lemma \ref{l:empMom}, whose proof is a straightforward extension of that of Lemma 4.2 in \cite{DGPZ} using Remark \ref{r:Rayleigh} which we omit. \begin{lem}\label{l:empMom} Suppose $p_0=0$. For any $[a,b]\subset(0,\mu)$ there exists $\theta(a,b)=:\theta>0$ such that \begin{align}\label{e:empMom} \sup_{\lambda\in[a,b]}\mathit{E}_\lambda[e^{\theta d(Z_{\tau_1})}]<\infty. \end{align} \end{lem} \if0 First note that it suffices to show that \[\sup_{\lambda\in[a,b]}\mathit{P}_\lambda(d(Z_{\tau_1})>t)\leq Ce^{-\varsigma t}\] for some $C,\varsigma>0$. We first describe a path decomposition due to \cite{ke77}. Fix $S_0=0$ and $\tilde{\tau}_0:=\inf\{k>0:Z_k=e\}$ then \begin{enumerate} \item if $\tilde{\tau}_0=\infty$, define $K=0$ and $S_i=0$ for all $i\geq 1$; \item else, define $M_0=\max\{d(Z_k):k\leq \tilde{\tau}_0\}$ to be the furthest distance reached before returning to the root and $S_1:=\min\{n:d(Z_n)>M_0\}$ to be the first time the walk reaches further than $M_0$. \end{enumerate} Recursively, define $\tilde{\tau}_i:=\inf\{k>S_i:d(Z_k)=d(Z_{S_i})-1\}$ and \begin{enumerate} \item if $\tilde{\tau}_i=\infty$ then set $K=i$ and $S_n=\infty$ for all $n\geq i$; \item else set $M_i=\max\{d(Z_k):k\leq \tilde{\tau}_i\}$ and $S_{i+1}:=\min\{n:d(Z_n)>M_i\}$. \end{enumerate} We note that if $K>0$ then $S_K$ is the first level regeneration distance and if $K=0$ then $d(Z_{\tau_1})=1$. 
For $t>1$ \begin{align} \mathit{P}_\lambda(d(Z_{\tau_1})>t) & = \mathit{P}_\lambda\left(\sum_{i=1}^K(d(Z_{S_i})-d(Z_{S_{i-1}})>t\right)\notag \\ & = \sum_{j=1}^\infty\mathit{P}_\lambda\left(\sum_{i=1}^j(d(Z_{S_i})-d(Z_{S_{i-1}})>t, \tilde{\tau}_{j-1}<\infty,\tilde{\tau}_j=\infty\right).\label{e:posSum} \end{align} Note that $(d(Z_{S_i})-d(Z_{S_{i-1}}))_{i\geq 1}$ are independent since they only depend on the part of the tree which is unseen up to the stopping time $S_{i-1}$. In particular, by Lemma 4.4 of \cite{DGPZ}, for $C_i\subset \mathbb{N}$ we have that \begin{align} &\mathit{P}_\lambda(\{d(Z_{S_i})-d(Z_{S_{i-1}})\in C_j\}_{i=1}^j, \tilde{\tau}_{j-1}<\infty,\tilde{\tau}_j=\infty) \label{e:indSec}\\ &\qquad = \mathit{P}_\lambda(d(Z_{S_1})\in C_1, \tilde{\tau}_0<\infty)\mathit{P}_\lambda(\tilde{\tau}_1=\infty)\prod_{i=2}^j\mathit{P}_\lambda(d(Z_{S_2})-d(Z_{S_1})\in C_i, \tilde{\tau}_1<\infty). \notag \end{align} Now, for $t>3$ and any fixed tree $\mathscr{T}$ we have \begin{align*} \mathit{P}_\lambda^{\mathscr{T}}(d(Z_{S_1})>t, \tilde{\tau}_0<\infty) \leq \sum_{x}\mathit{P}_\lambda^{\mathscr{T}}(Z_1=x)\mathit{P}_{\lambda,x}^{\mathscr{T}}(\sigma_x<\infty, d(Z_n)=t-1 \text{ for some } n<\sigma_x) \end{align*} where the sum is over $x$ such that $\pi(x)=e$. Averaging then yields \begin{align*} \mathit{P}_\lambda(d(Z_{S_1})>t, \tilde{\tau}_0<\infty) & = \mathbb{E}\left[\mathit{P}_\lambda^{\mathbf{T}}(d(Z_{S_1})>t, \tilde{\tau}_0<\infty)\right] \\ &\leq\mathbb{E}\!\left[\sum_{x}\frac{1}{\nu(e)}\mathit{P}_{\lambda,x}^{\mathbf{T}}(\sigma_x<\infty, d(Z_n)=t-1 \text{ for some } n<\sigma_x)\right]\\ &=\mathbb{E}\!\left[\sum_{x}\frac{1}{\nu(e)}\mathbb{E}\left[\mathit{P}_{\lambda,x}^{\mathbf{T}}(\sigma_x<\infty, d(Z_n)=t-1 \text{ for some } n<\sigma_x)|\nu(e)\right]\right]\\ & = \mathit{P}_\lambda(d(Z_{S_2})-d(Z_{S_1})\geq t, \tilde{\tau}_1<\infty). 
\end{align*} Combining this with \eqref{e:posSum} and \eqref{e:indSec} we have that it suffices to show that \[\sup_{\lambda\in[a,b]}\mathit{P}_\lambda(d(Z_{S_2})-d(Z_{S_1})\geq t, \tilde{\tau}_1<\infty) \leq \tilde{C}e^{-\tilde{\varsigma} t}.\] We note that \begin{align*} \mathit{P}_\lambda(d(Z_{S_2})-d(Z_{S_1})\geq t, \tilde{\tau}_1<\infty) &\leq \mathit{P}_\lambda(M_0\geq t, \sigma_e<\infty) \\ &\leq \mathit{P}_\lambda(d(Z_m)=t,d(Z_n)=0 \text{ for some } n>m). \end{align*} For a tree $\mathscr{T}$, a constant $\delta>0$ and a vertex $x\in\nu(e)$ write \[\mathcal{A}^\delta_\lambda(\mathscr{T},x):=\mathbf{1}_{\{\nu(e)\geq 2\}}\mathbf{1}_{\{\mathit{P}_\lambda^{\mathscr{T}\setminus\mathscr{T}_x}(\sigma_e=\infty)\geq \delta\}}\] for the indicator that the root has at least two children and, removing the subtree rooted at $x$, the walk has positive probability of regenerating immediately. Note that since $\mathit{P}_\lambda^{\mathscr{T}\setminus\mathscr{T}_x}(\sigma_e=\infty)$ is decreasing in $\lambda$ we have that $\mathcal{A}^\delta_\lambda(\mathscr{T},x)$ is decreasing in $\lambda$. For $\beta>0$ and $z$ in the $n^{\text{th}}$ generation of the tree, we call $z\in\mathscr{T}$ $(\lambda,\beta)$-successful if the shortest path connecting $e$ with $z$ has at least $\beta n$ vertices $v_i$ satisfying $\mathcal{A}^\delta_\lambda(\mathscr{T}_{\pi(v_i)},v_i)=1$. Then, write $\Bc_\lambda^\delta(n,\beta):=\{\exists z \in n^{\text{th}} \text{generation that is not } (\lambda,\beta)-\text{successful}\}$. In particular, if $z$ is $(b,\beta)$-successful then it is $(\lambda,\beta)$-successful for all $\lambda \in [a,b]$ and therefore $\Bc_\lambda^\delta(n,\beta)\subseteq\Bc_b^\delta(n,\beta)$ for all $\lambda \in[a,b]$. 
Upon first visiting a vertex $v_i$ (on the unique path connecting $e$ with $z$) which satisfies $\mathcal{A}^\delta_\lambda(\mathscr{T}_{\pi(v_i)},v_i)=1$, the walk has probability at least \[\frac{\delta(\nu(\pi(v_i))-1)}{\nu(\pi(v_i))+\lambda}\geq \frac{\delta}{2+b}\] of leaving the path connecting $e,z$ and never returning. In particular, for $\mathscr{T}\notin\Bc_b^\delta(t,\beta)$, \[\mathit{P}_\lambda^{\mathscr{T}}(d(Z_m)=t,d(Z_n)=0 \text{ for some } n>m)\leq \left(1-\frac{\delta}{2+b}\right)^{\beta t}\] for all $\lambda \in[a,b]$. Furthermore, by Lemma 2.2 of \cite{DGPZ}, $\mathbb{P}(\Bc_b^\delta(t,\beta))$ decays exponentially in $t$ so we are done. \fi We now proceed to the main result of this section. This follows similarly to Proposition 3 in \cite{PZ}; however, we include the proof since the extension to uniformity over $\lambda$ is delicate. \begin{prp}\label{p:UnMo} Suppose $p_0=0$, $b\in[1,\mu)$ and that there exists $\beta>1$ such that $\sum_{k\geq 1}p_k\beta^k<\infty$. For all $u\in\mathbb{N}$ and $\lambda\in[1,\mu)$ there exists $\varepsilon>0$ such that \[\sup_{\lambda \in[1-\varepsilon,b]}\mathit{E}_{\lambda}\left[\left(\tau_2-\tau_1\right)^u\right]<\infty.\] \end{prp} \begin{proof} First note that \[\mathit{E}_{\lambda}[(\tau_2-\tau_1)^u]=E_{\lambda}^{\tt NB}[\tau_1^u] =\frac{\mathbb{E}\left[E^{\mathbf{T}^*}_{\lambda}[\tau_1^u\mathbf{1}_{\{\sigma_{e^*}=\infty\}}]\right]}{\mathbb{E}\left[P^{\mathbf{T}^*}_{\lambda}(\sigma_{e^*}=\infty)\right]}.\] Since the denominator $\mathbb{E}\left[P^{\mathbf{T}^*}_{\lambda}(\sigma_{e^*}=\infty)\right]$ is monotonic in $\lambda$ by Remark \ref{r:Rayleigh}, it suffices to consider $\sup_{\lambda \in[1-\varepsilon,b]}\mathbb{E}\left[E^{\mathbf{T}^*}_{\lambda}\left[\tau_1^u\mathbf{1}_{\{\sigma_{e^*}=\infty\}}\right]\right]$. 
Using the uniform exponential moment bound \eqref{e:empMom}, the Cauchy-Schwarz inequality and integration by parts we have \begin{align}\label{est:long} & \mathbb{E}\left[E^{\mathbf{T}^*}_{\lambda}\left[\tau_1^u\mathbf{1}_{\{\sigma_{e^*}=\infty\}}\right]\right]\\ & = \sum_{n=1}^\infty \mathbb{E}\left[E^{\mathbf{T}^*}_{\lambda}\left[\tau_1^u;\sigma_{e^*}=\infty,d(Z_{\tau_1})=n\right]\right] \notag\\ & = \sum_{n=1}^\infty \mathbb{E}\left[E^{\mathbf{T}^*}_{\lambda}\left[\Delta_n^u;\sigma_{e^*}=\infty,d(Z_{\tau_1})=n\right]\right] \notag\\ & \leq \sum_{n=1}^\infty \mathbb{E}\left[E^{\mathbf{T}^*}_{\lambda}\left[\Delta_n^{2u};\sigma_{e^*}=\infty\right]\right]^{1/2}\mathbb{E}\left[P^{\mathbf{T}^*}_{\lambda}(d(Z_{\tau_1})=n)\right]^{1/2} \notag\\ & \leq \mathbb{E}\left[E^{\mathbf{T}^*}_{\lambda}\left[e^{\theta d(Z_{\tau_1})}\right]\right]\sum_{n=1}^\infty e^{-\theta n} \mathbb{E}\left[E^{\mathbf{T}^*}_{\lambda}\left[\Delta_n^{2u};\sigma_{e^*}=\infty\right]\right]^{1/2} \notag\\ & \leq \mathbb{E}\!\left[E^{\mathbf{T}^*}_{\lambda}\!\left[e^{\theta d(Z_{\tau_1})}\right]\right]\!\sum_{n=1}^\infty e^{-\theta n}n^{10u}\! \left(\sum_{k=0}^\infty (k+1)^{2u} \mathbb{E}\!\left[P^{\mathbf{T}^*}_{\lambda}\!(\Delta_n>kn^{10}, \sigma_{e^*}=\infty)\right]\!\right)^{\!1/2}\!. 
\notag \end{align} \if0 \begin{align}\label{est:long} & = \sum_{n=1}^\infty \mathit{E}_{\lambda}[\tau_1^u;\sigma_e=\infty,d(Z_{\tau_1})=n] \notag\\ & = \sum_{n=1}^\infty \mathit{E}_{\lambda}[\Delta_n^u;\sigma_e=\infty,d(Z_{\tau_1})=n] \notag\\ & \leq \sum_{n=1}^\infty \mathit{E}_{\lambda}[\Delta_n^{2u};\sigma_e=\infty]^{1/2}\mathit{P}_\lambda(d(Z_{\tau_1})=n)^{1/2} \notag\\ & \leq \mathit{E}_\lambda[e^{^{\text{th}}eta d(Z_{\tau_1})}]\sum_{n=1}^\infty e^{-^{\text{th}}eta n} \mathit{E}_{\lambda}[\Delta_n^{2u};\sigma_e=\infty]^{1/2} \notag\\ & \leq \mathit{E}_\lambda[e^{^{\text{th}}eta d(Z_{\tau_1})}]\sum_{n=1}^\infty e^{-^{\text{th}}eta n}n^{10u} \left(\sum_{k=0}^\infty (k+1)^{2u} \mathit{P}_{\lambda}(\Delta_n>kn^{10}, \sigma_e=\infty)\right)^{1/2}. \end{align} Using the uniform exponential moment bound \eqref{e:empMom}, the Cauchy-Schwarz inequality and integration by parts we have \begin{align*} \mathit{E}_{\lambda}[\tau_1^u\mathbf{1}_{\{\sigma_e=\infty\}}] & = \sum_{n=1}^\infty \mathit{E}_{\lambda}[\tau_1^u;\sigma_e=\infty,d(Z_{\tau_1})=n] \\ & = \sum_{n=1}^\infty \mathit{E}_{\lambda}[\Delta_n^u;\sigma_e=\infty,d(Z_{\tau_1})=n] \\ & = \sum_{n=1}^\infty \mathit{E}_{\lambda}[\Delta_n^{2u};\sigma_e=\infty]^{1/2}\mathit{P}_\lambda(d(Z_{\tau_1})=n)^{1/2} \\ & \leq \mathit{E}_\lambda[e^{^{\text{th}}eta d(Z_{\tau_1})}]\sum_{n=1}^\infty e^{-^{\text{th}}eta n} \mathit{E}_{\lambda}[\Delta_n^{2u};\sigma_e=\infty]^{1/2} \\ & \leq \mathit{E}_\lambda[e^{^{\text{th}}eta d(Z_{\tau_1})}]\sum_{n=1}^\infty e^{-^{\text{th}}eta n}n^{10u} \sum_{k=0}^\infty (k+1)^{2u} \mathit{P}_{\lambda}(\Delta_n>kn^{10}, \sigma_e=\infty). 
\fi By Lemma \ref{l:Gir} we have that for $\varepsilon>0$ suitably small \begin{align*} \sup_{h\in(0,\varepsilon)}\mathbb{E}\left[P^{\mathbf{T}^*}_{1-h}(\Delta_n>kn^{10}, \sigma_{e^*}=\infty)\right] & \leq\sup_{h\in(0,\varepsilon)}\mathbb{E}\left[P^{\mathbf{T}^*}_{1-h}(\Delta_n>kn^{10}, \sigma_{e^*}>kn^{10})\right]\\ & \leq e^{\varepsilon n}\mathbb{E}\left[P^{\mathbf{T}^*}_{1}(\Delta_n>kn^{10}, \sigma_{e^*}>kn^{10})\right]. \end{align*} Choosing $\varepsilon<\theta/2$ and using \eqref{e:empMom}, it suffices to show that \begin{align}\label{est:suff} \sup_{\lambda\in[1,b]}\sum_{n=1}^\infty e^{-\theta n/2}n^{10u}\left(\sum_{k=0}^\infty (k+1)^{2u} \mathbb{E}\left[P^{\mathbf{T}^*}_{\lambda}(\Delta_n>kn^{10}, \sigma_{e^*}>kn^{10})\right]\right)^{1/2}<\infty. \end{align} For $k\geq 1$, let \[\mathcal{A}_{1,k,n}:=\bigcup_{m\leq kn^{10}}\{|\nu(Z_m)|\geq \log(kn^{10})^2\}\] be the event that the walk visits a vertex with at least $\log(kn^{10})^2$ offspring by time $kn^{10}$. By the exponential moments assumption we have that for all $n$ large \[ \mathbb{E}\left[P_{\lambda}^{\mathbf{T}^*}(\mathcal{A}_{1,k,n})\right]\leq kn^{10}\mathbb{P}(|\nu(e)|\geq \log(kn^{10})^2)\leq e^{-c\log(n^{10})^2}e^{-c\log(k)^2}\] for some constant $c$ depending only on $\beta$. Let $N_{k,n}:=|\{m\leq kn^{10}: Z_l\neq Z_m \forall l<m\}|$ be the number of distinct vertices visited by time $kn^{10}$. Set \[\mathcal{A}_{2,k,n}:=\left\{N_{k,n}<\sqrt{kn^{10}}\right\}\cap\left\{\sigma_e>kn^{10}\right\}\] to be the event that, up to time $kn^{10}$, the walk visits at most $(kn^{10})^{1/2}$ distinct vertices and does not return to the root $e^*$. On the event $\mathcal{A}_{2,k,n}\cap \mathcal{A}_{1,k,n}^c$ there is a time $m\leq kn^{10}$ and a vertex $v$ with degree at most $\log(kn^{10})^2$ such that $Z_m=v$ and $v$ is subsequently visited at least $(kn^{10})^{1/2}$ times without a visit to the root. 
By the Gambler's ruin, for a walk started at $v$ of distance at most $n$ from the root, the probability that the walk returns to $v$ before reaching the root is at most $1-1/(2n\log(kn^{10})^2)$ uniformly in $k, m, v$ and $\lambda\geq 1$. It follows that the probability that $v$ is visited by the walk $(kn^{10})^{1/2}$ times without a visit to the root is at most \[\left(1-\frac{1}{2n\log(kn^{10})^2}\right)^{\sqrt{kn^{10}}}.\] It follows that for $n$ suitably large (independently of $k\geq 1$) \begin{align*} \mathbb{E}\left[P^{\mathbf{T}^*}_\lambda(\mathcal{A}_{2,k,n})\right] & \leq \mathbb{E}\left[P^{\mathbf{T}^*}_\lambda(\mathcal{A}_{1,k,n})\right] +kn^{10}\left(1-\frac{1}{2n\log(kn^{10})^2}\right)^{\sqrt{kn^{10}}} \\ & \leq 2e^{-c\log(n^{10})^2}e^{-c\log(k)^2}. \end{align*} On the event $\mathcal{A}_{2,k,n}^c\cap\{\sigma_e>kn^{10}\}$ there are at least $k^{1/2}n^3$ vertices which are visited by the walk before time $kn^{10}$ with at least time $n^2$ between the first hitting times. 
Write $\psi_1:=\min\{m>0:Z_l\neq Z_m \forall l<m\}$ and, for $i\geq 2$, \[\psi_i:=\min\{m>\psi_{i-1}+n^2:Z_l\neq Z_m \forall l<m\}.\] Then, let \[\mathcal{G}_j=\bigcap_{i=1}^j\left\{\max_{m\leq n^2}|d(Z_{\psi_i})-d(Z_{\psi_i+m})|<n\right\}.\] We have that \begin{align}\label{e:awy} &\mathbb{E}\left[P^{\mathbf{T}^*}_\lambda(\Delta_n>kn^{10}, \sigma_e>kn^{10},\mathcal{A}_{2,k,n}^c)\right] \\ & \leq \mathbb{E}\left[P^{\mathbf{T}^*}_\lambda\left(\bigcap_{i=1}^{k^{1/2}n^3}\left\{\max_{m\leq n^2}|d(Z_{\psi_i})-d(Z_{\psi_i+m})|<n\right\}\right)\right]\notag \\ & = \prod_{i=1}^{k^{1/2}n^3} \mathbb{E}\left[P^{\mathbf{T}^*}_\lambda\left(\max_{m\leq n^2}|d(Z_{\psi_i})-d(Z_{\psi_i+m})|<n\big|\mathcal{G}_{i-1}\right)\right] \notag\\ &= \prod_{i=1}^{k^{1/2}n^3} \left(1-\mathbb{E}\left[P^{\mathbf{T}^*}_\lambda\left(\max_{m\leq n^2}|d(Z_{\psi_i})-d(Z_{\psi_i+m})|\geq n\big|\mathcal{G}_{i-1}\right)\right]\right).\notag \end{align} If the walk regenerates at time $\psi_i$ then $(Z_m)_{m\geq \psi_i}$ is independent of $\mathcal{G}_{i-1}$ (conditionally on $Z_{\psi_i}$) therefore \eqref{e:awy} is bounded above by \begin{align*} & \prod_{i=1}^{k^{1/2}n^3} \left(1-\mathbb{E}\left[P^{\mathbf{T}^*}_\lambda\left(\max_{m\leq n^2}|d(Z_{\psi_i})-d(Z_{\psi_i+m})|\geq n, d(Z_m)\geq d(Z_{\psi_i}) \forall m\geq \psi_i\right)\right]\right) \\ &= \prod_{i=1}^{k^{1/2}n^3} \left(1-\mathbb{E}\left[P^{\mathbf{T}^*}_\lambda\left(\Delta_n<n^2, \sigma_{e^*}=\infty\right)\right]\right) \\ & = \prod_{i=1}^{k^{1/2}n^3} \left(1-\mathbb{E}\left[P^{\mathbf{T}^*}_\lambda\left(\sigma_e=\infty\right)\right]P_{\lambda}^{\tt NB}\left(\Delta_n<n^2\right)\right). \end{align*} We have seen that $\mathit{P}_\lambda\left(\sigma_e=\infty\right)$ is bounded away from $0$ for $\lambda \in[1,b]$ therefore it remains to show that, for $n$ large, $P_{\lambda}^{\tt NB}\left(\Delta_n<n^2\right)$ is bounded away from $0$ uniformly in $\lambda\in[1,b]$. 
By Markov's inequality \begin{align*} P_{\lambda}^{\tt NB}\left(\Delta_n\geq n^2\right) \leq \frac{E^{\tt NB}_\lambda[\Delta_n]}{n^2} \leq \frac{E^{\tt NB}_\lambda[\tau_1]}{n} \leq \frac{E^{\tt NB}_\lambda[d(Z_{\tau_1})]}{\upsilon_\lambda n} \end{align*} where we have used that there are at most $n$ regenerations up to level $n$ and the formula of the speed \eqref{speed}. By Lemmas \ref{l:SpBnd} and \ref{l:empMom} we then have that this converges to $0$ (uniformly in $\lambda$) as $n \rightarrow \infty$ which completes the proof. \end{proof} We now prove the following lemma which claims stronger estimates than Proposition \ref{est:sigma} under the assumption that $p_0=0$. \begin{lem}\label{lem:est:sigma} Suppose that $p_0=0$ and that $\sum_{k\geq1}p_k\beta^k<\infty$ for some $\beta>1$. Then for any $\lambda\in(0,\mu)$ and any $l\in\mathbb{N}$, we have \[\mathbb{E}\left[E^{\mathbf{T}^*}_{\lambda}\left[(\sigma_{e^*})^l\mathbf{1}_{\{\sigma_{e^*}<\infty\}}\right]\right]<\infty.\] \end{lem} \begin{proof} On the event $\{\sigma_{e^*}<\infty\}$, we obviously have that \begin{align}\label{est:sig-tau} \sigma_{e^*}\leq\tau_1\ \ P_{\lambda}\mathchar`-a.s. \end{align} The estimate \eqref{est:sig-tau} together with Lemma 5.1 in \cite{depeze96} implies the result for $0<\lambda<1$. The case $\lambda=1$ can be shown by using \eqref{est:sig-tau} and Theorem 2 in \cite{piau}. We will show the claim for $1<\lambda<\mu$. Notice that $\sigma_{e^*}=\sigma_{e^*}\wedge \tau_1$ almost surely on the event $\{\sigma_{e^*}<\infty\}$. 
Therefore similarly to \eqref{est:long}, we obtain that \begin{align*} &\mathbb{E}\left[E^{\mathbf{T}^*}_{\lambda}\left[(\sigma_{e^*})^l\mathbf{1}_{\{\sigma_{e^*}<\infty\}}\right]\right]\notag\\ & = \sum_{n=1}^\infty \mathbb{E}\left[E^{\mathbf{T}^*}_{\lambda}\left[(\sigma_{e^*}\wedge \tau_1)^l;\ \sigma_{e^*}<\infty,d(Z_{\tau_1})=n\right]\right] \notag\\ & \leq \mathbb{E}\!\left[E^{\mathbf{T}^*}_{\lambda}\!\left[e^{\theta d(Z_{\tau_1})}\right]\right]\\ & \qquad\cdot\sum_{n=1}^\infty e^{-\theta n}n^{10l}\left(\sum_{k=0}^\infty (k+1)^{2l} \mathbb{E}\!\left[P^{\mathbf{T}^*}_{\lambda}\!\left(kn^{10}<\sigma_{e^*}<\infty,\ kn^{10}<\Delta_n\right)\right] \!\right)^{1/2}\!. \end{align*} This follows similarly to \eqref{est:suff}. \end{proof} We conclude this section by proving Proposition \ref{p:esc-conti}. For this, we first show the following lemma. \begin{lem}\label{l:Betn}Suppose that $p_0=0$. For any $[a,b]\subset (0,\mu)$ \[\lim_{n\rightarrow\infty}\sup_{\lambda\in[a,b]}\mathbb{E}\left[\mathit{P}^{\mathbf{T}^*}_\lambda(n<\sigma_{e^*}<\infty)\right]=0.\] \end{lem} \begin{proof} Let $s_n\rightarrow \infty$ be an increasing sequence that we shall specify later and $\theta>0$ be as in Lemma \ref{l:empMom}. We now split into the cases where $d(Z_{\tau_1})<s_n$ and $d(Z_{\tau_1})\geq s_n$. 
First, for $d(Z_{\tau_1})\geq s_n$, by Markov's inequality \begin{flalign*} &\lim_{n\rightarrow\infty}\sup_{\lambda\in[a,b]}\mathbb{E}\left[\mathit{P}^{\mathbf{T}^*}_\lambda(n<\sigma_{e^*}<\infty,d(Z_{\tau_1})\geq s_n)\right]\\ & \leq \lim_{n\rightarrow\infty}\sup_{\lambda\in[a,b]}\mathbb{E}\left[\mathit{P}^{\mathbf{T}^*}_\lambda(d(Z_{\tau_1})\geq s_n)\right] \\ & = \lim_{n\rightarrow\infty}\sup_{\lambda\in[a,b]}\mathit{P}_\lambda(d(Z_{\tau_1})\geq s_n) \\ & \leq \lim_{n\rightarrow\infty}\sup_{\lambda\in[a,b]}\mathit{E}_\lambda[e^{\theta d(Z_{\tau_1})}]e^{-\theta s_n} \end{flalign*} which converges to $0$ as $n\rightarrow \infty$ by Lemma \ref{l:empMom}. For $d(Z_{\tau_1})<s_n$ we note that \[\{n<\sigma_{e^*}<\infty, \; d(Z_{\tau_1})<s_n\}\subset\{\max_{m\leq n}d(Z_m)<s_n\}\] since once the walk reaches level $s_n$ it cannot return to $e^*$ on the event $\{d(Z_{\tau_1})<s_n\}$. With a slight abuse of notation, let $\mathbb{Z}^+$ denote the tree which is isomorphic to $\mathbb{Z}^+$. By comparison with a simple random walk on $\mathbb{Z}^+$ we have that \begin{flalign*} &\lim_{n\rightarrow\infty}\sup_{\lambda\in[a,b]}\mathbb{E}\left[\mathit{P}^{\mathbf{T}^*}_\lambda(n<\sigma_{e^*}<\infty,d(Z_{\tau_1})< s_n)\right]\\ & \leq \lim_{n\rightarrow\infty}\sup_{\lambda\in[a,b]}\mathit{P}_\lambda\left(\max_{m\leq n}d(Z_m)<s_n\right) \\ & \leq \lim_{n\rightarrow\infty}\mathit{P}_\mu^{\mathbb{Z}^+}\left(\max_{m\leq n}d(Z_m)<s_n\right) \\ & = \lim_{n\rightarrow\infty}\mathit{P}_\mu^{\mathbb{Z}^+}\left(\kappa(s_n)>n\right) \end{flalign*} where $\kappa(l):=\inf\{n\geq 0: d(Z_n)=l\}$ is the first hitting time of level $l$. A simple calculation shows that $\mathit{E}_\mu^{\mathbb{Z}^+}[\kappa(l)]\leq C_\mu \mu^{l}$. 
Therefore, choosing $s_n=\log(n)/\log(\mu^2)$ and using Markov's inequality we have that $\mathit{P}_\mu^{\mathbb{Z}^+}\left(\kappa(s_n)>n\right)\leq C_\mu n^{-1/2}$ and therefore \[\lim_{n\rightarrow\infty}\sup_{\lambda\in[a,b]}\mathbb{E}\left[\mathit{P}^{\mathbf{T}^*}_\lambda(n<\sigma_{e^*}<\infty,d(Z_{\tau_1})< s_n)\right]=0.\] \end{proof} We are now ready to prove that the escape probability $\mathbb{E}[P_{\lambda}^{\mathbf{T}^*}(\sigma_{e^*(\mathbf{T})}<\infty)]$ is a continuous function of the bias. \begin{proof}[Proof of Proposition \ref{p:esc-conti}] First note that we can assume $p_0=0$ without loss of generality since $P_{\lambda}^{\mathbf{T}^*}\left(\sigma_{e^*(\mathbf{T})}=\infty\right)=P_{\lambda}^{\mathbf{T}^*_g}\left(\sigma_{e^*(\mathbf{T}_g)}=\infty\right)$ $\mathbb{P}$-a.s.\ where we recall that $\mathbf{T}^*_g$ is the backbone of $\mathbf{T}^*$. Note that for any tree $\mathscr{T}$ and any $n\in\mathbb{N}$, the function $\lambda\mapsto P_{\lambda}^{\mathscr{T}}(\sigma_{e^*}<n)$ is continuous since $P_{\lambda}^{\mathscr{T}}(\sigma_{e^*}<n)$ only depends on the first $n$ steps of $(Z_n)$. We now show that for any $n\in\mathbb{N}$, \begin{align}\label{eq:conti-n} {\rm the\ function}\ \lambda\mapsto \mathbb{E}\left[P_{\lambda}^{\mathbf{T}^*}(\sigma_{e^*}<n)\right]\ {\rm is\ continuous.} \end{align} For a tree $\mathscr{T}$ and $n\in\mathbb{N}$ we write $\mathscr{T}[n]$ for the truncated tree up to $n^{\text{th}}$ generation, and define \[\mathcal{B}_{n,m}:=\bigcap_{k=1}^n\{\mathscr{T};\ {\rm every\ vertex\ of\ \mathscr{T}[{\it n}]\ in\ {\it k}^{\text{th}}\ generation\ has\ at\ most\ {\it m}\ children}\}.\] Noticing that $P_{\lambda}^{\mathscr{T}}(\sigma_{e^*}<n)$ only depends on $\mathscr{T}[n]$ and that $\{\mathscr{T}[n];\ \mathscr{T}\in\mathcal{B}_{n,m}\}$ is a finite set, we obtain that the function $\lambda\mapsto \mathbb{E}\left[P_{\lambda}^{\mathbf{T}^*}\left(\sigma_{e^*}<n\right)\mathbf{1}_{\mathcal{B}_{n,m}}\right]$ is a continuous function. 
Now the claim \eqref{eq:conti-n} follows since $\mathbb{P}(\mathcal{B}_{n,m}^c)$ is independent of $\lambda$ and converges to $0$ as $m\to\infty$ for any $n$. In order to deduce the conclusion from \eqref{eq:conti-n}, it suffices to prove that $$\mathbb{E}\left[P_{\lambda}^{\mathbf{T}^*}\left(n<\sigma_{e^*}<\infty\right)\right]$$ is uniformly convergent to $0$ in $(\lambda_c,\mu)$ as $n\to\infty$. This immediately follows from Lemma \ref{l:Betn}. \end{proof} \if0 First note that \[\sup_{\lambda \leq b}\mathit{P}_{\lambda}(\sigma_e=\infty)\geq \mathit{P}_{b}(\sigma_e=\infty)>0\] therefore, since \[\mathit{E}_{\lambda}[(\tau_2-\tau_1)^u]=\mathit{E}_{\lambda}[\tau_1^u|\sigma_e=\infty] =\frac{\mathit{E}_{\lambda}[\tau_1^u\mathbf{1}_{\{\sigma_e=\infty\}}]}{\mathit{P}_\lambda(\sigma_e=\infty)}\] it suffices to consider $\sup_{\lambda \in[1-\varepsilon,b]}\mathit{E}_{\lambda}[\tau_1^u\mathbf{1}_{\{\sigma_e=\infty\}}]$. Using the uniform exponential moment bound \eqref{e:empMom}, the Cauchy-Schwarz inequality and integration by parts we have \begin{align*} \mathit{E}_{\lambda}[\tau_1^u\mathbf{1}_{\{\sigma_e=\infty\}}] & = \sum_{n=1}^\infty \mathit{E}_{\lambda}[\tau_1^u;\sigma_e=\infty,d(Z_{\tau_1})=n] \\ & = \sum_{n=1}^\infty \mathit{E}_{\lambda}[\Delta_n^u;\sigma_e=\infty,d(Z_{\tau_1})=n] \\ & = \sum_{n=1}^\infty \mathit{E}_{\lambda}[\Delta_n^{2u};\sigma_e=\infty]^{1/2}\mathit{P}_\lambda(d(Z_{\tau_1})=n)^{1/2} \\ & \leq \mathit{E}_\lambda[e^{^{\text{th}}eta d(Z_{\tau_1})}]\sum_{n=1}^\infty e^{-^{\text{th}}eta n} \mathit{E}_{\lambda}[\Delta_n^{2u};\sigma_e=\infty]^{1/2} \\ & \leq \mathit{E}_\lambda[e^{^{\text{th}}eta d(Z_{\tau_1})}]\sum_{n=1}^\infty e^{-^{\text{th}}eta n}n^{10u} \sum_{k=0}^\infty (k+1)^{2u} \mathit{P}_{\lambda}(\Delta_n>kn^{10}, \sigma_e=\infty). 
\end{align*} By Lemma \ref{l:Gir} we have that for $\varepsilon>0$ suitably small \begin{align*} \sup_{h\in(0,\varepsilon)}\mathit{P}_{1-h}(\Delta_n>kn^{10}, \sigma_e=\infty) & \leq\sup_{h\in(0,\varepsilon)}\mathit{P}_{1-h}(\Delta_n>kn^{10}, \sigma_e>kn^{10})\\ & \leq e^{\varepsilon n}\mathit{P}_{1}(\Delta_n>kn^{10}, \sigma_e>kn^{10}). \end{align*} Choosing $\varepsilon<^{\text{th}}eta/2$ and using \eqref{e:empMom}, it suffices to show that \[\sup_{\lambda\in[1,b]}\sum_{n=1}^\infty e^{-^{\text{th}}eta n/2}n^{10u}\sum_{k=0}^\infty (k+1)^{2u} \mathit{P}_{\lambda}(\Delta_n>kn^{10}, \sigma_e>kn^{10})<\infty.\] For $k\geq 1$, let \[\mathcal{A}_{1,k,n}:=\bigcup_{m\leq kn^{10}}\{|\nu(Z_m)|\geq \log(kn^{10})^2\}\] be the event that the walk visits a vertex with at least $\log(kn^{10})^2$ offspring by time $kn^{10}$. By the exponential moments assumption we have that for all $n$ large \[\mathit{P}_\lambda(\mathcal{A}_{1,k,n})\leq kn^{10}\mathbb{P}(|\nu(e)|\geq \log(kn^{10})^2)\leq e^{-c\log(n^{10})^2}e^{-c\log(k)^2}\] for some constant $c$ depending only on $\beta$. Let $N_{k,n}:=|\{m\leq kn^{10}: Z_l\neq Z_m \forall l<m\}|$ be the number of distinct vertices visited by time $kn^{10}$. Set \[\mathcal{A}_{2,k,n}:=\left\{N_{k,n}<\sqrt{kn^{10}}\right\}\cap\left\{\sigma_e>kn^{10}\right\}\] to be the event that, up to time $kn^{10}$, the walk visits at most $(kn^{10})^{1/2}$ distinct vertices and does not return to the root. On the event $\mathcal{A}_{2,k,n}\cap \mathcal{A}_{1,k,n}^c$ there is a time $m\leq kn^{10}$ and a vertex $v$ with degree at most $\log(kn^{10})^2$ such that $Z_m=v$ and $v$ is subsequently visited at least $(kn^{10})^{1/2}$ times without a visit to the root. By the Gambler's ruin, for a walk started at $v$ of distance at most $n$ from the root, the probability that the walk returns to $v$ before reaching the root is at most $1-1/(\textcolor{red}{2}n\log(kn^{10})^2)$ uniformly in $k, m, v$ and $\lambda\geq 1$. 
It follows that the probability that $v$ is visited by the the walk $(kn^{10})^{1/2}$ times without a visit to the root is at most \[\left(1-\frac{1}{\textcolor{red}{2}n\log(kn^{10})^2}\right)^{\sqrt{kn^{10}}}.\] \begin{comYuki} This is not important at all but when $\lambda=1$, \[\frac{\lambda}{\lambda+\log(kn^{10})^2}\leq \frac{1}{\log(kn^{10})^2}\ \ {\rm though}\ \ \frac{\lambda}{\lambda+\log(kn^{10})^2}\geq \frac{1}{2\log(kn^{10})^2}.\] \end{comYuki} It follows that for $n$ suitably large (independently of $k\geq 1$) \begin{align*} \mathit{P}_\lambda(\mathcal{A}_{2,k,n}) & \leq \mathit{P}_\lambda(\mathcal{A}_{1,k,n}) +kn^{10}\left(1-\frac{1}{\textcolor{red}{2}n\log(kn^{10})^2}\right)^{\sqrt{kn^{10}}} \\ & \leq 2e^{-c\log(n^{10})^2}e^{-c\log(k)^2}. \end{align*} On the event $\mathcal{A}_{2,k,n}^c\cap\{\sigma_e>kn^{10}\}$ there are at least $k^{1/2}n^3$ vertices which are visited by the walk before time $kn^{10}$ with at least time $n^2$ between the first hitting times. Write $\psi_1:=\min\{m>0:Z_l\neq Z_m \forall l<m\}$ and, for $i\geq 2$, \[\psi_i:=\min\{m>\psi_{i-1}+n^2:Z_l\neq Z_m \forall l<m\}.\] Let $\mathcal{G}_m$ be the $\sigma$-algebra generated by the walk up to time $m$. 
We have that \begin{align*} &\mathit{P}_\lambda(\Delta_n>kn^{10}, \sigma_e>kn^{10},\mathcal{A}_{2,k,n}^c) \\ & \leq \mathit{P}_\lambda\left(\bigcap_{i=1}^{k^{1/2}n^3}\left\{\max_{m\leq n^2}|d(Z_{\psi_i})-d(Z_{\psi_i+m})|<n\right\}\right) \\ & \leq \prod_{i=1}^{k^{1/2}n^3} \mathit{P}_\lambda\left(\max_{m\leq n^2}|d(Z_{\psi_i})-d(Z_{\psi_i+m})|<n|\mathcal{G}_{\psi_i}\right) \\ &= \prod_{i=1}^{k^{1/2}n^3} \left(1-\mathit{P}_\lambda\left(\max_{m\leq n^2}|d(Z_{\psi_i})-d(Z_{\psi_i+m})|\geq n|\mathcal{G}_{\psi_i}\right)\right) \\ & \leq \prod_{i=1}^{k^{1/2}n^3} \left(1-\mathit{P}_\lambda\left(\max_{m\leq n^2}|d(Z_{\psi_i})-d(Z_{\psi_i+m})|\geq n, d(Z_m)\geq d(Z_{\psi_i}) \forall m\geq \psi_i\right)\right) \\ &= \prod_{i=1}^{k^{1/2}n^3} \left(1-\mathit{P}_\lambda\left(\Delta_n<n^2, \sigma_e=\infty\right)\right) \\ & = \prod_{i=1}^{k^{1/2}n^3} \left(1-\mathit{P}_\lambda\left(\sigma_e=\infty\right)\mathit{P}_\lambda\left(\Delta_n<n^2| \sigma_e=\infty\right)\right) \end{align*} where the final inequality follows from the fact that if the walk regenerates at time $\psi_i$ then $(Z_m)_{m\geq \psi_i}$ is independent of $\mathcal{G}_{\psi_i}$ (conditionally on $Z_{\psi_i}$). We have seen that $\mathit{P}_\lambda\left(\sigma_e=\infty\right)$ is bounded away from $0$ for $\lambda \in[1,b]$ therefore it remains to show that, for $n$ large, $\mathit{P}_\lambda\left(\Delta_n<n^2| \sigma_e=\infty\right)$ is bounded away from $0$ uniformly in $\lambda\in[1,b]$.By Markov's inequality \begin{align*} \mathit{P}_\lambda\left(\Delta_n\geq n^2| \sigma_e=\infty\right) & \leq \frac{\mathit{E}_\lambda[\Delta_n|\sigma_e=\infty]}{n^2}\\ & \leq \frac{\mathit{E}_\lambda[\tau_2-\tau_1]}{n}\\ & \leq \frac{\mathit{E}_\lambda[d(Z_{\tau_2})-d(Z_{\tau_1})]}{\upsilon_\lambda n} \end{align*} where we have used that there are at most $n$ regenerations up to level $n$ and the formula of the speed \eqref{speed}. 
By Lemma \ref{l:SpBnd} and \eqref{e:empMom} we then have that this converges to $0$ (uniformly in $\lambda$) as $n \rightarrow \infty$ which completes the proof. \fi \section{Moments of generation sizes of Galton-Watson trees}\label{s:mom} In this section we prove several technical estimates for subcritical GW-trees which we will require later when showing moment bounds for the time between regenerations of the walk. For this section, we take $W_n$ to be a GW-process with mean number of offspring $\mu:=\mathbb{E}[W_1]<1$ which will typically be applied as $\lambda_c$ in Section \ref{s:reg}. The following lemma gives a bound on the moments of generation sizes of GW-processes. The main purpose of this lemma is to prove Lemma \ref{l:GWProd}. \begin{lem}\label{l:GWMom} Suppose $W_n$ is a GW-process with mean number of offspring $\mu:=\mathbb{E}[W_1]<1$ and which satisfies $\mathbb{E}[\beta^{W_1}]<\infty$ for some $\beta>1$. Then, for any $m\in\mathbb{N}$ there exists $C_m<\infty$ such that $\mathbb{E}[W_n^m]\leq C_m\mu^n$. \end{lem} \begin{proof} We prove this inductively in $m$. The case $m=1$ holds with $\mathbb{E}[W_n]=\mu^n$ (cf.\ Chapter I.2 of \cite{atne04}). Suppose that for some $m\geq 2$ there exist $C_j<\infty$ for $j=1,...,m-1$ such that $\mathbb{E}[W_n^j]\leq C_j\mu^n$. Let $W_n^{(1)}, W_n^{(2)},...$ be independent copies of $W_n$ then, using the branching property, \begin{flalign*} \mathbb{E}[W_{n+1}^m] = \mathbb{E}[\mathbb{E}[W_{n+1}^m|W_1]] = \mathbb{E}\left[\mathbb{E}\left[\left(\sum_{k=1}^{W_1}W_n^{(k)}\right)^m\big|W_1\right]\right]. \end{flalign*} For $l,m,N\in\mathbb{N}$ let $\mathcal{I}_l^m(N):=\{{\mathbf{k}}=(k_1,...,k_m)\in\{1,...,N\}^m:\sum_{j=1}^N\mathbf{1}_{\bigcup_{i=1}^m\{k_i=j\}}=l\}$ be the $m$-tuples of positive integers at most $N$ with exactly $l$ distinct values. 
Expanding the term in the above expression and using that $W_n^{(k)}$ are independent of $W_1$ we have that \begin{flalign}\label{e:lsum} \mathbb{E}[W_{n+1}^m] &= \mathbb{E}\left[\mathbb{E}\left[\sum_{\mathbf{k}\in\{1,...,W_1\}^m}\prod_{i=1}^{m}W_n^{(k_i)}\big|W_1\right]\right] \notag \\ &= \sum_{l=1}^m\mathbb{E}\left[\sum_{\mathbf{k}\in\mathcal{I}_l^m(W_1)}\mathbb{E}\left[\prod_{i=1}^{m}W_n^{(k_i)}\right]\right]. \end{flalign} If $\mathbf{k}\in\mathcal{I}_1^m(W_1)$ then $k_i=k_1$ for all $i$ and, since there are $W_1$ choices of $k_1$, we have \begin{flalign}\label{e:le1} \mathbb{E}\left[\sum_{\mathbf{k}\in\mathcal{I}_1^m(W_1)}\mathbb{E}\left[\prod_{i=1}^{m}W_n^{(k_i)}\right]\right] = \mathbb{E}\left[\sum_{\mathbf{k}\in\mathcal{I}_1^m(W_1)}\mathbb{E}[(W_n^{(k_1)})^m]\right] = \mathbb{E}[W_1]\mathbb{E}[W_n^m]. \end{flalign} Otherwise, using independence of $W_n^{(1)}, W_n^{(2)},...$ and our induction hypothesis that $\mathbb{E}[W_n^j]\leq C_j\mu^n$ for $j\leq m-1$, for $k\in\mathcal{I}_l^m(W_1)$ we have \begin{flalign*} \mathbb{E}\left[\prod_{i=1}^{m}W_n^{(k_i)}\right]\leq \mu^{nl}\left(\max_{j\leq m-1}C_j\right)^l. \end{flalign*} There are $\prod_{j=0}^{l-1}(W_1-j)$ choices for the $l$ distinct values in $\{1,...,W_1\}$ then $l^{m-l}$ choices for the remaining $m-l$ duplicates and at most $m!$ orderings of the indices. In particular, for $l\geq 2$, \begin{flalign}\label{e:lg2} \mathbb{E}\left[\sum_{\mathbf{k}\in\mathcal{I}_l^m(W_1)}\mathbb{E}\left[\prod_{i=1}^{m}W_n^{(k_i)}\right]\right] \leq \mu^{nl}(\max_{j\leq m-1}C_j)^ll^{m-l}m!\mathbb{E}\left[\prod_{j=0}^{l-1}(W_1-j)\right]. \end{flalign} By the exponential moment assumption we have that $\mathbb{E}[\prod_{j=0}^{l-1}(W_1-j)]\leq \mathbb{E}[W_1^l]<\infty$. 
Combining \eqref{e:lsum}, \eqref{e:le1} and \eqref{e:lg2}, we can choose constants $M_l^m$ such that \begin{flalign*} \mathbb{E}[W_{n+1}^m] &\leq \mu\mathbb{E}[W_n^m]+\sum_{l=2}^mM_l^m\mu^{nl} \\ &\leq \mu^2\mathbb{E}[W_{n-1}^m]+\mu\sum_{l=2}^mM_l^m\mu^{(n-1)l}+\sum_{l=2}^mM_l^m\mu^{nl} \\ &= \mu^2\mathbb{E}[W_{n-1}^m]+\sum_{l=2}^mM_l^m\mu^{nl}(1+\mu^{-(l-1)}). \end{flalign*} Iterating and using the geometric sum formula yields \begin{flalign*} \mathbb{E}[W_{n+1}^m] &\leq \mu^n\mathbb{E}[W_1^m]+\sum_{l=2}^mM_l^m\mu^{nl}\sum_{k=0}^{n-1}\mu^{-k(l-1)}\\ &\leq \mu^n\mathbb{E}[W_1^m]+\sum_{l=2}^mM_l^m\mu^{nl}\frac{\mu^{-n(l-1)}-1}{\mu^{-(l-1)}-1} \end{flalign*} which is bounded above by $C_m\mu^{n+1}$ as required since $\mathbb{E}[W_1^m]<\infty$ by the exponential moments assumption. \end{proof} The corresponding lower bound holds trivially by noting that $\mathbb{P}(W_n\geq 1)\leq\mathbb{E}[W_n^m]$ for any $m\in\mathbb{N}$ and using that $\mathbb{P}(W_n\geq 1)\mu^{-n}$ is decreasing and converges (e.g.\ Theorem B in \cite{LPP1}). This shows that, up to constants, this is the best possible bound. The following result extends Lemma \ref{l:GWMom} to the expectation of products of the generation sizes at varying times. This is an extension of Lemma 2.4.1 in \cite{bo17} which proves this for $m\leq 3$. \begin{lem}\label{l:GWProd} Suppose $W_n$ is a GW-process with mean number of offspring $\mu:=\mathbb{E}[W_1]<1$ and which satisfies $\mathbb{E}[\beta^{W_1}]<\infty$ for some $\beta>1$. Then, for any $m\in\mathbb{N}$ there exists $\tilde{C}_m<\infty$ such that for any $(n_i)_{i=1}^m\in\mathbb{N}^m$ we have \[\mathbb{E}\left[\prod_{i=1}^mW_{n_i}\right]\leq \tilde{C}_m\mu^{\max_{l\leq m}n_l}.\] \end{lem} \begin{proof} Let $W_n^{(k)}$ be independent GW-processes for $k\geq 1$. 
Using the branching property of GW-processes and convexity of polynomials of degree $l\in\mathbb{N}$ we have \begin{flalign}\label{e:GWCond} \mathbb{E}[W_n^l|W_0=j] \; =\; \mathbb{E}\left[\left(\sum_{k=1}^jW_n^{(k)}\right)^l\right] \;\leq\; j^l\mathbb{E}\left[\sum_{k=1}^j\frac{(W_n^{(k)})^l}{j}\right] \;=\; j^l\mathbb{E}[W_n^l]. \end{flalign} Without loss of generality let $n_1\leq n_2\leq\dots\leq n_m$ be ordered. Noting that $W_n$ is a Markov process, by \eqref{e:GWCond} we have \begin{flalign*} \mathbb{E}\!\left[\prod_{i=1}^mW_{n_i}\right] = \mathbb{E}\!\left[\mathbb{E}\left[W_{n_m}\big|W_{n_{m-1}}\right]\prod_{i=1}^{m-1}W_{n_i}\right] \leq \mathbb{E}[W_{n_m-n_{m-1}}]\mathbb{E}\!\left[W_{n_{m-1}}^2\prod_{i=1}^{m-2}W_{n_i}\right]. \end{flalign*} Iterating and applying Lemma \ref{l:GWMom} then gives \begin{flalign*} \mathbb{E}\left[\prod_{i=1}^mW_{n_i}\right] \;\leq\; \prod_{i=1}^m\mathbb{E}[W_{n_i-n_{i-1}}^{m+1-i}] \;\leq\; \prod_{i=1}^mC_{m+1-i}\mu^{n_i-n_{i-1}} \;\leq\; \tilde{C}_m\mu^{n_m} \end{flalign*} where $\tilde{C}_m=(\max_{l\leq m}C_l)^m<\infty$. \end{proof} \section{The proof of Proposition \ref{p:UniMom}}\label{s:reg} The main aim of this section is to prove Corollary \ref{c:2pe} which states that for any closed ball $B$ contained within $(\lambda_c^{1/2},\mu)$ there exists $\varepsilon>0$ such that the time between regenerations has finite $(2+\varepsilon)^{\text{th}}$ moments uniformly over $\lambda\in B$. We deduce this from the more general result Proposition \ref{p:UniMom}. We first state the following lemma which gives a useful bound for the $\alpha^{\text{th}}$ moments of a geometric random variable. This will be used repeatedly throughout this section. 
\begin{lem}\label{l:GeoAlpha} For any $\alpha>0$ there exists $C_\alpha<\infty$ such that for any $p\in(0,1)$ we have \[ \sum_{k=1}^\infty k^\alpha p^k(1-p) \leq C_\alpha p(1-p)^{-\alpha}.\] \end{lem} \begin{proof} Note that if $f:\mathbb{R}\rightarrow \mathbb{R}^+$ is increasing and $g:\mathbb{R}\rightarrow \mathbb{R}^+$ is decreasing then for $x \in[k,k+1)$ we have that $f(x-1)\leq f(k)\leq f(x)$ and $g(x)\leq g(k)\leq g(x-1)$. Therefore, \begin{align*} \sum_{k=1}^\infty f(k)g(k) &=\sum_{k=1}^\infty \int_k^{k+1} f(k)g(k)\mathrm{d} x\\ & \leq \sum_{k=1}^\infty \int_k^{k+1} f(x)g(x-1)\mathrm{d} x\\ & =\int_1^\infty f(x)g(x-1)\mathrm{d} x. \end{align*} Take the specific case that $f(x)=x^\alpha$ (which is increasing since $\alpha>0$) and $g(x)=p^x$ (which is decreasing for $p\in(0,1)$). Then, for $p\in[1/2,1)$, we have that \begin{flalign*} p^{-1}(1-p)^{1+\alpha}\sum_{k=1}^\infty k^\alpha p^k & \leq p^{-2}(1-p)^{1+\alpha} \int_1^\infty x^\alpha p^x\mathrm{d} x \\ & = p^{-2}\left(\frac{1-p}{\log(p^{-1})}\right)^{1+\alpha} \int_{\log(p^{-1})}^\infty x^\alpha e^{-x}\mathrm{d} x \\ & \leq 4\Gamma(1+\alpha) \end{flalign*} since $\left(\frac{1-p}{\log(p^{-1})}\right)^{1+\alpha}\leq 1$. For $p\in(0,1/2]$ we have that \[p^{-1}(1-p)^{1+\alpha}\sum_{k=1}^\infty k^\alpha p^k \leq \sum_{k=1}^\infty k^\alpha 2^{-(k-1)}\] which converges. \end{proof} We now introduce some notation concerning hitting and regeneration times. Recall that $\sigma_x:=\inf\{n\geq 1:Z_n=x\}$ is the first return time to $x\in\mathbf{T}$. Let $S(0):=0$, $S(n):=\inf\{k> S(n-1):Z_k,Z_{k-1} \in \mathbf{T}_g\}$ for $n\geq 1$ and $Y_n:=Z_{S(n)}$, then $Y_n$ is a $\lambda$-biased random walk on $\mathbf{T}_g$ coupled to $Z_n$. Write $\zeta_0:=0$ and for $m=1,2,...$ let \[\zeta_m:=\inf\{k>\zeta_{m-1}:d(Y_j)< d(Y_k), d(Y_l)>d(Y_{k-1}) \text{ for all } j<k\leq l\} \] be regeneration times for the walk $Y$. 
We then have that $\tau_k=\inf\{m\geq 0: Z_m=Y_{\zeta_k}\}$ are the corresponding regeneration times for $Z$ and we define $\varrho_k:=Z_{\tau_k}=Y_{\zeta_k}$ to be the regeneration points. By Proposition 3.4 of \cite{LPP3} we have that there exists, $\mathit{P}_\lambda$-a.s., an infinite sequence of regeneration times $(\tau_k)_{k\geq 1}$ and \[\left\{\left(\tau_{k+1}-\tau_k\right),\left(d(\varrho_{k+1})-d(\varrho_k)\right)\right\}_{k\geq1}\] are \emph{i.i.d.\ } (as are the corresponding variables for $Y$). Let $\xi_f,\xi_g,\xi_h$ be random variables with probability generating functions $f,g$ and $h$ respectively then let $\xi$ be equal in distribution to the number of vertices in the first generation of $\mathbf{T}$. Throughout we will assume that $\xi_f$ has some exponential moments. \begin{rem}\label{r:mom} Since the generation sizes of $\mathbf{T}_g$ are dominated by those of $\mathbf{T}$ we have that $\xi_g$ is stochastically dominated by $\xi$. Using Bayes' law we have that $\mathbf{P}(\xi=k)= p_k(1-q^k)(1-q)^{-1} \leq cp_k$ therefore both $\xi$ and $\xi_g$ inherit the exponential moment bounds of $\xi_f$. Furthermore $\mathbf{P}(\xi_h=k)= p_kq^k$ therefore $\xi_h$ automatically has exponential moments. \end{rem} We now show that the duration of an excursion in a single trap has finite $\alpha^{\text{th}}$ moments (uniformly for the bias in a small ball). If $p_0=0$ then traps are trivial therefore assume that $p_0>0$. Denote by $\mathbf{T}_h$ a GW tree with this law and $\mathbf{T}_h^*$ the tree $\mathbf{T}_h$ where we append an additional vertex ${e^*}(\mathbf{T}_h)$ as the root in the usual way (for convenience we write ${e^*}$ when there is no confusion). Let $W_k^{\mathbf{T}_h^*}$ denote the $k^{\text{th}}$ generation size of the tree $\mathbf{T}_h^*$. We denote by $\mathit{P}_{\lambda,x}^{\mathbf{T}_h^*}$ the quenched law of the walk with bias $\lambda$ started from $x$. 
\begin{lem}\label{l:subComp} Suppose $p_0>0$ and $a<1$ then, for any $\alpha<\log(\lambda_c)/\log(a)$, \[\sup_{\lambda\geq a}\mathbb{E}\left[\mathit{E}^{\mathbf{T}_h^*}_{\lambda}\left[\sigma_{e^*}^\alpha\right]\right] <\infty.\] \begin{proof} Write $\underline{\alpha}:=\max\{k\in\mathbb{Z}: k<\alpha\}$. Throughout we will use that for $N\in\mathbb{N}$ and $x_n\in\mathbb{R}_+$ for $n=1,...,N$ we have \begin{flalign}\label{e:Con} \left(\sum_{n=1}^Nx_n\right)^\alpha\leq N^{\underline{\alpha}}\sum_{n=1}^Nx_n^\alpha \end{flalign} which follows from convexity for $\alpha\geq 1$ and the bound $||\cdot||_{1/\alpha}\leq ||\cdot||_1$ for $l^p$ norms with $\alpha<1$. We can write \[\sigma_{e^*}=\sum_{x \in \mathbf{T}_h^*}v_x \qquad \text{where} \qquad v_x=\sum_{k=0}^{\sigma_{e^*}-1}\mathbf{1}_{\{Z_k=x\}}\] is the number of visits to $x$ before returning to ${e^*}$. By \eqref{e:Con} it then follows that \begin{flalign}\label{e:Jen2} \mathbb{E}\left[\mathit{E}^{\mathbf{T}_h^*}_{\lambda}\left[\sigma_{e^*}^\alpha\right]\right] \;=\; \mathbb{E}\left[\mathit{E}^{\mathbf{T}_h^*}_{\lambda}\left[\left(\sum_{x \in \mathbf{T}_h^*}v_x\right)^\alpha\right]\right] \;\leq\; \mathbb{E}\left[d(\mathbf{T}_h^*)^{\underline{\alpha}}\sum_{x \in \mathbf{T}_h^*}\mathit{E}^{\mathbf{T}_h^*}_{\lambda}\left[v_x^\alpha\right]\right] \end{flalign} where, using a decomposition up to the first hitting time of $x$ we have that \[\mathit{E}^{\mathbf{T}_h^*}_{\lambda,{e^*}}\left[v_x^\alpha\right]=\mathit{P}^{\mathbf{T}_h^*}_{\lambda,{e^*}}\left(\sigma_x<\sigma_{e^*}\right)\mathit{E}^{\mathbf{T}_h^*}_{\lambda,x}\left[v_x^\alpha\right]\leq \mathit{E}^{\mathbf{T}_h^*}_{\lambda,x}\left[v_x^\alpha\right].\] Started from $x$, for the walk to reach to $e^*$ before returning to $x$, the walk must initially move to $\pi(x)$. 
It follows that the number of visits to $x$ before reaching $e^*$ is geometrically distributed with termination probability \begin{flalign}\label{e:GR} \mathit{P}^{\mathbf{T}_h^*}_{\lambda,x}(\sigma_{e^*}<\sigma_x)=\frac{\lambda}{\lambda+\nu(x)}\cdot \mathit{P}^{\mathbf{T}_h^*}_{\lambda,\pi(x)}(\sigma_{e^*}<\sigma_x) \end{flalign} where $\mathit{P}^{\mathbf{T}_h^*}_{\lambda,\pi(x)}(\sigma_{e^*}<\sigma_x)$ depends only on $\lambda$ and the distance between $e^*$ and $x$. By Lemma \ref{l:GeoAlpha} we have that, for some constant $C_{\alpha}$, \begin{flalign}\label{e:Drp} \mathit{E}^{\mathbf{T}_h^*}_{\lambda,x}\left[v_x^\alpha\right] \leq C_{\alpha}\mathit{P}^{\mathbf{T}_h^*}_{\lambda,x}(\sigma_{e^*}<\sigma_x)^{-\alpha}\mathit{P}^{\mathbf{T}_h^*}_{\lambda,x}(\sigma_x<\sigma_{e^*})\leq C_{\alpha}\mathit{P}^{\mathbf{T}_h^*}_{\lambda,x}(\sigma_{e^*}<\sigma_x)^{-\alpha}. \end{flalign} For $r\in\mathbb{N}$ write \[\mathcal{R}(\lambda,\alpha,r)=\begin{cases} r^\alpha & \text{if } \lambda=1,\\ \lambda^{-r\alpha} & \text{if } \lambda < 1,\\ 1 & \text{if } \lambda > 1,\end{cases}\] then, by the Gambler's ruin and \eqref{e:GR}, we have that \[\mathit{P}^{\mathbf{T}_h^*}_{\lambda,x}(\sigma_{e^*}<\sigma_x)^{-\alpha}\leq (1+\lambda^{-1}\nu(x))^\alpha\mathcal{R}(\lambda,\alpha,d^*(x))\] where $d^*(x)$ denotes the distance between $x\in\mathbf{T}_h^*$ and the root $e^*$. 
Substituting this with \eqref{e:Drp} into \eqref{e:Jen2} and using \eqref{e:Con} we have that \begin{flalign*} \mathbb{E}\left[\mathit{E}^{\mathbf{T}_h^*}_{\lambda,e^*}\left[\sigma_{e^*}^\alpha\right]\right] &\leq C_{\alpha}\mathbb{E}\left[d(\mathbf{T}_h^*)^{\underline{\alpha}}\sum_{x \in \mathbf{T}_h^*}(1+\lambda^{-1}\nu(x))^{\alpha}\mathcal{R}(\lambda,\alpha,d^*(x))\right]\\ &\leq \tilde{C}_{\alpha}\mathbb{E}\left[d(\mathbf{T}_h^*)^{\underline{\alpha}}\sum_{x \in \mathbf{T}_h^*}(1+\lambda^{-\alpha}\nu(x)^{\alpha})\mathcal{R}(\lambda,\alpha,d^*(x))\right]\\ &\leq \tilde{C}_{\alpha}\mathbb{E}\left[d(\mathbf{T}_h^*)^{\underline{\alpha}}\sum_{k=0}^\infty W_k^{\mathbf{T}_h^*}\left(1+\lambda^{-\alpha}\left(W_{k+1}^{\mathbf{T}_h^*}\right)^{\underline{\alpha}+1}\right)\mathcal{R}(\lambda,\alpha,k)\right] \end{flalign*} where, for the final inequality, we have replaced the sum over vertices in the tree with a sum over the generations and bounded the number of children of a vertex in generation $k$ with the total number of vertices in generation $k+1$. Since $W_1^{\mathbf{T}_h^*}=1=W_0^{\mathbf{T}_h}$ and $W_{k+1}^{\mathbf{T}_h^*}=W_k^{\mathbf{T}_h}$ for $k\geq 1$, we have that $1+\lambda^{-\alpha}\left(W_{k+1}^{\mathbf{T}_h^*}\right)^{\underline{\alpha}+1}\leq C\sum_{j=0}^\infty \left(W_j^{\mathbf{T}_h}\right)^{\underline{\alpha}+1}$ for any $k\geq 1$ and a constant $C\leq 1+a^{-\alpha}$. The process $W_n^{\mathbf{T}_h}$ is a GW-process with offspring distribution $\xi_h$ which has mean $\lambda_c$ and exponential moments. 
It therefore follows from Lemma \ref{l:GWProd} that \begin{flalign} \mathbb{E}\left[\mathit{E}^{\mathbf{T}_h^*}_{\lambda,e^*}\left[\sigma_{e^*}^\alpha\right]\right] &\leq C_{\alpha}\mathbb{E}\left[\sum_{k_1=0}^\infty\dots\sum_{k_{2\underline{\alpha}+2}=0}^\infty \mathcal{R}(\lambda,\alpha,k_1)\prod_{j=1}^{2\underline{\alpha}+2} W_{k_j}^{\mathbf{T}_h^*}\right] \notag\\ &\leq C_{\alpha}\sum_{k_1=0}^\infty\dots\sum_{k_{2\underline{\alpha}+2}=0}^\infty \mathcal{R}(\lambda,\alpha,k_1)\mathbb{E}\left[\prod_{j=1}^{2\underline{\alpha}+2} W_{k_j}^{\mathbf{T}_h^*}\right] \notag\\ &\leq \tilde{C}_{\alpha}\sum_{k_1=0}^\infty\dots\sum_{k_{2\underline{\alpha}+2}=0}^\infty \mathcal{R}(\lambda,\alpha,k_1)\lambda_c^{\max_{j\leq 2\underline{\alpha}+2}k_j}. \label{e:GWSum} \end{flalign} Taking first those terms in \eqref{e:GWSum} where $k_1\geq k_j$ for all $j$, we have \begin{flalign*} &\sum_{k_1=0}^\infty\dots\sum_{k_{2\underline{\alpha}+2}=0}^\infty \mathbf{1}_{\{k_1=\max_{j\leq 2\underline{\alpha}+2}k_j\}}\mathcal{R}(\lambda,\alpha,k_1)\lambda_c^{k_1} \\ & \leq (2\underline{\alpha}+1)\sum_{k_1=0}^\infty (k_1+1)\mathcal{R}(\lambda,\alpha,k_1)\lambda_c^{k_1} \end{flalign*} which is bounded above uniformly over $\lambda\geq a$ since $a^{-\alpha}\lambda_c<1$ by our choice of $\alpha$. Next, writing $m=\max_{j=2,...,2\underline{\alpha}+2}k_j$, taking the remaining terms in \eqref{e:GWSum} and noting that \[\sum_{k_1=0}^{m-1}\mathcal{R}(\lambda,\alpha,k_1) \leq m^2a^{-m}\] we have \begin{flalign*} \sum_{k_1=0}^\infty\dots\sum_{k_{2\underline{\alpha}+2}=0}^\infty \mathbf{1}_{\{k_1<m\}}\mathcal{R}(\lambda,\alpha,k_1)\lambda_c^{m} & \leq (2\underline{\alpha}+1)\sum_{m=1}^\infty m^3a^{-m\alpha}\lambda_c^{m} \end{flalign*} which is finite by our choice of $\alpha$. \end{proof} \end{lem} Let $\chi_k:=S(k+1)-S(k)$ denote the total time taken between $Z_n$ making the $k^{\text{th}}$ and $(k+1)^{\text{th}}$ transition along the backbone.
This time consists of \[N_k:=\sum_{n=S(k)+1}^{S(k+1)}\mathbf{1}_{\{Z_n=Y_k\}}\] excursions into the finite trees appended to the backbone at this vertex and one additional step to the next backbone vertex. Write $\vartheta_k^{(0)}:=S(k)$ and $\vartheta_k^{(j)}:=\inf\{n>\vartheta_k^{(j-1)}:Z_n=Y_k\}$ for $j\geq 1$ to be the hitting times of the backbone after time $S(k)$. We can then write \begin{flalign}\label{e:etaK} \chi_k:=1+\sum_{j=1}^{N_k}\gamma_{k,j} \qquad \text{ where } \qquad \gamma_{k,j}:=\vartheta_k^{(j)}-\vartheta_k^{(j-1)} \end{flalign} is the duration of the $j^{\text{th}}$ such excursion. \begin{proof}[Proof of Proposition \ref{p:UniMom}] Recall that $\underline{\alpha}:=\max\{k\in\mathbb{Z}: k<\alpha\}$ and write $\overline{\alpha}:=\min\{k\in\mathbb{Z}: k\geq\alpha\}$. We therefore have that $\mathit{E}^{\tt NB}_\lambda\left[\tau_1^\alpha\right] $ can be written as \begin{flalign*} \mathit{E}^{\tt NB}_\lambda\left[\tau_1^\alpha\right] &= \mathit{E}^{\tt NB}_\lambda\left[\left(\sum_{k=1}^{\zeta_1}\chi_k\right)^\alpha\right] \\ &\leq \mathit{E}^{\tt NB}_\lambda\left[\zeta_1^{\underline{\alpha}}\sum_{k=1}^{\zeta_1}\chi_k^\alpha\right] \end{flalign*} by \eqref{e:Con}. Using \eqref{e:Con} again with the decomposition \eqref{e:etaK} we can write this as \begin{flalign*} &\mathit{E}^{\tt NB}_\lambda\left[\zeta_1^{\underline{\alpha}}\sum_{k=1}^{\zeta_1} \!\left(1+\sum_{j=1}^{N_k}\gamma_{k,j}\right)^\alpha\!\right] \\ & \qquad \quad \leq \mathit{E}^{\tt NB}_\lambda\left[\zeta_1^{\underline{\alpha}}\sum_{k=1}^{\zeta_1}(N_k+1)^{\underline{\alpha}}\left(1+\sum_{j=1}^{N_k}\gamma_{k,j}^\alpha\right) \right]. \end{flalign*} The excursion times $\gamma_{k,j}$ are distributed as the first return time to $e^*$ for a walk started from $e^*$ on $\mathbf{T}_h^*$. Moreover, under $\mathit{P}^{\tt NB}_\lambda$, they are independent of the backbone, the buds and the walk on the backbone and buds. 
In particular, they are independent of the regeneration times of $Y$ and the number of excursions; therefore the above expectation can be bounded above by \begin{flalign*} \mathbb{E}\left[\mathit{E}^{\mathbf{T}_h^*}_{\lambda}\left[\sigma_{e^*}^\alpha\right]\right] \mathit{E}^{\tt NB}_\lambda\left[\zeta_1^{\underline{\alpha}}\sum_{k=1}^{\zeta_1}(N_k+1)^{\overline{\alpha}} \right]. \end{flalign*} Here, by Lemma \ref{l:subComp}, we have that $\sup_{\lambda\in[a,b]}\mathbb{E}\left[\mathit{E}^{\mathbf{T}_h^*}_{\lambda}\left[\sigma_{e^*}^\alpha\right]\right] <\infty$. Let $(z_j)_{j=0}^\infty$ denote the ordered distinct vertices visited by $Y$ and \[\mathcal{L}(z,j):=\sum_{k=0}^j \mathbf{1}_{\{Y_k=z\}}, \quad \mathcal{L}(z):=\mathcal{L}(z,\infty)\] the local times of the vertex $z$. Write \[M_{z,l}:=\sum_{j=0}^\infty \mathbf{1}_{\left\{Z_j=z,\; Z_{j+1} \notin \mathbf{T}_g, \; \mathcal{L}(z,j)=l\right\}}\] to be the number of excursions from $z$ (by $Z$) on the $l^{\text{th}}$ visit to $z$ (by $Y$) for $l=1,...,\mathcal{L}(z)$ and $\mathcal{J}:=|\{Y_j\}_{j=1}^{\zeta_1-1}|$ the number of distinct vertices visited by $Y$ between time $1$ and time $\zeta_1-1$.
Each $k\leq \zeta_1$ corresponds to a unique pair $(z_j,l)$ with $j\leq \mathcal{J}$ and $l\leq \mathcal{L}(z_j)$ with $M_{z_j,l}=N_k$ therefore \begin{flalign} & \mathit{E}^{\tt NB}_\lambda\left[\zeta_1^{\underline{\alpha}}\sum_{k=1}^{\zeta_1}(N_k+1)^{\overline{\alpha}} \right] \notag \\ & \quad = \mathit{E}^{\tt NB}_\lambda\left[\zeta_1^{\underline{\alpha}}\sum_{j=1}^{\mathcal{J}}\sum_{l=1}^{\mathcal{L}(z_j)}(M_{z_j,l}+1)^{\overline{\alpha}} \right] \notag \\ & \quad = \sum_{j=1}^{\infty}\sum_{l=1}^{\infty}\mathit{E}^{\tt NB}_\lambda\left[\zeta_1^{\underline{\alpha}}\mathbf{1}_{\{j\leq \mathcal{J}, \; l\leq \mathcal{L}(z_j)\}}(M_{z_j,l}+1)^{\overline{\alpha}}\right] \notag \\ & \quad \leq \sum_{j=1}^{\infty}\sum_{l=1}^{\infty}\Big(\mathit{E}^{\tt NB}_\lambda\left[\zeta_1^{2\underline{\alpha}}\mathbf{1}_{\{j\leq \mathcal{J}, \; l\leq \mathcal{L}(z_j)\}}\right]\mathit{E}^{\tt NB}_\lambda\left[(M_{z_j,l}+1)^{2\overline{\alpha}} \right]\Big)^{1/2} \label{e:CSBnd} \end{flalign} by the Cauchy-Schwarz inequality. For all $1\leq j\leq \mathcal{J}$ we have that $\mathcal{L}(z_j)\leq \zeta_1$; moreover, $\mathcal{J}\leq \zeta_1$ therefore \[\mathbf{1}_{\{j\leq \mathcal{J}, \; l\leq \mathcal{L}(z_j)\}}\leq\mathbf{1}_{\{j,l\leq \zeta_1\}}.\] Due to the independence structure of the GW-tree, for any fixed $j$ the distribution of the number of children of $z_j$ is equal to the distribution of the number of children of the root. Since the root does not have a parent, we have that the walk is more likely to take an excursion into one of the neighbouring traps when at the root than from a vertex with the same number of children. We can, therefore, stochastically dominate the number of excursions from a backbone vertex by the number of excursions from the root to see that $\mathit{E}^{\tt NB}_\lambda\left[(M_{z_j,l}+1)^{2\overline{\alpha}} \right]\leq \mathit{E}^{\tt NB}_\lambda\left[(M_{z_0,1}+1)^{2\overline{\alpha}} \right]$. 
Using this and the Cauchy-Schwarz inequality, the expression \eqref{e:CSBnd} is bounded above by \begin{flalign*} & \mathit{E}^{\tt NB}_\lambda\!\left[\zeta_1^{4\underline{\alpha}}\right]^{1/4}\mathit{E}^{\tt NB}_\lambda\!\left[(M_{z_0,1}+1)^{2\overline{\alpha}} \right]^{1/2}\sum_{j,l=1}^{\infty}\mathit{P}^{\tt NB}_\lambda\left(j,l\leq \zeta_1\right)^{1/4}. \end{flalign*} By Remark \ref{r:mom} the offspring distribution $\xi_g$ has exponential moments, we therefore have that the time between regenerations of $Y$ has finite $4\underline{\alpha}$ moments uniformly over $\lambda \in[a,b]$ by Proposition \ref{p:UnMo}. That is, $\sup_{\lambda\in[a,b]}\mathit{E}^{\tt NB}_\lambda\left[\zeta_1^{4\underline{\alpha}}\right]<\infty$. Write $W_n$ and $W^g_n$ to be the GW-processes associated with $\mathbf{T}$ and $\mathbf{T}_g$. The number of excursions from the root is geometrically distributed with termination probability $1-p_{ex}$ where \[p_{ex}:=\frac{W_1-W_1^g}{W_1}.\] Using Lemma \ref{l:GeoAlpha} we therefore have that, for a constant $C$ independent of $\lambda$, \[\mathit{E}^{\tt NB}_\lambda\left[(M_{z_0,1}+1)^{2\overline{\alpha}} \right] \;\leq \; C\mathbb{E}[(1-p_{ex})^{-2\overline{\alpha}}] \;\leq \; C\mathbb{E}[W_1^{2\overline{\alpha}}] \;< \;\infty\] since $W_1\stackrel{\text{\tiny{d}}}{=} \xi$ which has exponential moments. It remains to show that \begin{flalign}\label{e:dubsum} \sum_{j=1}^{\infty}\sum_{l=1}^{\infty}\mathit{P}^{\tt NB}_\lambda\left(j,l\leq \zeta_1\right)^{1/4} \end{flalign} is finite. Note that $\mathit{P}^{\tt NB}_\lambda\left(j,l\leq\zeta_1\right)=\mathit{P}^{\tt NB}_\lambda\left(\zeta_1\geq l\right)$ whenever $l\geq j$. 
Using Chebyshev's inequality we can then bound \eqref{e:dubsum} above by \begin{flalign*} 2\sum_{j=1}^{\infty}\sum_{l=j}^{\infty}\mathit{P}^{\tt NB}_\lambda\left(\zeta_1\geq l\right)^{1/4} \leq 2\sum_{j=1}^{\infty}\sum_{l=j}^{\infty} \left(\frac{\mathit{E}^{\tt NB}_\lambda\left[\zeta_1^u\right]}{l^u}\right)^{1/4} \end{flalign*} for any integer $u$. In particular, we have that $\sup_{\lambda\in[a,b]}\mathit{E}^{\tt NB}_\lambda\left[\zeta_1^u\right]$ is finite for any integer $u$ by Proposition \ref{p:UnMo}. Choosing $u>8$ we then have that this sum is finite which completes the proof of the moment estimate of $\tau_1$. \end{proof} Finally, we complete the proof of Proposition \ref{est:sigma}. \begin{proof}[Proof of Proposition \ref{est:sigma}] We first observe that \begin{align}\label{est:last} \mathbb{E}\left[E_{\lambda}^{\mathbf{T}^*}\left[\sigma_{e^*(\mathbf{T})}\mathbf{1}_{\{\sigma_{e^*(\mathbf{T})}<\infty\}}\right]\right] \leq E_{\lambda}\left[1+\sum_{k=1}^{\sigma^Y_{e^*(\mathbf{T}_g)}}\chi_k\ ;\ \sigma_{e^*(\mathbf{T})}<\infty\right], \end{align} where $\sigma^Y_{e^*(\mathbf{T}_g)}$ is the first time that $(Y_n)$ returns to $e^*(\mathbf{T}_g)$. The reason why \eqref{est:last} is an inequality is that the walk $(Z_n)$ may enter traps attached to $e(\mathbf{T})=e(\mathbf{T}_g)$ and return to $e^{*}(\mathbf{T})=e^*(\mathbf{T}_g)$ without any transitions on the backbone $\mathbf{T}_g$. It is straightforward to check \eqref{est:last} by using Lemma \ref{lem:est:sigma}, Lemma \ref{l:subComp} and arguments in the proof of Proposition \ref{est:sigma}. \end{proof} \end{document}
\begin{document} \begin{abstract} We give a combinatorial definition of ``core entropy'' for quadratic polynomials as the growth exponent of the number of certain precritical points in the Julia set (those that separate the $\alpha$ fixed point from its negative). This notion extends known definitions that work in cases when the polynomial is postcritically finite or when the topology of the Julia set has good properties, and it applies to all quadratic polynomials in the Mandelbrot set. We prove that core entropy is continuous as a function of the complex parameter. In fact, we model the Julia set as an invariant quadratic lamination in the sense of Thurston: this depends on the external angle of a parameter in the boundary of the Mandelbrot set, and one can define core entropy directly from the angle in combinatorial terms. As such, core entropy is continuous as a function of the external angle. Moreover, we prove a conjecture of Giulio Tiozzo about local and global maxima of core entropy as a function of external angles: local maxima are exactly dyadic angles, and the unique global maximum within any wake occurs at the dyadic angle of lowest denominator. We also describe where local minima occur. An appendix by Wolf Jung relates different concepts of core entropy and biaccessibility dimension and thus shows that biaccessibility dimension is continuous as well. \end{abstract} \maketitle \section{Introduction and Statement of Results} Topological entropy of a topological dynamical system $f\colon K\to K$ measures the complexity of the dynamical system, roughly speaking as follows. If $K=\bigcup U_i$ is covered by some number of open sets, denote by $N(n)$ the number of allowed itineraries of length $n$: these are sequences $i(0),\dots,i(n-1)$ for which there exists some $x\in K$ with $f^{\circ k}(x)\in U_{i(k)}$ for $k=0,1,\dots,n-1$.
Then the growth exponent of $N(n)$ is the topological entropy of $(K,f)$; more precisely, topological entropy is defined as $h=\limsup_n\frac{1}n \log N(n)$. For a precise definition and equivalent descriptions, see for instance \cite{BrinStuck} or \cite{deMelovanStrien}. In their seminal paper \cite{MilnorThurston}, Milnor and Thurston investigated topological entropy of continuous interval maps. Specifically for the quadratic family $f_\lambda\colon x\mapsto \lambda x(1-x)$ acting on $I=[0,1]$ they proved that topological entropy as a function of $\lambda$ is monotone and continuous. Misiurewicz and Sz{\l}enk \cite{MisiurewiczSzlenk} wrote one of the early papers that gave various equivalent interpretations of topological entropy for interval maps, for example as growth exponents of intervals of monotonicity of $f^{\circ n}$ or of the number of periodic points of period $n$. Since then, there has been a lot of activity on related questions; see for instance \cite{deMelovanStrien} for an overview. In the last years of his life, William Thurston raised the issue of finding a good definition of topological entropy of a complex polynomial $p$ of degree $d$. Viewing $p$ as a map on $\mathbb C$ or on the filled-in Julia set, the entropy is clearly $\log d$ and not very interesting. The same holds of course when $p$ is a real polynomial (considered as a self-map of $\mathbb C$), but in that case there may be a dynamically interesting invariant interval in $\mathbb R$ that is invariant and that contains the orbits of the critical points: the restriction to this interval is the dynamically interesting object, and the aforementioned studies were concerned with self-maps of such intervals. What is the analogous object for complex polynomials, or what is an interesting definition of topological entropy? 
If the polynomial $p$ is postcritically finite (that is, all critical points are periodic or preperiodic), then it has a natural invariant tree called its Hubbard tree $H$, and Thurston defined the \emph{core entropy} of $p$ as the topological entropy of the restriction to $H$; see \cite{bghkltt,taoli}. This entropy can also be defined in certain other cases: for instance, for certain maps one can define a finite tree in analogy to the Hubbard tree that is still invariant and contains the critical orbit, even if the latter is infinite (in analogy to the case of real polynomials). However, in the general case there are difficulties: the filled-in Julia set may not be path connected so that there may not be a tree, or the critical orbit may be dense in the filled-in Julia set so that any tree would have to be infinite, and topological entropy might seem to be equal to $\log d$ (this happens for a dense set of parameters on $\partial\mathscr M$, so if entropy with this definition was continuous then it would have to be constant). Thurston asked for a definition of core entropy that would work in all cases. Moreover, in a seminar in Cornell in the spring of 2012 he raised the question whether core entropy could be continuous as a function of the complex parameter. This resulted in a bet between him and John Hubbard, which was one original inspiration for starting our work. One possible definition is in terms of \emph{biaccessibility dimension}: that is the Hausdorff dimension of all angles $\vartheta\in\mathbb Circle$ for which there exists another angle $\vartheta'\neq\vartheta$ so that the dynamic rays at angles $\vartheta$ and $\vartheta'$ land at a common point. Thurston had shown that in the cases for which his definition of core entropy applied that it was equal to biaccessibility dimension (up to a factor of $\log d$). 
Earlier we had shown \cite{MeerkampSchleicher} that the biaccessibility dimension is always less than $1$, except when the Julia set is an interval, strengthening earlier work by Smirnov \cite{Smirnov}, Zakeri \cite{Zakeri}, and Zdunik \cite{Zdunik}. Quite recently, Tiozzo wrote an interesting thesis on core entropy and related questions \cite{TiozzoThesis}, in particular with a relation to the thermodynamic formalism. Some additional history, as well as the relation between core entropy and biaccessibility dimension in the general case, are given in the appendix by Wolf Jung. Bill Thurston inspired a number of people to investigate core entropy. In particular, there are a survey on current work and open problems by Tan Lei~\cite{TanLeiEntropy}, a manuscript by Wolf Jung \cite{Jung}, and two manuscripts by Giulio Tiozzo \cite{TiozzoThesis,TiozzoPaper}. We propose a general definition of core entropy that coincides with Thurston's in all cases he considered, and that does not require the Julia set to have particular properties (such as pathwise connectivity or the existence of an invariant compact tree that might generalize the Hubbard tree), nor does it require the concepts of thermodynamic formalism or biaccessibility dimension. Instead, our approach is purely combinatorial and thus applies in great generality; here we work only on the case of quadratic polynomials with connected Julia sets. Of course, in the cases where the usual definitions of topological entropy apply, our definition agrees with them. More precisely, we define two entropy functions: \begin{itemize} \item $h\colon \mathbb Circle\to [0,\log 2]$, assigning to every external angle $\vartheta\in\mathbb Circle$ the core entropy of the lamination associated to the angle $\vartheta$. \item $\tilde h\colon \mathscr M\to[0,\log2]$, assigning to every $c\in\mathscr M$ (the Mandelbrot set) the core entropy of the quadratic polynomial $z\mapsto z^2+c$. 
\end{itemize} We describe these definitions in Section~\ref{Sec:Definitions}. Note that we define $\tilde h$ for every $c\in\mathscr M$, postcritically finite or not (and could extend it to every parameter $c\in\mathbb C$), and similarly for every $\vartheta\in\mathbb Circle$. These definitions are such that if the parameter ray at angle $\vartheta$ lands (or accumulates) at $c\in\partial \mathscr M$, then $h(\vartheta)=\tilde h(c)$. Our first result goes back to the bet between Bill Thurston and John Hubbard in the spring of 2012 and helped settle this bet soon after. \begin{theorem}[Continuity of Core Entropy] \lineclear Both entropy functions, $\tilde h\colon\mathscr M\to[0,\log 2]$ and $h\colon\mathbb Circle\to[0,\log 2]$, are continuous. \end{theorem} We prove continuity of $h$ in Theorem~\ref{Thm:Continuity}; the fact that this implies continuity of $\tilde h$ is easy and is explained in Section~\ref{Sec:Definitions}. An independent proof of continuity of core entropy can be found in the recent manuscript \cite{TiozzoPaper}. \begin{figure} \caption{The graph of the core entropy function $h\colon[0,1/2]\to[0,\log 2]$ (by Bill Thurston).} \label{Fig:EntropyGraph} \end{figure} Our second result settles a conjecture of Giulio Tiozzo \cite[Conjecture~1.6]{TiozzoThesis}. \begin{theorem}[Local Maxima of Core Entropy] \lineclear The entropy function $h\colon\mathbb Circle\to[0,\log 2]$ has the following properties. \begin{enumerate} \item[a)] Every dyadic angle is an isolated local maximum of the entropy function. \item[b)] Conversely, every local maximum of $h$ is dyadic. \item[c)] Within every wake, the entropy function has a unique global maximum, and it occurs at the unique dyadic angle of lowest denominator in the wake. \end{enumerate} \end{theorem} We will prove this in Theorem~\ref{Thm:TiozzoConj}.
In Section~\ref{Sec:Definitions}, we give all relevant definitions, in particular of the two entropy functions $h$ and $\tilde h$, and of invariant laminations; we define a wake as a closed interval in $\mathbb Circle$ bounded by two rational angles so that the corresponding rays land together (or more generally as any closed interval for which the two boundary angles have the same angled internal address, so the corresponding parameter rays land together at the same point in $\mathscr M$, or at least in its combinatorial models). The fundamental construction will be carried out in Section~\ref{Sec:TopSurgery}: we define a surgery construction of dyadic Hubbard trees (that is, Hubbard trees associated to external angles $a/2^k$) that lowers the dyadic exponent $k$ and increases the core entropy. We also describe the vein structure of $\mathscr M$ and introduce a relation $\lhd$ between dyadic veins. From this construction, it is no longer difficult to prove Tiozzo's conjecture on maxima of $h$; this will be done in Section~\ref{Sec:IrrationalTiozzoConj}: here we have to extend the estimates on entropy to non-dyadic external angles. We also discuss local minima of $h$ and $\tilde h$ in this section. In Section~\ref{Sec:Continuity} we then discuss continuity of entropy. The main result is that for any parameter $c\in\mathscr M$, if entropy is continuous along the (combinatorial) path from $0$ to that endpoint, then it is continuous in $\mathscr M$ at each of these points on the path. The difference is that we \emph{assume} continuity on a path, but continuity (the conclusion) makes a claim about an entire neighborhood in $\mathbb C$. It is known, by work of Tiozzo and Jung, that entropy along many paths in $\mathscr M$ is continuous, in particular to all dyadic endpoints. It remains to prove that entropy is continuous along paths to irrational endpoints of $\mathscr M$, and that we prove in Section~\ref{Sec:IrratEndpoints}. 
Finally, in a brief appendix, Wolf Jung discusses the relations of core entropy with biaccessibility dimension, and concludes that biaccessibility dimension is a continuous function as well. \emph{Acknowledgements}. We would like to thank Henk Bruin, John Hubbard, Tan Lei, Mikhail Lyubich, John Milnor, Bill Thurston, Giulio Tiozzo, Jean-Christophe Yoccoz and especially Wolf Jung for interesting and helpful discussions. In the spring of 2014, we had the opportunity to give various presentations about this result in Bremen, Moscow, and Stony Brook, and we thank the audiences for their questions and suggestions. Finally, we would like to thank Cornell University and the ICERM institute in Providence for their hospitality and support in the spring of 2012 where many of our initial discussions were carried out that yielded our first version of the proof. \goodbreak \section{Definitions} \label{Sec:Definitions} In this section we introduce our combinatorial definition of topological entropy that applies to all (quadratic) polynomials with connected Julia set, whether or not they are locally connected and whether or not they have (generalized) Hubbard trees, and if so whether the latter are compact. One advantage of our approach is that we work in an entirely combinatorial setting, so we never have to worry about topological issues. We work on invariant (quadratic) laminations as introduced by Thurston \cite{ThurstonLaminations}. For every angle $\vartheta\in\mathbb Circle\setminus\{0\}$, there is a unique invariant quadratic lamination where the minor leaf either ends at $\vartheta$ or is the degenerate leaf at $\vartheta$. This lamination will be called $L_\vartheta$. A \emph{precritical leaf of generation $n$} in this lamination will be any leaf on the backwards orbit of one of the two major leaves that takes $n$ generations to map to the minor leaf.
Arbitrarily choose one of these two major leaves as \emph{preferred major leaf} (in the special case that the minor leaf is degenerate, there is only one major leaf, which is a diameter, and in this case there is no choice). In an invariant quadratic lamination, the \emph{$\alpha$ gap} is either the leaf connecting the two angles $1/3$ and $2/3$, or it is the unique gap that is fixed by the dynamics (a finite or infinite polygon). \begin{definition}[Relevant Precritical Leaf and Entropy Associated to External Angle] \label{Def:CoreEntropy} We call a precritical leaf \emph{relevant} if it separates the $\alpha$ gap from its negative and if it is on the backwards orbit of the preferred major leaf, and define $N(n)=N_\vartheta(n)$ as the number of relevant precritical leaves of generation $n$. We define the \emph{core entropy} of this lamination as $h=h(\vartheta):=\limsup_n\frac 1 n \log N_\vartheta(n)$. In the special case $\vartheta=0$, the lamination is trivial and we naturally have $N_{\vartheta}(n):=0$. \end{definition} If the $\alpha$ gap is an infinite polygon, then $N_\vartheta(n)=0$ except for $n=1$, so $h(\vartheta)=0$. \begin{remark} A natural question is whether the $\limsup$ in the definition of entropy can be replaced by a simple $\lim$. This is not always so: Wolf Jung kindly pointed out to us the example of $c=c(9/56)$ where the Hubbard tree has the shape of a $\textsf{Y}$ so that the branch point is fixed and one endpoint maps to the second, which maps to the third, which in turn maps to the branch point. Here $N(n)=0$ for infinitely many $n$ while $h=(\log 2)/3>0$. There are thus counterexamples when the dynamics is renormalizable. We prove that immediate satellite renormalizability is the only obstruction (see Corollary~\ref{Cor:ExistenceLimit}). For now, observe that in the postcritically finite non-renormalizable case, the limit exists and equals the $\limsup$ because the associated subshift of finite type is irreducible. 
In the definition of relevant precritical leaf, we only count those leaves that are preimages of the preferred major leaf. Without this restriction, the count of $N(n)$ would increase by a factor $2$, which would yield the same definition of entropy, but we would lose monotonicity of $N(n)$ as in Lemma~\ref{Lem:Monotonicity} for the stupid reason that the degenerate minor leaf has only half as many preimages (of course, an alternate way to remedy this problem would be to count preimages of a degenerate minor leaf with multiplicity two). \end{remark} Thurston showed that the union of all minor leaves of all invariant quadratic laminations forms itself a lamination, called the \emph{quadratic minor lamination} QML \cite{ThurstonLaminations}. It turns out that $h$ is naturally defined on QML: since both ends of any leaf in QML define the same lamination, we can first extend the definition of $h$ to each separate leaf on QML. Complementary components of leaves in QML are called \emph{gaps}, and they come in two kinds: either they are finite polygons (corresponding to Misiurewicz-Thurston parameters) or have infinitely many boundary leaves (and describe hyperbolic components). In both cases, it is easy to see that $h$ is constant on all boundary leaves of any gap, so $h$ naturally extends to the disk on which QML is defined, as a constant function on the closure of each leaf and each gap. The equivalence relation defining QML is closed, so the quotient of the supporting closed unit disk by collapsing all leaves to points yields a topological Hausdorff space called the ``abstract Mandelbrot set'' $\mathscr M_{abs}$ (this construction is known as Douady's ``pinched disk model'' of $\mathscr M$). Since $h$ is constant on fibers of the quotient map $q\colon \ovl{\mathbb D} \to \mathscr M_{abs}$, it follows that $h$ is naturally a function on $\mathscr M_{abs}$.
Finally, there is the natural projection $\pi\colon\mathscr M\to\mathscr M_{abs}$ from the Mandelbrot set $\mathscr M$ to the abstract Mandelbrot set $\mathscr M_{abs}$. It is defined by mapping every landing point $c(\vartheta)$ of any rational parameter ray $\vartheta$ to the equivalence class of the angle $\vartheta$; then $\pi\colon\partial\mathscr M\to\partial\mathscr M_{abs}$ is the unique continuous extension, and from here it is easy to extend $\pi$ to a continuous map $\mathscr M\to\mathscr M_{abs}$ that on each component of the interior is either injective or constant (for details, see Douady~\cite{DouadyCompacts}). \looseness-1 We thus obtain a unique map $\tilde h=h\circ\pi\colon\mathscr M\to[0,\log 2]$, and continuity of $\tilde h$ follows from continuity of $h$, with continuity of $\pi$ being well known. More specifically, the map $\tilde h$ can also be constructed explicitly as follows. We define a \emph{ray pair} $RP(\phi,\phi')$ to be the union of two rays (in a dynamical plane or in parameter space) that land (or perhaps accumulate) at a common point, together with the union of their accumulation sets. Every ray pair divides $\mathbb C$ into two open components. \begin{definition}[Entropy Associated to Quadratic Polynomial in $\mathscr M$] \label{Def:CoreEntropyTilde} Let $p_c(z):=z^2+c$ be any quadratic polynomial with $c\in\mathbb C$ for which the critical value is in the Julia set, and let $\vartheta$ be the external angle of any parameter ray that lands or accumulates at the parameter $c$. The \emph{critical ray pair} will be the ray pair $RP(\vartheta/2,(1+\vartheta)/2)$, and a precritical ray pair of generation $n$ will be any ray pair on the backwards orbit of the critical ray pair that takes $n$ iterations to map to the ray $R(\vartheta)$.
If $p_c$ is such that the critical value is in the Fatou set, then it has an attracting or parabolic orbit and there is a unique periodic characteristic ray pair that lands on the boundary of the Fatou component containing the critical value; precritical ray pairs are then defined as ray pairs on its backward orbit, and their generation is the number of iterations it takes to map to the characteristic ray pair. We call a precritical ray pair \emph{relevant} if it separates the $\alpha$ fixed point from its negative, and define $\tilde N(n)$ as the number of relevant precritical ray pairs of generation $n$. We define the \emph{core entropy} of $p_c$ as $\tilde h=\tilde h(c):=\limsup_n\frac 1 n \log \tilde N(n)$. \end{definition} Note that we use the term ``separation'' in a combinatorial sense: the two rays in a separating ray pair either land together or accumulate at the same fiber. --- We do not wish to (or need to) deal with topological subtleties such as whether, in the Cremer case, a dynamic ray indeed accumulates at the critical value: every $c\in\mathscr M$ has naturally associated one or several external angles $\vartheta$ that define its dynamics, and this is sufficient for our combinatorial approach. We need to fix some notation on Hubbard trees of postcritically finite polynomials. The \emph{Hubbard tree} is a minimal tree within the filled-in Julia set connecting the postcritical set (subject to a natural condition on how to traverse bounded Fatou components in case some critical points are periodic). The \emph{marked points} or \emph{vertices} of the Hubbard tree are the endpoints, branch points, and the postcritical points. (In fact, all endpoints are postcritical points; critical points are not included in their own right, but they might be postcritical, for instance when they are periodic, and when the degree is greater than $2$ then they might also be branch points.)
Since the set of vertices is forward invariant, every edge (a closed arc connecting two vertices) maps over one or several entire edges, so that the image contains every edge the interior of which intersects the image; in other words, the edges form a Markov partition on the Hubbard tree. --- Here, we will only discuss quadratic polynomials and Hubbard trees. The usual definition of core entropy is modeled after the postcritically finite case. The finite set of edges on the tree form a Markov chain with associated transition matrix, where the matrix element $M_{i,j}$ is $0$, $1$, or $2$ if the edge $e_i$ covers the edge $e_j$ respectively $0$, $1$, or $2$ times. Having only non-negative real entries, this matrix has a leading eigenvalue which is real, and its logarithm is defined as the core entropy of the given postcritically finite parameter. This definition coincides with the classical definition of topological entropy of general dynamical systems, and it also applies in the postcritically infinite case as long as the Hubbard tree is defined (i.e.\ the Julia set is path connected) and still finite. However, the number of edges of the Hubbard trees is not locally bounded even among postcritically finite maps, which makes entropy estimates based on these transition matrices difficult. It is well known, at least in the postcritically finite case, that if $x$ is any point on the Hubbard tree and $N_x(n)$ is the number of preimages of $x$ of generation $n$, then $h=\limsup_n \frac 1 n \log N_x(n)$ is the core entropy. Since each of the finitely many edges, except those within ``renormalizable little Julia sets'', will cover the entire tree after finitely many iterations (Lemma~\ref{Lem:HubbardTreeRenormalization}), one can as well count only those preimages of $x$ that are on an arbitrary subset of the edges of the Hubbard tree, as long as at least one of these edges is not in a renormalizable little Julia set.
For instance, instead of counting precritical leaves on $[\alpha,-\alpha]$ (as in our definition above) we may count preimages on $[\alpha,\beta]$ (as we will do in Section~\ref{Sec:Continuity}) or on $[\beta,-\beta]$. \begin{lemma}[Definitions of Core Entropy Coincide] \label{Lem:defEntropy} \lineclear For postcritically finite polynomials, the core entropy as in Definition~\ref{Def:CoreEntropyTilde} coincides with the usual definition (in terms of transition matrices on finite Hubbard trees). \looseness-1 If for $p_c$ the critical value is in the impression of the dynamic ray at angle $\vartheta$, the core entropy of $p_c$ equals the core entropy of the lamination $L_\vartheta$. \end{lemma} It is well known that if several parameter rays accumulate at the same parameter in $\mathscr M$, then the laminations associated to their corresponding angles coincide, so these angles have the same entropy (if more than two rays accumulate at the same parameter, then the parameter is a Misiurewicz-Thurston parameter and the rays actually land there). If $\vartheta\in\mathbb{S}^1$ is so that the parameter ray of $\mathscr M$ at angle $\vartheta$ lands (in particular, if $\vartheta$ is rational), we define $c(\vartheta)$ as the landing point in $\mathscr M$ of the parameter ray at angle $\vartheta$, and within any connected Julia set we define $z(\vartheta)$ as the landing point of the dynamic ray at angle $\vartheta$ (if the latter ray lands). We define a partial order on $\mathscr M$ as follows: if $c,\tilde c$ are two parameters in $\mathscr M$, we say that $c\prec \tilde c$ if there is a parameter ray pair $RP(\phi^-,\phi^+)$ at periodic angles that separates $\tilde c$ simultaneously from $c$ and from the origin.
Following Milnor~\cite{MiOrbits}, a periodic or preperiodic ray pair $RP(\phi^-,\phi^+)$ (with $\phi^-,\phi^+\in\mathbb{S}^1$) in the dynamical plane of $p_c$ is called \emph{characteristic} if the dynamic rays $R(\phi^-)$ and $R(\phi^+)$ land together in such a way that they separate the critical value $c$ from the critical point $0$ as well as from all other rays landing at $\bigcup_{k\ge 0}p_{c}^{\circ k}(x)$. It is well known that every periodic ray pair $RP(\phi^-,\phi^+)$ has a unique characteristic ray pair on its forward orbit. Let us now state the Correspondence Theorem relating the combinatorics of dynamical and parameter ray pairs; see for instance \cite{MiOrbits} or \cite{MandelBranch}. \begin{theorem}[Correspondence Theorem] \label{thm:Corresp} \lineclear A ray pair $RP(\phi^-,\phi^+)$ in the dynamical plane of $p_c$ with $\phi^\pm$ rational is characteristic if and only if the parameter rays with angles $\phi^-$ and $\phi^+$ land together and separate the parameters $c$ and $0$ from each other. \end{theorem} \begin{lemma}[Monotonicity of Lamination and of Entropy] \label{Lem:Monotonicity} \lineclear If $c\prec \tilde c$, then $N_c(n) \le N_{\tilde c}(n)$ for all $n$ and thus $h(c) \le h(\tilde c)$. Moreover, any characteristic leaf in $L_c$ also occurs in $L_{\tilde c}$. \end{lemma} \begin{proof} It is routine to check that any precritical leaf of $c$ also ``occurs'' in the dynamics (or the lamination) of $\tilde c$: the major leaf (or leaves) in $L_{\tilde c}$ separate the two major leaves in $L_c$. Precritical leaves in $L_{c}$ are preimages of the pair of major leaves (the preimages of this pair are always ``parallel'', i.e.\ not separated by the critical value, because otherwise the forward orbit of the critical value would have to intersect the domain bounded by the two major leaves).
Each pair of preimages surrounds one preimage of the major leaf of $\tilde c$ (or a pair of preimages of the major leaves), and when such a preimage separates the $\alpha$ gap from its negative in $L_c$, then it also does so for $L_{\tilde c}$. Therefore, $N_{\tilde c}(n)\ge N_c(n)$ and $\tilde h(\tilde c)\ge \tilde h(c)$. The statement about characteristic leaves (or characteristic ray pairs) is well known and follows from the Correspondence Theorem~\ref{thm:Corresp}. \end{proof} For the record, we define a \emph{dyadic parameter} as a parameter $c$ that is the landing point of a parameter ray at a dyadic angle $a/2^m$; in this case, we call $m$ the \emph{generation} of $c$. \section{Topological Surgery on dyadic Hubbard trees} \label{Sec:TopSurgery} \emph{Combinatorial arcs and veins}. Conjecturally, the Mandelbrot set is path connected: every $c\in\mathscr M$ has an arc $[0,c]$ that connects it to the origin. Such arcs are unique when requiring that they traverse hyperbolic components only along internal rays (radial curves with respect to the parameterization by the multiplier map). In fact, for many parameters $c\in\mathscr M$ one can prove that such an arc actually exists (by work of Jeremy Kahn using Yoccoz' puzzle results, and by Johannes Riedl using quasiconformal surgery). However, we only need a combinatorial version of such arcs. One way of defining them is as follows: the abstract Mandelbrot set $\mathscr M_{abs}$ (as defined in Section~\ref{Sec:Definitions}) is well known to be locally connected and hence path connected, and it comes with a continuous projection $\pi\colon\mathscr M\to\mathscr M_{abs}$. For $c\in\mathscr M$, there exists a preferred path $\Gamma(c)\subset\mathscr M_{abs}$ connecting $\pi(0)$ to $\pi(c)$, and then we define $[0,c]:=\pi^{-1}(\Gamma(c))$.
It is not hard to describe this combinatorial arc $[0,c]$ in terms of internal rays of hyperbolic components and of fibers that separate $0$ from $c$; the details are somewhat tedious but not very enlightening. Specifically if $c$ is a dyadic parameter in $\mathscr M$, we call the combinatorial arc $[0,c]$ the \emph{long vein} of $c$. The \emph{vein} of $c$ is the shortest closed sub-arc of the long vein connecting $c$ to the union of the long veins of all dyadic parameters of lower generation than $c$. If $c$ and $c'$ are two dyadic parameters, then it is well known that their long veins intersect in an arc $[0,x]$, where $x$ is postcritically finite; this result is known as the ``branch theorem'' of $\mathscr M$ \cite{Orsay,MandelBranch}. Moreover, in the special case that $c$ and $c'$ are dyadic of equal generation, then $x$ is on the vein of a dyadic parameter $c''$ of lower generation. This means that veins (minus endpoints) are disjoint. \begin{definition}[Directly Subordinate Parameter $c\lhd c'$] \label{Def:DirectlySubordinate} \lineclear We say that $ c$ is \emph{directly subordinate} to $c'$ and write $c\lhd c'$ if the vein of $c$ terminates at an interior point of the vein of $c'$; in addition, any dyadic parameter whose vein terminates at $0$ is declared to be subordinate to $c(1/2)=-2$. \end{definition} If $c\lhd c'$, then necessarily the external angle of $c'$ has lower denominator than that of $c$. Note that this is not a transitive relation and thus not a partial order. A few directly subordinate dyadic parameters are illustrated in Figure~\ref{Fig:DirectlySubordinate}. \begin{figure} \caption{Illustration of directly subordinate parameters: we have $c(3/16)\lhd c(1/4)$, $c(1/4)\lhd c(1/2)$, and $c(3/8)\lhd c(1/2)$. 
Arrows indicate where the vein to some dyadic parameter terminates at the vein of the dyadic parameter to which it is directly subordinate.} \label{Fig:DirectlySubordinate} \end{figure} \begin{theorem}[Entropy Between Dyadic Hubbard Trees] \label{Thm:RelationDyadicTrees} \lineclear If $c\lhd c'$ are two dyadic parameters, then \begin{itemize} \item[a)] $N(n)\le N'(n)$ for all $n$, \item[b)] $N(n)<N'(n)$ for all sufficiently large $n$, and \item[c)] $\tilde h(c)<\tilde h(c')$. \end{itemize} \end{theorem} Here $N(n)$ and $N'(n)$ denote the numbers of relevant precritical points of generation $n$ for $p_c$ and for $p_{c'}$. This immediately implies a weak version of the Tiozzo conjecture: \begin{corollary}[Dyadic Version of Tiozzo Conjecture] \label{Cor:DyadicTiozzoConjecture}\lineclear Every dyadic angle $\vartheta$ has a neighborhood on which $h$, restricted to dyadic angles, assumes its unique maximum at $\vartheta$. \end{corollary} From here, we could use continuity of $h$ to prove the unrestricted version of the Tiozzo conjecture (which indeed was our strategy in an early version of the proof). We will argue the other way around: it is much easier to deduce the conjecture directly and use this in the proof of continuity. Let us state another corollary to the Correspondence Theorem~\ref{thm:Corresp}: it is a more precise version than monotonicity of entropy (as stated in Lemma~\ref{Lem:Monotonicity}) because even the lamination is monotone. \begin{corollary}[Monotonicity of Rational Lamination on Hubbard Tree] \label{Cor:CountPartsHubTree} Let $c,c'\in\mathscr M$ be two postcritically finite parameters such that $c\prec c'$. Suppose that in the dynamical plane of $p_c$ two periodic or preperiodic rays $R(\phi^-)$ and $R(\phi^+)$ land together at some point in the Hubbard tree of $p_c$, but not on the backwards orbit of the critical value. 
Then in the dynamical plane of $c'$ the dynamic rays $R(\phi^-)$ and $R(\phi^+)$ also land together and the landing point is in the Hubbard tree of $c'$. \end{corollary} \begin{proof} In the dynamic plane of $p_c$, let $x$ be the landing point of the dynamic ray pair $RP_c(\phi^-,\phi^+)$; it is by hypothesis in the Hubbard tree of $c$ and it must be a repelling periodic or preperiodic point because $p_c$ is postcritically finite. Let $W\subset\mathbb C$ be the set of parameters $c''$ for which the dynamic rays $R_{c''}(\phi^-)$ and $R_{c''}(\phi^+)$ land together at a repelling periodic or preperiodic point that is not precritical. By Theorem~\ref{thm:Corresp}, the set $W$ is open, and its boundary in $\mathbb C$ consists of parameters $c''\in\mathbb C\setminus\mathscr M$ where the critical value is on the forward orbit of one of the rays $R_{c''}(\phi^-)$ and $R_{c''}(\phi^+)$, as well as of parameters $c''\in\mathscr M$ where at least one of the two rays lands at a parabolic orbit or at a precritical point. By hypothesis, the set $W$ contains $c$ and is thus non-empty, and any parameter $c''\in\mathbb C\setminus\ovl W$ must be separated from $c$ by a parameter ray pair with angles on the forward orbit of $\phi^-$ or $\phi^+$. However, in the dynamical plane of $c$, all dynamic rays at such angles land at the Hubbard tree, and their configuration shows that there is no parameter ray pair available that, for $c'\succ c$, could bound $c'$ away from $c$. Thus $c'\in W$. \end{proof} \begin{definition}[Dynamical Counterpart to Parameter] \label{Def:DynamCounterpart} \lineclear Let $c$ be a postcritically finite parameter and suppose $c'\succ c$.
Then a (pre)periodic point $x$ in the dynamical plane of $p_{c'}$ is called the \emph{dynamical counterpart to $c$} in the following case: \begin{itemize} \item if $c$ is preperiodic, then $x$ is the landing point of the preperiodic dynamic rays at the same angles as $c$; \item if $c$ is periodic, then $x$ is the landing point of the periodic dynamic rays bounding (in the parameter plane) the subwake of $c$ containing $c'$. \end{itemize} \end{definition} In the periodic case, $c$ is the center of a hyperbolic component, say $H_c$, and the subwake of $c$ containing $c'$ is bounded by a periodic parameter ray pair landing at $\partial H_c$. The angles of this ray pair are the angles of two rays landing at $x$. For example, the $\alpha$ fixed point is the dynamical counterpart of $c=0$. In the preperiodic case, it is known that all dynamic rays with the same angles of the rays landing at $c$ also land together in the dynamical plane of $c'$. An equivalent definition is that $x$ is the unique repelling periodic or preperiodic point in the dynamical plane of $c'$ such that the itinerary of $x$ (with respect to the critical point) equals the (upper) kneading sequence of $c$. \begin{lemma}[Dynamical Counterpart is Characteristic] \label{Lem:DynCounterpartCharacteristic} \lineclear Every periodic or preperiodic point that is a dynamical counterpart is characteristic. \end{lemma} \begin{proof} If, in the dynamical plane of $p_{c'}$, the point $x$ is the dynamical counterpart of a parameter $c$, then $c'\succ c$, and $x$ is the landing point of at least two dynamic rays. If $p_c$ is critically strictly preperiodic, then the parameter rays landing at $c$ bound the wake that $c'$ is in, and this implies that $x$ is a characteristic preperiodic point. If $p_c$ is critically periodic, then the argument is similar, except that the parameter rays do not land at $c$ but at the root of the hyperbolic component containing $c$.
\end{proof} \begin{lemma}[Directly Subordinate Dyadics] \label{Lem:DirectlySubordinateDynamics} \lineclear \looseness-1 If $c$ and $c'$ are two dyadic parameters, then $c\lhd c'$ if and only if there is a postcritically finite parameter $c_*\in\mathscr M$ so that $c'$ is the dyadic of least generation within any sublimb of $c_*$, and $c$ is the dyadic of least generation within a different sublimb of $c_*$ than $c'$. In this case, denoting the external angles of $c$ and $c'$ by $\vartheta$ and $\vartheta'$, respectively, then in the dynamics of $c$ (or any other parameter in the same sublimb of $c_*$) there is a repelling (pre)periodic point $x_*$ that is the landing point of at least three dynamic rays that separate the dynamic rays at angles $0$, $\vartheta$, and $\vartheta'$. The point $x_*$ is the dynamical counterpart to $c_*$. \end{lemma} \begin{proof} Any two dyadic parameters are endpoints of $\mathscr M$, so by the Branch Theorem of the Mandelbrot set there is a unique postcritically finite parameter $c_*$ that contains $c$ and $c'$ in two different of its sublimbs. Let $c_0$ be the unique dyadic of least generation in any of the sublimbs of $c_*$; then $c_*$ is on the long vein of all three of $c_0$, $c$, and $c'$, and it is on the vein of $c_0$. The assumption that $c\lhd c'$ means that the vein of $c$ terminates at an interior point of the vein of $c'$, and hence it must terminate at the parameter $c_*$, so $c_*$ is an interior point of the vein of $c'$. Since $c_*$ is also an interior point of the vein of $c_0$, it follows that $c'=c_0$ (two veins can never have more than one point in common). Conversely, if $c'$ is the dyadic of least generation in the sublimb of $c_*$ and $c$ is the dyadic of least generation within a different sublimb of $c_*$ than $c'$, then $c_*$ is in the interior of the vein of $c'$ and the vein of $c$ terminates at $c_*$. This proves the first claim. 
For the second claim, we first consider the case that $c_*$ is a Mi\-siu\-re\-wicz-Thurston parameter; it is then the landing point of $s\ge 3$ rational parameter rays, say at angles $\vartheta_1,\dots,\vartheta_s$, so that the parameter rays at angles $0$, $\vartheta$, and $\vartheta'$ are in different sectors with respect to these parameter rays. Every parameter in any sublimb of $c_*$ has the property that the dynamic rays at angles $\vartheta_1,\dots,\vartheta_s$ land together at a repelling preperiodic point, and the claim follows. If $c_*$ is the center of a hyperbolic component, then the parameter $c$ is in a sublimb at internal angle $p/q\neq 1/2$, and in the dynamical plane of $c$ (or any parameter within the same sublimb) there is a repelling periodic point that is the landing point of $q\ge 3$ dynamic rays that separates the angles $0$, $\vartheta$ and $\vartheta'$ so that $\vartheta$ is in the largest sector not containing the angle $0$. \end{proof} \begin{lemma}[No Extra Branch Point] \label{Lem:NoExtraBranchPoint} \lineclear Suppose $c'\in\mathscr M$ is a dyadic parameter and $c\in\mathscr M$ is the postcritically finite parameter where the vein of $c'$ ends. Let $x$ be the dynamical counterpart of $c$ in $H'$. Then the arc $(x,c']\underline{s}ubset H'$ does not contain a branch point. \end{lemma} \begin{proof} Suppose the claim is false and there is a branch point on $(x,c']$. Then the sub-wake of $x$ containing $c'$ also contains a point, say $c''$, in the forward orbit of $c'$ (all endpoints of $H'$ are on the orbit of $c'$). But then $c''$ is the landing point of a dyadic ray of lower generation than the dyadic ray landing at $c'$. This is impossible because in the parameter plane the dyadic ray landing at $c'$ has the lowest possible generation among all dyadic rays in the sub-wake of $c$ containing $c'$ (by hypothesis that the vein of $c'$ terminates at $c$). 
\end{proof} The main step in proving Theorem~\ref{Thm:RelationDyadicTrees} is a topological surgery on Hubbard trees, as follows: \begin{proposition}[Relation Between Subordinate Dyadic Hubbard Trees] \label{Prop:RelationDyadicTrees} Let $c\lhd c'$ be two dyadic parameters with external angles $\vartheta$ and $\vartheta'$, let $H_c$ be the Hubbard tree of $c$, and let $H \supset H_c$ be the connected hull of the critical orbit and of the orbit of $z(\vartheta')$. Let $x$ be the branch point of the arcs from $0$ to $c$ and to $z(\vartheta')$. If $p_c$ is the natural map on $H$, define a map $f\colon H\to H$ as follows: choose a homeomorphism $\rho\colon[x,c]\to[x,z(\vartheta')]$ fixing $x$ and let \[ f(z):=\left\{ \begin{array}{ll} \rho\circ p_c(z) & \text{if $p_c(z)\in[x,c]$} \\ p_c(z) & \text{otherwise.} \end{array} \right. \] Let $H'$ be the connected hull within $H$ of the orbit of $0$ under $f$. Then $(H',f)$ is the Hubbard tree of $p_{c'}$ (up to isotopy rel branch points and endpoints). \end{proposition} \begin{proof} Since there are no branch points on $[x,c]$ (Lemma~\ref{Lem:NoExtraBranchPoint}), the new map $f$ is a branched covering where $0$ is the unique critical point. We have a connected tree $H$ containing the critical point $0$, and with respect to $f$ the orbit of $0$ is still finite (it still terminates at the $\beta$ fixed point). Therefore, $(H',f)$ is a finite tree with a continuous self-map, and the dynamics is locally injective except at the critical point $0$. Since there are at most $2$ branches at $0$, the map is globally at most $2:1$. Every endpoint is by definition on the critical orbit, and the tree comes with an embedding into $\mathbb C$ that is compatible with the dynamics. Therefore $(H',f)$ is the Hubbard tree of a postcritically finite polynomial in which the critical orbit lands at the $\beta$ fixed point at the desired number of iterations.
Let $c''$ be the corresponding parameter and $\vartheta''$ be the external angle; we have $\vartheta''=a''/2^{k'}$. It remains to prove that $\vartheta''=\vartheta'$ and thus $c''=c'$. Since $c\lhd c'$, there is a postcritically finite branch point in $\mathscr M$, say $c_*$, that separates $c$ from $c'$, and the external angles of $c_*$ are the external angles of $x$ in the dynamical plane of $c$ (Lemma~\ref{Lem:DirectlySubordinateDynamics}). In the dynamical plane of $c''$, the point $x$ has the same external angles because it has the same period and preperiod and the dynamics of the subtree connecting the orbit of $x$ is unaffected by the surgery (except the bit around the critical point that maps past $x$). Hence $\vartheta''$ is the unique dyadic of least generation that is separated from the angle $0$ by the angles of $x$, and the same is true for $\vartheta'$. \end{proof} \begin{remark} The fact that $c''=c'$ can also be shown using spiders \cite{Spiders} and Thurston's theorem. Let us topologically extend the map $f:H' \to H'$ to a continuous map on $\mathbb C$ as follows. First, we set $f$ to be $p_c$ on the dynamic rays $R({2^{t}\vartheta'})$ of $p_c$, where $t\in\{0,1,\dots, k'\}$. There are $k'+1$ topological discs in the complement of $H'\cup_{t\ge 0} R({2^{t}\vartheta'})$ and the map $f$ easily extends to each of them as a homeomorphism. The new map $f$ is a topological polynomial for which $\bigcup_t R({2^{t}\vartheta'})$ forms an invariant spider. Since this spider is equivalent to a standard invariant spider of $c'$, we get $c'=c''$ by Thurston rigidity \cite{DHThurston,Spiders}. \end{remark} \begin{lemma}[Injective Dynamics of Last Edge] \label{Lem:InjectiveDynamicsLastEdge} \lineclear In any dyadic Julia set, consider any dyadic angle $\vartheta=a/2^k$ with $k\ge 1$ and let $x$ be the point where the arc from $z(\vartheta)$ to $\alpha$ is attached to the minimal tree connecting all dyadic endpoints of generations $a'/2^{k'}$ with $k'<k$. 
Then $[z(\vartheta),x]$ maps injectively for $k$ iterations to an interval $[\beta,y]\subset[\beta,\alpha]$. \end{lemma} \begin{proof} For every integer $m\ge 0$, let $T_m$ be the minimal tree connecting the $\alpha$ fixed point to all dyadic endpoints of generation at most $m$. Let $f$ be the map on the Julia set. The edge $[z(\vartheta),x]\subset \ovl{T_k\setminus T_{k-1}}$ certainly maps forward homeomorphically one generation to an arc $[z(2\vartheta),x']$, where $x'=f(x)\in f(T_{k-1})$. We claim that $[z(2\vartheta),x']\subset \ovl{T_{k-1}\setminus T_{k-2}}$ so that the inductive step applies and completes the proof. We first show that \begin{equation} \label{eq:T_k-2InT_k-1}f^{-1}(T_{k-2})\subset T_{k-1}. \end{equation} Indeed, $T_{k-2}$ is the minimal tree connecting all dyadic endpoints of generation at most $k-2$. Consider an endpoint $y$ of the forest $f^{-1}(T_{k-2})$. If $f(y)\in T_{k-2}$ was not an endpoint, so it was connected to at least two edges in $T_{k-2}$, then $y$ would have to be connected to at least two edges in $f^{-1}(T_{k-2})$, a contradiction. Thus every endpoint of the forest $f^{-1}(T_{k-2})$ is a dyadic endpoint of generation at least $k-1$ and hence $f^{-1}(T_{k-2})\subset T_{k-1}$. Finally, if $[z(2\vartheta),x']$ intersects $T_{k-2}$, then $[z(\vartheta),x]$ intersects $T_{k-1}$, and by hypothesis this intersection is the single point $x$. Hence $[z(2\vartheta),x']\subset (T_{k-1}\setminus T_{k-2})\cup\{x'\}$. \end{proof} \begin{remark} In this lemma, the hypothesis that the polynomial be dyadic was stated only for convenience. All we are using is that the Julia set is path connected (if there are bounded Fatou components, the notation needs minor adjustments).
\end{remark} \begin{lemma}[Homeomorphic Preimage of Arc] \label{Lem:HomeomorphicPreimage} \lineclear Suppose that $c\lhd c'$ are two dyadic parameters and let $\vartheta,\vartheta'$ be their external angles. In the Julia set of $c$, let $x$ be the branch point between $0$, $z(\vartheta)=c$ and $z(\vartheta')=:z'$. If $\vartheta=a/2^k$, then there is a point $y'\in(z',x)$ so that $p_c^{\circ k}\colon [z',y']\to p_c^{\circ k}([z',y'])= p_c^{\circ k}([ c,x])$ is a homeomorphism. \end{lemma} \begin{proof} \looseness-1 We know from Lemma~\ref{Lem:InjectiveDynamicsLastEdge} that $[c,x]$ maps homeomorphically for $k$ iterations to a subinterval of $[\beta,\alpha]$; define $y:=p_c^{\circ k}(x)\in(\beta,\alpha]$. Similarly, there is a point $x'\in[z',0]$ so that $[z',x']$ maps forward homeomorphically for $k'$ iterations (if $\vartheta'=a'/2^{k'}$). We have $x'\in[x,0]$ (or equivalently $x\in[z',x']$) because $c\lhd c'$, i.e.\ $c$ is \emph{directly} subordinate to $c'$. More precisely, if $c_*$ is the point where the vein of $c$ terminates (Lemma~\ref{Lem:DirectlySubordinateDynamics}), then the external angles of the dynamic rays landing at $x$ are exactly the external angles of the parameter rays bounding the subwake of $c_*$ containing $c$. Analogously, the same is true for the point $x'$ and the vein of $c'$; let $c'_*$ be this branch point. But since $c\lhd c'$, it follows that $c'_*$ separates $c_*$ from the origin, and hence, by the Correspondence Theorem~\ref{thm:Corresp}, $x'$ separates $x$ from the origin. \begin{figure} \caption{Illustration of the relative position of various points in the Hubbard tree of $c$ in the proof of Lemma~\ref{Lem:HomeomorphicPreimage}.} \label{Fig:HubbardTreeVariousPoints} \end{figure} Let $y'':=p_c^{\circ k'}(x)$; then $p_c^{\circ k'}\colon[z',x]\to[\beta,y'']$ is a homeomorphism.
Iterating this $k-k'$ further times, the image arc terminates at $\beta$ and at $y$, but it can no longer be injective (the map $p_c^{\circ k}\colon[c,x]\to [\beta,y]$ is a homeomorphism, and $p_c^{\circ k}$ is a local homeomorphism near $x$ because $x$ cannot be on the critical orbit). There is a branch $p_c^{-1}\colon[\beta,\alpha]\to[\beta,-\alpha]$; let $[\beta,y''']$ be the image of $[\beta,y]$ under the $k-k'$-th iterate of this branch. Observe that $[y''',\beta]\subsetneq[y'',\beta]$ because otherwise $p_c^{\circ (k-k')}$ restricted to $[\beta,y'']$ would have degree $1$. Pulling back $k'$ times we obtain an interval $[z',y']\subset[z',x]$ so that $p_c^{\circ k'}\colon [z',y']\to[\beta,y''']$ and $p_c^{\circ k}\colon[z',y']\to[\beta,y]$ are homeomorphisms, as claimed. \end{proof} \begin{proposition}[Injection Between Precritical Points] \label{Prop:InjectionPrecritical} \lineclear Given two dyadic parameters $c\lhd c'$, there exists a generation-preserving injection $B$ from the set of precritical points in $[c,-\alpha]$ of $p_c$ to the set of precritical points in $[c',-\alpha]$ of $p_{c'}$. Moreover, $B$ can be taken to satisfy the following properties (A)--(C) for every precritical point $\zeta\in [c,-\alpha]$. \begin{itemize} \item[(A)] For $k\ge 0$ we have $p_c^{\circ k}(\zeta)\in [c,-\alpha]$ if and only if $p_{c'}^{\circ k}(B(\zeta))\in [c',-\alpha]$. Moreover, if $p_c^{\circ k}(\zeta)\in [c,-\alpha]$, then \[ B(p_c^{\circ k}(\zeta))=p_{c'}^{\circ k}(B(\zeta)). \] \item[(B)] Assume $c_*$ is a postcritically finite parameter such that $c_* \prec c$ and $c_* \prec c'$. Let $x_*$ and $x'_*$ be the dynamical counterparts of $c_*$ in the dynamical planes of $c$ and $c'$. Then $\zeta\in [x_*,-\alpha]$ if and only if $B(\zeta)\in [x'_*,-\alpha]$.
\item[(C)] There is a sub-interval $J\subset [c', \alpha]$ such that $p^{\circ k }_{c'}(J)=[-\alpha, \alpha ]$ for some $k>0$ and such that the image of $B$ is in $[c',-\alpha]\setminus J$. \end{itemize} \end{proposition} \begin{proof} Using Proposition~\ref{Prop:RelationDyadicTrees} and its notation, we may identify $p_{c'}:H_{c'}\to H_{c'}$ with $f:H'\to H'$. The point $x_*\in H$ is periodic or preperiodic under iteration of $p_c$ and never visits $[x,c]$; thus $x_*\in H'$ is identified with $x'_*\in H_{c'}$. We will now construct a bijection $B$ between precritical points in $[c,-\alpha]$ of $p_c$ and those precritical points in $[z',-\alpha]\setminus [y',x]$ of $f$ for which the orbit never visits $[y',x]$, where $y'$ is specified in Lemma~\ref{Lem:HomeomorphicPreimage} (see Figure~\ref{Fig:HubbardTreeVariousPoints}). In fact, our bijection will preserve the itinerary with respect to $H\setminus\{0\}$, except for $k$ iterations along the orbit from $[c,x]$ to $[\beta,y]=p_c^{\circ k}([c,x])$ (resp.\ from $[z',y']$ to $[\beta,y]$). Our proof proceeds by induction on the number of times, say $m$, that an orbit of a precritical point $\zeta$ runs through $[c,x]$ (not counting $\zeta$ itself). We start with those precritical points on $[c,x]$ (for the map $p_c$) that never run through $(c,x)$ again, that is with the case $m=0$. By Lemma~\ref{Lem:HomeomorphicPreimage}, an appropriate branch of $f^{\circ(-k)}\circ p_c^{\circ k}$ sends $[c,x]$ homeomorphically to $[z',y']$; call this branch $\eta\colon[c,x]\to [z',y']$. Then for any $a\in[c,x]$, we have $p_c^{\circ k}(a)=f^{\circ k}(\eta(a))$ and the future orbits of these points under $p_c$ respectively under $f$ coincide as long as the orbits avoid $[c,x]$. Note that all precritical points on $[c,x]$ must have generation at least $k$. We thus obtain an injection, say $B_0$, of precritical points with $m=0$.
Every precritical point $\zeta\in[c,x]$ of generation $n$ and with $m=0$ is the common endpoint of two adjacent sub-intervals of $[c,x]$ that map homeomorphically onto $[c,x]$ after $n$ iterations: we have $p_c^{\circ n}(\zeta)=c$ and $p_c^{\circ (n-1)}(\zeta)=0$, so we can pull the entire interval $[c,x]$ back in two ways (with a choice in the first step) until we end at $\zeta$. The pull-back of the entire interval $[c,x]$ is possible because no critical value can interfere (the critical orbit visits only endpoints of the tree), and the resulting interval is in $[c,x]$ because the Hubbard tree is unbranched on $[c,x)$ and the orbit of $x$ never enters $[c,x)$. There is an analogous result about precritical points $\zeta'\in[z',y']$ of $f$ with $m=0$ and sub-intervals of $[z',y']$ that map to $[z',y']$. By construction of $y'$, the point $\zeta'$ has generation $n\ge k$, and there is a precritical point $\zeta=\eta^{-1}(\zeta')\in[c,x]$. The point $\zeta$ has a neighborhood, say $I_\zeta\subset [c,x]$, that maps $2:1$ onto $[c,x]$ (the union of the two intervals constructed above), and then $\zeta'$ has a neighborhood $I_{\zeta'}\subset[z',y']$ with $p_c^{\circ k}(I_{\zeta'})=p_c^{\circ k}(I_\zeta)\subset[\beta,y]$. The bijection $B_0$ of precritical points with $m=0$ thus extends to a bijection between intervals $I\subset[c,x]$ that map homeomorphically onto $[c,x]$ after some number of iterations without visiting $(c,x)$ before, and intervals $I'\subset [z',y']$ that map homeomorphically onto $[z',y']$ after the same number of iterations and without ever visiting $(z',y')$; this bijection respects the number of iterations as well as the order along the intervals within $[c,x]$ and $[z',y']$ (the intervals are obviously disjoint). Denote this bijection of intervals by $B^*_0$.
Now suppose the statement is shown for all precritical points on $[c,x]$ that visit $(c,x)$ at most $m$ times, for some $m\ge 0$; in particular, we have an injection, say $B_m$, from precritical points on $(c,x)$ that map into $(c,x)$ exactly $m$ times, to precritical points on $(z',y')$ that map into $(z',y')$ exactly $m$ times {and never visit $(y',x)$}. Consider any precritical point $\zeta\in(c,x)$ that visits $(c,x)$ exactly $m+1$ times, and let $s$ be minimal such that $p_c^{\circ s}(\zeta)\in(c,x)$. Then there is an interval $I\ni \zeta$ so that $p_c^{\circ s}\colon I\to[c,x]$ is a homeomorphism (same reasoning as above). Let $I':=B^*_0(I)$. Then $p_c^{\circ s}(\zeta)\in(c,x)$ is a precritical point that visits $(c,x)$ only $m$ times, and $B_{m+1}(\zeta)=\zeta':= f^{\circ(-s)}\circ B_m \circ p_c^{\circ s}(\zeta)$, choosing the branch $f^{\circ(-s)}\colon [y',z']\to I'$. Since the map $B^*_0$ is injective, different intervals $I$ land in disjoint intervals $I'$, and since $B_m$ is injective by induction, the restriction of $B_{m+1}$ to points that run through any particular $I$ is injective too, so in total $B_{m+1}$ is injective as claimed. This takes care of all precritical points on $[c,x]$, and we still have to deal with those on $[x,-\alpha]$. But those with orbits that never run through $[c,x]$ are unaffected by the changed dynamics, and the injection easily extends to those that map into $[c,x]$ under $p_c$. It remains to show that the map $B$ satisfies Properties (A)--(C). Let us extend $B_0^*$ to all intervals in $[x,-\alpha]$ that are injective preimages of $[c,x]$ and never run through $[c,x]$ before mapping into $[c,x]$. It is easy to see that $B_0^*(I)\subset I$ for every such interval $I\not \subset [x,c]$ because $[z',y']\subset [z',x]$ and $p_c^{\circ (-1)}([c,x])=f^{-1}([z',x])$, and induction can be applied. Since $x_*$ never visits $[x,c]$, we see that $x_*\not \in I$ for every maximal pre-image $I$ of $[c,x]$.
Also, by construction, $\zeta \in I$ if and only if $B(\zeta)\in I$ for every precritical $\zeta$ and interval $I\not \subset [x,c]$ as above. Therefore, $\zeta$ and $B(\zeta)$ are on the same side of $x_*$. It is clear that $\zeta$ and $B(\zeta)$ have the same return times to $[c,-\alpha]$ and $[z',-\alpha]$ because $p_c^{\circ k}([c,x])=f^{\circ k}([z',y'])$. And the dynamical relation $B(p_c^{\circ k}(\zeta))=p_{c'}^{\circ k}(B(\zeta))$ holds by construction. Thus Properties (A) and (B) hold. To prove (C), set $J$ to be any {iterated} pre-image of $[\alpha,-\alpha]$ so that $J\subset [x,y']$. \end{proof} \begin{proof}[Proof of Theorem~\ref{Thm:RelationDyadicTrees}] From Proposition~\ref{Prop:InjectionPrecritical} we have an injection of precritical points of $p_{c}$ of any given generation on $[-\alpha,c]$ to precritical points of $p_{c'}$ of the same generation on $[-\alpha',c']$, and by claim (B) this injection restricts to the arcs $[-\alpha,\alpha]$ and $[-\alpha',\alpha']$. This immediately proves part a), and part b) follows from claim (C) of Proposition~\ref{Prop:InjectionPrecritical}. Part c) of Theorem~\ref{Thm:RelationDyadicTrees} also follows from claim (C) of Proposition~\ref{Prop:InjectionPrecritical}: since $p_{c'}$ is dyadic, every edge of its Hubbard tree, say $H'$, maps over every other in a bounded number of iterations (the Markov partition associated to the edges is irreducible). Therefore, every typical orbit visits $J$ with positive frequency, and the entropy of orbits in $H'$ that are not allowed to visit $J$ is strictly smaller than the full entropy in $H'$. Precritical orbits in $H$ inject to precritical orbits in $H'$ avoiding $J$, and so $H$ has smaller entropy than $H'$. \end{proof} \section{Irrational Angles and the Tiozzo Conjecture} \label{Sec:IrrationalTiozzoConj} In this brief section, we will complete the proof of the Tiozzo Conjecture about local maxima of $h$, and we also describe local minima.
\begin{theorem}[The Tiozzo Conjectures] \label{Thm:TiozzoConj} \lineclear The entropy function $h\colon\mathbb{S}^1\to[0,\log 2]$ has the following properties. \begin{enumerate} \item[a)] Every dyadic angle is an isolated local maximum of the entropy function. \item[b)] Conversely, every local maximum of $h$ is dyadic. \item[c)] Within every wake, the entropy function has a unique global maximum, and it occurs at the unique dyadic of lowest denominator in the wake. \item[d)] Within every wake, for each $n$ the function $N_\vartheta(n)$ assumes its maximum at the dyadic of least generation (of course, this maximum is not unique). \end{enumerate} \end{theorem} \begin{proof} Fix a dyadic angle $\vartheta_0$ and let $I=I(\vartheta_0)\subset\mathbb{S}^1$ be the open interval of angles $\vartheta$ for which the combinatorial arcs to $c(\vartheta)$ intersect the interior of the vein of $c(\vartheta_0)$ (not the long vein). In other words, if $c_*$ is the endpoint of the vein of $c(\vartheta_0)$, then $I$ consists of the angles within the same subwake of $c_*$ that $c(\vartheta)$ is in. Clearly $\vartheta_0\in I$. Every dyadic angle in $I$ is either directly or indirectly subordinate to $\vartheta_0$ (where the latter means that there is a finite sequence of dyadic angles ending at $\vartheta_0$ so that each is directly subordinate to the next). By Theorem~\ref{Thm:RelationDyadicTrees} part c), we know that $h$ restricted to dyadic angles in $I$ has its unique maximum at $\vartheta_0$. We claim that for every $\vartheta\in I$ and every $n\in\mathbb N$ we have $N_\vartheta(n)\le N_{\vartheta_0}(n)$ and hence $h(\vartheta)\le h(\vartheta_0)$. Indeed, if $\vartheta$ is a dyadic angle, then this is Theorem~\ref{Thm:RelationDyadicTrees} part a).
And if not, then there is a dyadic angle $\vartheta'$ so that $c(\vartheta)\prec c(\vartheta')$ and we have $N_\vartheta(n)\le N_{\vartheta'}(n)\le N_{\vartheta_0}(n)$ for all $n$ where the second inequality is again the dyadic argument and the first one is Lemma~\ref{Lem:Monotonicity}. Therefore $\vartheta_0$ is a (weak) local maximum of $N_\vartheta(n)$ for all $n$ and thus of $h$. Part a) will follow from the stronger claim that $h$ has a unique global maximum on $I(\vartheta_0)$, and this occurs at $\vartheta_0$. We know that any dyadic $\vartheta\in I(\vartheta_0)\setminus\{\vartheta_0\}$ has $h(\vartheta)<h(\vartheta_0)$. If $\vartheta\in I(\vartheta_0)$ is such that $c(\vartheta)\prec c(\vartheta_0)$, then by Lemma~\ref{Lem:Monotonicity} we have $h(\vartheta)\le h(\vartheta_0)$; but in fact we have strict monotonicity because there is some dyadic $\vartheta'$ with $c(\vartheta)\prec c(\vartheta')$ and $h(\vartheta)\le h(\vartheta')<h(\vartheta_0)$. And if not $c(\vartheta)\prec c(\vartheta_0)$, there is another dyadic $\vartheta'$ so that $\vartheta\in I(\vartheta')$ but $\vartheta_0\not\in I(\vartheta')$. We then have $h(\vartheta)\le h(\vartheta')<h(\vartheta_0)$. Therefore, $\vartheta_0$ is the unique global maximum within $I(\vartheta_0)\ni\vartheta_0$. This finishes the proof of the stronger version of claim a). For part c), consider any hyperbolic component $W$ and let $I$ be the open interval of angles within its wake. Let $\vartheta_W$ be the unique dyadic of lowest generation within $I$. Then $I(\vartheta_W)\supset I$, and on this interval $h$ has its unique global maximum at $\vartheta_W$. Now suppose $W$ is a wake that is not the wake of a hyperbolic component: then either it is one of the subwakes of a Misiurewicz-Thurston parameter, or an irrational wake (bounded by two irrational angles with equal angled internal address). But such wakes are exhausted by wakes of hyperbolic components, so the claim holds for them as well.
Part d) also follows. For claim b), suppose $\vartheta$ is a local maximum of $h$, and let $I\subset\mathbb{S}^1$ be an interval on which $\vartheta$ is a global maximum. By monotonicity, we may assume that the $\vartheta$-ray lands (combinatorially) at an endpoint of $\mathscr M$. If $\vartheta$ is not dyadic, then choose a dyadic angle $\vartheta'\in I$ with $\vartheta\in I(\vartheta')\subset I$. Then the unique global maximum of $h$ within $I(\vartheta')$ is at $\vartheta'$, so $\vartheta=\vartheta'$ is dyadic. \end{proof} \begin{remark} For the record, we observe that along the way we proved that core entropy $\tilde h$ is strictly monotone on arcs before dyadic endpoints: if $c'$ is dyadic and $c\prec c'$, then $\tilde h(c)<\tilde h(c')$ (the general result in Lemma~\ref{Lem:Monotonicity} would only give $\tilde h(c)\le \tilde h(c')$). In fact, there are parameters $c\prec c'$ so that entropy is constant along $[c,c']$; this happens when $c$ and $c'$ are within the same little Mandelbrot set, for instance within the ``main molecule of $\mathscr M$'' (which was shown in \cite{BruinSchleicher} to be the locus of parameters with zero biaccessibility dimension). This implies that core entropy is strictly monotone along all veins, even long veins, except within little Mandelbrot sets: if $c\prec c'$ are two postcritically finite parameters, then the Hubbard tree of $c$ (more precisely, its marked points) can be recovered in the Hubbard tree of $c'$, so all precritical orbits of $c$ are found for $c'$, while there is strictly more choice for $c'$. This choice strictly increases entropy except when $c$ and $c'$ are within the same little Mandelbrot set (or the main molecule with entropy $0$): in the latter case, entropy may be dominated by the non-renormalizable dynamics while the extra choices are added only to the renormalizable dynamics (see Section~\ref{Sub:Renormalizable}).
\end{remark} The following is a rather obvious restatement of this result for $\tilde h\colon\mathscr M\to[0,\log 2]$. \begin{corollary}[Local Maxima of $\tilde h$ on $\mathscr M$] \lineclear The function $\tilde h\colon\mathscr M\to[0,\log 2]$ has local maxima exactly at parameters $c(\vartheta)$ with $\vartheta=k/2^q$, and these are all isolated. \end{corollary} The graph in Figure~\ref{Fig:EntropyGraph} also shows local minima, and these can be classified relatively easily; we are grateful to Steffen Maass for his questions. \begin{theorem}[Local Minima of Entropy] The function $h\colon\mathbb{S}^1\to[0,\log 2]$ has an isolated local minimum at $\vartheta$ if and only if there are two angles $\vartheta_1,\vartheta_2$ with $0<\vartheta_1<\vartheta<\vartheta_2<1$ so that all three parameter rays at angles $\vartheta,\vartheta_1,\vartheta_2$ land at a common parameter; in this case all angles $\vartheta,\vartheta_1,\vartheta_2$ are rational with odd denominators that are all equal, and the landing point of these rays is a Misiurewicz-Thurston parameter. The function $\tilde h\colon\mathscr M\to[0,\log 2]$ has no isolated local minimum. \end{theorem} \begin{proof} Since entropy is monotone along the combinatorial arc $[0,c]$ for every $c\in\mathscr M$, the entropy function $\tilde h$ cannot have isolated local minima when $\tilde h(c)>0$. Since $\tilde h^{-1}(0)$ is connected (the main molecule of $\mathscr M$), there is no isolated local minimum at all. By strict monotonicity along veins, except within little copies of $\mathscr M$, it is obvious that $h$ has an isolated minimum at every angle $\vartheta$ for which there are angles $\vartheta_1,\vartheta_2$ as claimed. Conversely, if $h$ has an isolated local minimum, then all angles associated to the combinatorial arc $[0,c(\vartheta)]$ must avoid a neighborhood of $\vartheta$.
For sufficiently small $\eps_1>0$, $\eps_2>0$ so that $\vartheta-\eps_1\in\mathbb Q$ and $\vartheta+\eps_2\in\mathbb Q$, the Branch Theorem applied to $c(\vartheta-\eps_1)$ and $c(\vartheta+\eps_2)$ shows that either $c(\vartheta)$ is a Misiurewicz-Thurston parameter and $\vartheta=p/q$ with odd $p$ and even $q$, or $c(\vartheta)$ is a boundary point of a hyperbolic component of $\mathscr M$. In the latter case, every neighborhood of $\vartheta$ contains angles for which the corresponding rays land at the boundary of the same hyperbolic component, and these have the same entropy (even the same number of relevant precritical leaves). \end{proof} \reminder{Should we also discuss non-isolated local minima? These occur at little Mandelbrot sets, but this would require the statement that entropy is constant within every little Mandelbrot set that has positive entropy at the root.} \section{Continuity of Entropy} \label{Sec:Continuity} Many of our estimates will involve ``radial growth of entropy'': that is, comparing $\tilde h(c')-\tilde h(c)$ for parameters $c'\succ c$. A fundamental case will be when $c'$ is a dyadic endpoint of $\mathscr M$ and $c$ is the parameter where the vein of $c'$ terminates. We start by comparing the corresponding Hubbard trees. Since we count pre-critical points between $\alpha$ and $-\alpha$, we add the points $\alpha$ and $-\alpha$ to the set of vertices of the Hubbard trees (if they are not already there). \begin{lemma}[Marked Points in Related Hubbard Trees] \label{Lem:MarkedPointsSubset_new} \lineclear Suppose $c'\in\mathscr M$ is a dyadic parameter and $c\in\mathscr M$ is the postcritically finite parameter where the vein of $c'$ ends. We assume that $c\not= 0$. Denote by $H$ and $H'$ the Hubbard trees of $p_c$ and $p_{c'}$. If $-\alpha\not\in H$, then extend $H$ to $-\alpha$ by adding an extra edge. Then $H$ and $H'$ are related as follows.
Let $V$ be the union of the postcritical points, $\{\alpha, -\alpha \}$, and branch points in $H$. Denote by $x$ the dynamical counterpart of $c$ in $H'$. Let $V'$ be the union of the postcritical points, $\{\alpha,-\alpha\}$, branch points, and points on the orbit of $x$ in $H'$, and let $V'_*$ be the union of $\{\alpha, -\alpha\}$, the branch points, and the points on the orbit of $x$ in $H'$. Then \begin{itemize} \item[(A)] $V'\setminus V'_*$ are exactly the endpoints of $H'$; and \item[(B)] there is a bijection $J\colon V\to V'_*$ so that \begin{itemize} \item[(1)] $J(p_c(v))=p_{c'}(J(v))$ for all $v\in V$; \item[(2)] if $v\in V$ is the landing point of a ray with angle $\phi$, then $J(v)\in V'$ is also the landing point of the ray with angle $\phi$; \item[(3)] $J(c)=x$; and \item[(4)] if a ray pair $RP(\phi_1,\phi_2)$ lands at a non-precritical point of $H$ so that $RP(\phi_1,\phi_2)$ separates two vertices $v_1,v_2\in H$, then $RP(\phi_1,\phi_2)$ separates $J(v_1)$ and $J(v_2)$ in the dynamical plane of $p_{c'}$. \end{itemize} \end{itemize} \end{lemma} \begin{remark} Note that the tree $H$ contains $-\alpha$ unless $c$ is immediately satellite renormalizable (see Lemma~\ref{Lem:RenormMinimalPeriod}); in all other cases $-\alpha$ is in the connected hull of the critical orbit. Observe also that every $v\in V$ is either the landing point of a periodic or preperiodic dynamic ray, or on the orbit of $c$. \end{remark} \begin{proof} The set of marked points of the Hubbard tree $H$ are the postcritical points, which include the endpoints, the branch points, and $\{\alpha, -\alpha\}$. They form the set $V$, and we show that these points exist, with the same combinatorics, for all parameters $c''\succ c$, including $c'$. Let $v$ be a branch point of $H$.
Then $v$ is the landing point of at least three dynamic rays, all periodic or preperiodic, and the rays at the same angles land at a common point for all parameters $c''\succ c$, in particular for $c'$, by Corollary~\ref{Cor:CountPartsHubTree}. It is not hard to see that the tree $H'$ has (at least) as many branches at the corresponding point as $H$ does at $v$. This defines the map $J$ on the set of branch points on $H$ in a natural way. The case $v\in\{\alpha, -\alpha\}$ is treated similarly. All further marked points of $H$ are the critical value $c$ and its finite forward orbit. In the dynamics of $H'$, there is the dynamical counterpart $x$ of $c$, see Definition~\ref{Def:DynamCounterpart}. Let us set $J(p_c^{\circ n}(c)):=p_{c'}^{\circ n}(x)$ and show that $J$ is well defined and satisfies the requirements (1)--(4). If $c$ is a Misiurewicz-Thurston parameter, then $p^{\circ k}_c(c)$ is the landing point of at least two external rays (because $c\prec c'$ is not an endpoint). Moreover, $p^{\circ k}_c(c)$ is the landing point of a ray $R(\phi)$ if and only if $R(\phi)$ lands at $J(p^{\circ k}_c(c))=p_{c'}^{\circ k}(x)$ in the dynamical plane of $p_{c'}$. Thus $J$ is well defined and satisfies (1)--(4) in the Misiurewicz-Thurston case. The other case is that $c$ is the center of a hyperbolic component $W\subset\mathscr M$. In this case, no ray lands at $p^{\circ k}_c(c)$ for $k\ge 0$ because these points are in the Fatou set. By definition, $J(c)=x$ is the landing point of a periodic ray pair, say $RP(\phi_-,\phi_+)$, so that in parameter space the ray pair at the same angles bounds the subwake of $c$ containing $c'$. Denote by $c_*\in \partial W$ the landing point of the parameter ray pair $RP(\phi_-,\phi_+)$. For a parameter $w\in \ovl W$, let $\gamma_w$ be a periodic point on the unique non-repelling orbit; there is a unique continuous choice so that $\gamma_c=c$. Then $\gamma_{c_*}$ is the landing point of the dynamic ray pair $RP(\phi_-,\phi_+)$.
Observe also that the cycle $\{p_w^{\circ k}(\gamma_w)\}_{k\ge 0}$ for $w\in W\cup\{c_*\}$ does not cross any ray pair $RP(\phi_1,\phi_2)$ as in the requirement (4) of $J$. This proves claim (4) because the rays landing at non-precritical points of $H$ plus rays $R(\phi_-)$ and $R(\phi_+)$ are stable in the subwake of $c$ containing $c'$. The requirements (1)--(3) are immediate. We get a natural injection $J\colon V\to V'$. None of the image points are endpoints: the images of branch points are branch points, the images of $\alpha, -\alpha$ are $\alpha, -\alpha$, and the image of $x$ is on $[0,c']$ and not an endpoint, so the forward iterates of $x$ cannot be endpoints either. Hence $J(V)\subset V'_*$. Since there is no branch point on $(x,c']$ (Lemma~\ref{Lem:NoExtraBranchPoint}), all branch points of $H'$ are in the connected hull of the orbit of $x$. Therefore, $J\colon V\to V'_*$ is a bijection. This completes the proof. \end{proof} \begin{lemma}[Corresponding Dynamics on Edges] \label{lem:HintoH'} \lineclear \looseness-1 Let $H$ and $H'$ be the Hubbard trees of $c$ and $c'$ as in Lemma~\ref{Lem:MarkedPointsSubset_new}. Let $V$ and $V'$ be the vertex sets of $H$ and $H'$ (again as in Lemma~\ref{Lem:MarkedPointsSubset_new}). Then the bijection $J\colon V\to V'_*$ extends to an injection of edges in $H$ to edges in $H'$. The image of $H$ under this map on edges is the connected hull in $H'$ containing the orbit of $x$. Let $e'_0\subset H'$ be the edge that contains the critical point in its interior. The critical point of $p_c$ is in the interior of $J^{-1}(e'_0)\subset H$ if $c$ is pre-periodic, and it is on the boundary of $J^{-1}(e'_0)\subset H$ if $c$ is periodic. Moreover, an edge $e_1\subset H$ covers once (resp.\ twice) an edge $e_2\subset H$ under $p_c$ if and only if the edge $J(e_1)\subset H'$ covers once (resp.\ twice) an edge $J(e_2)\subset H'$ under $p_{c'}$.
If there are two edges $e'_1,e'_2\subset H'$ with $e'_1\subset J(H)$ and $e'_2\not\subset J(H)$ so that $p_{c'}(e'_1)\supset e'_2$, then $e'_1=e'_0$ and $e'_2=[c',x]$. \end{lemma} \begin{proof} If an edge $e\subset H$ connects two vertices $v_1, v_2\in V$, then by Lemma~\ref{Lem:MarkedPointsSubset_new} the vertices $J(v_1), J(v_2)\in V'$ are adjacent; i.e.\ $J(v_1)$ and $J(v_2)$ are connected by an edge that we defined to be $J(e)$. This extends $J$ to an injection of edges in $H$ to edges in $H'$. Clearly, $J(H)$ is the connected hull of $V'_*=J(V)$. Since $c'$ is a Misiurewicz-Thurston parameter, the critical point of $p_{c'}$ is in the interior of an edge; we denote this edge by $e'_0=[a,b]\subset H'$. Then both pre-images of $x$ must be in $[a,b]$ because $(x,c']$ contains no branch point (Lemma~\ref{Lem:NoExtraBranchPoint}) and thus no marked point other than $c'$. If $c\in H$ is periodic, then so is $x\in H'$, and at least one of the two pre-images of $x$ must be in $V'$, so this point must be in $\{a,b\}$. Thus one of $p_{c'}(a)$ and $p_{c'}(b)$ equals $x$. Since $x=J(c)$, one of $J^{-1}(a)$ or $J^{-1}(b)$ is the critical point in $H$. If $c\in H$ is pre-periodic, then the critical point $0\in H$ is not a marked point, so it is an interior point of some edge, say $e_0$. All marked points in $H$ are landing points of (pre)periodic dynamic rays, and the map $J$ respects their external angles, and this implies that $J$ must send $e_0$ to $e'_0$ (the critical point must be accessible by two rays with angles that differ by $1/2$). It is now easy to see that $J$ respects the dynamics of edges in $H$ and in $H'$, except that $e'_0$ covers, in addition, twice $[c',x]$.
Indeed, if an edge $[v,w]\in H$ is different from $e_0$, then $[J(v),J(w)]\not= e'_0$; thus $p_{c}$ maps $[v,w]$ homeomorphically onto $[p_{c}(v),p_{c}(w)]$, while $p_{c'}$ maps $[J(v),J(w)]$ homeomorphically onto $[p_{c'}(J(v)),p_{c'}(J(w))]=[J(p_c(v)),J(p_c(w))]$. It remains to analyze the edges $e'_0=[a,b]$ and $e_0:=[J^{-1}(a),J^{-1}(b)]$; this is done similarly: the arcs $[a,0]$ and $[0,b]$ cover homeomorphically $[p_{c'}(a),c']$ and $[c',p_{c'}(b)]$ respectively, while $[J^{-1}(a),0]$ and $[0,J^{-1}(b)]$ respectively cover $[p_{c}(J^{-1}(a)),c]$ and $[c,p_{c}(J^{-1}(b))]$. Finally, if $e'_1\subset J(H)$ but $p_{c'}(e'_1)\not\subset J(H)$, then $p_{c'}\colon e'_1\to p_{c'}(e'_1)$ cannot be a homeomorphism (because $J(H)$ is connected), so $e'_1$ must be the unique edge containing the critical point, so $p_{c'}(e'_1)\ni c'$; since $(x,c')$ contains no marked point of $H'$, we have $p_{c'}(e'_1)\setminus J(H)=(x,c']$. \end{proof} Next we need a combinatorial estimate. A ``combinatorial pattern of length $n$ with gap size $s$'' is a finite sequence of integers $(j_1,j_2,\dots,j_m,n)$ with $1\le j_1< j_2 < \dots < j_m< n$ and $j_{i+1}-j_i\ge s$ and $n-j_m\ge s$. \begin{lemma}[Number of Combinatorial Patterns] \label{Lem:CombinatorialPatterns} \lineclear The number of combinatorial patterns of length $n$ with gap size $s$ is at most $e^{(n/s)\log(s+1)}$. \end{lemma} \begin{proof} The number of combinatorial patterns equals the number of binary sequences of length $n$ where two consecutive digits $1$ have distance at least $s$, and so that the final digit is a $1$. Write $n=ks+r$ with $r<s$. Then each block of $s$ consecutive entries has $s+1$ possibilities because it has at most a single $1$, and the last block has $r-1$ digits $0$ followed by a $1$, so it has $1$ possibility. The number of combinatorial patterns is thus at most $(s+1)^k= e^{k\log(s+1)}\le e^{(n/s)\log(s+1)}$.
\end{proof} In the proof of Proposition~\ref{Prop:BoundEntropyIncrease}, it will be convenient to define ``relevant precritical points'' as precritical points on $[\alpha,\beta]$, i.e.\ precritical leaves separating the two fixed points $\alpha$ and $\beta$ or their corresponding leaves in the lamination (rather than separating $\alpha$ from $-\alpha$ as before). We thus start by showing that this will not affect the value of the entropy. \begin{lemma}[Different Counts Yield Identical Entropy] \label{lem:DifferCounts} \lineclear In any invariant quadratic lamination, let $N_1(n)$ be the number of precritical leaves that separate $\alpha$ from $-\alpha$, and let $N_2(n)$ be the number of precritical leaves of generation $n$ that separate $\alpha$ from $\beta$. Then \[ \limsup_n \frac 1 n \log N_1(n) = \limsup_n \frac 1 n \log N_2(n) \;; \] in other words, both counting functions define the same entropy. \end{lemma} \begin{proof} Since $-\alpha\in[\alpha,\beta]$, we clearly have $N_1(n)\le N_2(n)$. To show the converse, we claim that $N_2(n) \le N_1(n)+N_1(n-1)+ N_1(n-2)+\dots$. To see this, denote $\alpha_0:=\alpha$ and, recursively, $\alpha_{k+1}$ to be the unique preimage of $\alpha_k$ on $[\alpha,\beta]$, so $\alpha_1=-\alpha$. Then $[\alpha_{k+1},\alpha_k]$ maps homeomorphically onto $[\alpha_k,\alpha_{k-1}]$ and $[\alpha,\beta]=\bigcup_{k\ge 0}[\alpha_{k},\alpha_{k+1}]$. If $N_{[\alpha_k,\alpha_{k+1}]}(n)$ denotes the number of precritical points of generation $n$ on $[\alpha_k,\alpha_{k+1}]$, then we have $N_{[\alpha_{k+1},\alpha_{k+2}]}(n)=N_{[\alpha_k,\alpha_{k+1}]}(n-1)$ and $N_{[\alpha_{0},\alpha_{1}]}(n)=N_1(n)$ and indeed $N_2(n) = N_1(n)+N_1(n-1)+N_1(n-2)+\dots$. If $N_1(n)\le C e^{(h+\eps)n}$ for all $n$, then $N_2(n)\le C n e^{(h+\eps)n}$. Therefore, $N_1$ and $N_2$ define the same entropy. 
\end{proof} \begin{proposition}[Bound on Entropy Increase] \label{Prop:BoundEntropyIncrease} \lineclear Suppose $c_1\prec c_2$ and $[c_1,c_2]$ is a (combinatorial) arc in $\mathscr M$ such that $0\le \tilde h(c_2)-\tilde h(c_1)\le \eps$. Then there is an $s>0$ with the following property: if $[c,c']$ is a dyadic vein of generation at least $s$ that terminates at $c\in [c_1,c_2]$, then $\tilde h(c')-\tilde h(c_2)\le \eps$. \end{proposition} \reminder{Would it be more consistent to use $\ovl m$ for $s$? } \begin{proof} Consider first the case $c\not =0$. Set $h:=\tilde h(c_2)$. There is a $C>0$ so that all $c\in [c_1,c_2]$ satisfy $N_c(n)\le N_{c_2}(n)\le Ce^{(h+\eps/2)n}$ for all $n$ (monotonicity, Lemma~\ref{Lem:Monotonicity}). We may suppose that $s$ is large enough so that $2C\le e^{(h+\eps/2)s}$. Let $s'\ge s$ be the generation of $c'$. As before (see Lemma~\ref{Lem:MarkedPointsSubset_new}), let $H'$ be the Hubbard tree of $p_{c'}$, which is dyadic, and let $H$ be the Hubbard tree of $p_c$, which is postcritically finite (as endpoint of a dyadic vein in $\mathscr M$), so both Hubbard trees exist and are finite. Since in this proof we count pre-critical points in $[\alpha,\beta]$ we add to $H$ the arc, say $[\beta',\beta]$, connecting $\beta$ to $H$; denote by $H_\beta:= H\cup [\beta',\beta]$ the extended Hubbard tree. Clearly, $H_\beta$ is $p_c$-invariant. Let $x\in H'$ be the dynamical counterpart of $c$. Recall that $x$ is a characteristic periodic or preperiodic point in the sense that the entire orbit of $x$ is contained in the closure of the component of $H'\setminus\{x\}$ that contains $0$ (Lemma~\ref{Lem:DirectlySubordinateDynamics}). In particular, the orbit of $x$ is disjoint from $(x,c']$. It follows that any connected component $I$ of $p_{c'}^{\circ (-n)}([x,c'])$ within $H'$ is either contained in $[x,c']$ or intersects it at most in $\{x\}$.
(Otherwise, $x$ would be in the interior of $I$ and after $n$ iterates $x$ would be mapped into $(x,c']$, but $H'$ does not have a branch point on $(x,c']$ by Lemma~\ref{Lem:NoExtraBranchPoint}). We know from Lemma~\ref{Lem:InjectiveDynamicsLastEdge} that \begin{equation} p_{c'}^{\circ s'}([x,c']) \subset[\alpha,\beta] \label{Eq:IterateOf[x,ctilde]} \end{equation} and, moreover, the orbit of $p_{c'}^{\circ i}([x,c'])$ for $i\in \{0,1,\dots , s'-1\}$ does not contain $0$. Before we finish the proof of Proposition~\ref{Prop:BoundEntropyIncrease}, we need to define a few terms, and we need a lemma. \looseness+1 By a \emph{maximal preimage of $[x,c'] \subset H'$ of generation $n>0$} we mean a connected component $I$ of $p_{c'}^{\circ (-n)}([x,c'])$ so that $I\not\subset p_{c'}^{\circ(-i)}[x,c']$ for every $i\in\{1,2,\dots,n\}$; equivalently, $p_{c'}^{\circ i}(I)\not \subset [x,c']$ for all $i<n$ {(we just proved that this implies that $p_{c'}^{\circ i}(I)\cap[x,c']\subset\{x\}$} ). We denote by $M$ the set of all maximal preimages of $[x,c']$ that are in $[\alpha,\beta]$. An \emph{itinerary} of $I$ will be a sequence $s_{0}s_1\dots s_{n-2}\in\{\texttt{0},\texttt{1}\}^{n-1}$ where each $s_{i}$ describes the connected component of $H'\setminus \{0\}$ containing $p_{c'}^{\circ i}(I)$ (labeled for instance so that the critical value is in the component with label $\texttt{1}$). {Our construction assures that every itinerary is well-defined (the immediate preimage of $I$ contains $0$, but further preimages do not because $[x,c')$ does not contain postcritical points, and $c'$ is not periodic). } \pagebreak \begin{lemma} \label{lem:PreCritInterv} There is an itinerary preserving injection from\nopagebreak \begin{itemize}\nopagebreak \item the set $M$ of maximal preimages of $[x,c'] \subset H'$ of generation $n$ to \item the set of precritical points of $f:H_\beta\to H_\beta$ of generation $n$.
\end{itemize} Moreover, a maximal preimage of $[x,c'] \subset H'$ belongs to the interval $[\alpha,\beta]\subset H'$ if and only if the corresponding precritical point belongs to $[\alpha,\beta]\subset H_\beta$. \end{lemma} \begin{proof} The map $J:H\to H'$ from Lemma~\ref{lem:HintoH'} extends naturally into $J:H_\beta \to H'$ such that the new map embeds the sets of vertices and edges of $H_\beta$ into the sets of vertices and edges of $H'$. Every interval $I\in M$ is uniquely characterized by a sequence $\ovl \tau'=(\tau'_0,\eps_{0},\tau'_1,\eps_{1},\dots, \eps_{n-2},\tau'_{n-1})$ so that $\tau'_i$ is the edge of $H'$ containing $p_{c'}^{\circ i}(I)$ and $\eps_i\in \{\texttt{0},\texttt{1}\}$ describes the connected component of $H'\setminus \{0\}$ containing $p_{c'}^{\circ i}(I)$, labeled for instance so that the critical value is in the component with label $\texttt{1}$. Of course, $\eps_i$ is determined by $\tau'_i$ unless $\tau'_i=e'_0$; similarly, $\eps_i$ and $\tau'_{i+1}$ uniquely determine $\tau'_i$. The sequence $\ovl \tau'$ is subject to the following conditions: \begin{itemize} \item $p_{c'}(\tau'_i)\supset \tau'_{i+1}$ \;; \item all $\tau'_i\neq [c',x]$ (by the condition of ``maximal preimage'') \;; \item $\tau'_0\subset [\alpha,-\alpha]$\;; and \item $\tau'_{n-1}=e'_0$. \end{itemize} Since all $\tau'_i\neq [c',x]$ and $\tau'_0\subset [\alpha,-\alpha]$, all $\tau'_i$ are in the image of $J$. Define the sequence \[ \ovl \tau=J^{-1}(\ovl \tau'):= (J^{-1}(\tau'_0), \eps_0, J^{-1}(\tau'_1), \eps_1,\dots , J^{-1}(\tau'_{n-1})). \] By Lemma~\ref{lem:HintoH'}, $ J^{-1}(\tau'_{n-1})=e_0$ while $J^{-1}(\tau'_0)\subset [\alpha,-\alpha]$.
Since $e_0$ contains the critical point (in its interior or in its boundary) the sequence $\ovl \tau$ determines a unique relevant pre-critical point $\ell$ such that $p_c^{\circ i}(\ell)$ is contained in the intersection of $\tau_i$ with the component of $H_\beta\setminus \{0\}$ labeled by $\eps_i$. It is straightforward that a different choice of $I$ leads to a different choice of $\ovl \tau'$, which leads to a different choice of $\ell$. And the itinerary of $I$, which is $\eps_0\eps_1\dots \eps_{n-2}$, is preserved. \end{proof} {Now we continue the proof of Proposition~\ref{Prop:BoundEntropyIncrease}}. By Lemma~\ref{lem:PreCritInterv} the number of intervals in $M$ of generation $n$ that are in $[\alpha,\beta]$ is bounded above by $Ce^{(h+\eps/2)n}$. In order to bound the number of relevant precritical points of any generation $n$ in the dynamics of $p_{c'}\colon H'\to H'$ (these are, by definition, the precritical points in $[\alpha,\beta]$), consider any relevant precritical point $\ell$ of generation $n$. Let $j_1<j_2<\dots <j_m=n$ be the set of all iterates so that $p_{c'}^{\circ j_i}(\ell)\in [x,c']$. Then $\ell$, $p_{c'}^{\circ (j_1+s')}(\ell)$, $p_{c'}^{\circ(j_2+s')}(\ell)$,\dots , $p_{c'}^{\circ(j_{m-1}+s')}(\ell)$ are within $[\alpha,\beta]$; compare \eqref{Eq:IterateOf[x,ctilde]}. This also implies that $j_{i+1}-j_i\ge s'$. Let $I_0,I_1,\dots , I_{m-1}\in M$ be the unique intervals in $M$ containing, respectively, $\ell, p_{c'}^{\circ(j_1+s')}(\ell),p_{c'}^{\circ(j_2+s')}(\ell),\dots , p_{c'}^{\circ(j_{m-1}+s')}(\ell)$; their respective generations are $j_1, j_2-j_1-s',\dots , j_{m}-j_{m-1}-s'$.
We claim that the itinerary $\underline{s}=s_0s_1s_2\dots s_{n-2}$ of $\ell$ is as follows: \begin{itemize} \item $s_0s_1\dots s_{j_1-2}$ is the itinerary of $I_0$; \item $s_{j_i}s_{j_i+1}\dots s_{j_i+s'-1}$ is the kneading sequence of $c'$; \item $s_{j_{i}+s'}s_{j_{i}+s'+1}\dots s_{j_{i+1}-2}$ is the itinerary of $I_i$; and \item all $s_{j_{i}-1}$ are arbitrary in $\{\texttt{0},\texttt{1}\}$. \end{itemize} We justify this as follows: $p_{c'}^{\circ(j_1-1)} $ maps $I_0$ homeomorphically, while $p_{c'}^{\circ j_1}\colon I_0\to [x,c']$ is a $2:1$-map, so the first $j_1-2$ iterates do not contain $0$ and all points in $I_0$ have the same entries in their itineraries up to entry number $j_1-2$. Since $p_{c'}^{\circ j_i}(\ell)\in [x,c']$, the next iterates are the same as for $[x,c']$ and, in particular, for $c'$, hence equal to the kneading sequence of $c'$, at least before $c'$ lands at the $\beta$ fixed point, that is for $s'-1$ iterations. The iterate $p_{c'}^{\circ (j_i+s')}(\ell)$ is by definition in $I_i$, and this interval travels forward homeomorphically until it covers $0$, which is the iteration before it reaches $[x,c']$ the next time; since the latter is at iterate $j_{i+1}$, the itinerary of $\ell$ coincides with that of $I_i$ until position $j_{i+1}-2$ (analogous to the beginning). In the subsequent iterate, the image interval $p_{c'}^{\circ (j_{i+1}-j_i-1)}(I_i)$ contains $0$, so both entries in the itinerary are possible. Now consider the set of all precritical points in $[\alpha,\beta]$ of generation $n$ corresponding to a particular combinatorial pattern $(j_1,j_2,\dots,j_m,n)$. We just showed that in order to determine the itinerary of $\ell$ we only need to specify $s_{j_1-1},s_{j_2-1},\dots , s_{j_m-1}\in \{\texttt{0},\texttt{1}\}$ as well as the intervals $I_0,I_1,\dots , I_{m-1}$ as above; their numbers we estimated {in Lemma~\ref{lem:PreCritInterv}}.
Therefore, the total number of precritical points with pattern $(j_1,j_2,\dots,j_m,n)$ is at most \[ 2 C e^{(h+\eps/2)(j_1-1)} \left(\prod_{i=1}^{m-1} 2C e^{(h+\eps/2)(j_{i+1}-j_i-s'-1)}\right)\le2C e^{(h+\eps/2)n} \] because $2Ce^{-(h+\eps/2)s'}\le 1$ by hypothesis. Since the number of combinatorial patterns is at most $e^{(n/s')\log(s'+1)}$ (Lemma~\ref{Lem:CombinatorialPatterns}), it follows that $N_{c'}(n) \le 2 C e^{n(h+\eps/2+\log(s'+1)/s')}$. Therefore \begin{align*} \tilde h(c') &\le \limsup_n \frac{1}{n}\left(\log 2 +\log C + n (h+\eps/2)+\frac{n}{s'}\log(s'+1) \rule{0pt}{11pt} \right) \\ &\le h+\eps/2+ \frac{\log (s'+1)}{s'} \le h+\eps \; \end{align*} if $s'\ge s$ is sufficiently large. Consider now the case $c=0$. We claim that $\tilde h(c')<\eps$ if $s$ is sufficiently big. Denote by $K'$ the filled-in Julia set of $p_{c'}$. Suppose that $K'\setminus \{\alpha\}$ consists of $q$ connected components (this is equivalent to $c'$ being in the primary $p/q$ limb of the Mandelbrot set for some $p$ coprime with $q$); we enumerate them as $K_1,K_2,\dots ,K_{q}$ such that $K_1$ contains the critical value, $K_q$ contains the critical point, and $p_{c'}$ maps $K_i$ homeomorphically onto $K_{i+1}$ for all $i<q$. Observe that $\beta\in K_q$ and there is a unique $c_i\in p_{c'}^{\circ(q-i)}(\beta)$ such that $c_i\in K_i$. Since $c'$ is the dyadic endpoint of smallest generation in a limb of $c=0$, we see that $c_1$ is the critical value of $p_{c'}$, in particular $q\ge s$. Thus $H'=\bigcup _{i\le q} [\alpha, c_i]$ is a star-like tree. The associated transition matrix of $p_{c'}\colon H'\to H'$ is \[M=\left(\begin{matrix} 0 & 1 & 0 &0 & \dots &0 &0 \\ 0 & 0 & 1&0 & \dots &0 & 0 \\ 0 & 0 & 0&1 & \dots &0 & 0 \\ \hdotsfor{7} \\ 0 & 0& 0 &0 & \dots & 0 &1\\ 2 & 0& 0 &0 & \dots & 0 &1 \end{matrix} \right).\] It is easy to see that if $q\ge s$ is big enough, then the leading eigenvalue of $M$ is close to $1$, thus its logarithm is close to $0$.
\end{proof} \begin{theorem}[Continuity of Entropy at Hyperbolic Components] \label{Thm:ContEntropyHypComps} \lineclear The core entropy function $h$ is continuous at all angles that are associated to hyperbolic components of $\mathscr M$. \end{theorem} \begin{proof} Let $W$ be a hyperbolic component of $\mathscr M$ of some period $n$ and suppose an angle $\vartheta$ is associated to $W$, in the sense that the parameter ray at angle $\vartheta$ lands at $\partial W$ (in fact, all we are using is that the ray accumulates at $\partial W$; we are not assuming the known fact that all such rays actually land). Let $c_1$ be the root of $W$ and $c_2$ the bifurcation point in $\partial W$ of the period $2n$ component. The combinatorial arc $[c_1,c_2]$ consists of the two internal rays of $W$ connecting the center, say $c_0$, to $c_1$ and to $c_2$. Every sublimb $L$ of $W$ has a leading dyadic, say $c_L$, at which the entropy within $L$ is maximal (Theorem~\ref{Thm:TiozzoConj}). Unless $L$ is the $1/2$-limb, the vein of $c_L$ terminates at $c_0$, so for given $\eps>0$, by Proposition~\ref{Prop:BoundEntropyIncrease} there are only finitely many limbs of $W$ in which the entropy exceeds $\tilde h(c_1)+\eps$. This immediately implies continuity of $h$ at all irrational angles $\vartheta$ associated to $W$. If $\vartheta$ is a rational angle associated to $W$, then either $c(\vartheta)$ is the root of $W$, or $c(\vartheta)$ is the parameter where $W$ bifurcates to a component $W'$ with period a proper multiple of $n$. In the latter case, Proposition~\ref{Prop:BoundEntropyIncrease} implies continuity of $h$ at $\vartheta$ among all rays that are not in the wake of $W'$. The same argument, applied to $W'$, implies continuity at $\vartheta$ among all rays in the wake of $W'$. We finally have to discuss the case that $\vartheta$ lands at the root of $W$. Continuity among rays in the wake of $W$ is handled once again as before. 
The only case left is when $W$ is a primitive hyperbolic component of period $n\ge 2$ (in the period $n=1$ case every ray is in the wake of $W$). Here we use the fact that entropy is continuous along the combinatorial arc from $0$ to $W$ \cite[Theorem~4.9]{Jung}, so there is a postcritically finite parameter $c_3\prec c_1$ with $\tilde h(c_3)\ge \tilde h(c_1)-\eps$. By Proposition~\ref{Prop:BoundEntropyIncrease} there are at most finitely many veins ending at $[c_3,c_1]$ with entropy variation greater than $\eps$. Therefore, $h$ is continuous at $\vartheta$ among all rays outside of the wake of $W$. \end{proof} \begin{theorem}[Continuity of Entropy Near Veins] \label{Thm:ContinuityNearVeins} \lineclear Suppose $\vartheta\in\mathbb{S}^1$ is such that topological entropy is continuous along the (combinatorial) vein $[0,c(\vartheta)]$ connecting the parameters $0$ to $c(\vartheta)$ in $\mathscr M$. Then $\tilde h$ is continuous for all parameters on $[0,c(\vartheta)]$, and $h$ is continuous at all angles $\phi$ that correspond to parameters on $[0,c(\vartheta)]$. \end{theorem} \begin{remark} It may be helpful to explain the statement. Let $\gamma\colon[0,1]\to \mathbb C$ be a parametrization of the (combinatorial) arc $[0,c(\vartheta)]$. Then the hypothesis says that $\tilde h(\gamma(t))$ is continuous for $t\in[0,1]$ (only considering parameters along the arc). The conclusion is that then $\tilde h\colon\mathscr M\to[0,\log 2]$ is continuous at $\gamma(t)$ for all $t$ (where $\gamma(t)$ is now viewed as an element of $\mathbb C$, not just of the arc). Note that this hypothesis is known to be true for all angles $\vartheta\in\mathbb{S}^1$, except when $c(\vartheta)$ is an endpoint of $\mathscr M$ at an irrational angle \cite[Theorem~4.9]{Jung}; we will treat the missing case in Section~\ref{Sec:IrratEndpoints}. \end{remark} \begin{proof} We start the proof with an auxiliary consideration that does not involve the angle $\vartheta$.
Suppose there are two parameters $c_a\prec c_b\in\mathscr M$ with $0\le \tilde h(c_b)-\tilde h(c_a)\le\eps$; we allow $c_b$ to be a (combinatorial) endpoint of $\mathscr M$. Denote by $\text{wake}(c_a)$ the open wake of $c_a$: this is the set of all parameters in $\mathscr M$ that are separated from $0$ by two parameter rays landing at (or accumulating at) $c_a$. If $c_a$ is a Misiurewicz-Thurston-parameter, then we set $\text{wake}(c_a)$ to be the subwake of $c_a$ containing $c_b$. Similarly $\text{wake}(c_b)$ is defined; if $c_b$ is a combinatorial endpoint of the Mandelbrot set, then $\text{wake}(c_b)=\emptyset$. Set $W:=\ovl{\text{wake}(c_a)}\setminus\text{wake}(c_b)$. By Proposition~\ref{Prop:BoundEntropyIncrease} there are at most finitely many dyadic veins $[c_i,c'_i]$ with $c_i\in [c_a,c_b]$ such that the entropy variation along $[c_i,c'_i]$ exceeds $\eps$. We may suppose that $(c_i,c'_i]\cap [c_a,c_b]=\emptyset$, possibly by replacing $[c_i,c'_i]$ with the closure of $[c_i,c'_i]\setminus[c_a,c_b]$. We will construct a ``reduced wake'' $W'\subset W$ in which the entropy variation is at most $2\eps$. By the Branch Theorem \cite{Orsay}, \cite[Theorem~3.1]{MandelBranch}, the points $c_i$ are either Misiurewicz-Thurston parameters or centers of hyperbolic components. In both cases, we will exclude a subwake at $c_i$ from $W$ where the entropy variation is large. If $c_i$ is a Misiurewicz-Thurston-parameter, let $W_i$ be the subwake of $c_i$ containing $c'_i$ and thus $(c_i,c'_i]$. If $c_i$ is the center of a hyperbolic component, say $H_i$, then let $W_i$ be the subwake of $H_i$ that contains $c'_i$ (the root of this wake is a bifurcation parameter on $\partial H_i$). In both cases, $W_i$ does not contain $c_b$. Set $W':=W\setminus \bigcup_i \ovl{W_i}$ {(recall that the union is finite)}.
The external angles corresponding to rays in $W'$ occupy finitely many intervals, and the maximal entropy of these angles occurs either at an interior point or at an endpoint. In the first case, the maximum is at a dyadic angle by Theorem~\ref{Thm:TiozzoConj} (the Tiozzo Conjecture), and our construction is such that this maximum is at most $\tilde h(c_a)+2\eps$. In the second case, it occurs at an angle corresponding to some $c_i\in[c_a,c_b]$ with entropy $\tilde h(c_i)\le \tilde h(c_b)\le \tilde h(c_a)+\eps$ (if $c_i$ is the center of a hyperbolic component, then the ray at the angle with entropy maximum does not land at $c_i$, but at a parabolic boundary point of the same hyperbolic component, with equal entropy). Therefore, for all parameter rays $R(\phi)\subset W'$ we have $\tilde h(c_a)\le h(\phi)\le \tilde h(c_a)+2\eps$. Now we start the actual proof: consider a parameter $c\in[0,c(\vartheta)]$ with $c=c(\phi)$ for some $\phi\in\mathbb{S}^1$. We claim that $\tilde h$ is continuous at $c$ and that $h$ is continuous at $\phi$. Fix $\eps>0$. We first consider the case that $c(\vartheta)$ is a combinatorial endpoint and $c=c(\vartheta)$. We prove continuity of $\tilde h$ at $c(\vartheta)$ and of $h$ at $\vartheta$. By hypothesis, entropy is continuous along the combinatorial vein $[0,c(\vartheta)]$, so there is a $c_a\in\mathscr M$ with $0\prec c_a\prec c(\vartheta)$ and $0\le \tilde h(c(\vartheta)) -\tilde h(c_a) \le \eps$. Now using the argument from above we construct a reduced wake $W'$ so that all angles $\vartheta'$ with $R(\vartheta')\subset W'$ satisfy $h(\vartheta')\in[\tilde h(c_a),\tilde h(c_a)+\eps]$. These angles form a neighborhood of $\vartheta$, while $W'$ is a neighborhood of $c$. This completes the proof when $c=c(\vartheta)$ is a combinatorial endpoint.
If $c=c(\vartheta)$ is not a combinatorial endpoint, then we can extend the combinatorial arc $[0,c(\vartheta)]$, so from now on it suffices to assume that $c\in(0,c(\vartheta))$, possibly by replacing $\vartheta$ by a different angle. The second case is that $c$ is neither a Misiurewicz-Thurston-parameter nor on the boundary of a hyperbolic component. In this case, we may choose an arc $(c_a,c_b)\ni c$ (i.e., $c_a\prec c\prec c_b$) with $0\le \tilde h(c_b)-\tilde h(c_a)\le 2\eps$ and proceed as above. The assumptions on $c$ mean that $c\not\in\partial W_i$, so $c$ is still an interior point of $W'$, and we conclude that $\tilde h$ is continuous at $c$ and $h$ is continuous at $\phi$. If $c(\phi)$ is a Misiurewicz-Thurston-parameter, then there are finitely many branches, and the previous argument works separately for all the individual branches. The final case is that $c(\phi)$ is on the boundary of a hyperbolic component --- and that case was handled in Theorem~\ref{Thm:ContEntropyHypComps}. \end{proof} \begin{theorem}[Continuity of Entropy] \label{Thm:Continuity} \lineclear Core entropy $h\colon\mathbb{S}^1\to[0,\log 2]$ is continuous. \end{theorem} \begin{proof} If $\phi\in\mathbb{S}^1$ is such that $c(\phi)$ is on some combinatorial arc $[0,c(\vartheta)]$ along which entropy is continuous, then $h$ is continuous at $\phi$: this is the content of Theorem~\ref{Thm:ContinuityNearVeins}. By work of Tiozzo~\cite{TiozzoThesis} and Jung \cite[Theorem~4.9]{Jung}, this is true for all dyadic angles $\vartheta$. This proves continuity of $h$ at all angles $\phi$ except when $\phi$ corresponds to a combinatorial endpoint of $\mathscr M$ at irrational angle, or when $\phi$ corresponds to a boundary point of a hyperbolic component at irrational angle. The second case, irrational boundary points of hyperbolic components, has been treated in Theorem~\ref{Thm:ContEntropyHypComps}.
The first case will be taken care of in Corollary~\ref{cor:RadialContin}: there we will prove continuity of entropy along all combinatorial arcs $[0,c(\phi)]$ for all irrational endpoints of $\mathscr M$, and then the claim follows as above from Theorem~\ref{Thm:ContinuityNearVeins}. \end{proof} \begin{remark} Recall from Section~\ref{Sec:Definitions} that continuity of $h\colon\mathbb{S}^1\to[0,\log 2]$ implies continuity of $\tilde h\colon\mathscr M\to[0,\log 2]$ (there is a natural continuous projection from $\mathscr M$ to the ``abstract Mandelbrot set'', and $h$ is naturally defined on the latter, so $\tilde h$ is the composition of two continuous maps). \end{remark} \reminder{Current number of reminders: \arabic{treminder}} \section{Irrational endpoints} \label{Sec:IrratEndpoints} In this section we prove that for every combinatorial endpoint $c$ of $\mathscr M$, entropy is continuous along the combinatorial arc $[0,c]$. This is known when $c$ is postcritically finite \cite[Theorem~4.9]{Jung}, but we need it in all cases. This proof provides the missing step in the continuity proof in Theorem~\ref{Thm:Continuity}. {In fact, we prove the result somewhat more generally for endpoints that are non-dyadic, whether or not they are irrational.} We will approximate the non-dyadic endpoints by dyadic ones, and of course we need uniform estimates for the latter (Proposition~\ref{prop:Uniformity}). \subsection{Hubbard Trees, Automata, and Renormalization} \label{Sub:TreesAutomataRenormalization} Consider a dyadic endpoint $c'$ of $\mathscr M$ with external angle ${q}/{2^m}$ and Hubbard tree $H'$. Then the critical value and all further postcritical points are endpoints of $H'$, so vertices of $H'$ are either endpoints or branch points. As in Lemma~\ref{Lem:MarkedPointsSubset_new} we add $\alpha$ and $-\alpha$ to the vertex set of $H'$ (if necessary).
Easy calculations show that $H'$ has \begin{itemize} \item $m+1$ endpoints; \item at most $m-1$ branch points; \item at most $2m+2$ vertices (including $\alpha$ and $-\alpha$); and \item at most $2m+1$ edges. \end{itemize} Let $c$ be the postcritically finite parameter where the vein of $c'$ terminates. We will assume that $c\not=0$ so that we are in the setting of Lemma~\ref{Lem:MarkedPointsSubset_new}. We denote by $H$ the Hubbard tree of $c$; note that $\beta \not\in H$. Denote by $x\in H'$ the dynamic counterpart of $c$ as in Definition~\ref{Def:DynamCounterpart}. \begin{lemma} The set $\{ p_{c'}^{\circ k}(x): k\ge 0\}\setminus \{\text{vertices of }H'\}$ contains at most $m$ points; equivalently, $p_{c'}^{\circ m}(x)$ is a branch point of $H'$. \end{lemma} \begin{proof} Let $\tilde c$ be the dyadic parameter with $c'\lhd \tilde c$, so $c'$ is directly subordinate to $\tilde c$. Then $c\prec c'$ and $c\prec \tilde c$. Denote by $\tilde q/ 2^n$ the external angle of $\tilde c$; note that $n<m$. We will work in the dynamical plane of $p_{c'}$. The rays landing at $x$ separate $R(0), R(\tilde q/2^n)$, and $R(q/2^m)$ (Lemma~\ref{Lem:DirectlySubordinateDynamics}; the point $x$ is denoted $x_*$ there), so $x$ has at least three branches in $H'\cup [x(\tilde q/ 2^n),x]$, where $x(\tilde q/ 2^n)$ denotes the landing point of $R(\tilde q/ 2^n)$. We claim that $p_{c'}^{\circ n}[x(\tilde q/ 2^n),x]\subset H'$. Indeed, {for $k\ge 0$} let $T_k$ be the minimal tree connecting the $\alpha$ fixed point to all dyadic endpoints of generation at most $k$. Since endpoints of $p_{c'}(T_{k-1})$ are endpoints of $T_{k-2}$ as well as the critical value, we have $p_{c'}(T_{k-1})\subset T_{k-2}\cup H'$ and, by induction, indeed $p_{c'}^{\circ n}(T_{n})\subset H'$. Therefore, $p_{c'}^{\circ n}(x)$ has at least $3$ branches in $H'$ because $p_{c'}^{\circ n}:H' \cup [x(\tilde q/ 2^n),x]\to H'$ is locally injective near $x$.
Since $m>n$, also $p_{c'}^{\circ m}(x)$ is a branch point (and also $p_{c'}^{\circ (m-1)}(x)$). \end{proof} \goodbreak Let us now refine $H'$ by adding the finite set $\{ p_{c'}^{\circ k}(x): k\ge 0\}$ to its vertex set. The new tree, still called $H'$, has \begin{itemize} \item at most $3m+2$ vertices; and \item at most $3m+1$ edges. \end{itemize} \begin{corollary} \label{cor:EdgesOfH} The tree $H$ has \begin{itemize} \item at most $2m+1$ vertices; and \item at most $2m$ edges. \end{itemize} \end{corollary} \begin{proof} We have a natural injection of marked points (vertices) in $H$ to marked points in $H'$ (Lemma~\ref{Lem:MarkedPointsSubset_new}) that is compatible with the dynamics and with edges connecting marked points (Lemma~\ref{lem:HintoH'}), and the $m+1$ endpoints of $H'$ are not in the range of this injection (Lemma~\ref{Lem:MarkedPointsSubset_new}). \end{proof} \emph{Overview on the argument.} The key idea of our proof consists of identifying the dynamics of $p$ on $H$ as an embedded subset of the dynamics of $p'$ on $H'$. Since entropy measures the growth rate of choice of orbits of length $n$, the entropy of $p'$ on $H'$ is no less than the entropy of $p$ on $H$, and we need to give an upper bound on the difference. An orbit in $H'$ that realizes the additional choice is one that leaves the embedded image of $H$ in $H'$, and we show that it starts on a single edge $[x,c']$ at the critical value. We show that this edge maps forward homeomorphically a large number of iterations: so if some orbit uses the additional choice, then it will not have any choice for a long time, and this will give an upper bound on the entropy increase. \emph{Automata}. Here and elsewhere, we find it convenient to express some combinatorial properties in terms of \emph{automata}. We would like to reassure the reader that we only use the basic notion without results from automata theory and hope it will not be distracting. 
The concept is simple: given a postcritically finite polynomial $p=p_{c}$ with Hubbard tree $H$, we associate to it an automaton $A$ in a natural way, as follows. The states of $A$ correspond to the edges of $H$. There is an arrow in $A$ from edge $e_1$ to edge $e_2$ whenever $p(e_1)\supset e_2$; the number of arrows from $e_1$ to $e_2$ equals the number of times $p(e_1)$ covers $e_2$ under $p$ (this is well defined because we have a Markov partition). This number of arrows equals $0$ or $1$, except for the unique edge (if any) that contains the critical point in its interior. Similarly, denote by $A'$ the natural automaton associated with $p'=p_{c'}\colon H'\to H'$. In Lemma~\ref{lem:HintoH'}, we had identified the dynamics on edges of $H$ as a subset of the dynamics on edges of $H'$. We will find it convenient to express this fact by saying that the automaton $A$ can be considered as a sub-automaton of $A'$; this is done in the following lemma. \begin{lemma}[Automata and Edges in Hubbard Trees] \label{Lem:MappingEdgesHubbardTrees} \lineclear Let $J : H\to H'$ be the embedding of vertices and edges of the Hubbard tree of $H$ into vertices and edges of the Hubbard tree of $H'$ as in Lemma~\ref{lem:HintoH'}. Then $J$ induces an inclusion of automata $A \hookrightarrow A'$ by mapping a state $e$ of $A$ into the state $J(e)$ of $A'$, {and this inclusion is compatible with the number of arrows between states}. There are exactly two arrows in $A'$ going from $J(A)$ to $A'\setminus J(A)$, and they connect the same states (they start at the state corresponding to the edge containing the critical point, and they end at the state corresponding to the edge $[c',x')$). \end{lemma} \begin{proof} By Lemma~\ref{lem:HintoH'}, the inclusion $J$ injects the set of states of $A$, which are edges of $H$, into the set of states of $A'$, which are edges of $H'$.
Moreover, the number of arrows from $a_1$ to $a_2$ (i.e., the degree of the corresponding map on the edge) is equal to the number of arrows from $J(a_1)$ to $J(a_2)$. The claim concerning the edges from $J(A)$ into $A'\setminus J(A)$ is a straightforward reformulation of the last claim of Lemma~\ref{lem:HintoH'}. \end{proof} In view of this lemma, we may simply write $A\subset A'$; this will help us compare the two automata. We need to discuss whether the dynamics on our trees is \emph{renormalizable}. A quadratic polynomial $p$ is $n$-renormalizable when there exists a proper subset $K_r$ (the ``little Julia set'') of the filled-in Julia set so that $K_r$ is compact, connected, and full and $p_c^{\circ n}\colon K_r\to K_r$ is a proper map of degree $2$, topologically conjugate to another polynomial in $\mathscr M$. It is well known that this means that $p$ belongs to a small embedded copy of the Mandelbrot set within itself (here and elsewhere, we ignore the possibility of ``crossed renormalization''; see \cite{McMullenRenormalization,CrossRenorm}: we use ``renormalizable'' in the meaning of ``simple renormalizable''). Since $H'$ is dyadic, its dynamics is never renormalizable, but $H$ may be. If the critical point of $H$ is periodic of some period $n$, then the dynamics is trivially $n$-renormalizable, and the Hubbard tree resulting from $n$-renormalization is trivial (a single point). This particular case of renormalizability is not related easily to the dynamics of edges and thus to the automaton $A$. However, we have the following. The following result is standard and its proof is omitted; compare McMullen~\cite[Sec.~7]{McMullenRenormalization}.
\begin{lemma}[Immediately Satellite Renormalization] \label{Lem:RenormMinimalPeriod} \lineclear If $p_c$ is $n$-renormalizable and $n$ is minimal with this property, and the small Julia sets of the $n$-renormalization are denoted $K_i$, then either \begin{itemize} \item all $K_i$ contain the $\alpha$ fixed point of $p_c$; in this case $H\subset \bigcup_{i=0}^{n-1}K_i$; or \item the $K_i$ are pairwise disjoint. \end{itemize} \end{lemma} \begin{remark} There are renormalizable Hubbard trees in which some $K_i$ intersect and others do not, but this does not happen when the renormalization period $n$ is chosen minimal. \looseness-1 In parameter space, Lemma~\ref{Lem:RenormMinimalPeriod} may be expressed as follows. If $p_c$ is renormalizable, let $\mathscr M'$ be the largest renormalization copy of $\mathscr M$ containing $p_c$ (corresponding to the least period of \mbox{renormalization}); the two cases in the lemma depend on whether or not $\mathscr M'$ touches the main cardioid of $\mathscr M$. If not, the main component of $\mathscr M'$ is primitive. (There are many renormalization copies of $\mathscr M$ within $\mathscr M$ that are non-primitive and that do not touch the main cardioid of $\mathscr M$; these are contained in larger renormalization copies of $\mathscr M$, and they describe $n$-renormalizable parameters for which $n$ is not minimal.) We call a parameter \emph{immediately satellite renormalizable} if the first case in Lemma~\ref{Lem:RenormMinimalPeriod} is realized. \end{remark} \begin{lemma}[The Hubbard Tree and Renormalization] \label{Lem:HubbardTreeRenormalization} \lineclear For every edge $e$ of $H$, at least one of the following is true: \begin{itemize} \item there is a $k\ge 0$ such that $p_c^{\circ k}(e)=H$; or \item $p_c$ is renormalizable and there is a cycle $K_0,K_1,\dots ,K_{n-1}$ of small filled-in Julia sets of $p_c$ such that $e\subset \bigcup_{i=0}^{n-1}K_i$.
\end{itemize} \end{lemma} \begin{proof} We start by assuming that $p_c$ is $n$-renormalizable; let $n$ be minimal with this property. We may assume that all $K_i$ are disjoint: if not, then by Lemma~\ref{Lem:RenormMinimalPeriod} all $K_i$ meet at the $\alpha$ fixed point of $p_c$, and thus $H\subset \bigcup_{i=0}^{n-1}K_i$, so the result is clear. We need the following properties. \begin{itemize} \item[(A)] If $T$ is a periodic connected subset of $H$ such that $T \neq H$ but $T$ contains at least two points, then $T\subset K_i$ for some $i$. \end{itemize} Indeed, since $T$ is periodic but not a singleton, the forward orbit of $T$ contains a critical point. Hence around $T$ there is a small filled-in Julia set of $p_c$. \begin{itemize} \item[(B)] If $K$ is one of the $K_i$, or an (iterated) preimage thereof, then $H\setminus K$ has at most two connected components. \end{itemize} Indeed, there must be some $K_i$ so that $H\setminus K_i$ is connected (start with an arbitrary $K_0$, and if $H\setminus K_0$ is not connected, choose some $K_1\neq K_0$, and then some $K_2$ in a different component of $H\setminus K_1$ than $K_0$; this process must terminate at some $K_i$ for which $H\setminus K_i$ is connected). Further, if $K_j$ does not contain the critical point, then $H\setminus K_j$ has no more connected components than $H\setminus p_c(K_j)$; and if $K_j$ contains the critical point, then $H\setminus K_j$ can have at most twice as many components as $H\setminus p_c(K_j)$. Therefore, $H\setminus K$ has at most two connected components for any iterated pre-image $K$ of $K_0$. \begin{itemize} \item[(C)] Every vertex $v\in H$ either has $v\in K_i$ for some $i$ or the forward orbit of $v$ is disjoint from $\bigcup _{i=0}^{n-1}K_i$. \end{itemize} \looseness-1 To see this, consider a non-periodic iterated pre-image $K'$ of $K_i$.
Then $H\setminus K'$ has at most two connected components by (B). Since the critical point is contained in some periodic $K_0$, it follows that $K'$ contains no postcritical point. A vertex in $K'$ must thus be a branch point, and this is possible only if $H\setminus K'$ has at least three components, which is not the case. \looseness-1 Suppose now that $e\not \subset \bigcup_{i=0}^{n-1}K_i$ and let the endpoints of $e$ be $v_1$ and $v_2$. Then for some $m\ge 0$ the vertices $p_c^{\circ nm}(v_1)$ and $p_c^{\circ nm}(v_2)$ are periodic. Since $e\not \subset \bigcup_{i=0}^{n-1}K_i$, property (C) implies that $p_c^{\circ nm}(v_1)$ and $p_c^{\circ nm}(v_2)$ are not in a single $K_i$. Set $e':=p_c^{\circ nm}(e) $ and let $q$ be the least common period of $p_c^{\circ nm}(v_1)$ and $p_c^{\circ nm}(v_2)$. Then $\bigcup_{k\ge 0}p_c^{\circ qk} (e')$ is a periodic subset of $H$; thus $\bigcup_{k\ge 0}p_c^{\circ qk} (e')=H$ by (A). It is easy to see that $\bigcup_{k\ge 0}p_c^{\circ qk}(e') = p_c^{\circ qm'}(e') $ for some $m'$ (the set $p_c^{\circ qk}(e')$ is increasing in $k$). Finally, if $p_c$ is not renormalizable, choose an edge $e=[v_1,v_2]$ and an $m\ge 0$ so that $p_c^{\circ m}(v_1)$ and $p_c^{\circ m}(v_2)$ are periodic. Let again $e':=[p_c^{\circ m}(v_1),p_c^{\circ m}(v_2)]$; then $\bigcup_{k\ge 1}p_c^{\circ k} (e')$ is periodic, and by (A) it follows that either $p_c$ is renormalizable or $\bigcup_{k\ge 1}p_c^{\circ k} (e')=H$, and in the latter case again $p_c^{\circ m'} (e')=H$ for some $m'$. \end{proof} We want to relate renormalization of $H$ to properties of the associated automaton $A$.
We write $A=A_n\cup A_r$ so that \begin{itemize} \item $A_n$ (non-renormalizable edges) contains all states that, for some fixed finite iterate, reach all states of $A$ simultaneously; and \item $A_r$ (renormalizable edges) contains all states from which {not all} of $A$ can be reached simultaneously (these correspond to edges within the Hubbard trees of ``small Julia sets'' corresponding to renormalization domains). \end{itemize} We should remark that similar notions can be found in \cite[Sec.~3.3]{Jung}. \begin{lemma}[Renormalization and Automata] \label{Lem:RenormAutom} \lineclear Suppose that $p_c$ is postcritically finite. We have $A_r=\emptyset$ if and only if \begin{itemize} \item either $p_c$ is non-renormalizable \item or $c$ is periodic of some period $n$ and $p_c$ is $n'$-renormalizable only for $n'=n$ and so that all little Julia sets are disjoint. \end{itemize} If $p_c$ is renormalizable, let $n$ be minimal so that $p_c$ is $n$-renormalizable. Let $K_0,K_1,\dots, K_{n-1}$ be the cycle of small filled-in Julia sets of $p_c$. Then $A_r$ is the set of states of $A$ so that the associated edges are within $\bigcup_{i=0}^{n-1}K_i$. \end{lemma} \begin{proof} Let again $H$ denote the Hubbard tree of $p_c$. If $p_c$ is non-renormalizable, then it follows from Lemma~\ref{Lem:HubbardTreeRenormalization} that every edge $e\subset H$ satisfies $p_c^{\circ k}(e)=H$ for all sufficiently large $k$, so $A_r=\emptyset$. Suppose $p_c$ is renormalizable and let $n$ be minimal so that $p_c$ is $n$-renormalizable. If $e$ is an edge of $H$ such that $e\not\subset \bigcup_{i=0}^{n-1} K_i$, then every sufficiently high iterate of $e$ will cover all of $H$ (Lemma~\ref{Lem:HubbardTreeRenormalization}), and the associated state of $A$ is in $A_n$.
The other case is that $e\subset \bigcup_{i=0}^{n-1}K_i$; then by Lemma~\ref{Lem:RenormMinimalPeriod} either the $K_i$ are disjoint or they all touch at the $\alpha$ fixed point (which is a vertex of $H$ by our convention), and in both cases we have $e\subset K_i$ for a unique $i$. In this case, the orbit of $e$ will follow the orbit of $K_i$ and every edge is in only one $K_i$, so it follows that the associated state is in $A_r$. \looseness-1 We conclude that if $p_c$ is renormalizable, then $A_r=\emptyset$ if and only if $\bigcup_{i=1}^{n-1} K_i$ contains no edge of $H$, and that is the case if and only if every ``little Hubbard tree'' $H_i\subset K_i$ (corresponding to the Hubbard tree after renormalization) is trivial and the $K_i$ are all disjoint. Finally, the little Hubbard trees are trivial if and only if $c$ is periodic of period $n$. \end{proof} An automaton is called \emph{irreducible} if every state of $A$ can be reached from every other. This is certainly the case when $A$ is non-renor\-ma\-li\-zable, but may also happen in the renormalizable case: for instance, the rabbit polynomial with a superattracting $3$-cycle has its Hubbard tree in the form of a topological \textsf{Y} where the three edges are permuted cyclically: the automaton has the form $e_0\to e_1\to e_2\to e_0$ and is irreducible, but the dynamics is renormalizable. The difference is that no edge covers all of $A$ after the same number of iterations (see Lemma~\ref{Lem:RenormAutom}). If $A_r\neq\emptyset$, so that the dynamics is renormalizable, we may have $A_n=\emptyset$ or $A_n\neq \emptyset$. The next lemma shows that the former case happens only in the case of immediate satellite renormalization. \begin{corollary}[Existence of Limit] \label{Cor:ExistenceLimit} \lineclear In the dynamics of $p_c$, the limit $\lim_{n\to\infty} (1/n)\log N(n)$ exists whenever $p_c$ is not immediately satellite renormalizable.
\end{corollary} \begin{proof} Consider a parameter $c\in\mathscr M$ such that $c$ is not immediately satellite renormalizable. Let $e$ be the unique edge in $[\alpha, -\alpha]\subset H$ such that $e$ is attached to $\alpha$. By Lemma~\ref{Lem:RenormAutom} we have $e\in A_n$. Therefore, there is a $k\ge 0$ such that $p_c^{\circ k}(e)=H$, see Lemma~\ref{Lem:HubbardTreeRenormalization}. Thus $e$ contains as many pre-critical points of generation $n+k$ as the number of pre-critical points of generation $n$ in the entire $H$. Since $e\subset [\alpha,-\alpha]$ we get $\sup_{i\le n}N_c(i)\le N_c(n+k)$, hence \[ \limsup_n\frac 1 n \log N_c(n)=\lim_n\frac 1 n \log N_c(n). \] If $c$ is not postcritically finite and not an endpoint of $\mathscr M$ (that is, $c$ is associated to two external angles in $\mathscr M$), then there are two non-immediately-satellite-renormalizable parameters $c_1$ and $c_2$ with $N_{c_1}(n)\le N_c(n)\le N_{c_2}(n)$ and $0\le \tilde h(c_2)-\tilde h(c_1)\le\eps$ for arbitrary $\eps>0$ and the result holds as well (here we use continuity of entropy). Finally, if $c$ is a non-immediately-satellite-renormalizable endpoint, then by continuity of $\tilde h$, for any $\eps>0$ there exists a postcritically finite non-immediately-satellite-renormalizable parameter $c_1\prec c$ so that $\tilde h(c)-\tilde h(c_1)\le \eps$. By monotonicity, we have \[ \liminf \frac 1 n \log N_{c_1}(n) \le \liminf \frac 1 n \log N_c(n) \;, \] and \begin{align*} \limsup \frac 1 n \log N_c(n)&=\tilde h(c)\le \tilde h(c_1)+\eps=\liminf \frac 1 n \log N_{c_1}(n)+\eps \\ &\le \liminf \frac 1 n \log N_{c}(n)+\eps \;. \end{align*} Since $\eps>0$ was arbitrary, the claim follows in this case too. \end{proof} \begin{lemma}[Immediate Satellite Renormalization and $A_n=\emptyset$] \lineclear A polynomial $p_c$ has $A_n=\emptyset$ if and only if either $p_c$ is immediately satellite renormalizable or it has a superattracting fixed point.
\end{lemma} \begin{proof} If $p_c$ is immediately satellite $n$-renormalizable, let $K_0,\dots,K_{n-1}$ be the little Julia sets. By Lemma~\ref{Lem:RenormMinimalPeriod}, we have $H\subset \bigcup_i K_i$ and all $K_i$ touch at the $\alpha$ fixed point. Since the $\alpha$ fixed point is a vertex of $H$, every edge is contained in some $K_i$, so the corresponding state is in $A_r$, hence $A_n=\emptyset$. Also, if $p_c$ has a superattracting fixed point, then $H$ is trivial and $A_n=\emptyset$. For the converse, if $A_n=\emptyset$ but $H$ is not trivial, then $p_c$ must be renormalizable (otherwise $A_r=\emptyset$ by Lemma~\ref{Lem:HubbardTreeRenormalization}), and by Lemma~\ref{Lem:RenormMinimalPeriod} we must have $H\subset\bigcup_i K_i$, so $p_c$ is immediately satellite renormalizable. \end{proof} \begin{remark} Let us note that there are no arrows from $A_r$ to $A_n$, so within $A$ there is no escape from the set of renormalization states $A_r$. However, in $A'\supset A$, if $A_r\neq\emptyset$, then there are two arrows from $A_r$ to $[c',x]$, which is a state in $A'\setminus A$ (see Lemma~\ref{Lem:MappingEdgesHubbardTrees}). \end{remark} \goodbreak We will consider the following special states of $A$ and $A'\supset A$: \begin{itemize} \item the $0$-state in $A'$ contains the critical point. Denoting this state by $e'_0$, we set the $0$-state of $A$ to be $J^{-1}(e'_0)$. This convention is compatible with the inclusion $J\colon A\hookrightarrow A'$ from Lemma~\ref{Lem:MappingEdgesHubbardTrees}. \item the $[c',x]$-state of $A'\setminus A$; \item states of $A$ and of $A'$ that belong to the interval $[\alpha, -\alpha]$. \end{itemize} \subsection{Counting Precritical Paths} \label{Sub:CountingPaths} A \emph{path} in $A$ or $A'$ is a sequence of arrows so that each arrow starts where the previous arrow ends. The \emph{length} of a path is the number of arrows it contains.
We can also think of a path as a sequence of states so that there is an arrow from every state to the subsequent one (that is, a sequence of edges in the Hubbard tree so that each edge covers the next one under the map). When a path connects two states that are connected by multiple arrows, then there are accordingly multiple paths along this sequence of states (as an example, in $A'$ there are two paths of length $1$ from the $0$-state to the $[c',x]$-state). We define a \emph{relevant precritical path} in $A'$ or in $A$ as a path that starts at a state in $[\alpha,-\alpha]$ and terminates at the $0$-state. By basic properties of symbolic dynamics, relevant precritical paths in $A'$ are in bijection with precritical points of $p_{c'}$ in $[\alpha,-\alpha]$ because the critical point of $p_{c'}$ is not a vertex of $H'$. Different relevant precritical paths in $A$ encode different relevant precritical points of $p_c$ in $[\alpha,-\alpha]$. Every relevant precritical path $s$ in $A'$ has the form \begin{equation} \label{eq:DecompOfs} s=b_0c_0a_1b_1c_1a_2b_2c_2\dots a_pb_pc_p \end{equation} such that (roughly: $a_i, b_i, c_i$ are the sub-paths in $ A'\setminus A$, in $A_n$, and in $A_r$ respectively) \begin{itemize} \item $a_i$ is an (almost) ``choiceless'' path that starts at the $0$-state, then goes to $[x,c']$, then travels outside of states in $[\alpha, -\alpha]$, and terminates at the first state reached in $[\alpha,-\alpha]\subset A$; \item if $a_i$ terminates at a state in $A_r$, then $b_i=\emptyset$; otherwise, $b_i$ is a path that starts at the state in $[\alpha,-\alpha]$ where $a_i$ terminates and continues while states in $A_n$ are visited (if $i=0$, then instead of the terminal state of $a_i$ we take the initial state of $s$); \item if $A_r=\emptyset$, then $c_i=\emptyset$; otherwise: if $b_i\not=\emptyset$, then $c_i$ is a path that starts in $A_n$ where $b_i$ terminates, and immediately moves into $A_r$, and otherwise it starts at a
state in $A_r$ where $a_i$ terminates. The end of $c_i$ is the $0$-state, and until then the path remains in $A_r$ (again, if $i=0$, then instead of the terminal state of $a_i$ we take the initial state of $s$). \end{itemize} Observe that paths in $A'$ that are not in $A$ start on the edge $[x,c']$, so they are described by the $a_i$ that are long and have almost no choice, hence contribute little additional entropy. Indeed, every $a_i$ has length at least $m+1$ because $[x,c']$ needs $m$ iterations to reach $[\beta,\alpha]$, and might need further iterations to land in $[\alpha,-\alpha]$ (Lemma~\ref{Lem:InjectiveDynamicsLastEdge}). Once it lands there, we are either in $A_n$ and we continue with a path $b_i$ as long as we stay in $A_n$, or we are already in $A_r$ and $b_i=\emptyset$, and $c_i$ continues until the next visit of the $0$ state. In particular, if $A_n=\emptyset$, then $b_i=\emptyset$. We will refer to $a_i$ as \emph{excursions} (long almost choice-less parts). Defining $\ell_i,t_i, k_i$ as the lengths of $a_i, b_i, c_i$ respectively, we say that $s$ has \emph{combinatorial pattern} $P=(t_0,k_0,\ell_1,t_1,k_1,\ell_2,t_2,k_2,\dots,\ell_p,t_p,k_p)$. \begin{lemma}[Almost Choiceless Paths] \label{lem:ChoiceLessPaths} \lineclear For every length $\ell\ge m+1$, there are at most two possible paths in $A'$ that \begin{itemize} \item start at the $0$-state; \item then immediately go to the $[c',x]$-state; \item then travel outside $[\alpha,-\alpha]$; \item terminate at a given state in $[\alpha,-\alpha]$ \item and have a length $\ell\ge m+1$. \end{itemize} \end{lemma} \begin{proof} First, the edge of $H'$ associated with the $0$-state covers $[c',x]$ with degree $2$ under $p_{c'}$. Then $[x,c']$ maps injectively for at least $m+1$ iterations until $[x,c']$ starts to partially cover $[\alpha,-\alpha]$. But this is, by definition, when the path under discussion terminates.
\end{proof} \begin{lemma}[Number of Paths $c_i$] \label{lem:c_iPaths} \lineclear Suppose $c$ is renormalizable. Let $g$ be the period of the biggest small Mandelbrot set containing $c$. Then there are at most $2^{{k}/{g}}$ paths in $A$ that \begin{itemize} \item start at a given state in $A_n$ or in $A_r$; \item all subsequent states are within $A_r$; \item terminate at the $0$-state; and \item have length $k$. \end{itemize} \end{lemma} \begin{proof} Let $H_0,H_1,\dots H_{g-1}$ be the cycle of small Hubbard trees associated with the largest renormalizable Hubbard trees (corresponding to the largest small Mandelbrot set containing $c$). Then the degree of $p_c^{\circ gt}:H_{0}\to H_{0}$ is at most $2^t$ for all $t$. Therefore, there are at most $2^{t}$ paths in $A_r$ with length $k\in \{ gt,gt+1,\dots, g(t+1)-1\}$ that terminate at the $0$-state; so for given length $k$, the number of such paths is at most $2^t=2^{\lfloor k/g \rfloor}$ (and we have not even counted the first step from a given state of $A$ to $A_r$). \end{proof} Fix a combinatorial pattern $P=(t_0,k_0,\ell_1,t_1,k_1,\ell_2,t_2,k_2,\dots,\ell_p,t_p,k_p)$ and let $n=|P|=t_0+k_0+\sum_{i=1}^p(\ell_i+t_i+k_i)$. When comparing entropy in $A'$ and in $A$, we will consider the additional relevant precritical paths in $A'$ and show that they correspond to relevant precritical paths in $A$ of bounded length, so that there are not too many additional paths in $A'$. More precisely, if an excursion has length $\ell_i\ge 3m$ then the new path within $A$ will be shorter (or have equal length) than before. We thus introduce a quantity $\kappa$, called \emph{uncertainty of $P$}, that measures the possible increase of length as follows: \[ \kappa(P):= \frac 1 n \sum_{\ell_i<3m}(3m- \ell_{i}) = \frac 1 n \sum_i \max(0,3m-\ell_i) \] (the first sum is taken over all $\ell_i$ that are less than $3m$).
Higher values of $\kappa$ create problems as the paths in $A$ might be shorter than the corresponding paths in $A'$. Since all $\ell_i>m$ and $\sum\ell_i\le n$, we have $3m-\ell_i\le 2m < 2\ell_i$ and $\kappa(P)\in[0, 2]$. Denote by $N'(P)$ the number of all precritical paths in $A'$ with pattern $P$. For $\kappa\in [0,2]$ define \[ N'(\kappa,n):= \sum_{\kappa(P)\le \kappa,\ |P|=n} N'(P) \;, \] the number of precritical paths in $A'$ with small uncertainty. We define $S'(\kappa,n)$ to be the corresponding set of relevant precritical paths with small uncertainty, so that $N'(\kappa,n)=|S'(\kappa,n)|$. \begin{lemma}[Replacing a Path in $A'$ by a Path in $A$] \label{lem:substitution} \lineclear Suppose $A_n\neq\emptyset$. If $s=b_0c_0 a_1b_1c_1a_2b_2c_2\dots a_pb_pc_p$ is a precritical path in $A'$ with uncertainty $\kappa$, then there are paths $a^*_i$ in $A_n$ with lengths in $\{m,\dots,3m\}$ such that $s^*:=b_0 a^*_1 b_1a^*_2b_2\dots a^*_p b_pc_p$ is a path in $A$. If $n$ is the length of $s$, then $s^*$ has length at most $n + \kappa n$. \end{lemma} \begin{proof} Recall that some $b_i$ might be empty paths. Choose any state $a\in A_n$. For convenience, we say that $a$ is the beginning and the end of every empty $b_i$ with $i<p$. If $b_p$ is empty but $c_p$ is not, then we say that the beginning of $b_p$ is the beginning of $c_p$. If $b_pc_p$ is empty, then the beginning of $b_p$ is the $0$ state. Since $A_n$ is irreducible and has fewer than $2m$ vertices (Corollary~\ref{cor:EdgesOfH}), we may replace every $c_ia_{i+1}$ by a path $a^*_{i+1}$ in $A_n$ of length at most $2m$ so that $a^*_{i+1}$ connects the end of $b_{i}$ with the beginning of $b_{i+1}$ (which are by definition both in $A_n$); by adding up to $m$ arbitrary steps at the beginning, we may arrange things so that $a^*_i$ has length in $\{m,\dots,3m\}$.
Since the length of each $a_i$ is at least $m$, this procedure increases the length of $s$ by at most $\kappa n$ (and even shortens it whenever $c_ia_{i+1}$ has length greater than $3m$). \end{proof} \begin{lemma}[Counting Patterns] \label{lem:PatternsGrowth} \lineclear The quantity \[ \limsup_{n}\frac{1}{n}\log\left(\# \{\text{patterns of length }n\}\rule{0pt}{11pt}\right) \] tends to $0$ as $m$ tends to infinity. \end{lemma} \begin{proof} Every pattern $P$ is uniquely characterized by a non-decreasing sequence of positive integers \[t_0,t_0+k_0,t_0+k_0+\ell_1, t_0+k_0+\ell_1+t_1, \dots. \] Since $\ell_i\ge m+1$, for every $q\in\{0,\dots,\lfloor n/m\rfloor\}$ the interval $[qm,(q+1)m)$ contains at most $3$ elements of the above sequence; and the same is true for the final interval $[\lfloor n/m\rfloor m+1, n]$. Therefore, the number of all patterns is bounded by $Z(n,m):=((m+1)^3)^{(n/m)+1}=e^{{3(n+m)\log(m+1)}/{m}}$. For fixed $m$, we have $\limsup_n (1/n)\log Z(n,m)=3\log(m+1)/m$, and indeed this tends to $0$ as $m\to\infty$. \end{proof} {Recall that $c'$ is a dyadic parameter of generation $m$ and $c$ is the parameter where the vein of $c'$ terminates. } \begin{proposition}[Uniformity] \label{prop:Uniformity}\lineclear For every $\varepsilon >0$ there are $\overline g, \overline m, \ovl\kappa>0$ depending only on $\varepsilon$ but not on $c$ and $c'$ such that the following holds. If \begin{itemize} \item $m\ge \overline m$, \item either $c$ is non-renormalizable, or the period of the largest small Mandelbrot set containing $c$ is at least $\overline g$, \item $\kappa\le \ovl \kappa$, and \item $h$ is the entropy of the parameter $c$, \end{itemize} then \[ N'(\kappa,n)\le C e^{(h+\eps)n} \] for some constant $C>0$ depending on $c$. \end{proposition} \begin{proof} Let $N(n)$ count the number of relevant precritical paths in $A$ of generation $n$. Since $h$ is the entropy of $c$, there is a constant $C_1>0$ such that \[ N(n)\le C_1e^{(h+\eps/3)n} \;. 
\] Suppose first that $A_n\neq\emptyset$. Our first claim is that there are at most $ 2^{{n}/{g}+{n}/{m}}$ precritical paths \[ s=b_0c_0 a_1b_1c_1a_2b_2c_2\dots a_pb_pc_p \] with fixed $(b_i)_{i\le p}$ of a given pattern $P$ of length $n$. Indeed, the beginning of the $c_i$ is fixed by $b_i$, or by $a_i$ if $b_i$ is empty, and the end is at the $0$ state, so by Lemma~\ref{lem:c_iPaths} there are at most $2^{k_i/g}$ choices for each $c_i$ and in total at most $2^{n/g}$ choices for all $c_i$ combined (in the non-renormalizable case, the $c_i$ are empty and there is no choice at all). Each $a_i$ has at most two choices by Lemma~\ref{lem:ChoiceLessPaths}, and since their length is at least $m$, there are no more than $2^{n/m}$ such choices. By Lemma~\ref{lem:substitution} we may substitute $c_ia_i$ by $a^*_i$ with $m\le |a^*_i|\le 3m$ and get a precritical path \[s^*=b_0a^*_1b_1a^*_2b_2\dots a^*_pb_p c_p\] in $A$ with length at most $ n+\kappa(P) n $. Denoting the length of $a_i^*$ by $\ell_i^*$, we call the numbers $P^*=(t_0,0, \ell^*_1, t_1,0,\ell^*_2,\dots,\ell^*_p,t_p,k_p)$ the pattern of $s^*$. Our next claim is that for fixed patterns $P$ and $P^*$, the number of triples $(s,(b_i)_{i\le p},s^*)$ is at most \[ 2^{{n}/{g}+ {n}/{m}}C_1 e^{(h+\eps/3)(n+\ovl\kappa n)}\le C_1e^{((h+\eps/3)(1+\ovl \kappa)+ 1/g+1/m)n} \;. \] Indeed, the number of paths $s$ for a given pattern $P$ with fixed $b_i$ is at most $2^{{n}/{g}+ {n}/{m}}$. The length of $s^*$ is specified by $P^*$; denote it by $\mu$. Since each $s^*$ is a precritical path in $A$ and we have $\mu\le n+\kappa(P)n$ by Lemma~\ref{lem:substitution}, the number of different $s^*$ is bounded by \[ N(\mu)\le C_1e^{(h +\eps/3)(n+\kappa (P)n)}\le C_1 e^{(h+\eps/3)(n+\ovl\kappa n)} \;. \] Since the $b_i$ are determined by the paths $s^*$, the claim is proved.
If $g$ and $m$ are sufficiently large and $\ovl \kappa$ is sufficiently small, then $(h+\eps/3)(1+\ovl \kappa)+ 1/g+1/m\le h+2\eps/3$, and the number of triples $(s,(b_i)_{i\le p},s^*)$ {(still for fixed patterns $P$ and $P^*$)} is bounded by \[ C_1e^{(h+2\eps/3)n} \;. \] Since every $s\in S'(\kappa,n)$ is a part of at least one triple $(s,(b_i)_{i\le p},s^*)$ for some patterns $P$ and $P^*$ with $\kappa(P)\le \kappa$ we get the estimate \[ N'(\kappa, n)\le \left|\{(P,P^*)\}\right| C_1 e^{(h+2\eps/3) n} \;, \] where $\left|\{(P,P^*)\}\right|$ denotes the number of pairs of patterns $P$ and $P^*$ with $|P|=n$ and $|P^*|\le n+\kappa(P)n$. By Lemma~\ref{lem:PatternsGrowth}, we have $\left|\{(P,P^*)\}\right| \le C_2e^{(\eps/3) n}$ for some constant $C_2>0$ when $m$ is sufficiently large. We get \[ N'(\kappa, n)\le C_1C_2 \, e^{(h+\eps) n} \;; \] this finishes the proof if $A_n\neq\emptyset$. The case $A_n=\emptyset$ is simpler: here we have all $b_i=\emptyset$. Therefore, every $s\in A'$ is of the form \[ s=c_0 a_1c_1a_2c_2\dots a_pc_p \] and it is easy to see that there are at most $ 2^{{n}/{g}+{n}/{m}}$ precritical paths in $A'$. (By Lemma~\ref{lem:c_iPaths} there are at most $2^{k_i/g}$ choices for each $c_i$ and in total at most $2^{n/g}$ choices for all $c_i$ combined. Each $a_i$ has at most two choices by Lemma~\ref{lem:ChoiceLessPaths}, and since their length is at least $m$, there are no more than $2^{n/m}$ such choices.) Therefore, if $g\ge \bar g$ and $m\ge \bar m$ are sufficiently big, then \[ N'(\kappa, n)\le C \, e^{\eps n} \;. \qedhere \] \end{proof} \subsection{Continuity at Non-Renormalizable Irrational Endpoints} Let $c_{\infty}$ be a non-dyadic endpoint of $\mathscr M$ that is not renormalizable; the case that $c_\infty$ is renormalizable will be treated in Section~\ref{Sub:Renormalizable}.
There is a sequence of dyadic veins $[c_i,c'_i]$ approximating $c_{\infty}$ in the following way \reminder{should we replace $i$ by $\nu$, because $i$ is in different use?} \begin{itemize} \item $c_1\prec c_2\prec\dots \prec c_{\infty}$; \item $c_{i+1}\in [c_i,c'_i]$; and \item $ \dots c_i \lhd \dots \lhd c'_2\lhd c'_1$. \end{itemize} Figure~\ref{Fig:IrrationalEndpoint} illustrates the arrangement of these points. Note that once $c_1$ is chosen, the remaining parameters are uniquely determined: $c'_i$ is the dyadic of least generation with $c'_i\succ c_i$ {in the same subwake of $c_i$ as $c_\infty$}, and $c_{i+1}$ is the branch point in the vein of $c'_i$ where the vein to $c_\infty$ branches off. \begin{figure} \caption{The relative (combinatorial) positions of the parameters $c_i$, $c'_i$, and $c_\infty$ used in the proof.} \label{Fig:IrrationalEndpoint} \end{figure} It may be that individual $c_i$ are renormalizable, but only finitely many of them are $n$-renormalizable for any fixed $n$, so the renormalization periods of $c_i$ (if any) tend to $\infty$; hence the prerequisite of Proposition~\ref{prop:Uniformity} is satisfied for sufficiently large $i$; we will use this in Lemma~\ref{lem:RefOfprop:Uniformity}. Similar to the previous discussion we specify the following objects: \begin{itemize} \item $H'_i$ is the Hubbard tree of $c'_i$ with dynamics $p_{c'_i}:H'_i\to H'_i$ \,; \item $S'_{i}(n)\subset [\alpha,-\alpha]\subset H'_i$ is the set of relevant precritical points of generation $n$ in $[\alpha,-\alpha]$ for the parameter $c'_i$\,; \item $N'_{i}(n):= |S'_i(n)|$ is its cardinality; \item $h_i=\tilde h(c_i)$ and $h'_i=\tilde h(c'_i)$ are the entropies; \item $m_i$ is the generation of the dyadic parameter $c'_i$\,; and \item for $j\le i$ we denote by $x_j^{(i)}\in H'_i$ the dynamical counterpart of $c_j$ in $p_{c'_i}:H'_i\to H'_i$ as in Definition~\ref{Def:DynamCounterpart}.
\end{itemize} We will do our considerations for a fixed Hubbard tree $H'_i$ and often suppress $i$ from the notation. We will now introduce the sets $S'_i(j,\kappa, n)$ for all $j\le i$; these are the sets of relevant precritical points of generation $n$ with uncertainty at most $\kappa$ (in analogy to $S'(\kappa,n)$ in Section~\ref{Sub:CountingPaths}), but subject to a certain relation with respect to the points $x_j^{(i)}$. The special case $j=i$ is exactly the case that was considered in Section~\ref{Sub:CountingPaths}. Specifically, consider a precritical point $y\in S'_i(n)\subset H'_i$. Let \begin{equation} \label{eq:Iter:K:ell}0=\tilde \ell_0 <\tilde t_0<\tilde \ell_1<\tilde t_1< \dots < \tilde t_p=n \end{equation} be the iteration times of $y$ (depending on $j$) uniquely specified as follows: \begin{itemize} \item $\tilde t_k>\tilde \ell_k$ is the first time so that $p^{\circ \tilde t_k}_{c'_i}(y) \in [ c'_i, x_{j}^{(i)}]$; \item $\tilde \ell_k>\tilde t_{k-1}$ is the first time so that $p^{\circ \tilde \ell_k}_{c'_i}(y)\in [\alpha,-\alpha]$. \end{itemize} We also set $\ell_k:=\tilde \ell_k- \tilde t_{k-1}$ and $t_k:= \tilde t_k-\tilde \ell_k$. Clearly, the sequence \eqref{eq:Iter:K:ell} is uniquely specified by $(t_0,\ell_1, \dots, t_p)$. We define the \emph{uncertainty of $y$ with respect to $x_j^{(i)}$} as \[ \kappa_j(y):= \frac 1 n \sum_{\ell_k<3m_j}(3m_j- (\ell_k+1)) = \frac 1 n \sum_k \max(0,3m_j-(\ell_k+1)) \] (note that here we start counting at the interval $[c'_i,x_j^{(i)}]$, while in Section~\ref{Sub:CountingPaths} we have $\ell_i=|a_i|$, which starts at the $0$-state before going to $[c',x]$; hence in order to be consistent here we have to use $\ell_k +1$). We denote by $S'_i(j,\kappa,n)$ the set of all $y\in S'_i(n)$ such that $\kappa_j(y)<\kappa$.
Let us also define \[ I_j(y):= \bigcup_{\ell_k<3m_j} \left\{\tilde t_{k-1}, \tilde t_{k-1}+1,\dots, \tilde \ell_{k} \right\} = \bigcup_{\ell_k<3m_j} \left[ \tilde t_{k-1},\tilde \ell_k \right]\cap\mathbb Z \;; \] this is the set of iteration times without choice (from $[c'_i,x^{(i)}_j]$ to $[\alpha,-\alpha]$). \begin{lemma}[Blocks of Iteration Times] \label{lem:I_j} \lineclear In the Hubbard tree $H'_i$, consider some $y\in S'_i(n)$. If $m_j > 3 m_{j'}$ for some $j>j'$, then the corresponding sets $I_j(y)$ and $I_{j'}(y)$ are disjoint. Furthermore, \( \displaystyle \kappa_j(y) \le \frac 2 n |I_j(y)|\) for all $j\le i$. \end{lemma} \begin{proof} Let us first show that for all $j\le i$, the set $I_j(y)$ is a union of blocks of consecutive numbers so that each block has length $\ell_k+1\in[m_j,\dots, 3m_j]$, and \begin{itemize} \item its first number $ \tilde t_{k-1}$ is the unique number $t$ in the block that satisfies $p_{c'_i}^{\circ t}(y)\in [c'_i, \alpha]$; \item its last number $\tilde\ell_k$ is the unique number $\ell$ in the block that satisfies $p_{c'_i}^{\circ \ell}(y)\in [\alpha,-\alpha]$. \end{itemize} We have $\ell_k<3m_j$ by definition of $I_j(y)$ and we need to prove the lower bound $\ell_k\ge m_j -1$. For every integer $k\ge 0$, let $T_k\subset H'_i$ be the minimal tree connecting the $\alpha$ fixed point to all dyadic endpoints of generation at most $k$. In particular, $[\alpha,-\alpha]\subset T_0$. By~\eqref{eq:T_k-2InT_k-1} we have $p_{c'_i}^{-1}(T_{k-2})\subset T_{k-1}$. By construction, the parameters $c'_i$ and $c'_{j-1}$ are in different sublimbs of $c_{j}$. Thus, by Lemma~\ref{Lem:DirectlySubordinateDynamics}, in the tree $H'_i$ the arc $[c'_i,x_{j}^{(i)})$ is disjoint from $T_{m_j-1 }$; and we get $[c'_i,x_{j}^{(i)})\cap p_{c'_i}^{-k} [\alpha,-\alpha]=\emptyset$ for all $k< m_j$. This shows that $\ell_k\ge m_j -1$.
By definition of $ \tilde t_{k-1}$ we certainly have $p_{c'_i}^{\circ \tilde t_{k-1}}(y)\in[c'_i,x^{(i)}_j]\subset [c'_i,\alpha]$, and since $p_{c'_i}^{-1}([c'_i,\alpha])=[\alpha,-\alpha]$, the block would end before the orbit could enter $[c'_i,\alpha]$ again. The claim about the last number is obvious. Since any block $I_j(y)$ describes a trajectory from $[c'_i,\alpha]$ to $[\alpha,-\alpha]$, the given properties imply that any two blocks $I_j(y)$ and $I_{j'}(y)$ are either disjoint or identical. If $m_j > 3 m_{j'}$, then any block of $I_j(y)$ has greater length than any block of $I_{j'}(y)$, and consequently $I_{j}(y)$ and $I_{j'}(y)$ are disjoint. Finally, $3m_j-(\ell_k+1) \le 2m_j \le 2(\ell_k+1)$, and taking the sum we conclude $\kappa_j(y) \le \frac 2 n |I_j(y)|$ as claimed. \end{proof} \begin{lemma}[Surgery Respects Uncertainty] \label{lem:MonotIncl} \lineclear For $j<i$ the injection $B:S'_{i}(n)\to S'_{i-1}(n)$ of Proposition~\ref{Prop:InjectionPrecritical} injects $S'_i(j,\kappa,n)$ into $S'_{i-1}(j,\kappa,n)$. \end{lemma} \begin{proof} We will show that the injection $B$ respects the sequence \eqref{eq:Iter:K:ell}: the orbits of $y$ and $B(y)$ visit the intervals defining this sequence at the same times. This immediately implies that the uncertainty $\kappa$ is preserved. Recall that $x_j^{(i)}$ and $x_j^{(i-1)}$ are the dynamical counterparts of $c_j$ in the dynamical planes of $p_{c'_i}$ and $p_{c'_{i-1}}$ respectively. Consider $y\in S'_i(j,\kappa,n)$. It follows from Proposition~\ref{Prop:InjectionPrecritical} part (B), applied to $c_*=0$ and thus $x_*=\alpha$ and $x'_*=\alpha$, that \[ p^{\circ t}_{c'_{i}}(y)\in [\alpha,-\alpha] \;\;\text{ if and only if }\;\; p^{\circ t}_{c'_{i-1}}(B(y))\in [\alpha,-\alpha] \;. \] This takes care of the first pair of corresponding intervals.
For the second pair, it follows from Proposition~\ref{Prop:InjectionPrecritical} part (A) that for all $t\ge 0$ \[ p^{\circ t}_{c'_{i}}(y)\in [c'_{i},-\alpha] \;\;\text{ if and only if }\;\; p^{\circ t}_{c'_{i-1}}(B(y))\in [c'_{i-1},-\alpha] \;. \] By part (B), applied to $c_*=c_j$, it follows that \[ p^{\circ t}_{c'_{i}}(y)\in [x_j^{(i)},-\alpha] \;\;\text{ if and only if }\;\; p^{\circ t}_{c'_{i-1}}(B(y))\in [x_j^{(i-1)},-\alpha] \;. \] Since $[x_j^{(i)},-\alpha]\subset [c'_{i},-\alpha]$ and $[x_j^{(i-1)},-\alpha]\subset [c'_{i-1},-\alpha]$, we conclude that \[ p^{\circ t}_{c'_{i}}(y)\in [c'_{i},x_j^{(i)}] \;\;\text{ if and only if }\;\; p^{\circ t}_{c'_{i-1}}(B(y))\in [c'_{i-1},x_j^{(i-1)}] \;. \] Therefore, our surgery respects the sequence \eqref{eq:Iter:K:ell}, and thus $\kappa(y)=\kappa(B(y))$ and $B(y)\in S'_{i-1}(j,\kappa,n)$. \end{proof} \begin{lemma}[Small Uncertainty] \label{lem:SmallKappa} \lineclear For every $\delta>0$ and for every $i'\ge 0$ there is an $i''>i'$ such that for every $n\ge 0$ we have \[ S'_{i''}(n)\subset \bigcup_{j\in \{i',\dots ,i''\}} S'_{i''}(j,\delta,n). \] \end{lemma} \begin{proof} Choose a subsequence of indices $i'=i_1< i_2< \dots < i_\nu$ so that $3m_{i_r}< m_{i_{r+1}}$ and $\nu> 2/\delta$. We show that $i'':= i_\nu$ satisfies the claim of the lemma. It is sufficient to show that for every $y\in S'_{i_\nu}(n)$ there is a $\nu'\le \nu$ such that $\kappa_{i_{\nu'}}(y)<\delta$. By Lemma~\ref{lem:I_j} the sets $I_{i_1}(y), I_{i_2}(y), \dots , I_{i_\nu}(y)$ are pairwise disjoint because $3m_{i_r}< m_{i_{r+1}}$. Hence \[ \frac 1 n \left(|I_{i_1}(y)|+ |I_{i_2}(y)| + \dots + |I_{i_\nu}(y)|\right) \le 1. \] Since $\kappa_j(y) \le \frac 2 n |I_j(y)|$ (Lemma~\ref{lem:I_j} again), we have \[ \kappa_{i_1}(y)+\kappa_{i_2}(y)+\dots +\kappa_{i_\nu}(y)\le 2 < \nu\delta \;; \] this implies that $\kappa_{i_{\nu'}}(y)<\delta$ for some $\nu'\le \nu$.
\end{proof} The next lemma is a corollary of Proposition~\ref{prop:Uniformity}. \begin{lemma}[Bound on Precritical Points] \label{lem:RefOfprop:Uniformity}\lineclear For every $\eps>0$ there are $i'\ge 0$ and $\overline \kappa>0$ such that if $ j \ge i'$ and $\kappa\le \overline \kappa$, then \[|S'_j(j,\kappa,n)|\le C_j e^{(\eps+h_j) n}\] for all $n>0$ and some constant $C_j>0$. \end{lemma} \begin{proof} For given $\eps>0$ fix $\overline g, \overline m, \ovl\kappa>0 $ as in Proposition~\ref{prop:Uniformity} so that $N'(\kappa,n)\le Ce^{(h+\eps)n}$. Since $c_{\infty}$ is not renormalizable, we may choose $i'$ large enough such that for all $j\ge i'$ \begin{itemize} \item $m_j\ge \overline m$; and \item $c_j$ is either non-renormalizable or the renormalization period of $c_j$ is at least $\overline g$ (we can make this assumption because $c_\infty$ is non-renormalizable). \end{itemize} We will now apply Proposition~\ref{prop:Uniformity} to the pair $c:=c_j$ and $c':= c'_j$; then $h_j=h$ is the entropy of $c=c_j$. Observe first that $|S'_j(j, \kappa,n)| = |S'( \kappa, n)|= N'(\kappa,n)$ after the substitution. Indeed, every relevant pre-critical point of $p_{c'_j}$ is uniquely characterized by a precritical path in $A'$ (again by a fundamental property of the symbolic dynamics because the critical point is in the interior of the $0$-state of $A'$). This bijection preserves the uncertainties: if $y\in S'_j(j,\kappa,n)$ with sequence~\eqref{eq:Iter:K:ell} is identified with a relevant precritical path $s$ with decomposition~\eqref{eq:DecompOfs}, then $\ell_k +1 =\tilde \ell_k - \tilde t_{k-1} +1= |a_k|$. Hence $y$ and $s$ have the same uncertainties, and $S'_j(j, \kappa,n)$ and $S'( \kappa, n)$ are in bijection. Now the claim immediately follows from Proposition~\ref{prop:Uniformity}.
\end{proof} \begin{theorem}[Continuity on Vein to $c_{\infty}$, Non-Renormalizable Case] \label{thm:ContAtIrrNonRen} If $c_\infty$ is a non-renormalizable combinatorial endpoint of $\mathscr M$, then core entropy is continuous along the combinatorial arc $[0,c_\infty]$. \end{theorem} \begin{proof} We have parameters $c_i$ and $c'_i$ as introduced at the beginning of the section, and we have \[ \tilde h(c_i) \le \tilde h(c_{i+1}) \le \tilde h(c_\infty)\le \tilde h(c'_{i+1})\le \tilde h(c'_i) \] for all $i$ by radial monotonicity of core entropy (for the first two inequalities), and by Theorem~\ref{Thm:TiozzoConj} (for the last two). Therefore, it suffices to prove the following claim: \emph{ for every $\eps>0$ there is an $\bar i\ge 1$ such that $h'_{i}-h_{i}\le \eps$ for all $i\ge \bar i$. } Choose $\overline\kappa$ and $i'$ as in Lemma~\ref{lem:RefOfprop:Uniformity}. By Lemma~\ref{lem:SmallKappa} there is an $i''\ge i'$ such that \[ S'_{i''}(n)\subset \bigcup_{j\in \{i',\dots ,i''\}} S'_{i''}(j,\overline \kappa,n). \] By Lemma~\ref{lem:MonotIncl} there is an injection from $S'_{i''}(j,\overline \kappa,n)$ into $S'_j(j,\overline \kappa,n)$ for all $j\in \{i',\dots ,i''\}$. Therefore, \[ N'_{i''}(n)=|S'_{i''}(n)|\le \sum_{j=i'}^{i''} |S'_j(j,\overline \kappa,n)|. \] By Lemma~\ref{lem:RefOfprop:Uniformity} we have $|S'_j(j,\overline \kappa,n)|\le C_j e^{(\eps+h_j) n}$. Thus \[ N'_{i''}(n)\le \sum_{j=i'}^{i''}C_j e^{(\eps+h_j) n} \le \left( \sum_{j=i'}^{i''}C_j\right) e^{(\eps +h_{i''})n} \]because $h_{i''}\ge h_{j}$ by monotonicity. This proves that $h'_{i''}-h_{i''}\le \eps$, and since the sequence $h'_i-h_i$ is decreasing we have $h'_{i}-h_{i}\le \eps$ for all $i\ge i''$. \end{proof} \subsection{The Renormalizable Case} \label{Sub:Renormalizable} In this section, we assume that $c_\infty$ is renormalizable.
In order to formulate our statements, we need to briefly review well known facts on renormalization; compare \cite{Polylike, McMullenRenormalization,MiSelfSim,MiRenorm}. If $p_c$ is simple $m$-renormalizable, then there exists a ``little Mandelbrot set'' $\mathscr M'\subset\mathscr M$ consisting of $m$-renormalizable parameters with $c\in\mathscr M'$ and a straightening homeomorphism $\chi\colon\mathscr M'\to\mathscr M$ so that $p_{\chi(c)}$ in the neighborhood of its filled-in Julia set is hybrid equivalent to $p_c^{\circ m}$ on a neighborhood of the little filled-in Julia set (except possibly at the root point $\chi^{-1}(1/4)$). The little Mandelbrot set has a main center $c_0:=\chi^{-1}(0)$ with a superattracting orbit of period $m$. In this case, we say that ``the parameter $c$ is $c_0$ tuned with $\chi(c)$''. Dynamically, the filled-in Julia set $K_{c}$ equals $K_{c_0}$ in which every Fatou component is replaced by a copy of $K_{\chi(c)}$. \begin{lemma}[Renormalization and Entropy] \label{Lem:Renormalization} \lineclear If $\mathscr M'$ is a small copy of $\mathscr M$ consisting of $m$-renormalizable parameters, then the straightening map $\chi:\mathscr M'\to \mathscr M$ satisfies \[ \tilde h(c)=\max\left(\tilde h(c_0),\frac 1 m\tilde h(\chi( c))\right) \; \] for all $c\in \mathscr M'$. \end{lemma} \begin{proof} For convenience, in this proof we will count the number of relevant pre-critical points of \emph{strict generation} $n$; i.e. relevant pre-critical points of $p_c$, and similar for other polynomials, that are in \[p_c^{\circ (-n)}(c)\setminus \bigcup _{i=1}^{n-1}p_c^{\circ (-i)}(c).\] Clearly, this count also gives the entropy of $p_c$.
(Indeed, suppose $N_c(n)$ counts relevant pre-critical points of strict generation $n$ while $N'_c(n)$ counts pre-critical points of ``non-strict'' generation $n$; these numbers are different only when a precritical point of generation $n$, i.e.\ an element of $p_c^{\circ (-n)}(c)$, also is precritical of generation $n'<n$, i.e.\ when the critical point is periodic of period dividing $n-n'$. So if $0$ is periodic, say of minimal period $m$, then $N'_c(n) -N'_c(n-m) \le N_c(n)\le N'_c(n)$. Therefore, $N'_c(n)$ and $N_c(n)$ have the same growth rate.) Let $c_0$ be the center of $\mathscr M'$. Denote by $N_{c_0}(n)$ and $N_c(n)$ the numbers of relevant pre-critical points on $[\alpha,-\alpha]$ of $p_{c_0}$ and $p_c$ of strict generation $n$. Let us denote by $\tilde N_{\chi(c)}$ the number of pre-critical points on $[\beta,-\beta]$ of $p_{\chi(c)}$ of strict generation $n$. We will prove the lemma by relating $N_{c_0}(n)$, $\tilde N_{\chi(c)}$, and $N_c(n)$, see~\eqref{eq:Lem:Renorm} below. First let us note that the growth rate of $\tilde N_{\chi(c)}$ is the entropy of $\chi(c)$. Indeed, by Lemma~\ref{lem:DifferCounts}, the precritical points on $[\alpha,-\alpha]$ and those on $[\alpha,\beta]$ give the same entropies; and on $[\beta,-\beta]$ there can be at most twice as many precritical points of any generation $n$ as on $[\alpha,\beta]$. Denote by $F_0$ the unique periodic Fatou component of $p_{c_0}$ containing the critical point. A pre-image $F'$ of $F_0$ under $p_{c_0}^{\circ(n-1)}$ will be called \emph{relevant of strict generation $n$} if the center of $F'$ is on $[\alpha,-\alpha]$ and $F'$ is not a pre-image of $F_0$ under $p_c^{\circ i}$ for any $i<n-1$. Then $N_{c_0}(n)$ counts the number of relevant pre-images of $F_0$ of strict generation $n$. 
For a relevant pre-image $F'$ of $F_0$, say of strict generation $n$, the intersection of $\ovl F'$ with the Hubbard tree of $p_{c_0}$ is equal to $\ovl F'\cap [\alpha,-\alpha]$ because the Hubbard tree of $p_{c_0}$ has no branch points in $F'$ (see Claim (B) in the proof of Lemma~\ref{Lem:HubbardTreeRenormalization}). Therefore, $p_{c_0}^{\circ(n-1)}$ maps $\overline F'\cap [\alpha,-\alpha]$ homeomorphically onto $\overline F_0\cap [\alpha,\alpha']$. Let $K_{\chi(c)}$ be the filled in Julia set of $p_{\chi(c)}$. Then the filled in Julia set $K_c$ of $p_c$ is obtained from the filled in Julia set $K_{c_0}$ of $p_{c_0}$ by replacing the closure of each Fatou component $F'$ of $K_{c_0}$ with $K_{\chi(c)}$. And, moreover, if $F'$ is a relevant pre-image of $F_0$, then $\overline F'\cap [\alpha,\alpha']$ is replaced by the arc $[\beta,-\beta]\subset K_{\chi(c)}$. The inserted copies of $ K_{\chi(c)}$ are small (periodic and pre-periodic) filled in Julia sets of $p_c$. A small filled in Julia set $K'$ of $p_c$ will be called \emph{relevant} if the intersection $K'\cap [\alpha,-\alpha]$ has more than two points. Equivalently, $K'$ is obtained by inserting $K_{\chi(c)}$ into a relevant Fatou component of $p_{c_0}$. Let us denote by $K_0$ the small filled in Julia set containing the critical point; i.e. $K_0$ is obtained from $F_0$. By construction, $N_{c_0}(n)$ counts the number of relevant pre-images of $K_0$ of strict generation $n$. Let us now count relevant pre-critical points of $p_c$. If $x$ is such a point, say of strict generation $n\ge 0$, then $x$ is within a relevant pre-image $K'$ of $K_0$. Suppose $K'$ has strict generation $i\ge 0$. Then after $i-1$ iterations $x$ is within $K_0$ and $p_c^{\circ (i-1)}(x)$ is a relevant pre-critical point of strict generation $n+1-i$.
Since the dynamics of $p_c^{\circ m}:K_0\to K_0$ is identified with $p_{\chi(c)}:K_{\chi(c)}\to K_{\chi(c)}$, there are $\tilde N_{\chi(c)}\left(\frac{n-i}{m}+1\right)$ relevant pre-critical points of strict generation $n+1-i$ in $K_0$, where we use the convention $\tilde N_{\chi (c)}(t)=0$ if $t\not\in \mathbb N$. We get: \begin{equation} \label{eq:Lem:Renorm} N_c(n)=\sum_{i=0}^{n} N_{c_0}(i) \tilde N_{\chi(c)}\left(\frac{n-i}{m}+1\right), \end{equation} where $N_{c_0}(i)$ counts pre-images $K'$ of $K_0$ and $\tilde N_{\chi(c)}\left(\frac{n-i}{m}+1\right)$ counts the number of relevant pre-critical points of strict generation $n$ within $K'$. Since the growths of $N_c(n)$, $N_{c_0}(n)$, and $\tilde N_{\chi(c)}(n)$ are the entropies of $p_c$, $p_{c_0}$, and $p_{\chi(c)}$ we get $\tilde h(c)=\max\left(\tilde h(c_0),\frac 1 m\tilde h(\chi( c))\right)$. \end{proof} \begin{corollary}[Continuity Along Vein to $c_{\infty}$, General Case.] \label{cor:RadialContin} \lineclear Suppose that $c_{\infty}$ is a non-dyadic endpoint of the Mandelbrot set. Then the entropy is continuous along the vein $[c_{\infty}, 0]$. \end{corollary} \begin{proof} Theorem~\ref{thm:ContAtIrrNonRen} proves the case when $c_{\infty}$ is non-renormalizable. The second case is when $c_{\infty}$ is finitely many times renormalizable. Then there is a renormalization $\chi \colon\mathscr M'\to \mathscr M$ such that $c_{\infty}\in \mathscr M'$ and $c'_{\infty}:=\chi(c_{\infty})$ is non-renormalizable. Entropy is continuous along the vein $[0,c'_{\infty}]$ by Theorem~\ref{thm:ContAtIrrNonRen}, and continuous along the image $\chi_*^{-1}([0,c'_{\infty}])=[c_0,c_{\infty}]$ by Lemma~\ref{Lem:Renormalization} (where $c_0$ is the main center of $\mathscr M'$), and continuous along $[0,c_0]$ by \cite[Theorem~4.9]{Jung} (or Theorem~\ref{thm:ContAtIrrNonRen}). Therefore, entropy is continuous along $[0,c_{\infty}]$.
The final case is that there is an infinite sequence $\chi_1:\mathscr M_1\to \mathscr M$, $\chi_2:\mathscr M_2\to \mathscr M$, $\dots$ of renormalizations such that $\mathscr M_n \subsetneq \mathscr M_{n-1}$ and $c_\infty\in \mathscr M_n$ for all $n\ge 0$. Let $c_n$ be the center of $\mathscr M_n$; i.e. $c_n=\chi_n ^{-1}(0)$. Then $c_1\prec c_2 \prec \dots \prec c_{\infty}$, and it is sufficient to show that $\lim_n \tilde h(c_n)$ is equal to $\tilde h(c_{\infty})$. Suppose that $\chi_n\colon\mathscr M_n\to \mathscr M$ is an $m_n$-renormalization. Using Lem\-ma~\ref{Lem:Renormalization} we get \[\tilde h(c_{\infty})=\max\left(\tilde h(c_n),\frac 1 {m_n}\tilde h(\chi_n( c_{\infty}))\right)\le \tilde h(c_n)+\frac 1 {m_n}\tilde h(\chi_n( c_{\infty})).\] But $\tilde h(c_n)+\frac 1 {m_n}\tilde h(\chi_n( c_{\infty}))\le \tilde h(c_n)+\frac 1 {m_n}\log 2$ has the same limit as $\tilde h(c_n)$ because $\lim_n m_n = +\infty$. Thus entropy is continuous along $[0,c_{\infty}]$. \end{proof} \reminder{Total number of reminders: \arabic{treminder}}
% NOTE(review): two macro definitions that stood here were corrupted during extraction
% (their command names were replaced by the expansions {\scriptscriptstyle\mathrm{top}} and
% {\scriptscriptstyle\mathrm{comb}}); since these expansions already appear verbatim
% throughout the appendix, the broken definitions are disabled here.
\appendix \section{Core entropy and biaccessibility dimension} \centerline{by Wolf Jung} \markleft{D.~DUDKO, D.~SCHLEICHER; APPENDIX BY W.~JUNG} Here various definitions of core entropy shall be discussed and related to the biaccessibility dimension. On a compact metric space, the topological entropy of a continuous map is defined by a growth rate, which is referring to preimages of covers, or to $\eps$-shadowing sets. See \cite{BrinStuck, deMelovanStrien} for details. When the underlying space is a compact interval, a finite tree, or graph, several equivalent characterizations are due to Misiurewicz and others \cite{alm}.
These include the growth rate of horseshoes, laps (monotonic branches), periodic points, and preimages of a general point. For real and complex quadratic polynomials, core entropy was defined by Tao Li \cite{taoli} and Bill Thurston \cite{TanLeiEntropy, bghkltt} as the topological entropy of $p_c(z)$ on the Hubbard tree; this definition applies to the postcritically finite case in particular, and more generally to finite or infinite compact trees, but it does not work when $c$ is an endpoint with a dense postcritical orbit. In a fairly general situation, the filled Julia set $\mathcal{K}_c$ is path-connected with empty interior. Then $\mathcal{K}_c$ consists of the Hubbard tree $T_c$\,, the countable family of its preimages, and an uncountable family of endpoints. The dynamics on $T_c$ is interesting because this tree is folded over itself, while the iteration does not return to strict preimages of $T_c$\,. On the other hand, with the single exception of $c=-2$ \cite{Zdunik, Smirnov, MeerkampSchleicher}, the endpoints form a set of full harmonic measure, while the external angles of $T_c$ and its preimages form a set of Hausdorff dimension $<1$. Since all biaccessible points are contained in arcs iterated to $T_c$\,, these angles are called biaccessible (or biaccessing). More precisely, the biaccessibility dimension is defined as follows: \begin{itemize} \item For the lamination generated by an angle $\vartheta\in \mathbb{S}^1$ \cite{ThurstonLaminations}, consider all angles of non-trivial leaves. Their Hausdorff dimension is the \emph{combinatorial biaccessibility dimension} $B_{\scriptscriptstyle\mathrm{comb}}(\vartheta)$. The same dimension is obtained from pairs of angles with the same itinerary, or from pairs not separated by the precritical leaves: the diameter joining $\vartheta/2$ and $(\vartheta+1)/2$, and its preimages.
\item For a parameter $c\in\mathcal{M}$, the \emph{topological biaccessibility dimension} $B_{\scriptscriptstyle\mathrm{top}}(c)$ is the Hausdorff dimension of those angles for which the dynamic ray is landing together with another ray. \end{itemize} These definitions are related analogously to Lemma~\ref{Lem:defEntropy}: \begin{lemma}[Combinatorial and topological biaccessibility] \lineclear Suppose that $\vartheta\in \mathbb{S}^1$ and $c\in\partial\mathscr M$ belongs to the impression of the parameter ray with angle $\vartheta$, or $c\in\mathscr M$ is hyperbolic and the ray lands at the corresponding root. Then $B_{\scriptscriptstyle\mathrm{comb}}(\vartheta)=B_{\scriptscriptstyle\mathrm{top}}(c)$. \end{lemma} \begin{proof}[Sketch of proof \cite{SymDyn, Jung}] \looseness-1 In the locally connected case, and neglecting the countable set of angles at precritical or precharacteristic points, two dynamic rays are landing together if and only if they are not separated by a precritical ray-pair. (This separation line contains additional internal arcs when the interior is not empty.) When $\mathcal{K}_c$ is not locally connected, exceptional sets of angles are shown to be negligible in terms of Hausdorff dimension: a Cremer periodic point might be topologically biaccessible \cite{SchleicherZakeri}, but the relevant angles have Hausdorff dimension 0 \cite{BullettSentenac}. On the other hand, in an infinitely renormalizable situation, rays with combinatorially biaccessing angles may fail to land together, or to land at all. But their Hausdorff dimension is 0 in the case of the main molecule, and it is less than the dimension of non-renormalizable biaccessing angles in the primitive maximal case \cite{Jung}. \end{proof} The following relation to entropy is due to Thurston \cite{TanLeiEntropy}, relying on earlier work by Furstenberg \cite{Furstenberg} and Douady \cite{Douady}.
\begin{proposition}[Dimension and entropy of the tree]\label{Prop:Biaccessibility} \lineclear Suppose that either \begin{itemize} \item $\mathcal{K}_c$ is locally connected with empty interior, or \item $p_c$ is parabolic, or \item $p_c$ is hyperbolic so that the attracting orbit has real multiplier. \end{itemize} Using regulated arcs when necessary, define the tree $T_c$ as the path-connected hull of the critical orbit. If $T_c$ is compact, consider the topological entropy of $p_c(z)$ on $T_c$\,. Then it is related to the biaccessibility dimension by $h_{\scriptscriptstyle\mathrm{top}}(T_c)=B_{\scriptscriptstyle\mathrm{top}}(c)\cdot\log2$. \end{proposition} \begin{proof} The proof is found in version 1 of \cite{BruinSchleicher} and in \cite{TiozzoThesis, Jung}: since $\vartheta\mapsto z(\vartheta)$ is a semi-conjugation with finite fibers, we may consider the topological entropy of the angle-doubling map on the compact set of angles of $T_c$ \cite[Thm.~II.7.1]{deMelovanStrien}. And this equals the Hausdorff dimension \cite[Proposition~III.1]{Furstenberg}, except for the base 2 instead of $e$ in the logarithm of the growth factor $\lambda$. \end{proof} While the definition of $h_{\scriptscriptstyle\mathrm{top}}(T_c)$ requires a compact tree $T_c$\,, a general notion was given in Definition~\ref{Def:CoreEntropy} in terms of precritical ray pairs, and its relation to the biaccessibility dimension shall be discussed now: \begin{theorem}[Dimension and entropy in general] \label{Thm:Biaccessibility} \lineclear Entropy and biaccessibility dimension are related as follows for all parameters $c\in\mathcal{M}$ and all angles $\vartheta\in \mathbb{S}^1$\,: \begin{equation} \tilde h(c)=B_{\scriptscriptstyle\mathrm{top}}(c)\cdot\log2 \qquad\mbox{and}\qquad h(\vartheta)=B_{\scriptscriptstyle\mathrm{comb}}(\vartheta)\cdot\log2 \ . \end{equation} \end{theorem} \begin{proof} First, suppose that $c=c(\vartheta)$ is postcritically finite or belongs to a dyadic vein.
In particular, $\mathcal{K}_c$ is locally connected and $T_c$ is compact with finitely many endpoints. Then the growth rate of monotonic branches on $T_c$ is equal to the growth rate of precritical points on $[\alpha_c\,,-\alpha_c]$, so $h_{\scriptscriptstyle\mathrm{top}}(T_c)=\tilde h(c)$, and Proposition~\ref{Prop:Biaccessibility} applies. Second, suppose that $c$ is a non-renormalizable irrational endpoint; approximate it with biaccessible parameters $c_n\prec c$. Then monotonicity of $B_{\scriptscriptstyle\mathrm{top}}(c)$ \cite[Proposition~4.6]{Jung} (which in turn is a consequence of monotonicity of characteristic leaves of the lamination, in analogy to Lemma~\ref{Lem:Monotonicity}) and continuity of $\tilde h(c)$ give \begin{equation} B_{\scriptscriptstyle\mathrm{top}}(c)\cdot\log2\ge\lim B_{\scriptscriptstyle\mathrm{top}}(c_n)\cdot\log2 =\lim\tilde h(c_n)=\tilde h(c) \ . \end{equation} For the opposite estimate, note that the plane is cut into pieces successively by precritical ray pairs, and the angles of a piece of level $n$ form up to $n$ intervals of total length $2^{-n}$ according to \cite[Lemma~4.1]{BruinSchleicher}. Recall that $N(n)$ is the number of precritical points of generation $n$ on $[\alpha_c\,,-\alpha_c]$, and denote the number of level-$n$ pieces intersecting the arc $[\alpha_c\,,-\alpha_c]$ by $V(n)$. Then $V(n)=1+N(1)+\dots+N(n)$ is growing by the same factor $\lambda=e^h$ as $N(n)$, and the same holds for $n \cdot V(n)$. The $b$-dimensional Hausdorff measure of the angles of $[\alpha_c\,,-\alpha_c]$ is estimated as \begin{equation} \mu_b\le\lim n \cdot V(n) \cdot 2^{-bn} \ , \end{equation} which is 0 when $b>\log\lambda/\log2=\tilde h(c)/\log2$. So the Hausdorff dimension is estimated as $B_{\scriptscriptstyle\mathrm{top}}(c)\le\tilde h(c)/\log2$ as well, implying equality. 
\looseness-1 Finally, both $\tilde h(c)$ and $B_{\scriptscriptstyle\mathrm{top}}(c)$ are constant on the main molecule and on primitive small Mandelbrot sets: for $B_{\scriptscriptstyle\mathrm{top}}(c)$ see \cite[Theorem~4.7]{Jung}, and $\tilde h(c)$ is constant on a dense subset and continuous. Moreover, both scale by the period of immediate satellite renormalization, see Lemma~\ref{Lem:Renormalization}. Now all cases are covered by the Yoccoz Theorem \cite{MiRenorm}. \end{proof} Continuity of entropy according to Theorem~\ref{Thm:Continuity} gives: \begin{corollary}[Continuity of biaccessibility dimension] \lineclear The biaccessibility dimension $B_{\scriptscriptstyle\mathrm{comb}}(\vartheta)$ is continuous on $\mathbb{S}^1$ and $B_{\scriptscriptstyle\mathrm{top}}(c)$ is continuous on the Mandelbrot set $\mathcal{M}$. \end{corollary} Proposition~\ref{Prop:Biaccessibility} and Theorem~\ref{Thm:Biaccessibility} show that the definition of entropy in terms of precritical points is a generalization of the original definition in terms of a compact core: \begin{corollary}[Extending the definition of core entropy] \lineclear We have $h_{\scriptscriptstyle\mathrm{top}}(T_c)=\tilde h(c)$ whenever $T_c$ is defined and compact. \end{corollary} \vskip-0.8mm \end{document}
\begin{document} \title{Measurement cost of metric-aware variational quantum algorithms} \author{Barnaby van Straaten} \author{B\'alint Koczor} \email{[email protected]} \affiliation{Department of Materials, University of Oxford, Parks Road, Oxford OX1 3PH, United Kingdom} \begin{abstract} We consider metric-aware quantum algorithms which use a quantum computer to efficiently estimate both a matrix and a vector object. For example, the recently introduced quantum natural gradient approach uses the Fisher matrix as a metric tensor to correct the gradient vector for the co-dependence of the circuit parameters. We rigorously characterise and upper bound the number of measurements required to determine an iteration step to a fixed precision, and propose a general approach for optimally distributing samples between matrix and vector entries. Finally, we establish that the number of circuit repetitions needed for estimating the quantum Fisher information matrix is asymptotically negligible for an increasing number of iterations and qubits. \end{abstract} \maketitle \section{Introduction} With quantum computers rising as realistic technologies, attention has turned to how such machines could perform as variational tools~\cite{farhi2014quantum,peruzzo2014variational,wang2015quantum,PRXH2,PhysRevA.95.020501,mcclean2016theory, PhysRevLett.118.100503,Li2017,PhysRevX.8.011021,Santagatieaap9646,kandala2017hardware,kandala2018extending, PhysRevX.8.031022,romero2017strategies,higgott2018variational,SuguruExc, mcclean2017hybrid,colless2017robust,kokail2018self,sharma2020noise,cerezo2020variational,SuguruGeneral, koczor2020quantum, koczor2020exponential}. This results in a hybrid model with an iterative loop: a classical processor determines how to update the parameters describing a family of quantum states (parametrised ansatz states), while a quantum coprocessor generates and performs measurements on that state (via an ansatz circuit). 
This is of particular interest in the context of noisy, intermediate-scale quantum devices (NISQ devices)~\cite{preskill2018quantum}, because complex ansatz states can be prepared with shallow circuits~\cite{kassal2011simulating,C2CP23700H,whaley2014quantum,ourReview}. Such shallow circuits will potentially enable obtaining useful value before the era of resource-intensive quantum fault tolerance methods. As such, variational quantum algorithms promise to solve key problems that are intractable to classical computers, such as finding ground states \cite{peruzzo2014variational,PRXH2,mcclean2016theory,kandala2017hardware,google2020hartree}---as relevant in quantum chemistry and in materials science---or approximately solving combinatorial problems \cite{farhi2014quantum} and beyond. Despite their potential power, variational algorithms might require an extremely large number of quantum-circuit repetitions -- optimally using quantum resources will therefore have a crucial economic importance. Attention has recently been focused on statistical aspects of these variational quantum algorithms \cite{sweke2019stochastic,kubler2019adaptive,qfi,Crawford2019,arrasmith2020operator,hadfield2020measurements}, such as the effect of shot noise and the reduction of their measurement costs. It is our aim in this work to establish general scaling results by rigorously characterising the number of measurements required to obtain a single iteration step in case of so-called metric-aware quantum algorithms. Let us first introduce basic notions. \subsection{Variational quantum algorithms} We consider variational quantum algorithms which typically aim to prepare a parametrised quantum state $\rho(\underline{\theta}) := \Phi(\underline{\theta}) \, \rho_0$ where we model via a mapping $\Phi(\underline{\theta})$ that acts on the computational zero state $\rho_0$ of $N$ qubits and depends continuously on the parameters $\theta_i$ with $i\in\{1, 2, \dots \nu \}$. 
This mapping can in general contain non-unitary elements, such as measurements \cite{koczor2019quantum, PhysRevLett.126.220501}, but in many applications one assumes that it acts (approximately) as a unitary circuit that decomposes into a product of individual quantum gates. These gates typically act on a small subset of the system, e.g., one and two-qubit gates. Recently a novel variational algorithm was proposed for simulating real-time quantum evolution using shallow quantum circuits \cite{Li2017} and was further generalised to imaginary time and natural gradient evolutions \cite{samimagtime,koczor2019quantum} which can be used as optimisers of variational quantum eigensolvers (VQE) \cite{peruzzo2014variational,PRXH2,Rebentrost_2019, SuguruExc}. This was shown to significantly outperform other approaches, such as simple gradient descent, in terms of convergence speed and accuracy according to numerical simulations \cite{samimagtime,koczor2019quantum,wierichs2020avoiding}. In this work, we consider generalisations of the aforementioned techniques as variational algorithms that need to estimate the following two objects: (a) a positive-semidefinite, symmetric matrix, which is usually the quantum Fisher information that characterises sensitivity with respect to parameters $\theta_k$; (b) a vector object that is in many applications the gradient vector of the loss function. Examples of such algorithms are provided in references~\cite{li2017efficient,xiaotheory,samimagtime,koczor2019quantum,quantumnatgrad}, and we will refer to them in the following as metric-aware quantum algorithms. The metric tensor typically only depends on the parameter values while the vector object additionally depends on, e.g., a Hermitian observable $\mathcal{H}$ that in typical scenarios represents the Hamiltonian of a physical system and decomposes into a polynomially increasing number $r_h$ of Pauli terms.
\subsection{Quantum natural gradient} To be more concrete, in the following we will focus on one prominent algorithm, the recently introduced quantum natural gradient approach \cite{koczor2019quantum,quantumnatgrad} which is equivalent to imaginary time evolution when quantum circuits are noiseless and unitary \cite{koczor2019quantum,samimagtime}. This approach can be used as a VQE optimiser when minimising the expectation value $E(\underline{\theta}) :=\mathrm{Tr}[ \rho(\underline{\theta}) \mathcal{H} ]$ over the parameters $\underline{\theta}$. However, the approach generalises to any Lipschitz continuous mapping as an objective function \cite{koczor2019quantum}. In particular, natural gradient descent governs the evolution of the ansatz parameters according to the update rule \cite{koczor2019quantum} \begin{equation} \label{naturalgradEvoRESULUT} \underline{\theta}(t{+}1) = \underline{\theta}(t) - \lambda \, \mathbf{F}_Q^{-1} \underline{g}, \end{equation} where $t$ is an index and $\lambda$ is a step size. Here the inverse of the positive-semidefinite, symmetric quantum Fisher information matrix $\mathbf{F}_Q \in \mathbb{R}^{\nu \times \nu}$ corrects the gradient vector $g_k := \partial_k E(\underline{\theta})$ for the co-dependence of the parameters, and both objects can be estimated efficiently using a quantum computer while the inverse $\mathbf{F}_Q^{-1}$ is computed by a classical processor. We discuss different protocols for estimating the matrix $[\mathbf{F}_Q]_{kl}$ and vector $g_k$ entries for both pure (idealised, perfect quantum gates) and mixed quantum states (via imperfect quantum gates or non-unitary elements as measurements) in the Appendix. We now highlight two results. a) We derive the general upper bound $[\mathbf{F}_Q]_{kl} \leq r_g^2$, where $r_g$ is the maximal number of Pauli terms into which generators of ansatz gates can be decomposed (Lemma~1). 
This bound is a generalisation of what is known as the Heisenberg limit in quantum metrology \footnote{Where the ansatz parameter $\theta$ corresponds to a global $Z$ rotation of all the qubits and therefore $r_g = N$. }, refer also to \cite{review,giovannetti11,koczor2019variational}. b) The matrix $\mathbf{F}_Q$ might be ill-conditioned and the inversion in Eq.~\ref{naturalgradEvoRESULUT} requires a regularisation. We will use the simple variant of Tikhonov regularisation $\tilde{\mathbf{F}}_Q^{-1} := [\mathbf{F}_Q {+} \eta \mathrm{Id}]^{-1}$ in the following; we derive analytical lower and upper bounds on the singular values of this inverse matrix in the Appendix~(Lemma~3) using a). \section{Upper bounds on the measurement cost} To motivate our approach, we illustrate in Fig~\ref{fig1} (a/green) how naively using the same number of measurements for estimating each matrix and vector entry, such as in \cite{wierichs2020avoiding}, can result in impractical sampling costs. In particular, we aim to reduce the error due to shot noise (finite sampling) $\epsilon$ of the vector $\underline{v} := \tilde{\mathbf{F}}_Q^{-1} \underline{g}$ in the update rule in Eq.~\eqref{naturalgradEvoRESULUT}. We first express how the error in the matrix and vector entries propagates to the parameter-update rule in Eq.~\eqref{naturalgradEvoRESULUT}. We quantify this error as the expected Euclidean distance $\langle \lVert \Delta v \rVert^2 \rangle = \epsilon^2$, and this translates to the condition $\sum_{k=1}^\nu \mathrm{Var}[v_k] = \epsilon^2$, where $\mathrm{Var}[v_k]$ is the variance of a single vector entry.
We derive an analytical formula in Lemma~2 in the Appendix: we express the error $\epsilon$ in terms of the variances $\mathrm{Var}\{[\mathbf{F}_Q]_{k l}\}$ and $\mathrm{Var}[g_l]$ of the measurements used to estimate the matrix and vector entries, respectively, as \begin{equation} \label{errorpropagation} \epsilon^2 = \sum_{k, l=1}^\nu a_{k l} \mathrm{Var}\{[\mathbf{F}_Q]_{k l}\} + \sum_{k=1}^\nu b_k \mathrm{Var}[g_k]. \end{equation} The coefficients $a_{kl}$ and $b_{k}$ describe how the error of $[\mathbf{F}_Q]_{k l}$ and $g_k$ propagates through matrix inversion and subsequent vector multiplication into the precision $\epsilon$. We remark that these results are completely general and can be applied to any quantum algorithm that requires the estimation of both an inverse matrix and a vector object, such as a Hessian-based optimisation. \addtocounter{footnote}{1} \footnotetext[\value{footnote}]{ In Fig.~1 in the uniformly distributed scenario the same number of measurements are used to determine every entry of the metric tensor (gradient vector) and only the overall number $N_{F}$ ($N_{g}$) of measurements to determine the metric tensor (gradient vector) is chosen optimally. In the naive scheme both $N_{F}$ and $N_{g}$ are additionally fixed } \newcounter{footcombined} \setcounter{footcombined}{\value{footnote}} \begin{figure*} \caption{ Exact numerical simulations: a $12$-qubit ansatz circuit with 84 parameters is initialised at a good approximation of the ground state of a spin-chain Hamiltonian (refer to Appendix). Natural gradient evolution from Eq.~\eqref{naturalgradEvoRESULUT}. \label{fig1}} \end{figure*} We derive general upper bounds on the variances $\mathrm{Var}\{[\mathbf{F}_Q]_{k l}\}$ and $\mathrm{Var}[g_l]$ for different experimental strategies in the Appendix; the error $\epsilon^{2}$ in Eq.~\ref{errorpropagation} is reduced proportionally when repeating measurements.
In the following, we assume that $N_{F}$ measurements are assigned to estimate the full matrix $\mathbf{F}_Q$ while $N_{g}$ measurements are used to estimate the gradient vector $\underline{g}$~\cite{Note\thefootcombined}. We now state an upper bound on them in terms of the precision $\epsilon$. \begin{theorem} \label{theo1} To reduce the uncertainty of the vector $\underline{v} = \tilde{\mathbf{F}}_Q^{-1} \underline{g}$ due to shot noise to a precision $\epsilon$, the number of samples to estimate the matrix $\mathbf{F}_Q$ in Eq.~\eqref{naturalgradEvoRESULUT} is upper bounded as \begin{equation} \label{nfupperbound} N_{F} \leq 2 \, \epsilon^{-2} \nu^4 \, \mathrm{Spc}[\tilde{\mathbf{F}}_Q^{-1}]^2 \, \lVert g \rVert_\infty^2 \, f_F \end{equation} while sampling the gradient has a cost upper bounded by \begin{equation}\label{thqeq2} N_{g} \leq 2 \epsilon^{-2} \, \nu^2 \, \mathrm{Spc}[\tilde{\mathbf{F}}_Q^{-1}] \, \mathrm{Spc}[\mathcal{H}] f_g. \end{equation} The overall measurement cost of determining the natural gradient vector is $N_{F} {+} N_{g}$. Here $\mathrm{Spc}[A]$ denotes the average squared singular values of a matrix $A \in \mathbb{C}^{d \times d}$ via its Hilbert-Schmidt or Frobenius norm as $\mathrm{Spc}[A] := \lVert A \rVert^2/d$ and $\lVert g \rVert_\infty$ is the absolute largest entry in the gradient vector. \end{theorem} The constant factors $f_F$ and $f_g$ in Theorem~\ref{theo1} are specific to the experimental setup used to estimate the matrix or vector entries. For example, for $r_g = 1$ the factor simplifies as $f_F \leq 2$. The upper bounds in Theorem~\ref{theo1} crucially depend on the regularisation and we prove that $\mathrm{Spc}[\tilde{\mathbf{F}}_Q^{-1}] \leq \eta^{-2}$, refer to Lemma~3 in the Appendix.
The product $\mathrm{Spc}[\mathcal{H}] f_g$ is a constant that reflects the complexity of estimating the expected value of the fixed $\mathcal{H}$ (and can be reduced with advanced techniques that simultaneously estimate commuting terms \cite{Crawford2019,yen2020measuring,jena2019pauli,gokhale2020n,gokhale2019minimizing,hadfield2020measurements}). It is interesting to note that the sampling cost of the gradient vector $N_{g}$ depends on the metric tensor via $\mathrm{Spc}[\tilde{\mathbf{F}}_Q^{-1}]$ (and vice versa). Let us illustrate this point in an example where one of the entries in $\tilde{\mathbf{F}}_Q^{-1}$ is extremely large in absolute value and therefore via the matrix/vector product it magnifies both the mean and the variance of the gradient entries. Indeed, reducing such a magnified variance to our fixed precision $\epsilon$ requires an increased number of measurements in the gradient vector. We finally remark that Theorem~\ref{theo1} is quite general and the upper bounds apply to all metric-aware quantum algorithms \cite{li2017efficient,xiaotheory,samimagtime,koczor2019quantum,quantumnatgrad} up to minor modifications. We will establish in the following, that in many cases sampling the gradient vector $N_{g}$ dominates the overall cost of the natural gradient approach as $N_{F}{ +} N_{g} \approx N_{g}$. Before doing so, let us first bound the sampling cost of the natural gradient vector \emph{relative to} the sampling cost $N_{gd}$ of the gradient vector that would be used in simple gradient descent optimisations. Note that the difference between $N_{g}$ and $N_{gd}$ is that the latter corresponds to the scenario when we fix the metric tensor as the identity matrix $\mathbf{F}_Q:=\mathrm{Id}_\nu$ and thus the precision is $ \epsilon^2 := \langle \lVert \Delta g \rVert^2 \rangle $.
\begin{theorem} \label{theo2} Determining the natural gradient vector to the same precision $\epsilon$ as the gradient vector requires a sampling overhead $\kappa:= (N_{F}{ +} N_{g})/N_{gd}$. This overhead is upper bounded in general \begin{equation*} \kappa \leq \eta^{-2} +y , \quad \text{and }\quad \kappa \approx \mathrm{Spc}[\tilde{\mathbf{F}}_Q^{-1}] +y, \end{equation*} up to the potentially vanishing term $y=N_{F} / N_{gd}$, as in Result~\ref{result1} and Result~\ref{result2}. Here $\eta$ is either a regularisation parameter or the smallest singular value of $\mathbf{F}_Q$. The second equality establishes an approximation as a constant factor which is valid, e.g., when the evolution is close to the optimal point. \end{theorem} \section{Scaling as a function of the iterations} Theorem~\ref{theo1} establishes that the sampling cost $N_{F}$ of the matrix $\mathbf{F}_Q$ depends on the norm of the gradient vector, which is expected to decrease polynomially during an optimisation. In a typical scenario we expect that, even if initially estimating the matrix dominates the sampling costs, asymptotically sampling the vector $\underline{g}$ dominates the costs. \begin{result}\label{result1} The upper bound in Theorem~\ref{theo1} results in the growth rate $N_{F} + N_{g} = \mathcal{O}(\lVert g(t) \rVert_\infty^2) + N_{g}$ when viewed as a function of iterations or steps $t$. Assuming polynomial convergence via $\lVert g(t) \rVert_\infty= \mathcal{O}( t^{-c})$ with $c>0$, the natural gradient vector requires only a constant sampling overhead asymptotically as \begin{equation*} \kappa = (N_{F}{+}N_{g})/N_{gd} = \mathcal{O}(\mathrm{Spc}[\tilde{\mathbf{F}}_Q^{-1}] + t^{-2c} ), \end{equation*} when compared to the gradient vector via Theorem~\ref{theo2}. We remark that convergence is guaranteed under mild continuity conditions \cite{sweke2019stochastic}. 
\end{result} We have numerically simulated the natural gradient evolution from Eq.~\eqref{naturalgradEvoRESULUT} and determined its overhead $\kappa$. This quantifies how much more it costs at every iteration step $t$ to estimate the natural gradient vector $\underline{v}(t)$ than it would cost to estimate the gradient vector $\underline{g}(t)$ assuming the same precision $\epsilon$. Fig~\ref{fig1} (a/red) shows how this sampling overhead converges to its constant asymptotic approximation as the average squared singular values $\mathrm{Spc}[\tilde{\mathbf{F}}_Q^{-1}] \approx 10^{6}$ ($10^{1.5}$) in Fig~\ref{fig1} (a/black). Fig~\ref{fig1} (a/dashed) also demonstrates that under-regularising the inverse (via $\eta = 10^{-5}$) results in unfeasible sampling costs. In fact, carefully increasing the regularisation parameter (as $\eta = 10^{-1}$) reduces the sampling cost by several orders of magnitude without significantly affecting the performance: both evolutions decrease the gradient norm with a similar rate, compare solid and dashed red lines in Fig~\ref{fig1} (b). It is striking that the overhead plotted in Fig~\ref{fig1} (a) can be very high initially; while the focus of the present paper is on the asymptotic costs with respect to time and size, it is worth noting that this high initial cost could be straightforwardly mitigated by, e.g., only occasionally updating a low-rank approximation of the metric tensor. This may be expected to have little impact on the convergence rate since in the early phase the advantage of using natural gradient is typically less pronounced. \begin{figure*} \caption{(a) Sampling cost $N_{F}$.} \label{fig2} \end{figure*} Recall that Fig~\ref{fig1} (a) via Result~\ref{result1} assumes a constant precision $\epsilon$ throughout the evolution which is not practical. 
In fact, one would require a relative precision such that $\epsilon = \epsilon_0 \lVert \underline{g}(t) \rVert $ in case of the gradient vector and $\epsilon = \epsilon_0 \lVert \underline{v}(t) \rVert $ in case of the natural gradient vector, for some fixed $\epsilon_0$. In particular, using a moderate regularisation of the inverse as $\eta = 0.1$, the cost of estimating $\underline{v}(t)$ is comparable or even smaller than estimating $\underline{g}(t)$, see [Fig~\ref{fig1} (c/red) solid]. We finally stress that in Fig.~\ref{fig1}(a) we do not actually compare the overall performance of the simple and natural gradient methods, but only their per-iteration (per-epoch) costs. We therefore conclude that the natural gradient optimisation requires overall fewer samples to converge (i.e., asymptotically constant overhead but faster convergence rate) when compared to simple gradient descent, see also \cite{samimagtime,koczor2019quantum,wierichs2020avoiding}. Moreover, we prove in the following that even the significant initial overheads in Fig~\ref{fig1} (a-c) do in many practical applications asymptotically vanish for an increasing number of qubits. \section{Scaling with the system size} Let us now consider how the upper bounds in Theorem~\ref{theo1} scale with the number of qubits $N$. First, we consider the general growth rate $\nu = \mathcal{O}(N a(N))$ of the number of parameters $\nu$ where $a(N)$ is the depth of the ansatz circuit. For example, $\mathrm{polylog}(N)$-depth circuits constitute a very general class of ans{\"a}tze via $a(N) = \mathcal{O} (x \log(N)^y)$ for some $x,y>0$. Second, we establish that the spectral quantity scales with the number of qubits as $\mathrm{Spc}[\tilde{\mathbf{F}}_Q^{-1}] = \mathcal{O}(N^{-s} a^{-s}(N))$ with $0 \leq s \leq 2$, refer to Lemma~3 in the Appendix. 
Third, if $\mathcal{H}$ decomposes into a number $r_h$ of Pauli terms that grows polynomially (e.g., $N^4$ in case of chemistry applications) then we obtain a polynomial growth rate $\spec[\mathcal{H}] f_g = \mathcal{O}(N^b)$ in Theorem~\ref{theo1} (Eq.~\eqref{thqeq2}) with some $b\geq1$. We finally obtain the growth rates \begin{align} \label{qubitscaling1} N_{F} &= \mathcal{O}[ N^{4-2s} \, a^{4-2s}(N) \, \, \lVert g \rVert_\infty^2 ], \\ N_{g} &= \mathcal{O}[ N^{2-s+b} \, a^{2-s}(N)] . \label{qubitscaling2} \end{align} Note that the vector norm $\lVert g \rVert_\infty^2$ might in general also depend on the number of qubits, e.g., exponentially vanishing gradients in case of barren plateaus~\cite{mcclean2018barren,grant2019initialization,cerezo2020cost} which would result in an exponentially decreasing relative sampling cost of the metric tensor. One may also think of scenarios where the gradient norm grows, however, one could then in practice decrease the inverse precision $\epsilon^{-1}$ proportionally as typical at the initial stages of an optimisation. To simplify our discussion, we assume that the gradient norm $\lVert g \rVert_\infty$ is fixed (bounded), e.g., the evolution is initialised in a close vicinity of the optimal parameters as a good classical guess is known. This also encompasses scenarios where the optimisation is near termination approaching a fixed e.g., chemical precision. We summarise the resulting measurement cost in the following. \begin{result}\label{result2} Assume that the number of Pauli terms in the Hamiltonian grows polynomially implying $\spec[\mathcal{H}] f_g = \mathcal{O}(N^b)$ for some $b\geq 1$, and the gradient norm $\lVert g \rVert_\infty = \mathcal{O}(N^{1})$ is bounded. 
The relative sampling cost of the matrix $\mathbf{F}_Q$ vanishes for general $\mathrm{polylog}(N)$-depth circuits when $b > (2{-}s)$ and, following Theorem~\ref{theo2}, determining the natural gradient vector requires at most a constant overhead asymptotically \begin{equation*} \kappa = (N_{F}{+}N_{g})/N_{gd} = \mathcal{O}( \mathrm{Spc}[\tilde{\mathbf{F}}_Q^{-1}] + N^{2-b} ), \end{equation*} when compared to the gradient vector. \end{result} Note that Result~\ref{result2} guarantees a vanishing sampling cost of the matrix $\mathbf{F}_Q$ when the number of terms in the Hamiltonian grows faster than quadratically, i.e., $b > 2$. We have explicitly calculated the growth rates $b$ in case of 3 example Hamiltonians in the Appendix\, as $b=1,2,3$, respectively, and plot the relative sampling costs $N_{F}/N_{g}$ in Fig~\ref{fig2}. We remark that this result can be applied to the general class of metric-aware quantum algorithms \cite{li2017efficient,xiaotheory,samimagtime,koczor2019quantum,quantumnatgrad}. \section{Optimal measurement distribution} So far we have assumed that $N_{F}$ ($N_{g}$) measurements are distributed uniformly among the $\nu^2$ ($\nu$) matrix (vector) entries~\cite{Note\thefootcombined}. However, the overall number of samples $N_{F} + N_{g}$ (from Theorem~\ref{theo1}), needed to obtain the vector $\underline{v} = \tilde{\mathbf{F}}_Q^{-1} \underline{g} $ to a precision $\epsilon$, can be minimised by distributing samples between the elements of $\mathbf{F}_Q$ and $\underline{g}$ optimally \cite{Crawford2019}. We denote the matrix $[N_{F}^{\mathrm{mat}}]_{kl}$ and the vector $[N_{g}^{\mathrm{vec}}]_k$ entries that represent the number of measurements assigned to individual elements in $\mathbf{F}_Q$ and in $\underline{g}$, respectively. The number of samples required is reduced to $N_\mathrm{opt} = \Sigma^{2} / \epsilon^{2}$ with $N_\mathrm{opt} \leq N_{F} + N_{g}$. 
We now state explicit expressions for determining $\Sigma$, $[N_{F}^{\mathrm{mat}}]_{kl}$ and $[N_{g}^{\mathrm{vec}}]_k$. \begin{result}\label{result3} Measurements are distributed optimally when the number of samples for determining individual elements of the matrix and gradient are given by \begin{align} [N_{F}^{\mathrm{mat}}]_{kl} &= \epsilon^{-2} \, \Sigma \sqrt{a_{kl} \mathrm{Var} \big \{[\mathbf{F}_Q]_{kl}\big\}}, \\ [N_{g}^{\mathrm{vec}}]_{k} &= \epsilon^{-2} \, \Sigma \sqrt{b_{k} \mathrm{Var} [g_{k}]}, \end{align} respectively. Here $\mathrm{Var}[\cdot]$ is the variance of a single measurement of the corresponding element and we explicitly define $\Sigma$ via the coefficients $a_{kl}$ and $b_k$ as \begin{equation} \Sigma := \sum_{k,l=1}^{\nu} \sqrt{a_{kl} \mathrm{Var} \big \{[\mathbf{F}_Q]_{kl}\big\}} + \sum_{k=1}^{\nu} \sqrt{b_k \mathrm{Var}[g_k]}. \end{equation} Furthermore, the symmetry of the Fisher matrix can be explicitly included just by modifying the coefficients $a_{kl}$, as discussed in the Appendix. \end{result} We remark that this result is completely general and can be applied to any of the metric-aware quantum algorithms \cite{li2017efficient,xiaotheory,samimagtime,koczor2019quantum,quantumnatgrad}. [Fig. \ref{fig1} (a/c) blue] shows how the optimal distribution of samples reduces the measurement overhead across the entire evolution -- most significantly for small regularisation parameters [Fig. \ref{fig1} (a/c), $\eta = 10^{-5}$], in which case some matrix elements might be crucially larger than others. Moreover, Result~\ref{result3} automatically takes into account the decreasing sampling cost of the matrix as established in Results~\ref{result1}-\ref{result2}. This is illustrated in Fig~\ref{fig2} (b); for the first few iterations, far from convergence, the bulk of the measurements are directed to the matrix, comparatively few go to the elements of the gradient [Fig~\ref{fig2} (b), $t=1$]. 
However, close to convergence, consistent with Result~\ref{result1}, the gradient takes the majority of the measurements, [Fig \ref{fig2} (b), $t=20$]. \section{Discussion and conclusion} In this work we established general upper bounds on the sampling cost of metric-aware variational quantum algorithms (e.g., natural gradient). We analysed how this sampling cost scales for increasing iterations in Result~\ref{result1} and for increasing qubit numbers in Result~\ref{result2}. The latter establishes that the relative measurement cost of the matrix object $\mathbf{F}_Q$ is asymptotically negligible in many practically relevant scenarios, such as in case of quantum chemistry applications. Natural gradient has been shown to outperform other optimisation approaches in numerical simulations~\cite{samimagtime,koczor2019quantum,wierichs2020avoiding}. We proved in this work that for both an increasing number of iterations and number of qubits the sampling overhead \emph{per-iteration (per-epoch)} of the natural gradient approach is constant asymptotically when compared to simple gradient descent. The most important implication of our results is therefore that the \emph{overall cost} of natural gradient is lower since it converges to the optimum faster. We finally established a general technique that optimally distributes measurements when estimating matrix and vector entries, further reducing the cost of general metric-aware quantum algorithms. Let us finally remark on the generality of our results: our techniques are immediately applicable to other problems beyond metric-aware approaches, for example, to Hessian-based optimisations via Eq.~\eqref{errorpropagation} as detailed in the Appendix. \begin{acknowledgments} Acknowledgements --- B.\,K. acknowledges funding received from EU H2020-FETFLAG-03-2018 under the grant agreement No 820495 (AQTION). The authors thank Simon C. 
Benjamin and Natalia Ares for their support, stimulating ideas and useful comments on this manuscript. Numerical simulations in this work used the QuEST and QuESTlink quantum simulation packages \cite{quest,questlink}. The authors would like to acknowledge the use of the University of Oxford Advanced Research Computing (ARC) facility in carrying out this work. We thank Patrick Coles and Andrew Arrasmith for their useful comments. \end{acknowledgments} \begin{thebibliography}{60} \makeatletter \providecommand \@ifxundefined [1]{ \@ifx{#1\undefined} } \providecommand \@ifnum [1]{ \ifnum #1\expandafter \@firstoftwo \else \expandafter \@secondoftwo \fi } \providecommand \@ifx [1]{ \ifx #1\expandafter \@firstoftwo \else \expandafter \@secondoftwo \fi } \providecommand \natexlab [1]{#1} \providecommand \enquote [1]{``#1''} \providecommand \bibnamefont [1]{#1} \providecommand \bibfnamefont [1]{#1} \providecommand \citenamefont [1]{#1} \providecommand \href@noop [0]{\@secondoftwo} \providecommand \href [0]{\begingroup \@sanitize@url \@href} \providecommand \@href[1]{\@@startlink{#1}\@@href} \providecommand \@@href[1]{\endgroup#1\@@endlink} \providecommand \@sanitize@url [0]{\catcode `\\12\catcode `\$12\catcode `\&12\catcode `\#12\catcode `\^12\catcode `\_12\catcode `\%12\relax} \providecommand \@@startlink[1]{} \providecommand \@@endlink[0]{} \providecommand \url [0]{\begingroup\@sanitize@url \@url } \providecommand \@url [1]{\endgroup\@href {#1}{\urlprefix }} \providecommand \urlprefix [0]{URL } \providecommand \Eprint [0]{\href } \providecommand \doibase [0]{https://doi.org/} \providecommand \selectlanguage [0]{\@gobble} \providecommand \bibinfo [0]{\@secondoftwo} \providecommand \bibfield [0]{\@secondoftwo} \providecommand \translation [1]{[#1]} \providecommand \BibitemOpen [0]{} \providecommand \bibitemStop [0]{} \providecommand \bibitemNoStop [0]{.\EOS\space} \providecommand \EOS [0]{\spacefactor3000\relax} \providecommand 
\BibitemShut [1]{\csname bibitem#1\endcsname} \let\auto@bib@innerbib\@empty \bibitem [{\citenamefont {Farhi}\ \emph {et~al.}(2014)\citenamefont {Farhi}, \citenamefont {Goldstone},\ and\ \citenamefont {Gutmann}}]{farhi2014quantum} \BibitemOpen \bibfield {author} {\bibinfo {author} {\bibfnamefont {E.}~\bibnamefont {Farhi}}, \bibinfo {author} {\bibfnamefont {J.}~\bibnamefont {Goldstone}},\ and\ \bibinfo {author} {\bibfnamefont {S.}~\bibnamefont {Gutmann}},\ }\bibfield {title} {\bibinfo {title} {A quantum approximate optimization algorithm},\ }\href@noop {} {\bibfield {journal} {\bibinfo {journal} {arXiv preprint arXiv:1411.4028}\ } (\bibinfo {year} {2014})}\BibitemShut {NoStop} \bibitem [{\citenamefont {Peruzzo}\ \emph {et~al.}(2014)\citenamefont {Peruzzo}, \citenamefont {McClean}, \citenamefont {Shadbolt}, \citenamefont {Yung}, \citenamefont {Zhou}, \citenamefont {Love}, \citenamefont {Aspuru-Guzik},\ and\ \citenamefont {O'Brien}}]{peruzzo2014variational} \BibitemOpen \bibfield {author} {\bibinfo {author} {\bibfnamefont {A.}~\bibnamefont {Peruzzo}}, \bibinfo {author} {\bibfnamefont {J.}~\bibnamefont {McClean}}, \bibinfo {author} {\bibfnamefont {P.}~\bibnamefont {Shadbolt}}, \bibinfo {author} {\bibfnamefont {M.-H.}\ \bibnamefont {Yung}}, \bibinfo {author} {\bibfnamefont {Q.}~\bibnamefont {Zhou}}, \bibinfo {author} {\bibfnamefont {P.~J.}\ \bibnamefont {Love}}, \bibinfo {author} {\bibfnamefont {A.}~\bibnamefont {Aspuru-Guzik}},\ and\ \bibinfo {author} {\bibfnamefont {J.~L.}\ \bibnamefont {O'Brien}},\ }\bibfield {title} {\bibinfo {title} {A variational eigenvalue solver on a photonic quantum processor},\ }\href@noop {} {\bibfield {journal} {\bibinfo {journal} {Nature communications}\ }\textbf {\bibinfo {volume} {5}} (\bibinfo {year} {2014})}\BibitemShut {NoStop} \bibitem [{\citenamefont {Wang}\ \emph {et~al.}(2015)\citenamefont {Wang}, \citenamefont {Dolde}, \citenamefont {Biamonte}, \citenamefont {Babbush}, \citenamefont {Bergholm}, \citenamefont {Yang}, \citenamefont 
{Jakobi}, \citenamefont {Neumann}, \citenamefont {Aspuru-Guzik}, \citenamefont {Whitfield} \emph {et~al.}}]{wang2015quantum} \BibitemOpen \bibfield {author} {\bibinfo {author} {\bibfnamefont {Y.}~\bibnamefont {Wang}}, \bibinfo {author} {\bibfnamefont {F.}~\bibnamefont {Dolde}}, \bibinfo {author} {\bibfnamefont {J.}~\bibnamefont {Biamonte}}, \bibinfo {author} {\bibfnamefont {R.}~\bibnamefont {Babbush}}, \bibinfo {author} {\bibfnamefont {V.}~\bibnamefont {Bergholm}}, \bibinfo {author} {\bibfnamefont {S.}~\bibnamefont {Yang}}, \bibinfo {author} {\bibfnamefont {I.}~\bibnamefont {Jakobi}}, \bibinfo {author} {\bibfnamefont {P.}~\bibnamefont {Neumann}}, \bibinfo {author} {\bibfnamefont {A.}~\bibnamefont {Aspuru-Guzik}}, \bibinfo {author} {\bibfnamefont {J.~D.}\ \bibnamefont {Whitfield}}, \emph {et~al.},\ }\bibfield {title} {\bibinfo {title} {Quantum simulation of helium hydride cation in a solid-state spin register},\ }\href@noop {} {\bibfield {journal} {\bibinfo {journal} {ACS nano}\ }\textbf {\bibinfo {volume} {9}},\ \bibinfo {pages} {7769} (\bibinfo {year} {2015})}\BibitemShut {NoStop} \bibitem [{\citenamefont {O'Malley}\ \emph {et~al.}(2016)\citenamefont {O'Malley}, \citenamefont {Babbush}, \citenamefont {Kivlichan}, \citenamefont {Romero}, \citenamefont {McClean}, \citenamefont {Barends}, \citenamefont {Kelly}, \citenamefont {Roushan}, \citenamefont {Tranter}, \citenamefont {Ding}, \citenamefont {Campbell}, \citenamefont {Chen}, \citenamefont {Chen}, \citenamefont {Chiaro}, \citenamefont {Dunsworth}, \citenamefont {Fowler}, \citenamefont {Jeffrey}, \citenamefont {Lucero}, \citenamefont {Megrant}, \citenamefont {Mutus}, \citenamefont {Neeley}, \citenamefont {Neill}, \citenamefont {Quintana}, \citenamefont {Sank}, \citenamefont {Vainsencher}, \citenamefont {Wenner}, \citenamefont {White}, \citenamefont {Coveney}, \citenamefont {Love}, \citenamefont {Neven}, \citenamefont {Aspuru-Guzik},\ and\ \citenamefont {Martinis}}]{PRXH2} \BibitemOpen \bibfield {author} {\bibinfo 
{author} {\bibfnamefont {P.~J.~J.}\ \bibnamefont {O'Malley}}, \bibinfo {author} {\bibfnamefont {R.}~\bibnamefont {Babbush}}, \bibinfo {author} {\bibfnamefont {I.~D.}\ \bibnamefont {Kivlichan}}, \bibinfo {author} {\bibfnamefont {J.}~\bibnamefont {Romero}}, \bibinfo {author} {\bibfnamefont {J.~R.}\ \bibnamefont {McClean}}, \bibinfo {author} {\bibfnamefont {R.}~\bibnamefont {Barends}}, \bibinfo {author} {\bibfnamefont {J.}~\bibnamefont {Kelly}}, \bibinfo {author} {\bibfnamefont {P.}~\bibnamefont {Roushan}}, \bibinfo {author} {\bibfnamefont {A.}~\bibnamefont {Tranter}}, \bibinfo {author} {\bibfnamefont {N.}~\bibnamefont {Ding}}, \bibinfo {author} {\bibfnamefont {B.}~\bibnamefont {Campbell}}, \bibinfo {author} {\bibfnamefont {Y.}~\bibnamefont {Chen}}, \bibinfo {author} {\bibfnamefont {Z.}~\bibnamefont {Chen}}, \bibinfo {author} {\bibfnamefont {B.}~\bibnamefont {Chiaro}}, \bibinfo {author} {\bibfnamefont {A.}~\bibnamefont {Dunsworth}}, \bibinfo {author} {\bibfnamefont {A.~G.}\ \bibnamefont {Fowler}}, \bibinfo {author} {\bibfnamefont {E.}~\bibnamefont {Jeffrey}}, \bibinfo {author} {\bibfnamefont {E.}~\bibnamefont {Lucero}}, \bibinfo {author} {\bibfnamefont {A.}~\bibnamefont {Megrant}}, \bibinfo {author} {\bibfnamefont {J.~Y.}\ \bibnamefont {Mutus}}, \bibinfo {author} {\bibfnamefont {M.}~\bibnamefont {Neeley}}, \bibinfo {author} {\bibfnamefont {C.}~\bibnamefont {Neill}}, \bibinfo {author} {\bibfnamefont {C.}~\bibnamefont {Quintana}}, \bibinfo {author} {\bibfnamefont {D.}~\bibnamefont {Sank}}, \bibinfo {author} {\bibfnamefont {A.}~\bibnamefont {Vainsencher}}, \bibinfo {author} {\bibfnamefont {J.}~\bibnamefont {Wenner}}, \bibinfo {author} {\bibfnamefont {T.~C.}\ \bibnamefont {White}}, \bibinfo {author} {\bibfnamefont {P.~V.}\ \bibnamefont {Coveney}}, \bibinfo {author} {\bibfnamefont {P.~J.}\ \bibnamefont {Love}}, \bibinfo {author} {\bibfnamefont {H.}~\bibnamefont {Neven}}, \bibinfo {author} {\bibfnamefont {A.}~\bibnamefont {Aspuru-Guzik}},\ and\ \bibinfo {author} 
{\bibfnamefont {J.~M.}\ \bibnamefont {Martinis}},\ }\bibfield {title} {\bibinfo {title} {{Scalable Quantum Simulation of Molecular Energies}},\ }\href {https://doi.org/10.1103/PhysRevX.6.031007} {\bibfield {journal} {\bibinfo {journal} {Phys. Rev. X}\ }\textbf {\bibinfo {volume} {6}},\ \bibinfo {pages} {031007} (\bibinfo {year} {2016})}\BibitemShut {NoStop} \bibitem [{\citenamefont {Shen}\ \emph {et~al.}(2017)\citenamefont {Shen}, \citenamefont {Zhang}, \citenamefont {Zhang}, \citenamefont {Zhang}, \citenamefont {Yung},\ and\ \citenamefont {Kim}}]{PhysRevA.95.020501} \BibitemOpen \bibfield {author} {\bibinfo {author} {\bibfnamefont {Y.}~\bibnamefont {Shen}}, \bibinfo {author} {\bibfnamefont {X.}~\bibnamefont {Zhang}}, \bibinfo {author} {\bibfnamefont {S.}~\bibnamefont {Zhang}}, \bibinfo {author} {\bibfnamefont {J.-N.}\ \bibnamefont {Zhang}}, \bibinfo {author} {\bibfnamefont {M.-H.}\ \bibnamefont {Yung}},\ and\ \bibinfo {author} {\bibfnamefont {K.}~\bibnamefont {Kim}},\ }\bibfield {title} {\bibinfo {title} {Quantum implementation of the unitary coupled cluster for simulating molecular electronic structure},\ }\href {https://doi.org/10.1103/PhysRevA.95.020501} {\bibfield {journal} {\bibinfo {journal} {Phys. Rev. A}\ }\textbf {\bibinfo {volume} {95}},\ \bibinfo {pages} {020501} (\bibinfo {year} {2017})}\BibitemShut {NoStop} \bibitem [{\citenamefont {McClean}\ \emph {et~al.}(2016)\citenamefont {McClean}, \citenamefont {Romero}, \citenamefont {Babbush},\ and\ \citenamefont {Aspuru-Guzik}}]{mcclean2016theory} \BibitemOpen \bibfield {author} {\bibinfo {author} {\bibfnamefont {J.~R.}\ \bibnamefont {McClean}}, \bibinfo {author} {\bibfnamefont {J.}~\bibnamefont {Romero}}, \bibinfo {author} {\bibfnamefont {R.}~\bibnamefont {Babbush}},\ and\ \bibinfo {author} {\bibfnamefont {A.}~\bibnamefont {Aspuru-Guzik}},\ }\bibfield {title} {\bibinfo {title} {The theory of variational hybrid quantum-classical algorithms},\ }\href@noop {} {\bibfield {journal} {\bibinfo {journal} {New J. 
Phys.}\ }\textbf {\bibinfo {volume} {18}},\ \bibinfo {pages} {023023} (\bibinfo {year} {2016})}\BibitemShut {NoStop} \bibitem [{\citenamefont {Paesani}\ \emph {et~al.}(2017)\citenamefont {Paesani}, \citenamefont {Gentile}, \citenamefont {Santagati}, \citenamefont {Wang}, \citenamefont {Wiebe}, \citenamefont {Tew}, \citenamefont {O'Brien},\ and\ \citenamefont {Thompson}}]{PhysRevLett.118.100503} \BibitemOpen \bibfield {author} {\bibinfo {author} {\bibfnamefont {S.}~\bibnamefont {Paesani}}, \bibinfo {author} {\bibfnamefont {A.~A.}\ \bibnamefont {Gentile}}, \bibinfo {author} {\bibfnamefont {R.}~\bibnamefont {Santagati}}, \bibinfo {author} {\bibfnamefont {J.}~\bibnamefont {Wang}}, \bibinfo {author} {\bibfnamefont {N.}~\bibnamefont {Wiebe}}, \bibinfo {author} {\bibfnamefont {D.~P.}\ \bibnamefont {Tew}}, \bibinfo {author} {\bibfnamefont {J.~L.}\ \bibnamefont {O'Brien}},\ and\ \bibinfo {author} {\bibfnamefont {M.~G.}\ \bibnamefont {Thompson}},\ }\bibfield {title} {\bibinfo {title} {{Experimental Bayesian Quantum Phase Estimation on a Silicon Photonic Chip}},\ }\href {https://doi.org/10.1103/PhysRevLett.118.100503} {\bibfield {journal} {\bibinfo {journal} {Phys. Rev. Lett.}\ }\textbf {\bibinfo {volume} {118}},\ \bibinfo {pages} {100503} (\bibinfo {year} {2017})}\BibitemShut {NoStop} \bibitem [{\citenamefont {Li}\ and\ \citenamefont {Benjamin}(2017{\natexlab{a}})}]{Li2017} \BibitemOpen \bibfield {author} {\bibinfo {author} {\bibfnamefont {Y.}~\bibnamefont {Li}}\ and\ \bibinfo {author} {\bibfnamefont {S.~C.}\ \bibnamefont {Benjamin}},\ }\bibfield {title} {\bibinfo {title} {{Efficient Variational Quantum Simulator Incorporating Active Error Minimization}},\ }\href {https://doi.org/10.1103/PhysRevX.7.021050} {\bibfield {journal} {\bibinfo {journal} {Phys. Rev. 
X}\ }\textbf {\bibinfo {volume} {7}},\ \bibinfo {pages} {021050} (\bibinfo {year} {2017}{\natexlab{a}})}\BibitemShut {NoStop} \bibitem [{\citenamefont {Colless}\ \emph {et~al.}(2018)\citenamefont {Colless}, \citenamefont {Ramasesh}, \citenamefont {Dahlen}, \citenamefont {Blok}, \citenamefont {Kimchi-Schwartz}, \citenamefont {McClean}, \citenamefont {Carter}, \citenamefont {de~Jong},\ and\ \citenamefont {Siddiqi}}]{PhysRevX.8.011021} \BibitemOpen \bibfield {author} {\bibinfo {author} {\bibfnamefont {J.~I.}\ \bibnamefont {Colless}}, \bibinfo {author} {\bibfnamefont {V.~V.}\ \bibnamefont {Ramasesh}}, \bibinfo {author} {\bibfnamefont {D.}~\bibnamefont {Dahlen}}, \bibinfo {author} {\bibfnamefont {M.~S.}\ \bibnamefont {Blok}}, \bibinfo {author} {\bibfnamefont {M.~E.}\ \bibnamefont {Kimchi-Schwartz}}, \bibinfo {author} {\bibfnamefont {J.~R.}\ \bibnamefont {McClean}}, \bibinfo {author} {\bibfnamefont {J.}~\bibnamefont {Carter}}, \bibinfo {author} {\bibfnamefont {W.~A.}\ \bibnamefont {de~Jong}},\ and\ \bibinfo {author} {\bibfnamefont {I.}~\bibnamefont {Siddiqi}},\ }\bibfield {title} {\bibinfo {title} {{Computation of Molecular Spectra on a Quantum Processor with an Error-Resilient Algorithm}},\ }\href {https://doi.org/10.1103/PhysRevX.8.011021} {\bibfield {journal} {\bibinfo {journal} {Phys. Rev. 
X}\ }\textbf {\bibinfo {volume} {8}},\ \bibinfo {pages} {011021} (\bibinfo {year} {2018})}\BibitemShut {NoStop} \bibitem [{\citenamefont {Santagati}\ \emph {et~al.}(2018)\citenamefont {Santagati}, \citenamefont {Wang}, \citenamefont {Gentile}, \citenamefont {Paesani}, \citenamefont {Wiebe}, \citenamefont {McClean}, \citenamefont {Morley-Short}, \citenamefont {Shadbolt}, \citenamefont {Bonneau}, \citenamefont {Silverstone}, \citenamefont {Tew}, \citenamefont {Zhou}, \citenamefont {O{\textquoteright}Brien},\ and\ \citenamefont {Thompson}}]{Santagatieaap9646} \BibitemOpen \bibfield {author} {\bibinfo {author} {\bibfnamefont {R.}~\bibnamefont {Santagati}}, \bibinfo {author} {\bibfnamefont {J.}~\bibnamefont {Wang}}, \bibinfo {author} {\bibfnamefont {A.~A.}\ \bibnamefont {Gentile}}, \bibinfo {author} {\bibfnamefont {S.}~\bibnamefont {Paesani}}, \bibinfo {author} {\bibfnamefont {N.}~\bibnamefont {Wiebe}}, \bibinfo {author} {\bibfnamefont {J.~R.}\ \bibnamefont {McClean}}, \bibinfo {author} {\bibfnamefont {S.}~\bibnamefont {Morley-Short}}, \bibinfo {author} {\bibfnamefont {P.~J.}\ \bibnamefont {Shadbolt}}, \bibinfo {author} {\bibfnamefont {D.}~\bibnamefont {Bonneau}}, \bibinfo {author} {\bibfnamefont {J.~W.}\ \bibnamefont {Silverstone}}, \bibinfo {author} {\bibfnamefont {D.~P.}\ \bibnamefont {Tew}}, \bibinfo {author} {\bibfnamefont {X.}~\bibnamefont {Zhou}}, \bibinfo {author} {\bibfnamefont {J.~L.}\ \bibnamefont {O{\textquoteright}Brien}},\ and\ \bibinfo {author} {\bibfnamefont {M.~G.}\ \bibnamefont {Thompson}},\ }\bibfield {title} {\bibinfo {title} {{Witnessing eigenstates for quantum simulation of Hamiltonian spectra}},\ }\bibfield {journal} {\bibinfo {journal} {Science Advances}\ }\textbf {\bibinfo {volume} {4}},\ \href {https://doi.org/10.1126/sciadv.aap9646} {10.1126/sciadv.aap9646} (\bibinfo {year} {2018})\BibitemShut {NoStop} \bibitem [{\citenamefont {Kandala}\ \emph {et~al.}(2017)\citenamefont {Kandala}, \citenamefont {Mezzacapo}, \citenamefont {Temme}, 
\citenamefont {Takita}, \citenamefont {Brink}, \citenamefont {Chow},\ and\ \citenamefont {Gambetta}}]{kandala2017hardware} \BibitemOpen \bibfield {author} {\bibinfo {author} {\bibfnamefont {A.}~\bibnamefont {Kandala}}, \bibinfo {author} {\bibfnamefont {A.}~\bibnamefont {Mezzacapo}}, \bibinfo {author} {\bibfnamefont {K.}~\bibnamefont {Temme}}, \bibinfo {author} {\bibfnamefont {M.}~\bibnamefont {Takita}}, \bibinfo {author} {\bibfnamefont {M.}~\bibnamefont {Brink}}, \bibinfo {author} {\bibfnamefont {J.~M.}\ \bibnamefont {Chow}},\ and\ \bibinfo {author} {\bibfnamefont {J.~M.}\ \bibnamefont {Gambetta}},\ }\bibfield {title} {\bibinfo {title} {Hardware-efficient variational quantum eigensolver for small molecules and quantum magnets},\ }\href@noop {} {\bibfield {journal} {\bibinfo {journal} {Nature}\ }\textbf {\bibinfo {volume} {549}},\ \bibinfo {pages} {242} (\bibinfo {year} {2017})}\BibitemShut {NoStop} \bibitem [{\citenamefont {Kandala}\ \emph {et~al.}(2019)\citenamefont {Kandala}, \citenamefont {Temme}, \citenamefont {C{\'o}rcoles}, \citenamefont {Mezzacapo}, \citenamefont {Chow},\ and\ \citenamefont {Gambetta}}]{kandala2018extending} \BibitemOpen \bibfield {author} {\bibinfo {author} {\bibfnamefont {A.}~\bibnamefont {Kandala}}, \bibinfo {author} {\bibfnamefont {K.}~\bibnamefont {Temme}}, \bibinfo {author} {\bibfnamefont {A.~D.}\ \bibnamefont {C{\'o}rcoles}}, \bibinfo {author} {\bibfnamefont {A.}~\bibnamefont {Mezzacapo}}, \bibinfo {author} {\bibfnamefont {J.~M.}\ \bibnamefont {Chow}},\ and\ \bibinfo {author} {\bibfnamefont {J.~M.}\ \bibnamefont {Gambetta}},\ }\bibfield {title} {\bibinfo {title} {Error mitigation extends the computational reach of a noisy quantum processor},\ }\href@noop {} {\bibfield {journal} {\bibinfo {journal} {Nature}\ }\textbf {\bibinfo {volume} {567}},\ \bibinfo {pages} {491} (\bibinfo {year} {2019})}\BibitemShut {NoStop} \bibitem [{\citenamefont {Hempel}\ \emph {et~al.}(2018)\citenamefont {Hempel}, \citenamefont {Maier}, \citenamefont 
{Romero}, \citenamefont {McClean}, \citenamefont {Monz}, \citenamefont {Shen}, \citenamefont {Jurcevic}, \citenamefont {Lanyon}, \citenamefont {Love}, \citenamefont {Babbush}, \citenamefont {Aspuru-Guzik}, \citenamefont {Blatt},\ and\ \citenamefont {Roos}}]{PhysRevX.8.031022} \BibitemOpen \bibfield {author} {\bibinfo {author} {\bibfnamefont {C.}~\bibnamefont {Hempel}}, \bibinfo {author} {\bibfnamefont {C.}~\bibnamefont {Maier}}, \bibinfo {author} {\bibfnamefont {J.}~\bibnamefont {Romero}}, \bibinfo {author} {\bibfnamefont {J.}~\bibnamefont {McClean}}, \bibinfo {author} {\bibfnamefont {T.}~\bibnamefont {Monz}}, \bibinfo {author} {\bibfnamefont {H.}~\bibnamefont {Shen}}, \bibinfo {author} {\bibfnamefont {P.}~\bibnamefont {Jurcevic}}, \bibinfo {author} {\bibfnamefont {B.~P.}\ \bibnamefont {Lanyon}}, \bibinfo {author} {\bibfnamefont {P.}~\bibnamefont {Love}}, \bibinfo {author} {\bibfnamefont {R.}~\bibnamefont {Babbush}}, \bibinfo {author} {\bibfnamefont {A.}~\bibnamefont {Aspuru-Guzik}}, \bibinfo {author} {\bibfnamefont {R.}~\bibnamefont {Blatt}},\ and\ \bibinfo {author} {\bibfnamefont {C.~F.}\ \bibnamefont {Roos}},\ }\bibfield {title} {\bibinfo {title} {{Quantum Chemistry Calculations on a Trapped-Ion Quantum Simulator}},\ }\href {https://doi.org/10.1103/PhysRevX.8.031022} {\bibfield {journal} {\bibinfo {journal} {Phys. Rev. 
X}\ }\textbf {\bibinfo {volume} {8}},\ \bibinfo {pages} {031022} (\bibinfo {year} {2018})}\BibitemShut {NoStop} \bibitem [{\citenamefont {Romero}\ \emph {et~al.}(2017)\citenamefont {Romero}, \citenamefont {Babbush}, \citenamefont {McClean}, \citenamefont {Hempel}, \citenamefont {Love},\ and\ \citenamefont {Aspuru-Guzik}}]{romero2017strategies} \BibitemOpen \bibfield {author} {\bibinfo {author} {\bibfnamefont {J.}~\bibnamefont {Romero}}, \bibinfo {author} {\bibfnamefont {R.}~\bibnamefont {Babbush}}, \bibinfo {author} {\bibfnamefont {J.~R.}\ \bibnamefont {McClean}}, \bibinfo {author} {\bibfnamefont {C.}~\bibnamefont {Hempel}}, \bibinfo {author} {\bibfnamefont {P.}~\bibnamefont {Love}},\ and\ \bibinfo {author} {\bibfnamefont {A.}~\bibnamefont {Aspuru-Guzik}},\ }\bibfield {title} {\bibinfo {title} {Strategies for quantum computing molecular energies using the unitary coupled cluster ansatz},\ }\href@noop {} {\bibfield {journal} {\bibinfo {journal} {arXiv preprint arXiv:1701.02691}\ } (\bibinfo {year} {2017})}\BibitemShut {NoStop} \bibitem [{\citenamefont {Higgott}\ \emph {et~al.}(2018)\citenamefont {Higgott}, \citenamefont {Wang},\ and\ \citenamefont {Brierley}}]{higgott2018variational} \BibitemOpen \bibfield {author} {\bibinfo {author} {\bibfnamefont {O.}~\bibnamefont {Higgott}}, \bibinfo {author} {\bibfnamefont {D.}~\bibnamefont {Wang}},\ and\ \bibinfo {author} {\bibfnamefont {S.}~\bibnamefont {Brierley}},\ }\bibfield {title} {\bibinfo {title} {{Variational Quantum Computation of Excited States}},\ }\href@noop {} {\bibfield {journal} {\bibinfo {journal} {arXiv preprint arXiv:1805.08138}\ } (\bibinfo {year} {2018})}\BibitemShut {NoStop} \bibitem [{\citenamefont {Jones}\ \emph {et~al.}(2019{\natexlab{a}})\citenamefont {Jones}, \citenamefont {Endo}, \citenamefont {McArdle}, \citenamefont {Yuan},\ and\ \citenamefont {Benjamin}}]{SuguruExc} \BibitemOpen \bibfield {author} {\bibinfo {author} {\bibfnamefont {T.}~\bibnamefont {Jones}}, \bibinfo {author} {\bibfnamefont 
{S.}~\bibnamefont {Endo}}, \bibinfo {author} {\bibfnamefont {S.}~\bibnamefont {McArdle}}, \bibinfo {author} {\bibfnamefont {X.}~\bibnamefont {Yuan}},\ and\ \bibinfo {author} {\bibfnamefont {S.~C.}\ \bibnamefont {Benjamin}},\ }\bibfield {title} {\bibinfo {title} {{Variational quantum algorithms for discovering Hamiltonian spectra}},\ }\href {https://doi.org/10.1103/PhysRevA.99.062304} {\bibfield {journal} {\bibinfo {journal} {Phys. Rev. A}\ }\textbf {\bibinfo {volume} {99}},\ \bibinfo {pages} {062304} (\bibinfo {year} {2019}{\natexlab{a}})}\BibitemShut {NoStop} \bibitem [{\citenamefont {McClean}\ \emph {et~al.}(2017)\citenamefont {McClean}, \citenamefont {Kimchi-Schwartz}, \citenamefont {Carter},\ and\ \citenamefont {de~Jong}}]{mcclean2017hybrid} \BibitemOpen \bibfield {author} {\bibinfo {author} {\bibfnamefont {J.~R.}\ \bibnamefont {McClean}}, \bibinfo {author} {\bibfnamefont {M.~E.}\ \bibnamefont {Kimchi-Schwartz}}, \bibinfo {author} {\bibfnamefont {J.}~\bibnamefont {Carter}},\ and\ \bibinfo {author} {\bibfnamefont {W.~A.}\ \bibnamefont {de~Jong}},\ }\bibfield {title} {\bibinfo {title} {Hybrid quantum-classical hierarchy for mitigation of decoherence and determination of excited states},\ }\href@noop {} {\bibfield {journal} {\bibinfo {journal} {Physical Review A}\ }\textbf {\bibinfo {volume} {95}},\ \bibinfo {pages} {042308} (\bibinfo {year} {2017})}\BibitemShut {NoStop} \bibitem [{\citenamefont {Colless}\ \emph {et~al.}(2017)\citenamefont {Colless}, \citenamefont {Ramasesh}, \citenamefont {Dahlen}, \citenamefont {Blok}, \citenamefont {McClean}, \citenamefont {Carter}, \citenamefont {de~Jong},\ and\ \citenamefont {Siddiqi}}]{colless2017robust} \BibitemOpen \bibfield {author} {\bibinfo {author} {\bibfnamefont {J.~I.}\ \bibnamefont {Colless}}, \bibinfo {author} {\bibfnamefont {V.~V.}\ \bibnamefont {Ramasesh}}, \bibinfo {author} {\bibfnamefont {D.}~\bibnamefont {Dahlen}}, \bibinfo {author} {\bibfnamefont {M.~S.}\ \bibnamefont {Blok}}, \bibinfo {author} {\bibfnamefont 
{J.~R.}\ \bibnamefont {McClean}}, \bibinfo {author} {\bibfnamefont {J.}~\bibnamefont {Carter}}, \bibinfo {author} {\bibfnamefont {W.~A.}\ \bibnamefont {de~Jong}},\ and\ \bibinfo {author} {\bibfnamefont {I.}~\bibnamefont {Siddiqi}},\ }\bibfield {title} {\bibinfo {title} {Robust determination of molecular spectra on a quantum processor},\ }\href@noop {} {\bibfield {journal} {\bibinfo {journal} {arXiv preprint arXiv:1707.06408}\ } (\bibinfo {year} {2017})}\BibitemShut {NoStop} \bibitem [{\citenamefont {Kokail}\ \emph {et~al.}(2018)\citenamefont {Kokail}, \citenamefont {Maier}, \citenamefont {van Bijnen}, \citenamefont {Brydges}, \citenamefont {Joshi}, \citenamefont {Jurcevic}, \citenamefont {Muschik}, \citenamefont {Silvi}, \citenamefont {Blatt}, \citenamefont {Roos} \emph {et~al.}}]{kokail2018self} \BibitemOpen \bibfield {author} {\bibinfo {author} {\bibfnamefont {C.}~\bibnamefont {Kokail}}, \bibinfo {author} {\bibfnamefont {C.}~\bibnamefont {Maier}}, \bibinfo {author} {\bibfnamefont {R.}~\bibnamefont {van Bijnen}}, \bibinfo {author} {\bibfnamefont {T.}~\bibnamefont {Brydges}}, \bibinfo {author} {\bibfnamefont {M.~K.}\ \bibnamefont {Joshi}}, \bibinfo {author} {\bibfnamefont {P.}~\bibnamefont {Jurcevic}}, \bibinfo {author} {\bibfnamefont {C.~A.}\ \bibnamefont {Muschik}}, \bibinfo {author} {\bibfnamefont {P.}~\bibnamefont {Silvi}}, \bibinfo {author} {\bibfnamefont {R.}~\bibnamefont {Blatt}}, \bibinfo {author} {\bibfnamefont {C.~F.}\ \bibnamefont {Roos}}, \emph {et~al.},\ }\bibfield {title} {\bibinfo {title} {{Self-Verifying Variational Quantum Simulation of the Lattice Schwinger Model}},\ }\href@noop {} {\bibfield {journal} {\bibinfo {journal} {arXiv preprint arXiv:1810.03421}\ } (\bibinfo {year} {2018})}\BibitemShut {NoStop} \bibitem [{\citenamefont {Sharma}\ \emph {et~al.}(2020)\citenamefont {Sharma}, \citenamefont {Khatri}, \citenamefont {Cerezo},\ and\ \citenamefont {Coles}}]{sharma2020noise} \BibitemOpen \bibfield {author} {\bibinfo {author} {\bibfnamefont 
{K.}~\bibnamefont {Sharma}}, \bibinfo {author} {\bibfnamefont {S.}~\bibnamefont {Khatri}}, \bibinfo {author} {\bibfnamefont {M.}~\bibnamefont {Cerezo}},\ and\ \bibinfo {author} {\bibfnamefont {P.~J.}\ \bibnamefont {Coles}},\ }\bibfield {title} {\bibinfo {title} {{Noise resilience of variational quantum compiling}},\ }\href@noop {} {\bibfield {journal} {\bibinfo {journal} {New Journal of Physics}\ }\textbf {\bibinfo {volume} {22}},\ \bibinfo {pages} {043006} (\bibinfo {year} {2020})}\BibitemShut {NoStop} \bibitem [{\citenamefont {Cerezo}\ \emph {et~al.}(2020{\natexlab{a}})\citenamefont {Cerezo}, \citenamefont {Sharma}, \citenamefont {Arrasmith},\ and\ \citenamefont {Coles}}]{cerezo2020variational} \BibitemOpen \bibfield {author} {\bibinfo {author} {\bibfnamefont {M.}~\bibnamefont {Cerezo}}, \bibinfo {author} {\bibfnamefont {K.}~\bibnamefont {Sharma}}, \bibinfo {author} {\bibfnamefont {A.}~\bibnamefont {Arrasmith}},\ and\ \bibinfo {author} {\bibfnamefont {P.~J.}\ \bibnamefont {Coles}},\ }\bibfield {title} {\bibinfo {title} {{Variational Quantum State Eigensolver}},\ }\href@noop {} {\bibfield {journal} {\bibinfo {journal} {arXiv preprint arXiv:2004.01372}\ } (\bibinfo {year} {2020}{\natexlab{a}})}\BibitemShut {NoStop} \bibitem [{\citenamefont {Endo}\ \emph {et~al.}(2020)\citenamefont {Endo}, \citenamefont {Sun}, \citenamefont {Li}, \citenamefont {Benjamin},\ and\ \citenamefont {Yuan}}]{SuguruGeneral} \BibitemOpen \bibfield {author} {\bibinfo {author} {\bibfnamefont {S.}~\bibnamefont {Endo}}, \bibinfo {author} {\bibfnamefont {J.}~\bibnamefont {Sun}}, \bibinfo {author} {\bibfnamefont {Y.}~\bibnamefont {Li}}, \bibinfo {author} {\bibfnamefont {S.~C.}\ \bibnamefont {Benjamin}},\ and\ \bibinfo {author} {\bibfnamefont {X.}~\bibnamefont {Yuan}},\ }\bibfield {title} {\bibinfo {title} {Variational quantum simulation of general processes},\ }\href {https://doi.org/10.1103/PhysRevLett.125.010501} {\bibfield {journal} {\bibinfo {journal} {Phys. Rev. 
Lett.}\ }\textbf {\bibinfo {volume} {125}},\ \bibinfo {pages} {010501} (\bibinfo {year} {2020})}\BibitemShut {NoStop} \bibitem [{\citenamefont {Koczor}\ and\ \citenamefont {Benjamin}(2020)}]{koczor2020quantum} \BibitemOpen \bibfield {author} {\bibinfo {author} {\bibfnamefont {B.}~\bibnamefont {Koczor}}\ and\ \bibinfo {author} {\bibfnamefont {S.~C.}\ \bibnamefont {Benjamin}},\ }\bibfield {title} {\bibinfo {title} {Quantum analytic descent},\ }\href@noop {} {\bibfield {journal} {\bibinfo {journal} {arXiv preprint arXiv:2008.13774}\ } (\bibinfo {year} {2020})}\BibitemShut {NoStop} \bibitem [{\citenamefont {Koczor}(2020)}]{koczor2020exponential} \BibitemOpen \bibfield {author} {\bibinfo {author} {\bibfnamefont {B.}~\bibnamefont {Koczor}},\ }\bibfield {title} {\bibinfo {title} {Exponential error suppression for near-term quantum devices},\ }\href@noop {} {\bibfield {journal} {\bibinfo {journal} {Phys. Rev. X (in production) arXiv:2011.05942}\ } (\bibinfo {year} {2020})}\BibitemShut {NoStop} \bibitem [{\citenamefont {Preskill}(2018)}]{preskill2018quantum} \BibitemOpen \bibfield {author} {\bibinfo {author} {\bibfnamefont {J.}~\bibnamefont {Preskill}},\ }\bibfield {title} {\bibinfo {title} {{Quantum Computing in the NISQ era and beyond}},\ }\href@noop {} {\bibfield {journal} {\bibinfo {journal} {arXiv preprint arXiv:1801.00862}\ } (\bibinfo {year} {2018})}\BibitemShut {NoStop} \bibitem [{\citenamefont {Kassal}\ \emph {et~al.}(2011)\citenamefont {Kassal}, \citenamefont {Whitfield}, \citenamefont {Perdomo-Ortiz}, \citenamefont {Yung},\ and\ \citenamefont {Aspuru-Guzik}}]{kassal2011simulating} \BibitemOpen \bibfield {author} {\bibinfo {author} {\bibfnamefont {I.}~\bibnamefont {Kassal}}, \bibinfo {author} {\bibfnamefont {J.~D.}\ \bibnamefont {Whitfield}}, \bibinfo {author} {\bibfnamefont {A.}~\bibnamefont {Perdomo-Ortiz}}, \bibinfo {author} {\bibfnamefont {M.-H.}\ \bibnamefont {Yung}},\ and\ \bibinfo {author} {\bibfnamefont {A.}~\bibnamefont {Aspuru-Guzik}},\ }\bibfield 
{title} {\bibinfo {title} {Simulating chemistry using quantum computers},\ }\href@noop {} {\bibfield {journal} {\bibinfo {journal} {Annual review of physical chemistry}\ }\textbf {\bibinfo {volume} {62}},\ \bibinfo {pages} {185} (\bibinfo {year} {2011})}\BibitemShut {NoStop} \bibitem [{\citenamefont {Lu}\ \emph {et~al.}(2012)\citenamefont {Lu}, \citenamefont {Xu}, \citenamefont {Xu}, \citenamefont {Li}, \citenamefont {Chen}, \citenamefont {Peng}, \citenamefont {Xu},\ and\ \citenamefont {Du}}]{C2CP23700H} \BibitemOpen \bibfield {author} {\bibinfo {author} {\bibfnamefont {D.}~\bibnamefont {Lu}}, \bibinfo {author} {\bibfnamefont {B.}~\bibnamefont {Xu}}, \bibinfo {author} {\bibfnamefont {N.}~\bibnamefont {Xu}}, \bibinfo {author} {\bibfnamefont {Z.}~\bibnamefont {Li}}, \bibinfo {author} {\bibfnamefont {H.}~\bibnamefont {Chen}}, \bibinfo {author} {\bibfnamefont {X.}~\bibnamefont {Peng}}, \bibinfo {author} {\bibfnamefont {R.}~\bibnamefont {Xu}},\ and\ \bibinfo {author} {\bibfnamefont {J.}~\bibnamefont {Du}},\ }\bibfield {title} {\bibinfo {title} {Quantum chemistry simulation on quantum computers: theories and experiments},\ }\href {https://doi.org/10.1039/C2CP23700H} {\bibfield {journal} {\bibinfo {journal} {Phys. Chem. Chem. 
Phys.}\ }\textbf {\bibinfo {volume} {14}},\ \bibinfo {pages} {9411} (\bibinfo {year} {2012})}\BibitemShut {NoStop} \bibitem [{\citenamefont {Whaley}\ \emph {et~al.}(2014)\citenamefont {Whaley}, \citenamefont {Dinner},\ and\ \citenamefont {Rice}}]{whaley2014quantum} \BibitemOpen \bibfield {author} {\bibinfo {author} {\bibfnamefont {K.~B.}\ \bibnamefont {Whaley}}, \bibinfo {author} {\bibfnamefont {A.~R.}\ \bibnamefont {Dinner}},\ and\ \bibinfo {author} {\bibfnamefont {S.~A.}\ \bibnamefont {Rice}},\ }\href@noop {} {\emph {\bibinfo {title} {{Quantum information and computation for chemistry}}}}\ (\bibinfo {publisher} {John Wiley \& Sons},\ \bibinfo {year} {2014})\BibitemShut {NoStop} \bibitem [{\citenamefont {McArdle}\ \emph {et~al.}(2018)\citenamefont {McArdle}, \citenamefont {Endo}, \citenamefont {Aspuru-Guzik}, \citenamefont {Benjamin},\ and\ \citenamefont {Yuan}}]{ourReview} \BibitemOpen \bibfield {author} {\bibinfo {author} {\bibfnamefont {S.}~\bibnamefont {McArdle}}, \bibinfo {author} {\bibfnamefont {S.}~\bibnamefont {Endo}}, \bibinfo {author} {\bibfnamefont {A.}~\bibnamefont {Aspuru-Guzik}}, \bibinfo {author} {\bibfnamefont {S.}~\bibnamefont {Benjamin}},\ and\ \bibinfo {author} {\bibfnamefont {X.}~\bibnamefont {Yuan}},\ }\bibfield {title} {\bibinfo {title} {{Quantum computational chemistry}},\ }\href@noop {} {\bibfield {journal} {\bibinfo {journal} {arXiv preprint arXiv:1808.10402}\ } (\bibinfo {year} {2018})}\BibitemShut {NoStop} \bibitem [{\citenamefont {Quantum}\ \emph {et~al.}(2020)\citenamefont {Quantum} \emph {et~al.}}]{google2020hartree} \BibitemOpen \bibfield {author} {\bibinfo {author} {\bibfnamefont {G.~A.}\ \bibnamefont {Quantum}} \emph {et~al.},\ }\bibfield {title} {\bibinfo {title} {{Hartree-Fock on a superconducting qubit quantum computer}},\ }\href@noop {} {\bibfield {journal} {\bibinfo {journal} {Science}\ }\textbf {\bibinfo {volume} {369}},\ \bibinfo {pages} {1084} (\bibinfo {year} {2020})}\BibitemShut {NoStop} \bibitem [{\citenamefont {Sweke}\ 
\emph {et~al.}(2019)\citenamefont {Sweke}, \citenamefont {Wilde}, \citenamefont {Meyer}, \citenamefont {Schuld}, \citenamefont {F{\"a}hrmann}, \citenamefont {Meynard-Piganeau},\ and\ \citenamefont {Eisert}}]{sweke2019stochastic} \BibitemOpen \bibfield {author} {\bibinfo {author} {\bibfnamefont {R.}~\bibnamefont {Sweke}}, \bibinfo {author} {\bibfnamefont {F.}~\bibnamefont {Wilde}}, \bibinfo {author} {\bibfnamefont {J.}~\bibnamefont {Meyer}}, \bibinfo {author} {\bibfnamefont {M.}~\bibnamefont {Schuld}}, \bibinfo {author} {\bibfnamefont {P.~K.}\ \bibnamefont {F{\"a}hrmann}}, \bibinfo {author} {\bibfnamefont {B.}~\bibnamefont {Meynard-Piganeau}},\ and\ \bibinfo {author} {\bibfnamefont {J.}~\bibnamefont {Eisert}},\ }\bibfield {title} {\bibinfo {title} {Stochastic gradient descent for hybrid quantum-classical optimization},\ }\href@noop {} {\bibfield {journal} {\bibinfo {journal} {arXiv preprint arXiv:1910.01155}\ } (\bibinfo {year} {2019})}\BibitemShut {NoStop} \bibitem [{\citenamefont {K{\"u}bler}\ \emph {et~al.}(2019)\citenamefont {K{\"u}bler}, \citenamefont {Arrasmith}, \citenamefont {Cincio},\ and\ \citenamefont {Coles}}]{kubler2019adaptive} \BibitemOpen \bibfield {author} {\bibinfo {author} {\bibfnamefont {J.~M.}\ \bibnamefont {K{\"u}bler}}, \bibinfo {author} {\bibfnamefont {A.}~\bibnamefont {Arrasmith}}, \bibinfo {author} {\bibfnamefont {L.}~\bibnamefont {Cincio}},\ and\ \bibinfo {author} {\bibfnamefont {P.~J.}\ \bibnamefont {Coles}},\ }\bibfield {title} {\bibinfo {title} {{An adaptive optimizer for measurement-frugal variational algorithms}},\ }\href@noop {} {\bibfield {journal} {\bibinfo {journal} {arXiv preprint arXiv:1909.09083}\ } (\bibinfo {year} {2019})}\BibitemShut {NoStop} \bibitem [{\citenamefont {Gentini}\ \emph {et~al.}(2019)\citenamefont {Gentini}, \citenamefont {Cuccoli}, \citenamefont {Pirandola}, \citenamefont {Verrucchi},\ and\ \citenamefont {Banchi}}]{qfi} \BibitemOpen \bibfield {author} {\bibinfo {author} {\bibfnamefont {L.}~\bibnamefont 
{Gentini}}, \bibinfo {author} {\bibfnamefont {A.}~\bibnamefont {Cuccoli}}, \bibinfo {author} {\bibfnamefont {S.}~\bibnamefont {Pirandola}}, \bibinfo {author} {\bibfnamefont {P.}~\bibnamefont {Verrucchi}},\ and\ \bibinfo {author} {\bibfnamefont {L.}~\bibnamefont {Banchi}},\ }\bibfield {title} {\bibinfo {title} {{Noise-Assisted Variational Hybrid Quantum-Classical Optimization}},\ }\href@noop {} {\bibfield {journal} {\bibinfo {journal} {arXiv preprint arXiv:1912.06744}\ } (\bibinfo {year} {2019})}\BibitemShut {NoStop} \bibitem [{\citenamefont {Crawford}\ \emph {et~al.}(2019)\citenamefont {Crawford}, \citenamefont {van Straaten}, \citenamefont {Wang}, \citenamefont {Parks}, \citenamefont {Campbell},\ and\ \citenamefont {Brierley}}]{Crawford2019} \BibitemOpen \bibfield {author} {\bibinfo {author} {\bibfnamefont {O.}~\bibnamefont {Crawford}}, \bibinfo {author} {\bibfnamefont {B.}~\bibnamefont {van Straaten}}, \bibinfo {author} {\bibfnamefont {D.}~\bibnamefont {Wang}}, \bibinfo {author} {\bibfnamefont {T.}~\bibnamefont {Parks}}, \bibinfo {author} {\bibfnamefont {E.}~\bibnamefont {Campbell}},\ and\ \bibinfo {author} {\bibfnamefont {S.}~\bibnamefont {Brierley}},\ }\bibfield {title} {\bibinfo {title} {{Efficient quantum measurement of Pauli operators}},\ }\href@noop {} {\bibfield {journal} {\bibinfo {journal} {arXiv preprint arXiv:1908.06942}\ } (\bibinfo {year} {2019})}\BibitemShut {NoStop} \bibitem [{\citenamefont {Arrasmith}\ \emph {et~al.}(2020)\citenamefont {Arrasmith}, \citenamefont {Cincio}, \citenamefont {Somma},\ and\ \citenamefont {Coles}}]{arrasmith2020operator} \BibitemOpen \bibfield {author} {\bibinfo {author} {\bibfnamefont {A.}~\bibnamefont {Arrasmith}}, \bibinfo {author} {\bibfnamefont {L.}~\bibnamefont {Cincio}}, \bibinfo {author} {\bibfnamefont {R.~D.}\ \bibnamefont {Somma}},\ and\ \bibinfo {author} {\bibfnamefont {P.~J.}\ \bibnamefont {Coles}},\ }\bibfield {title} {\bibinfo {title} {{Operator Sampling for Shot-frugal Optimization in Variational 
Algorithms}},\ }\href@noop {} {\bibfield {journal} {\bibinfo {journal} {arXiv preprint arXiv:2004.06252}\ } (\bibinfo {year} {2020})}\BibitemShut {NoStop} \bibitem [{\citenamefont {Hadfield}\ \emph {et~al.}(2020)\citenamefont {Hadfield}, \citenamefont {Bravyi}, \citenamefont {Raymond},\ and\ \citenamefont {Mezzacapo}}]{hadfield2020measurements} \BibitemOpen \bibfield {author} {\bibinfo {author} {\bibfnamefont {C.}~\bibnamefont {Hadfield}}, \bibinfo {author} {\bibfnamefont {S.}~\bibnamefont {Bravyi}}, \bibinfo {author} {\bibfnamefont {R.}~\bibnamefont {Raymond}},\ and\ \bibinfo {author} {\bibfnamefont {A.}~\bibnamefont {Mezzacapo}},\ }\bibfield {title} {\bibinfo {title} {{Measurements of Quantum Hamiltonians with Locally-Biased Classical Shadows}},\ }\href@noop {} {\bibfield {journal} {\bibinfo {journal} {arXiv preprint arXiv:2006.15788}\ } (\bibinfo {year} {2020})}\BibitemShut {NoStop} \bibitem [{\citenamefont {Koczor}\ and\ \citenamefont {Benjamin}(2019)}]{koczor2019quantum} \BibitemOpen \bibfield {author} {\bibinfo {author} {\bibfnamefont {B.}~\bibnamefont {Koczor}}\ and\ \bibinfo {author} {\bibfnamefont {S.~C.}\ \bibnamefont {Benjamin}},\ }\bibfield {title} {\bibinfo {title} {{Quantum natural gradient generalised to non-unitary circuits}},\ }\href@noop {} {\bibfield {journal} {\bibinfo {journal} {arXiv preprint arXiv:1912.08660}\ } (\bibinfo {year} {2019})}\BibitemShut {NoStop} \bibitem [{\citenamefont {Ferguson}\ \emph {et~al.}(2021)\citenamefont {Ferguson}, \citenamefont {Dellantonio}, \citenamefont {Balushi}, \citenamefont {Jansen}, \citenamefont {D\"ur},\ and\ \citenamefont {Muschik}}]{PhysRevLett.126.220501} \BibitemOpen \bibfield {author} {\bibinfo {author} {\bibfnamefont {R.~R.}\ \bibnamefont {Ferguson}}, \bibinfo {author} {\bibfnamefont {L.}~\bibnamefont {Dellantonio}}, \bibinfo {author} {\bibfnamefont {A.~A.}\ \bibnamefont {Balushi}}, \bibinfo {author} {\bibfnamefont {K.}~\bibnamefont {Jansen}}, \bibinfo {author} {\bibfnamefont {W.}~\bibnamefont 
{D\"ur}},\ and\ \bibinfo {author} {\bibfnamefont {C.~A.}\ \bibnamefont {Muschik}},\ }\bibfield {title} {\bibinfo {title} {{Measurement-Based Variational Quantum Eigensolver}},\ }\href {https://doi.org/10.1103/PhysRevLett.126.220501} {\bibfield {journal} {\bibinfo {journal} {Phys. Rev. Lett.}\ }\textbf {\bibinfo {volume} {126}},\ \bibinfo {pages} {220501} (\bibinfo {year} {2021})}\BibitemShut {NoStop} \bibitem [{\citenamefont {McArdle}\ \emph {et~al.}(2019)\citenamefont {McArdle}, \citenamefont {Jones}, \citenamefont {Endo}, \citenamefont {Li}, \citenamefont {Benjamin},\ and\ \citenamefont {Yuan}}]{samimagtime} \BibitemOpen \bibfield {author} {\bibinfo {author} {\bibfnamefont {S.}~\bibnamefont {McArdle}}, \bibinfo {author} {\bibfnamefont {T.}~\bibnamefont {Jones}}, \bibinfo {author} {\bibfnamefont {S.}~\bibnamefont {Endo}}, \bibinfo {author} {\bibfnamefont {Y.}~\bibnamefont {Li}}, \bibinfo {author} {\bibfnamefont {S.~C.}\ \bibnamefont {Benjamin}},\ and\ \bibinfo {author} {\bibfnamefont {X.}~\bibnamefont {Yuan}},\ }\bibfield {title} {\bibinfo {title} {Variational ansatz-based quantum simulation of imaginary time evolution},\ }\href@noop {} {\bibfield {journal} {\bibinfo {journal} {npj Quantum Information}\ }\textbf {\bibinfo {volume} {5}},\ \bibinfo {pages} {1} (\bibinfo {year} {2019})}\BibitemShut {NoStop} \bibitem [{\citenamefont {Rebentrost}\ \emph {et~al.}(2019)\citenamefont {Rebentrost}, \citenamefont {Schuld}, \citenamefont {Wossnig}, \citenamefont {Petruccione},\ and\ \citenamefont {Lloyd}}]{Rebentrost_2019} \BibitemOpen \bibfield {author} {\bibinfo {author} {\bibfnamefont {P.}~\bibnamefont {Rebentrost}}, \bibinfo {author} {\bibfnamefont {M.}~\bibnamefont {Schuld}}, \bibinfo {author} {\bibfnamefont {L.}~\bibnamefont {Wossnig}}, \bibinfo {author} {\bibfnamefont {F.}~\bibnamefont {Petruccione}},\ and\ \bibinfo {author} {\bibfnamefont {S.}~\bibnamefont {Lloyd}},\ }\bibfield {title} {\bibinfo {title} {{Quantum gradient descent and Newton's method for constrained 
polynomial optimization}},\ }\href {https://doi.org/10.1088/1367-2630/ab2a9e} {\bibfield {journal} {\bibinfo {journal} {New Journal of Physics}\ }\textbf {\bibinfo {volume} {21}},\ \bibinfo {pages} {073023} (\bibinfo {year} {2019})}\BibitemShut {NoStop} \bibitem [{\citenamefont {Wierichs}\ \emph {et~al.}(2020)\citenamefont {Wierichs}, \citenamefont {Gogolin},\ and\ \citenamefont {Kastoryano}}]{wierichs2020avoiding} \BibitemOpen \bibfield {author} {\bibinfo {author} {\bibfnamefont {D.}~\bibnamefont {Wierichs}}, \bibinfo {author} {\bibfnamefont {C.}~\bibnamefont {Gogolin}},\ and\ \bibinfo {author} {\bibfnamefont {M.}~\bibnamefont {Kastoryano}},\ }\bibfield {title} {\bibinfo {title} {{Avoiding local minima in variational quantum eigensolvers with the natural gradient optimizer}},\ }\href@noop {} {\bibfield {journal} {\bibinfo {journal} {arXiv preprint arXiv:2004.14666}\ } (\bibinfo {year} {2020})}\BibitemShut {NoStop} \bibitem [{\citenamefont {Li}\ and\ \citenamefont {Benjamin}(2017{\natexlab{b}})}]{li2017efficient} \BibitemOpen \bibfield {author} {\bibinfo {author} {\bibfnamefont {Y.}~\bibnamefont {Li}}\ and\ \bibinfo {author} {\bibfnamefont {S.~C.}\ \bibnamefont {Benjamin}},\ }\bibfield {title} {\bibinfo {title} {Efficient variational quantum simulator incorporating active error minimization},\ }\href@noop {} {\bibfield {journal} {\bibinfo {journal} {Phys. Rev. 
X}\ }\textbf {\bibinfo {volume} {7}},\ \bibinfo {pages} {021050} (\bibinfo {year} {2017}{\natexlab{b}})}\BibitemShut {NoStop} \bibitem [{\citenamefont {Yuan}\ \emph {et~al.}(2019)\citenamefont {Yuan}, \citenamefont {Endo}, \citenamefont {Zhao}, \citenamefont {Li},\ and\ \citenamefont {Benjamin}}]{xiaotheory} \BibitemOpen \bibfield {author} {\bibinfo {author} {\bibfnamefont {X.}~\bibnamefont {Yuan}}, \bibinfo {author} {\bibfnamefont {S.}~\bibnamefont {Endo}}, \bibinfo {author} {\bibfnamefont {Q.}~\bibnamefont {Zhao}}, \bibinfo {author} {\bibfnamefont {Y.}~\bibnamefont {Li}},\ and\ \bibinfo {author} {\bibfnamefont {S.~C.}\ \bibnamefont {Benjamin}},\ }\bibfield {title} {\bibinfo {title} {Theory of variational quantum simulation},\ }\href@noop {} {\bibfield {journal} {\bibinfo {journal} {Quantum}\ }\textbf {\bibinfo {volume} {3}},\ \bibinfo {pages} {191} (\bibinfo {year} {2019})}\BibitemShut {NoStop} \bibitem [{\citenamefont {Stokes}\ \emph {et~al.}(2019)\citenamefont {Stokes}, \citenamefont {Izaac}, \citenamefont {Killoran},\ and\ \citenamefont {Carleo}}]{quantumnatgrad} \BibitemOpen \bibfield {author} {\bibinfo {author} {\bibfnamefont {J.}~\bibnamefont {Stokes}}, \bibinfo {author} {\bibfnamefont {J.}~\bibnamefont {Izaac}}, \bibinfo {author} {\bibfnamefont {N.}~\bibnamefont {Killoran}},\ and\ \bibinfo {author} {\bibfnamefont {G.}~\bibnamefont {Carleo}},\ }\bibfield {title} {\bibinfo {title} {Quantum natural gradient},\ }\href@noop {} {\bibfield {journal} {\bibinfo {journal} {arXiv preprint arXiv:1909.02108}\ } (\bibinfo {year} {2019})}\BibitemShut {NoStop} \bibitem [{Note1()}]{Note1} \BibitemOpen \bibinfo {note} {Where the ansatz parameter $\theta $ corresponds to a global $Z$ rotation of all the qubits and therefore $r_g = N$.}\BibitemShut {Stop} \bibitem [{\citenamefont {Pezz\`e}\ \emph {et~al.}(2018)\citenamefont {Pezz\`e}, \citenamefont {Smerzi}, \citenamefont {Oberthaler}, \citenamefont {Schmied},\ and\ \citenamefont {Treutlein}}]{review} \BibitemOpen \bibfield 
{author} {\bibinfo {author} {\bibfnamefont {L.}~\bibnamefont {Pezz\`e}}, \bibinfo {author} {\bibfnamefont {A.}~\bibnamefont {Smerzi}}, \bibinfo {author} {\bibfnamefont {M.~K.}\ \bibnamefont {Oberthaler}}, \bibinfo {author} {\bibfnamefont {R.}~\bibnamefont {Schmied}},\ and\ \bibinfo {author} {\bibfnamefont {P.}~\bibnamefont {Treutlein}},\ }\bibfield {title} {\bibinfo {title} {{Quantum metrology with nonclassical states of atomic ensembles}},\ }\href {https://doi.org/10.1103/RevModPhys.90.035005} {\bibfield {journal} {\bibinfo {journal} {Rev. Mod. Phys.}\ }\textbf {\bibinfo {volume} {90}},\ \bibinfo {pages} {035005} (\bibinfo {year} {2018})}\BibitemShut {NoStop} \bibitem [{\citenamefont {Giovannetti}\ \emph {et~al.}(2011)\citenamefont {Giovannetti}, \citenamefont {Lloyd},\ and\ \citenamefont {Maccone}}]{giovannetti11} \BibitemOpen \bibfield {author} {\bibinfo {author} {\bibfnamefont {V.}~\bibnamefont {Giovannetti}}, \bibinfo {author} {\bibfnamefont {S.}~\bibnamefont {Lloyd}},\ and\ \bibinfo {author} {\bibfnamefont {L.}~\bibnamefont {Maccone}},\ }\bibfield {title} {\bibinfo {title} {{Advances in quantum metrology}},\ }\href@noop {} {\bibfield {journal} {\bibinfo {journal} {Nat. 
Phot.}\ }\textbf {\bibinfo {volume} {5}},\ \bibinfo {pages} {222} (\bibinfo {year} {2011})}\BibitemShut {NoStop} \bibitem [{\citenamefont {Koczor}\ \emph {et~al.}(2020)\citenamefont {Koczor}, \citenamefont {Endo}, \citenamefont {Jones}, \citenamefont {Matsuzaki},\ and\ \citenamefont {Benjamin}}]{koczor2019variational} \BibitemOpen \bibfield {author} {\bibinfo {author} {\bibfnamefont {B.}~\bibnamefont {Koczor}}, \bibinfo {author} {\bibfnamefont {S.}~\bibnamefont {Endo}}, \bibinfo {author} {\bibfnamefont {T.}~\bibnamefont {Jones}}, \bibinfo {author} {\bibfnamefont {Y.}~\bibnamefont {Matsuzaki}},\ and\ \bibinfo {author} {\bibfnamefont {S.~C.}\ \bibnamefont {Benjamin}},\ }\bibfield {title} {\bibinfo {title} {{Variational-State Quantum Metrology}},\ }\href {https://doi.org/10.1088/1367-2630/ab965e} {\bibfield {journal} {\bibinfo {journal} {New J. Phys.}\ }\textbf {\bibinfo {volume} {22}},\ \bibinfo {pages} {083038} (\bibinfo {year} {2020})}\BibitemShut {NoStop} \bibitem [{Note2()}]{Note2} \BibitemOpen \bibinfo {note} {In Fig.~1 in the uniformly distributed scenario the same number of measurements are used to determine every entry of the metric tensor (gradient vector) and only the overall number $N_{F}$ ($N_{g}$) of measurements to determine the metric tensor (gradient vector) is chosen optimally. 
In the naive scheme both $N_{F}$ and $N_{g}$ are additionally fixed}\BibitemShut {NoStop} \bibitem [{\citenamefont {Yen}\ \emph {et~al.}(2020)\citenamefont {Yen}, \citenamefont {Verteletskyi},\ and\ \citenamefont {Izmaylov}}]{yen2020measuring} \BibitemOpen \bibfield {author} {\bibinfo {author} {\bibfnamefont {T.-C.}\ \bibnamefont {Yen}}, \bibinfo {author} {\bibfnamefont {V.}~\bibnamefont {Verteletskyi}},\ and\ \bibinfo {author} {\bibfnamefont {A.~F.}\ \bibnamefont {Izmaylov}},\ }\bibfield {title} {\bibinfo {title} {{Measuring all compatible operators in one series of single-qubit measurements using unitary transformations}},\ }\href@noop {} {\bibfield {journal} {\bibinfo {journal} {Journal of chemical theory and computation}\ }\textbf {\bibinfo {volume} {16}},\ \bibinfo {pages} {2400} (\bibinfo {year} {2020})}\BibitemShut {NoStop} \bibitem [{\citenamefont {Jena}\ \emph {et~al.}(2019)\citenamefont {Jena}, \citenamefont {Genin},\ and\ \citenamefont {Mosca}}]{jena2019pauli} \BibitemOpen \bibfield {author} {\bibinfo {author} {\bibfnamefont {A.}~\bibnamefont {Jena}}, \bibinfo {author} {\bibfnamefont {S.}~\bibnamefont {Genin}},\ and\ \bibinfo {author} {\bibfnamefont {M.}~\bibnamefont {Mosca}},\ }\bibfield {title} {\bibinfo {title} {{Pauli partitioning with respect to gate sets}},\ }\href@noop {} {\bibfield {journal} {\bibinfo {journal} {arXiv preprint arXiv:1907.07859}\ } (\bibinfo {year} {2019})}\BibitemShut {NoStop} \bibitem [{\citenamefont {Gokhale}\ \emph {et~al.}(2020)\citenamefont {Gokhale}, \citenamefont {Angiuli}, \citenamefont {Ding}, \citenamefont {Gui}, \citenamefont {Tomesh}, \citenamefont {Suchara}, \citenamefont {Martonosi},\ and\ \citenamefont {Chong}}]{gokhale2020n} \BibitemOpen \bibfield {author} {\bibinfo {author} {\bibfnamefont {P.}~\bibnamefont {Gokhale}}, \bibinfo {author} {\bibfnamefont {O.}~\bibnamefont {Angiuli}}, \bibinfo {author} {\bibfnamefont {Y.}~\bibnamefont {Ding}}, \bibinfo {author} {\bibfnamefont {K.}~\bibnamefont {Gui}}, \bibinfo 
{author} {\bibfnamefont {T.}~\bibnamefont {Tomesh}}, \bibinfo {author} {\bibfnamefont {M.}~\bibnamefont {Suchara}}, \bibinfo {author} {\bibfnamefont {M.}~\bibnamefont {Martonosi}},\ and\ \bibinfo {author} {\bibfnamefont {F.~T.}\ \bibnamefont {Chong}},\ }\bibfield {title} {\bibinfo {title} {{O(N3) Measurement Cost for Variational Quantum Eigensolver on Molecular Hamiltonians}},\ }\href@noop {} {\bibfield {journal} {\bibinfo {journal} {IEEE Transactions on Quantum Engineering}\ }\textbf {\bibinfo {volume} {1}},\ \bibinfo {pages} {1} (\bibinfo {year} {2020})}\BibitemShut {NoStop} \bibitem [{\citenamefont {Gokhale}\ \emph {et~al.}(2019)\citenamefont {Gokhale}, \citenamefont {Angiuli}, \citenamefont {Ding}, \citenamefont {Gui}, \citenamefont {Tomesh}, \citenamefont {Suchara}, \citenamefont {Martonosi},\ and\ \citenamefont {Chong}}]{gokhale2019minimizing} \BibitemOpen \bibfield {author} {\bibinfo {author} {\bibfnamefont {P.}~\bibnamefont {Gokhale}}, \bibinfo {author} {\bibfnamefont {O.}~\bibnamefont {Angiuli}}, \bibinfo {author} {\bibfnamefont {Y.}~\bibnamefont {Ding}}, \bibinfo {author} {\bibfnamefont {K.}~\bibnamefont {Gui}}, \bibinfo {author} {\bibfnamefont {T.}~\bibnamefont {Tomesh}}, \bibinfo {author} {\bibfnamefont {M.}~\bibnamefont {Suchara}}, \bibinfo {author} {\bibfnamefont {M.}~\bibnamefont {Martonosi}},\ and\ \bibinfo {author} {\bibfnamefont {F.~T.}\ \bibnamefont {Chong}},\ }\bibfield {title} {\bibinfo {title} {{Minimizing state preparations in variational quantum eigensolver by partitioning into commuting families}},\ }\href@noop {} {\bibfield {journal} {\bibinfo {journal} {arXiv preprint arXiv:1907.13623}\ } (\bibinfo {year} {2019})}\BibitemShut {NoStop} \bibitem [{\citenamefont {McClean}\ \emph {et~al.}(2018)\citenamefont {McClean}, \citenamefont {Boixo}, \citenamefont {Smelyanskiy}, \citenamefont {Babbush},\ and\ \citenamefont {Neven}}]{mcclean2018barren} \BibitemOpen \bibfield {author} {\bibinfo {author} {\bibfnamefont {J.~R.}\ \bibnamefont {McClean}}, 
\bibinfo {author} {\bibfnamefont {S.}~\bibnamefont {Boixo}}, \bibinfo {author} {\bibfnamefont {V.~N.}\ \bibnamefont {Smelyanskiy}}, \bibinfo {author} {\bibfnamefont {R.}~\bibnamefont {Babbush}},\ and\ \bibinfo {author} {\bibfnamefont {H.}~\bibnamefont {Neven}},\ }\bibfield {title} {\bibinfo {title} {{Barren plateaus in quantum neural network training landscapes}},\ }\href@noop {} {\bibfield {journal} {\bibinfo {journal} {Nature communications}\ }\textbf {\bibinfo {volume} {9}},\ \bibinfo {pages} {1} (\bibinfo {year} {2018})}\BibitemShut {NoStop} \bibitem [{\citenamefont {Grant}\ \emph {et~al.}(2019)\citenamefont {Grant}, \citenamefont {Wossnig}, \citenamefont {Ostaszewski},\ and\ \citenamefont {Benedetti}}]{grant2019initialization} \BibitemOpen \bibfield {author} {\bibinfo {author} {\bibfnamefont {E.}~\bibnamefont {Grant}}, \bibinfo {author} {\bibfnamefont {L.}~\bibnamefont {Wossnig}}, \bibinfo {author} {\bibfnamefont {M.}~\bibnamefont {Ostaszewski}},\ and\ \bibinfo {author} {\bibfnamefont {M.}~\bibnamefont {Benedetti}},\ }\bibfield {title} {\bibinfo {title} {{An initialization strategy for addressing barren plateaus in parametrized quantum circuits}},\ }\href@noop {} {\bibfield {journal} {\bibinfo {journal} {Quantum}\ }\textbf {\bibinfo {volume} {3}},\ \bibinfo {pages} {214} (\bibinfo {year} {2019})}\BibitemShut {NoStop} \bibitem [{\citenamefont {Cerezo}\ \emph {et~al.}(2020{\natexlab{b}})\citenamefont {Cerezo}, \citenamefont {Sone}, \citenamefont {Volkoff}, \citenamefont {Cincio},\ and\ \citenamefont {Coles}}]{cerezo2020cost} \BibitemOpen \bibfield {author} {\bibinfo {author} {\bibfnamefont {M.}~\bibnamefont {Cerezo}}, \bibinfo {author} {\bibfnamefont {A.}~\bibnamefont {Sone}}, \bibinfo {author} {\bibfnamefont {T.}~\bibnamefont {Volkoff}}, \bibinfo {author} {\bibfnamefont {L.}~\bibnamefont {Cincio}},\ and\ \bibinfo {author} {\bibfnamefont {P.~J.}\ \bibnamefont {Coles}},\ }\bibfield {title} {\bibinfo {title} {{Cost-Function-Dependent Barren Plateaus in Shallow 
Quantum Neural Networks}},\ }\href@noop {} {\bibfield {journal} {\bibinfo {journal} {arXiv preprint arXiv:2001.00550}\ } (\bibinfo {year} {2020}{\natexlab{b}})}\BibitemShut {NoStop} \bibitem [{\citenamefont {Jones}\ \emph {et~al.}(2019{\natexlab{b}})\citenamefont {Jones}, \citenamefont {Brown}, \citenamefont {Bush},\ and\ \citenamefont {Benjamin}}]{quest} \BibitemOpen \bibfield {author} {\bibinfo {author} {\bibfnamefont {T.}~\bibnamefont {Jones}}, \bibinfo {author} {\bibfnamefont {A.}~\bibnamefont {Brown}}, \bibinfo {author} {\bibfnamefont {I.}~\bibnamefont {Bush}},\ and\ \bibinfo {author} {\bibfnamefont {S.~C.}\ \bibnamefont {Benjamin}},\ }\bibfield {title} {\bibinfo {title} {{QuEST and high performance simulation of quantum computers}},\ }\href@noop {} {\bibfield {journal} {\bibinfo {journal} {Sci. Rep.}\ }\textbf {\bibinfo {volume} {9}},\ \bibinfo {pages} {10736} (\bibinfo {year} {2019}{\natexlab{b}})}\BibitemShut {NoStop} \bibitem [{\citenamefont {Jones}\ and\ \citenamefont {Benjamin}(2020)}]{questlink} \BibitemOpen \bibfield {author} {\bibinfo {author} {\bibfnamefont {T.}~\bibnamefont {Jones}}\ and\ \bibinfo {author} {\bibfnamefont {S.~C.}\ \bibnamefont {Benjamin}},\ }\bibfield {title} {\bibinfo {title} {{QuESTlink--Mathematica embiggened by a hardware-optimised quantum emulator}},\ }\href@noop {} {\bibfield {journal} {\bibinfo {journal} {Quantum Science and Technology}\ } (\bibinfo {year} {2020})}\BibitemShut {NoStop} \bibitem [{\citenamefont {Ku}(1966)}]{Ku1966} \BibitemOpen \bibfield {author} {\bibinfo {author} {\bibfnamefont {H.}~\bibnamefont {Ku}},\ }\bibfield {title} {\bibinfo {title} {{Notes on the use of propagation of error formulas}},\ }\href {https://doi.org/10.6028/jres.070c.025} {\bibfield {journal} {\bibinfo {journal} {Journal of Research of the National Bureau of Standards, Section C: Engineering and Instrumentation}\ }\textbf {\bibinfo {volume} {70C}},\ \bibinfo {pages} {263} (\bibinfo {year} {1966})}\BibitemShut {NoStop} \bibitem 
[{\citenamefont {Lefebvre}\ \emph {et~al.}(2000)\citenamefont {Lefebvre}, \citenamefont {Keeler}, \citenamefont {Sobie},\ and\ \citenamefont {White}}]{Lefebvre2000} \BibitemOpen \bibfield {author} {\bibinfo {author} {\bibfnamefont {M.}~\bibnamefont {Lefebvre}}, \bibinfo {author} {\bibfnamefont {R.~K.}\ \bibnamefont {Keeler}}, \bibinfo {author} {\bibfnamefont {R.}~\bibnamefont {Sobie}},\ and\ \bibinfo {author} {\bibfnamefont {J.}~\bibnamefont {White}},\ }\bibfield {title} {\bibinfo {title} {{Propagation of errors for matrix inversion}},\ }\href {https://doi.org/10.1016/S0168-9002(00)00323-5} {\bibfield {journal} {\bibinfo {journal} {Nuclear Instruments and Methods in Physics Research, Section A: Accelerators, Spectrometers, Detectors and Associated Equipment}\ }\textbf {\bibinfo {volume} {451}},\ \bibinfo {pages} {520} (\bibinfo {year} {2000})},\ \Eprint {https://arxiv.org/abs/9909031} {arXiv:9909031 [hep-ex]} \BibitemShut {NoStop} \end{thebibliography} \onecolumngrid \iffalse \begin{center} \textbf{\large Supplemental Materials: Measurement cost of metric-aware variational quantum algorithms} \end{center} \setcounter{equation}{0} \setcounter{table}{0} \setcounter{page}{1} \makeatletter \renewcommand{\theequation}{S\arabic{equation}} \renewcommand{\thepage}{S\arabic{page}} \renewcommand{\thetable}{S\arabic{table}} \renewcommand{\thefigure}{S\arabic{figure}} \else \appendix \fi \section{Determining variances \label{determineVar}} \subsection{Pauli decompositions \label{paulidecomps}} Let us denote the set of Hermitian matrices of dimension $d$ as $\mathrm{Herm}[ \mathds{C}^{d \times d}]$.
The Hamiltonian $\mathcal{H} \in \mathrm{Herm}[ \mathds{C}^{d \times d}]$ of a qubit-system in general decomposes into a sum over Pauli-operator strings via \begin{equation} \label{hamildec} \mathcal{H} = \sum_{l=1}^{r_h} h_l P_l, \quad \quad \textrm{with} \quad \quad \mathbb{R} \ni h_l:=\mathrm{Tr}[\mathcal{H} P_l]/d, \end{equation} where $P_l \in \mathrm{Herm}[ \mathds{C}^{d \times d}]$ are tensor products of single-qubit Pauli operators that act on an $N$-qubit system and form an orthonormal basis of the Hilbert-Schmidt operator space, and $d=2^N$ is the dimensionality. We denote as $r_h \in \mathbb{N}$ the Pauli rank, i.e., the number of non-zero Pauli components in the Hamiltonian. Note that in general $r_h \leq 4^N$. In the following derivations we assume for simplicity that ansatz circuits $U_c$ are unitary and decomposes into a product of individual gates \begin{equation} \label{circuitEq} U_c(\underline{\theta}) = U_\nu(\theta_\nu) \dots U_2(\theta_2) U_1(\theta_1), \end{equation} that typically act on a small subset of the system, e.g., one and two-qubit gates. We assume in Eq.~\eqref{circuitEq} for ease of notation that each quantum gate depends on an individual parameter $\theta_i$ with $i=\{1, 2, \dots \nu \}$. Individual gates $U_k(\theta_k) \in SU(d)$ of the quantum circuit from Eq.~\eqref{circuitEq} are in general of the form $U_k(\theta_k) := \exp[-i \theta_k G_k]$ and their generators $G_k \in \mathrm{Herm}[ \mathds{C}^{d \times d}]$ decompose into a sum of Pauli strings resulting in \begin{equation*} U_k(\theta_k) = \exp[-i \theta_k G_k] = \exp[-i \theta_k \sum_{l=1}^{r^{(k)}_g} g_{kl} P_l], \quad \quad \text{with} \quad \quad \mathbb{R} \ni g_{kl}:=\mathrm{Tr}[G_k P_l]/d \end{equation*} and $r^{(k)}_g \in \mathbb{N}$ is the Pauli rank of the generator $G_k$. We additionally assume that $g_{kl} \leq 1/2$ for simplicity -- but any other upper bound could be specified. 
It follows in general that the derivative $\partial_k U_k(\theta_k)$ decomposes into a sum of $r^{(k)}_g$ unitary operators as \begin{equation}\label{generalDeriv} \partial_k U_k(\theta_k) = -i \sum_{l=1}^{r^{(k)}_g} g_{kl} P_l U_k(\theta_k). \end{equation} For ease of notation, in the following we consider circuits via Eq.~\eqref{circuitEq} which decompose into gates $U_k(\theta_k)$ with Pauli rank $r_g = 1$. This is naturally the case for a wide variety of ansatz circuits, e.g., circuits that consist of single-qubit rotations and two-qubit $ZZ$ or $XX$ evolution gates as depicted in Fig.~\ref{ansatzfig}. This assumption results in a simplified structure of the gates as $U_k(\theta_k) := \exp[-i \theta_k P_k/2]$ and their derivatives as \begin{equation}\label{simpleDeriv} \partial_k U_k(\theta_k) = -\tfrac{i}{2} P_k U_k(\theta_k), \end{equation} where $P_k$ is the Pauli generator of the gate $U_k(\theta_k)$. This construction simplifies our following derivations, however, the generalisation to arbitrary parametrised gates straightforwardly follows from linearity of Eq.~\eqref{generalDeriv}. We finally define the partial derivative of the circuit in Eq.~\eqref{circuitEq} using our simplified ansatz as \begin{equation} \nonumber D_k := 2i \, \partial_k U_c(\underline{\theta}) = U_\nu(\theta_\nu) \dots P_k U_k(\theta_k) \dots U_2(\theta_2) U_1(\theta_1), \end{equation} which itself is unitary via $[D_k]^\dagger = [D_k]^{-1}$ (and we omit its explicit dependence on the parameters $\underline{\theta}$) and $P_l P_l^\dagger = \mathrm{Id}_d$. We remark that in case of non-unitary parametrisations one would need to consider the general mapping $\rho(\underline{\theta}) := \Phi(\underline{\theta}) \, \rho_0$. The circuit derivative then decomposes into Pauli terms as \begin{equation} \partial_k \rho(\underline{\theta}) = \sum_{m,n=1}^{r^{(k)}_p} p_{kmn} P_m \rho(\underline{\theta}) P_n.
\end{equation} \subsection{Upper bound on the quantum Fisher information} We now derive a general upper bound on the quantum Fisher information for unitary parametrisations. \begin{lemma} \label{qfilemma} In case of unitary ansatz circuits that act on arbitrary quantum states $\rho$ via quantum gates that decompose into at most $r_g$ Pauli terms, entries of the quantum Fisher information matrix are upper bounded as $[\mathbf{F}_Q]_{kl} \leq r_g^2$. \end{lemma} \begin{proof} When the ansatz circuit consists of unitary gates, the quantum Fisher information assumes its maximum for pure states. Considering the pure state $\rho=| \psi \rangle \langle \psi |$, it follows from \cite{koczor2019quantum} that \begin{equation*} [\mathbf{F}_Q]_{kl} = 2\, \mathrm{Tr}[(\partial_k \rho)(\partial_l \rho)]. \end{equation*} Applying the Cauchy–Schwarz inequality yields \begin{equation*} 2 \mathrm{Tr}[(\partial_k \rho)(\partial_l \rho)] \leq 2 \sqrt{ \mathrm{Tr}[(\partial_k \rho)(\partial_k \rho)] \, \mathrm{Tr}[(\partial_l \rho)(\partial_l \rho)]} \leq F_{max} \end{equation*} where $F_{max}$ is a bound on the scalar quantum Fisher information, i.e., diagonal entries of the matrix $\mathbf{F}_Q$. Let us determine this bound via \begin{equation} [\mathbf{F}_Q]_{kk} = 4 \mathrm{Re}[\langle \partial_k \psi | \partial_k \psi \rangle ] - 4|\langle \partial_k \psi | \psi \rangle|^2 \leq 4 \mathrm{Re}[\langle \partial_k \psi | \partial_k \psi \rangle ] = 4 \langle \partial_k \psi | \partial_k \psi \rangle \end{equation} for an arbitrary $| \psi \rangle $. It follows from Eq.~\eqref{generalDeriv} that \begin{equation} \langle \partial_k \psi | \partial_k \psi \rangle = \sum_{l,m=1}^{r^{(k)}_g} g_{kl} \, g_{km} \langle \psi_l | \psi_m \rangle \leq (r_g)^2/4, \end{equation} where $| \psi_m \rangle$ are some valid, normalised states and therefore $\langle \psi_l | \psi_m \rangle \leq 1$ and we used that $g_{kl} \leq 1/2$. 
This finally establishes the general upper bound for unitary ansatz circuits whose gates decompose into at most $r_g$ Pauli terms as \begin{equation*} [\mathbf{F}_Q]_{kl} \leq r_g^2 \end{equation*} and in case of simplified ans{\"a}tze with $r_g=1$ from Sec.~\ref{paulidecomps} one obtains $[\mathbf{F}_Q]_{kl} \leq 1$. \end{proof} \subsection{Components of the gradient \label{gradsec}} Components of the gradient vector can be measured via Hadamard test. We discuss this on the example of simplified ans{\"a}tze from Sec.~\ref{paulidecomps}, while the generalisation follows from linearity. Let us first express the gradient components $g_k:=\partial_k E(\underline{\theta})$ in terms of the derivative circuits from Eq.~\ref{simpleDeriv} as \begin{align} \nonumber g_k &= - \mathrm{Im}[ \langle 0 | \, [D_k]^\dagger \, \mathcal{H} \,U_c \, | 0 \rangle]= - \sum_{l=1}^{r_h} h_l M_{kl}, \end{align} where the second equation uses the decomposition of the Hamiltonian into Pauli operators from Eq.~\eqref{hamildec} via denoting the matrix elements $M_{kl}:=\mathrm{Im}\langle 0 | \, [D_k]^\dagger \, P_l \,U_c \, | 0 \rangle$. These matrix elements can be estimated by using an ancilla qubit via the circuits in Fig.~2 of reference \cite{Li2017} and the corresponding proof can be found in footnote [53] of \cite{Li2017}, refer also to \cite{xiaotheory}. The probability $p$ of measuring this ancilla qubit in the $|\pm\rangle$ basis with outcome $+1$ determines the matrix elements via $(2p_{kl} {-}1)=M_{kl}$ for every Pauli component in the Hamiltonian $P_l$. This finally yields the explicit form of the gradient vector \begin{equation} g_k =\partial_k E(\underline{\theta}) =- \sum_{l=1}^{r_h} h_l (2p_{kl} {-}1) \end{equation} in terms of the measurement probabilities $0 \leq p_{kl} \leq 1$. Note that each probability $p_{kl} $ is estimated by sampling a binomial distribution which has a variance $\sigma^2_{kl} = p_{kl} (1- p_{kl} )$.
It follows that the variance of the gradient components is determined by these individual variances via \begin{equation}\label{gradvariance} \mathrm{Var}[g_k] = 4 \sum_{l=1}^{r_h} h_l^2 \, \sigma^2_{kl} =4 \sum_{l=1}^{r_h} h_l^2 \, p_{kl} (1- p_{kl} ). \end{equation} Re-expressing this variance in terms of the matrix elements via $p_{kl}= (M_{kl} {+}1)/2$ yields the simplified form \begin{equation}\label{gradientVar} \mathrm{Var}[g_k] = \sum_{l=1}^{r_h} h_l^2 \, (1{-}[M_{kl}]^2). \end{equation} This expression is related directly to the parametrised quantum state $| \psi(\underline{\theta}) \rangle $ via the expectation value as $M_{kl} = - 2 \mathrm{Re}\langle \partial_k \psi(\underline{\theta}) | P_l | \psi(\underline{\theta}) \rangle $. In complete generality, i.e., when gates decompose into a linear combination of at most $r_g$ Pauli terms, the variance of the gradient entries is upper bounded (via Eq.~\eqref{gradvariance}) as \begin{equation} \label{gradVarUB} \mathrm{Var}[g_k] \leq r_g \sum_{l=1}^{r_h} h_l^2 = r_g \spec[\mathcal{H}], \end{equation} where $\spec[\mathcal{H}] $ follows from the Hilbert-Schmidt scalar product as \begin{equation*} \spec[\mathcal{H}] := \lVert \mathcal{H} \rVert^2/d :=\mathrm{Tr}[\mathcal{H} \mathcal{H}]/d = \sum_{k,l=1}^{r_h} h_k h_l \mathrm{Tr}[ P_k P_l]/d = \sum_{l=1}^{r_h} h_l^2. \end{equation*} via Eq.~\eqref{hamildec} and recall that $\mathrm{Tr}[ P_k P_l] = d\, \delta_{kl}$, where $\delta_{kl}$ is the Kronecker delta and $d=2^N$. So far we have assumed that each term in the Hamiltonian is estimated separately from outcomes of independent ancilla measurements and the above variance therefore corresponds to overall $r_h$ measurements.
Indeed, advanced techniques could be used for simultaneously measuring commuting terms in the Hamiltonian (possibly without an ancilla qubit) reducing the overall number of shots \cite{Crawford2019,yen2020measuring,jena2019pauli,gokhale2020n,gokhale2019minimizing,hadfield2020measurements} and we would like to take this into account in our final result. We conclude by stating the upper bound on the variance $\mathrm{Var}[g_k]$ of a single measurement to estimate the gradient entry $g_k$ as \begin{equation}\label{Nk_upper_bound} \mathrm{Var}[g_k] \leq \spec[\mathcal{H}] f_g . \end{equation} Here we have introduced the constant factor $f_g$. We can generally state the bounds $1 \leq f_g \leq r_g r_h$ as $f_g$ depends on the system (type of gates via $r_g$) and on the measurement technique used for estimating terms in the Hamiltonian (number of commuting groups). Here the lower bound (best case scenario $ f_g = 1$) is saturated for Pauli gates ($r_g=1$) and Hamiltonians from Eq.~\eqref{hamildec} in which all terms commute and are measured simultaneously. The upper bound (worst case scenario) is saturated by Hamiltonians from Eq.~\eqref{hamildec} in which all $r_h$ terms are estimated from separate measurements (all terms are non-commuting) and all terms have comparable strengths (optimally distributing samples does not reduce $\mathrm{Var}[g_k]$). The factor $f_g$ interpolates between these two extremal cases and will correspond to a value in the bounded range $1 \leq f_g \leq r_g r_h$. In most of this work we assume a fixed $\mathcal{H}$ and therefore we can treat $f_g$ and $\spec[\mathcal{H}]$ as constants.
The only exception is our derivation in Result~2 where we make the mild, general assumption that the number $r_h$ of terms in the Hamiltonian grows polynomially and therefore necessarily the product $\spec[\mathcal{H}] f_g = \mathcal{O}(N^b)$ grows in some polynomial order $b$ with the number of qubits $N$. To illustrate this, we construct 3 example Hamiltonians in Sec.~\ref{appendix:simulations} and explicitly compute the polynomial order $b$ in which the cost $\spec[\mathcal{H}] f_g$ of estimating $g_k$ grows with the number of qubits $N$. Let us now consider mixed quantum states, e.g., due to gate imperfections, via the eigendecomposition $\rho = \sum_{n} p_n |\psi_n \rangle \langle \psi_n | $. If the parametrisation $\underline{\theta}$ is approximately unitary via $\tfrac{\partial p_n}{\partial \theta_k} \approx 0$, then gradient components of the expectation value $\mathrm{Tr}[ \rho(\underline{\theta}) \mathcal{H} ]$ can be expressed as \begin{equation} \tfrac{\partial }{\partial \theta_k} \mathrm{Tr}[ \rho(\underline{\theta}) \mathcal{H} ] \approx \sum_{n} p_n \tfrac{\partial }{\partial \theta_k} [\langle \psi_n(\underline{\theta}) | \mathcal{H} |\psi_n(\underline{\theta}) \rangle ] = \sum_{n} p_n [g_{k}]_n \end{equation} where $ [g_{k}]_n$ is the gradient that would be measured by the above protocol for the pure eigenstate $|\psi_n(\underline{\theta}) \rangle$. The above discussed protocol therefore estimates the correct gradient for mixed states -- as long as the parametrisation is approximately unitary, such as in case of noisy gates. The same upper bound holds for the variances via $\sum_{n} p_n = 1$ and $0\leq p_n\leq 1$, and the bound is only saturated by pure states.
In summary, the variance of the gradient entries is upper bounded as $\mathrm{Var}[g_k] \leq \spec[\mathcal{H}] f_g$, where $f_g$ is a constant factor that only depends on the ansatz structure, on the particular quantum algorithm that is used to estimate the entries and on the Hamiltonian. We remark that the above discussed protocol is used in other metric-aware quantum algorithms, and our bounds therefore apply to other vector objects used in these algorithms~\cite{li2017efficient,xiaotheory,samimagtime,koczor2019quantum,quantumnatgrad}. \subsection{Components of the quantum Fisher information matrix} We will now focus on determining variances of the quantum Fisher information entries $[\mathbf{F}_Q]_{kl} $. For pure states as $\rho=| \psi \rangle \langle \psi |$, entries of the quantum Fisher information can be expressed via the state-vector scalar products \cite{koczor2019quantum} \begin{equation} \label{qfidef} [\mathbf{F}_Q]_{kl} = 4 \mathrm{Re}[\langle \partial_k \psi | \partial_l \psi \rangle - \langle \partial_k \psi | \psi \rangle \langle \psi | \partial_l \psi \rangle]. \end{equation} The second term in the above equation vanishes when the global phase evolution of $| \psi \rangle$ is zero \cite{xiaotheory} and an experimental protocol for measuring the remaining component $\mathrm{Re}\langle \partial_k \psi | \partial_l \psi \rangle $ was used in \cite{samimagtime} for simulating imaginary time evolution. We now propose a protocol that determines both terms in Eq.~\ref{qfidef}.
Assuming the simplified ansatz from Sec.~\ref{paulidecomps}, our protocol allows us to evaluate the coefficients by measuring an ancilla qubit \begin{align*} A_{kl} = 4 \mathrm{Re} \langle \partial_k \psi | \partial_l \psi \rangle &= \mathrm{Re} \langle 0 | [D_k]^\dagger D_l| 0\rangle = 2[p_a]_{kl} {-}1,\\ B_k = 2 \mathrm{Re} \langle \partial_k \psi | \psi \rangle &=\mathrm{Re} \langle 0 | [D_k]^\dagger U_c| 0\rangle = 2[p_b]_k {-}1,\\ C_k = 2 \mathrm{Im} \langle \partial_k \psi | \psi \rangle &=\mathrm{Im} \langle 0 | [D_k]^\dagger U_c| 0\rangle = 2[p_c]_k {-}1, \end{align*} using the circuits in Fig.~2 of reference \cite{Li2017}, refer to footnote [53] of \cite{Li2017} for a proof. These circuits allow for estimating the probabilities $p_a$, $p_b$ and $p_c$ by sampling the ancilla qubit as a binomial distribution. The quantum Fisher information is then obtained as \begin{equation*} [\mathbf{F}_Q]_{kl} = A_{kl} + B_k B_l - C_kC_l = (2[p_a]_{kl} {-}1)+ (2[p_b]_k {-}1) (2[p_b]_l {-}1) - (2[p_c]_k {-}1)(2[p_c]_l {-}1). \end{equation*} Since the probabilities $p_a$, $p_b$ and $p_c$ are determined from binomial distributions, their variances are given by, e.g., $[\sigma_a^2]_{kl} = [p_a]_{kl} (1- [p_a]_{kl})$. It follows that \begin{equation*} \mathrm{Var}\{ [\mathbf{F}_Q]_{kl} \} = 4 [\sigma_a^2]_{kl} + 4 [\sigma_b^2]_{k} B_l^2 + 4 [\sigma_b^2]_{l} B_k^2 + 4[\sigma_c^2]_{k} C_l^2 + 4 [\sigma_c^2]_{l} C_k^2, \end{equation*} Substituting $4 [\sigma_b^2]_{k} = (1-[B_k]^2)$ and $4 [\sigma_c^2]_{k} = (1-[C_k]^2)$, we can express the variances as \begin{equation*} \mathrm{Var}\{ [\mathbf{F}_Q]_{kl} \} = (1 {-}[A_{kl}]^2) + (1-[B_k]^2) B_l^2 + (1-[B_l]^2) B_k^2 + (1-[C_k]^2) C_l^2 + (1-[C_l]^2) C_k^2, \end{equation*} in terms of the estimated quantities $A_{kl}$, $B_k$ and $C_k$, and we used the expressions, e.g., $(A_{kl} {+}1)/2 = [p_a]_{kl} $.
Note that the inequality $(1-[B_k]^2) B_l^2 \leq 1/4$ is saturated when $B_k=1/\sqrt{2}$ and in general $ |A_{kl}|, |B_l|, |C_l| \leq 1$. Using this inequality we can establish the general upper bound \begin{equation}\label{qfVarUB2} \mathrm{Var}\{[\mathbf{F}_Q]_{kl}\} \leq 2 r_g^2 , \end{equation} when gates decompose into a linear combination of at most $r_g$ Pauli terms. When assuming noisy unitary circuits, Result~3 in \cite{koczor2019quantum} establishes that $[\mathbf{F}_Q]_{kl} \approx 2\, \mathrm{Tr}[(\partial_k \rho)(\partial_l \rho)]$ and the approximation becomes exact for pure states as $\rho=| \psi \rangle \langle \psi |$. The Hilbert-Schmidt scalar products $\mathrm{Tr}[(\partial_k \rho)(\partial_l \rho)]$ can be measured using the circuit based on SWAP tests from \cite{xiaotheory} and one can directly estimate the quantity $[\mathbf{F}_Q]_{kl} = (2p_{kl}-1)$ by measuring the probability $p_{kl}$ of an ancilla qubit in case when using the simplified ansatz from Sec.~\ref{paulidecomps}, i.e., when gates decompose into single Pauli terms. We remark that this implementation requires more qubits when compared to the above introduced pure-state approach. However, it is preferable as it results in negligible approximation errors when gates are imperfect, refer to \cite{koczor2019quantum}. The variance follows as $\mathrm{Var}\{[\mathbf{F}_Q]_{kl}\} = 4 p_{kl} (1-p_{kl}) = (1 - [\mathbf{F}_Q]_{kl}^2) \leq 1$ in case of the simplified ansatz from Sec.~\ref{paulidecomps} and we have used $[\mathbf{F}_Q]_{kl} \leq 1$ from Lemma~\ref{qfilemma}. In complete generality, i.e., when gates decompose into a linear combination of at most $r_g$ Pauli terms, the variance of the matrix entries is upper bounded as \begin{equation} \label{qfVarUB1} \mathrm{Var}\{[\mathbf{F}_Q]_{kl}\} \leq r_g^2.
\end{equation} In summary, the variance of the matrix entries is upper bounded as $\mathrm{Var}\{[\mathbf{F}_Q]_{kl}\} \leq f_F$, where $f_F$ is a constant factor that only depends on the ansatz structure and the approach used to estimate the matrix entries. We remark that the above discussed two protocols are used in other metric-aware quantum algorithms and our bounds therefore apply to other matrix objects estimated by these algorithms~\cite{li2017efficient,xiaotheory,samimagtime,koczor2019quantum,quantumnatgrad}. \subsection{Numerical simulations \label{appendix:simulations}} In our numerical simulations we use the ansatz illustrated in Fig.~\ref{ansatzfig}. This decomposes into repeated blocks. The first block $B_1$ consists of single-qubit $X$ rotations while the second block $B_2$ decomposes into nearest-neighbour Pauli $ZZ$ gates followed by single qubit $Y$ and $X$ rotations. Each gate depends on an individual parameter $\theta_k$ with $k\in \{1 \dots \nu\}$. In our numerical simulations we use the ansatz structure $B_1 B_2 B_2$ which has a linearly growing number of parameters $\nu = \mathcal{O}(N)$ in the number of qubits via the constant depth $a(N) = \mathcal{O}(N^0)$. In Fig.~1 we simulate the natural gradient approach for finding the ground state energy of the spin-chain Hamiltonian \begin{equation} \label{hamil} \mathcal{H} = \sum_{i=1}^{N-1} J [ \sigma_x^{\{i\}} \sigma_x^{\{i+1\}} + \sigma_y^{\{i\}} \sigma_y^{\{i+1\}} + \sigma_z^{\{i\}} \sigma_z^{\{i+1\}} ] + J[ \sigma_x^{\{1\}} \sigma_x^{\{N\}} + \sigma_y^{\{1\}} \sigma_y^{\{N\}} + \sigma_z^{\{1\}} \sigma_z^{\{N\}} ] +\sum_{i=1}^N \omega_i \, \sigma_z^{\{i\}}. \end{equation} which contains identical couplings $xx$, $yy$ and $zz$ between nearest neighbours with a constant which we set $J=1$. Here $\sigma_\alpha^{\{k\}}$ represent Pauli matrices acting on qubit $k$ with $\alpha = \{x,y,z\}$.
We select on-site frequencies $\omega_i$ randomly according to a uniform distribution with values varying between $-1$ and $1$. The resulting Hamiltonian has a non-trivial, highly entangled ground state that we aim to approximate using the (not necessarily optimal) ansatz circuit shown on Fig.~\ref{ansatzfig}. We initialise the optimisation at a point in parameter space close to the optimum and we set the step size as $\lambda = 0.2$. In Fig.~2 we simulate various different Hamiltonians using the same technique. In particular, we use Eq.~\eqref{hamil} as the linearly scaling Hamiltonian in Fig.~2 (red). We define the quadratically scaling Hamiltonian Fig.~2 (blue) as \begin{equation} \label{hamil2} \mathcal{H} = \sum_{k>l=1}^{N} J [ \sigma_x^{\{k\}} \sigma_x^{\{l\}} + \sigma_y^{\{k\}} \sigma_y^{\{l\}} + \sigma_z^{\{k\}} \sigma_z^{\{l\}} ] +\sum_{k=1}^N \omega_k \, \sigma_z^{\{k\}}, \end{equation} while we chose the cubically scaling Hamiltonian Fig.~2 (brown) as \begin{equation} \label{hamil3} \mathcal{H} = \sum_{l>k}^{N} \sum_{m>l}^{N} J \sigma_x^{\{k\}} \sigma_y^{\{l\}} \sigma_z^{\{m\}} +\sum_{k=1}^N \omega_k \, \sigma_z^{\{k\}}. \end{equation} In our simulations we start the optimisation at a random initial point in parameter space, i.e., $\underline{\theta}$ is selected randomly, and run the optimisation until the gradient vector is such that $\lVert v\rVert \approx 10^{-1}$. This ensures that we approximately randomly select points in parameter space for which the gradient norm is fixed, hence satisfying our assumption in Result~2. We compute the values of $N_{F}$ and $N_{g}$ at $25$ instances of such randomly selected ansatz parameters. Dots (shading) [solid lines] Fig.~2 shows the average (standard deviation) [fitting] of the ratio $N_{F}/N_{g}$.
Let us now compute how the product $\spec[\mathcal{H}] f_g = \mathcal{O}(N^b)$ from Result~2 (which reflects the cost of estimating a gradient entry $g_k$ via Eq.~\eqref{Nk_upper_bound}) grows with the number of qubits. Terms in the above Hamiltonians can be grouped into a constant number of commuting groups which can be measured simultaneously and therefore $f_g = \mathcal{O}(1)$ in Eq.~\eqref{Nk_upper_bound}. Furthermore, the squared sum of the coefficients grows as $\spec[\mathcal{H}] = \sum_{l=1}^{r_h} h_l^2 = \mathcal{O}(N^b)$ with $b=1,2,3$, respectively. We therefore conclude that our upper bound in Eq.~\eqref{Nk_upper_bound} grows as $\mathrm{Var}[g_k] \leq \spec[\mathcal{H}] f_g = \mathcal{O}(N^b)$ and, indeed, the product in Result~2 grows as $\spec[\mathcal{H}] f_g = \mathcal{O}(N^b)$ with $b=1,2,3$, respectively. \begin{figure*} \caption{ Example of an $8$-qubit ansatz structure used in our simulations. It consists of repeated blocks of single qubit $X$ and $Y$ rotations and two-qubit $ZZ$ evolution gates. All gates here have Pauli rank $r_g = 1$ as discussed in Sec.~\ref{paulidecomps}.} \label{ansatzfig} \end{figure*} \section{Propagating Variances} \label{sec: PV} \begin{lemma} \label{propagationlemma} Let us define the regularised inverse $\tilde{\mathbf{F}}_Q^{-1} := [\mathbf{F}_Q {+} \eta \mathrm{Id}]^{-1}$ of the Fisher information matrix for some regularisation parameter $\eta \geq 0$ and recall that we have defined the error measure $\epsilon^2 := \sum_{k=1}^\nu \mathrm{Var}[v_k] $ with $\underline{v} := \tilde{\mathbf{F}}_Q^{-1} \underline{g}$ in the main text.
If the elements of $\mathbf{F}_Q$ and $\underline{g}$ are measured independently and their errors are sufficiently small, then the error measure can be written in the form \begin{align} &\epsilon^{2} = \sum_{k, l = 1}^{\nu} a_{k l} \mathrm{Var} \big \{[\mathbf{F}_Q]_{k l}\big\} + \sum_{k = 1}^{\nu} b_{k} \mathrm{Var} [g_k], \\ & \textnormal{where} \ \ a_{k l} := \sum_{i,j = 1}^{\nu} [\tilde{\mathbf{F}}_Q^{-1}]_{ik}^{2} [\tilde{\mathbf{F}}_Q^{-1}]_{l j}^{2} g_{l}^{2}, \ \ b_{k} := \sum_{l = 1}^{\nu} \big \{[\tilde{\mathbf{F}}_Q^{-1}]_{k l} \big \}^{2}. \end{align} \end{lemma} \begin{proof}\label{proof:em} Under the assumption that the elements are measured independently and are sufficiently small, it is appropriate to use the variance formula \cite{Ku1966}, thus we can write the error measure in terms of the variance of the elements in $\tilde{\mathbf{F}}_Q^{-1}$ and $\underline{g}$ yielding \begin{align} \epsilon^{2} &= \sum_{k = 1}^{\nu} \mathrm{Var} [v_{k}] \\ &= \sum_{k, l = 1}^{\nu} \mathrm{Var} \big\{[\tilde{\mathbf{F}}_Q^{-1}]_{kl} \big\} g_{l}^{2} + \big \{[\tilde{\mathbf{F}}_Q^{-1}]_{k l} \big \}^{2} \mathrm{Var} [g_l]. \end{align} Now we use the result derived in \cite{Lefebvre2000} to relate the variance of elements of $\tilde{\mathbf{F}}_Q^{-1}$ to elements of $\mathbf{F}_Q$, namely \begin{equation} \mathrm{Var} \big \{ [\tilde{\mathbf{F}}_Q^{-1}]_{k l}\big \} = \sum_{i,j = 1}^{\nu} [\tilde{\mathbf{F}}_Q^{-1}]_{ik}^{2} \mathrm{Var} \big \{[\mathbf{F}_Q]_{k l}\big\} [\tilde{\mathbf{F}}_Q^{-1}]_{l j}^{2}. \end{equation} Here we have used that $\mathrm{Var} \big \{[\tilde{\mathbf{F}}_Q]_{k l}\big\} = \mathrm{Var} \big \{[\mathbf{F}_Q]_{k l}\big\}$.
Substituting this result into the error metric and trivially rearranging yields the required result \begin{align} \epsilon^{2} &= \sum_{k, l = 1}^{\nu} \bigg[\sum_{i,j = 1}^{\nu} [\tilde{\mathbf{F}}_Q^{-1}]_{ik}^{2} \mathrm{Var} \big \{[\mathbf{F}_Q]_{k l}\big\} [\tilde{\mathbf{F}}_Q^{-1}]_{l j}^{2} \bigg] g_{l}^{2} + \big \{[\tilde{\mathbf{F}}_Q^{-1}]_{k l} \big \}^{2} \mathrm{Var} [g_l] \\ &= \sum_{k, l = 1}^{\nu} \underbrace{\bigg[\sum_{i,j = 1}^{\nu} [\tilde{\mathbf{F}}_Q^{-1}]_{ik}^{2} [\tilde{\mathbf{F}}_Q^{-1}]_{l j}^{2} g_{l}^{2} \bigg]}_{a_{k l}} \mathrm{Var} \big \{[\mathbf{F}_Q]_{k l}\big\} + \sum_{k = 1}^{\nu} \underbrace{\bigg[ \sum_{l = 1}^{\nu} \big \{[\tilde{\mathbf{F}}_Q^{-1}]_{k l} \big \}^{2} \bigg]}_{b_{k}} \mathrm{Var} [g_k]. \end{align} \end{proof} \subsection{Proof of Theorem~1 \label{proof:theo1}} \begin{proof} Recall that Lemma~\ref{propagationlemma} establishes the error propagation formula which we abbreviate as $\epsilon^2 = \epsilon_F^2 + \epsilon_g^2$ via \begin{equation} \epsilon_F^2 := \sum_{\alpha, \beta=1}^\nu a_{\alpha \beta} \mathrm{Var}\{[\mathbf{F}_Q]_{\alpha \beta}\}, \quad \quad \epsilon_g^2 := \sum_{l=1}^\nu b_l \mathrm{Var}[g_l]. \end{equation} The coefficients $a_{\alpha \beta}$ can be upper bounded as \begin{equation*} a_{\alpha \beta} = \sum_{k,l=1}^\nu g_l^2 \, [\tilde{\mathbf{F}}_Q^{-1}]_{k\alpha}^2 [\tilde{\mathbf{F}}_Q^{-1}]_{l\beta}^2 \leq \lVert g \rVert_\infty^2 \sum_{k=1}^\nu [\tilde{\mathbf{F}}_Q^{-1}]_{k\alpha}^2 \sum_{l=1}^\nu [\tilde{\mathbf{F}}_Q^{-1}]_{l\beta}^2, \quad \quad \text{and} \quad \quad b_l = \sum_{k=1}^\nu [\tilde{\mathbf{F}}_Q^{-1}]_{k l}^2, \end{equation*} where $\lVert g \rVert_\infty$ is the absolute largest element in the gradient vector.
We assume that every matrix and vector element is assigned measurements uniformly as $N_{F}/\nu^2$ and $N_{g}/\nu$ where $N_{F}$ and $N_{g}$ are the overall number of measurements required to estimate the matrix and vector objects such that the vector $\underline{v}$ is obtained to a precision $\epsilon$. Using the upper bounds on the variances of individual gradient vector entries from Eq.~\eqref{gradVarUB} and individual matrix entries from Eq.~\eqref{qfVarUB1} and Eq.~\eqref{qfVarUB2} we derive the explicit bound \begin{equation*} \mathrm{Var}\{[\mathbf{F}_Q]_{\alpha \beta}\} \leq V_F := \nu^2 N_{F}^{-1} \, f_F \quad \quad \mathrm{Var}[g_l] \leq V_G := \nu N_{g}^{-1} \, \spec[\mathcal{H}] f_g, \end{equation*} where $\lVert \mathcal{H} \rVert$ is the Hilbert-Schmidt or Frobenius norm of the Hamiltonian and $f_F$, $f_g$ are constant factors that depend on the ansatz structure and the approach used to estimate the gradient/Fisher matrix, refer to Sec.~\ref{paulidecomps}. For example for the simplified ansatz ($r_g=1$) in Sec.~\ref{paulidecomps} we obtain $f_F \leq 2$ and $1 \leq f_g \leq r_h$. Here the lower bound $f_g = 1$ is saturated when all terms in the Hamiltonian from Eq.~\eqref{hamildec} commute and can be measured simultaneously while the upper bound $f_g = r_h$ is saturated when all $r_h$ terms in the Hamiltonian need to be estimated independently (because they do not commute) and their strengths are comparable, refer to Sec.~\ref{gradsec}.
We use the above derived upper bounds and obtain \begin{equation} \epsilon_F^2 \leq V_F \, \lVert g \rVert_\infty^2 \sum_{\alpha, k =1}^\nu [\tilde{\mathbf{F}}_Q^{-1}]_{k\alpha}^2 \sum_{\beta,l=1}^\nu [\tilde{\mathbf{F}}_Q^{-1}]_{l\beta}^2 = V_F \lVert g \rVert_\infty^2 \, \lVert \tilde{\mathbf{F}}_Q^{-1} \rVert^4, \quad \quad \quad \epsilon_g^2 \leq V_G \sum_{k,l=1}^\nu [\tilde{\mathbf{F}}_Q^{-1}]_{k l}^2 = V_G \, \lVert \tilde{\mathbf{F}}_Q^{-1} \rVert^2, \end{equation} where $\lVert \tilde{\mathbf{F}}_Q^{-1} \rVert$ is the Hilbert-Schmidt or Frobenius norm of the inverse matrix $\tilde{\mathbf{F}}_Q^{-1}$. We now require that $\epsilon^2/2 =: \epsilon_F^2 $ and $ \epsilon^2/2=: \epsilon_g^2$ as a possible choice to satisfy $\epsilon^2 = \epsilon_F^2 + \epsilon_g^2$. This results in the explicit bound on the number of measurements after substituting $V_F$ and $V_G$ as \begin{equation*} N_{F} \leq 2 \, \nu^2 \, \lVert g \rVert_\infty^2 \, \lVert \tilde{\mathbf{F}}_Q^{-1} \rVert^4 \epsilon^{-2} f_F \quad \quad N_{g} \leq 2 \, \nu\, \lVert \tilde{\mathbf{F}}_Q^{-1} \rVert^2 \epsilon^{-2} \, \spec[\mathcal{H}] f_g. \end{equation*} We introduce the notation $\mathrm{Spc}[\tilde{\mathbf{F}}_Q^{-1}] := \lVert \tilde{\mathbf{F}}_Q^{-1} \rVert^2 /\nu = \tfrac{1}{\nu}\sum_{k=1}^\nu \sigma_k^{2}(\tilde{\mathbf{F}}_Q^{-1}) $ to denote the average of the squared singular values of $\tilde{\mathbf{F}}_Q^{-1}$. Note that, for example, the identity operator yields $\mathrm{Spc}[\mathrm{Id}] = 1$ and we derive upper and lower bounds on it in general in Lemma~\ref{lemmaspec}. We finally establish the upper bounds \begin{equation*} N_{F} \leq 2 \, \nu^4 \, \lVert g \rVert_\infty^2 \, \mathrm{Spc}[\tilde{\mathbf{F}}_Q^{-1}]^2 \epsilon^{-2} \, f_F, \quad \quad N_{g} \leq 2 \, \nu^2 \, \mathrm{Spc}[\tilde{\mathbf{F}}_Q^{-1}] \epsilon^{-2} \, \spec[\mathcal{H}] f_g.
\end{equation*} \end{proof} \subsection{Proof of Theorem~2 \label{proof:theo2}} \begin{proof} Recall that in Sec.~\ref{proof:theo1} we have defined the precision associated with the gradient vector in the natural gradient approach as $\epsilon_g^2 := \sum_{l=1}^\nu b_l \mathrm{Var}[g_l]$. We have also defined the total number of measurements $N_{g}$ that needs to be assigned to determining the gradient vector $\underline{g}$ to a precision $\epsilon_g^2$ as \begin{equation*} N_{g} := \frac{\nu}{\epsilon_g^2} \sum_{l=1}^\nu b_l \mathrm{Var}[g_l], \end{equation*} since each gradient entry $g_l$ receives $N_{g}/\nu$ samples. In the limiting case $\tilde{\mathbf{F}}_Q^{-1} \rightarrow \mathrm{Id}$ and $\mathrm{Var}\{[\mathbf{F}_Q]_{k l}\} \rightarrow 0$ the natural gradient approach reduces to the simple gradient descent approach with $b_l=1$. We can therefore define the total number of measurements $N_{gd}$ required to reconstruct the gradient vector in the simple gradient descent approach via $b_l=1$ as \begin{equation*} N_{gd} := \frac{\nu}{\epsilon_g^2} \sum_{l=1}^\nu \mathrm{Var}[g_l]. 
\end{equation*} Let us start by explicitly writing the ratio of measurements as \begin{equation*} \frac{N_{g}}{N_{gd}} = \frac{\sum_{l=1}^\nu b_l \mathrm{Var}[g_l]}{\sum_{l=1}^\nu \mathrm{Var}[g_l]} \end{equation*} and let us consider the term \begin{equation*} b_l = \sum_{k=1}^\nu [\tilde{\mathbf{F}}_Q^{-1}]_{k l}^2 = \lVert \mathrm{Col}_l[\tilde{\mathbf{F}}_Q^{-1}] \rVert^2 = \lVert \tilde{\mathbf{F}}_Q^{-1} B_l \rVert^2 \leq \lVert \tilde{\mathbf{F}}_Q^{-1} \rVert_\infty^2 = \sigma_{\mathrm{max}} (\tilde{\mathbf{F}}_Q^{-1})^2 \leq \eta^{-2} \end{equation*} where $\eta$ is either a regularisation parameter or the smallest singular value of $\mathbf{F}_Q$, $\mathrm{Col}_l[\tilde{\mathbf{F}}_Q^{-1}] $ denotes the $l$-th column vector of the matrix $\tilde{\mathbf{F}}_Q^{-1}$ and $B_l$ is the $l$-th standard basis vector with $\lVert B_l \rVert = 1$. Our general upper bound follows as \begin{equation*} \frac{N_{g}}{N_{gd}} = \frac{\sum_{l=1}^\nu b_l \mathrm{Var}[g_l]}{\sum_{l=1}^\nu \mathrm{Var}[g_l]} \leq \eta^{-2} \frac{\sum_{l=1}^\nu \mathrm{Var}[g_l]}{\sum_{l=1}^\nu \mathrm{Var}[g_l]} =\eta^{-2} \end{equation*} We now establish an approximation under the assumption that $\mathrm{Var}[g_l]$ does not significantly depend on the index $l$, e.g., when the gradient is vanishing close to an optimal point via $M_{kl} \rightarrow 0$ in Eq.~\eqref{gradientVar} as, e.g., \begin{equation} \mathrm{Var}[g_k] = \sum_{l=1}^{r_h} h_l^2 \, (1{-}[M_{kl}]^2) \rightarrow \sum_{l=1}^{r_h} h_l^2 = \spec[\mathcal{H}]. \end{equation} This results in \begin{equation*} \frac{N_{g}}{N_{gd}} = \frac{\sum_{l=1}^\nu b_l \mathrm{Var}[g_l]}{\sum_{l=1}^\nu \mathrm{Var}[g_l]} \approx \frac{\sum_{l=1}^\nu b_l }{\nu} = \frac{\sum_{k,l=1}^\nu [\tilde{\mathbf{F}}_Q^{-1}]_{k l}^2 }{\nu} = \lVert \tilde{\mathbf{F}}_Q^{-1} \rVert^2 /\nu = \mathrm{Spc}[\tilde{\mathbf{F}}_Q^{-1}] . 
\end{equation*} \end{proof} \subsection{Remarks on Theorem~2} We establish bounds in case of the relative-precision scheme, i.e., when $\epsilon \propto \lVert g(t) \rVert$ and $\epsilon \propto \lVert v(t) \rVert$ in case of the gradient and natural gradient vectors, respectively. The upper bound follows via \begin{equation} \frac{\lVert g \rVert^2}{\lVert v \rVert^2} = \frac{\lVert g \rVert^2}{\lVert \tilde{\mathbf{F}}_Q^{-1} g \rVert^2} \leq \sigma_{\mathrm{min}} (\tilde{\mathbf{F}}_Q^{-1})^{-2}, \end{equation} and a lower bound can be specified as \begin{equation} \frac{\lVert g \rVert^2}{\lVert v \rVert^2} = \frac{\lVert g \rVert^2}{\lVert \tilde{\mathbf{F}}_Q^{-1} g \rVert^2} \geq \sigma_{\mathrm{max}} (\tilde{\mathbf{F}}_Q^{-1})^{-2} \end{equation} and in complete generality \begin{equation} \frac{N_{g}}{N_{gd}} \frac{\lVert g \rVert^2}{\lVert v \rVert^2} \leq [\sigma_{\mathrm{max}} (\tilde{\mathbf{F}}_Q^{-1}) /\sigma_{\mathrm{min}} (\tilde{\mathbf{F}}_Q^{-1})]^{2} =: \mathrm{Cnd}[\tilde{\mathbf{F}}_Q^{-1}]^{2}, \end{equation} and Lemma~\ref{lemmaspec} establishes that $\mathrm{Cnd}[\tilde{\mathbf{F}}_Q^{-1}] \leq \eta^{-1} (\nu r_g^2 + \eta)$. \begin{lemma} \label{lemmaspec} Assuming the simple regularisation $\tilde{\mathbf{F}}_Q^{-1} := (\mathbf{F}_Q + \eta \mathrm{Id}_\nu)^{-1}$, the largest singular value of the inverse is upper bounded as $\sigma_\mathrm{max}(\tilde{\mathbf{F}}_Q^{-1}) \leq \eta^{-1}$ and the smallest singular value is lower bounded via $\sigma_\mathrm{min}(\tilde{\mathbf{F}}_Q^{-1}) \geq (\nu r_g^2 + \eta)^{-1}$. Moreover, the bounds $ (\nu r_g^2 + \eta)^{-2} \leq \mathrm{Spc}[\tilde{\mathbf{F}}_Q^{-1}] \leq \eta^{-2}$ and $\mathrm{Cnd}[\tilde{\mathbf{F}}_Q^{-1}] \leq \eta^{-1} (\nu r_g^2 + \eta)$ hold in general. Here $r_g$ is the largest Pauli rank of the ansatz gates from Sec.~\ref{paulidecomps}. 
\end{lemma} \begin{proof} It immediately follows that \begin{equation*} \sigma_\mathrm{max}([\mathbf{F}_Q + \eta \mathrm{Id}_\nu]^{-1}) = [\sigma_\mathrm{min}(\mathbf{F}_Q + \eta \mathrm{Id}_\nu)]^{-1} \leq \eta^{-1} \end{equation*} via $\sigma_\mathrm{min}(\mathbf{F}_Q + \eta \mathrm{Id}_\nu) \geq \eta$. Now we use the boundedness of the matrix elements as $|[\mathbf{F}_Q]_{kl}| \leq r_g^2$ from Lemma~\ref{qfilemma} which establishes the matrix norm $\lVert \mathbf{F}_Q \rVert_{\mathrm{max}} := \max_{k,l}|[\mathbf{F}_Q ]_{kl}| \leq r_g^2$. This bounds the largest singular value of $\mathbf{F}_Q$ as \begin{equation*} r_g^2 \geq \lVert \mathbf{F}_Q \rVert_{\mathrm{max}} \geq \lVert \mathbf{F}_Q \rVert_{\infty} /\nu := \sigma_\mathrm{max}(\mathbf{F}_Q)/\nu. \end{equation*} The smallest singular value of the inverse is therefore bounded as \begin{equation*} \sigma_\mathrm{min}([\mathbf{F}_Q + \eta \mathrm{Id}_\nu]^{-1}) = [\sigma_\mathrm{max}(\mathbf{F}_Q + \eta \mathrm{Id}_\nu)]^{-1} \geq (\nu r_g^2 + \eta)^{-1}. \end{equation*} We can now establish the bound \begin{equation} (\nu r_g^2 + \eta)^{-2} \leq \sigma_\mathrm{min}^2( \tilde{\mathbf{F}}_Q^{-1} ) \leq \mathrm{Spc}[\tilde{\mathbf{F}}_Q^{-1}] \leq \sigma_\mathrm{max}^2( \tilde{\mathbf{F}}_Q^{-1} ) \leq \eta^{-2}. \end{equation} We can therefore bound the growth rate of the quantity $\mathrm{Spc}[\tilde{\mathbf{F}}_Q^{-1}]$ as $\mathrm{Spc}[\tilde{\mathbf{F}}_Q^{-1}] = \mathcal{O}(\nu^s)$ with $-2 \leq s \leq 0$. \end{proof} \section{Optimal Measurements} \begin{lemma}\label{lemma: optimal} Measurements are distributed optimally when the number of samples for determining individual elements of the matrix and gradient are given by \begin{align} [N_F^{\mathrm{mat}}]_{kl} &= \epsilon^{-2} \, \Sigma \sqrt{a_{kl} \mathrm{Var} \big \{[\mathbf{F}_Q]_{kl}\big\}}, \\ [N_g^{\mathrm{vec}}]_{k} &= \epsilon^{-2} \, \Sigma \sqrt{b_{k} \mathrm{Var} [g_{k}]}, \end{align} respectively. 
Here $\mathrm{Var}[\cdot]$ is the variance of a single measurement of the corresponding element and we explicitly define $\Sigma$ via the coefficients $a_{kl}$ and $b_k$ from Appendix~\ref{sec: PV} as \begin{equation} \Sigma := \sum_{k,l=1}^{\nu} \sqrt{a_{kl} \mathrm{Var} \big \{[\mathbf{F}_Q]_{kl}\big\}} + \sum_{k=1}^{\nu} \sqrt{b_k \mathrm{Var}[g_k]}. \end{equation} \end{lemma} \begin{proof} From Lemma \ref{propagationlemma} we write the error measure as \begin{equation} \epsilon^{2} = \sum_{k, l = 1}^{\nu} a_{k l} \mathrm{Var} \big \{[\mathbf{F}_Q]_{k l}\big\} + \sum_{k = 1}^{\nu} b_{k} \mathrm{Var} [g_k], \end{equation} where $\mathrm{Var} [\cdot]$ denotes the variance in the statistical average over many measurements. Now we allow $\mathrm{Var}[\cdot]$ to denote variance in a single measurement while $[N_F^{\mathrm{mat}}]_{kl}$ and $[N_g^{\mathrm{vec}}]_{k}$ are the number of measurements assigned to each element $[\mathbf{F}_Q]_{kl}$ and $g_k$ respectively, so the error measure becomes \begin{equation} \epsilon^{2} = \sum_{k, l = 1}^{\nu} \frac{a_{k l} \mathrm{Var} \big \{[\mathbf{F}_Q]_{k l}\big\}}{[N_F^{\mathrm{mat}}]_{kl}} + \sum_{k = 1}^{\nu} \frac{b_{k} \mathrm{Var} [g_k]}{[N_g^{\mathrm{vec}}]_{k}}. \end{equation} By minimising the error measure in this form, subject to the constraint of a fixed total number of measurements, so that \begin{equation} N_\mathrm{opt} = \sum_{k, l = 1}^{\nu} [N_F^{\mathrm{mat}}]_{kl} + \sum_{k = 1}^{\nu} [N_g^{\mathrm{vec}}]_{k}, \end{equation} we find that the optimal fraction of measurements to be assigned to each element is \begin{align} \frac{[N_F^{\mathrm{mat}}]_{kl}}{N_\mathrm{opt}} = \frac{\sqrt{a_{kl} \mathrm{Var} \big \{[\mathbf{F}_Q]_{kl}\big\}}}{\Sigma}, \quad \frac{[N_g^{\mathrm{vec}}]_{k}}{N_\mathrm{opt}} = \frac{\sqrt{b_{k} \mathrm{Var} [g_{k}]}}{\Sigma} \\ \text{where} \quad \Sigma := \sum_{k,l=1}^{\nu} \sqrt{a_{kl} \mathrm{Var} \big \{[\mathbf{F}_Q]_{kl}\big\}} + \sum_{k=1}^{\nu} \sqrt{b_k \mathrm{Var}[g_k]}. 
\end{align} By substituting these results into the error measure we can remove the dependence on the total number of measurements $N_\mathrm{opt}$, to yield the required result. \end{proof} \subsection{Fisher Matrix Symmetry}\label{appendix: symmetry} \begin{lemma} The symmetry of the Fisher matrix can be accounted for by replacing the elements $a_{kl}$ with $a'_{kl}$, where \begin{equation} a'_{k l} := \begin{cases} 0 & k < l \\ a_{k k} & k = l \\ a_{k l} + a_{l k} & k > l \end{cases} \end{equation} \end{lemma} \begin{proof} As the Fisher matrix is symmetric, measurements of the $[\mathbf{F}_Q]_{kl}$ element also constitute measurements of $[\mathbf{F}_Q]_{lk}$, so $\mathrm{Var} \{[\mathbf{F}_Q]_{kl}\} = \mathrm{Var} \{[\mathbf{F}_Q]_{lk} \}$. Thus, the error measure can be written as \begin{align} \epsilon^{2} = \sum_{k = 1}^{\nu} a_{k k} \mathrm{Var} \big \{[\mathbf{F}_Q]_{k k}\big\} + \sum_{k > l}^{\nu} 2 a_{kl} \mathrm{Var} \big \{[\mathbf{F}_Q]_{kl}\big\} + \sum_{k = 1}^{\nu} b_{k} \mathrm{Var} [g_k]. \end{align} It is possible to force this back into the original form of the error measure if we define \begin{equation} a'_{k l} := \begin{cases} 0 & k < l \\ a_{k k} & k = l \\ a_{k l} + a_{l k} & k > l \end{cases}, \end{equation} so that the error measure can be written as \begin{equation} \epsilon^{2} = \sum_{k, l = 1}^{\nu} a'_{k l} \mathrm{Var} \big \{[\mathbf{F}_Q]_{k l}\big\} + \sum_{k = 1}^{\nu} b_{k} \mathrm{Var} [g_k]. \end{equation} Using the error measure written in this form as a starting point for the derivation in the proof of Lemma \ref{lemma: optimal} we trivially obtain the same results with the elements $a_{kl}$ replaced with $a'_{kl}$. \end{proof} \section{Applications beyond natural gradient} Let us now comment on how the main results of this work can be applied to other quantum algorithms beyond natural gradient optimisation. 
For this reason, we now consider three categories of algorithms and review how the results in the main text can be tailored to these algorithms. We note that all algorithms considered in the following use a parameter update rule whereby an inverse matrix $A^{-1}$ is applied to a vector $v$. \textbf{Metric-aware optimisation algorithms:} We have covered metric-aware optimisation algorithms in the main text which include quantum natural gradient descent and imaginary time evolution \cite{li2017efficient,xiaotheory,samimagtime,koczor2019quantum,quantumnatgrad}. In this case the matrix object $A$ is the quantum Fisher information, which only depends on the ansatz circuit, while the vector object is the gradient vector that depends on both the ansatz circuit and on the Hamiltonian. \textbf{Variational quantum simulation:} The time evolution of a quantum system under a Hamiltonian can be simulated using techniques described in \cite{li2017efficient,xiaotheory}. In such a scenario the matrix object (imaginary part of the quantum geometric tensor) still only depends on the ansatz circuit and the vector object is related to the gradient vector and therefore all our results apply, except for Result~\ref{result1}. The reason is the following. In Result~\ref{result1} we assumed that when increasing the number of iterations the norm of the gradient vector $v$ vanishes due to convergence. However, in case of variational simulation, the norm of the vector object does not necessarily decrease. \textbf{Hessian optimisation:} Analogously to metric-aware optimisations here the inverse of the Hessian matrix is applied to the gradient vector. Thus our error propagation formula in Eq.~\eqref{errorpropagation} and our optimal measurement distribution scheme in Result~\ref{result3} immediately apply to this scenario too. The main difference to the previously discussed scenarios is that the Hessian matrix now depends on both the ansatz circuit and on the Hamiltonian. 
For this reason, Theorems~1-2 need to be modified such that the dependence on the Hamiltonian is taken into account. As such, the main conclusion of Result~\ref{result1} will still hold: as the optimisation converges the vanishingly small gradient becomes increasingly more expensive to determine to a sufficient precision. However, via Result~\ref{result2} it is expected that when we increase the number of qubits the Hessian matrix becomes increasingly more expensive to estimate due to its dependence on the Hamiltonian. \end{document}
\begin{document} \title[Martingale problem for perturbations of Lévy-type generators]{Well-posedness of the martingale problem for non-local perturbations of Lévy-type generators} \author[P. Jin]{Peng Jin} \address{Peng Jin: Fakultät für Mathematik und Naturwissenschaften, Bergische Universität Wuppertal, 42119 Wuppertal, Germany} \email{[email protected]} \subjclass[2010]{primary 60J75; secondary 60J35} \keywords{Lévy-type generator, stable process, martingale problem, transition density, resolvent, perturbation} \begin{abstract} Let $L$ be a Lévy-type generator whose Lévy measure is controlled from below by that of a non-degenerate $\alpha$-stable ($0<\alpha<2$) process. In this paper, we study the martingale problem for the operator $\mathcal{L}_{t}=L+K_{t}$, with $K_{t}$ being a time-dependent non-local operator defined by \[ K_{t}f(x):=\int_{\mathbb{R}^{d}\backslash\{0\}}[f(x+y)-f(x)-\mathbf{1}_{\alpha>1}\mathbf{1}_{\{|y|\le1\}}y\cdot\nabla f(x)]M(t,x,dy), \] where $M(t,x,\cdot)$ is a Lévy measure on $\mathbb{R}^{d}\backslash\{0\}$ for each $(t,x)\in\mathbb{R}_+ \times \mathbb{R}^{d}$. We show that if \[ \sup_{t\geq0,x\in\mathbb{R}^{d}}\int_{\mathbb{R}^{d}\backslash\{0\}}1\wedge|y|^{\beta}M(t,x,dy)<\infty \] for some $0<\beta<\alpha$, then the martingale problem for $\mathcal{L}_{t}$ is well-posed. \end{abstract} \maketitle \section{Introduction} As a generalization of the fractional Laplacian $\triangle^{\alpha/2}$ ($0<\alpha<2)$, the anisotropic fractional Laplacian is defined by \begin{align*} Af(x) & =\int_{\mathbb{R}^{d}\backslash\{0\}}\left[f(x+y)-f(x)-\mathbf{1}_{\{|y|\le1\}}y\cdot\nabla f(x)\right]\nu(dy), \end{align*} where \[ \nu(B)=\int_{\mathbb{S}^{d-1}}\mu(d\xi)\int_{0}^{\infty}\mathbf{1}_{B}(r\xi)\frac{dr}{r^{1+\alpha}},\quad\forall B\in\mathcal{B}(\mathbb{R}^{d}), \] and $\mu$ is a finite measure on $\mathbb{S}^{d-1}$. We call $\nu$ the Lévy measure and $\mu$ the spectral measure of $A$. 
Clearly the behavior of the anisotropic fractional Laplacian is solely determined by its spectral measure. Since $\mu$ can be any finite measure on $\mathbb{S}^{d-1}$, this leads to some interesting properties of $A$ that the fractional Laplacian $\triangle^{\alpha/2}$ does not possess. As an example, the heat kernel of $A$ may have very different types of estimates compared to $\triangle^{\alpha/2}$, see \cite{MR2286060}. The anisotropic fractional Laplacian $A$ corresponds to a Markov process, namely, it is the generator of an $\alpha$-stable process. It is natural to ask the following question of stability: if we add a small perturbation $B$ to $A$, does $A+B$ still correspond to a Markov process, or more precisely, is the martingale problem for $A+B$ well-posed? This problem has been well-studied when $1<\alpha<2$ and the perturbation operator $B$ is of drift-type $B=b(t,\cdot)\cdot\nabla$. Depending on the regularity of the spectral measure $\mu$, various classes of drifts $b$ have been introduced such that the martingale problem for $A+b(t,\cdot)\cdot\nabla$ is well-posed. If $\mu$ is the surface measure on $\mathbb{S}^{d-1}$, drifts belonging to the Kato class $\mathcal{K}_{\alpha-1}^{d}$ were considered in \cite{MR3192504,chen2013uniqueness}; for the case when $\mu$ is non-degenerate, drifts from some Hölder or $L^{p}$ spaces were treated in \cite{MR2945756,MR3127913,jin2015weak}. In addition to drift-type perturbations mentioned above, perturbations of $A$ including a lower order non-local term have also been investigated. This type of perturbation was first considered in \cite{MR736974}. There, the perturbation operator $B$ took the form \[ Bf(x)=\mathbf{1}_{\alpha>1}b(x)\cdot\nabla f(x)+\int_{\mathbb{R}^{d}\backslash\{0\}}[f(x+y)-f(x)-\mathbf{1}_{\alpha>1}\mathbf{1}_{\{|y|\le1\}}y\cdot\nabla f(x)]M(x,dy), \] and, under some appropriate conditions on $\mu$, $b$ and $M$, uniqueness of the martingale problem for $A+B$ was obtained. 
As an essential step, some non-local estimates on the resolvent of $A$ were established in \cite{MR736974}. To obtain these estimates, relatively strong regularity conditions on the spectral measure $\mu$ were needed. More precisely, it was assumed in \cite{MR736974} that the spectral measure $\mu$ has the Radon-Nikodym density $m(y),\ y\in\mathbb{S}^{d-1}$, with respect to the surface measure on $\mathbb{S}^{d-1},$ and $m(\cdot)$ is $d$-times continuously differentiable on $\mathbb{S}^{d-1}$ and not identically $0$. Afterwards, similar perturbations of stable-like operators were considered in \cite{MR1248747,MR3145767,MR3201992,chen2016uniqueness}; among many other things, well-posedness of the corresponding martingale problem was obtained in \cite{MR3201992,chen2016uniqueness}. We remark that in \cite{chen2016uniqueness}, the jump measures of the stable-like operator don't need to have densities with respect to the Lebesgue measure and are merely assumed to be controlled from above and below, respectively, by two Lévy measures of non-degenerate $\alpha$-stable processes. The anisotropic fractional Laplacian is a special Lévy-type generator. A general Lévy-type generator is given by \begin{align} Lf(x) & =\sum_{i,j=1}^{d}a_{ij}\frac{\partial^{2}}{\partial x_{i}\partial x_{j}}f(x)+b\cdot\nabla f(x)\nonumber \\ & \qquad+\int_{\mathbb{R}^{d}\backslash\{0\}}\left[f(x+y)-f(x)-\mathbf{1}_{\{|y|\le1\}}y\cdot\nabla f(x)\right]\nu(dy),\label{MPAFLdefiofA-1} \end{align} where $(a_{ij})_{1\le i,j\le d}$ is a positive semi-definite symmetric $d\times d$ matrix, $b\in\mathbb{R}^{d}$, and $\nu$ is a Lévy measure on $\mathbb{R}^{d}\backslash\{0\}$. The tuple $\left((a_{ij})_{1\le i,j\le d},b,\nu\right)$ is called the Lévy triple of $L$. In this paper, we study the martingale problem for (time-dependent) non-local perturbations of a general Lévy-type generator whose Lévy measure is controlled from below by that of a non-degenerate anisotropic fractional Laplacian. 
Our main result is the following: \begin{thm} \label{thm: main}Let $L$ be as in \emph{(}\ref{MPAFLdefiofA-1}\emph{)} and assume that there exist some $\alpha\in(0,2)$ and a non-degenerate finite measure $\mu$ on $\mathbb{S}^{d-1}$ such that \begin{equation} \nu(B)\ge\int_{\mathbb{S}^{d-1}}\mu(d\xi)\int_{0}^{\infty}\mathbf{1}_{B}(r\xi)\frac{dr}{r^{1+\alpha}},\quad\forall B\in\mathcal{B}(\mathbb{R}^{d}).\label{eq:MPAFLdefiofnu-1} \end{equation} Define the operator $K_{t}$ by \begin{equation} K_{t}f(x):=\int_{\mathbb{R}^{d}\backslash\{0\}}[f(x+y)-f(x)-\mathbf{1}_{\alpha>1}\mathbf{1}_{\{|y|\le1\}}y\cdot\nabla f(x)]M(t,x,dy),\label{first defi: K_t} \end{equation} where $M$ is a measurable kernel from $\mathbb{R}_+ \times \mathbb{R}^{d}$ to $\mathcal{B}\left(\mathbb{R}^{d}\backslash\{0\}\right)$ and $M(t,x,\cdot)$ is a Lévy measure on $\mathbb{R}^{d}\backslash\{0\}$ for each $(t,x)\in\mathbb{R}_+ \times \mathbb{R}^{d}$. If there exists some $\beta\in(0,\alpha)$ such that \begin{equation} \sup_{t\geq0,x\in\mathbb{R}^{d}}\int_{\mathbb{R}^{d}\backslash\{0\}}1\wedge|y|^{\beta}M(t,x,dy)<\infty,\label{conditon 2 for K} \end{equation} then the martingale problem for $\mathcal{L}_{t}=L+K_{t}$ is well-posed. \end{thm} Note that the matrix $(a_{ij})_{1\le i,j\le d}$ in (\ref{MPAFLdefiofA-1}) is not assumed to be non-degenerate in Theorem \ref{thm: main}. Indeed, if $(a_{ij})_{1\le i,j\le d}$ is non-degenerate, then by the classical results of Stroock \cite{MR0433614}, the assumption (\ref{conditon 2 for K}) in Theorem \ref{thm: main} can be relaxed to $\sup_{t\geq0,x\in\mathbb{R}^{d}}\int_{\mathbb{R}^{d}\backslash\{0\}}1\wedge|y|^{2}M(t,x,dy)<\infty$. Here we are more interested in the case where $(a_{ij})_{1\le i,j\le d}$ is degenerate and the non-local part of $L$ acts as the leading term. 
The novelty of our Theorem \ref{thm: main}, compared to the results of \cite{MR736974,MR3201992,chen2016uniqueness} in this direction, lies firstly in the fact that the generator $L$ here contains a possibly degenerate diffusion part. As far as the author knows, non-local perturbations of this kind of Lévy-type generators have not yet been considered. Another point we would like to mention is that the Lévy measure $\nu$ of $L$ is only required to satisfy the lower bound condition (\ref{eq:MPAFLdefiofnu-1}), which is weaker than those assumed in the above mentioned works. As a compensation, our assumption (\ref{conditon 2 for K}) on the perturbing jump kernel $M(t,x,\cdot)$, which guarantees that $K_{t}$ is a lower order perturbation of $L$, is actually slightly stronger than those in \cite{MR736974,MR3201992}. Our strategy to prove the asserted uniqueness is motivated by the method of Komatsu in \cite{MR736974}. We will derive some non-local estimates of the resolvent of $L$. Since our assumption on the Lévy measure $\nu$ is much weaker than that of \cite{MR736974}, together with the presence of the possibly degenerate diffusion part of $L$ and the time-dependency of the kernel $M(t,x,\cdot)$, our arguments are technically more involved. To obtain the existence, we will first consider smooth approximations $\mathcal{L}_{n,t}$ of $\mathcal{L}_{t}$ and then derive some Krylov's estimates for the martingale solutions corresponding to $\mathcal{L}_{n,t}$. It turns out that the limit point (under the topology of weak convergence for measures) of these martingale solutions exists and solves the martingale problem for $\mathcal{L}_{t}$. The rest of the paper is organized as follows. In Section 2 we give some notation and recall the definition of the martingale problem for non-local generators. In Section 3 we establish some estimates on the time-space resolvent of the Lévy process with generator $L$. 
In Section 4 we construct the time-space resolvent corresponding to $\mathcal{L}_{t}$. Finally, we prove Theorem \ref{thm: main} in Section 5. \section{Preliminaries} The inner product of $x$ and $y$ in $\mathbb{R}^{d}$ is written as $x\cdot y$. We use $|v|$ to denote the Euclidean norm of a vector $v\in\mathbb{R}^{m}$, $m\in\mathbb{N}$. For a bounded function $g:\mathbb{R}_+ \times \mathbb{R}^{d}\to\mathbb{R}^{m}$ we write $\|g\|:=\sup_{(s,x)\in\mathbb{R}_+ \times \mathbb{R}^{d}}|g(s,x)|$. Let $\mathbb{S}^{d-1}:=\{x\in\mathbb{R}^{d}:|x|=1\}$ be the unitary sphere. Let $C_{b}^{2}(\mathbb{R}^{d})$ denote the class of $C^{2}$ functions such that the function and its first and second order partial derivatives are bounded. Note that $C_{b}^{2}(\mathbb{R}^{d})$ is a Banach space endowed with the norm \[ \|f\|_{C_{b}^{2}(\mathbb{R}^{d})}:=\|f\|+\sum_{i=1}^{d}\|\partial_{i}f\|+\sum_{i,j=1}^{d}\|\partial_{ij}^{2}f\|,\quad f\in C_{b}^{2}(\mathbb{R}^{d}), \] where $\partial_{i}f(x):=\partial_{x_{i}}f(x)$ and $\partial_{ij}^{2}f(x):=\partial_{x_{i}x_{j}}^{2}f(x)$ for $x\in\mathbb{R}^{d}$. For $k\in\mathbb{N}$ and $k\ge3$, the space $C_{b}^{k}(\mathbb{R}^{d})$ and the norm on $C_{b}^{k}(\mathbb{R}^{d})$ are similarly defined. Consider a Lévy-type generator \begin{align} Lf(x) & =\sum_{i,j=1}^{d}a_{ij}\frac{\partial^{2}}{\partial x_{i}\partial x_{j}}f(x)+b\cdot\nabla f(x)\nonumber \\ & \qquad+\int_{\mathbb{R}^{d}\backslash\{0\}}\left[f(x+y)-f(x)-\mathbf{1}_{\{|y|\le1\}}y\cdot\nabla f(x)\right]\nu(dy),\label{MPAFLdefiofA} \end{align} defined for every $f\in C_{b}^{2}(\mathbb{R}^{d})$, where $(a_{ij})_{1\le i,j\le d}$ is a positive semi-definite symmetric $d\times d$ matrix, $b\in\mathbb{R}^{d}$, and $\nu$ is a Lévy measure on $\mathbb{R}^{d}\backslash\{0\}.$ Throughout this paper, we assume that the generator $L$ satisfies the following assumption. 
\begin{assumption} \label{MPAFLass21}There exist $\alpha\in(0,2)$ and a non-degenerate finite measure $\mu$ on $\mathbb{S}^{d-1}$ such that \begin{equation} \nu(B)\ge\int_{\mathbb{S}^{d-1}}\mu(d\xi)\int_{0}^{\infty}\mathbf{1}_{B}(r\xi)\frac{dr}{r^{1+\alpha}},\quad\forall B\in\mathcal{B}(\mathbb{R}^{d}).\label{eq:MPAFLdefiofnu} \end{equation} \end{assumption} By non-degeneracy of $\mu$ we mean that the support of $\mu$ is not contained in a proper linear subspace of $\mathbb{R}^{d}$. \begin{rem} Since we don't assume additional conditions on $(a_{ij})_{1\le i,j\le d}$, the matrix $(a_{ij})_{1\le i,j\le d}$ can be degenerate. \end{rem} Recall that $K_{t}$ is given by \begin{equation} K_{t}f(x)=\int_{\mathbb{R}^{d}\backslash\{0\}}[f(x+y)-f(x)-\mathbf{1}_{\alpha>1}\mathbf{1}_{\{|y|\le1\}}y\cdot\nabla f(x)]M(t,x,dy),\label{MPAFLdefiofKt} \end{equation} where $M$ is a kernel from $\mathbb{R}_+ \times \mathbb{R}^{d}$ to $\mathcal{B}\left(\mathbb{R}^{d}\backslash\{0\}\right)$ with $M(t,x,\cdot)$ being a Lévy measure on $\mathbb{R}^{d}\backslash\{0\}$ for each $(t,x)\in\mathbb{R}_+ \times \mathbb{R}^{d}$. Without any further specification, we will always assume the following: \begin{assumption} \label{eq:MAPAFLassonM}There exists $\beta\in(0,\alpha)$ such that \[ \sup_{t\geq0,x\in\mathbb{R}^{d}}\int_{\mathbb{R}^{d}\backslash\{0\}}1\wedge|y|^{\beta}M(t,x,dy)<\infty. \] \end{assumption} Let \begin{equation} \mathcal{L}_{t}:=L+K_{t},\label{MPAFLgeneratorlt} \end{equation} where $L$ and $K_{t}$ are defined in (\ref{MPAFLdefiofA}) and (\ref{MPAFLdefiofKt}), respectively. Let $D=D\big([0,\infty)\big)$, the set of paths in $\mathbb{R}^{d}$ that are right continuous with left limits, endowed with the Skorokhod topology. Set $X_{t}(\omega)=\omega(t)$ for $\omega\in D$ and let $\mathcal{D}=\sigma(X_{t}:0\le t<\infty)$ and $\mathcal{F}_{t}:=\sigma(X_{r}:0\le r\le t)$. 
A probability measure $\mathbf{P}$ on $(D,\mathcal{D})$ is called a solution to the martingale problem for $\mathcal{L}_{t}$ starting from $(s,x)$, if \begin{equation} \mathbf{P}(X_{t}=x,\ \forall t\le s)=1\label{MPAFLwsintegrcondi} \end{equation} and under the measure $\mathbf{P}$, \begin{equation} f(X_{t})-\int_{s}^{t}\mathcal{L}_{u}f(X_{u})du,\quad t\ge s,\label{MPAFLeq2defimp} \end{equation} is an $\mathcal{F}_{t}$-martingale after time $s$ for all $f\in C_{b}^{2}(\mathbb{R}^{d})$. \section{Estimates on the time-space resolvent of the Lévy process with generator $L$} In this section we consider a $d$-dimensional Lévy process $S=(S_{t})_{t\geq0}$ with generator $L$ that is defined in (\ref{MPAFLdefiofA}). So $S$ has the Lévy triple $((a_{ij})_{1\le i,j\le d},b,\nu)$, namely, \begin{align} \mathbf{E}\big[e^{iS_{t}\cdot u}\big] & =e^{-t\psi(u)},\quad u\in\mathbb{R}^{d},\nonumber \\ \psi(u) & =\sum_{i,j=1}^{d}a_{ij}u_{i}u_{j}-\int_{\mathbb{R}^{d}\setminus\{0\}}\Big(e^{iu\cdot y}-1-\mathbf{1}_{\{|y|\le1\}}iu\cdot y\Big)\nu(dy)-ib\cdot u,\label{MPAFLeqsect21} \end{align} where $(a_{ij})_{1\le i,j\le d}$, $b$ and $\nu$ are the same as in (\ref{MPAFLdefiofA}). Let $\alpha\in(0,2)$ and $\mu$ be as in Assumption \ref{MPAFLass21}. Define \begin{equation} \tilde{\nu}(B)=\int_{\mathbb{S}^{d-1}}\mu(d\xi)\int_{0}^{\infty}\mathbf{1}_{B}(r\xi)\frac{dr}{r^{1+\alpha}},\quad B\in\mathcal{B}(\mathbb{R}^{d}),\label{defi: nu tilde} \end{equation} and \begin{equation} \tilde{\psi}(u)=-\int_{\mathbb{R}^{d}\setminus\{0\}}\Big(e^{iu\cdot y}-1-\mathbf{1}_{\{|y|\le1\}}iu\cdot y\Big)\tilde{\nu}(dy),\quad u\in\mathbb{R}^{d}.\label{defi: psi tilde} \end{equation} Then $\tilde{\psi}$ is the characteristic exponent of an $\alpha$-stable process $\tilde{S}=(\tilde{S}_{t})_{t\geq0}$. Let $\hat{\psi}:=\psi-\tilde{\psi}$. So $\hat{\psi}$ is the characteristic exponent of a Lévy process $\hat{S}=(\hat{S}_{t})_{t\geq0}$ with the Lévy triple $(A,b,\nu-\tilde{\nu})$. 
Without loss of generality, we assume that $S$, $\tilde{S}$ and $\hat{S}$ are defined on the same probability space. Define \begin{equation} \gamma:=\begin{cases} -\int_{\{0<|y|\le1\}}y\tilde{\nu}(dy), & 0<\alpha<1,\\ \int_{\mathbb{S}^{d-1}}\xi\mu(d\xi), & \alpha=1,\\ \int_{\{|y|>1\}}y\tilde{\nu}(dy), & 1<\alpha<2. \end{cases}\label{defi, gamma} \end{equation} Then for $\alpha\neq1$, the function $\tilde{\psi}(u)+iu\cdot\gamma$ becomes a homogeneous function (with variable $u$) of index $\alpha$. As a result, for $\alpha\neq1$, we obtain \begin{equation} \tilde{\psi}(\rho u)+i(\rho u\cdot\gamma)=\rho^{\alpha}(\tilde{\psi}(u)+i(u\cdot\gamma)),\quad\forall\rho>0.\label{MPAFLeqsect215-1} \end{equation} The case with $\alpha=1$ is a little different. For $\alpha=1$, according to \cite[p.~84,~(14.20)]{MR1739520} and its complex conjugate, it holds that \[ \tilde{\psi}(u)=\int_{\mathbb{S}^{d-1}}\left(\frac{\pi}{2}|u\cdot\xi|+iu\cdot(\xi\log|u\cdot\xi|)-ic_{1}u\cdot\xi\right)\mu(d\xi),\quad u\in\mathbb{R}^{d}, \] where $c_{1}=\int_{1}^{\infty}r^{2}\sin rdr+\int_{0}^{1}r^{-2}(\sin r-r)dr$; in this case, we have \begin{equation} \tilde{\psi}(\rho u)=\rho\tilde{\psi}(u)+i(\rho\log\rho)u\cdot\gamma,\quad\forall\rho>0,\ u\in\mathbb{R}^{d}.\label{MPAFLeqsect217} \end{equation} According to Assumption \ref{MPAFLass21} and \cite[Prop.~24.20]{MR1739520}, there exists some constant $c_{2}>0$ such that \begin{equation} \left|e^{-t\tilde{\psi}(u)}\right|\le e^{-c_{2}t|u|^{\alpha}},\quad\forall u\in\mathbb{R}^{d},\ t>0.\label{MPAFLsect31} \end{equation} By the inversion formula of Fourier transform, the law of $\tilde{S}_{t}$ has a density $\tilde{p}_{t}\in L^{1}(\mathbb{R}^{d})\cap C_{b}(\mathbb{R}^{d})$ that is given by \begin{equation} \tilde{p}_{t}(x)=\frac{1}{(2\pi)^{d}}\int_{\mathbb{R}^{d}}e^{-iu\cdot x}e^{-t\tilde{\psi}(u)}du,\quad x\in\mathbb{R}^{d},\ t>0.\label{defi: p tilde} \end{equation} Moreover, according to \cite[p.~2856,~(2.3)]{MR2286060}, we have the following scaling 
property for $\tilde{p}_{t}$: for $x\in\mathbb{R}^{d},\ t>0,$ \begin{equation} \tilde{p}_{t}(x)=\begin{cases} t^{-d/\alpha}\tilde{p}_{1}(t^{-1/\alpha}x+(1-t^{1-1/\alpha})\gamma),\quad & (\alpha\neq1),\\ t^{-d}\tilde{p}_{1}(t^{-1}x-\gamma\log t), & (\alpha=1), \end{cases}\label{MPAFLsect315} \end{equation} where $\gamma$ is given in (\ref{defi, gamma}). The following result is a slight extension of \cite[Lemma 3.1]{MR2945756}. For its proof the reader is referred to \cite[Lemma 3.1]{jin2015weak}. \begin{lem} \label{lemma: p_t tilde}Let $t>0$ be arbitrary. Then the densities $\tilde{p}_{t}\in C_{b}^{\infty}(\mathbb{R}^{d})\cap L^{r}(\mathbb{R}^{d})$ for all $r\ge1$. \end{lem} Since \[ \mathbf{E}\big[e^{iS_{t}\cdot u}\big]=e^{-t\psi(u)}=e^{-t\tilde{\psi}(u)}e^{-t\hat{\psi}(u)}=\mathbf{E}\big[e^{i\tilde{S}_{t}\cdot u}\big]\mathbf{E}\big[e^{i\hat{S}_{t}\cdot u}\big], \] the law of $S_{t}$ has a density $p_{t}$ that is given by \begin{equation} p_{t}(x):=\int_{\mathbb{R}^{d}}\tilde{p}_{t}(x-y)\hat{m}_{t}(dy),\quad x\in\mathbb{R}^{d},\ t>0,\label{defi: p} \end{equation} where $\hat{m}_{t}$ denotes the law of $\hat{S}_{t}$. It follows from Lemma \ref{lemma: p_t tilde} that $p_{t}\in C_{b}^{\infty}(\mathbb{R}^{d})\cap L^{r}(\mathbb{R}^{d})$ for all $r\ge1$. For $0<\delta<1$, define the integro-differential operator $|\partial|^{\delta}$ by \[ |\partial|^{\delta}f(x)=c_{3}\int_{\mathbb{R}^{d}\backslash\{0\}}\left[f(x+y)-f(x)\right]\cdot|y|^{-d-\delta}dy,\quad f\in C_{b}^{2}(\mathbb{R}^{d}), \] where the constant $c_{3}$ is given by \[ c_{3}:=2^{\delta}\pi^{-d/2}\Gamma\left(\frac{d+\delta}{2}\right)/\Gamma\left(-\frac{\delta}{2}\right). \] Note that \begin{equation} c_{3}\int_{\mathbb{R}^{d}\setminus\{0\}}\Big(e^{iu\cdot y}-1\Big)|y|^{-d-\delta}dy=-|u|^{\delta},\quad u\in\mathbb{R}^{d}.\label{eq: u^delta} \end{equation} Next, we give an estimate of the $L^{r}$-norm of $|\partial|^{\delta}p_{t}$. \begin{lem} \label{lem: lp esti for delta p_t}Let $0<\delta<1$ and $r\ge1$. 
Then there exists a constant $c_{4}>0$ that depends on $\delta$ and $r$ such that \begin{equation} \||\partial|^{\delta}p_{t}\|_{L^{r}(\mathbb{R}^{d})}\le c_{4}t^{(d/r-\delta-d)/\alpha},\quad\forall t>0.\label{eq:MPAFLgtxl1norm} \end{equation} \end{lem} \begin{proof} Since $|\partial|^{\delta}p_{t}(x)=\int_{\mathbb{R}^{d}}|\partial|^{\delta}\tilde{p}_{t}(x-y)\hat{m}_{t}(dy)$, $t>0$, by Jensen's inequality, it suffices to prove \[ \||\partial|^{\delta}\tilde{p}_{t}\|_{L^{r}(\mathbb{R}^{d})}\le t^{(d/r-\delta-d)/\alpha}\||\partial|^{\delta}\tilde{p}_{1}\|_{L^{r}(\mathbb{R}^{d})}<\infty,\quad\forall t>0. \] By (\ref{MPAFLsect31}), (\ref{defi: p tilde}) and Fubini's theorem, we easily obtain that for each $t>0$, \begin{equation} |\partial|^{\delta}\tilde{p}_{t}(x)=-\frac{1}{(2\pi)^{d}}\int_{\mathbb{R}^{d}}|u|^{\delta}e^{-t\tilde{\psi}(u)}e^{-iu\cdot x}du,\quad x\in\mathbb{R}^{d}.\label{eq:MPAFLsect316} \end{equation} We first assume $\alpha\neq1$. Using a change of variables $u=t^{-1/\alpha}u'$ and noting (\ref{MPAFLeqsect215-1}), we obtain \begin{align*} |\partial|^{\delta}\tilde{p}_{t}(x)= & -\frac{t^{-d/\alpha}}{(2\pi)^{d}}\int_{\mathbb{R}^{d}}t^{-\delta/\alpha}|u'|^{\delta}e^{-(\tilde{\psi}(u')+iu'\cdot\gamma)+it^{1-1/\alpha}u'\cdot\gamma}e^{-it^{-1/\alpha}u'\cdot x}du'\\ = & t^{-(\delta+d)/\alpha}|\partial|^{\delta}\tilde{p}_{1}\left(t^{-1/\alpha}x-\gamma(t^{1-1/\alpha}-1)\right). \end{align*} So \begin{align} \||\partial|^{\delta}\tilde{p}_{t}\|_{L^{r}(\mathbb{R}^{d})} & \le t^{-(\delta+d)/\alpha}\left(\int_{\mathbb{R}^{d}}\left(|\partial|^{\delta}\tilde{p}_{1}(t^{-1/\alpha}x)\right)^{r}dx\right)^{1/r}\nonumber \\ & =t^{(d/r-\delta-d)/\alpha}\||\partial|^{\delta}\tilde{p}_{1}\|_{L^{r}(\mathbb{R}^{d})}.\label{MPAFLeqremark3156} \end{align} For the case $\alpha=1$, we can apply (\ref{MPAFLeqsect217}) and a similar argument as above to also obtain (\ref{MPAFLeqremark3156}). So (\ref{MPAFLeqremark3156}) is true for all $\alpha\in(0,2)$. 
It remains to show that $\||\partial|^{\delta}\tilde{p}_{1}\|_{L^{r}(\mathbb{R}^{d})}<\infty$, or equivalently, \begin{equation} \int_{\mathbb{R}^{d}}\Big|\int_{\mathbb{R}^{d}}|u|^{\delta}e^{-\tilde{\psi}(u)}e^{-iu\cdot y}du\Big|^{r}dy<\infty.\label{eq2:MPAFLlemma6udel} \end{equation} To prove this fact, we use the same idea as in the proof of \cite[Lemma~3.4]{jin2015weak}. Firstly, note that the characteristic exponent $\tilde{\psi}$ can be written as the sum of $\tilde{\psi}_{1}$ and $\tilde{\psi}_{2}$, where \[ \tilde{\psi}_{1}(u)=-\int_{\{0<|y|\le1\}}\Big(e^{iu\cdot y}-1-iu\cdot y\Big)\tilde{\nu}(dy),\quad\tilde{\psi}_{2}=\tilde{\psi}-\tilde{\psi}_{1}. \] We can easily check that $\tilde{\psi}_{1}\in C^{\infty}(\mathbb{R}^{d})$. Since (\ref{MPAFLsect31}) holds, we see that $\exp(-\tilde{\psi}_{1})$ belongs to the Schwartz space $\mathcal{S}(\mathbb{R}^{d})$. According to (\ref{eq: u^delta}), we can write $|u|^{\delta}=\psi_{\delta,1}(u)+\psi_{\delta,2}(u)+\psi_{\delta,3}$, where \[ \psi_{\delta,1}(u)=-c_{3}\int_{\{0<|y|\le1\}}\left(e^{iu\cdot y}-1\right)|y|^{-d-\delta}dy \] and \[ \psi_{\delta,2}(u)=-c_{3}\int_{\{|y|>1\}}e^{iu\cdot y}|y|^{-d-\delta}dy,\quad\psi_{\delta,3}=c_{3}\int_{\{|y|>1\}}|y|^{-d-\delta}dy. \] Then \begin{align} |u|^{\delta}e^{-\tilde{\psi}}= & \psi_{\delta,1}e^{-\tilde{\psi}_{1}}e^{-\tilde{\psi}_{2}}+\psi_{\delta,2}e^{-\tilde{\psi}_{1}}e^{-\tilde{\psi}_{2}}+\psi_{\delta,3}e^{-\tilde{\psi}_{1}}e^{-\tilde{\psi}_{2}}\nonumber \\ = & \psi_{\delta,1}e^{-\tilde{\psi}_{1}}e^{-\tilde{\psi}_{2}}-e^{-\tilde{\psi}_{1}}(-\psi_{\delta,2})e^{-\tilde{\psi}_{2}}+\psi_{\delta,3}e^{-\tilde{\psi}_{1}}e^{-\tilde{\psi}_{2}}.\label{eq1:MPAFLlemma6udel} \end{align} We only treat the first term on the right-hand side of (\ref{eq1:MPAFLlemma6udel}), since the other two terms are similar. By the same reasoning as for $\exp(-\tilde{\psi}_{1})$ above, we have $\psi_{\delta,1}\exp(-\tilde{\psi}_{1})\in\mathcal{S}(\mathbb{R}^{d})$. 
It is also easy to see that $\exp(-\tilde{\psi}_{2})$ is bounded and is the characteristic function of an infinitely divisible probability measure $\rho$ on $\mathbb{R}^{d}$. As a consequence, we are allowed to define $h$ to be the inverse Fourier transform of $\psi_{\delta,1}\exp(-\tilde{\psi})$, i.e., \[ h(y):=\frac{1}{(2\pi)^{d}}\int_{\mathbb{R}^{d}}\psi_{\delta,1}e^{-\tilde{\psi}_{1}}e^{-\tilde{\psi}_{2}}e^{-iu\cdot y}du,\quad y\in\mathbb{R}^{d}. \] Since the Fourier transform is a one-to-one map of $\mathcal{S}(\mathbb{R}^{d})$ onto itself, we can find $f\in\mathcal{S}(\mathbb{R}^{d})$ with $\hat{f}=\psi_{\delta,1}\exp(-\tilde{\psi}_{1})$, where $\hat{f}$ denotes the Fourier transform of $f$. In particular, we have $f\in L^{r}(\mathbb{R}^{d})$. Let $f*\rho$ be the convolution of $f$ and $\rho$. We have \[ \widehat{f*\rho}=\hat{f}\hat{\rho}=\psi_{\delta,1}e^{-\tilde{\psi}_{1}-\tilde{\psi}_{2}}=\psi_{\delta,1}e^{-\tilde{\psi}}=\hat{h}, \] which implies $h=f*\rho$. Thus $h\in C_{b}^{\infty}(\mathbb{R}^{d})$. 
By Young's inequality, we get $h\in L^{r}(\mathbb{R}^{d})$, i.e., \begin{equation} \int_{\mathbb{R}^{d}}\Big|\int_{\mathbb{R}^{d}}\psi_{\delta,1}e^{-\tilde{\psi}(u)}e^{-iu\cdot y}du\Big|^{r}dy<\infty.\label{eq3:MPAFLlemma6udel} \end{equation} Similarly, by noting that $-\psi_{\delta,2}$ and $\exp(-\tilde{\psi}_{2})$ are both characteristic functions of some finite measures on $\mathbb{R}^{d}$, we can show that \begin{equation} \int_{\mathbb{R}^{d}}\Big|\int_{\mathbb{R}^{d}}e^{-\tilde{\psi}_{1}}(-\psi_{\delta,2})e^{-\tilde{\psi}_{2}}e^{-iu\cdot y}du\Big|^{r}dy<\infty\label{eq4:MPAFLlemma6udel} \end{equation} and \begin{equation} \int_{\mathbb{R}^{d}}\Big|\int_{\mathbb{R}^{d}}\psi_{\delta,3}e^{-\tilde{\psi}_{1}}e^{-\tilde{\psi}_{2}}e^{-iu\cdot y}du\Big|^{r}dy<\infty.\label{eq5:MPAFLlemma6udel} \end{equation} Now, the inequality (\ref{eq2:MPAFLlemma6udel}) follows from (\ref{eq1:MPAFLlemma6udel}), (\ref{eq3:MPAFLlemma6udel}), (\ref{eq4:MPAFLlemma6udel}) and (\ref{eq5:MPAFLlemma6udel}). \end{proof} \begin{rem} If we understand $|\partial|^{0}$ as the identity map, then Lemma \ref{lem: lp esti for delta p_t} holds also for the case $\delta=0$, namely, for each $r\ge1$, there exists a constant $c_{4}>0$ depending on $r$ such that \begin{equation} \|p_{t}\|_{L^{r}(\mathbb{R}^{d})}\le c_{4}t^{(d/r-d)/\alpha},\quad\forall t>0.\label{esti: lp of p_t} \end{equation} Indeed, the proof of Lemma \ref{lem: lp esti for delta p_t} can be easily adapted to work also for this case. \end{rem} In the next lemma we deal with a non-local estimate on the gradient of $p_{t}$ when $1<\alpha<2$. Since its proof is completely similar to that of Lemma \ref{lem: lp esti for delta p_t}, we omit it here. \begin{lem} \label{lem: detla i p_t}Let $1<\alpha<2$, $0<\delta<\alpha-1$ and $r\ge1$. 
Then there exists a constant $c_{5}>0$ which depends on $\delta$ and $r$ such that for each $i=1,\cdots,d$, \[ \||\partial|^{\delta}\partial_{i}p_{t}\|_{L^{r}(\mathbb{R}^{d})}\le c_{5}t^{(d/r-\delta-1-d)/\alpha},\quad\forall t>0. \] \end{lem} For $\lambda>0$, the time-space resolvent $R_{\lambda}$ of the Lévy process $S$ is defined by \begin{equation} R_{\lambda}f(t,x):=\int_{0}^{\infty}e^{-\lambda u}\int_{\mathbb{R}^{d}}p_{u}(y-x)f(t+u,y)dydu,\quad(t,x)\in\mathbb{R}_+ \times \mathbb{R}^{d},\label{eq:MPAFLdefiofrlam} \end{equation} where $f\in\mathcal{B}_{b}(\mathbb{R}_{+}\times\mathbb{R}^{d})$. Before we state the next lemma, we recall two equalities from \cite[Lemma 2.1]{MR736974}: for each $0<\delta<1,$ there exist constants $c_{6},c_{7}>0$, which depend on $\delta$, such that \begin{equation} \int_{\mathbb{R}^{d}}\left|(|w+z|^{\delta-d}-|w|^{\delta-d})\right|dw=c_{6}|z|^{\delta},\label{eq1:Komastu} \end{equation} and \begin{equation} f(x+z)-f(x)=c_{7}\int_{\mathbb{R}^{d}}(|w+z|^{\delta-d}-|w|^{\delta-d})|\partial|^{\delta}f(x-w)dw,\label{eq2:Komastu} \end{equation} where $f\in C_{b}^{\infty}(\mathbb{R}^{d})$ is arbitrary. \begin{lem} Assume $0<\delta<\alpha\wedge1.$ \emph{(i)} If $\lambda>0$ and $g\in\mathcal{B}_{b}(\mathbb{R}_{+}\times\mathbb{R}^{d})$, then $|\partial|^{\delta}\left(R_{\lambda}g(t,\cdot)\right)$ is well-defined for each $t\ge0$. Moreover, there exists a constant $C_{\lambda}>0$, independent of $g$, such that \begin{equation} \left||\partial|^{\delta}\left(R_{\lambda}g(t,\cdot)\right)(x)\right|\le C_{\lambda}\|g\|\label{new new eq0} \end{equation} and \begin{equation} |R_{\lambda}g(t,x+z)-R_{\lambda}g(t,x)|\le C_{\lambda}|z|^{\delta}\|g\|\label{new new eq 0.5} \end{equation} for all $(t,x)\in\mathbb{R}_+ \times \mathbb{R}^{d}$ and $z\in\mathbb{R}^{d}$. The constant $C_{\lambda}$ goes to $0$ as $\lambda\to\infty$. 
\emph{(ii)} Let $T>0$ and $g\in\mathcal{B}_{b}(\mathbb{R}_{+}\times\mathbb{R}^{d})$ be such that $supp(g)\subset[0,T]\times\mathbb{R}^{d}$ and $g\in L^{q}([0,T];L^{p}(\mathbb{R}^{d}))$ with $p,q>0$ and $d/p+\alpha/q<\alpha-\delta$. Then for each $\lambda>0$, there exists a constant $N_{\lambda}>0$, independent of $g$ and $T$, such that \begin{equation} \left||\partial|^{\delta}\left(R_{\lambda}g(t,\cdot)\right)(x)\right|\le N_{\lambda}\|g\|_{L^{q}([0,T];L^{p}(\mathbb{R}^{d}))}\label{neweq 2: Lemma 3.4} \end{equation} and \begin{equation} |R_{\lambda}g(t,x+z)-R_{\lambda}g(t,x)|\le N_{\lambda}|z|^{\delta}\|g\|_{L^{q}([0,T];L^{p}(\mathbb{R}^{d}))}\label{neweq3: Lemma 3.4} \end{equation} for all $(t,x)\in\mathbb{R}_+ \times \mathbb{R}^{d}$ and $z\in\mathbb{R}^{d}$. Moreover, the constant $N_{\lambda}$ goes to $0$ as $\lambda\to\infty$.\label{lem4:MPAFL} \end{lem} \begin{proof} (i) Assume $g\in\mathcal{B}_{b}(\mathbb{R}_{+}\times\mathbb{R}^{d})$. Let $\epsilon>0$ be a constant such that $\delta<\delta+\epsilon<\alpha\wedge1$. 
For $z\in\mathbb{R}^{d}$, we have \begin{align} & |p_{u}(y-x-z)-p_{u}(y-x)|\nonumber \\ \overset{(\ref{eq2:Komastu})}{\le} & c_{7}\int_{\mathbb{R}^{d}}\Big|(|w-z|^{\delta+\epsilon-d}-|w|^{\delta+\epsilon-d})|\partial|^{\delta+\epsilon}p_{u}(y-x-w)\Big|dw.\label{eq3:MPFALdifferofpt} \end{align} It follows from (\ref{eq3:MPFALdifferofpt}) and Young's inequality that \begin{align} & \int_{\mathbb{R}^{d}}|p_{u}(y-x-z)-p_{u}(y-x)|dy\nonumber \\ \le & c_{7}\||\partial|^{\delta+\epsilon}p_{u}\|_{L^{1}(\mathbb{R}^{d})}\int_{\mathbb{R}^{d}}|(|w-z|^{\delta+\epsilon-d}-|w|^{\delta+\epsilon-d})|dw\nonumber \\ \overset{(\ref{eq1:Komastu})}{=} & c_{6}c_{7}|z|^{\delta+\epsilon}\||\partial|^{\delta+\epsilon}p_{u}\|_{L^{1}(\mathbb{R}^{d})}\overset{(\ref{eq:MPAFLgtxl1norm})}{\le}c_{4}c_{6}c_{7}u^{-(\delta+\epsilon)/\alpha}|z|^{\delta+\epsilon}.\label{ineq:MPAFLformulaI2} \end{align} So \begin{align} \left|R_{\lambda}g(t,x+z)-R_{\lambda}g(t,x)\right| & \le\|g\|\int_{0}^{\infty}e^{-\lambda u}\int_{\mathbb{R}^{d}}|p_{u}(y-x-z)-p_{u}(y-x)|dydu\nonumber \\ & \le c_{4}c_{6}c_{7}|z|^{\delta+\epsilon}\|g\|\int_{0}^{\infty}e^{-\lambda u}u^{-(\delta+\epsilon)/\alpha}du.\label{esti 1: R lambda} \end{align} On the other hand, we have \begin{equation} \left|R_{\lambda}g(t,x+z)-R_{\lambda}g(t,x)\right|\le2\|R_{\lambda}g\|\le2\lambda^{-1}\|g\|.\label{new new eq 2} \end{equation} By (\ref{esti 1: R lambda}) and (\ref{new new eq 2}), we can find a constant $c>0$ such that \[ \left|R_{\lambda}g(t,x+z)-R_{\lambda}g(t,x)\right|\le c\left(|z|^{\delta+\epsilon}\wedge1\right),\quad\forall z\in\mathbb{R}^{d}, \] which implies that $|\partial|^{\delta}\left(R_{\lambda}g(t,\cdot)\right)(x)$ is well-defined. 
By Fubini's theorem, we obtain that for all $t\ge0$ and $x\in\mathbb{R}^{d}$, \begin{equation} |\partial|^{\delta}\left(R_{\lambda}g(t,\cdot)\right)(x)=\int_{0}^{\infty}e^{-\lambda u}\int_{\mathbb{R}^{d}}|\partial|^{\delta}\left(p_{u}(y-\cdot)\right)(x)g(t+u,y)dydu.\label{eq:MPAFLrepreforpartialdelta} \end{equation} So for all $t\ge0$ and $x\in\mathbb{R}^{d}$, \begin{equation} \left||\partial|^{\delta}\left(R_{\lambda}g(t,\cdot)\right)(x)\right|\le\|g\|\int_{0}^{\infty}e^{-\lambda u}\left\Vert |\partial|^{\delta}p_{u}\right\Vert _{L^{1}(\mathbb{R}^{d})}du\overset{(\ref{eq:MPAFLgtxl1norm})}{\le}C_{\lambda}\|g\|,\label{neweq: Lemma 3.4} \end{equation} where \[ C_{\lambda}:=c_{4}\int_{0}^{\infty}e^{-\lambda u}u^{-\delta/\alpha}du. \] Hence (\ref{new new eq0}) is true. It is clear that $C_{\lambda}\downarrow0$ as $\lambda\to\infty$. It follows from (\ref{eq2:Komastu}) that \begin{align*} & R_{\lambda}g(t,x+z)-R_{\lambda}g(t,x)\\ & \quad=\int_{0}^{\infty}e^{-\lambda u}\int_{\mathbb{R}^{d}}[p_{u}(y-x-z)-p_{u}(y-x)]g(t+u,y)dydu\\ & \quad=\int_{0}^{\infty}e^{-\lambda u}\int_{\mathbb{R}^{d}}\Big(c_{7}\int_{\mathbb{R}^{d}}(|w-z|^{\delta-d}-|w|^{\delta-d})|\partial|^{\delta}p_{u}(y-x-w)dw\Big)\\ & \qquad\quad\times g(t+u,y)dydu. \end{align*} In view of (\ref{eq:MPAFLgtxl1norm}), (\ref{eq1:Komastu}) and (\ref{eq:MPAFLrepreforpartialdelta}), we can apply Fubini's theorem to obtain that for all $t\ge0,\ x,z\in\mathbb{R}^{d}$, \begin{equation} R_{\lambda}g(t,x+z)-R_{\lambda}g(t,x)=c_{7}\int_{\mathbb{R}^{d}}(|w-z|^{\delta-d}-|w|^{\delta-d})|\partial|^{\delta}\left(R_{\lambda}g(t,\cdot)\right)(x-w)dw.\label{eq:eq:MPAFLreprefordifferRlambda} \end{equation} Combining (\ref{eq:eq:MPAFLreprefordifferRlambda}), (\ref{eq1:Komastu}) and (\ref{neweq: Lemma 3.4}) yields (\ref{new new eq 0.5}). (ii) Since (\ref{neweq3: Lemma 3.4}) follows easily from (\ref{eq1:Komastu}), (\ref{neweq 2: Lemma 3.4}) and (\ref{eq:eq:MPAFLreprefordifferRlambda}), we only need to prove (\ref{neweq 2: Lemma 3.4}). 
Note that $supp(g)\subset[0,T]\times\mathbb{R}^{d}$. By (\ref{eq:MPAFLrepreforpartialdelta}) and Hölder's inequality, we get \begin{align*} ||\partial|^{\delta}\left(R_{\lambda}g(t,\cdot)\right)(x)|= & \Big|\int_{0}^{\infty}e^{-\lambda u}\int_{\mathbb{R}^{d}}|\partial|^{\delta}\left(p_{u}(y-\cdot)\right)(x)g(t+u,y)dydu\Big|\\ \le & \int_{0}^{\infty}e^{-\lambda u}\||\partial|^{\delta}p_{u}\|_{L^{p^{*}}(\mathbb{R}^{d})}\|g(t+u,\cdot)\|_{L^{p}(\mathbb{R}^{d})}du\\ = & \int_{0}^{T}e^{-\lambda u}\||\partial|^{\delta}p_{u}\|_{L^{p^{*}}(\mathbb{R}^{d})}\|g(t+u,\cdot)\|_{L^{p}(\mathbb{R}^{d})}du\\ \le & \Big(\int_{0}^{T}e^{-q^{*}\lambda u}\||\partial|^{\delta}p_{u}\|_{L^{p^{*}}(\mathbb{R}^{d})}^{q^{*}}du\Big)^{1/q^{*}}\|g\|_{L^{q}([0,T];L^{p}(\mathbb{R}^{d}))}, \end{align*} where $p^{*},q^{*}>0$ are such that $1/p^{*}+1/p=1$ and $1/q^{*}+1/q=1$. By (\ref{eq:MPAFLgtxl1norm}), we see that the inequality (\ref{neweq 2: Lemma 3.4}) holds with \[ N_{\lambda}:=\Big(c_{4}\int_{0}^{\infty}e^{-q^{*}\lambda u}u^{q^{*}\alpha^{-1}(d/p^{*}-\delta-d)}du\Big)^{1/q^{*}}, \] which is finite if $q^{*}\alpha^{-1}(d/p^{*}-\delta-d)>-1$, or equivalently, $d/p+\alpha/q<\alpha-\delta$. By the dominated convergence theorem, $\lim_{\lambda\to\infty}N_{\lambda}=0$. \end{proof} \begin{lem} Let $1<\alpha<2$ and $0<\delta<\alpha-1$. \emph{(i)} If $\lambda>0$ and $g\in\mathcal{B}_{b}(\mathbb{R}_{+}\times\mathbb{R}^{d})$, then $|\partial|^{\delta}\left(\partial_{i}R_{\lambda}g(t,\cdot)\right)$ is well-defined for each $t\ge0$. Moreover, there exists a constant $\tilde{C}_{\lambda}>0$, independent of $g$, such that for all $g\in\mathcal{B}_{b}(\mathbb{R}_{+}\times\mathbb{R}^{d})$, \[ \left||\partial|^{\delta}\left(\partial_{i}R_{\lambda}g(t,\cdot)\right)(x)\right|\le\tilde{C}_{\lambda}\|g\| \] and \[ |\partial_{i}R_{\lambda}g(t,x+z)-\partial_{i}R_{\lambda}g(t,x)|\le\tilde{C}_{\lambda}|z|^{\delta}\|g\| \] for all $(t,x)\in\mathbb{R}_+ \times \mathbb{R}^{d}$, $z\in\mathbb{R}^{d}$ and $i=1,\cdots,d$. 
The constant $\tilde{C}_{\lambda}$ goes to $0$ as $\lambda\to\infty$. \emph{(ii)} Let $T>0$ and $g\in\mathcal{B}_{b}(\mathbb{R}_{+}\times\mathbb{R}^{d})$ be such that $supp(g)\subset[0,T]\times\mathbb{R}^{d}$ and $g\in L^{q}([0,T];L^{p}(\mathbb{R}^{d}))$ with $d/p+\alpha/q<\alpha-1-\delta$. Then for each $\lambda>0$, there exists a constant $\tilde{N}_{\lambda}>0$, independent of $g$ and $T$, such that \[ \left||\partial|^{\delta}\left(\partial_{i}R_{\lambda}g(t,\cdot)\right)(x)\right|\le\tilde{N}_{\lambda}\|g\|_{L^{q}([0,T];L^{p}(\mathbb{R}^{d}))} \] and \[ |\partial_{i}R_{\lambda}g(t,x+z)-\partial_{i}R_{\lambda}g(t,x)|\le\tilde{N}_{\lambda}|z|^{\delta}\|g\|_{L^{q}([0,T];L^{p}(\mathbb{R}^{d}))} \] for all $(t,x)\in\mathbb{R}_+ \times \mathbb{R}^{d}$, $z\in\mathbb{R}^{d}$ and $i=1,\cdots,d$. Moreover, the constant $\tilde{N}_{\lambda}$ goes to $0$ as $\lambda\to\infty$.\label{lem:MPAFLdifferrlam} \end{lem} \begin{proof} Let $g\in\mathcal{B}_{b}(\mathbb{R}_{+}\times\mathbb{R}^{d})$ be arbitrary. It is easy to see that for each $i=1,\cdots,d,$ \[ \partial_{i}R_{\lambda}g(t,x)=-\int_{0}^{\infty}e^{-\lambda u}\int_{\mathbb{R}^{d}}\partial_{i}p_{u}(y-x)g(t+u,y)dydu. \] In view of Lemma \ref{lem: detla i p_t}, we can argue in the same way as in Lemma \ref{lem4:MPAFL} to derive the statements. We omit the details. \end{proof} \section{Construction of the time-space resolvent corresponding to $\mathcal{L}_{t}$} In this section we give a purely analytical construction of the time-space resolvent $G_{\lambda}$ that corresponds to the generator $\mathcal{L}_{t}:=L+K_{t}$. Roughly speaking, we can write $G_{\lambda}=(\lambda-\partial_{t}-\mathcal{L}_{t})^{-1}$. 
The main aim of this section is to establish rigorously, at least for large enough $\lambda>0$, that \[ G_{\lambda}g=\sum_{k=0}^{\infty}R_{\lambda}(KR_{\lambda})^{k}g,\quad g\in\mathcal{B}_{b}(\mathbb{R}_{+}\times\mathbb{R}^{d}), \] where $R_{\lambda}$ is the time-space resolvent of the Lévy process $S$ and the operator $KR_{\lambda}$ is defined by \begin{align} KR_{\lambda}g(t,x) & :=\int_{\mathbb{R}^{d}\backslash\{0\}}[R_{\lambda}g(t,x+z)-R_{\lambda}g(t,x)\nonumber \\ & \qquad-\mathbf{1}_{\alpha>1}\mathbf{1}_{\{|z|\le1\}}z\cdot\nabla R_{\lambda}g(t,x)]M(t,x,dz),\quad(t,x)\in\mathbb{R}_+ \times \mathbb{R}^{d}.\label{defi: KR_lambda} \end{align} To see that $KR_{\lambda}g$ in (\ref{defi: KR_lambda}) is well-defined for $g\in\mathcal{B}_{b}(\mathbb{R}_{+}\times\mathbb{R}^{d})$, we need the following proposition. \begin{prop} \label{prop MPAFL:For-any-}For each $\lambda>0,$ define \begin{equation} k_{\lambda}:=\begin{cases} (C_{\lambda}+2\lambda^{-1})\sup_{t\geq0,x\in\mathbb{R}^{d}}\int_{\mathbb{R}^{d}\backslash\{0\}}1\wedge|z|^{\beta}M(t,x,dz), & 0<\alpha\leq1,\\ (\tilde{C}_{\lambda}+2\lambda^{-1})\sup_{t\geq0,x\in\mathbb{R}^{d}}\int_{\mathbb{R}^{d}\backslash\{0\}}1\wedge|z|^{\beta}M(t,x,dz), & 1<\alpha<2, \end{cases}\label{defi: k_lambda} \end{equation} where $C_{\lambda}$ and $\tilde{C}_{\lambda}$ are the constants from Lemma \ref{lem4:MPAFL} and Lemma \ref{lem:MPAFLdifferrlam}, respectively. Then \begin{equation} \|KR_{\lambda}g\|\leq k_{\lambda}\|g\|,\quad\forall g\in\mathcal{B}_{b}(\mathbb{R}_{+}\times\mathbb{R}^{d}).\label{esti: kr_lambda} \end{equation} \end{prop} \begin{proof} Let $\beta\in(0,\alpha)$ be the constant in Assumption \ref{eq:MAPAFLassonM}. We distinguish between the cases with $0<\alpha\leq1$ and $1<\alpha<2$. \emph{Case} 1: $0<\alpha\leq1$. 
According to Lemma \ref{lem4:MPAFL}, there exists a constant $C_{\lambda}>0$ such that for all $g\in\mathcal{B}_{b}(\mathbb{R}_{+}\times\mathbb{R}^{d})$, \[ |R_{\lambda}g(t,x+z)-R_{\lambda}g(t,x)|\leq C_{\lambda}\|g\||z|^{\beta},\quad(t,x)\in\mathbb{R}_+ \times \mathbb{R}^{d},\ z\in\mathbb{R}^{d}, \] and $C_{\lambda}$ goes to $0$ as $\lambda\uparrow\infty$. Let $g\in\mathcal{B}_{b}(\mathbb{R}_{+}\times\mathbb{R}^{d})$ be arbitrary. Then \begin{align} & \int_{\mathbb{R}^{d}\backslash\{0\}}\left|R_{\lambda}g(t,x+z)-R_{\lambda}g(t,x)\right|M(t,x,dz)\nonumber \\ & \quad=\int_{\{0<|z|\leq1\}}|R_{\lambda}g(t,x+z)-R_{\lambda}g(t,x)|M(t,x,dz)\nonumber \\ & \qquad+{\displaystyle \int_{\{|z|>1\}}|R_{\lambda}g(t,x+z)-R_{\lambda}g(t,x)|M(t,x,dz)}\label{eq 1: Prop. 4.1}\\ {\displaystyle } & \quad\le C_{\lambda}\|g\|\int_{\{0<|z|\leq1\}}|z|^{\beta}M(t,x,dz)+2\Vert R_{\lambda}g\Vert\int_{\{|z|>1\}}1M(t,x,dz)\nonumber \\ {\displaystyle } & \quad\le(C_{\lambda}+2\lambda^{-1})\|g\|\sup_{t\geq0,x\in\mathbb{R}^{d}}\int_{\mathbb{R}^{d}\backslash\{0\}}1\wedge|z|^{\beta}M(t,x,dz).\nonumber \end{align} So $KR_{\lambda}g$ is well-defined and $||KR_{\lambda}g||\leq k_{\lambda}||g||$. \emph{Case} 2: $1<\alpha<2$. Let $\delta\in(0,1)$ be such that $\beta<\delta+1<\alpha$. According to Lemma \ref{lem:MPAFLdifferrlam}, there exists a constant $\tilde{C}_{\lambda}>0$ such that for all $g\in\mathcal{B}_{b}(\mathbb{R}_{+}\times\mathbb{R}^{d})$, \begin{equation} |\nabla R_{\lambda}g(t,x+z)-\nabla R_{\lambda}g(t,x)|\leq\tilde{C}_{\lambda}\|g\||z|^{\delta},\quad(t,x)\in\mathbb{R}_+ \times \mathbb{R}^{d},\ z\in\mathbb{R}^{d},\label{eq2:MPAFLprop1} \end{equation} and $\tilde{C}_{\lambda}$ goes to $0$ as $\lambda\uparrow\infty$. Let $g\in\mathcal{B}_{b}(\mathbb{R}_{+}\times\mathbb{R}^{d})$. 
For all $(t,x)\in\mathbb{R}_+ \times \mathbb{R}^{d}$ and $z\in\mathbb{R}^{d}$, we have \begin{align} & |R_{\lambda}g(t,x+z)-R_{\lambda}g(t,x)-z\cdot\nabla R_{\lambda}g(t,x)|\nonumber \\ & \quad=\left|\int_{0}^{1}\nabla R_{\lambda}g(t,x+rz)\cdot zdr-z\cdot\nabla R_{\lambda}g(t,x)\right|\nonumber \\ & \quad=\left|\int_{0}^{1}[\nabla R_{\lambda}g(t,x+rz)-\nabla R_{\lambda}g(t,x)]\cdot zdr\right|\nonumber \\ & \quad\le|z|\int_{0}^{1}|\nabla R_{\lambda}g(t,x+rz)-\nabla R_{\lambda}g(t,x)|dr\overset{(\ref{eq2:MPAFLprop1})}{\le}\tilde{C}_{\lambda}\|g\||z|^{\delta+1}.\label{eq1:MPAFLprop1} \end{align} So we obtain \begin{align} & \int_{\mathbb{R}^{d}\backslash\{0\}}\left|R_{\lambda}g(t,x+z)-R_{\lambda}g(t,x)-\mathbf{1}_{\{|z|\le1\}}z\cdot\nabla R_{\lambda}g(t,x)\right|M(t,x,dz)\nonumber \\ & \quad\le\int_{\{0<|z|\leq1\}}|R_{\lambda}g(t,x+z)-R_{\lambda}g(t,x)-z\cdot\nabla R_{\lambda}g(t,x)|M(t,x,dz)\nonumber \\ & \qquad+{\displaystyle \int_{\{|z|>1\}}|R_{\lambda}g(t,x+z)-R_{\lambda}g(t,x)|M(t,x,dz)}\nonumber \\ & \quad{\displaystyle \overset{(\ref{eq1:MPAFLprop1})}{\le}}\tilde{C}_{\lambda}\|g\|\int_{\{0<|z|\leq1\}}|z|^{\delta+1}M(t,x,dz)+2\Vert R_{\lambda}g\Vert\int_{\{|z|>1\}}1M(t,x,dz)\nonumber \\ {\displaystyle } & \quad\le(\tilde{C}_{\lambda}+2\lambda^{-1})\|g\|\sup_{t\geq0,x\in\mathbb{R}^{d}}\int_{\mathbb{R}^{d}\backslash\{0\}}1\wedge|z|^{\beta}M(t,x,dz).\label{esti: a>1, M R_lambda} \end{align} Hence $||KR_{\lambda}g||\leq k_{\lambda}||g||$ for all $g\in\mathcal{B}_{b}(\mathbb{R}_{+}\times\mathbb{R}^{d})$. 
\end{proof} \begin{cor} \label{cor MAPAFL:There-exists-}There exists $\lambda_{0}>0$ such that for all $\lambda\ge\lambda_{0}$, we have $k_{\lambda}<1/2$ and \begin{equation} \left\Vert \sum_{i=0}^{\infty}R_{\lambda}(KR_{\lambda})^{i}g\right\Vert \le\sum_{i=0}^{\infty}\lambda^{-1}(k_{\lambda})^{i}\|g\|\le2\lambda^{-1}\|g\|,\quad g\in\mathcal{B}_{b}(\mathbb{R}_{+}\times\mathbb{R}^{d}).\label{esti2: kr_lambda} \end{equation} \end{cor} According to Corollary \ref{cor MAPAFL:There-exists-}, for each $\lambda\ge\lambda_{0}$, we can define \begin{equation} G_{\lambda}g:=\sum_{i=0}^{\infty}R_{\lambda}(KR_{\lambda})^{i}g,\quad g\in\mathcal{B}_{b}(\mathbb{R}_{+}\times\mathbb{R}^{d}).\label{defi: G_lambda} \end{equation} \begin{rem} \label{rem: contin. of G_lambda}By (\ref{esti: kr_lambda}), (\ref{esti2: kr_lambda}) and (\ref{new new eq 0.5}), we see that if $\lambda\ge\lambda_{0}$ and $g\in\mathcal{B}_{b}(\mathbb{R}_{+}\times\mathbb{R}^{d})$, then the function $\mathbb{R}^{d}\ni x\mapsto G_{\lambda}g(t,x)$ is bounded continuous for each $t\ge0$. \end{rem} We have the following estimate of Krylov's type. \begin{prop} \label{prop: Krylov}Let $T>0$ and $g\in\mathcal{B}_{b}(\mathbb{R}_{+}\times\mathbb{R}^{d})$ be such that $supp(g)\subset[0,T]\times\mathbb{R}^{d}$ and $g\in L^{q}([0,T];L^{p}(\mathbb{R}^{d}))$ with $p,q>0$ and $d/p+\alpha/q<\alpha-\beta$, where $\beta\in(0,\alpha)$ is the constant in Assumption \ref{eq:MAPAFLassonM}. Then for each $\lambda\ge\lambda_{0}$, there exists a constant $l_{\lambda}>0$, independent of $g$ and $T$, such that \begin{equation} \|G_{\lambda}g\|\le l_{\lambda}\|g\|_{L^{q}([0,T];L^{p}(\mathbb{R}^{d}))}.\label{eq: Prop 4.3, to prove} \end{equation} Moreover, the constant $l_{\lambda}$ goes to $0$ as $\lambda\to\infty$. 
\end{prop} \begin{proof} By (\ref{esti: lp of p_t}) and the same proof of \cite[Proposition 3.9 (i)]{jin2015weak}, we can find a constant $c_{\lambda}>0$, independent of $g$ and $T$, such that \begin{equation} \|R_{\lambda}g\|\le c_{\lambda}\|g\|_{L^{q}([0,T];L^{p}(\mathbb{R}^{d}))},\label{esti: R_lambda g} \end{equation} where $c_{\lambda}$ goes to $0$ as $\lambda\to\infty$. For $0<\alpha\leq1$, by (\ref{eq 1: Prop. 4.1}), (\ref{esti: R_lambda g}) and Lemma \ref{lem4:MPAFL} (ii), we have \begin{align} & \int_{\mathbb{R}^{d}\backslash\{0\}}\left|R_{\lambda}g(t,x+z)-R_{\lambda}g(t,x)\right|M(t,x,dz)\nonumber \\ {\displaystyle } & \quad\le N_{\lambda}\|g\|_{L^{q}([0,T];L^{p}(\mathbb{R}^{d}))}\int_{\{0<|z|\leq1\}}|z|^{\beta}M(t,x,dz)\nonumber \\ & \qquad+2c_{\lambda}\|g\|_{L^{q}([0,T];L^{p}(\mathbb{R}^{d}))}\int_{\{|z|>1\}}1M(t,x,dz)\nonumber \\ {\displaystyle } & \quad\le(N_{\lambda}+2c_{\lambda})\|g\|_{L^{q}([0,T];L^{p}(\mathbb{R}^{d}))}\sup_{t\geq0,x\in\mathbb{R}^{d}}\int_{\mathbb{R}^{d}\backslash\{0\}}1\wedge|z|^{\beta}M(t,x,dz).\label{esti: lp, M R_lambda} \end{align} For $1<\alpha<2$, similarly to (\ref{esti: a>1, M R_lambda}), we obtain \begin{align} & \int_{\mathbb{R}^{d}\backslash\{0\}}\left|R_{\lambda}g(t,x+z)-R_{\lambda}g(t,x)-\mathbf{1}_{\{|z|\le1\}}z\cdot\nabla R_{\lambda}g(t,x)\right|M(t,x,dz)\nonumber \\ & \quad\le(\tilde{N}_{\lambda}+2c_{\lambda})\|g\|_{L^{q}([0,T];L^{p}(\mathbb{R}^{d}))}\sup_{t\geq0,x\in\mathbb{R}^{d}}\int_{\mathbb{R}^{d}\backslash\{0\}}1\wedge|z|^{\beta}M(t,x,dz),\label{esti: a>1, lp, M R_lambda} \end{align} where $\tilde{N}_{\lambda}>0$ is the constant from Lemma \ref{lem:MPAFLdifferrlam} (ii). Summarizing (\ref{esti: lp, M R_lambda}) and (\ref{esti: a>1, lp, M R_lambda}), we obtain that for all $\alpha\in(0,2)$, \begin{equation} \|KR_{\lambda}g\|\leq\tilde{c}_{\lambda}\|g\|_{L^{q}([0,T];L^{p}(\mathbb{R}^{d}))},\label{esti: Prop. 
4.3, KR_lambda} \end{equation} where \begin{equation} \tilde{c}_{\lambda}:=\begin{cases} (N_{\lambda}+2c_{\lambda})\sup_{t\geq0,x\in\mathbb{R}^{d}}\int_{\mathbb{R}^{d}\backslash\{0\}}1\wedge|z|^{\beta}M(t,x,dz), & 0<\alpha\le1,\\ (\tilde{N}_{\lambda}+2c_{\lambda})\sup_{t\geq0,x\in\mathbb{R}^{d}}\int_{\mathbb{R}^{d}\backslash\{0\}}1\wedge|z|^{\beta}M(t,x,dz), & 1<\alpha<2. \end{cases}\label{defi: c tilde _lambda} \end{equation} By (\ref{esti: R_lambda g}), (\ref{esti: Prop. 4.3, KR_lambda}) and Proposition \ref{prop MPAFL:For-any-}, we obtain that for all $i\in\mathbb{N}$, \[ \|R_{\lambda}(KR_{\lambda})^{i}g\|\leq c_{\lambda}\left(k_{\lambda}\right)^{i-1}\|KR_{\lambda}g\|\le c_{\lambda}\left(k_{\lambda}\right)^{i-1}\tilde{c}_{\lambda}\|g\|_{L^{q}([0,T];L^{p}(\mathbb{R}^{d}))}, \] which implies that for $\lambda\ge\lambda_{0}$, \begin{align*} \|G_{\lambda}g\|\le\sum_{i=0}^{\infty}\|R_{\lambda}(KR_{\lambda})^{i}g\| & \leq c_{\lambda}\left(1+\sum_{i=1}^{\infty}\tilde{c}_{\lambda}\left(k_{\lambda}\right)^{i-1}\right)\|g\|_{L^{q}([0,T];L^{p}(\mathbb{R}^{d}))}\\ & \le c_{\lambda}\left(1+2\tilde{c}_{\lambda}\right)\|g\|_{L^{q}([0,T];L^{p}(\mathbb{R}^{d}))}. \end{align*} So (\ref{eq: Prop 4.3, to prove}) holds with \begin{equation} l_{\lambda}:=c_{\lambda}\left(1+2\tilde{c}_{\lambda}\right)>0.\label{defi: l_lambda} \end{equation} Since $c_{\lambda}$, $N_{\lambda}$ and $\tilde{N}_{\lambda}$ all converge to $0$ as $\lambda\to\infty$, we see that $\lim_{\lambda\to\infty}l_{\lambda}=0$. \end{proof} \section{Well-posedness of the martingale problem for $\mathcal{L}_{t}$} In this section we prove our main result, namely, that the martingale problem for $\mathcal{L}_{t}$ is well-posed. In view of (\ref{defi: G_lambda}), the uniqueness problem can be solved by standard perturbation arguments. 
To obtain existence, we will first consider smooth approximations of $\mathcal{L}_{t}$ and then construct a solution to the martingale problem for $\mathcal{L}_{t}$ by weak convergence of probability measures. Let $\phi\in C_{0}^{\infty}(\mathbb{R}^{d})$ be such that $0\le\phi\le1$, $\int_{\mathbb{R}^{d}}\phi(x)dx=1$ and $\phi(x)=0$ for $|x|\ge1$. Define $\phi_{n}(x):=n^{d}\phi(nx)$, $x\in\mathbb{R}^{d}$. Given $n\in\mathbb{N}$, define $M_{n}(t,x,\cdot)$ as the kernel obtained by mollifying $M(t,x,\cdot)$ through $\phi_{n}$, that is, \[ M_{n}(t,x,B):=\int_{\mathbb{R}^{d}}M(t,x-z,B)\phi_{n}(z)dz,\quad B\in\mathcal{{B}}(\mathbb{R}^{d}). \] So $M_{n}(t,x,\cdot)$ is a kernel from $\mathbb{R}_+ \times \mathbb{R}^{d}$ to $\mathcal{B}\left(\mathbb{R}^{d}\backslash\{0\}\right)$ and $M_{n}(t,x,\cdot)$ is a Lévy measure on $\mathbb{R}^{d}\backslash\{0\}$ for each $(t,x)\in\mathbb{R}_+ \times \mathbb{R}^{d}$. By Fubini's theorem, we have that for all $(t,x)\in\mathbb{R}_+ \times \mathbb{R}^{d}$ and $n\in\mathbb{N},$ \begin{align} \int_{\mathbb{R}^{d}\backslash\{0\}}1\wedge|y|^{\beta}M_{n}(t,x,dy) & =\int_{\mathbb{R}^{d}}\left(\int_{\mathbb{R}^{d}\backslash\{0\}}1\wedge|y|^{\beta}M(t,x-z,dy)\right)\phi_{n}(z)dz\nonumber \\ & \le\sup_{t\geq0,x\in\mathbb{R}^{d}}\int_{\mathbb{R}^{d}\backslash\{0\}}1\wedge|y|^{\beta}M(t,x,dy)<\infty.\label{condition: sup M_n} \end{align} Define \[ K_{n,t}f(x):=\int_{\mathbb{R}^{d}\backslash\{0\}}[f(x+y)-f(x)-\mathbf{1}_{\alpha>1}\mathbf{1}_{\{|y|\le1\}}y\cdot\nabla f(x)]M_{n}(t,x,dy). \] \begin{lem} \label{lem: delta }Let $f\in C_{b}^{3}(\mathbb{R}^{d})$ be arbitrary. Then for all $(t,x)\in\mathbb{R}_+ \times \mathbb{R}^{d}$, we have \[ \left|K_{n,t}f(x)-K_{t}f\ast\phi_{n}(x)\right|\le4n^{-1}\left\Vert f\right\Vert _{C_{b}^{3}(\mathbb{R}^{d})}\sup_{t,x}\int_{\mathbb{R}^{d}\backslash\{0\}}(1\wedge|h|^{\beta})M(t,x,dh). 
\] \end{lem} \begin{proof} First note that \begin{equation} \int_{\mathbb{R}^{d}}|y|\phi_{n}(y)dy=\int_{\{|y|\le1/n\}}|y|\phi_{n}(y)dy\le n^{-1}.\label{eq, Lemma 5.1: *} \end{equation} Let \begin{align*} \Delta_{n,t}f(x):= & K_{n,t}f(x)-K_{t}f\ast\phi_{n}(x). \end{align*} (i) For the case $0<\alpha\le1$, we have \begin{align} \Delta_{n,t}f(x) & =\int_{\mathbb{R}^{d}\backslash\{0\}}\int_{\mathbb{R}^{d}}[f(x+h)-f(x)]M(t,x-y,dh)\phi_{n}(y)dy\nonumber \\ & \quad-\int_{\mathbb{R}^{d}\backslash\{0\}}\int_{\mathbb{R}^{d}}[f(x-y+h)-f(x-y)]M(t,x-y,dh)\phi_{n}(y)dy.\label{eq, Lemma 5.1: **} \end{align} Since \begin{align*} & \left|f(x+h)-f(x-y+h)-f(x)+f(x-y)\right|\\ & \quad=\left|\int_{0}^{1}\left[\nabla f(x+h-y+ry)-\nabla f(x-y+ry)\right]\cdot ydr\right|\\ & \quad\le2|y|\left(1\wedge|h|\right)\left\Vert f\right\Vert _{C_{b}^{2}(\mathbb{R}^{d})}, \end{align*} it follows from (\ref{eq, Lemma 5.1: **}) that \begin{align*} |\Delta_{n,t}f(x)| & \le2\left\Vert f\right\Vert _{C_{b}^{2}(\mathbb{R}^{d})}\int_{\mathbb{R}^{d}\backslash\{0\}}\int_{\mathbb{R}^{d}}(1\wedge|h|)|y|M(t,x-y,dh)\phi_{n}(y)dy\\ & \le2\left\Vert f\right\Vert _{C_{b}^{2}(\mathbb{R}^{d})}\sup_{t,x}\int_{\mathbb{R}^{d}\backslash\{0\}}(1\wedge|h|^{\beta})M(t,x,dh)\int_{\mathbb{R}^{d}}|y|\phi_{n}(y)dy\\ & {\displaystyle \overset{(\ref{eq, Lemma 5.1: *})}{\le}}2n^{-1}\left\Vert f\right\Vert _{C_{b}^{2}(\mathbb{R}^{d})}\sup_{t,x}\int_{\mathbb{R}^{d}\backslash\{0\}}(1\wedge|h|^{\beta})M(t,x,dh). \end{align*} (ii) For $1<\alpha<2$, we have \begin{align*} \Delta_{n,t}f(x) & =\int_{\mathbb{R}^{d}\backslash\{0\}}\int_{\mathbb{R}^{d}}[f(x+h)-f(x)-\mathbf{1}_{\{|h|\le1\}}h\cdot\nabla f(x)]M(t,x-y,dh)\phi_{n}(y)dy\\ & \quad-\int_{\mathbb{R}^{d}\backslash\{0\}}\int_{\mathbb{R}^{d}}[f(x-y+h)-f(x-y)-\mathbf{1}_{\{|h|\le1\}}h\cdot\nabla f(x-y)]\\ & \qquad\times M(t,x-y,dh)\phi_{n}(y)dy. 
\end{align*} If $|h|>1$, then $\left|f(x+h)-f(x-y+h)-f(x)+f(x-y)\right|\le4\left\Vert f\right\Vert $; for $0<|h|\le1$, we have \begin{align*} & \left|f(x+h)-f(x)-h\cdot\nabla f(x)-f(x-y+h)+f(x-y)+h\cdot\nabla f(x-y)\right|\\ & \quad=\left|\int_{0}^{1}\left[\nabla f(x+rh)-\nabla f(x)-\nabla f(x-y+rh)+\nabla f(x-y)\right]\cdot hdr\right|\\ & \quad=\left|\int_{0}^{1}\left[\int_{0}^{1}\left(\nabla^{2}f(x-y+rh+r'y)-\nabla^{2}f(x-y+r'y)\right)\cdot ydr'\right]\cdot hdr\right|\\ & \quad\le|y||h|^{2}\left\Vert f\right\Vert _{C_{b}^{3}(\mathbb{R}^{d})}. \end{align*} So \begin{align*} |\Delta_{n,t}f(x)| & \le4\left\Vert f\right\Vert _{C_{b}^{3}(\mathbb{R}^{d})}\int_{\mathbb{R}^{d}\backslash\{0\}}\int_{\mathbb{R}^{d}}(1\wedge|h|^{2})|y|M(t,x-y,dh)\phi_{n}(y)dy\\ & \le4\left\Vert f\right\Vert _{C_{b}^{3}(\mathbb{R}^{d})}\sup_{t,x}\int_{\mathbb{R}^{d}\backslash\{0\}}(1\wedge|h|^{\beta})M(t,x,dh)\int_{\mathbb{R}^{d}}|y|\phi_{n}(y)dy\\ & {\displaystyle \overset{(\ref{eq, Lemma 5.1: *})}{\le}}4n^{-1}\left\Vert f\right\Vert _{C_{b}^{3}(\mathbb{R}^{d})}\sup_{t,x}\int_{\mathbb{R}^{d}\backslash\{0\}}(1\wedge|h|^{\beta})M(t,x,dh). \end{align*} \end{proof} \begin{lem} \label{lem: existence L_n}For each $(s,x)\in\mathbb{R}_+ \times \mathbb{R}^{d}$, there exists at least one solution to the martingale problem for $\mathcal{L}_{n,t}=L+K_{n,t}$ starting from $(s,x)$. \end{lem} \begin{proof} To prove the solvability of the martingale problem for $\mathcal{L}_{n,t}$, we use the same argument as in \cite[Theorem~(2.2)]{MR0433614}. Let $\varphi\in C_{b}^{\infty}(\mathbb{R}^{d})$ be such that $0\le\varphi\le1$, $\varphi(y)=0$ for $|y|\le1/2,$ and $\varphi(y)=1$ for $|y|\ge1$. For $0<\delta<1$, let $\varphi_{\delta}(y):=\varphi(y/\delta)$ and define the kernel $M_{n}^{\delta}(t,x,\cdot)$ by \[ M_{n}^{\delta}(t,x,dy):=\varphi_{\delta}(y)M_{n}(t,x,dy). \] Set $c_{\delta}(t,x)=\mathbf{1}_{\alpha>1}\int_{\{|y|\le1\}}yM_{n}^{\delta}(t,x,dy)$. 
Since \begin{align*} c_{\delta}(t,x) & =\mathbf{1}_{\alpha>1}\int_{\{|y|\le1\}}y\varphi_{\delta}(y)M_{n}(t,x,dy)\\ & =\mathbf{1}_{\alpha>1}\int_{\mathbb{R}^{d}}\left(\int_{\{|y|\le1\}}y\varphi_{\delta}(y)M(t,x-z,dy)\right)\phi_{n}(z)dz\\ & =\mathbf{1}_{\alpha>1}\int_{\mathbb{R}^{d}}\left(\int_{\{|y|\le1\}}y\varphi_{\delta}(y)M(t,z,dy)\right)\phi_{n}(x-z)dz, \end{align*} we see that $|\nabla_{x}c_{\delta}(t,x)|$ is bounded on $\mathbb{R}_+ \times \mathbb{R}^{d}$. Hence $c_{\delta}(t,x)$ is globally Lipschitz continuous in $x$. Define the differential operator $A_{t}^{\delta}$ by \[ A_{t}^{\delta}f(x):=\sum_{i,j=1}^{d}a_{ij}\frac{\partial^{2}}{\partial x_{i}\partial x_{j}}f(x)+b\cdot\nabla f(x)-c_{\delta}(t,x)\cdot\nabla f(x). \] By the Lipschitz continuity (in the space variable $x$) of the coefficients of $A_{t}^{\delta}$, there is for each $(s,x)$ a unique solution $\mathbf{{Q}}_{\delta}^{s,x}$ to the martingale problem for $A_{t}^{\delta}$ starting from $(s,x)$, see, e.g., \cite[Theorem 5.1.1 and Corollary 5.1.3]{MR2190038}. By \cite[Theorem 5.1.4]{MR2190038}, the mapping $(s,x)\mapsto\mathbf{{Q}}_{\delta}^{s,x}(E)$ is measurable for all $E\in\mathcal{D}$. Note that $A_{t}^{\delta}f(x)+\int_{\mathbb{R}^{d}\backslash\{0\}}[f(x+y)-f(x)]M_{n}^{\delta}(t,x,dy)=Lf(x)+K_{n,t}^{\delta}f(x)$, where \[ K_{n,t}^{\delta}f(x):=\int_{\mathbb{R}^{d}\backslash\{0\}}[f(x+y)-f(x)-\mathbf{1}_{\alpha>1}\mathbf{1}_{\{|y|\le1\}}y\cdot\nabla f(x)]M_{n}^{\delta}(t,x,dy). \] It follows from \cite[Theorem~(2.1)]{MR0433614} that the martingale problem for $L+K_{n,t}^{\delta}$ is solvable. 
For $f\in C_{0}^{\infty}(\mathbb{R}^{d})$, we have \begin{align*} \left|K_{n,t}^{\delta}f(x)-K_{n,t}f(x)\right| & \le\int_{\{|y|\le\delta\}}\left|f(x+y)-f(x)-\mathbf{1}_{\alpha>1}y\cdot\nabla f(x)\right|M_{n}(t,x,dy)\\ & \le\left\Vert f\right\Vert _{C_{b}^{2}(\mathbb{R}^{d})}\int_{\{|y|\le\delta\}}\left(\mathbf{1}_{\alpha\le1}|y|+\mathbf{1}_{\alpha>1}|y|^{2}\right)M_{n}(t,x,dy)\\ & \le\left\Vert f\right\Vert _{C_{b}^{2}(\mathbb{R}^{d})}\int_{\{|y|\le\delta\}}|y|^{\alpha}M_{n}(t,x,dy)\\ & \le\delta^{\alpha-\beta}\left\Vert f\right\Vert _{C_{b}^{2}(\mathbb{R}^{d})}\int_{\{|y|\le\delta\}}|y|^{\beta}M_{n}(t,x,dy)\\ & {\displaystyle \overset{(\ref{condition: sup M_n})}{\le}}\delta^{\alpha-\beta}\left\Vert f\right\Vert _{C_{b}^{2}(\mathbb{R}^{d})}\sup_{t\geq0,x\in\mathbb{R}^{d}}\int_{\mathbb{R}^{d}\backslash\{0\}}1\wedge|y|^{\beta}M(t,x,dy), \end{align*} which implies that $K_{n,t}^{\delta}f\to K_{n,t}f$ uniformly as $\delta\to0$. The rest of the proof goes in the same way as in \cite[Theorem~(2.2)]{MR0433614}. We omit the details. \end{proof} Recall that $\lambda_{0}>0$ is the constant given in Corollary \ref{cor MAPAFL:There-exists-}. \begin{lem} \label{lem: resolvent L_n}Let $(s,x)\in\mathbb{R}_+ \times \mathbb{R}^{d}$ and $\mathbf{{P}}_{n}^{s,x}$ be a solution to the martingale problem for $\mathcal{L}_{n,t}=L+K_{n,t}$ starting from $(s,x)$. 
Then for any $\lambda\ge\lambda_{0}$ and $g\in\mathcal{B}_{b}(\mathbb{R}_+ \times \mathbb{R}^{d})$, we have \begin{equation} \mathbf{E}_{n}^{s,x}\Big[\int_{s}^{\infty}e^{-\lambda(t-s)}g(t,X_{t})dt\Big]=\sum_{k=0}^{\infty}R_{\lambda}(K_{n}R_{\lambda})^{k}g(s,x),\label{eqressn} \end{equation} where $\mathbf{E}_{n}^{s,x}[\cdot]$ denotes the expectation with respect to the measure $\mathbf{{P}}_{n}^{s,x}$ and $K_{n}R_{\lambda}$ is defined by \begin{align} K_{n}R_{\lambda}g(t,x) & :=\int_{\mathbb{R}^{d}\backslash\{0\}}[R_{\lambda}g(t,x+y)-R_{\lambda}g(t,x)\nonumber \\ & \qquad-\mathbf{1}_{\alpha>1}\mathbf{1}_{\{|y|\le1\}}y\cdot\nabla R_{\lambda}g(t,x)]M_{n}(t,x,dy),\quad(t,x)\in\mathbb{R}_+ \times \mathbb{R}^{d}.\label{defi: KR_lambda-1} \end{align} \end{lem} \begin{proof} For $\lambda>0$ and $f\in\mathcal{B}_{b}(\mathbb{R}_+ \times \mathbb{R}^{d})$, define \[ V_{n}^{\lambda}f:=\mathbf{E}_{n}^{s,x}\Big[\int_{s}^{\infty}e^{-\lambda(t-s)}f(t,X_{t})dt\Big]. \] For $f\in C_{b}^{1,2}(\mathbb{R}_+ \times \mathbb{R}^{d})$, we know that \begin{align*} & f(t,X_{t})-f(s,X_{s})\\ = & ``Martingale"+\int_{s}^{t}(\frac{\partial f}{\partial u}+\mathcal{L}_{n,u}f)(u,X_{u})du. 
\end{align*} Taking expectations of both sides of the above equality gives \begin{equation} \mathbf{E}_{n}^{s,x}[f(t,X_{t})]-f(s,x)=\mathbf{E}_{n}^{s,x}\Big[\int_{s}^{t}(\frac{\partial f}{\partial u}+\mathcal{L}_{n,u}f)(u,X_{u})du\Big].\label{thmunieq0} \end{equation} Multiplying both sides of (\ref{thmunieq0}) by $e^{-\lambda(t-s)}$, integrating with respect to $t$ from $0$ to $\infty$ and then applying Fubini's theorem, we get \begin{align} & \mathbf{E}_{n}^{s,x}\Big[\int_{s}^{\infty}e^{-\lambda(t-s)}f(t,X_{t})dt\Big]\nonumber \\ = & \frac{1}{\lambda}f(s,x)+\mathbf{E}_{n}^{s,x}\Big[\int_{s}^{\infty}e^{-\lambda(t-s)}\int_{s}^{t}\big(\frac{\partial f}{\partial u}+\mathcal{L}_{n,u}f\big)(u,X_{u})dudt\Big]\nonumber \\ = & \frac{1}{\lambda}f(s,x)+\frac{1}{\lambda}\mathbf{E}_{n}^{s,x}\Big[\int_{s}^{\infty}e^{-\lambda(u-s)}\big(\frac{\partial f}{\partial u}+\mathcal{L}_{n,u}f\big)(u,X_{u})du\Big].\label{eq: Fubini} \end{align} Therefore, for $f\in C_{b}^{1,2}(\mathbb{R}_+ \times \mathbb{R}^{d})$, \begin{equation} \lambda V_{n}^{\lambda}f=f(s,x)+V_{n}^{\lambda}\Big(\frac{\partial f}{\partial t}+\mathcal{L}_{n,t}f\Big).\label{thmunieq101} \end{equation} If $g\in C_{b}^{1,2}(\mathbb{R}_+ \times \mathbb{R}^{d})$, then $f:=R_{\lambda}g\in C_{b}^{1,2}(\mathbb{R}_+ \times \mathbb{R}^{d})$ and \begin{equation} {\displaystyle \lambda f(t,y)-Lf(t,y)-\frac{\partial}{\partial t}f(t,y)=g(t,y),\quad(t,y)\in\mathbb{R}_+ \times \mathbb{R}^{d},}\label{eq for para. resol. eq.: MPAFL} \end{equation} see, e.g., the proof of \cite[Proposition~3.8]{jin2015weak}. Substituting this $f$ in (\ref{thmunieq101}), we obtain $V_{n}^{\lambda}g=R_{\lambda}g(s,x)+V_{n}^{\lambda}(K_{n}R_{\lambda}g)$ for $g\in C_{b}^{1,2}(\mathbb{R}_+ \times \mathbb{R}^{d})$. 
If $g\in C_{0}(\mathbb{R}_{+}\times\mathbb{R}^{d})$, namely, $g$ is continuous with compact support, then there exist $g_{k}\in C_{b}^{1,2}(\mathbb{R}_{+}\times\mathbb{R}^{d})$ such that $g_{k}\rightarrow g$ boundedly and uniformly as $k\rightarrow\infty$. \textcolor{black}{It follows from (}\ref{esti: kr_lambda}) that\textcolor{black}{{} $K_{n}R_{\lambda}g_{k}\rightarrow KR_{\lambda}g$ boundedly and pointwise as $k\rightarrow\infty$.} By the dominated convergence theorem, we have \begin{align*} V_{n}^{\lambda}g=\lim_{k\to\infty}V_{n}^{\lambda}g_{k} & =\lim_{k\to\infty}\left\{ R_{\lambda}g_{k}(s,x)+V_{n}^{\lambda}(K_{n}R_{\lambda}g_{k})\right\} \\ & =R_{\lambda}g(s,x)+V_{n}^{\lambda}(K_{n}R_{\lambda}g),\quad g\in C_{0}(\mathbb{R}_{+}\times\mathbb{R}^{d}). \end{align*} Then by a standard monotone class argument, we arrive at \begin{equation} V_{n}^{\lambda}g=R_{\lambda}g(s,x)+V_{n}^{\lambda}(K_{n}R_{\lambda}g),\quad g\in\mathcal{B}_{b}(\mathbb{R}_+ \times \mathbb{R}^{d}).\label{WUSeqlemma43vnl} \end{equation} For $g\in\mathcal{B}_{b}(\mathbb{R}_+ \times \mathbb{R}^{d})$, we thus have \begin{align} V_{n}^{\lambda}g= & R_{\lambda}g(s,x)+V_{n}^{\lambda}(K_{n}R_{\lambda}g)\nonumber \\ {\displaystyle \overset{(\ref{WUSeqlemma43vnl})}{=}} & R_{\lambda}g(s,x)+R_{\lambda}K_{n}R_{\lambda}g(s,x)+V_{n}^{\lambda}(K_{n}R_{\lambda})^{2}g\nonumber \\ = & \cdots=\sum_{k=0}^{i}R_{\lambda}(K_{n}R_{\lambda})^{k}g(s,x)+V_{n}^{\lambda}(K_{n}R_{\lambda})^{i+1}g.\label{eqsnforn} \end{align} Let $k_{\lambda}>0$ be as in (\ref{defi: k_lambda}). By (\ref{condition: sup M_n}) and Proposition \ref{prop MPAFL:For-any-}, we have \[ \|K_{n}R_{\lambda}g\|\le k_{\lambda}\Vert g\Vert,\quad\forall n\in\mathbb{N},\ g\in\mathcal{B}_{b}(\mathbb{R}_+ \times \mathbb{R}^{d}). \] According to Corollary \ref{cor MAPAFL:There-exists-}, we have $k_{\lambda}<1/2$ for $\lambda\ge\lambda_{0}$. 
Therefore, for all $i,n\in\mathbb{N}$, $\lambda\ge\lambda_{0}$ and $g\in\mathcal{B}_{b}(\mathbb{R}_+ \times \mathbb{R}^{d})$, \[ |V_{n}^{\lambda}(K_{n}R_{\lambda})^{i}g|\le\lambda^{-1}\big(k_{\lambda}\big)^{i}\Vert g\Vert\le\lambda^{-1}2^{-i}\Vert g\Vert \] and \[ \left\Vert R_{\lambda}(K_{n}R_{\lambda})^{i}g\right\Vert \le\lambda^{-1}\big(k_{\lambda}\big)^{i}\Vert g\Vert\le\lambda^{-1}2^{-i}\Vert g\Vert. \] Letting $i\to\infty$ in (\ref{eqsnforn}) gives (\ref{eqressn}). This completes the proof. \end{proof} \begin{rem} In view of (\ref{condition: sup M_n}), we can repeat the proof of Proposition \ref{prop: Krylov} to obtain that for each $\lambda\ge\lambda_{0}$, \begin{equation} \left\Vert \sum_{k=0}^{\infty}R_{\lambda}(K_{n}R_{\lambda})^{k}g\right\Vert \le l_{\lambda}\|g\|_{L^{q}([0,T];L^{p}(\mathbb{R}^{d}))},\label{defi 2: l_lambda} \end{equation} where $d/p+\alpha/q<\alpha-\beta$ and $g\in\mathcal{B}_{b}([0,\infty)\times\mathbb{R}^{d})\cap L^{q}([0,T];L^{p}(\mathbb{R}^{d}))$ is an arbitrary function satisfying $supp(g)\subset[0,T]\times\mathbb{R}^{d}$. Indeed, by (\ref{defi: c tilde _lambda}) and (\ref{defi: l_lambda}), the constant $l_{\lambda}>0$ here can be chosen to be the same as in (\ref{eq: Prop 4.3, to prove}). In particular, $l_{\lambda}$ in (\ref{defi 2: l_lambda}) is independent of $n\in\mathbb{N}.$ \end{rem} \begin{cor} \label{cor: krylov esti for p_n}Let $\mathbf{{P}}_{n}^{s,x}$ be as in Lemma \ref{lem: resolvent L_n}. Let $p>(d+\alpha)/(\alpha-\beta)$. For each $T>s$, there exists a constant $C_{T}>0$, which is independent of $n$, such that \[ \mathbf{E}_{n}^{s,x}\Big[\int_{s}^{T}|f(t,X_{t})|dt\Big]\le C_{T}\|f\|_{L^{p}\left([0,T]\times\mathbb{R}^{d}\right)},\quad\forall f\in L^{p}\left([0,T]\times\mathbb{R}^{d}\right). \] \end{cor} \begin{proof} Let $f\in\mathcal{B}_{b}([0,T]\times\mathbb{R}^{d})\cap L^{p}\left([0,T]\times\mathbb{R}^{d}\right)$. 
Applying (\ref{defi 2: l_lambda}) with $p=q>(d+\alpha)/(\alpha-\beta)$, we get \begin{align*} \mathbf{E}_{n}^{s,x}\Big[\int_{s}^{T}|f(t,X_{t})|dt\Big] & \le e^{\lambda_{0}(T-s)}\mathbf{E}_{n}^{s,x}\Big[\int_{s}^{\infty}e^{-\lambda_{0}(t-s)}\mathbf{1}_{[0,T]}(t)|f(t,X_{t})|dt\Big]\\ & \overset{(\ref{eqressn})}{\le}e^{\lambda_{0}(T-s)}\sum_{k=0}^{\infty}R_{\lambda_{0}}(K_{n}R_{\lambda_{0}})^{k}\left(\mathbf{1}_{[0,T]}(t)|f|\right)(s,x)\\ & \overset{(\ref{defi 2: l_lambda})}{\le}l_{\lambda_{0}}e^{\lambda_{0}(T-s)}\|f\|_{L^{p}([0,T]\times\mathbb{R}^{d})}. \end{align*} For a general $f\in L^{p}\left([0,T]\times\mathbb{R}^{d}\right)$, the assertion follows from the monotone convergence theorem. \end{proof} We are now ready to prove Theorem \ref{thm: main}. \subsection*{Proof of Theorem \ref{thm: main}} ``\emph{Existence}'': \textcolor{black}{Let $(s,x)\in\mathbb{R}_+ \times \mathbb{R}^{d}$ be fixed. It follows from Lemma }\ref{lem: existence L_n}\textcolor{black}{{} that there exists a solution $\mathbf{{P}}_{n}^{s,x}$ to the martingale problem for $\mathcal{L}_{n,t}=L+K_{n,t}$ starting from $(s,x)$. } By (\ref{condition: sup M_n}) and \cite[Theorem~(A.1)]{MR0433614}, the family $\{\mathbf{{P}}_{n}^{s,x},\ n\in\mathbb{N}\}$ is tight. Let $\mathbf{{P}}^{s,x}$ be a limit point of $\{\mathbf{{P}}_{n}^{s,x},\ n\in\mathbb{N}\}$. Then there exists a subsequence of $\left(\mathbf{{P}}_{n}^{s,x}\right)_{n\in\mathbb{N}}$ which converges weekly to $\mathbf{{P}}^{s,x}$. For simplicity, we denote this subsequence still by $\left(\mathbf{{P}}_{n}^{s,x}\right)_{n\in\mathbb{N}}$. We next show that $\mathbf{{P}}^{s,x}$ is a solution to the martingale problem for $\mathcal{L}_{t}$ starting from $(s,x)$. Let $f\in C_{0}^{\infty}(\mathbb{R}^{d})$ be arbitrary. By \cite[Theorem~(1.1)]{MR0433614}, it suffices to show that \[ f(X_{t})-\int_{s}^{t}\mathcal{L}_{u}f(X_{u})du \] is a $\mathbf{P}^{s,x}$-martingale after time $s$. 
Suppose $s\le t_{1}\le t_{2}$, $0\le r_{1}\le\cdots\le r_{l}\le t_{1}$ and $g_{1},\cdots,g_{l}\in C_{0}(\mathbb{R}^{d})$, where $l\in\mathbb{N}$. Set $Y=\prod_{j=1}^{l}g_{j}(X_{r_{j}}).$ It reduces to show that \begin{equation} \mathbf{E}^{s,x}\Big[Y\big(f(X_{t_{2}})-f(X_{t_{1}})-\int_{t_{1}}^{t_{2}}\mathcal{L}_{u}f(X_{u})du\big)\Big]=0.\label{eq, thm 5.3: to show} \end{equation} We will complete the proof of (\ref{eq, thm 5.3: to show}) in four steps. Firstly, note that by \cite[~Chap.~3,~Lemma~7.7]{MR838085}, there exists a countable set $I\subset\mathbb{R}_{+}$ such that \begin{equation} \mathbf{P}^{s,x}(X_{t-}=X_{t})=1,\qquad\forall t\in\mathbb{R}_{+}\setminus I.\label{WUSeqthm4545} \end{equation} Since $\mathbb{R}_{+}\setminus I$ is dense in $\mathbb{R}_{+}$ and $t\mapsto X_{t}(\omega)$, $\omega\in D$, is right-continuous, it is enough to show (\ref{eq, thm 5.3: to show}) by additionally assuming that \begin{equation} r_{1},\cdots,r_{l},t_{1},t_{2}\in\mathbb{R}_{+}\setminus I.\label{extra ass, I} \end{equation} So from now on, we assume that (\ref{extra ass, I}) is true. ``\textit{Step 1}'': We establish an estimate of Krylov's type for $\mathbf{P}^{s,x}$. 
Let $p>(d+\alpha)/(\alpha-\beta)$.\textcolor{black}{{} By Corollary }\ref{cor: krylov esti for p_n}, for each $T>s$, there exists a constant $C_{T}>0$ such that \begin{equation} \sup_{n\in\mathbb{N}}\mathbf{E}_{n}^{s,x}\Big[\int_{s}^{T}|f(t,X_{t})|dt\Big]\le C_{T}\|f\|_{L^{p}\left([0,T]\times\mathbb{R}^{d}\right)},\quad\forall f\in L^{p}\left([0,T]\times\mathbb{R}^{d}\right).\label{eq: kry p_n} \end{equation} It follows that for each $T>s$, \begin{equation} \mathbf{E}^{s,x}\Big[\int_{s}^{T}|f(t,X_{t})|dt\Big]\le C_{T}\|f\|_{L^{p}\left([0,T]\times\mathbb{R}^{d}\right)},\quad\forall f\in L^{p}\left([0,T]\times\mathbb{R}^{d}\right).\label{eq: kry P} \end{equation} Indeed, if $f\in C_{0}([0,T]\times\mathbb{R}^{d})$, namely, $f$ is continuous on $[0,T]\times\mathbb{R}^{d}$ with compact support, then (\ref{eq: kry P}) follows from (\ref{eq: kry p_n}) and the weak convergence of $\mathbf{{P}}_{n}^{s,x}$ to $\mathbf{{P}}^{s,x}$. By a standard monotone class argument, we obtain (\ref{eq: kry P}) for all $f\in L^{p}\left([0,T]\times\mathbb{R}^{d}\right)$. ``\textit{Step 2}'': We show that \begin{align} & \lim_{n\to\infty}\mathbf{E}_{n}^{s,x}\Big[Y\big(f(X_{t_{2}})-f(X_{t_{1}})-\int_{t_{1}}^{t_{2}}Lf(X_{u})du\big)\Big]\nonumber \\ & \quad=\mathbf{E}^{s,x}\Big[Y\big(f(X_{t_{2}})-f(X_{t_{1}})-\int_{t_{1}}^{t_{2}}Lf(X_{u})du\big)\Big].\label{eq 3, thm 5.3: to show} \end{align} By Skorokhod's representation theorem, there exists a probability space $(\Omega,\mathcal{A},\mathbf{Q})$ and random elements $\xi,\xi_{1},\cdots,\xi_{n},\cdots:\Omega\to D$ such that $\mathbf{{P}}_{n}^{s,x}=\mathbf{Q}\circ\xi_{n}^{-1}$, $\mathbf{{P}}^{s,x}=\mathbf{Q}\circ\xi^{-1}$ and $d(\xi_{n},\xi)\to0$ $\mathbf{Q}-\mathrm{a.s.}$, where $d$ is the Skorokhod metric on $D$. 
It follows from (\ref{WUSeqthm4545}) and \cite[~Chap.~3,~Prop.~5.2]{MR838085} that \begin{equation} \lim_{n\to\infty}X_{t}(\xi_{n})=X_{t}(\xi)\quad\mathbf{Q}\mbox{-a.s.},\qquad\forall t\in\mathbb{R}_{+}\setminus I.\label{WUSeqthm4546} \end{equation} Let $\mathbf{E}[\cdot]$ denote the expectation with respect to the measure $\mathbf{Q}$. By (\ref{extra ass, I}) and the dominated convergence theorem, we have \begin{align*} & \lim_{n\to\infty}\mathbf{E}\Big[Y(\xi_{n})\Big\{ f(X_{t_{2}}(\xi_{n}))-f(X_{t_{1}}(\xi_{n}))-\int_{t_{1}}^{t_{2}}Lf(X_{u}(\xi_{n}))du\Big\}\Big]\\ & {\displaystyle \quad\overset{(\ref{WUSeqthm4546})}{=}}\mathbf{E}\Big[Y(\xi)\Big\{ f(X_{t_{2}}(\xi))-f(X_{t_{1}}(\xi))-\int_{t_{1}}^{t_{2}}Lf(X_{u}(\xi))du\Big\}\Big], \end{align*} which implies (\ref{eq 3, thm 5.3: to show}). ``\textit{Step 3}'': We show that \begin{equation} \lim_{n\to\infty}\mathbf{E}_{n}^{s,x}\Big[Y\int_{t_{1}}^{t_{2}}K_{n,u}f(X_{u})du\Big]=\mathbf{E}^{s,x}\Big[Y\int_{t_{1}}^{t_{2}}K_{u}f(X_{u})du\Big].\label{eq 2, thm 5.3: to show} \end{equation} Note that $Y$ is bounded. Let $C_{Y}:=\sup_{\omega\in D}|Y(\omega)|<\infty.$ For $r>0$ let $\chi_{r}$ be a continuous non-negative function on $\mathbb{R}^{d}$ with $\chi_{r}(x)=1$ for $|x|\le r$ , $\chi_{r}(x)=0$ for $|x|>r+1$ and $0\le$$\chi_{r}(x)\le1$ for $r<|x|\le r+1$; moreover, we can choose $\chi_{r}$ such that $\chi_{r}$ is monotone in $r$, namely, $\chi_{r_{1}}\le\chi_{r_{2}}$ if $r_{1}\le r_{2}$. Note that $|K_{n,u}f|$ and $|K_{u}f|$ are both bounded, say, by a positive constant $C_{K}$. 
For $i\in\mathbb{N}$, we have \begin{align*} & \left|\mathbf{E}_{i}^{s,x}\Big[Y\int_{t_{1}}^{t_{2}}K_{n,u}f(X_{u})du\Big]-\mathbf{E}_{i}^{s,x}\Big[Y\int_{t_{1}}^{t_{2}}K_{u}f(X_{u})du\Big]\right|\\ & \ \le C_{Y}\mathbf{E}_{i}^{s,x}\Big[\int_{t_{1}}^{t_{2}}|K_{n,u}f-K_{u}f|(X_{u})du\Big]\\ & \ \le C_{Y}\mathbf{E}_{i}^{s,x}\Big[\int_{t_{1}}^{t_{2}}(|K_{n,u}f-K_{u}f\ast\phi_{n}|+|K_{u}f\ast\phi_{n}-K_{u}f|)(X_{u})du\Big]\\ & \ \le C_{Y}\mathbf{E}_{i}^{s,x}\Big[\int_{t_{1}}^{t_{2}}|K_{n,u}f-K_{u}f\ast\phi_{n}|(X_{u})du\Big]+2C_{Y}C_{K}\mathbf{E}_{i}^{s,x}\Big[\int_{t_{1}}^{t_{2}}(1-\chi_{r})(X_{u})du\Big]\\ & \quad+C_{Y}\mathbf{E}_{i}^{s,x}\Big[\int_{t_{1}}^{t_{2}}\chi_{r}(X_{u})|K_{u}f\ast\phi_{n}-K_{u}f|(X_{u})du\Big]\\ & \ {\displaystyle \overset{(\ref{eq: kry p_n})}{\le}}t_{2}C_{Y}\|K_{n,u}f-K_{u}f\ast\phi_{n}\|+2C_{Y}C_{K}\sup_{i\in\mathbb{N}}\mathbf{E}_{i}^{s,x}\Big[\int_{t_{1}}^{t_{2}}(1-\chi_{r})(X_{u})du\Big]\\ & \quad+C_{Y}C_{t_{2}}\left\Vert \chi_{r}(K_{u}f\ast\phi_{n}-K_{u}f)\right\Vert _{L^{p}([0,t_{2}]\times\mathbb{R}^{d})}\\ & \ =:J_{1}+J_{2}+J_{3}. \end{align*} For any given $\epsilon_{1}>0$, by dominated convergence theorem, we can find sufficiently large $r_{0}>0$ such that \begin{equation} \mathbf{E}^{s,x}\Big[\int_{t_{1}}^{t_{2}}(1-\chi_{r_{0}})(X_{u})du\Big]<\epsilon_{1}.\label{esti 1, x_R} \end{equation} By the weak convergence of $\mathbf{{P}}_{i}^{s,x}$ to $\mathbf{{P}}^{s,x}$, we have \[ \lim_{i\to\infty}\mathbf{E}_{i}^{s,x}\Big[\int_{t_{1}}^{t_{2}}(1-\chi_{r_{0}})(X_{u})du\Big]=\mathbf{E}^{s,x}\Big[\int_{t_{1}}^{t_{2}}(1-\chi_{r_{0}})(X_{u})du\Big]. 
\] So there exists $i_{0}\in\mathbb{N}$ such that \begin{equation} \sup_{i>i_{0}}\mathbf{E}_{i}^{s,x}\Big[\int_{t_{1}}^{t_{2}}(1-\chi_{r_{0}})(X_{u})du\Big]\le2\epsilon_{1}.\label{esti 2, x_R} \end{equation} Similarly to (\ref{esti 1, x_R}), for $i=1,2,\cdots,i_{0}$, we can find $r_{1}>r_{0}$ such that \begin{equation} \sup_{1\le i\le i_{0}}\mathbf{E}_{i}^{s,x}\Big[\int_{t_{1}}^{t_{2}}(1-\chi_{r_{1}})(X_{u})du\Big]<\epsilon_{1}.\label{esti 3, x_R} \end{equation} Combining (\ref{esti 2, x_R}) and (\ref{esti 3, x_R}) and noting that $\chi_{r}$ is non-decreasing in $r$, we get \[ \sup_{i\in\mathbb{N}}\mathbf{E}_{i}^{s,x}\Big[\int_{t_{1}}^{t_{2}}(1-\chi_{r})(X_{u})du\Big]<3\epsilon_{1},\quad r\ge r_{1}. \] Hence we have shown that $\lim_{r\to\infty}J_{2}=0.$ By Lemma \ref{lem: delta }, we have $J_{1}\to0$ as $n\to\infty$. It is also easy to see that $J_{3}\to0$ as $n\to\infty$. With a simple ``$\epsilon-\delta$''-argument, we obtain \begin{equation} \lim_{n\to\infty}\left|\mathbf{E}_{i}^{s,x}\Big[Y\int_{t_{1}}^{t_{2}}K_{n,u}f(X_{u})du\Big]-\mathbf{E}_{i}^{s,x}\Big[Y\int_{t_{1}}^{t_{2}}K_{u}f(X_{u})du\Big]\right|=0,\label{neweqek-1} \end{equation} and the convergence is uniform with respect to $i\in\mathbb{N}.$ Similarly to (\ref{neweqek-1}), we have \begin{equation} \lim_{n\to\infty}\left|\mathbf{E}^{s,x}\Big[Y\int_{t_{1}}^{t_{2}}K_{n,u}f(X_{u})du\Big]-\mathbf{E}^{s,x}\Big[Y\int_{t_{1}}^{t_{2}}K_{u}f(X_{u})du\Big]\right|=0.\label{neweqesx} \end{equation} By (\ref{neweqek-1}) and (\ref{neweqesx}), for any given $\epsilon>0$, we can find $n_{1}\in\mathbb{N}$, which is independent of $i$, such that for all $n,m\ge n_{1}$ and $i\in\mathbb{N}$, \[ \left|\mathbf{E}^{s,x}\Big[Y\int_{t_{1}}^{t_{2}}K_{n,u}f(X_{u})du\Big]-\mathbf{E}^{s,x}\Big[Y\int_{t_{1}}^{t_{2}}K_{u}f(X_{u})du\Big]\right|<\epsilon \] and \[ \left|\mathbf{E}_{i}^{s,x}\Big[Y\int_{t_{1}}^{t_{2}}K_{n,u}f(X_{u})du\Big]-\mathbf{E}_{i}^{s,x}\Big[Y\int_{t_{1}}^{t_{2}}K_{m,u}f(X_{u})du\Big]\right|<\epsilon. 
\] Similarly to (\ref{eq 3, thm 5.3: to show}), there exists $n_{2}\in\mathbb{N}$ such that for $n\ge n_{2}$, \[ \left|\mathbf{E}_{n}^{s,x}\Big[Y\int_{t_{1}}^{t_{2}}K_{n_{1},u}f(X_{u})du\Big]-\mathbf{E}^{s,x}\Big[Y\int_{t_{1}}^{t_{2}}K_{n_{1},u}f(X_{u})du\Big]\right|<\epsilon. \] If $n\ge\sup\{n_{1},n_{2}\}$, then \begin{align*} & \left|\mathbf{E}_{n}^{s,x}\Big[Y\int_{t_{1}}^{t_{2}}K_{n,u}f(X_{u})du\Big]-\mathbf{E}^{s,x}\Big[Y\int_{t_{1}}^{t_{2}}K_{u}(X_{u})du\Big]\right|\\ \le & \left|\mathbf{E}_{n}^{s,x}\Big[Y\int_{t_{1}}^{t_{2}}K_{n,u}f(X_{u})du\Big]-\mathbf{E}_{n}^{s,x}\Big[Y\int_{t_{1}}^{t_{2}}K_{n_{1},u}f(X_{u})du\Big]\right|\\ & \ +\left|\mathbf{E}_{n}^{s,x}\Big[Y\int_{t_{1}}^{t_{2}}K_{n_{1},u}f(X_{u})du\Big]-\mathbf{E}^{s,x}\Big[Y\int_{t_{1}}^{t_{2}}K_{n_{1},u}f(X_{u})du\Big]\right|\\ & \quad\ +\left|\mathbf{E}^{s,x}\Big[Y\int_{t_{1}}^{t_{2}}K_{n_{1},u}f(X_{u})du\Big]-\mathbf{E}^{s,x}\Big[Y\int_{t_{1}}^{t_{2}}K_{u}f(X_{u})du\Big]\right|\le3\epsilon. \end{align*} So (\ref{eq 2, thm 5.3: to show}) is true. ``\textit{Step 4}'': We finally prove (\ref{eq, thm 5.3: to show}) under the condition (\ref{extra ass, I}). Since $\mathbf{P}_{n}^{s,x}$ solves the martingale problem for $\mathcal{L}_{n,t}$, it follows that \begin{equation} \mathbf{E}_{n}^{s,x}\Big[Y\big(f(X_{t_{2}})-f(X_{t_{1}})-\int_{t_{1}}^{t_{2}}\mathcal{L}_{u}f(X_{u})du\big)\Big]=0.\label{eqmartingale} \end{equation} So (\ref{eq, thm 5.3: to show}) follows from (\ref{eqmartingale}), (\ref{eq 3, thm 5.3: to show}) and (\ref{eq 2, thm 5.3: to show}). This completes the proof of existence. ``\emph{Uniqueness}'': Let $(s,x)\in\mathbb{R}_+ \times \mathbb{R}^{d}$ be arbitrary and $\mathbf{{\tilde{P}}}^{s,x}$ be a solution to the martingale problem for $\mathcal{L}_{t}$ starting from $(s,x)$. 
For each $f\in C_{b}^{1,2}(\mathbb{R}_{+}\times\mathbb{R}^{d})$, \[ f(t,X_{t})-f(s,X_{s})-{\displaystyle \int_{s}^{t}\left(\frac{\partial f}{\partial u}+\mathcal{L}_{u}f\right)(u,X_{u})du} \] is an $\mathcal{F}_{t}$-martingale after $s$ with respect to the measure $\mathbf{{\tilde{P}}}^{s,x}$. For any $s\leq t_{1}<t,\ C\in\mathcal{F}_{t_{1},}$ we thus have \begin{equation} \mathbf{{\tilde{E}}}^{s,x}[\mathbf{{1}}_{C}f(t,X_{t})]=\mathbf{{\tilde{E}}}^{s,x}[\mathbf{{1}}_{C}f(t_{1},X_{t_{1}})]+\mathbf{{\tilde{E}}}^{s,x}\left[\mathbf{{1}}_{C}\int_{t_{1}}^{t}\left(\frac{\partial f}{\partial u}+\mathcal{L}_{u}f\right)(u,X_{u})du]\right].\label{eq5.1:MPAFL} \end{equation} Similarly to (\ref{eq: Fubini}), by multiplying both sides of (\ref{eq5.1:MPAFL}) by $\exp(-\lambda(t-t_{1}))$ and then integrating with respect to $t$ from $t_{1}$ to $\infty$, we get \begin{align*} & \mathbf{{\tilde{E}}}^{s,x}\left[\mathbf{{1}}_{C}{\displaystyle \int_{t_{1}}^{\infty}e^{-\lambda(t-t_{1})}f(t,X_{t})dt}\right]\\ & \quad=\lambda^{-1}\mathbf{{\tilde{E}}}^{s,x}\left[\mathbf{{1}}_{C}f(t_{1},X_{t_{1}})\right]+\lambda^{-1}\mathbf{{\tilde{E}}}^{s,x}\left[\mathbf{{1}}_{C}{\displaystyle \int_{t_{1}}^{\infty}e^{-\lambda(u-t_{1})}\left(\frac{\partial f}{\partial u}+\mathcal{L}_{u}f\right)(u,X_{u})du}\right]. \end{align*} Therefore, \begin{align} & \mathbf{{\tilde{E}}}^{s,x}\left[\int_{t_{1}}^{\infty}e^{-\lambda(t-t_{1})}f(t,X_{t})dt|\mathcal{F}_{t_{1}}\right]\nonumber \\ \text{} & {\displaystyle \quad=\lambda^{-1}f(t_{1},X_{t_{1}})+\lambda^{-1}\mathbf{{\tilde{E}}}^{s,x}\left[\int_{t_{1}}^{\infty}e^{-\lambda(t-t_{1})}\left(\frac{\partial f}{\partial t}+\mathcal{L}_{t}f\right)(t,X_{t})dt|\mathcal{F}_{t_{1}}\right].}\label{eq5.2:MPAFL} \end{align} If $g\in C_{b}^{1,2}(\mathbb{R}_{+}\times\mathbb{R}^{d})$, then $f:=R_{\lambda}g\in C_{b}^{1,2}(\mathbb{R}_{+}\times\mathbb{R}^{d})$ and (\ref{eq for para. resol. eq.: MPAFL}) holds. 
Substituting this $f$ in (\ref{eq5.2:MPAFL}), we obtain \begin{align} & \mathbf{{\tilde{E}}}^{s,x}\left[\int_{t_{1}}^{\infty}e^{-\lambda(t-t_{1})}g(t,X_{t})dt|\mathcal{F}_{t_{1}}\right]\nonumber \\ & \quad=R_{\lambda}g(t_{1},X_{t_{1}})+\mathbf{{\tilde{E}}}^{s,x}\left[{\displaystyle \int_{t_{1}}^{\infty}e^{-\lambda(t-t_{1})}KR_{\lambda}g(t,X_{t})dt|\mathcal{F}_{t_{1}}}\right].\label{eq5.3:MPAFL} \end{align} With a similar argument as in the proof of (\ref{WUSeqlemma43vnl}), we see that (\ref{eq5.3:MPAFL}) is true for all $g\in\mathcal{B}_{b}(\mathbb{R}_{+}\times\mathbb{R}^{d})$. If $g\in\mathcal{B}_{b}(\mathbb{R}_{+}\times\mathbb{R}^{d})$, then $KR_{\lambda}g\in\mathcal{B}_{b}(\mathbb{R}_{+}\times\mathbb{R}^{d})$. By (\ref{eq5.3:MPAFL}) and a simple iteration, we obtain for each $k\in\mathbb{{N}}$, \begin{align*} & \mathbf{{\tilde{E}}}^{s,x}\left[\int_{t_{1}}^{\infty}e^{-\lambda(t-t_{1})}g(t,X_{t})dt|\mathcal{F}_{t_{1}}\right]\\ & \quad=\sum_{i=0}^{k}R_{\lambda}(KR_{\lambda})^{i}g(t_{1},X_{t_{1}})+\mathbf{{\tilde{E}}}^{s,x}\left[{\displaystyle \int_{t_{1}}^{\infty}e^{-\lambda(t-t_{1})}(KR_{\lambda})^{k+1}g(t,X_{t})dt|\mathcal{F}_{t_{1}}}\right]. \end{align*} By Proposition \ref{prop MPAFL:For-any-} and Corollary \ref{cor MAPAFL:There-exists-}, we see that \begin{equation} \mathbf{{\tilde{E}}}^{s,x}\left[\int_{t_{1}}^{\infty}e^{-\lambda(t-t_{1})}g(t,X_{t})dt|\mathcal{F}_{t_{1}}\right]=\sum_{i=0}^{\infty}R_{\lambda}(KR_{\lambda})^{i}g(t_{1},X_{t_{1}})=G_{\lambda}g(t_{1},X_{t_{1}})\label{eq5.5:MPAFL} \end{equation} for all $\lambda\ge\lambda_{0}$ and $g\in\mathcal{B}_{b}(\mathbb{R}_{+}\times\mathbb{R}^{d})$. Note that the choice of $t_{1}\in[s,\infty)$ in (\ref{eq5.5:MPAFL}) is arbitrary. It follows from (\ref{eq5.5:MPAFL}), Remark \ref{rem: contin. of G_lambda} and \cite[Lemma~3.1]{MR736974} that there exists at most one solution to the martingale problem for $\mathcal{L}_{t}$ starting from $(s,x)$. \qed \end{document}
\begin{document} \title{Operator-Lipschitz estimates for the singular value functional calculus} \begin{abstract} We consider a functional calculus for compact operators, acting on the singular values rather than the spectrum, which appears frequently in applied mathematics. Necessary and sufficient conditions for this singular value functional calculus to be Lipschitz-continuous with respect to the Hilbert-Schmidt norm are given. We also provide sharp constants. \end{abstract} \section{Introduction} For simplicity we restrict attention to the finite-dimensional matrix case in this introduction. Let $A$ be a matrix with singular value decomposition $A=U\Sigma V^*$, and consider the operation of changing the singular values by applying some function $\F:\mathbb{R}_+\rightarrow \C$ to $\Sigma$, thus yielding a new matrix which we will call $\F_s(A)$, where the subscript $s$ indicates that we are considering a \textit{singular value functional calculus.} For matrices with non-trivial nullspaces, it is easy to see that the condition $\F(0)=0$ is necessary for $\F_s(A)$ to be well defined (Section \mathsf{Re}f{s2}). Let us also remark that, in case $\F$ is a function defined on $\mathbb{C}$ and $A$ is a normal matrix, then $\F(A)$ is defined by the classical functional calculus (CFC) based on the spectral theorem. However, it is rarely the case that $\F(A)=\F_s(A)$ except when $A$ is positive (Section \mathsf{Re}f{s2}). The operation $A\mapsto \F_s(A)$ is commonly seen in applied mathematics, since it often appears as the proximal operator \cite{rw} in Matrix Optimization Problems and Compressed Sensing. For applications in Computer Vision, Structure from Motion, Photometric Stereo and Optical Flow, see \cite{larsson2,larsson1} and the references therein. See \cite{chu} for its use in alternating projection schemes and \cite{anderssonalternating} for a problem in financial mathematics \cite{higham}. 
For applications in Control Systems see \cite{fazel}, MRI see \cite{candesMRI}, and for applications to complex frequency estimation see \cite{actwIEEE}. More examples can be found in \cite{ding, recht}. When studying convergence of algorithms utilizing the singular value functional calculus, it is important to have bounds for the distance $\|\F_s(A)-\F_s(B)\|_{F}$ given $\|A-B\|_{F}$, where the subscript $F$ indicates that we are dealing with the Frobenius norm, (which is the same as the Hilbert-Schmidt norm $\|\cdot\|_{\mathcal{S}_2}$, but we will follow standard conventions and use this notation only for the infinite-dimensional case). We thus define \begin{equation}\label{SiOpLip}\|\F_s\|_{\Lip}=\sup_{A,B}\frac{\|\F_s(A)-\F_s(B)\|_{F}}{\|A-B\|_{F}}.\end{equation} In Section \mathsf{Re}f{s4} we shall show that this supremum turns out not to depend on the dimension of the matrices $A,B$, which is why this is omitted from the notation. If one restricts attention to positive matrices in the above supremum, then it is known that it equals $\|\F\|_{\Lip}$. This follows from more general work concerning the CFC-case \cite{kittaneh}, a result which is also presented with a very simple proof in \cite[Lemma VII.5.4]{bhatia} and was rediscovered in \cite{wihler}. We remark that in the CFC-case, results concerning H\"older and Lipschitz continuity with respect to various operator norm have a long history, see e.g. \cite{potapov,farforovskaya,aleksandrov} and the references therein. The main result of this paper is the following: \begin{theorem}\label{aoa} Let $\F:\mathbb{\mathbb{R}}_+\rightarrow \C$ be continuous with $\F(0)=0$. Then $\|\F_s\|_{\Lip}\leq \sqrt{2}\|\F\|_{\Lip}$ and the constant $\sqrt{2}$ is the best possible (if it is to hold for all Lipschitz functions $\F$). 
However, if $\F$ is real valued, then $$\|\F_s\|_{\Lip}= \|\F\|_{\Lip}.$$ \end{theorem} For an interesting related result also having $\sqrt{2}$ as the best constant in the complex case and $1$ in the real case, see \cite{araki}. In terms of applications, the study of real valued functions is most relevant. Based on general arguments \cite{ding}, one can show that $\|\F_s\|_{\Lip}$ should be bounded in terms of $\|\F\|_{\Lip}$, but the fact that the constant is 1 is rather surprising and is likely to have an impact on algorithmic design involving the singular value functional calculus. \section{The singular value functional calculus}\label{s2} Let $\mathcal{H}$ be a separable Hilbert space of dimension $d$, $1\leq d\leq \infty$, and suppose that $A : \mathcal{H} \to \mathcal{H}$ is a compact operator, $A \in \mathcal{B}_0$. Then $A$ has singular value decomposition; there exist an orthonormal basis $(u_n)_{n=0}^{d}$ of $\mathcal{H}$ and an orthonormal sequence $(v_n)_{n=0}^{d}$ such that \begin{equation} \label{eq:svddecomp} A = \sum_{n=0}^{d} s_n(A) \, u_n \otimes v_n, \end{equation} where $s_n(A)$ are the singular values of $A$. In other words $$A h = \sum_{n=0}^d s_n(A) \langle h, v_n \rangle u_n. $$ An equivalent formulation of \eqref{eq:svddecomp} is that $A$ has a polar decomposition $A = U \Sigma$, where $U$ is a partial isometry and $\Sigma = |A|$ is a positive diagonalizable operator such that $\Sigma v_n = s_n(A) v_n$. We will primarily be concerned with operators $A$ in the Hilbert-Schmidt class $\mathcal{S}_2$, i.e. compact operators such that $$\|A\|_{\mathcal{S}_2}^2=\sum_{n=0}^d s_n^2(A)<\infty.$$ Following standard conventions we denote this norm by $\|A\|_F^2$ whenever $d<\infty$, in which case it coincides with the $l^2$-norm of the elements of the matrix representation of $A$ in any orthonormal basis. 
Given any continuous function $\F:\mathbb{R}_+\rightarrow\mathbb{C}$ such that $\F(0) = 0$ we define $\F_s: \mathcal{B}_0 \to \mathcal{B}_0$ by \begin{equation} \label{eq:funcdef} \F_s(A) = \sum_{n=1}^\infty \F(s_n(A)) \, u_n \otimes v_n, \end{equation} or equivalently that $\F_s(A) = U \F(\Sigma)$, where $\F(\Sigma)$ is the operator such that $\F(\Sigma) v_n = \F(s_n(A)) v_n$. The subscript $s$ indicates that we are dealing with a ``singular value functional calculus''. To see that it is well defined, note that if $s=s_n(A)\neq 0$ and $h\in \ker (s^2 I-A^*A)$, then $$\F_s(A)h=\frac{\F(s)}{s}Ah.$$ However, note that if $s=0$ and we were to allow $\F(0)\neq 0$ it is clear that $\F_s(A) h$ could depend on the particular choice of $(u_n)_n$ and $(v_n)_n$. We remark that $\F$ only needs to be defined on $\mathbb{R}^+$ for $\F_s(A)$ to exist, and obviously $$\F_s(A)=\F(A)$$ for all positive operators $A$. For normal operators, the situation is more complex. Consider for example \begin{equation*}A=\left( \begin{array}{cc} 0 & 1 \\ 1 & 0 \\ \end{array} \right)=\left( \begin{array}{cc} \tiny{\frac{1}{\sqrt{2}}} & \tiny{\frac{1}{\sqrt{2}}} \\ \tiny{\frac{1}{\sqrt{2}}} & -\tiny{\frac{1}{\sqrt{2}}} \\ \end{array} \right)\left( \begin{array}{cc} 1 & 0 \\ 0 & -1 \\ \end{array} \right)\left( \begin{array}{cc} \tiny{\frac{1}{\sqrt{2}}} & \tiny{\frac{1}{\sqrt{2}}} \\ \tiny{\frac{1}{\sqrt{2}}} & -\tiny{\frac{1}{\sqrt{2}}} \\ \end{array} \right), \end{equation*} which has singular value decomposition $U\Sigma V^*=AII$. Then $$\F_s(A)=A\F(I)I=\left( \begin{array}{cc} 0 & \F(1) \\ \F(1) & 0 \\ \end{array} \right)$$ whereas $\F(A)$ is not even defined in an the classical functional calculus, due to the negative eigenvalue $-1$. Moreover, if $\F$ is defined on $\mathbb{R}$ we clearly have $\F(A)\neq \F_s(A)$ unless $\F(1)=-\F(-1)$. This is further highlighted by the next proposition. \begin{proposition} Let $\F:\C\rightarrow\C$ be a continuous function with $\F(0) = 0$. 
Then $\F_s(A)=\F(A)$ for a normal compact operator $A : \mathcal{H} \to \mathcal{H}$ if and only if $\F$ satisfies $$\F(\lambda)=\frac{\lambda\F(|\lambda|)}{|\lambda|}$$ for every non-zero eigenvalue $\lambda$ of $A$. \end{proposition} \begin{proof} Since $A$ is a compact normal operator, there is an orthonormal basis $(v_n)_n$ of $\mathcal{H}$ such that \begin{equation} \label{eq:spectraldecomp} A = \sum_{n=1}^\infty \lambda_n \, v_n \otimes v_n, \end{equation} where $\lambda_n$ are the eigenvalues of $A$, implicitly omitting the zero eigenvalues from the sum \eqref{eq:spectraldecomp}. On the other hand, an SVD of $A$ is given by $$ A = \sum_{n=1}^\infty |\lambda_n| \, u_n \otimes v_n,$$ where $u_n = (\lambda_n/|\lambda_n|)v_n$. Therefore, we obtain that $$\F(A) = \sum_{n=1}^\infty \F(\lambda_n) \, v_n \otimes v_n,$$ while $$ \F_s(A) = \sum_{n=1}^\infty \frac{\lambda_n \F(|\lambda_n|)}{|\lambda_n|} \, v_n \otimes v_n,$$ proving the proposition. \end{proof} \begin{corollary} Let $p$ be a polynomial without constant term. Then $p_s(A)=p(A)$ for all normal compact operators if and only if $p(z)=\alpha z$, $\alpha\in\C$. \end{corollary} \section{Complex doubly substochastic matrices}\label{s3} This section contains the main technical tool for the proof of Theorem \ref{aoa}. We say that a square matrix is complex doubly substochastic (cdss) if for each row and column, the $\ell^1$-sum of entries is less than or equal to 1. (In \cite{simon}, such matrices are simply called doubly substochastic. However, most other sources using this term include a non-negativity condition on the elements, which is why we have chosen to clarify by adding \textit{complex}.) Our main interest in cdss-matrices stems from the fact that if $U$ and $V$ are unitary matrices then $U\odot V$ is cdss, where $\odot$ denotes the Hadamard product, as follows immediately by the Cauchy-Schwarz inequality.
Let $\pi$ denote any permutation of length $n$ and let $\gamma$ be a vector of the same length containing unimodular entries. We denote by $M_{\pi,\gamma}$ the $n \times n$ matrix whose $(j,\pi_j)$'th value is $\gamma_j$, all other entries zero. The following lemma is likely known, but lacking a reference we provide a simple proof based on the Birkhoff-von Neumann theorem (see e.g. \cite{bhatia}). \begin{lemma} \label{lem:birk} An $n \times n$ matrix is complex doubly substochastic if and only if it lies in the convex hull of $\{M_{\pi,\gamma}:\pi,\gamma\}$. \end{lemma} \begin{proof} Let $\mathcal{V}$ denote the set of cdss $n \times n$-matrices. It is clearly a closed convex set. We shall show that the extreme points of $\mathcal{V}$ are precisely the matrices of the form $M_{\pi,\gamma}$ for some permutation $\pi$ and vector $\gamma$. The lemma then follows immediately by the Krein-Milman theorem (or rather, Minkowski's theorem on convex sets \cite{minkowski}, since we are in Euclidean space.) First, let $\pi$ and $\gamma$ be given. Let $m_{ij}$ denote the $ij$:th entry of $M_{\pi, \gamma}$. Suppose that $M_{\pi, \gamma} =(A+B)/2$ for matrices $A, B \in \mathcal{V}$ with entries $a_{ij}$ and $b_{ij}$, respectively. Suppose that $|m_{ik}| = 1$. Since $|a_{ik}| \leq 1$ and $|b_{ik}| \leq 1$ this forces that $a_{ik} = b_{ik} = m_{ik}$ and hence that $a_{ij} = b_{ij} = 0$ for $1 \leq j \leq n$, $j \neq k$. Therefore $A = B = M_{\pi, \gamma}$, and thus $M_{\pi, \gamma}$ is an extreme point of $\mathcal{V}$. For the converse, let $M \in \mathcal{V}$, with entries $m_{ij}$, be an extreme point. Consider first the case where $M$ has a row with sum of absolute values strictly less than 1, say the $p:$th row. Then clearly \begin{equation*} \label{eq:rowless1} \sum_{i=1}^n\sum_{j=1}^n |m_{ij}| < n, \end{equation*} which, upon changing the order of summation, shows that there is also a column with a sum of absolute values strictly less than 1, say the $q$:th column.
Let $E$ be the matrix with its $pq$:th entry $\varepsilon$, all other entries $0$, and let $A = M - E$ and $B = M + E$. For sufficiently small $\varepsilon$ we find that $A$ and $B$ are cdss, which contradicts the fact that $M$ is an extreme point of $\mathcal{V}$, since $M = (A+B)/2$. We have thus shown that the extreme point $M$ has sums of absolute values of all rows and columns equal to 1. In other words, the matrix $M^|$ with entries $|m_{ij}|$ is a doubly stochastic matrix. By the Birkhoff-von Neumann theorem, $M^|$ is either of the form $M_{\pi, \gamma}$ for a permutation $\pi$ and $\gamma = (1, 1, \ldots, 1)$ or it is not an extreme point and thus of the form $M^| = (A+B)/2$ for two doubly stochastic matrices $A$ and $B$, $A \neq B$. In the former case, $M$ is of the form $M_{\pi, \tilde{\gamma}}$ for a suitable sequence $\tilde{\gamma}$. In the latter case, as seen by adjusting the arguments of the entries of $A$ and $B$, $M$ is clearly not an extreme point of $\mathcal{V}$, a contradiction. \end{proof} \section{Operator Lipschitz estimates}\label{s4} Throughout we let $\F:\mathbb{R}_+\rightarrow\mathbb{C}$ be Lipschitz with $\F(0)=0$. Let $\lambda_1, \lambda_2 \in \C$ be two scalars interpreted as operators on $\mathcal{H}=\mathbb{C}$. Then $\F_s(\lambda_j) = \frac{\lambda_j}{|\lambda_j|}\F(|\lambda_j|)$ for $j=1,2$. Hence a Lipschitz condition $\|\F_s(A) - \F_s(B)\|_{\mathcal{S}_2} \leq C \|A - B\|_{\mathcal{S}_2} $ implies that \begin{equation}\label{optimal}|c_1 \F(x) - c_2\F(y)| \leq C |c_1 x - c_2 y|\end{equation} for all $x, y \geq 0$ and $c_1, c_2\in \mathbb{T}$, where $\mathbb{T}$ denotes the unit circle in $\mathbb{C}$. This motivates the following definition \begin{equation}\label{apa} \|\F\|_{\Lip-\mathbb{C}}=\sup_{x,y \in \mathbb{R}_+,~c\in\mathbb{T}}\frac{| \F(x) - c\F(y)|}{| x - c y|}. \end{equation} \begin{proposition} \label{lem:lip} Suppose that $\F: \mathbb{R}_+ \to \C$ satisfies $\F(0) = 0$.
Then \begin{equation} \label{eq:rotlipM} \|\F\|_{\Lip-\mathbb{C}} \leq \sqrt{2}\|\F\|_{\Lip} \end{equation} where $\sqrt{2}$ is optimal. If $\F$ is real-valued, $\F: \mathbb{R}_+ \to \mathbb{R}$, it holds that \begin{equation} \label{eq:rotlipreal} \|\F\|_{\Lip-\mathbb{C}} =\|\F\|_{\Lip} \end{equation} \end{proposition} \begin{proof} We may clearly assume that $\|\F\|_{\Lip}=1$. Suppose first that $\F$ is real-valued and write $c = a + ib$. Noting that the hypotheses imply that $|\F(x)| \leq x$ for all $x$, we have $$ |\F(x)- c\F(y)|^2 = |\F(x) - \F(y)|^2 + 2(1-a) \F(x)\F(y) \leq |x-y|^2 + 2(1-a)xy = |x-cy|^2,$$ which shows that $\|\F\|_{\Lip-\mathbb{C}} \leq\|\F\|_{\Lip}$. Since the reverse inequality is obvious, \eqref{eq:rotlipreal} follows. For $\F$ complex-valued, the computation is more involved. The inequality \eqref{eq:rotlipM} is clearly equivalent with \begin{equation} \label{eq:rotlip} |\F(x) - c\F(y)| \leq \sqrt{2}|x - cy|, \quad x,y \in \mathbb{R}_+, ~c\in\mathbb{T}, \end{equation} (still assuming that $\|\F\|_{\Lip}=1$). Fix $c$ and suppose without loss of generality that $y \leq x$. If $\F(y) = 0$ there is nothing to prove since $|x-y| \leq |x-cy|$. Similarly, if $\F(x)=\F(y)$, \eqref{eq:rotlip} follows from the fact that $|\F(y)| \leq y$. If $\F(y) \neq 0$ and $\F(x) \neq \F(y)$, let $w \in \mathbb{C}$ be such that $\F(x) = w\F(y)$. 
Since $|\F(x)-\F(y)| = |w-1||\F(y)| \leq x-y$ there is a constant $d$, $0 < d \leq 1$ such that $$|\F(y)| = d \frac{x-y}{|w-1|}.$$ Since $|\F(y)| \leq y$ it follows that \begin{equation}\label{nana} x \leq \left(1 + \frac{|w-1|}{d}\right) y.\end{equation} It is straightforward to check that the function $x \mapsto (x-y)/|x-cy|$ is increasing when $x \geq y$, which combined with \eqref{nana} yields that $$\frac{x-y}{|x-cy|} \leq \frac{|w-1|/d}{ \left|1+\frac{|w-1|}{d}-c \right|} \leq \frac{|w-1|}{d|1+|w-1|-c|}.$$ Recalling that $\F(x) = w\F(y)$ we hence conclude that $$|\F(x) - c\F(y)| = |w-c| d \frac{x-y}{|w-1|} \leq |w-c|\frac{|x-cy|}{|1+|w-1|-c|} .$$ We claim that \begin{equation} \label{eq:maxnorm} \sup_{w \in \C,\, |c|=1} \frac{|w-c|}{|1+|w-1|-c|} = \sqrt{2}. \end{equation} From \eqref{eq:maxnorm} we immediately deduce \eqref{eq:rotlip}, and it also implies that $\sqrt{2}$ is the best possible constant. To see the latter, fix $w \in \C$ and a unimodular $c$ and set $x=1+|w-1|$ and $y=1$. We may then define a function $\F$ as in the statement of the theorem satisfying that $\F(1) = 1$, $\F(1+|w-1|) = w$, which gives $$\frac{|\F(x)-c\F(y)|}{|x-cy|}=\frac{|w-c|}{|1+|w-1|-c|}.$$ To see that the supremum of \eqref{eq:maxnorm} is at least $\sqrt{2}$, let $w = 1 - it$ and let $c = e^{it}$. For this choice of $w$ and $c$ we have that $$\frac{|w-c|}{|1+|w-1|-c|} = \frac{|1-e^{it}-it|}{|1-e^{it} + t|} \to \sqrt{2}, \quad t \to 0.$$ For the upper bound, note that upon squaring it is equivalent with $$ |w- c|^2 \leq 2|1+|w-1|-c|^2,$$ which upon expanding, reordering and noting that $|c|=1$ is equivalent with $$|w|^2+5-4\mathsf{Re}~w + 4|w-1| - 2\mathsf{Re} \left[ \overline{c} (2(1+|w-1|)-w) \right] \geq 0. $$ This inequality is true for all unimodular $c$ if and only if \begin{equation} \label{eq:diffeq} |w|^2+5-4\mathsf{Re} ~w + 4|w-1| - 2|2(1+|w-1|)-w| \geq 0, \quad w \in \C. 
\end{equation} Since $|2(1+|w-1|)-w| \leq 2|w-1| + |w - 2|$ by the triangle inequality, \eqref{eq:diffeq} is implied by $$|w|^2+5-4\mathsf{Re}~ w - 2|w-2| \geq 0, \quad w \in \C. $$ The left hand side equals $$|w-2|^2 + 1 - 2|w-2| =(|w-2|-1)^2,$$ which completes the proof. \end{proof} The next theorem is the key result of the paper. Theorem \ref{aoa} is an immediate corollary of this result and Proposition \ref{lem:lip}. Set \begin{equation}\label{SiOpLipS}\|\F_s\|_{\Lip}=\sup_{A,B\in\mathcal{S}_2}\frac{\|\F_s(A)-\F_s(B)\|_{\mathcal{S}_2}}{\|A-B\|_{\mathcal{S}_2}}.\end{equation} It will also follow from the proof that the definition \eqref{SiOpLip} is independent of the dimension, as claimed in the introduction, and that it coincides with \eqref{SiOpLipS}. \begin{theorem}\label{symmetricfcgeneral} Let $\F:\mathbb{R}_+ \to \mathbb{C}$ be Lipschitz with $\F(0)=0$. Suppose that $A, B \in \mathcal{S}_2$. Then $\F_s$ satisfies \begin{equation} \label{eq:oplipcomplex} \|\F_s(A)-\F_s(B)\|_{\mathcal{S}_2} \leq \|\F\|_{\Lip-\mathbb{C}}\|A-B\|_{\mathcal{S}_2} \end{equation} and this estimate is optimal. In other words, $\|\F_s\|_{\Lip}=\|\F\|_{\Lip-\mathbb{C}}$. \end{theorem} \begin{proof} We first prove \eqref{eq:oplipcomplex} for finite square matrices. That is, we will show that for $d \times d$ matrices $A$ and $B$ we have \begin{equation} \label{eq:oplipfinite} \|\F_s(A)-\F_s(B)\|_{F} \leq \|\F\|_{\Lip-\mathbb{C}}\|A-B\|_{F}. \end{equation} Suppose that this has been proved and consider general $A, B \in \mathcal{S}_2$ with singular value decompositions $$A = \sum_{n=1}^\infty s_n(A) \, u^A_n \otimes v^A_n, \quad B = \sum_{n=1}^\infty s_n(B) \, u^B_n \otimes v^B_n.$$ For $N \geq 1$ we may consider $$A_N = \sum_{n=1}^N s_n(A) \, u^A_n \otimes v^A_n, \quad B_N = \sum_{n=1}^N s_n(B) \, u^B_n \otimes v^B_n$$ to be operators acting on $\mathcal{V}_N = \textrm{span} \, \{u^A_n, v^A_n, u^B_n, v^B_n \, | \, 1 \leq n \leq N\}$.
Note that the singular value decompositions of $A_N$ and $B_N$ are identical whether considered operators on $\mathcal{H}$ or on $\mathcal{V}_N$, and that the exact same statement applies to $\F_s(A_N)$ and $\F_s(B_N)$. Hence \eqref{eq:oplipfinite} applied to the finite-dimensional space $\mathcal{V}_N$ gives us that $$\|\F_s(A_N) - \F_s(B_N)\|_{\mathcal{S}_2(\mathcal{H})} \leq \|\F\|_{\Lip-\mathbb{C}} \|A_N - B_N\|_{\mathcal{S}_2(\mathcal{H})}.$$ Since $\|A_N - A\|_{\mathcal{S}_2(\mathcal{H})} \to 0$ and $\|\F_s(A_N) - \F_s(A)\|_{\mathcal{S}_2(\mathcal{H})} \to 0$ as $N \to \infty$, and similarly for $B$, the inequality \eqref{eq:oplipcomplex} follows. We now turn to proving \eqref{eq:oplipfinite} for $d \times d$-matrices $A$ and $B$, for which we express the singular value decompositions with the usual matrix notation; $$A = U_A\Sigma_A V_A^*, \, \quad B = U_B\Sigma_B V_B^*.$$ We have the following formula for $\|A-B\|_F^2$, \begin{align*} \|A-B\|_F^2&=\|U_A\Sigma_A V_A^*-U_B \Sigma_B V_B^*\|_F^2=\|U_B^*U_A\Sigma_A -\Sigma_B V_B^*V_A\|_F^2 \\ &=\|A\|^2_F+\|B\|_F^2-2\mathsf{Re} \sum_{ae} \Sigma_B \overline{V_B^*V_A} \odot U_B^*U_A\Sigma_A,\end{align*} where $\sum_{ae}$ denotes the operation of summing all entries of a matrix, and $\overline{M}$ denotes the action of taking the complex conjugate of every entry of a matrix $M$.
Since $\overline{V_B^*V_A} \odot U_B^*U_A$ is cdss, Lemma \ref{lem:birk} implies that there exist $c_1,\ldots,c_M$ in $[0,1]$ satisfying $\sum_{n=1}^Mc_n=1$, permutations $\pi_n$ and length-$d$ vectors $\gamma_n$ with unimodular entries, $1 \leq n \leq M$, such that $$ \overline{V_B^*V_A} \odot U_B^*U_A=\sum_{n=1}^M c_n M_{\pi_n,\gamma_n}.$$ We get \begin{align*}\|A-B\|_F^2 &=\|A\|^2_F+\|B\|_F^2-2 \mathsf{Re} \sum_{n=1}^Mc_n \sum_{ae} \Sigma_B M_{\pi_n,\gamma_n}\Sigma_A \\&=\sum_{n=1}^M c_n \left( \sum_{i=1}^Ns_i(A)^2+\sum_{i=1}^Ns_i(B)^2-2 \mathsf{Re} \sum_{i=1}^N s_i(B) \gamma_{n,i} s_{\pi_{n, i}}(A)\right) \\&=\sum_{n=1}^M c_n \sum_{i=1}^N| s_i(B) - \gamma_{n,i} s_{\pi_{n,i}}(A)|^2. \end{align*} This identity applied to $\F_s(A) = U_A \F(\Sigma_A) V_A^*$ and $\F_s(B) = U_B \F(\Sigma_B) V_B^*$ immediately gives \begin{align*}&\|\F_s(A)-\F_s(B)\|_F^2=\sum_{n=1}^M c_n \sum_{i=1}^N| \F(s_i(B)) - \gamma_{n,i} \F(s_{\pi_{n,i}}(A))|^2 \end{align*} Since $|\F(x)- \gamma \F(y)|\leq \|\F\|_{\Lip-\mathbb{C}}|x- \gamma y|$ for every $x, y \geq 0$ and $\gamma \in \mathbb{T}$, we get \begin{align*} &\|\F_s(A)-\F_s(B)\|_F^2 = \sum_{n=1}^M c_n \sum_{i=1}^N| \F(s_i(B)) - \gamma_{n,i} \F(s_{\pi_{n,i}}(A))|^2 \leq\\& \leq \|\F\|_{\Lip-\mathbb{C}}^2\sum_{n=1}^M c_n \sum_{i=1}^N| s_i(B) - \gamma_{n,i} s_{\pi_{n,i}}(A)|^2 = \|\F\|_{\Lip-\mathbb{C}}^2\|A-B\|_F^2,\end{align*} which establishes \eqref{eq:oplipcomplex}. The optimality of \eqref{eq:oplipcomplex} follows immediately from the argument surrounding \eqref{optimal}. This also shows that $\|\F_s\|_{\Lip}=\|\F\|_{\Lip-\mathbb{C}}$ independent of whether we use definition \eqref{SiOpLipS} or \eqref{SiOpLip} with arbitrary fixed dimension. \end{proof} \end{document}
\begin{document} \title[Measured Quantum Groupoid action] {The Unitary Implementation of a Measured Quantum Groupoid action} \author{Michel Enock} \address{Institut de Math\'ematiques de Jussieu, Unit\'{e} Mixte Paris 6 / Paris 7 / CNRS de Recherche 7586 \\175, rue du Chevaleret, Plateau 7E, F-75013 Paris} \email{[email protected]} \begin{abstract} Mimicking the von Neumann version of Kustermans and Vaes' locally compact quantum groups, Franck Lesieur had introduced a notion of measured quantum groupoid, in the setting of von Neumann algebras. In a former article, the author had introduced the notions of actions, crossed-product, dual actions of a measured quantum groupoid; a biduality theorem for actions has been proved. This article continues that program : we prove the existence of a standard implementation for an action, and a biduality theorem for weights. We generalize this way results which were proved, for locally compact quantum groups by S. Vaes, and for measured groupoids by T. Yamanouchi. \end{abstract} \maketitle \section{Introduction} \label{intro} \subsection{} In two articles (\cite{Val1}, \cite{Val2}), J.-M. Vallin has introduced two notions (pseudo-multiplicative unitary, Hopf-bimodule), in order to generalize, up to the groupoid case, the classical notions of multiplicative unitary \cite{BS} and of Hopf-von Neumann algebras \cite{ES} which were introduced to describe and explain duality of groups, and led to appropriate notions of quantum groups (\cite{ES}, \cite{W1}, \cite{W2}, \cite{BS}, \cite{MN}, \cite{W3}, \cite{KV1}, \cite{KV2}, \cite{MNW}). \\ In another article \cite{EV}, J.-M. Vallin and the author have constructed, from a depth 2 inclusion of von Neumann algebras $M_0\subset M_1$, with an operator-valued weight $T_1$ verifying a regularity condition, a pseudo-multiplicative unitary, which led to two structures of Hopf bimodules, dual to each other.
Moreover, we have then constructed an action of one of these structures on the algebra $M_1$ such that $M_0$ is the fixed point subalgebra, the algebra $M_2$ given by the basic construction being then isomorphic to the crossed-product. We construct on $M_2$ an action of the other structure, which can be considered as the dual action. \\ If the inclusion $M_0\subset M_1$ is irreducible, we recovered quantum groups, as proved and studied in former papers (\cite{EN}, \cite{E2}). \\ Therefore, this construction leads to a notion of "quantum groupoid", and a construction of a duality within "quantum groupoids". \subsection{} In a finite-dimensional setting, this construction can be mostly simplified, and is studied in \cite{NV1}, \cite{BSz1}, \cite{BSz2}, \cite{Sz},\cite{Val3}, \cite{Val4}, and examples are described. In \cite{NV2}, the link between these "finite quantum groupoids" and depth 2 inclusions of $II_1$ factors is given. \subsection{} F. Lesieur, in \cite{L}, starting from a Hopf-bimodule, as introduced in \cite{Val1}, when there exist a left-invariant operator-valued weight, and a right-invariant operator-valued weight, mimicking in that wider setting the technics of Kustermans and Vaes (\cite{KV1}, \cite{KV2}), obtained a pseudo-multiplicative unitary, which, as in quantum group theory, "contains" all the information about the object (the von Neumann algebra, the coproduct) and allows to construct important data (an antipod, a co-inverse, etc.) Lesieur gave the name of "measured quantum groupoids" to these objects. A new set of axioms for these had been given in an appendix of \cite{E5}. In \cite{E4} had been shown that, with suitable conditions, the objects constructed from \cite{EV} are "measured quantum groupoids" in the sense of Lesieur. 
\subsection{} In \cite{E5} have been developed the notions of action (already introduced in \cite{EV}), crossed-product, etc, following what had been done for locally compact quantum groups in (\cite{E1}, \cite{ES1}, \cite{V1}); a biduality theorem for actions had been obtained in (\cite{E5}, 11.6). Several points were left apart in \cite{E5}, namely the generalization of Vaes' theorem (\cite{V1}, 4.4) on the standard implementation of an action of a locally compact quantum group, which was the highlight of \cite{V1}, and a biduality theorem for weights, as obtained in \cite{Y3}, \cite{Y4} (in fact, we were much more inspired by the shorter proof given in an appendix of \cite{BV}). \newline We solve here these two problems when there exists a normal semi-finite faithful operator-valued weight from the von Neumann algebra on which the measured quantum groupoid is acting, onto the copy of the basis of this measured quantum groupoid which is put inside this algebra. In fact, these results appear much more as a biduality theorem of operator-valued weights rather than a biduality theorem on weights, which seems quite natural in the spirit of measured quantum groupoids, where, for instance, left-invariant weight on a locally compact quantum group is replaced by a left-invariant operator-valued weight. The strategy for the proofs had been mostly inspired by \cite{V1} and \cite{BV}. \subsection{} This article is organized as follows : \newline In chapter \ref{not}, we recall very quickly all the notations and results needed in that article; we have tried to make these preliminaries as short as possible, and we emphasize that this article should be understood as the continuation of \cite{E5}. \newline In chapter \ref{standarddual}, we follow (\cite{V1}, 4.1 to 4.4), and prove, for any dual action, the result on the standard implementation of an action.
\newline Chapter \ref{auxilliary} is rather technical; let $\mathfrak{G}=(N, M, \alpha, \beta, \Gamma, T, T', \nu)$ be a measured quantum groupoid, and let $b$ be an injective $*$-anti-homomorphism from $N$ into a von Neumann algebra $A$; let us suppose that there exists a normal semi-finite faithful operator-valued weight $\mathfrak{T}$ from $A$ onto $b(N)$, and let us write $\psi=\nu^o\circ b^{-1}\circ\mathfrak{T}$. Then, we can define on $A\underset{N}{_b*_\alpha}\mathcal L(H)$ a weight $\underline{\psi}$, which will generalize the tensor product of $\psi$ and $Tr_{\widehat{\Delta}^{-1}}$ (when $\mathfrak{G}$ is a locally compact quantum group, and therefore $N=\mathbb{C}$). \newline In chapter \ref{using}, using this auxilliary weight introduced in chapter \ref{auxilliary}, and the particular case of the dual actions studied in chapter \ref{standarddual}, we calculate the standard implementation of an action, whenever there exists a normal semi-finite faithful operator-valued weight from $A$ onto $b(N)$. This condition is fulfilled trivially when the measured quantum groupoid is a locally compact quantum group, or is a measured groupoid; therefore, we recover in both cases the results already obtained. \newline Chapter \ref{gamma} is another technical chapter; we define conditions on a weight $\psi$ defined on $A$ which allow us to construct on $A\underset{N}{_b*_\alpha}\mathcal L(H)$ a weight $\underline{\psi_\delta}$ which generalize the tensor product of $\psi$ and $Tr_{(\delta\widehat{\Delta})^{-1}}$(when $\mathfrak{G}$ is a locally compact quantum group, and therefore $N=\mathbb{C}$). 
\newline In chapter \ref{bidualw} we use both auxiliary weight constructions made in chapters \ref{auxilliary} and \ref{gamma}; then, when there exists a normal semi-finite faithful operator-valued weight $\mathfrak{T}$ from $A$ onto $b(N)$ such that $\psi=\nu^o\circ b^{-1}\circ\mathfrak{T}$, we can define a Radon-Nikodym derivative of the weight $\psi$ with respect to the action, which will be a cocycle for this action. This condition is fulfilled trivially when the measured quantum groupoid is a locally compact quantum group, or is a measured groupoid, and, therefore, we recover in both cases the results already obtained. \section{Definitions and notations} \label{not} This article is the continuation of \cite{E5}; preliminaries are to be found in \cite{E5}, and we just recall hereafter the following definitions and notations : \subsection{Spatial theory; relative tensor products of Hilbert spaces and fiber products of von Neumann algebras (\cite{C1}, \cite{S}, \cite{T}, \cite{EV})} \label{spatial} Let $N$ be a von Neumann algebra, $\psi$ a normal semi-finite faithful weight on $N$; we shall denote by $H_\psi$, $\mathfrak{N}_\psi$, $S_\psi$, $J_\psi$, $\Delta_\psi$... the canonical objects of the Tomita-Takesaki theory associated to the weight $\psi$; let $\alpha$ be a non degenerate faithful representation of $N$ on a Hilbert space $\mathcal H$; the set of $\psi$-bounded elements of the left-module $_\alpha\mathcal H$ is : \[D(_\alpha\mathcal{H}, \psi)= \lbrace \xi \in \mathcal{H};\exists C < \infty ,\| \alpha (y) \xi\| \leq C \| \Lambda_{\psi}(y)\|,\forall y\in \mathfrak{N}_{\psi}\rbrace\] Then, for any $\xi$ in $D(_\alpha\mathcal{H}, \psi)$, there exists a bounded operator $R^{\alpha,\psi}(\xi)$ from $H_\psi$ to $\mathcal{H}$, defined, for all $y$ in $\mathfrak{N}_\psi$ by : \[R^{\alpha,\psi}(\xi)\Lambda_\psi (y) = \alpha (y)\xi\] which intertwines the actions of $N$.
\newline If $\xi$, $\eta$ are bounded vectors, we define the operator product \[<\xi,\eta>_{\alpha,\psi} = R^{\alpha,\psi}(\eta)^* R^{\alpha,\psi}(\xi)\] which belongs to $\pi_{\psi}(N)'$, which, thanks to Tomita-Takesaki theory, will be identified to the opposite von Neumann algebra $N^o$. \newline If now $\beta$ is a non degenerate faithful antirepresentation of $N$ on a Hilbert space $\mathcal K$, the relative tensor product $\mathcal K\underset{\psi}{_\beta\otimes_\alpha}\mathcal H$ is the completion of the algebraic tensor product $\mathcal K\odot D(_\alpha\mathcal{H}, \psi)$ by the scalar product defined, if $\xi_1$, $\xi_2$ are in $\mathcal{K}$, $\eta_1$, $\eta_2$ are in $D(_\alpha\mathcal{H},\psi)$, by the following formula : \[(\xi_1\odot\eta_1 |\xi_2\odot\eta_2 )= (\beta(<\eta_1, \eta_2>_{\alpha,\psi})\xi_1 |\xi_2)\] If $\xi\in \mathcal{K}$, $\eta\in D(_\alpha\mathcal{H},\psi)$, we shall denote $\xi\underset{\psi}{_\beta\otimes_\alpha}\eta$ the image of $\xi\odot\eta$ into $\mathcal K\underset{\psi}{_\beta\otimes_\alpha}\mathcal H$, and, writing $\rho^{\beta, \alpha}_\eta(\xi)=\xi\underset{\psi}{_\beta\otimes_\alpha}\eta$, we get a bounded linear operator from $\mathcal K$ into $\mathcal K\underset{\psi}{_\beta\otimes_\alpha}\mathcal H$, which is equal to $1_\mathcal K\otimes_\psi R^{\alpha, \psi}(\eta)$. \newline Changing the weight $\psi$ will give a canonical isomorphic Hilbert space, but the isomorphism will not exchange elementary tensors !
\newline We shall denote $\sigma_\psi$ the relative flip, which is a unitary sending $\mathcal{K}\underset{\psi}{_\beta\otimes_\alpha}\mathcal{H}$ onto $\mathcal{H}\underset{\psi^o}{_\alpha\otimes _\beta}\mathcal{K}$, defined, for any $\xi$ in $D(\mathcal {K}_\beta ,\psi^o )$, $\eta$ in $D(_\alpha \mathcal {H},\psi)$, by : \[\sigma_\psi (\xi\underset{\psi}{_\beta\otimes_\alpha}\eta)=\eta\underset{\psi^o}{_\alpha\otimes_\beta}\xi\] If $x\in \beta(N)'$, $y\in \alpha(N)'$, it is possible to define an operator $x\underset{\psi}{_\beta\otimes_\alpha}y$ on $\mathcal K\underset{\psi}{_\beta\otimes_\alpha}\mathcal H$, with natural values on the elementary tensors. As this operator does not depend upon the weight $\psi$, it will be denoted $x\underset{N}{_\beta\otimes_\alpha}y$. We can define a relative flip $\varsigma_N$ at the level of operators such that $\varsigma_N(x\underset{N}{_\beta\otimes_\alpha}y)=y\underset{N^o}{_\alpha\otimes_\beta}x$. If $P$ is a von Neumann algebra on $\mathcal H$, with $\alpha(N)\subset P$, and $Q$ a von Neumann algebra on $\mathcal K$, with $\beta(N)\subset Q$, then we define the fiber product $Q\underset{N}{_\beta*_\alpha}P$ as $\{x\underset{N}{_\beta\otimes_\alpha}y, x\in Q', y\in P'\}'$, and we get that $\varsigma_N(Q\underset{N}{_\beta*_\alpha}P)=P\underset{N^o}{_\alpha*_\beta}Q$. \newline Moreover, this von Neumann algebra can be defined independently of the Hilbert spaces on which $P$ and $Q$ are represented; if $(i=1,2)$, $\alpha_i$ is a faithful non degenerate homomorphism from $N$ into $P_i$, $\beta_i$ is a faithful non degenerate antihomomorphism from $N$ into $Q_i$, and $\Phi$ (resp. $\Psi$) a homomorphism from $P_1$ to $P_2$ (resp. from $Q_1$ to $Q_2$) such that $\Phi\circ\alpha_1=\alpha_2$ (resp. $\Psi\circ\beta_1=\beta_2$), then, it is possible to define a homomorphism $\Psi\underset{N}{_{\beta_1}*_{\alpha_1}}\Phi$ from $Q_1\underset{N}{_{\beta_1}*_{\alpha_1}}P_1$ into $Q_2\underset{N}{_{\beta_2}*_{\alpha_2}}P_2$.
\newline The operators $\theta^{\alpha, \psi}(\xi, \eta)=R^{\alpha, \psi}(\xi)R^{\alpha, \psi}(\eta)^*$, for all $\xi$, $\eta$ in $D(_\alpha\mathcal H, \psi)$, generate a weakly dense ideal in $\alpha(N)'$. Moreover, there exists a family $(e_i)_{i\in I}$ of vectors in $D(_\alpha\mathcal H, \psi)$ such that the operators $\theta^{\alpha, \psi}(e_i, e_i)$ are pairwise orthogonal projections ($\theta^{\alpha, \psi}(e_i, e_i)$ being then the projection on the closure of $\alpha(N)e_i$). Such a family is called an orthogonal $(\alpha, \psi)$-basis of $\mathcal H$. \subsection{Measured quantum groupoids (\cite{L}, \cite{E5})} \label{MQG} A measured quantum groupoid is an octuplet $\mathfrak {G}=(N, M, \alpha, \beta, \Gamma, T, T', \nu)$ such that (\cite{E5}, 3.8) : \newline (i) $(N, M, \alpha, \beta, \Gamma)$ is a Hopf-bimodule (as defined in \cite{E5}, 3.1), \newline (ii) $T$ is a left-invariant normal, semi-finite, faithful operator valued weight $T$ from $M$ to $\alpha (N)$, \newline (iii) $T'$ is a right-invariant normal, semi-finite, faithful operator-valued weight $T'$ from $M$ to $\beta (N)$, \newline (iv) $\nu$ is a normal semi-finite faithful weight on $N$, which is relatively invariant with respect to $T$ and $T'$. \newline We shall write $\Phi=\nu\circ\alpha^{-1}\circ T$, and $H=H_\Phi$, $J=J_\Phi$, and, for all $n\in N$, $\hat{\beta}(n)=J\alpha(n^*)J$, $\hat{\alpha}(n)=J\beta(n^*)J$. The weight $\Phi$ will be called the left-invariant weight on $M$. \newline Then, $\mathfrak {G}$ can be equipped with a pseudo-multiplicative unitary $W$ from $H\underset{\nu}{_\beta\otimes_\alpha}H$ onto $H\underset{\nu^o}{_\alpha\otimes_{\hat{\beta}}}H$ (\cite{E5}, 3.6), a co-inverse $R$, a scaling group $\tau_t$, an antipode $S$, a modulus $\delta$, a scaling operator $\lambda$, a managing operator $P$, and a canonical one-parameter group $\gamma_t$ of automorphisms on the basis $N$ (\cite{E5}, 3.8).
Instead of $\mathfrak {G}$, we shall mostly use $(N, M, \alpha, \beta, \Gamma, T, RTR, \nu)$ which is another measured quantum groupoid, denoted $\underline{\mathfrak {G}}$, which is equipped with the same data ($W$, $R$, ...) as $\mathfrak{G}$. \newline A dual measured quantum groupoid $\widehat{\mathfrak{G}}$, which is denoted $(N, \widehat{M}, \alpha, \hat{\beta}, \widehat{\Gamma}, \widehat{T}, \widehat{R}\widehat{T}\widehat{R}, \nu)$, can be constructed, and we have $\widehat{\widehat{\mathfrak {G}}}=\underline{\mathfrak {G}}$. \newline Canonically associated to $\mathfrak {G}$, can be defined also the opposite measured quantum groupoid $\mathfrak{G}^o=(N^o, M, \beta, \alpha, \varsigma_N\Gamma, RTR, T, \nu^o)$ and the commutant measured quantum groupoid $\mathfrak{G}^c=(N^o, M', \hat{\beta}, \hat{\alpha}, \Gamma^c, T^c, R^cT^cR^c, \nu^o)$; we have $(\mathfrak{G}^o)^o=(\mathfrak{G}^c)^c=\underline{\mathfrak{G}}$, $\widehat{\mathfrak{G}^o}=(\widehat{\mathfrak {G}})^c$, $\widehat{\mathfrak {G}^c}=(\widehat{\mathfrak {G}})^o$, and $\mathfrak{G}^{oc}=\mathfrak {G}^{co}$ is canonically isomorphic to $\underline{\mathfrak {G}}$ (\cite{E5}, 3.12). \newline The pseudo-multiplicative unitary of $\widehat{\mathfrak{G}}$ (resp. $\mathfrak{G}^o$, $\mathfrak{G}^c$) will be denoted $\widehat{W}$ (resp. $W^o$, $W^c$). The left-invariant weight on $\widehat{\mathfrak{G}}$ (resp. $\mathfrak{G}^o$, $\mathfrak{G}^c$) will be denoted $\widehat{\Phi}$ (resp. $\Phi^o$, $\Phi^c$). \newline Let $_a\mathfrak{H}_b$ be a $N-N$-bimodule, i.e. a Hilbert space $\mathfrak{H}$ equipped with a normal faithful non degenerate representation $a$ of $N$ on $\mathfrak{H}$ and a normal faithful non degenerate anti-representation $b$ on $\mathfrak{H}$, such that $b(N)\subset a(N)'$.
A corepresentation of $\mathfrak{G}$ on $_a\mathfrak{H}_b$ is a unitary $V$ from $\mathfrak{H}\underset{\nu^o}{_a\otimes_\beta}H_\Phi$ onto $\mathfrak{H}\underset{\nu}{_b\otimes_\alpha}H_\Phi$, satisfying, for all $n\in N$ : \[V(b(n)\underset{N^o}{_a\otimes_\beta}1)=(1\underset{N}{_b\otimes_\alpha}\beta(n))V\] \[V(1\underset{N^o}{_a\otimes_\beta}\alpha(n))=(a(n)\underset{N}{_b\otimes_\alpha}1)V\] such that, for any $\xi\in D(_a\mathfrak{H}, \nu)$ and $\eta\in D(\mathfrak{H}_b, \nu^o)$, the operator $(\omega_{\xi, \eta}*id)(V)$ belongs to $M$ (then, it is possible to define $(id*\theta)(V)$, for any $\theta$ in $M_*^{\alpha, \beta}$ which is the linear set generated by the $\omega_\xi$, with $\xi\in D(_\alpha H, \nu)\cap D(H_\beta, \nu^o)$), and such that the application $\theta\rightarrow (id*\theta)(V)$ from $M_*^{\alpha, \beta}$ into $\mathcal L(\mathfrak{H})$ is multiplicative (\cite{E5} 5.1, 5.5). \subsection{Action of a measured quantum groupoid (\cite{E5})} \label{action} An action (\cite{E5}, 6.1) of $\mathfrak{G}$ on a von Neumann algebra $A$ is a couple $(b, \mathfrak a)$, where : \newline (i) $b$ is an injective $*$-antihomomorphism from $N$ into $A$; \newline (ii) $\mathfrak a$ is an injective $*$-homomorphism from $A$ into $A\underset{N}{_b*_\alpha}M$; \newline (iii) $b$ and $\mathfrak a$ are such that, for all $n$ in $N$: \[\mathfrak a (b(n))=1\underset{N}{_b\otimes_\alpha}\beta(n)\] (which allow us to define $\mathfrak a\underset{N}{_b*_\alpha}id$ from $A\underset{N}{_b*_\alpha}M$ into $A\underset{N}{_b*_\alpha}M\underset{N}{_\beta*_\alpha}M$) and such that : \[(\mathfrak a\underset{N}{_b*_\alpha}id)\mathfrak a=(id\underset{N}{_b*_\alpha}\Gamma)\mathfrak a\] The set of invariants is defined as the sub von Neumann algebra : \[A^\mathfrak{a}=\{x\in A\cap b(N)', \mathfrak{a}(x)=x\underset{N}{_b\otimes_\alpha}1\}\] If the von Neumann algebra acts on a Hilbert space $\mathfrak{H}$, and if there exists a representation $a$ of $N$ on $\mathfrak{H}$ such that
$b(N)\subset A\subset a(N)'$, a corepresentation $V$ of $\mathfrak{G}$ on the bimodule $_a\mathfrak{H}_b$ will be called an implementation of $\mathfrak{a}$ if we have $\mathfrak{a}(x)=V(x\underset{N^o}{_a\otimes_\beta}1)V^*$, for all $x\in A$ (\cite{E5}, 6.6); we shall look at the following more precise situation : let $\psi$ be a normal semi-finite faithful weight on $A$, and $V$ an implementation of $\mathfrak{a}$ on $_a(H_\psi)_b$ (with $a(n)=J_\psi b(n^*)J_\psi$), such that : \[V^*=(J_\psi\underset{\nu^o}{_a\otimes_\beta}J_{\widehat{\Phi}})V(J_\psi\underset{\nu}{_b\otimes_\alpha} J_{\widehat{\Phi}})\] \newline Such an implementation had been constructed (\cite{E5} 8.8) in the particular case when the weight $\psi$ is called $\delta$-invariant, which means that, for all $\eta\in D(_\alpha H_\Phi, \nu)\cap\mathcal D(\delta^{1/2})$, such that $\delta^{1/2}\eta$ belongs to $D((H_\Phi)_\beta, \nu^o)$, and for all $x\in\mathfrak{N}_\psi$, we have: \[\psi((id\underset{N}{_b*_\alpha}\omega_\eta)\mathfrak a(x^*x))=\|\Lambda_\psi(x)\underset{\nu^o}{_a\otimes_\beta}\delta^{1/2}\eta\|^2\] and bears the density property, which means that the subset $D((H_\psi)_b, \nu^o)\cap D(_aH_\psi, \nu)$ is dense in $H_\psi$. This standard implementation is then given by the formula (\cite{E5}, 8.4) : \[V_\psi(\Lambda_\psi (x)\underset{\nu^o}{_a\otimes_\beta}\delta^{1/2}\eta)=\sum_i\Lambda_\psi((id\underset{N}{_b*_\alpha}\omega_{\eta, e_i})\mathfrak a(x))\underset{\nu}{_b\otimes_\alpha}e_i\] for all $x\in\mathfrak{N}_\psi$, $\eta\in D(_\alpha H, \nu)\cap\mathcal D(\delta^{1/2})$ such that $\delta^{1/2}\eta$ belongs to $D(H_\beta, \nu^o)$, $(e_i)_{i\in I}$ any orthonormal $(\alpha, \nu)$-basis of $H$.
Moreover (\cite{E5}, 8.9), it is possible to define one parameter groups of unitaries $\Delta_\psi^{it}\underset{N^o}{_a\otimes_\beta}\delta^{-it}\Delta_{\widehat{\Phi}}^{-it}$ and $\Delta_\psi^{it}\underset{N}{_b\otimes_\alpha}\delta^{-it}\Delta_{\widehat{\Phi}}^{-it}$, with natural values on elementary tensor, and we have : \[V_\psi(\Delta_\psi^{it}\underset{N^o}{_a\otimes_\beta}\delta^{-it}\Delta_{\widehat{\Phi}}^{-it})=(\Delta_\psi^{it}\underset{N}{_b\otimes_\alpha}\delta^{-it}\Delta_{\widehat{\Phi}}^{-it})V_\psi\] and, therefore, for any $x$ in $A$, $t$ in $\mathbb{R}$, we have : \[\mathfrak a(\sigma_t^\psi(x))=(\Delta_\psi^{it}\underset{N}{_b\otimes_\alpha}\delta^{-it}\Delta_{\widehat{\Phi}}^{-it})\mathfrak a(x)(\Delta_\psi^{-it}\underset{N}{_b\otimes_\alpha}\delta^{it}\Delta_{\widehat{\Phi}}^{it})\] \subsection{Crossed-product (\cite{E5})} \label{crossed} The crossed-product of $A$ by $\mathfrak {G}$ via the action $\mathfrak a$ is the von Neumann algebra generated by $\mathfrak a(A)$ and $1\underset{N}{_b\otimes_\alpha}\widehat{M}'$ (\cite{E5}, 9.1) and is denoted $A\rtimes_\mathfrak a\mathfrak {G}$; then there exists (\cite{E5}, 9.3) an action $(1\underset{N}{_b\otimes_\alpha}\hat{\alpha}, \tilde{\mathfrak a})$ of $(\widehat{\mathfrak {G}})^c$ on $A\rtimes_\mathfrak a\mathfrak {G}$. 
\newline The biduality theorem (\cite{E5}, 11.6) says that the bicrossed-product $(A\rtimes_\mathfrak a\mathfrak {G})\rtimes_{\tilde{\mathfrak a}}\widehat{\mathfrak {G}}^c$ is canonically isomorphic to $A\underset{N}{_b*_\alpha}\mathcal L(H)$; more precisely, this isomorphism is given by : \[\Theta (\mathfrak{a}\underset{N}{_b*_\alpha}id)(A\underset{N}{_b*_\alpha}\mathcal L(H))=(A\rtimes_\mathfrak a\mathfrak {G})\rtimes_{\tilde{\mathfrak a}}\widehat{\mathfrak {G}}^c\] where $\Theta$ is the spatial isomorphism between $\mathcal L(\mathfrak{H}\underset{\nu}{_b\otimes_\alpha}H\underset{\nu}{_\beta\otimes_\alpha}H)$ and $\mathcal L(\mathfrak{H}\underset{\nu}{_b\otimes_\alpha}H\underset{\nu^o}{_{\hat{\alpha}}\otimes_\beta}H)$ implemented by $1_\mathfrak{H}\underset{\nu}{_b\otimes_\alpha}\sigma_\nu W^o\sigma_\nu$; the biduality theorem says also that this isomorphism sends the action $(1\underset{N}{_b\otimes_\alpha}\hat{\beta}, \underline{\mathfrak a})$ of $\mathfrak{G}$ on $A\underset{N}{_b*_\alpha}\mathcal L(H)$, defined, for any $X\in A\underset{N}{_b*_\alpha}\mathcal L(H)$, by : \[\underline{\mathfrak a}(X)=(1\underset{N}{_b\otimes_\alpha}\sigma_{\nu^o}W\sigma_{\nu^o})(id\underset{N}{_b*_\alpha}\varsigma_N)(\mathfrak a\underset{N}{_b*_\alpha}id)(X)(1\underset{N}{_b\otimes_\alpha}\sigma_{\nu^o}W\sigma_{\nu^o})^*\] on the bidual action (of $\mathfrak{G}^{co}$) on $(A\rtimes_\mathfrak a\mathfrak {G})\rtimes_{\tilde{\mathfrak a}}\widehat{\mathfrak {G}}^o$. \newline There exists a normal faithful semi-finite operator-valued weight $T_{\tilde{\mathfrak{a}}}$ from $A\rtimes_\mathfrak a\mathfrak {G}$ onto $\mathfrak{a}(A)$; therefore, starting with a normal semi-finite weight $\psi$ on $A$, we can construct a dual weight $\tilde{\psi}$ on $A\rtimes_\mathfrak a\mathfrak {G}$ by the formula $\tilde{\psi}=\psi\circ\mathfrak{a}^{-1}\circ T_{\tilde{\mathfrak{a}}}$ (\cite{E5} 13.2). 
These dual weights are exactly the $\hat{\delta}^{-1}$-invariant weights on $A\rtimes_\mathfrak a\mathfrak {G}$ bearing the density property (\cite{E5} 13.3). \newline Moreover (\cite{E5} 13.3), the linear set generated by all the elements $(1\underset{N}{_b\otimes_\alpha}a)\mathfrak a(x)$, for all $x\in\mathfrak{N}_\psi$, $a\in\mathfrak{N}_{\widehat{\Phi}^c}\cap\mathfrak{N}_{\hat{T}^c}$, is a core for $\Lambda_{\tilde{\psi}}$, and it is possible to identify the GNS representation of $A\rtimes_\mathfrak a\mathfrak{G}$ associated to the weight $\tilde{\psi}$ with the natural representation on $H_\psi\underset{\nu}{_b\otimes_\alpha}H_\Phi$ by writing : \[\Lambda_\psi(x)\underset{\nu}{_b\otimes_\alpha}\Lambda_{\widehat{\Phi}^c}(a)=\Lambda_{\tilde{\psi}}[(1\underset{N}{_b\otimes_\alpha}a)\mathfrak a(x)]\] which leads to the identification of $H_{\tilde{\psi}}$ with $H_\psi\underset{\nu}{_b\otimes_\alpha}H$. Moreover, using that identification, the linear set generated by the elements of the form $\mathfrak a(y^*)(\Lambda_\psi(x)\underset{\nu}{_b\otimes_\alpha}\Lambda_{\widehat{\Phi}^c}(a))$, for $x, y$ in $\mathfrak{N}_\psi$, and $a$ in $\mathfrak{N}_{\widehat{\Phi}^c}\cap\mathfrak{N}_{\hat{T}^c}\cap\mathfrak{N}_{\widehat{\Phi}^c}^*\cap\mathfrak{N}_{\hat{T}^c}^*$ is a core for $S_{\tilde{\psi}}$, and we have : \[S_{\tilde{\psi}}\mathfrak a(y^*)(\Lambda_\psi(x)\underset{\nu}{_b\otimes_\alpha}\Lambda_{\widehat{\Phi}^c}(a))=\mathfrak a(x^*)(\Lambda_\psi(y)\underset{\nu}{_b\otimes_\alpha}\Lambda_{\widehat{\Phi}^c}(a^*))\] Then, the unitary $U_\psi^\mathfrak{a}=J_{\tilde{\psi}}(J_\psi\underset{N^o}{_a\otimes_\beta}J_{\widehat{\Phi}})$ from $H_\psi\underset{\nu^o}{_a\otimes_\beta}H_\Phi$ onto $H_\psi\underset{\nu}{_b\otimes_\alpha}H_\Phi$ satisfies : \[U^\mathfrak a_\psi(J_\psi\underset{N}{_b\otimes_\alpha}J_{\widehat{\Phi}})=(J_\psi\underset{N}{_b\otimes_\alpha}J_{\widehat{\Phi}})(U^\mathfrak a_\psi)^*\] and we have (\cite{E5} 13.4) : \newline (i) for all $y\in A$ : 
\[\mathfrak a (y)=U^\mathfrak a_\psi(y\underset{N^o}{_a\otimes_\beta}1)(U^\mathfrak a_\psi)^*\] (ii) for all $b\in M$ : \[(1\underset{N}{_b\otimes_\alpha}J_\Phi bJ_\Phi)U^\mathfrak a_\psi=U^\mathfrak a_\psi(1\underset{N^o}{_a\otimes_\beta}J_\Phi bJ_\Phi)\] (iii) for all $n\in N$ : \[U_\psi^\mathfrak a(b(n)\underset{N^o}{_a\otimes_\beta}1)=(1\underset{N}{_b\otimes_\alpha}\beta(n))U_\psi^\mathfrak a\] \[U_\psi^\mathfrak a(1\underset{N^o}{_a\otimes_\beta}\alpha(n))=(a(n)\underset{N}{_b\otimes_\alpha}1)U_\psi^\mathfrak a\] Therefore, we see that this unitary $U^\mathfrak{a}_\psi$ ``implements'' $\mathfrak{a}$, but we do not know whether it is a corepresentation. If it is, we shall say that it is a standard implementation of $\mathfrak{a}$. \newline We can define the bidual weight $\tilde{\tilde{\psi}}$ on $(A\rtimes_\mathfrak a\mathfrak {G})\rtimes_{\tilde{\mathfrak a}}\widehat{\mathfrak {G}}^o$, and the weight $\tilde{\tilde{\psi}}\circ\Theta\circ (\mathfrak{a}\underset{N}{_b*_\alpha}id)$ on $A\underset{N}{_b*_\alpha}\mathcal L(H)$, that we shall denote $\overline{\psi_\mathfrak{a}}$ for simplification (or $\overline{\psi}$ if there is no ambiguity about the action). Then we get (\cite{E5}, 13.6) that the spatial derivative $\frac{d\overline{\psi}}{d\psi^o}$ is equal to the modulus operator $\Delta_{\tilde{\psi}}$.
There exists a normal semi-finite faithful operator-valued weight $T_{\underline{\mathfrak{a}}}$ from $A\underset{N}{_b*_\alpha}\mathcal L(H)$ onto $A\rtimes_\mathfrak{a}\mathfrak{G}$ such that $\overline{\psi_\mathfrak{a}}=\tilde{\psi}\circ T_{\underline{\mathfrak{a}}}$. \newline Using twice (\cite{T} 4.22(ii)), we obtain, for any $x\in A$ and $t\in\mathbb{R}$, that $\sigma_t^{\overline{\psi_\mathfrak{a}}}(\mathfrak{a}(x))=\mathfrak{a}(\sigma_t^\psi(x))$; and if $\psi_1$ and $\psi_2$ are two normal semi-finite faithful weights on $A$, we get : \[(D\overline{\psi_{1\mathfrak{a}}}:D\overline{\psi_{2\mathfrak{a}}})_t=(D\tilde{\psi_1} : D\tilde{\psi_2})_t=\mathfrak{a}((D\psi_1:D\psi_2)_t)\] \subsection{Examples of measured quantum groupoids} \label{ex} Examples of measured quantum groupoids are the following : \newline (i) locally compact quantum groups, as defined and studied by J. Kustermans and S. Vaes (\cite{KV1}, \cite{KV2}, \cite{V1}); these are, trivially, the measured quantum groupoids with the basis $N=\mathbb{C}$. \newline (ii) measured groupoids, equipped with a left Haar system and a quasi-invariant measure on the set of units, as studied mostly by T. Yamanouchi (\cite{Y1}, \cite{Y2}, \cite{Y3}, \cite{Y4}); it was proved in \cite{E6} that these measured quantum groupoids are exactly those whose underlying von Neumann algebra is abelian. \newline (iii) the finite dimensional case had been studied by D. Nikshych and L. Vainermann (\cite{NV1}, \cite{NV2}, \cite{NV3}), J.-M. Vallin (\cite{Val3}, \cite{Val4}) and M.-C. David (\cite{D}); in that case, non trivial examples are given, for instance Temperley-Lieb algebras (\cite{NV3}, \cite{D}), which had appeared in subfactor theory (\cite{J}). \newline (iv) continuous fields of ($\mathbf{C}^*$-version of) locally compact quantum groups, as studied by E.
Blanchard in (\cite{Bl1}, \cite{Bl2}); it was proved in \cite{E6} that these measured quantum groupoids are exactly those whose basis is central in the underlying von Neumann algebras of both the measured quantum groupoid and its dual. \newline (v) in \cite{DC}, K. De Commer proved that, in the case of a monoidal equivalence between two locally compact quantum groups (which means that these two locally compact quantum groups have commuting ergodic and integrable actions on the same von Neumann algebra), it is possible to construct a measured quantum groupoid of basis $\mathbb{C}^2$ which contains all the data. Moreover, this construction was useful to prove new results on locally compact quantum groups, namely on the deformation of a locally compact quantum group by a unitary $2$-cocycle; he proved that these measured quantum groupoids are exactly those whose basis $\mathbb{C}^2$ is central in the underlying von Neumann algebra of the measured quantum groupoid, but not in the underlying von Neumann algebra of the dual measured quantum groupoid.
\newline (vi) starting from a depth 2 inclusion $M_0\subset M_1$ of von Neumann algebras, equipped with an operator-valued weight $T_1$ from $M_1$ onto $M_0$, satisfying appropriate conditions, such that there exists a normal semi-finite faithful weight $\chi$ on the first relative commutant $M'_0\cap M_1$, invariant under the modular automorphism group $\sigma_t^{T_1}$, it has been proved (\cite{EV}, \cite{E4}) that it is possible to put on the second relative commutant $M'_0\cap M_2$ (where $M_0\subset M_1\subset M_2\subset M_3 ...$ is Jones' tower associated to the inclusion $M_0\subset M_1$) a canonical structure of a measured quantum groupoid; moreover, its dual is given then by the same construction associated to the inclusion $M_1\subset M_2$, and this dual measured quantum groupoid acts canonically on the von Neumann algebra $M_1$, in such a way that $M_0$ is equal to the subalgebra of invariants, and the inclusion $M_1\subset M_2$ is isomorphic to the inclusion of $M_1$ into its crossed-product. This gives a ``geometrical'' construction of measured quantum groupoids; in another article in preparation (\cite{E7}), in which the biduality theorem for weights proved in \ref{cocycle} is used, it is proved that any measured quantum groupoid has an outer action on some von Neumann algebra, and can be, therefore, obtained by this ``geometrical construction''. The same result for locally compact quantum groups relies upon \cite{V2} and the corresponding result for measured quantum groupoids had been pointed out in \cite{E5}.
\newline (vii) in \cite{VV} and \cite{BSV} was given a specific procedure for constructing locally compact quantum groups, starting from a locally compact group $G$, whose almost all elements belong to the product $G_1G_2$ (where $G_1$ and $G_2$ are closed subgroups of $G$ whose intersection is reduced to the unit element of $G$); such $(G_1, G_2)$ is called a ``matched pair'' of locally compact groups (more precisely, in \cite{VV}, the set $G_1G_2$ is required to be open, and it is not the case in \cite{BSV}). Then, $G_1$ acts naturally on $L^\infty(G_2)$ (and vice versa), and the two crossed-products obtained bear the structure of two locally compact quantum groups in duality. In \cite{Val5}, J.-M. Vallin generalizes these constructions up to groupoids, and, then, obtains examples of measured quantum groupoids; more specific examples are then given by the action of a matched pair of groups on a locally compact space, and also more exotic examples. \section{The standard implementation of an action : the case of a dual action} \label{standarddual} In this chapter, following \cite{V1}, we prove that the unitary $U_\psi^\mathfrak{a}$ introduced in \ref{crossed} is a standard implementation of $\mathfrak{a}$, for any normal semi-finite faithful weight $\psi$ on $A$, whenever $\mathfrak{a}$ is a dual action (\ref{cordual}). For this purpose, we prove first that, if for some weight $\psi_1$, the unitary $U^\mathfrak{a}_{\psi_1}$ is a standard implementation, then, for any weight $\psi$, $U^\mathfrak{a}_\psi$ is a standard implementation (\ref{propu}). Second (\ref{Uinv}), we prove, for a $\delta$-invariant weight $\psi$, that $U_\psi^\mathfrak{a}$ is equal to the implementation $V_\psi$ constructed in (\cite{E5} 8.8) and recalled in \ref{action}. Thanks to (\cite{E5} 13.3), recalled in \ref{crossed}, we then get the result.
\subsection{Proposition} \label{propu} {\it Let $\mathfrak{G}$ be a measured quantum groupoid, and $(b,\mathfrak a)$ an action of $\mathfrak{G}$ on a von Neumann algebra $A$; let $\psi_1$ and $\psi_2$ be two normal faithful semi-finite weights on $A$ and $U^\mathfrak a_{\psi_1}$ and $U^\mathfrak a_{\psi_2}$ the two unitaries constructed in \ref{crossed}; let $u$ be the unitary from $H_{\psi_1}$ onto $H_{\psi_2}$ intertwining the representations $\pi_{\psi_1}$ and $\pi_{\psi_2}$; then : \newline (i) the unitary $u\underset{N}{_b\otimes_\alpha}1$ intertwines the representations of $A\rtimes_\mathfrak a\mathfrak{G}$ on $H_{\psi_1}\underset{\nu}{_b\otimes_\alpha}H_\Phi$ and on $ H_{\psi_2}\underset{\nu}{_b\otimes_\alpha}H_\Phi$; moreover, we have : \[(u\underset{N}{_b\otimes_\alpha}1)U^\mathfrak a_{\psi_1}=U^\mathfrak a_{\psi_2}(u\underset{N^o}{_{a_1}\otimes_\beta}1)\] where $a_1(n)=J_{\psi_1}\pi_{\psi_1}(b(n^*))J_{\psi_1}$, for all $n\in N$. \newline (ii) if $U^\mathfrak{a}_{\psi_1}$ is a corepresentation of $\mathfrak{G}$ on $H_{\psi_1}$, then $U^\mathfrak{a}_{\psi_2}$ is a corepresentation of $\mathfrak{G}$ on $H_{\psi_2}$. \newline (iii) if $U^\mathfrak{a}_{\psi_1}$ is a standard implementation of $\mathfrak{a}$, then $U^\mathfrak{a}_{\psi_2}$ is a standard implementation of $\mathfrak{a}$. } \begin{proof} Let us write $J_{2,1}$ the relative modular conjugation, which is an antilinear surjective isometry from $H_{\psi_1}$ onto $H_{\psi_2}$. Then we have $u=J_{2,1}J_{\psi_1}=J_{\psi_2}J_{2,1}$, by (\cite{St} 3.16). 
Moreover, let us define, for $x\in A$ and $t\in\mathbb{R}$, $\sigma_t^{2,1}(x)=[D\psi_2:D\psi_1]_t\sigma_t^{\psi_1}(x)$; then, by (\cite{St}, 3.15), for $x\in\mathfrak{N}_{\psi_1}$, $y\in D(\sigma^{2,1}_{-i/2})$, $xy^*$ belongs to $\mathfrak{N}_{\psi_2}$ and : \[\Lambda_{\psi_2}(xy^*)=J_{2,1}\pi_{\psi_1}(\sigma^{2,1}_{-i/2}(y))J_{\psi_1}\Lambda_{\psi_1}(x)\] Therefore, if $a\in\mathfrak{N}_{\widehat{\Phi}^c}$, $(1\underset{N}{_b\otimes_\alpha}a)\mathfrak a(xy^*)$ belongs to $\mathfrak{N}_{\tilde{\psi_2}}$, and, we have, where $V_i$ ($i=1, 2$) denotes the unitary from $H_{\psi_i}\underset{\nu}{_b\otimes_\alpha}H_\Phi$ onto $H_{\tilde{\psi_i}}$ defined in \ref{crossed} : \begin{eqnarray*} \Lambda_{\tilde{\psi_2}}[(1\underset{N}{_b\otimes_\alpha}a)\mathfrak a(xy^*)] &=&V_2(\Lambda_{\psi_2}(xy^*)\underset{\nu}{_b\otimes_\alpha}\Lambda_{\widehat{\Phi}^c}(a))\\ &=&V_2(J_{2,1}\pi_{\psi_1}(\sigma^{2,1}_{-i/2}(y))J_{\psi_1}\Lambda_{\psi_1}(x)\underset{\nu}{_b\otimes_\alpha}\Lambda_{\widehat{\Phi}^c}(a)) \end{eqnarray*} which is equal to : \[V_2(J_{2,1}\pi_{\psi_1}(\sigma^{2,1}_{-i/2}(y))J_{\psi_1}\underset{N}{_b\otimes_\alpha}1)V_1^*\Lambda_{\tilde{\psi_1}}[(1\underset{N}{_b\otimes_\alpha}a)\mathfrak a(x)]\] and, as the linear set generated by the elements of the form $(1\underset{N}{_b\otimes_\alpha}a)\mathfrak a(x)$ is a core for $\Lambda_{\tilde{\psi_1}}$, we get, for any $z\in\mathfrak{N}_{\tilde{\psi_1}}$, that $z\mathfrak a(y^*)$ belongs to $\mathfrak{N}_{\tilde{\psi_2}}$, and that : \[\Lambda_{\tilde{\psi_2}}(z\mathfrak a (y^*))=V_2(J_{2,1}\pi_{\psi_1}(\sigma^{2,1}_{-i/2}(y))J_{\psi_1}\underset{N}{_b\otimes_\alpha}1)V_1^*\Lambda_{\tilde{\psi_1}}(z)\] Let us denote by $\tilde{J_{2,1}}$ the relative modular conjugation constructed from the weights $\tilde{\psi_1}$ and $\tilde{\psi_2}$, and $\tilde{\sigma}^{2,1}_t$ the one-parameter group of isometries of $A\rtimes_\mathfrak a \mathfrak{G}$ constructed from these two weights by the formula, for any $X\in
A\rtimes_\mathfrak a \mathfrak{G}$ : \[\tilde{\sigma}^{2,1}_t(X)=[D\tilde{\psi_2}:D\tilde{\psi_1}]_t\sigma_t^{\tilde{\psi_1}}(X)\] Using (\cite{St}, 3.15) applied to these two weights, we get that $\mathfrak a(y)$ belongs to $D(\tilde{\sigma}^{2,1}_{-i/2})$ and that : \[\tilde{J_{2,1}}\pi_{\tilde{\psi_1}}(\tilde{\sigma}^{2,1}_{-i/2}(\mathfrak a (y)))J_{\tilde{\psi_1}}= V_2(J_{2,1}\pi_{\psi_1}(\sigma^{2,1}_{-i/2}(y))J_{\psi_1}\underset{N}{_b\otimes_\alpha}1)V_1^*\] We easily get that $\tilde{\sigma}^{2,1}_t(\mathfrak a(y))=\mathfrak a (\sigma^{2,1}_t(y))$ and, therefore, we have : \[\pi_{\tilde{\psi_1}}(\mathfrak a(\sigma^{2,1}_{-i/2}(y))=\tilde{J_{2,1}}^*V_2(J_{2,1}\pi_{\psi_1}(\sigma^{2,1}_{-i/2}(y))J_{\psi_1}\underset{N}{_b\otimes_\alpha}1)V_1^*J_{\tilde{\psi_1}}\] As we have, using \ref{crossed} : \[(J_{\psi_1}\underset{N}{_b\otimes_\alpha}J_{\widehat{\Phi}})V_1^*J_{\tilde{\psi_1}}=U_{\psi_1}^\mathfrak a V_1^*\] we get : \[\pi_{\tilde{\psi_1}}(\mathfrak a(\sigma^{2,1}_{-i/2}(y))= \tilde{J_{2,1}}^*V_2(J_{2,1}\underset{N^o}{_{a_1}\otimes_\beta}J_{\widehat{\Phi}})(\pi_{\psi_1}(\sigma^{2,1}_{-i/2}(y))\underset{N^o}{_{a_1}\otimes_\beta}1)U_{\psi_1}^\mathfrak a V_1^*\] and, therefore, using \ref{crossed} : \begin{eqnarray*} \tilde{J_{2,1}}^*V_2(J_{2,1}\underset{N^o}{_{a_1}\otimes_\beta}J_{\widehat{\Phi}})(\pi_{\psi_1}(\sigma^{2,1}_{-i/2}(y))\underset{N^o}{_{a_1}\otimes_\beta}1) &=& \pi_{\tilde{\psi_1}}(\mathfrak a(\sigma^{2,1}_{-i/2}(y))V_1(U_{\psi_1}^\mathfrak a)^*\\ &=&V_1\mathfrak a(\sigma^{2,1}_{-i/2}(y))(U_{\psi_1}^\mathfrak a)^* \end{eqnarray*} which, using \ref{crossed}, is equal to : \[V_1U_{\psi_1}^\mathfrak a(\pi_{\psi_1}(\sigma^{2,1}_{-i/2}(y))\underset{N^o}{_{a_1}\otimes_\beta}1)\] By density, we get : \[U_{\psi_1}^\mathfrak a=V_1^*\tilde{J_{2,1}}^*V_2(J_{2,1}\underset{N^o}{_{a_1}\otimes_\beta}J_{\widehat{\Phi}})\] and, therefore, using \ref{crossed} again : \begin{eqnarray*} 1_{H_{\psi_1}}\underset{N}{_b\otimes_\alpha}1_{H_\Phi}&=& 
V_1^*\tilde{J_{2,1}}^*V_2(J_{2,1}\underset{N^o}{_{a_1}\otimes_\beta}J_{\widehat{\Phi}}) (J_{\psi_1}\underset{N}{_b\otimes_\alpha}J_{\widehat{\Phi}})V_1^*J_{\tilde{\psi_1}}V_1\\ &=& V_1^*\tilde{J_{2,1}}^*V_2(u\underset{N}{_b\otimes_\alpha}1)V_1^*J_{\tilde{\psi_1}}V_1 \end{eqnarray*} which implies that : \[1_{H_{\tilde{\psi_1}}}\underset{N}{_b\otimes_\alpha}1_{H_\Phi}=\tilde{J_{2,1}}^*V_2(u\underset{N}{_b\otimes_\alpha}1)V_1^*J_{\tilde{\psi_1}}\] and : \[V_2(u\underset{N}{_b\otimes_\alpha}1)V_1^*=\tilde{J_{2,1}}J_{\tilde{\psi_1}}\] But $\tilde{J_{2,1}}J_{\tilde{\psi_1}}=J_{\tilde{\psi_2}}\tilde{J_{2,1}}$ is the unitary from $H_{\tilde{\psi_1}}$ onto $H_{\tilde{\psi_2}}$ which intertwines $\pi_{\tilde{\psi_1}}$ and $\pi_{\tilde{\psi_2}}$; from which we get the first result. \newline This formula gives also, where $a_2(n)=J_{\psi_2}\pi_{\psi_2}(b(n^*))J_{\psi_2}$, for all $n\in N$ : \begin{eqnarray*} U_{\psi_2}^\mathfrak a &=& V_2^*J_{\tilde{\psi_2}}V_2(J_{\psi_2}\underset{N^o}{_{a_2}\otimes_\beta}J_{\widehat{\Phi}})\\ &=&(u\underset{N}{_b\otimes_\alpha}1)V_1^*J_{\tilde{\psi_1}}\tilde{J_{2,1}}^*J_{\tilde{\psi_2}}V_2(J_{\psi_2}\underset{N^o}{_{a_2}\otimes_\beta}J_{\widehat{\Phi}})\\ &=&(u\underset{N}{_b\otimes_\alpha}1)V_1^*\tilde{J_{2,1}}^*V_2(J_{\psi_2}\underset{N^o}{_{a_2}\otimes_\beta}J_{\widehat{\Phi}})\\ &=&(u\underset{N}{_b\otimes_\alpha}1)U_{\psi_1}^\mathfrak a(J_{\psi_1}\underset{N}{_b\otimes_\alpha}J_{\widehat{\Phi}})V_1^*J_{\tilde{\psi_1}}\tilde{J_{2,1}}^*V_2(J_{\psi_2}\underset{N^o}{_{a_2}\otimes_\beta}J_{\widehat{\Phi}})\\ &=&(u\underset{N}{_b\otimes_\alpha}1)U_{\psi_1}^\mathfrak a(J_{\psi_1}\underset{N}{_b\otimes_\alpha}J_{\widehat{\Phi}})V_1^*V_1(u^*\underset{N}{_b\otimes_\alpha}1)V_2^*V_2(J_{\psi_2}\underset{N^o}{_{a_2}\otimes_\beta}J_{\widehat{\Phi}})\\ &=&(u\underset{N}{_b\otimes_\alpha}1)U_{\psi_1}^\mathfrak 
a(J_{\psi_1}\underset{N}{_b\otimes_\alpha}J_{\widehat{\Phi}})(u^*\underset{N}{_b\otimes_\alpha}1)(J_{\psi_2}\underset{N^o}{_{a_2}\otimes_\beta}J_{\widehat{\Phi}})\\ &=&(u\underset{N}{_b\otimes_\alpha}1)U_{\psi_1}^\mathfrak a(u^*\underset{N^o}{_{a_2}\otimes_\beta}1) \end{eqnarray*} from which we finish the proof of (i). Using the intertwining properties of $u$, (i) and (\cite{E5} 5.2), we then get (ii). Using then (ii) and the properties of $U^\mathfrak{a}_\psi$ (\cite{E5} 13.4) recalled in \ref{crossed}, we get (iii). \end{proof} \subsection{Proposition} \label{Uinv} {\it Let $\mathfrak{G}$ be a measured quantum groupoid, and $(b,\mathfrak a)$ an action of $\mathfrak{G}$ on a von Neumann algebra $A$; let $\psi$ be a $\delta$-invariant weight on $A$, bearing the density condition, as defined in \ref{action}; then : \newline (i) the unitary $U^\mathfrak a_\psi$ constructed in \ref{crossed} is equal to the implementation $V_\psi$ of $\mathfrak a$ constructed in \ref{action}. \newline (ii) the dual weight satisfies $\Delta_{\tilde{\psi}}^{it}=\Delta_\psi^{it}\underset{N}{_b\otimes_\alpha}(\delta\Delta_{\widehat{\Phi}})^{-it}$, where this last one-parameter group of unitaries had been defined in \ref{action}. } \begin{proof} Let $\xi\in D(_\alpha H_\Phi, \nu)$, $x$, $y$ in $\mathfrak{N}_\psi\cap\mathfrak{N}_\psi^*$, $a\in\mathfrak{N}_{\hat{T}^c}\cap\mathfrak{N}_{\hat{T}^c}^*\cap\mathfrak{N}_{\widehat{\Phi}^c}\cap\mathfrak{N}_{\widehat{\Phi}^c}^*$, such that $\Lambda_{\widehat{\Phi}^c}(a^*)$ belongs to the set $\widehat{\mathcal E_{\hat{\tau}}}$ introduced in (\cite{E5} 4.4).
We have, using \ref{crossed}: \[(\rho_\xi^{b, \alpha})^*S_{\tilde{\psi}}\mathfrak a(x^*)(\Lambda_\psi(y)\underset{\nu}{_b\otimes_\alpha}\Lambda_{\widehat{\Phi}^c}(a)) = (\rho_\xi^{b, \alpha})^*\mathfrak a(y^*)(\Lambda_\psi(x)\underset{\nu}{_b\otimes_\alpha}\Lambda_{\widehat{\Phi}^c}(a^*))\] and, as $\Lambda_{\widehat{\Phi}^c}(a^*)$ belongs to $D(_\alpha H_\Phi, \nu)$, thanks to (\cite{E5} 4.4) it is equal to : \[(id\underset{N}{_b*_\alpha}\omega_{\Lambda_{\widehat{\Phi}^c}(a^*), \xi})\mathfrak a(y^*)\Lambda_\psi(x) =\Lambda_\psi((id\underset{N}{_b*_\alpha}\omega_{\Lambda_{\widehat{\Phi}^c}(a^*), \xi})\mathfrak a(y^*)x)\] Let us suppose now that $x$ is analytic with respect to $\psi$; as $\delta^{1/2}\Lambda_{\widehat{\Phi}^c}(a^*)$ belongs to $D((H_\Phi)_\beta, \nu^o)$, thanks again to (\cite{E5} 4.4), we get, using (\cite{E5} 8.4.(iii)), that it is equal to : \begin{multline*} J_\psi\sigma_{-i/2}^\psi(x^*)J_\psi\Lambda_\psi[(id\underset{N}{_b*_\alpha}\omega_{\Lambda_{\widehat{\Phi}^c}(a^*), \xi})\mathfrak a(y^*)]=\\ = J_\psi\sigma_{-i/2}^\psi(x^*)J_\psi(id*\omega_{\delta^{1/2}\Lambda_{\widehat{\Phi}^c}(a^*), \xi})(V_\psi)\Lambda_\psi(y^*)\\ = (\rho_\xi^{b, \alpha})^* (J_\psi\sigma_{-i/2}^\psi(x^*)J_\psi\underset{\nu}{_b\otimes_\alpha}1)V_\psi(\Lambda_\psi(y^*)\underset{\nu^o}{_a\otimes_\beta}\delta^{1/2}\Lambda_{\widehat{\Phi}^c}(a^*)) \end{multline*} from which we get that : \[S_{\tilde{\psi}}\mathfrak a(x^*)(\Lambda_\psi(y)\underset{\nu}{_b\otimes_\alpha}\Lambda_{\widehat{\Phi}^c}(a))= (J_\psi\sigma_{-i/2}^\psi(x^*)J_\psi\underset{\nu}{_b\otimes_\alpha}1)V_\psi(\Lambda_\psi(y^*)\underset{\nu^o}{_a\otimes_\beta}\delta^{1/2}\Lambda_{\widehat{\Phi}^c}(a^*))\] and, taking a bounded net $x_i$ strongly converging to $1$, such that $\sigma_{-i/2}^\psi(x_i^*)$ is also converging to $1$, and using the fact that $S_{\tilde{\psi}}$ is closed, we get : \[S_{\tilde{\psi}}(\Lambda_\psi(y)\underset{\nu}{_b\otimes_\alpha}\Lambda_{\widehat{\Phi}^c}(a))=
V_\psi[J_\psi\Delta_\psi^{1/2}\Lambda_\psi(y)\underset{\nu^o}{_a\otimes_\beta}J_{\widehat{\Phi}}(\overline{\delta\Delta_{\widehat{\Phi}}})^{-1/2}\Lambda_{\widehat{\Phi}^c}(a)]\] from which we deduce that : \[V_\psi(J_\psi\underset{N}{_b\otimes_\alpha}J_{\widehat{\Phi}})(\Delta_\psi^{1/2}\underset{N}{_b\otimes_\alpha}\overline{\delta\Delta_{\widehat{\Phi}}}^{-1/2})\subset S_{\tilde{\psi}}\] where $\Delta_\psi^{1/2}\underset{N}{_b\otimes_\alpha}(\overline{\delta\Delta_{\widehat{\Phi}}})^{-1/2}$ is the infinitesimal generator of the one-parameter group of unitaries $\Delta^{it}_\psi\underset{N}{_b\otimes_\alpha}\delta^{-it}\Delta_{\widehat{\Phi}}^{-it}$ introduced in \ref{action}. But, on the other hand, for all $t\in\mathbb{R}$, we have, using \ref{crossed} : \begin{multline*} (\Delta^{it}_\psi\underset{N}{_b\otimes_\alpha}\delta^{-it}\Delta_{\widehat{\Phi}}^{-it})S_{\tilde{\psi}}\mathfrak a(x^*)(\Lambda_\psi(y)\underset{\nu}{_b\otimes_\alpha}\Lambda_{\widehat{\Phi}^c}(a))=\\ =(\Delta^{it}_\psi\underset{N}{_b\otimes_\alpha}\delta^{-it}\Delta_{\widehat{\Phi}}^{-it})\mathfrak a(y^*)(\Lambda_\psi(x)\underset{\nu}{_b\otimes_\alpha}\Lambda_{\widehat{\Phi}^c}(a^*)) \end{multline*} which, using \ref{action}, is equal to : \begin{multline*} \mathfrak a(\sigma^\psi_t(y^*))(\Lambda_\psi(\sigma_t^\psi(x))\underset{\nu}{_b\otimes_\alpha}S_{\widehat{\Phi}^c}\delta^{-it}\Delta_{\widehat{\Phi}}^{-it}\Lambda_{\widehat{\Phi}^c}(a))=\\ \mathfrak a(\sigma^\psi_t(y^*))(\Lambda_\psi(\sigma_t^\psi(x))\underset{\nu}{_b\otimes_\alpha}S_{\widehat{\Phi}^c}\delta^{-it}\Delta_{\widehat{\Phi}}^{-it}\Lambda_{\widehat{\Phi}^c}(a)) \end{multline*} which is equal, using again \ref{crossed}, to : \[S_{\tilde{\psi}}\mathfrak a(\sigma^\psi_t(x^*))(\Lambda_\psi(\sigma_t^\psi(y))\underset{\nu}{_b\otimes_\alpha}\delta^{-it}\Delta_{\widehat{\Phi}}^{-it}\Lambda_{\widehat{\Phi}^c}(a))\] Taking again a family $x_i$ converging to $1$, and using the closedness of $S_{\tilde{\psi}}$, we get that : 
\begin{multline*} (\Delta^{it}_\psi\underset{N}{_b\otimes_\alpha}\delta^{-it}\Delta_{\widehat{\Phi}}^{-it})S_{\tilde{\psi}}(\Lambda_\psi(y)\underset{\nu}{_b\otimes_\alpha}\Lambda_{\widehat{\Phi}^c}(a))=\\ S_{\tilde{\psi}}(\Lambda_\psi(\sigma_t^\psi(y))\underset{\nu}{_b\otimes_\alpha}\delta^{-it}\Delta_{\widehat{\Phi}}^{-it}\Lambda_{\widehat{\Phi}^c}(a))=\\ S_{\tilde{\psi}}(\Delta^{it}_\psi\underset{N}{_b\otimes_\alpha}\delta^{-it}\Delta_{\widehat{\Phi}}^{-it}) (\Lambda_\psi(y)\underset{\nu}{_b\otimes_\alpha}\Lambda_{\widehat{\Phi}^c}(a)) \end{multline*} from which, using \ref{crossed}, we deduce that \[(\Delta^{it}_\psi\underset{N}{_b\otimes_\alpha}\delta^{-it}\Delta_{\widehat{\Phi}}^{-it})S_{\tilde{\psi}}=S_{\tilde{\psi}}(\Delta^{it}_\psi\underset{N}{_b\otimes_\alpha}\delta^{-it}\Delta_{\widehat{\Phi}}^{-it})\] and, therefore, we have : \[V_\psi(J_\psi\underset{N}{_b\otimes_\alpha}J_{\widehat{\Phi}})(\Delta_\psi^{1/2}\underset{N}{_b\otimes_\alpha}\overline{\delta\Delta_{\widehat{\Phi}}}^{-1/2})= S_{\tilde{\psi}}\] and, by polar decomposition, we have : \[J_{\tilde{\psi}}=V_\psi(J_\psi\underset{N}{_b\otimes_\alpha}J_{\widehat{\Phi}})\] which, by definition of $U_\psi^\mathfrak{a}$, leads to (i). \newline We also get : \[\Delta_{\tilde{\psi}}^{1/2}=\Delta_\psi^{1/2}\underset{N}{_b\otimes_\alpha}\overline{\delta\Delta_{\widehat{\Phi}}}^{-1/2}\] which leads to (ii). \end{proof} \subsection{Corollary} \label{corstandard} {\it Let $\mathfrak{G}$ be a measured quantum groupoid, and $(b,\mathfrak a)$ an action of $\mathfrak{G}$ on a von Neumann algebra $A$; let us suppose that there exists on $A$ a $\delta$-invariant weight on $A$, bearing the density condition, as defined in \ref{action}; then, for any normal semi-finite faithful weight $\psi$ on $A$, the unitary $U^\mathfrak a_\psi$ constructed in \ref{crossed} is a standard implementation of $\mathfrak a$ as defined in \ref{crossed}. 
} \begin{proof} If $\psi$ is a $\delta$-invariant weight on $A$, bearing the density condition, as defined in \ref{action}, we have the result using \ref{Uinv}; for another weight, using \ref{propu}(iii), we get the result. \end{proof} \subsection{Corollary} \label{cordual} {\it Let $\mathfrak{G}$ be a measured quantum groupoid, and $(b,\mathfrak a)$ an action of $\mathfrak{G}$ on a von Neumann algebra $A$; let us suppose that $A$ is isomorphic to a crossed-product $B\rtimes_\mathfrak {b}\widehat{\mathfrak{G}}^o$ where $\mathfrak {b}$ is an action of $\widehat{\mathfrak{G}}^o$ on a von Neumann algebra $B$, and that this isomorphism sends $\mathfrak{a}$ on $\tilde{\mathfrak {b}}$. Then, for any normal semi-finite faithful weight $\psi$ on $A$, the unitary $U^\mathfrak a_\psi$ constructed in \ref{crossed} is a standard implementation of $\mathfrak a$ as defined in \ref{crossed}. } \begin{proof} We have recalled in \ref{crossed} that any dual weight on $B\rtimes_\mathfrak {b}\widehat{\mathfrak{G}}^o$ is a $\delta$-invariant weight on $B\rtimes_\mathfrak {b}\widehat{\mathfrak{G}}^o$, bearing the density condition; therefore, using \ref{corstandard}, we get the result. \end{proof} \subsection{Corollary} \label{cora} {\it Let $\mathfrak{G}$ be a measured quantum groupoid, and $(b,\mathfrak a)$ an action of $\mathfrak{G}$ on a von Neumann algebra $A$; let us consider the action $(1\underset{N}{_b\otimes_\alpha}\hat{\beta}, \underline{\mathfrak{a}})$ of $\mathfrak{G}$ on $A\underset{N}{_b*_\alpha}\mathcal L(H)$, introduced in \ref{crossed}; then, for any normal semi-finite faithful weight $\psi$ on $A\underset{N}{_b*_\alpha}\mathcal L(H)$, the unitary $U^{\underline{\mathfrak{a}}}_\psi$ is a standard implementation of the action $\underline{\mathfrak{a}}$. } \begin{proof} This is just a corollary of \ref{cordual} and of the biduality theorem, recalled in \ref{crossed}.
\end{proof} \subsection{Corollary} \label{corsigma} {\it Let $\mathfrak{G}$ be a measured quantum groupoid, and $(b, \mathfrak{a})$ an action of $\mathfrak{G}$ on a von Neumann algebra $A$; let $\psi$ be a $\delta$-invariant weight on $A$, bearing the density condition, as defined in \ref{action}; then, for any $x\in\widehat{M}'$, $t\in\mathbb{R}$, we have :} \[\sigma_t^{\tilde{\psi}}(1\underset{N}{_b\otimes_\alpha}x)=1\underset{N}{_b\otimes_\alpha}\Delta_{\Phi}^{it}x\Delta_{\Phi}^{-it}\] \begin{proof} Using \ref{Uinv}(ii), we get that : \[\sigma_t^{\tilde{\psi}}(1\underset{N}{_b\otimes_\alpha}x)=1\underset{N}{_b\otimes_\alpha}(\delta\Delta_{\widehat{\Phi}})^{-it}x(\delta\Delta_{\widehat{\Phi}})^{it}\] But, using (\cite{E5}3.11(ii)), we know that $(\delta\Delta_{\widehat{\Phi}})^{it}=(\hat{\delta}\Delta_\Phi)^{-it}$; as $\hat{\delta}$ is affiliated to $\widehat{M}$, we get the result. \end{proof} \subsection{Corollary} \label{corsigma2} {\it Let $\mathfrak{G}$ be a measured quantum groupoid, and $(b, \mathfrak{a})$ an action of $\mathfrak{G}$ on a von Neumann algebra $A$; let $\psi$ be a normal semi-finite faithful weight on $A$; then, for any $x$ in $M'$, $t\in\mathbb{R}$, we have :} \[\sigma_t^{\tilde{\tilde{\psi}}}(1\underset{N}{_b\otimes_\alpha}1\underset{N^o}{_{\hat{\alpha}}\otimes_\beta}x)=1\underset{N}{_b\otimes_\alpha}1\underset{N^o}{_{\hat{\alpha}}\otimes_\beta}\Delta_{\widehat{\Phi}}^{-it}x\Delta_{\widehat{\Phi}}^{it}\] \begin{proof} Let's apply \ref{corsigma} to the dual action $(1\underset{N}{_b\otimes_\alpha}\hat{\alpha}, \tilde{\mathfrak{a}})$ of $\mathfrak{G}^c$ on $A\rtimes_\mathfrak{a}\mathfrak{G}$, and the dual weight $\tilde{\psi}$, and we get the result. 
\end{proof} \subsection{Corollary} \label{corsigma3} {\it Let $\mathfrak{G}$ be a measured quantum groupoid, and $(b, \mathfrak{a})$ an action of $\mathfrak{G}$ on a von Neumann algebra $A$; let $\psi$ be a normal semi-finite faithful weight on $A$; let $(1\underset{N}{_b\otimes_\alpha}\hat{\beta}, \underline{\mathfrak{a}})$ be the action of $\mathfrak{G}$ on $A\underset{N}{_b*_\alpha}\mathcal L(H)$ obtained by transporting on $A\underset{N}{_b*_\alpha}\mathcal L(H)$ the bidual action and $\overline{\psi}_\mathfrak{a}$ be the normal semi-finite faithful weight on $A\underset{N}{_b*_\alpha}\mathcal L(H)$ obtained by transporting the bidual weight. Then, for any $x$ in $M'$, $t\in\mathbb{R}$, we have : \[\sigma_t^{\overline{\psi}_\mathfrak{a}}(1\underset{N}{_b\otimes_\alpha}x)=1\underset{N}{_b\otimes_\alpha}\Delta_{\widehat{\Phi}}^{-it}x\Delta_{\widehat{\Phi}}^{it}\]} \begin{proof} The canonical isomorphism between $A\underset{N}{_b*_\alpha}\mathcal L(H)$ and $(A\rtimes_\mathfrak{a}\mathfrak{G})\rtimes_{\tilde{\mathfrak{a}}}\widehat{\mathfrak{G}}^c$ sends, for all $x\in M'$, $1\underset{N}{_b\otimes_\alpha}x$ on $1\underset{N}{_b\otimes_\alpha}1\underset{N^o}{_{\hat{\alpha}}\otimes_\beta}x$ (cf. \cite{E5} 11.2). So, the result is a straightforward consequence of \ref{corsigma2}. \end{proof} \section{An auxiliary weight $\underline{\psi}$.} \label{auxilliary} If $b$ is a normal faithful non degenerate anti-homomorphism from $N$ into a von Neumann algebra $A$, such that there exists a normal faithful semi-finite operator-valued weight $\mathfrak{T}$ from $A$ on $b(N)$, we associate to the weight $\psi=\nu^o\circ b^{-1}\circ\mathfrak{T}$ a weight $\underline{\psi}$ on $A\underset{N}{_b*_\alpha}\mathcal L (H)$ (\ref{psibarre}); we calculate its modular automorphism group (\ref{psibarre2}), and the GNS representation of $A\underset{N}{_b*_\alpha}\mathcal L (H)$ given by this weight (\ref{psibarre3}). 
\subsection{Definitions} \label{defw} Let $b$ be an injective $*$-antihomomorphism from a von Neumann algebra $N$ into a von Neumann algebra $A$; we shall then say that $(N, b, A)$ (or simply $A$) is a faithful right von Neumann $N$-module. If there exists a normal semi-finite faithful operator-valued weight $\mathfrak{T}$ from $A$ onto $b(N)$, we shall say that this faithful right $N$-module is weighted. \newline Let then $\psi$ be a normal faithful semi-finite weight on $A$; if, for all $t$ in $\mathbb{R}$, $n$ in $N$, we have $\sigma_t^\psi(b(n))=b(\sigma_{-t}^\nu(n))$, then there exists a normal semi-finite faithful operator-valued weight $\mathfrak{T}$ from $A$ onto $b(N)$ such that $\psi=\nu^o\circ b^{-1}\circ \mathfrak{T}$; such a weight $\psi$ on $A$ will be said lifted from $\nu^o$ by $\mathfrak{T}$ (or, simply, a lifted weight). \newline If $\psi$ is a normal semi-finite faithful weight on $A$, lifted from $\nu^o$ by $\mathfrak{T}$, then the weight $\psi$ bears the density property introduced in (\cite{E5}, 8.1), recalled in \ref{action}. Namely, using (\cite{E5}, 2.2.1), one gets that $D(_aH_\psi, \nu)\cap D((H_\psi)_b, \nu^o)$ contains all the vectors of the form $\Lambda_\psi(x)$, where $x\in \mathfrak{N}_{\mathfrak{T}}\cap\mathfrak{N}_{\mathfrak{T}}^*\cap\mathfrak{N}_\psi\cap\mathfrak{N}_\psi^*$ is analytical with respect to $\psi$, and such that, for any $z\in\mathbb{C}$, $\sigma_z(x)$ belongs to $\mathfrak{N}_{\mathfrak{T}}\cap\mathfrak{N}_{\mathfrak{T}}^*\cap\mathfrak{N}_\psi\cap\mathfrak{N}_\psi^*$; therefore $D(_aH_\psi, \nu)\cap D((H_\psi)_b, \nu^o)$ is dense in $H_\psi$, which is the density property. \newline If $(b, \mathfrak{a})$ is an action of a measured quantum groupoid $\mathfrak{G}=(N, M, \alpha, \beta, \Gamma, T, T', \nu)$ on a von Neumann algebra $A$, we shall say that this action is weighted if the faithful right $N$-module $(N, b, A)$ is weighted. 
\subsection{Lemma} \label{propw} {\it Let $(N, b, A)$ be a faithful weighted right von Neumann $N$-module, and let $\mathfrak{T}$ be a normal semi-finite faithful operator-valued weight from $A$ onto $b(N)$; let $\alpha$ be a normal faithful representation of $N$ on a Hilbert space $H$ and $\nu$ a normal semi-finite faithful weight on $N$; then, it is possible to define a canonical normal semi-finite faithful operator-valued weight $(\mathfrak{T}\underset{N}{_b*_\alpha}id)$ from $A\underset{N}{_b*_\alpha}\mathcal L(H)$ onto $1\underset{N}{_b\otimes_\alpha}\alpha(N)'$ (which is equal to $b(N)\underset{N}{_b*_\alpha}\mathcal L(H)$, by (\cite{E5}, 2.4)), such that, if $\psi$ denotes the weight on $A$ lifted from $\nu^o$ by $\mathfrak{T}$, we get, for any $X\in (A\underset{N}{_b*_\alpha}\mathcal L(H))^+$, that $(\mathfrak{T}\underset{N}{_b*_\alpha}id)(X)=1\underset{N}{_b\otimes_\alpha}(\psi\underset{\nu}{_b*_\alpha}id)(X)$, where $\mathfrak{T}\underset{N}{_b*_\alpha}id$ and $\psi\underset{\nu}{_b*_\alpha}id$ are slice maps introduced in \cite{E4} and recalled in (\cite{E5}, 2.5).} \begin{proof} Let us represent $A$ on a Hilbert space $\mathcal H$; using Haagerup's theorem (\cite{T}, 4.24), there exists a canonical normal semi-finite faithful operator valued weight $\mathfrak{T}^{-1}$ from $b(N)'$ onto $A'$; considering the representation of $b(N)'$ on $\mathcal H\underset{\nu}{_b\otimes_\alpha}H$, and using again Haagerup's theorem, we obtain another normal semi-finite faithful operator-valued weight $(\mathfrak{T}^{-1})^{-1}$ from the commutant of $A'$ on $\mathcal H\underset{\nu}{_b\otimes_\alpha}H$ (which is $A\underset{N}{_b*_\alpha}\mathcal L(H)$) onto the commutant of $b(N)'$ on $\mathcal H\underset{\nu}{_b\otimes_\alpha}H$ (which is $b(N)\underset{N}{_b*_\alpha}\mathcal L(H)$). 
As both $\mathfrak{T}$ and $(\mathfrak{T}^{-1})^{-1}$ are obtained by taking the commutants, within two different representations, of the same operator-valued weight $\mathfrak{T}^{-1}$, a closer look at this construction leads (\cite{EN}, 10.2) to the fact that $(\mathfrak{T}^{-1})^{-1}=(\mathfrak{T}\underset{N}{_b*_\alpha}id)$. The link between $(\mathfrak{T}\underset{N}{_b*_\alpha}id)$ and $(\psi\underset{\nu}{_b*_\alpha}id)$ is recalled in (\cite{E5} 2.5). \end{proof} \subsection{ Proposition} \label{subalgebra} {\it Let $(N, b, A)$ be a von Neumann faithful right $N$-module, and let $\alpha$ be a normal faithful non degenerate representation of $N$ on a Hilbert space $\mathcal H$ and $\nu$ a normal semi-finite faithful weight on $N$; then : \newline (i) let's represent $A$ on a Hilbert space $\mathcal K$; the linear set generated by all operators on $\mathcal K\underset{\nu}{_\beta\otimes_\alpha}\mathcal H$, of the form $\rho^{\beta, \alpha}_{\xi_1}a(\rho^{\beta, \alpha}_{\xi_2})^*$, with $a$ in $A$ and $\xi_1$, $\xi_2$ in $D(_\alpha \mathcal H, \nu)$, is a $*$-algebra, which is weakly dense in $A\underset{N}{_b*_\alpha}\mathcal L(\mathcal{H})$. \newline (ii) let $\psi$ be a normal faithful semi-finite weight on $A$, and let's represent $A$ on $H_\psi$; then, for any $a$ in $\mathfrak{N}_\psi$ and $\xi$ in $D(_\alpha \mathcal H, \nu)$, $\Lambda_\psi(a)\underset{\nu}{_b\otimes_\alpha}\xi$ belongs to $D(H_\psi\underset{\nu}{_b\otimes_\alpha}\mathcal H, \psi^o)$ (where we deal with the representation $x\mapsto x\underset{N}{_b\otimes_\alpha}1$ of $A^o=J_\psi AJ_\psi$), and we have $\theta^{\psi^o}( \Lambda_\psi(a)\underset{\nu}{_b\otimes_\alpha}\xi, \Lambda_\psi(a)\underset{\nu}{_b\otimes_\alpha}\xi)=\rho^{b, \alpha}_\xi a a^*(\rho^{b, \alpha}_\xi)^*$. 
\newline (iii) for all $n\in N$, let us define $a(n)=J_\psi b(n^*)J_\psi$; let $\mathfrak{G}=(N, M, \alpha, \beta, \Gamma, T, T', \nu)$ be a measured quantum groupoid; then, the representation of $A\underset{N}{_b*_\alpha}\mathcal L(H)$ on $H\underset{\nu}{_\beta\otimes_a}H_\psi\underset{\nu}{_b\otimes_\alpha}H$ defined by $x\mapsto 1\underset{N}{_\beta\otimes_a}x$ is standard, when we equip the Hilbert space with the antilinear involutive isometry $J$ defined, for any $\xi$, $\eta$ in $D(_\alpha H, \nu)$, $\zeta$ in $H_\psi$, by : \[J(J_{\widehat{\Phi}}\eta\underset{\nu}{_\beta\otimes_a}\zeta\underset{\nu}{_b\otimes_\alpha}\xi)=J_{\widehat{\Phi}}\xi\underset{\nu}{_\beta\otimes_a}J_\psi\zeta\underset{\nu}{_b\otimes_\alpha}\eta\] and with the closed cone $\mathcal P$ generated by all elements of the form $J_{\widehat{\Phi}}\xi\underset{\nu}{_\beta\otimes_a}\zeta'\underset{\nu}{_b\otimes_\alpha}\xi$, when $\xi$ is in $D(_\alpha H, \nu)$, and $\zeta'$ in the cone $\mathcal P_\psi$ given by the Tomita-Takesaki theory associated to the weight $\psi$. 
\newline (iv) let $\varphi$ be a normal semi-finite faithful weight on $A\underset{N}{_b*_\alpha}\mathcal L(H)$; then $\Lambda_\psi (a)\underset{\nu}{_b\otimes_\alpha}\xi$ belongs to $\mathcal D((\frac{d\varphi}{d\psi^o})^{1/2})$ if and only if $\rho^{b, \alpha}_\xi a a^*(\rho^{b, \alpha}_\xi)^*$ belongs to $\mathfrak{M}_{\varphi}^+$, and then : \[\varphi(\rho^{b, \alpha}_\xi a a^*(\rho^{b, \alpha}_\xi)^*)=\|(\frac{d\varphi}{d\psi^o})^{1/2}(\Lambda_\psi(a)\underset{\nu}{_b\otimes_\alpha}\xi)\|^2\] Moreover, if $\Lambda_\psi (a)\underset{N}{_b\otimes_\alpha}\xi$ belongs to $\mathcal D((\frac{d\varphi}{d\psi^o})^{1/2})$, the vector $(\frac{d\varphi}{d\psi^o})^{1/2}(\Lambda_\psi (a)\underset{N}{_b\otimes_\alpha}\xi)$ belongs to $D(H_\psi\underset{\nu}{_b\otimes_\alpha}H, \varphi)$, and the canonical isomorphism between $H_\varphi$ and $H\underset{\nu}{_\beta\otimes_a}H_\psi\underset{\nu}{_b\otimes_\alpha}H$ sends $R^{\varphi}((\frac{d\varphi}{d\psi^o})^{1/2}(\Lambda_\psi (a)\underset{\nu}{_b\otimes_\alpha}\xi))^*(\zeta\underset{\nu}{_b\otimes_\alpha}\eta)$ on $J_{\widehat{\Phi}}\xi\underset{\nu}{_\beta\otimes_a}J_\psi aJ_\psi\zeta\underset{\nu}{_b\otimes_\alpha}\eta$. } \begin{proof} Using \ref{spatial}, we get, for $a_1$, $a_2$ in $A$, and $\xi_1$, $\xi_2$, $\xi_3$, $\xi_4$ in $D(_\alpha \mathcal H, \nu)$, that : \[\rho^{\beta, \alpha}_{\xi_1}a_1(\rho^{\beta, \alpha}_{\xi_2})^*\rho^{\beta, \alpha}_{\xi_3}a_2(\rho^{\beta, \alpha}_{\xi_4})^*=\rho^{\beta, \alpha}_{\xi_1}a_1b(<\xi_3, \xi_2>_{\alpha, \nu})a_2(\rho^{\beta, \alpha}_{\xi_4})^*\] from which we see that this linear set is indeed an algebra; moreover, it is clear that it is invariant under taking the adjoint. 
Let's take $c\in A'$; we have : \begin{eqnarray*} \rho^{\beta, \alpha}_{\xi_1}a(\rho^{\beta, \alpha}_{\xi_2})^*(c\underset{N}{_\beta\otimes_\alpha}1) &=&\rho^{\beta, \alpha}_{\xi_1}ac(\rho^{\beta, \alpha}_{\xi_2})^*\\ &=&\rho^{\beta, \alpha}_{\xi_1}ca(\rho^{\beta, \alpha}_{\xi_2})^*\\ &=&(c\underset{N}{_\beta\otimes_\alpha}1) \rho^{\beta, \alpha}_{\xi_1}a(\rho^{\beta, \alpha}_{\xi_2})^* \end{eqnarray*} from which we get that $\rho^{\beta, \alpha}_{\xi_1}a(\rho^{\beta, \alpha}_{\xi_2})^*$ belongs to $A\underset{N}{_\beta*_\alpha}\mathcal L(\mathcal{H})$. Let now $X\in A\underset{N}{_\beta*_\alpha}\mathcal L(\mathcal{H})$, and let $(e_i)_{i\in I}$ be a $(\alpha, \nu)$-orthogonal basis of $\mathcal H$; we get that $(id\underset{\nu}{_\beta*_\alpha}\omega_{e_i, e_j})(X)$ belongs to $A$, and we have, when we take the weak limits over the finite subsets $J$, $J'$ of $I$ : \begin{eqnarray*} X&=&lim_{J, J'}\sum_{i\in J, j\in J'}(1\underset{N}{_\beta\otimes_\alpha}\theta^{\alpha, \nu}(e_i, e_i))X(1\underset{N}{_\beta\otimes_\alpha}\theta^{\alpha, \nu}(e_j, e_j))\\ &=&lim_{J, J'}\sum_{i\in J, j\in J'}\rho^{\beta, \alpha}_{e_i}(id\underset{\nu}{_\beta*_\alpha}\omega_{e_i, e_j})(X)(\rho^{\beta, \alpha}_{e_j})^* \end{eqnarray*} which proves (i). \newline Let $a\in \mathfrak{N}_\psi$, $\xi\in D(_\alpha \mathcal H, \nu)$; then, for all $x\in\mathfrak{N}_\psi$, we have : \begin{eqnarray*} J_\psi xJ_\psi\Lambda_\psi (a)\underset{\nu}{_b\otimes_\alpha}\xi &=& aJ_\psi\Lambda_\psi (x)\underset{\nu}{_b\otimes_\alpha}\xi\\ &=&\rho^{b, \alpha}_\xi aJ_\psi\Lambda_\psi(x) \end{eqnarray*} Therefore, $\Lambda_\psi (a)\underset{\nu}{_b\otimes_\alpha}\xi$ belongs to $D(H_\psi\underset{\nu}{_b\otimes_\alpha}\mathcal H, \psi^o)$, and $R^{\psi^o}(\Lambda_\psi (a)\underset{\nu}{_b\otimes_\alpha}\xi)=\rho^{b, \alpha}_\xi a$. 
So, we get that $\theta^{\psi^o}(\Lambda_\psi (a)\underset{\nu}{_b\otimes_\alpha}\xi, \Lambda_\psi (a)\underset{\nu}{_b\otimes_\alpha}\xi)=\rho_\xi^{b, \alpha}aa^*(\rho_\xi^{b, \alpha})^*$, which is (ii). \newline By \cite{S}(3.1), we know that $A\underset{N}{_b*_\alpha}\mathcal L(H)$ has a standard representation $x\mapsto x\otimes_\psi 1$ on the Hilbert space $(H_\psi\underset{\nu}{_b\otimes_\alpha}H)\otimes_\psi\overline{(H_\psi\underset{\nu}{_b\otimes_\alpha}H)}$. Using then (ii) and \cite{S}(0.3.1), we get that this Hilbert space is isomorphic to $H\underset{\nu}{_\beta\otimes_a}H_\psi\underset{\nu}{_b\otimes_\alpha}H$, and that this isomorphism sends, for $b\in\mathfrak{N}_\psi$, $\eta\in D(_\alpha \mathcal H, \nu)$ : \newline a) the vector $(\Lambda_\psi(a)\underset{\nu}{_b\otimes_\alpha}\xi)\otimes_\psi\overline{(\Lambda_\psi(b)\underset{\nu}{_b\otimes_\alpha}\eta)}$ on $J_{\widehat{\Phi}}\eta\underset{\nu}{_\beta\otimes_a}J_\psi bJ_\psi \Lambda_\psi (a)\underset{\nu}{_b\otimes_\alpha}\xi$, \newline b) the standard representation $x\mapsto x\otimes_\psi 1$ on the representation $x\mapsto 1\underset{N}{_\beta\otimes_a}x$, \newline c) the antilinear involutive isometry which sends $(\Lambda_\psi(a)\underset{\nu}{_b\otimes_\alpha}\xi)\otimes_\psi\overline{(\Lambda_\psi(b)\underset{\nu}{_b\otimes_\alpha}\eta)}$ to $(\Lambda_\psi(b)\underset{\nu}{_b\otimes_\alpha}\eta)\otimes_\psi\overline{(\Lambda_\psi(a)\underset{\nu}{_b\otimes_\alpha}\xi)}$ on $J$, \newline d) the cone generated by all elements of the form $(\Lambda_\psi(a)\underset{\nu}{_b\otimes_\alpha}\xi)\otimes_\psi\overline{(\Lambda_\psi(a)\underset{\nu}{_b\otimes_\alpha}\xi)}$ on $\mathcal P$, which gives (iii). 
\newline Using (ii), we get that : \[\varphi(\rho^{b, \alpha}_\xi a a^*(\rho^{b, \alpha}_\xi)^*)=\varphi(\theta^{\psi^o}( \Lambda_\psi(a)\underset{\nu}{_b\otimes_\alpha}\xi, \Lambda_\psi(a)\underset{\nu}{_b\otimes_\alpha}\xi))\] and, by definition of the spatial derivative, we know that, if $\Lambda_\psi(a)\underset{\nu}{_b\otimes_\alpha}\xi$ belongs to $\mathcal D((\frac{d\varphi}{d\psi^o})^{1/2})$, we have : \[\varphi(\rho^{b, \alpha}_\xi a a^*(\rho^{b, \alpha}_\xi)^*)=\varphi(\theta^{\psi^o}( \Lambda_\psi(a)\underset{\nu}{_b\otimes_\alpha}\xi, \Lambda_\psi(a)\underset{\nu}{_b\otimes_\alpha}\xi))=\|(\frac{d\varphi}{d\psi^o})^{1/2}(\Lambda_\psi(a)\underset{\nu}{_b\otimes_\alpha}\xi)\|^2\] and, if $\Lambda_\psi(a)\underset{\nu}{_b\otimes_\alpha}\xi$ does not belong to $\mathcal D((\frac{d\varphi}{d\psi^o})^{1/2})$, we know that $\varphi(\rho^{b, \alpha}_\xi a a^*(\rho^{b, \alpha}_\xi)^*)=+\infty$. So, we have the first part of (iv). Then, the second part of (iv) is given by \cite{S}(3.2) and (iii). \end{proof} \subsection{Proposition} \label{psibarre} {\it Let $\mathfrak{G}=(N, M, \alpha, \beta, \Gamma, T, T', \nu)$ be a measured quantum groupoid; let $(N, b, A)$ be a faithful weighted right von Neumann $N$-module, and let $\psi$ be a normal semi-finite faithful weight on $A$ lifted from $\nu^o$ in the sense of \ref{defw}. Then : \newline (i) it is possible to define a one-parameter group of unitaries $\Delta_\psi^{it}\underset{\nu}{_b\otimes_\alpha}\Delta_{\widehat{\Phi}}^{-it}$ on $H_\psi\underset{\nu}{_b\otimes_\alpha}H$, with natural values on elementary tensors. This one-parameter group of unitaries implements on $A\underset{N}{_b*_\alpha}\mathcal L(H)$ the one-parameter group of automorphisms $\sigma_t^\psi\underset{N}{_b*_\alpha}Ad\Delta_{\widehat{\Phi}}^{-it}$. 
\newline (ii) there exists a normal semi-finite faithful weight $\underline{\psi}$ on $A\underset{N}{_b*_\alpha}\mathcal L(H)$ such that the spatial derivative $\frac{d\underline{\psi}}{d\psi^o}$ is equal to the generator $\Delta_\psi\underset{\nu}{_b\otimes_\alpha}\Delta_{\widehat{\Phi}}^{-1}$ of the one-parameter group of unitaries constructed in (i); the modular automorphism group $\sigma_t^{\underline{\psi}}$ is equal to the automorphism group $\sigma_t^\psi\underset{N}{_b*_\alpha}Ad \Delta_{\widehat{\Phi}}^{-it}$ constructed in (i). \newline (iii) for any $a$ in $\mathfrak{N}_\psi\cap\mathfrak{N}_\psi^*$, and $\xi\in D(_\alpha H, \nu)\cap \mathcal D(\Delta_{\widehat{\Phi}}^{-1/2})$, such that $\Delta_{\widehat{\Phi}}^{-1/2}\xi$ belongs to $D(_\alpha H, \nu)$, we have :} \[\underline{\psi}(\rho_\xi^{b, \alpha}aa^*(\rho_\xi^{b, \alpha})^*)=\|\Delta_\psi^{1/2}\Lambda_\psi (a)\underset{\nu}{_b\otimes_\alpha}\Delta_{\widehat{\Phi}}^{-1/2}\xi\|^2\] \begin{proof} If $\xi\in D(_\alpha H, \nu)$, we get, for all $t\in \mathbb{R}$ and $n\in \mathfrak{N}_\nu$ : \begin{eqnarray*} \alpha(n)\Delta_{\widehat{\Phi}}^{-it}\xi&=&\Delta_{\widehat{\Phi}}^{-it}\sigma_t^{\widehat{\Phi}}(\alpha(n))\xi\\ &=&\Delta_{\widehat{\Phi}}^{-it}\alpha(\sigma_t^\nu(n))\xi\\ &=&\Delta_{\widehat{\Phi}}^{-it}R^{\alpha, \nu}(\xi)\Delta_\nu^{it}\Lambda_\nu(n) \end{eqnarray*} from which we get that $\Delta_{\widehat{\Phi}}^{-it}\xi$ belongs to $D(_\alpha H, \nu)$, and $R^{\alpha, \nu}(\Delta_{\widehat{\Phi}}^{-it}\xi)=\Delta_{\widehat{\Phi}}^{-it}R^{\alpha, \nu}(\xi)\Delta_\nu^{it}$. Therefore, we have $<\Delta_{\widehat{\Phi}}^{-it}\xi, \Delta_{\widehat{\Phi}}^{-it}\xi>_{\alpha, \nu}^o=\sigma_{-t}^\nu(<\xi, \xi>_{\alpha, \nu}^o)$. 
Taking now $\eta\in H_\psi$, we get : \begin{eqnarray*} \|\Delta_\psi^{it}\eta\underset{\nu}{_b\otimes_\alpha}\Delta_{\widehat{\Phi}}^{-it}\xi\|^2 &=&(b(<\Delta_{\widehat{\Phi}}^{-it}\xi, \Delta_{\widehat{\Phi}}^{-it}\xi>_{\alpha, \nu}^o)\Delta_\psi^{it}\eta|\Delta_\psi^{it}\eta)\\ &=&(b(\sigma_{-t}^\nu(<\xi, \xi>_{\alpha, \nu}^o))\Delta_\psi^{it}\eta|\Delta_\psi^{it}\eta)\\ &=&(\sigma_t^\psi(b(<\xi, \xi>_{\alpha, \nu}^o))\Delta_\psi^{it}\eta|\Delta_\psi^{it}\eta)\\ &=&(b(<\xi, \xi>_{\alpha, \nu}^o)\eta|\eta)\\ &=&\|\eta\underset{\nu}{_b\otimes_\alpha}\xi\|^2 \end{eqnarray*} from which we get the existence of the one-parameter group of unitaries. It is then easy to finish the proof of (i). \newline As $(\Delta_\psi^{it}\underset{\nu}{_b\otimes_\alpha}\Delta_{\widehat{\Phi}}^{-it})(J_\psi xJ_\psi\underset{N}{_b\otimes_\alpha}1)(\Delta_\psi^{-it}\underset{\nu}{_b\otimes_\alpha}\Delta_{\widehat{\Phi}}^{it})= J_\psi \sigma_t^\psi(x)J_\psi\underset{N}{_b\otimes_\alpha}1$, we obtain (\cite{T}, 3.11) that there exists a normal faithful semi-finite weight $\underline{\psi}$ on $A\underset{N}{_b*_\alpha}\mathcal L(H)$ such that : \[\frac {d\underline{\psi}}{d\psi^o}=\Delta_\psi\underset{\nu}{_b\otimes_\alpha}\Delta_{\widehat{\Phi}}^{-1}\] Moreover, the modular automorphism group $\sigma_t^{\underline{\psi}}$ is then equal to the one-parameter automorphism group $\sigma_t^\psi\underset{N}{_b*_\alpha}Ad\Delta_{\widehat{\Phi}}^{-it}$, constructed in (i), which finishes the proof of (ii). 
\newline So, using now \ref{subalgebra}(iv) applied to $\underline{\psi}$, we get that $\rho_\xi^{b, \alpha}aa^*(\rho_\xi^{b, \alpha})^*$ belongs to $\mathfrak{M}_{\underline{\psi}}^+$ if and only if $\Lambda_\psi (a)\underset{\nu}{_b\otimes_\alpha}\xi$ belongs to $\mathcal D(\Delta_\psi^{1/2}\underset{\nu}{_b\otimes_\alpha}\Delta_{\widehat{\Phi}}^{-1/2})$, and then, we have : \[\underline{\psi}(\rho_\xi^{b, \alpha}aa^*(\rho_\xi^{b, \alpha})^*)=\|\Delta_\psi^{1/2}\Lambda_\psi (a)\underset{\nu}{_b\otimes_\alpha}\Delta_{\widehat{\Phi}}^{-1/2}\xi\|^2\] from which we get (iii). \end{proof} \subsection{Corollary} \label{corunderline} {\it Let $\mathfrak{G}=(N, M, \alpha, \beta, \Gamma, T, T', \nu)$ be a measured quantum groupoid; let $(N, b, A)$ be a faithful weighted right von Neumann $N$-module, $\psi_1$ (resp. $\psi_2$) be a normal faithful semi-finite weight on $A$ lifted from $\nu^o$ and $\underline{\psi_1}$ (resp. $\underline{\psi_2}$) be the normal semi-finite faithful weight on $A\underset{N}{_b*_\alpha}\mathcal L(H)$ constructed in \ref{psibarre}(ii); then : \newline (i) the cocycle $(D\psi_1:D\psi_2)_t$ belongs to $A\cap b(N)'$; \newline (ii) we have : $(D\underline{\psi_1} : D\underline{\psi_2})_t=(D\psi_1:D\psi_2)_t\underset{N}{_b\otimes_\alpha}1$. } \begin{proof} As $\psi_1$ and $\psi_2$ are lifted weights, (i) is well known (\cite{T}, 4.22. (iii)). 
\newline Let $(\mathfrak{H}, \pi, J, \mathcal P)$ be a standard representation of the von Neumann algebra $A$; then $A^o$ is represented on $\mathfrak{H}$ by $JAJ$; for any normal semi-finite faithful weight $\psi$ on $A$, we have $\frac{d\psi}{d\psi^o}=\Delta_\psi^{1/2}$; moreover, we have then : \begin{align*} (\frac{d\psi_1}{d\psi_1^o})^{it}(D\psi_1^o:D\psi_2^o)_t(\frac{d\psi_2^o}{d\psi_2})^{it} &= (\frac{d\psi_1}{d\psi_1^o})^{it}(\frac{d\psi_1^o}{d\psi_1})^{it}(\frac{d\psi_2^o}{d\psi_1})^{-it}(\frac{d\psi_2^o}{d\psi_2})^{it}\\ &= (\frac{d\psi_1}{d\psi_2^o})^{it}(\frac{d\psi_2}{d\psi_2^o})^{-it}\\ &= (D\psi_1: D\psi_2)_{t} \end{align*} and, therefore $(D\psi_1^o:D\psi_2^o)_t=\Delta_{\psi_1}^{-it}(D\psi_1: D\psi_2)_{t}\Delta_{\psi_2}^{it}$. By similar arguments, we have on $\mathfrak{H}\underset{\nu}{_b\otimes_\alpha}H$ : \begin{align*} (D\underline{\psi_1} : D\underline{\psi_2})_t &= (\frac{d\underline{\psi_1}}{d\psi_1^o})^{it}(\frac{d\psi_1^o}{d\underline{\psi_2}})^{it}\\ &=(\frac{d\underline{\psi_1}}{d\psi_1^o})^{it}(D\psi_1^o:D\psi_2^o)_t(\frac{d\underline{\psi_2}}{d\psi_2^o})^{-it} \end{align*} As $(D\psi_1^o:D\psi_2^o)_t$ belongs to $JAJ\underset{N}{_b\otimes_\alpha}1_H$ and is therefore equal to : \[\Delta_{\psi_1}^{-it}(D\psi_1:D\psi_2)_{t}\Delta_{\psi_2}^{it}\underset{N}{_b\otimes_\alpha}1_H\] we obtain, using \ref{psibarre}(ii), that $(D\underline{\psi_1} : D\underline{\psi_2})_t$ is equal to : \[(\Delta_{\psi_1}^{it}\underset{N}{_b\otimes_\alpha}\Delta_{\widehat{\Phi}}^{-it})(\Delta_{\psi_1}^{-it}(D\psi_1:D\psi_2)_{t}\Delta_{\psi_2}^{it}\underset{N}{_b\otimes_\alpha}1_H)(\Delta_{\psi_2}^{-it}\underset{N}{_b\otimes_\alpha}\Delta_{\widehat{\Phi}}^{it})\] from which we get the result. 
\end{proof} \subsection{Remarks} \label{example} Let us consider the trivial action $(id, \beta)$ of $\mathfrak{G}$ on $N^o$ (\cite{E5}, 6.2); it is clearly a weighted action (with the identity of $N^o$ as operator-valued weight); the crossed product is then $\widehat{M}'$, and the dual action is equal to $\widehat{\Gamma}^c$ (\cite{E5}, 9.4); the operator-valued weight from $\widehat{M}'$ onto $\beta(N)$ is then $\hat{T}^c$, and, therefore, the dual weight $\tilde{\nu^o}$ of the weight $\nu^o$ on $N^o$ is the Haar weight $\widehat{\Phi}^c$; by the biduality theorem (\ref{action}), we get that the crossed-product $\widehat{M}'\rtimes_{\widehat{\Gamma}^c}\widehat{\mathfrak{G}}^c$ is isomorphic to $N^o*\mathcal L(H)=\alpha(N)'$; transporting the bidual weight $\tilde{\tilde{\nu^o}}$ on $\widehat{M}'\rtimes_{\widehat{\Gamma}^c}\widehat{\mathfrak{G}}^c$ by this isomorphism, we obtain the weight $\overline{\nu^o_\beta}$ on $\alpha(N)'$, which verifies, thanks to \ref{crossed}, for all $\xi\in D(_\alpha H, \nu)\cap\mathcal D(\Delta_{\widehat{\Phi}}^{-1/2})$ : \[\overline{\nu^o_\beta}(\theta^{\alpha, \nu}(\xi, \xi))=\|\Delta_{\widehat{\Phi}}^{-1/2}\xi\|^2\] and, for all $t\in\mathbb{R}$, $x\in\alpha(N)'$, $\sigma_t^{\overline{\nu^o_\beta}}(x)=\Delta_{\widehat{\Phi}}^{-it}x\Delta_{\widehat{\Phi}}^{it}$. 
\newline On the other hand, for any $y\in\mathfrak{N}_{\hat{T}^c}\cap\mathfrak{N}_{\widehat{\Phi}^c}$, $z\in \mathfrak{N}_{T^{oc}}\cap\mathfrak{N}_{\Phi^{oc}}$, we have, by construction of $\overline{\nu^o_\beta}$ : \[\overline{\nu^o_\beta}(y^*z^*zy)=\widehat{\Phi^c}(y^*T^{oc}(z^*z)y)=\|\Lambda_{\widehat{\Phi}^c}(y)\underset{\nu^o}{_{\hat{\alpha}}\otimes_\beta}\Lambda_{\Phi^{oc}}(z)\|^2\] Let now $(b, \mathfrak{a})$ be an action of $\mathfrak{G}$ on a von Neumann algebra $A$, and $\psi$ a normal semi-finite faithful weight on $A$; by construction of $\overline{\psi_\mathfrak{a}}$, we have, for any $x\in\mathfrak{N}_\psi$ : \[\overline{\psi_\mathfrak{a}}(\mathfrak{a}(x^*)(1\underset{N}{_b\otimes_\alpha}y^*z^*zy)\mathfrak{a}(x))= \|\Lambda_\psi(x)\underset{\nu}{_b\otimes_\alpha}\Lambda_{\widehat{\Phi}^c}(y)\underset{\nu^o}{_{\hat{\alpha}}\otimes_\beta}\Lambda_{\Phi^{oc}}(z)\|^2\] and, by applying (\cite{E5},13.3) to the weight $\tilde{\tilde{\nu^o}}$, we get, for any $X\in\mathfrak{N}_{\overline{\nu^o_\beta}}$ such that $\Lambda_{\overline{\nu^o_\beta}}(X)$ belongs to $D(_\alpha H_{\overline{\nu^o_\beta}}, \nu)$ : \[\overline{\psi_\mathfrak{a}}(\mathfrak{a}(x^*)(1\underset{N}{_b\otimes_\alpha}X^*X)\mathfrak{a}(x))= \|\Lambda_\psi(x)\underset{\nu}{_b\otimes_\alpha}\Lambda_{\overline{\nu^o_\beta}}(X)\|^2\] \subsection{Lemma} \label{lem} {\it Let $\mathfrak{G}=(N, M, \alpha, \beta, \Gamma, T, T', \nu)$ be a measured quantum groupoid; let $(N, b, A)$ be a faithful weighted right von Neumann $N$-module; then : \newline (i) if $\xi$, $\eta$ are in $D(_\alpha H, \nu)\cap \mathcal D(\Delta_{\widehat{\Phi}}^{-1/2})$, such that $\Delta_{\widehat{\Phi}}^{-1/2}\xi$ and $\Delta_{\widehat{\Phi}}^{-1/2}\eta$ belong to $D(_\alpha H, \nu)$, $<\Delta_{\widehat{\Phi}}^{-1/2}\xi, \eta>_{\alpha, \nu}^o$ belongs to $\mathcal D(\sigma_{-i/2}^\nu)$ and $\sigma_{-i/2}^\nu(<\Delta_{\widehat{\Phi}}^{-1/2}\xi, \eta>_{\alpha, \nu}^o)=<\xi, \Delta_{\widehat{\Phi}}^{-1/2}\eta>_{\alpha, 
\nu}^o$. \newline (ii) there exists an $(\alpha, \nu)$-orthogonal basis of $H$ such that, for all $i\in I$, $e_i$ belongs to $D(_\alpha H, \nu)\cap \mathcal D(\Delta_{\widehat{\Phi}}^{-1/2})$ and $\Delta_{\widehat{\Phi}}^{-1/2}e_i$ belongs to $D(_\alpha H, \nu)$; \newline (iii) for any such basis, the weight $\overline{\nu^o_\beta}$ defined in \ref{example} satisfies, for all $x\in \alpha(N)'^+$ :} \[\overline{\nu^o_\beta} (x)=\sum_i(x\Delta_{\widehat{\Phi}}^{-1/2}e_i|\Delta_{\widehat{\Phi}}^{-1/2}e_i)\] \begin{proof} We get, for any $n\in\mathfrak{N}_\nu$, analytic with respect to $\nu$ : \[R^{\alpha, \nu}(\Delta_{\widehat{\Phi}}^{-1/2}\xi)\Lambda_\nu (n)=\alpha(n)\Delta_{\widehat{\Phi}}^{-1/2}\xi=\Delta_{\widehat{\Phi}}^{-1/2}\alpha(\sigma_{-i/2}^\nu(n))\xi=\Delta_{\widehat{\Phi}}^{-1/2}R^{\alpha, \nu}(\xi)\Delta_\nu^{1/2}\Lambda_\nu (n)\] and, using (\cite{C2}, 1.5) : \begin{eqnarray*} \Lambda_\nu(<\eta, \Delta_{\widehat{\Phi}}^{-1/2}\xi>_{\alpha, \nu}^o)&=& J_\nu\Delta_\nu^{1/2}\Lambda_\nu(<\Delta_{\widehat{\Phi}}^{-1/2}\xi, \eta>_{\alpha, \nu}^o)\\ &=& J_\nu\Delta_\nu^{1/2}R^{\alpha, \nu}(\Delta_{\widehat{\Phi}}^{-1/2}\xi)^*\eta\\ &=& J_\nu R^{\alpha, \nu}(\xi)^*\Delta_{\widehat{\Phi}}^{-1/2}\eta\\ &=&J_\nu\Lambda_\nu(<\xi, \Delta_{\widehat{\Phi}}^{-1/2}\eta>_{\alpha, \nu}) \end{eqnarray*} from which we get (i). \newline Applying (\cite{E3}2.10) to the inclusion $\alpha(N)\subset \widehat{M}$ and the operator-valued weight $\hat{T}$, we get that it is possible to construct an orthogonal $(\alpha, \nu)$-basis $(e_i)_{i\in I}$ such that $e_i=J_{\widehat{\Phi}}\Lambda_{\widehat{\Phi}}(x_i)$, with $x_i\in\mathfrak{N}_{\widehat{\Phi}}\cap\mathfrak{N}_{\widehat{\Phi}}^*\cap\mathfrak{N}_{\hat{T}}\cap\mathfrak{N}_{\hat{T}}^*$; so, $e_i$ belongs to $\mathcal D(\Delta_{\widehat{\Phi}}^{-1/2})$, and $\Delta_{\widehat{\Phi}}^{-1/2}e_i=J_{\widehat{\Phi}}\Lambda_{\widehat{\Phi}}(x_i^*)$ which belongs to $D(_\alpha H, \nu)$; which is (ii). 
\newline Using (ii) and (i), we have : \begin{eqnarray*} (\theta^{\alpha, \nu}(\xi, \xi)\Delta_{\widehat{\Phi}}^{-1/2}e_i|\Delta_{\widehat{\Phi}}^{-1/2}e_i) &=& \|R^{\alpha, \nu}(\xi)^*\Delta_{\widehat{\Phi}}^{-1/2}e_i\|^2\\ &=&\|\Lambda_\nu(<e_i, \Delta_{\widehat{\Phi}}^{-1/2}\xi>_{\alpha, \nu}^o)\|^2\\ &=&\nu((R^{\alpha, \nu}(\Delta_{\widehat{\Phi}}^{-1/2}\xi)^*\theta^{\alpha, \nu}(e_i, e_i)R^{\alpha, \nu}(\Delta_{\widehat{\Phi}}^{-1/2}\xi))^o) \end{eqnarray*} and we get, using (i) and \ref{example} : \[\sum_i(\theta^{\alpha, \nu}(\xi, \xi)\Delta_{\widehat{\Phi}}^{-1/2}e_i|\Delta_{\widehat{\Phi}}^{-1/2}e_i)= \nu(<\Delta_{\widehat{\Phi}}^{-1/2}\xi, \Delta_{\widehat{\Phi}}^{-1/2}\xi>_{\alpha, \nu}^o)=\|\Delta_{\widehat{\Phi}}^{-1/2}\xi\|^2=\overline{\nu^o_\beta}(\theta^{\alpha, \nu}(\xi, \xi))\] from which we get that $\sum_i\omega_{\Delta_{\widehat{\Phi}}^{-1/2}e_i}$ is a normal semi-finite weight on $\alpha(N)'$, and, by unicity of the spatial derivative, we get that this weight is equal to $\overline{\nu^o_\beta}$. 
\end{proof} \subsection{Theorem} \label{psibarre2} {\it Let $\mathfrak{G}=(N, M, \alpha, \beta, \Gamma, T, T', \nu)$ be a measured quantum groupoid; let $(e_i)_{i\in I}$ be an $(\alpha, \nu)$-orthogonal basis of $H$ such that, for all $i\in I$, $e_i$ belongs to $D(_\alpha H, \nu)\cap \mathcal D(\Delta_{\widehat{\Phi}}^{-1/2})$ and $\Delta_{\widehat{\Phi}}^{-1/2}e_i$ belongs to $D(_\alpha H, \nu)$; let $(N, b, A)$ be a faithful weighted right von Neumann $N$-module, and let $\psi$ be a normal semi-finite faithful weight on $A$ lifted from $\nu^o$; then, we have, with the notations of \ref{psibarre}, \ref{propw} and \ref{example} : \[\underline{\psi}=\sum_i\psi\underset{\nu}{_b*_\alpha}\omega_{\Delta_{\widehat{\Phi}}^{-1/2}e_i} =\overline{\nu^o_\beta}\circ (\psi\underset{\nu}{_b*_\alpha}id)\] } \begin{proof} Let $X\in (A\underset{N}{_b*_\alpha}\mathcal L(H))^+$; we have : \begin{eqnarray*} \sum_i\psi\underset{\nu}{_b*_\alpha}\omega_{\Delta_{\widehat{\Phi}}^{-1/2}e_i}(X) &=&\sum_i \nu\circ b^{-1}\underset{\nu}{_b*_\alpha}\omega_{\Delta_{\widehat{\Phi}}^{-1/2}e_i}(\mathfrak{T}\underset{N}{_b*_\alpha}id)(X)\\ &=&(\nu\circ b^{-1}\underset{\nu}{_b*_\alpha}\overline{\nu^o_\beta})(\mathfrak{T}\underset{N}{_b*_\alpha}id)(X)\\ &=&\overline{\nu^o_\beta}(\nu\circ b^{-1}\underset{\nu}{_b*_\alpha}id)(\mathfrak{T}\underset{N}{_b*_\alpha}id)(X)\\ &=&\overline{\nu^o_\beta}\circ (\psi\underset{\nu}{_b*_\alpha}id)(X) \end{eqnarray*} which is the second equality, and proves therefore that $\sum_i\psi\underset{\nu}{_b*_\alpha}\omega_{\Delta_{\widehat{\Phi}}^{-1/2}e_i}$ defines a normal semi-finite faithful weight on $A\underset{N}{_b*_\alpha}\mathcal L(H)$, which does not depend on the choice of the $(\alpha, \nu)$-orthogonal basis $(e_i)_{i\in I}$. Let us denote $\psi_0$ that weight. 
\newline We get : \[\psi_0(\rho_\xi^{b, \alpha}aa^*(\rho_\xi^{b, \alpha})^*) =\sum_i \psi(b(<\Delta_{\widehat{\Phi}}^{-1/2}e_i, \xi>_{\alpha, \nu}^o)^*aa^*b(<\Delta_{\widehat{\Phi}}^{-1/2}e_i, \xi>_{\alpha, \nu}^o))\] Applying \ref{lem}(i), if $\xi$ belongs to $D(_\alpha H, \nu)\cap \mathcal D(\Delta_{\widehat{\Phi}}^{-1/2})$, and is such that $\Delta_{\widehat{\Phi}}^{-1/2}\xi$ belongs to $D(_\alpha H, \nu)$, we get that $b(<\Delta_{\widehat{\Phi}}^{-1/2}e_i, \xi>_{\alpha, \nu}^o)^*$ belongs to $\mathcal D(\sigma_{-i/2}^\psi)$ and that : \[\sigma_{-i/2}^\psi(b(<\Delta_{\widehat{\Phi}}^{-1/2}e_i, \xi>_{\alpha, \nu}^o)^*)=b(\sigma_{i/2}^\nu(<\xi, \Delta_{\widehat{\Phi}}^{-1/2}e_i>_{\alpha, \nu})^o)= b(<\Delta_{\widehat{\Phi}}^{-1/2}\xi, e_i>_{\alpha, \nu}^o)\] So, with such an hypothesis on $\xi$, and if $a$ belongs to $\mathfrak{N}_\psi\cap\mathfrak{N}_\psi^*$, we get that : \begin{eqnarray*} \psi_0(\rho_\xi^{b, \alpha}aa^*(\rho_\xi^{b, \alpha})^*) &=& \sum_i\|J_\psi b(<e_i, \Delta_{\widehat{\Phi}}^{-1/2}\xi>_{\alpha, \nu}^o)J_\psi\Lambda_\psi (a^*)\|^2\\ &=&\sum_i\|b(< \Delta_{\widehat{\Phi}}^{-1/2}\xi, e_i>_{\alpha, \nu}^o)\Delta_\psi^{1/2}\Lambda_\psi (a)\|^2\\ &=&\sum_i(b(<\Delta_{\widehat{\Phi}}^{-1/2}\xi, e_i>_{\alpha, \nu}^o<e_i, \Delta_{\widehat{\Phi}}^{-1/2}\xi>_{\alpha, \nu}^o)\Delta_\psi^{1/2}\Lambda_\psi (a)|\Delta_\psi^{1/2}\Lambda_\psi (a))\\ &=&(b(<\Delta_{\widehat{\Phi}}^{-1/2}\xi, \Delta_{\widehat{\Phi}}^{-1/2}\xi>_{\alpha, \nu}^o)\Delta_\psi^{1/2}\Lambda_\psi (a)|\Delta_\psi^{1/2}\Lambda_\psi (a))\\ &=&\|\Delta_\psi^{1/2}\Lambda_\psi (a)\underset{\nu}{_b\otimes_\alpha}\Delta_{\widehat{\Phi}}^{-1/2}\xi\|^2 \end{eqnarray*} Using \ref{psibarre}(iii), we get that $\underline{\psi}(\rho_\xi^{b, \alpha}aa^*(\rho_\xi^{b, \alpha})^*)=\psi_0(\rho_\xi^{b, \alpha}aa^*(\rho_\xi^{b, \alpha})^*)$, for all $a$ in $\mathfrak{N}_\psi\cap\mathfrak{N}_\psi^*$ and $\xi\in D(_\alpha H, \nu)\cap \mathcal D(\Delta_{\widehat{\Phi}}^{-1/2})$, and is such that 
$\Delta_{\widehat{\Phi}}^{-1/2}\xi$ belongs to $D(_\alpha H, \nu)$. By polarisation, we get $\underline{\psi}(\rho_\xi^{b, \alpha}ab^*(\rho_\eta^{b, \alpha})^*)=\psi_0(\rho_\xi^{b, \alpha}ab^*(\rho_\eta^{b, \alpha})^*)$, for all $a$, $b$ in $\mathfrak{N}_\psi\cap\mathfrak{N}_\psi^*$ and $\xi$, $\eta$ in $D(_\alpha H, \nu)\cap \mathcal D(\Delta_{\widehat{\Phi}}^{-1/2})$, such that $\Delta_{\widehat{\Phi}}^{-1/2}\xi$ and $\Delta_{\widehat{\Phi}}^{-1/2}\eta$ belong to $D(_\alpha H, \nu)$. The linear set generated by such elements is an involutive algebra, whose weak closure contains, using (\cite{E5} 2.2.1) and the semi-finiteness of $\psi$, all operators of the form $\rho_{\xi_1}^{b, \alpha}c(\rho_{\xi_2}^{b, \alpha})^*$, for any $\xi_1$, $\xi_2$ in $D(_\alpha H, \nu)$ and $c$ in $A$; therefore, using \ref{subalgebra}, we get that $\underline{\psi}$ and $\psi_0$ are equal on a dense involutive algebra. \newline We easily get that $\psi_0\circ\sigma_t^{\underline{\psi}}=\psi_0\circ (\sigma_t^\psi\underset{N}{_b*_\alpha}Ad \Delta_{\widehat{\Phi}}^{-it})$ is equal to $\sum_i\psi\underset{\nu}{_b*_\alpha}\omega_{\Delta_{\widehat{\Phi}}^{-1/2}\Delta_{\widehat{\Phi}}^{-it}e_i}$; the family $\Delta_{\widehat{\Phi}}^{-it}e_i$ is another $(\alpha, \nu)$-orthogonal basis of $H$, which bears the same properties as $(e_i)_{i\in I}$. As, using (i), we know that the definition of $\psi_0$ does not depend on the choice of the orthogonal $(\alpha, \nu)$-basis, we get that $\psi_0$ is invariant under $\sigma_t^{\underline{\psi}}$, and, therefore $\underline{\psi}=\psi_0$, which finishes the proof. \end{proof} \subsection{Example} \label{ex2} Looking again at the particular example given in \ref{example}, we get, using \ref{psibarre2}, that $\underline{\nu^o}=\overline{\nu^o_\beta}$. 
\subsection{Theorem} \label{psibarre3} {\it Let $\mathfrak{G}=(N, M, \alpha, \beta, \Gamma, T, T', \nu)$ be a measured quantum groupoid; let $(e_i)_{i\in I}$ be an $(\alpha, \nu)$-orthogonal basis of $H$ such that, for all $i\in I$, $e_i$ belongs to $D(_\alpha H, \nu)\cap \mathcal D(\Delta_{\widehat{\Phi}}^{-1/2})$ and $\Delta_{\widehat{\Phi}}^{-1/2}e_i$ belongs to $D(_\alpha H, \nu)$; let $(N, b, A)$ be a faithful weighted von Neumann right $N$-module, and let $\mathfrak{T}$ be a normal semi-finite faithful operator-valued weight from $A$ onto $b(N)$; let us write $\psi=\nu^o\circ b^{-1}\circ\mathfrak{T}$ and $\underline{\psi}$ the normal semi-finite faithful weight on $A\underset{N}{_b*_\alpha}\mathcal L(H)$ constructed in \ref{psibarre}; for $n\in N$, let us define $a(n)=J_\psi b(n^*)J_\psi$. Let $\xi$ be in $D(_\alpha H, \nu)\cap \mathcal D(\Delta_{\widehat{\Phi}}^{-1/2})$, such that $\Delta_{\widehat{\Phi}}^{-1/2}\xi$ belongs to $D(_\alpha H, \nu)$; let $\eta$, $\xi_1$ be in $D(_\alpha H, \nu)$, and $\xi_2\in D(H_\beta, \nu^o)$; let $z$ be in $\mathfrak{N}_\psi$, $\zeta$ be in $H_\psi$, $X$ be in $A\underset{N}{_b*_\alpha}\mathcal L(H)$. Then : \newline (i) the operator $\rho^{b, \alpha}_\eta z(\rho^{b, \alpha}_\xi)^*$ belongs to $\mathfrak{N}_{\underline{\psi}}$, and we have : \[\Lambda_{\underline{\psi}}(\rho^{b, \alpha}_\eta z(\rho^{b, \alpha}_\xi)^*)=J_{\widehat{\Phi}}\Delta_{\widehat{\Phi}}^{-1/2}\xi\underset{\nu}{_\beta\otimes_a}\Lambda_\psi(z)\underset{\nu}{_b\otimes_\alpha}\eta\] Moreover, the linear set generated by the operators $\rho^{b, \alpha}_\eta z(\rho^{b, \alpha}_\xi)^*$, where $z$ is in $\mathfrak{N}_\psi$, $\eta$ is in $D(_\alpha H, \nu)$, and $\xi$ is in $D(_\alpha H, \nu)\cap \mathcal D(\Delta_{\widehat{\Phi}}^{-1/2})$, such that $\Delta_{\widehat{\Phi}}^{-1/2}\xi$ belongs to $D(_\alpha H, \nu)$, is a core for $\Lambda_{\underline{\psi}}$.
\newline (ii) we have : $J_{\underline{\psi}}(\xi_2\underset{\nu}{_\beta\otimes_a}\zeta\underset{\nu}{_b\otimes_\alpha}\xi_1)=J_{\widehat{\Phi}}\xi_1\underset{\nu}{_\beta\otimes_a}J_\psi\zeta\underset{\nu}{_b\otimes_\alpha}J_{\widehat{\Phi}}\xi_2$. \newline (iii) we have : $\pi_{\underline{\psi}}(X)=1\underset{N}{_\beta\otimes_a}X$. \newline (iv) it is possible to define a one parameter group of unitaries $\Delta_{\widehat{\Phi}}^{-it}\underset{\nu}{_\beta\otimes_a}\Delta_\psi^{it}\underset{\nu}{_b\otimes_\alpha}\Delta_{\widehat{\Phi}}^{-it}$ on $H\underset{\nu}{_\beta\otimes_a}H_\psi\underset{\nu}{_b\otimes_\alpha}H$ with natural values on elementary tensors, and $\Delta_{\underline{\psi}}^{1/2}$ is equal to its generator $\Delta_{\widehat{\Phi}}^{-1/2}\underset{\nu}{_\beta\otimes_a}\Delta_\psi^{1/2}\underset{\nu}{_b\otimes_\alpha}\Delta_{\widehat{\Phi}}^{-1/2}$. } \begin{proof} We have $(\rho^{b, \alpha}_\eta z(\rho^{b, \alpha}_\xi)^*)^*\rho^{b, \alpha}_\eta z(\rho^{b, \alpha}_\xi)^*= \rho^{b, \alpha}_\xi z^*b(<\eta, \eta>_{\alpha, \nu}^o)z(\rho^{b, \alpha}_\xi)^*$, which belongs to $\mathfrak{M}_{\underline{\psi}}$, by \ref{psibarre}(iii). \newline Let $a$ in $\mathfrak{N}_\psi\cap\mathfrak{N}_\psi^*$; let us take $\eta_1$ satisfying the same hypothesis as $\xi$. 
We have, using \ref{subalgebra}(iv) applied to the weight $\underline{\psi}$, and \ref{psibarre}(ii) : \[J_{\widehat{\Phi}}\eta_1\underset{\nu}{_\beta\otimes_a}J_\psi aJ_\psi\zeta\underset{\nu}{_b\otimes_\alpha}\eta_1= R^{\underline{\psi}}(\Delta_\psi^{1/2}\Lambda_\psi (a)\underset{\nu}{_b\otimes_\alpha}\Delta_{\widehat{\Phi}}^{-1/2}\eta_1)^*(\zeta\underset{\nu}{_b\otimes_\alpha}\xi_1)\] and, therefore : \begin{multline*} (\Lambda_{\underline{\psi}}(\rho^{b, \alpha}_\eta z(\rho^{b, \alpha}_\xi)^*)|J_{\widehat{\Phi}}\eta_1\underset{\nu}{_\beta\otimes_a}J_\psi aJ_\psi\zeta\underset{\nu}{_b\otimes_\alpha}\xi_1)=\\ (\rho^{b, \alpha}_\eta z(\rho^{b, \alpha}_\xi)^*(\Delta_\psi^{1/2}\Lambda_\psi (a)\underset{\nu}{_b\otimes_\alpha}\Delta_{\widehat{\Phi}}^{-1/2}\eta_1)|\zeta\underset{\nu}{_b\otimes_\alpha}\xi_1) \end{multline*} which, using \ref{lem}(i), and the definition of $\psi$, is equal to : \begin{multline*} (\rho^{b, \alpha}_\eta zb(<\Delta_{\widehat{\Phi}}^{-1/2}\eta_1, \xi>_{\alpha, \nu}^o)\Delta_\psi^{1/2}\Lambda_\psi (a)|\zeta\underset{\nu}{_b\otimes_\alpha}\xi_1)=\\ (zb(<\Delta_{\widehat{\Phi}}^{-1/2}\eta_1, \xi>_{\alpha, \nu}^o)\Delta_\psi^{1/2}\Lambda_\psi (a)\underset{\nu}{_b\otimes_\alpha}\eta|\zeta\underset{\nu}{_b\otimes_\alpha}\xi_1)=\\ (zb(\sigma^\nu_{-i/2}(<\eta_1, \Delta_{\widehat{\Phi}}^{-1/2}\xi>_{\alpha, \nu}^o))\Delta_\psi^{1/2}\Lambda_\psi (a)\underset{\nu}{_b\otimes_\alpha}\eta|\zeta\underset{\nu}{_b\otimes_\alpha}\xi_1)=\\ (z\Delta_\psi^{1/2}b(<\eta_1, \Delta_{\widehat{\Phi}}^{-1/2}\xi>_{\alpha, \nu}^o)\Lambda_\psi (a)\underset{\nu}{_b\otimes_\alpha}\eta|\zeta\underset{\nu}{_b\otimes_\alpha}\xi_1) \end{multline*} Let us suppose that $z$ belongs to $\mathcal D(\sigma_{i/2}^\psi)$; we get that : \begin{align*} z\Delta_\psi^{1/2}b(<\eta_1, \Delta_{\widehat{\Phi}}^{-1/2}\xi>_{\alpha, \nu}^o)\Lambda_\psi (a) &= \Delta_\psi^{1/2}\sigma^\psi_{i/2}(z)b(<\eta_1, \Delta_{\widehat{\Phi}}^{-1/2}\xi>_{\alpha, \nu}^o)\Lambda_\psi (a)\\ &= 
J_\psi\Lambda_\psi(a^*b(<\Delta_{\widehat{\Phi}}^{-1/2}\xi, \eta_1>_{\alpha, \nu}^o)\sigma^\psi_{-i/2}(z^*))\\ &= J_\psi a^*b(<\Delta_{\widehat{\Phi}}^{-1/2}\xi, \eta_1>_{\alpha, \nu}^o)J_\psi\Lambda_\psi(z)\\ &=J_\psi a^*J_\psi a(<\eta_1, \Delta_{\widehat{\Phi}}^{-1/2}\xi>_{\alpha, \nu}^o)\Lambda_\psi(z)\\ &=J_\psi a^*J_\psi a(<J_{\widehat{\Phi}}\Delta_{\widehat{\Phi}}^{-1/2}\xi, J_{\widehat{\Phi}}\eta_1>_{\beta, \nu^o})\Lambda_\psi(z) \end{align*} which remains true for all $z\in\mathfrak{N}_\psi$; therefore, we then get that : \begin{multline*} (\Lambda_{\underline{\psi}}(\rho^{b, \alpha}_\eta z(\rho^{b, \alpha}_\xi)^*)|J_{\widehat{\Phi}}\eta_1\underset{\nu}{_\beta\otimes_a}J_\psi aJ_\psi\zeta\underset{\nu}{_b\otimes_\alpha}\xi_1)=\\ (J_\psi a^*J_\psi a(<J_{\widehat{\Phi}}\Delta_{\widehat{\Phi}}^{-1/2}\xi, J_{\widehat{\Phi}}\eta_1>_{\beta, \nu^o})\Lambda_\psi(z)\underset{\nu}{_b\otimes_\alpha}\eta|\zeta\underset{\nu}{_b\otimes_\alpha}\xi_1)=\\(a(<J_{\widehat{\Phi}}\Delta_{\widehat{\Phi}}^{-1/2}\xi, J_{\widehat{\Phi}}\eta_1>_{\beta, \nu^o})\Lambda_\psi(z)\underset{\nu}{_b\otimes_\alpha}\eta|J_\psi aJ_\psi \zeta\underset{\nu}{_b\otimes_\alpha}\xi_1)=\\ (J_{\widehat{\Phi}}\Delta_{\widehat{\Phi}}^{-1/2}\xi\underset{\nu}{_\beta\otimes_a}\Lambda_\psi(z)\underset{\nu}{_b\otimes_\alpha}\eta|J_{\widehat{\Phi}}\eta_1\underset{\nu}{_\beta\otimes_a}J_\psi aJ_\psi \zeta\underset{\nu}{_b\otimes_\alpha}\xi_1) \end{multline*} from which, by density, we get the first result of (i). \newline Using \ref{psibarre}(ii), we get that $\sigma_t^{\underline{\psi}}(\rho^{b, \alpha}_\eta z(\rho^{b, \alpha}_\xi)^*)=\rho^{b, \alpha}_{\Delta_{\widehat{\Phi}}^{-it}\eta}\sigma_t^\psi(z)(\rho^{b, \alpha}_{\Delta_{\widehat{\Phi}}^{-it}\xi})^*$; so, the linear set generated by the operators $\rho^{b, \alpha}_\eta z(\rho^{b, \alpha}_\xi)^*$, where $z$ belongs to $\mathfrak{N}_\psi\cap\mathfrak{N}_\psi^*$, and $\xi$ (resp.
$\eta$) is in $D(_\alpha H, \nu)\cap \mathcal D(\Delta_{\widehat{\Phi}}^{-1/2})$, such that $\Delta_{\widehat{\Phi}}^{-1/2}\xi$ (resp. $\Delta_{\widehat{\Phi}}^{-1/2}\eta$) belongs to $D(_\alpha H, \nu)$ is a $*$-subalgebra of $\mathfrak{N}_{\underline{\psi}}\cap \mathfrak{N}_{\underline{\psi}}^*$, dense in $A\underset{N}{_b*_\alpha}\mathcal L(H)$ by \ref{subalgebra}, and globally invariant under $\sigma_t^{\underline{\psi}}$. It is possible to put on the image of this algebra under $\Lambda_{\underline{\psi}}$ a structure of left-Hilbert algebra, which, in turn, leads to a faithful normal semi-finite weight $\psi_0$ on $A\underset{N}{_b*_\alpha}\mathcal L(H)$, equal to $\underline{\psi}$ on this subalgebra, and invariant under $\sigma_t^{\underline{\psi}}$. So, we get $\psi_0=\underline{\psi}$, which finishes the proof of (i). \newline On the other hand, let's apply \ref{subalgebra}(iii) to the standard representation of $A\underset{N}{_b*_\alpha}\mathcal L(H)$ given by the weight $\underline{\psi}$, and we get (ii) and (iii). \newline Let now $\xi\in D(H_\beta, \nu^o)$; we have, for all $t\in\mathbb{R}$, $n\in\mathfrak{N}_\nu$ : \begin{align*} \beta(n^*)\Delta_{\widehat{\Phi}}^{-it}\xi &=\Delta_{\widehat{\Phi}}^{-it}\tau_t(\beta(n^*))\xi\\ &=\Delta_{\widehat{\Phi}}^{-it}\beta(\sigma_t^\nu(n^*))\xi\\ &=\Delta_{\widehat{\Phi}}^{-it}R^{\beta, \nu^o}(\xi)J_\nu\Lambda_\nu(\sigma_t^\nu(n))\\ &=\Delta_{\widehat{\Phi}}^{-it}R^{\beta, \nu^o}(\xi)J_\nu\Delta_\nu^{it}\Lambda_\nu(n) \end{align*} and, therefore, $\Delta_{\widehat{\Phi}}^{-it}\xi$ belongs to $D(H_\beta, \nu^o)$, and $R^{\beta, \nu^o}(\Delta_{\widehat{\Phi}}^{-it}\xi)=\Delta_{\widehat{\Phi}}^{-it}R^{\beta, \nu^o}(\xi)\Delta_\nu^{it}$, and $<\Delta_{\widehat{\Phi}}^{-it}\xi, \Delta_{\widehat{\Phi}}^{-it}\xi>_{\beta, \nu^o}=\sigma_{-t}^\nu(<\xi, \xi>_{\beta, \nu^o})$. 
Therefore, if $\xi'$ belongs to $D(_\alpha H, \nu)$, $\eta\in H_\psi$, we get : \begin{align*} \|\Delta_{\widehat{\Phi}}^{-it}\xi\underset{\nu}{_\beta\otimes_a}\Delta_\psi^{it}\eta\underset{\nu}{_b\otimes_\alpha}\Delta_{\widehat{\Phi}}^{-it}\xi'\|^2 &= (b(<\Delta_{\widehat{\Phi}}^{-it}\xi', \Delta_{\widehat{\Phi}}^{-it}\xi'>_{\alpha, \nu}^o)a(<\Delta_{\widehat{\Phi}}^{-it}\xi, \Delta_{\widehat{\Phi}}^{-it}\xi>_{\beta, \nu^o})\Delta_\psi^{it}\eta|\Delta_\psi^{it}\eta)\\ &=(b(\sigma_{-t}^{\nu^o}(<\xi', \xi'>_{\alpha, \nu}^o))a(\sigma_{-t}^\nu(<\xi, \xi>_{\beta, \nu^o}))\Delta_\psi^{it}\eta|\Delta_\psi^{it}\eta)\\ &=(\sigma_t^\psi(b(<\xi', \xi'>_{\alpha, \nu}^o))J_\psi b(\sigma_{-t}^\nu(<\xi, \xi>_{\beta, \nu^o}))J_\psi\Delta_\psi^{it}\eta|\Delta_\psi^{it}\eta)\\ &=(b(<\xi', \xi'>_{\alpha, \nu}^o)\Delta_\psi^{-it}J_\psi\sigma_t^\psi(b(<\xi, \xi>_{\beta, \nu^o}))J_\psi\Delta_\psi^{it}\eta|\eta)\\ &=(b(<\xi', \xi'>_{\alpha, \nu}^o)J_\psi b(<\xi, \xi>_{\beta, \nu^o})J_\psi\eta|\eta)\\ &=(b(<\xi', \xi'>_{\alpha, \nu}^o)a(<\xi, \xi>_{\beta, \nu^o})\eta|\eta)\\ &=\|\xi\underset{\nu}{_\beta\otimes_a}\eta\underset{\nu}{_b\otimes_\alpha}\xi'\|^2 \end{align*} Now, from (i) and (ii), we get that the infinitesimal generator $\Delta_{\widehat{\Phi}}^{-1/2}\underset{\nu}{_\beta\otimes_a}\Delta_\psi^{1/2}\underset{\nu}{_b\otimes_\alpha}\Delta_{\widehat{\Phi}}^{-1/2}$ of this one-parameter group of unitaries is included in $\Delta_{\underline{\psi}}^{1/2}$; these operators being self-adjoint, we get (iv). \end{proof} \subsection{Corollary} \label{psia} {\it Let $\mathfrak{G}=(N, M, \alpha, \beta, \Gamma, T, T', \nu)$ be a measured quantum groupoid, $(b,\mathfrak{a})$ an action of $\mathfrak{G}$ on a von Neumann algebra $A$, $\psi$ a normal semi-finite faithful weight on $A$, $\overline{\psi_\mathfrak{a}}$ the normal semi-finite faithful weight constructed on $A\underset{N}{_b*_\alpha}\mathcal L(H)$ by transporting the bidual weight.
Then, for any $x\in \mathfrak{N}_\psi$, $\xi\in D(_\alpha H, \nu)$, $\eta\in D(_\alpha H, \nu)\cap \mathcal D(\Delta_{\widehat{\Phi}}^{-1/2})$ such that $\Delta_{\widehat{\Phi}}^{-1/2}\eta$ belongs to $D(H_\beta, \nu^o)$, the operator $(1\underset{N}{_b\otimes_\alpha}\theta^{\alpha, \nu}(\xi, \eta))\mathfrak{a}(x)$ belongs to $\mathfrak{N}_{\overline{\psi_\mathfrak{a}}}$, and we have:} \[\overline{\psi_\mathfrak{a}}(\mathfrak{a}(x^*)(1\underset{N}{_b\otimes_\alpha}\theta^{\alpha, \nu}(\xi, \eta)^*\theta^{\alpha, \nu}(\xi, \eta))\mathfrak{a}(x))=\|\Lambda_\psi(x)\underset{\nu}{_b\otimes_\alpha}J_{\widehat{\Phi}}\Delta_{\widehat{\Phi}}^{-1/2}\eta\underset{\nu}{_\beta\otimes_\alpha}\xi\|^2\] \begin{proof} Using \ref{psibarre3} applied to $\underline{\nu^o}$, we get that $\Lambda_{\underline{\nu^o}}(\theta^{\alpha, \nu}(\xi, \eta))=J_{\widehat{\Phi}}\Delta_{\widehat{\Phi}}^{-1/2}\eta\underset{\nu}{_\beta\otimes_\alpha}\xi$, which belongs to $D(_\alpha H_{\underline{\nu^o}}, \nu)$; so, using \ref{example} and \ref{ex2}, we get the result. \end{proof} \section{Standard implementation: using the weight $\underline{\psi}$. } \label{using} In this section, we calculate (\ref{propW*sigma}) the dual weight $\widetilde{(\underline{\psi})}$ of $\underline{\psi}$, with respect to the action $\underline{\mathfrak{a}}$ (\ref{psitildetheta}(ii)); this will allow us to calculate $J_{\widetilde{(\underline{\psi})}}$ (\ref{propW*sigma}), and then, to obtain a formula linking $U^\mathfrak{a}_\psi$ and $U^{\underline{\mathfrak{a}}}_{\underline{\psi}}$ (\ref{Upsibarre}). As $U^{\underline{\mathfrak{a}}}_{\underline{\psi}}$ is a corepresentation by \ref{cora}, we obtain then that $U^\mathfrak{a}_\psi$ is a corepresentation (and, therefore, a standard implementation) whenever it is possible to construct $\underline{\psi}$ (\ref{standard}).
\subsection{Proposition} \label{tildeTheta} {\it Let $\mathfrak{G}=(N, M, \alpha, \beta, \Gamma, T, T', \nu)$ be a measured quantum groupoid; let $A$ be a von Neumann algebra acting on a Hilbert space $\mathfrak{H}$, $(b, \mathfrak a)$ be an action of $\mathfrak{G}$ on $A$, $(1\underset{N}{_b\otimes_\alpha}\hat{\beta}, \underline{\mathfrak a})$ be the action of $\mathfrak{G}$ on $A\underset{N}{_b*_\alpha}\mathcal L(H)$ introduced in \ref{crossed}; then, let us write, for any $Y$ in $\mathcal L(\mathfrak{H}\underset{\nu}{_b\otimes_\alpha}H\underset{\nu}{_{\hat{\beta}}\otimes_\alpha}H)$, \[\tilde{\Theta}(Y)=(1\underset{N}{_b\otimes_\alpha}W)^*(id\underset{N}{_b*_\alpha}\varsigma_N)(Y)(1\underset{N}{_b\otimes_\alpha}W)\] which belongs to $\mathcal L(\mathfrak{H}\underset{\nu}{_b\otimes_\alpha}H\underset{\nu}{_\beta\otimes_\alpha}H)$; then, we have : \newline (i) for any $X\in A\underset{N}{_b*_\alpha}\mathcal L(H)$, $\tilde{\Theta}(\underline{\mathfrak a}(X))=(\mathfrak a\underset{N}{_b*_\alpha}id)(X)$ and : \[\tilde{\Theta}((A\underset{N}{_b*_\alpha}\mathcal L(H))\rtimes_{\underline{\mathfrak a}}\mathfrak{G})= (A\rtimes_\mathfrak a\mathfrak{G})\underset{N}{_\beta*_\alpha}\mathcal L(H)\] \newline (ii) $(1\underset{N}{_b\otimes_\alpha}\hat{\alpha}, (id\underset{N}{_b*_\alpha}\varsigma_N)(\tilde{\mathfrak a}\underset{N}{_\beta*_\alpha}id))$ is an action of $\hat{\mathfrak{G}}^c$ on $(A\rtimes_\mathfrak a\mathfrak{G})\underset{N}{_\beta*_\alpha}\mathcal L(H)$, and : \[(\tilde{\Theta}\underset{N^o}{_\alpha*_\beta}id)\widetilde{(\underline{\mathfrak a})}=(id\underset{N}{_b*_\alpha}\varsigma_{N^o})(\tilde{\mathfrak a}\underset{N^o}{_{\hat{\alpha}}*_\beta}id)\tilde{\Theta}\] where $\widetilde{(\underline{\mathfrak{a}})}$ is the dual action of $\underline{\mathfrak{a}}$ (it is therefore an action of $\widehat{\mathfrak{G}}^c$ on $(A\underset{N}{_b*_\alpha}\mathcal L(H))\rtimes_{\underline{\mathfrak{a}}}\mathfrak{G}$).} \begin{proof} By the definition of 
$\underline{\mathfrak{a}}$, we get the first formula of (i). The second formula of (i) was already proved in (\cite{E5} 11.4). Moreover, using (i), we have, for all $X\in A\underset{N}{_b*_\alpha}\mathcal L(H)$ : \begin{eqnarray*} (\tilde{\Theta}\underset{N^o}{_{\hat{\alpha}}*_\beta}id)\widetilde{(\underline{\mathfrak a})}(\underline{\mathfrak a}(X))&=& (\tilde{\Theta}\underset{N^o}{_{\hat{\alpha}}*_\beta}id)(\underline{\mathfrak a}(X)\underset{N^o}{_{\hat{\alpha}}\otimes_\beta}1)\\ &=& (\mathfrak a\underset{N}{_b*_\alpha}id)(X)\underset{N^o}{_{\hat{\alpha}}\otimes_\beta}1\\ &=&(id\underset{N}{_b*_\alpha}\varsigma_{N^o})(\tilde{\mathfrak a}\underset{N^o}{_{\hat{\alpha}}*_\beta}id)(\mathfrak a\underset{N}{_b*_\alpha}id)(X)\\ &=&(id\underset{N}{_b*_\alpha}\varsigma_{N^o})(\tilde{\mathfrak a}\underset{N^o}{_{\hat{\alpha}}*_\beta}id)\tilde{\Theta}(\underline{\mathfrak a}(X)) \end{eqnarray*} and, for all $z\in \widehat{M}'$, we have : \[(\tilde{\Theta}\underset{N^o}{_{\hat{\alpha}}*_\beta}id)\widetilde{(\underline{\mathfrak a})}(1\underset{N}{_b\otimes_\alpha}1\underset{N^o}{_{\hat{\alpha}}\otimes_\beta}z) = (\tilde{\Theta}\underset{N^o}{_{\hat{\alpha}}*_\beta}id)(1\underset{N}{_b\otimes_\alpha}\widehat{\Gamma}^c(z))\] which, thanks again to (\cite{E5} 11.4), is equal to : \[(1\underset{N}{_b\otimes_\alpha}1\underset{N}{_\beta\otimes_{\hat{\alpha}}}J_\Phi J_{\widehat{\Phi}} \underset{N}{_\beta\otimes_{\hat{\alpha}}}1)(\widehat{\Gamma}^{oc}\underset{N^o}{_{\hat{\alpha}}*_\beta}id)\widehat{\Gamma}^c(z)(1\underset{N}{_b\otimes_\alpha}1\underset{N}{_\beta\otimes_\alpha}J_{\widehat{\Phi}}J_\Phi \underset{N^o}{_{\hat{\alpha}}\otimes_\beta}1)\] and we have : \begin{multline*} (id\underset{N}{_b*_\alpha}\varsigma_{N^o})(\tilde{\mathfrak a}\underset{N^o}{_{\hat{\alpha}}*_\beta}id)\tilde{\Theta}(1\underset{N}{_b\otimes_\alpha}1\underset{N^o}{_{\hat{\alpha}}\otimes_\beta}z)=\\ 
(id\underset{N}{_b*_\alpha}\varsigma_{N^o})(\widehat{\Gamma}^c\underset{N}{_\beta*_\alpha}id)[(1\underset{N}{_\beta\otimes_{\hat{\alpha}}}J_\Phi J_{\widehat{\Phi}})\widehat{\Gamma}^{oc}(z)(1\underset{N}{_\beta\otimes_\alpha}J_{\widehat{\Phi}}J_\Phi)] \end{multline*} from which we deduce that : \[(\tilde{\Theta}\underset{N^o}{_{\hat{\alpha}}*_\beta}id)\widetilde{(\underline{\mathfrak a})}(1\underset{N}{_b\otimes_\alpha}1\underset{N^o}{_{\hat{\alpha}}\otimes_\beta}z)= (id\underset{N}{_b*_\alpha}\varsigma_{N^o})(\tilde{\mathfrak a}\underset{N^o}{_{\hat{\alpha}}*_\beta}id)\tilde{\Theta}(1\underset{N}{_b\otimes_\alpha}1\underset{N^o}{_{\hat{\alpha}}\otimes_\beta}z)\] and we get that : \[(\tilde{\Theta}\underset{N^o}{_{\hat{\alpha}}*_\beta}id)\widetilde{(\underline{\mathfrak a})}=(id\underset{N}{_b*_\alpha}\varsigma_{N^o})(\tilde{\mathfrak a}\underset{N^o}{_{\hat{\alpha}}*_\beta}id)\tilde{\Theta}\] from which we deduce that $(1\underset{N}{_b\otimes_\alpha}\hat{\alpha}, (id\underset{N}{_b*_\alpha}\varsigma_N)(\tilde{\mathfrak a}\underset{N}{_\beta*_\alpha}id))$ is an action of $\hat{\mathfrak{G}}^c$ on the von Neumann algebra $(A\rtimes_\mathfrak a\mathfrak{G})\underset{N}{_\beta*_\alpha}\mathcal L(H)$, which finishes the proof.
\end{proof} \subsection{Corollary} \label{cortildeTheta} {\it Let $\mathfrak{G}=(N, M, \alpha, \beta, \Gamma, T, T', \nu)$ be a measured quantum groupoid; let $A$ be a von Neumann algebra acting on a Hilbert space $\mathfrak{H}$, $(b, \mathfrak a)$ an action of $\mathfrak{G}$ on $A$, $(1\underset{N}{_b\otimes_\alpha}\hat{\beta}, \underline{\mathfrak a})$ be the action of $\mathfrak{G}$ on $A\underset{N}{_b*_\alpha}\mathcal L(H)$ introduced in \ref{crossed} and $\tilde{\Theta}$ the isomorphism introduced in \ref{tildeTheta} which sends $(A\underset{N}{_b*_\alpha}\mathcal L(H))\rtimes_{\underline{\mathfrak a}}\mathfrak{G}$ onto $(A\rtimes_\mathfrak a\mathfrak{G})\underset{N}{_\beta*_\alpha}\mathcal L(H)$; then, we have $\tilde{\Theta}\circ T_{\widetilde{(\underline{\mathfrak{a}})}}=(T_{\tilde{\mathfrak{a}}}\underset{N}{_\beta*_\alpha}id)\Tilde{\Theta}$. } \begin{proof} Using \ref{tildeTheta}(ii), we get : \begin{align*} \tilde{\Theta}\circ T_{\tilde{\underline{\mathfrak{a}}}} &= (id\underset{N}{_b*_\alpha}id\underset{N}{_{\hat{\alpha}}*_\beta}\widehat{\Phi}^c)(\tilde{\Theta}\underset{N}{_{\hat{\alpha}}*_\beta}id)\tilde{\underline{\mathfrak{a}}}\\ &= (id\underset{N}{_b*_\alpha}id\underset{N}{_{\hat{\alpha}}*_\beta}\widehat{\Phi}^c)(id\underset{N}{_b*_\alpha}\varsigma_{N^o})(\tilde{\mathfrak a}\underset{N^o}{_{\hat{\alpha}}*_\beta}id)\tilde{\Theta}\\ &= ((id\underset{N}{_{\hat{\alpha}}*_\beta}\widehat{\Phi}^c)\tilde{\mathfrak{a}}\underset{N}{_\beta*_\alpha}id)\Tilde{\Theta}\\ &=(T_{\tilde{\mathfrak{a}}}\underset{N}{_\beta*_\alpha}id)\Tilde{\Theta} \end{align*} which is the result. 
\end{proof} \label{psitildetheta} \subsection{Theorem} {\it Let $\mathfrak{G}=(N, M, \alpha, \beta, \Gamma, T, T', \nu)$ be a measured quantum groupoid; let $A$ be a von Neumann algebra acting on a Hilbert space $\mathfrak{H}$, $(b, \mathfrak a)$ a weighted action of $\mathfrak{G}$ on $A$, $(1\underset{N}{_b\otimes_\alpha}\hat{\beta}, \underline{\mathfrak a})$ be the action of $\mathfrak{G}$ on $A\underset{N}{_b*_\alpha}\mathcal L(H)$ introduced in \ref{crossed} and $\tilde{\Theta}$ the isomorphism introduced in \ref{tildeTheta} which sends $(A\underset{N}{_b*_\alpha}\mathcal L(H))\rtimes_{\underline{\mathfrak a}}\mathfrak{G}$ onto $(A\rtimes_\mathfrak a\mathfrak{G})\underset{N}{_\beta*_\alpha}\mathcal L(H)$; then : \newline (i) $(N, 1\underset{N}{_b\otimes_\alpha}\beta, A\rtimes_\mathfrak{a}\mathfrak{G})$ is a von Neumann faithful right $N$-module; let $\psi$ be a lifted weight on $A$, then $\tilde{\psi}$ is a lifted weight on $A\rtimes_\mathfrak{a}\mathfrak{G}$. Let's denote then $\underline{\psi}$ and $\underline{(\tilde{\psi})}$ the weights constructed by \ref{psibarre} applied to $\psi$ and $\tilde{\psi}$. \newline (ii) we have $\underline{(\tilde{\psi})}\circ\tilde{\Theta}=\widetilde{(\underline{\psi})}$ and, for all $t\in\mathbb{R}$, $\sigma_t^{\underline{(\tilde{\psi})}}\circ\tilde{\Theta}=\tilde{\Theta}\circ\sigma_t^{\widetilde{(\underline{\psi})}}$. \newline (iii) moreover, $\overline{\psi_\mathfrak{a}}$ is a lifted weight on $A\underset{N}{_b*_\alpha}\mathcal L(H)$, and we can define a normal semifinite faithful weight $\underline{(\overline{\psi_\mathfrak{a}})}$ on $A\underset{N}{_b*_\alpha}\mathcal L(H)\underset{N}{_\beta*_\alpha}\mathcal L(H)$ . On the other hand, we can define the normal semi-finite faithful weight $\overline{(\underline{\psi})_{\underline{\mathfrak{a}}}}$ on $A\underset{N}{_b*_\alpha}\mathcal L(H)\underset{N}{_{\hat{\beta}}*_\alpha}\mathcal L(H)$. 
Then, we have $\underline{(\overline{\psi_\mathfrak{a}})}\circ\tilde{\Theta}=\overline{(\underline{\psi})_{\underline{\mathfrak{a}}}}$. } \begin{proof} Let $\mathfrak{T}$ be a normal faithful semi-finite operator valued weight from $A$ into $b(N)$; then $\mathfrak{a}\circ\mathfrak{T}\circ \mathfrak{a}^{-1}$ is a normal faithful semi-finite operator valued weight from $\mathfrak{a}(A)$ into $1\underset{N}{_b\otimes_\alpha}\beta(N)$, and $\mathfrak{a}\circ\mathfrak{T}\circ \mathfrak{a}^{-1}\circ T_{\tilde{\mathfrak{a}}}$ is a normal faithful semi-finite operator-valued weight from $A\rtimes_\mathfrak{a}\mathfrak{G}$ into $1\underset{N}{_b\otimes_\alpha}\beta(N)$; then, if we write $\psi=\nu^o\circ b^{-1}\circ\mathfrak{T}$, the dual weight $\tilde{\psi}$ can be written as $\nu^o\circ (1\underset{N}{_b\otimes_\alpha}\beta)^{-1}\circ (\mathfrak{a}\circ\mathfrak{T}\circ \mathfrak{a}^{-1}\circ T_{\tilde{\mathfrak{a}}})$, which finishes the proof of (i). \newline We have then, using the notations of \ref{psibarre}, and results \ref{cortildeTheta} and \ref{tildeTheta}(i) : \begin{align*} \underline{(\tilde{\psi})}\circ\tilde{\Theta} &= \sum_i(\tilde{\psi}\underset{\nu}{_\beta*_\alpha}\omega_{\Delta_{\widehat{\Phi}}^{-1/2}e_i})\circ\tilde{\Theta}\\ &= \sum_i(\psi\circ\mathfrak{a}^{-1}\circ T_{\tilde{a}}\underset{\nu}{_\beta*_\alpha}\omega_{\Delta_{\widehat{\Phi}}^{-1/2}e_i})\circ\tilde{\Theta}\\ &= \sum_i(\psi\underset{\nu}{_b*_\alpha}\omega_{\Delta_{\widehat{\Phi}}^{-1/2}e_i})\circ(\mathfrak{a}\underset{N}{_b*_\alpha}id)^{-1}\circ (T_{\tilde{\mathfrak{a}}}\underset{N}{_\beta*_\alpha}id)\circ\tilde{\Theta}\\ &= \underline{\psi}\circ(\mathfrak{a}\underset{N}{_b*_\alpha}id)^{-1}\circ \tilde{\Theta}\circ T_{\tilde{\underline{\mathfrak{a}}}}\\ &= \underline{\psi}\circ(\underline{\mathfrak{a}})^{-1}\circ T_{\tilde{\underline{\mathfrak{a}}}}\\ &= \widetilde{(\underline{\psi})} \end{align*} which finishes the proof of (ii). 
\newline We have : \[\overline{\psi_\mathfrak{a}}=\nu^o\circ (1\underset{N}{_b\otimes_\alpha}\beta)^{-1}\circ (\mathfrak{a}\circ\mathfrak{T}\circ \mathfrak{a}^{-1}\circ T_{\tilde{\mathfrak{a}}})\circ T_{\underline{\mathfrak{a}}}\] So, by composition of operator-valued weights, we get that $\overline{\psi_\mathfrak{a}}$ is a lifted weight on the faithful right $N$-module $(N, A\underset{N}{_b*_\alpha}\mathcal L(H), 1\underset{N}{_b\otimes_\alpha}\beta)$, and, applying \ref{psibarre}, we can construct the normal semi-finite faithful weight $\underline{(\overline{\psi_\mathfrak{a}})}$ on $A\underset{N}{_b*_\alpha}\mathcal L(H)\underset{N}{_\beta*_\alpha}\mathcal L(H)$. \newline On the other hand, as $\underline{\psi}$ is a normal semi-finite faithful weight on $A\underset{N}{_b*_\alpha}\mathcal L(H)$, and as $(1\underset{N}{_b\otimes_\alpha}\hat{\beta}, \underline{\mathfrak{a}})$ (\ref{crossed}) is an action of $\mathfrak{G}$ on $A\underset{N}{_b*_\alpha}\mathcal L(H)$, we can define (\ref{crossed}) a weight $\overline{(\underline{\psi})_{\underline{\mathfrak{a}}}}$ on $A\underset{N}{_b*_\alpha}\mathcal L(H)\underset{N}{_{\hat{\beta}}*_\alpha}\mathcal L(H)$. As $\tilde{\Theta}$ is an isomorphism from $A\underset{N}{_b*_\alpha}\mathcal L(H)\underset{N}{_{\hat{\beta}}*_\alpha}\mathcal L(H)$ onto $A\underset{N}{_b*_\alpha}\mathcal L(H)\underset{N}{_\beta*_\alpha}\mathcal L(H)$, we can define then another normal semi-finite faithful weight $\underline{(\overline{\psi_\mathfrak{a}})}\circ\tilde{\Theta}$ on $A\underset{N}{_b*_\alpha}\mathcal L(H)\underset{N}{_{\hat{\beta}}*_\alpha}\mathcal L(H)$. \newline Let's represent $A$ on $H_\psi$ and consider the isomorphism $\tilde{\Theta}$ from $\mathcal L(H_\psi\underset{\nu}{_b\otimes_\alpha}H\underset{\nu}{_{\hat{\beta}}\otimes_\alpha}H)$ onto $\mathcal L(H_\psi\underset{\nu}{_b\otimes_\alpha}H\underset{\nu}{_\beta\otimes_\alpha}H)$. 
The commutant of $A\underset{N}{_b*_\alpha}\mathcal L(H)\underset{N}{_\beta*_\alpha}\mathcal L(H)$ on the Hilbert space $H_\psi\underset{\nu}{_b\otimes_\alpha}H\underset{\nu}{_\beta\otimes_\alpha}H$ is $A'\underset{N}{_b\otimes_\alpha}1_H\underset{N}{_\beta\otimes_\alpha}1_H$, which is isomorphic to $A^o$. Let us consider the spatial derivative $\frac{d\overline{(\underline{\psi})_{\underline{\mathfrak{a}}}}\circ\tilde{\Theta}^{-1}}{d\psi^o}$ on $H_\psi\underset{\nu}{_b\otimes_\alpha}H\underset{\nu}{_{\hat{\beta}}\otimes_\alpha}H$. As, for $x\in A'$, $\tilde{\Theta}$ sends $x\underset{N}{_b\otimes_\alpha}1_H\underset{N}{_{\hat{\beta}}\otimes_\alpha}1_H$ on $x\underset{N}{_b\otimes_\alpha}1_H\underset{N}{_\beta\otimes_\alpha}1_H$, we get that : \[\frac{d\overline{(\underline{\psi})_{\underline{\mathfrak{a}}}}\circ\tilde{\Theta}^{-1}}{d\psi^o}=\tilde{\Theta}(\frac{d\overline{(\underline{\psi})_{\underline{\mathfrak{a}}}}}{d\psi^o})\] where the spatial derivative $\frac{d\overline{(\underline{\psi})_{\underline{\mathfrak{a}}}}}{d\psi^o}$ is taken on the Hilbert space $H_\psi\underset{\nu}{_b\otimes_\alpha}H\underset{\nu}{_{\hat{\beta}}\otimes_\alpha}H$. But, (using \cite{St} 12.11), we get that : \[\frac{d\overline{(\underline{\psi})_{\underline{\mathfrak{a}}}}}{d\psi^o}=\frac{d\widetilde{(\underline{\psi})}\circ T_{\underline{\mathfrak{a}}}}{d\psi^o}=\frac{d\widetilde{(\underline{\psi})}}{d\tilde{\psi}^o}\] where we write, for simplification, $\tilde{\psi}^o$ for the weight taken on $(A\underset{N}{_b*_\alpha}\mathcal L(H)\rtimes_{\underline{\mathfrak{a}}}\mathfrak{G})'$, whose image by $\tilde{\Theta}$ is, thanks to (i), equal to $(A\rtimes_\mathfrak a\mathfrak{G})'\underset{N}{_\beta\otimes_\alpha}1_H$.
\newline Therefore, using (ii), we get that : \[\tilde{\Theta}(\frac{d\overline{(\underline{\psi})_{\underline{\mathfrak{a}}}}}{d\psi^o})=\frac{d\widetilde{(\underline{\psi})}\circ\tilde{\Theta}^{-1}}{d\tilde{\psi}^o}=\frac{d\underline{(\tilde{\psi})}}{d\tilde{\psi}^o}=\frac{d\underline{(\overline{\psi_\mathfrak{a}})}}{d\psi^o}\] which gives the result. \end{proof} \subsection{Lemma} \label{lemW} {\it Let $\mathfrak{G}=(N, M, \alpha, \beta, \Gamma, T, T', \nu)$ be a measured quantum groupoid, $W$ its pseudo-multiplicative unitary, $(e_i)_{i\in I}$ an orthogonal $(\alpha, \nu)$-basis of $H$; then, we have, for all $a\in\mathfrak{N}_{\widehat{\Phi}^c}\cap\mathfrak{N}_{\hat{T}^c}$, $\zeta\in D(_\alpha H, \nu)\cap D(H_{\hat{\beta}}, \nu^o)$ : \[\sum_i\Lambda_{\widehat{\Phi}^c}((\omega_{ J_{\widehat{\Phi}}J_\Phi\zeta, J_{\widehat{\Phi}}J_\Phi e_i}\underset{N^o}{_{\hat{\alpha}}*_\beta}id)\widehat{\Gamma}^c(a))\underset{\nu}{_\beta\otimes_\alpha}e_i= W^*(\Lambda_{\widehat{\Phi}^c}(a)\underset{\nu^o}{_\alpha\otimes_{\hat{\beta}}}\zeta)\]} \begin{proof} Let us first remark that $J_\Phi J_{\widehat{\Phi}}\zeta$ and $J_\Phi J_{\widehat{\Phi}}e_i$ belong to $D(_{\hat{\alpha}}H, \nu)$, and that $\Lambda_{\widehat{\Phi}^c}(a)$ belongs, thanks to (\cite{E5} 2.2), to $D(_\alpha H, \nu)$.
Applying then the definition (\cite{E5} 3.6 (i)) of the pseudo-multiplicative unitary $\widehat{W}^c$ of the measured quantum groupoid $\widehat{\mathfrak{G}}^c$, we get that : \[\Lambda_{\widehat{\Phi}^c}((\omega_{J_{\widehat{\Phi}}J_\Phi \zeta, J_{\widehat{\Phi}}J_\Phi e_i}\underset{N^o}{_{\hat{\alpha}}*_\beta}id)\widehat{\Gamma}^c(a))=(\omega_{J_{\widehat{\Phi}}J_\Phi \zeta, J_{\widehat{\Phi}}J_\Phi e_i}*id)((\widehat{W}^c)^*)\Lambda_{\widehat{\Phi}^c}(a)\] As $(\widehat{W}^c)^*=(\widehat{W^o})^*=\sigma W^o\sigma$, we get : \[(\omega_{ J_{\widehat{\Phi}}J_\Phi\zeta, J_{\widehat{\Phi}}J_\Phi e_i}*id)((\widehat{W}^c)^*)= (id*\omega_{J_{\widehat{\Phi}}J_\Phi \zeta, J_{\widehat{\Phi}}J_\Phi e_i})(W^o)\] and, using \cite{E5} 3.12 (v) and 3.11(iii), we get : \[(id*\omega_{ J_{\widehat{\Phi}}J_\Phi\zeta, J_{\widehat{\Phi}}J_\Phi e_i})(W^o)= J_{\widehat{\Phi}}(id*\omega_{J_\Phi\zeta, J_\Phi e_i})(W)J_{\widehat{\Phi}}=(id*\omega_{\zeta, e_i})(W^*)\] from which we get the result. \end{proof} \subsection{Proposition} \label{propW*sigma} {\it Let $\mathfrak{G}=(N, M, \alpha, \beta, \Gamma, T, T', \nu)$ be a measured quantum groupoid; let $A$ be a von Neumann algebra, $(b, \mathfrak a)$ a weighted action of $\mathfrak{G}$ on $A$, $(1\underset{N}{_b\otimes_\alpha}\hat{\beta}, \underline{\mathfrak a})$ be the action of $\mathfrak{G}$ on $A\underset{N}{_b*_\alpha}\mathcal L(H)$ introduced in \ref{crossed} and $\tilde{\Theta}$ the isomorphism introduced in \ref{tildeTheta} which sends $(A\underset{N}{_b*_\alpha}\mathcal L(H))\rtimes_{\underline{\mathfrak a}}\mathfrak{G}$ onto $(A\rtimes_\mathfrak a\mathfrak{G})\underset{N}{_\beta*_\alpha}\mathcal L(H)$; let $\psi$ be a lifted weight on $A$, and $\underline{\psi}$ be the normal semi-finite faithful weight on $A\underset{N}{_b*_\alpha}\mathcal L(H)$ introduced in \ref{psibarre}, and $\widetilde{(\underline{\psi})}$ its dual weight on $(A\underset{N}{_b*_\alpha}\mathcal L(H))\rtimes_{\underline{\mathfrak a}}\mathfrak{G}$; let
$\underline{(\tilde{\psi})}$ be the normal semi-finite faithful weight on $(A\rtimes_\mathfrak a\mathfrak{G})\underset{N}{_\beta*_\alpha}\mathcal L(H)$ introduced by applying \ref{psibarre} to the weight $\tilde{\psi}$ on $A\rtimes_\mathfrak a\mathfrak{G}$. Then : \newline (i) for any $X\in \mathfrak{N}_{\widetilde{(\underline{\psi})}}$, $\tilde{\Theta}(X)$ belongs to $\mathfrak{N}_{\underline{(\tilde{\psi})}}$, and : \[\Lambda_{\underline{(\tilde{\psi})}}(\tilde{\Theta}(X))=(1_H\underset{N}{_\beta\otimes_a}1_{H_\psi}\underset{N}{_b\otimes_\alpha}W^*\sigma_\nu)\Lambda_{\widetilde{(\underline{\psi})}}(X)\] (ii) we have : $J_{\underline{(\tilde{\psi})}}(1_H\underset{N}{_\beta\otimes_a}1_{H_\psi}\underset{N}{_b\otimes_\alpha}W^*\sigma_\nu)=(1_H\underset{N}{_\beta\otimes_a}1_{H_\psi}\underset{N}{_b\otimes_\alpha}W^*\sigma_\nu)J_{\widetilde{(\underline{\psi})}}$. } \begin{proof} The fact that $\tilde{\Theta}(X)$ belongs to $\mathfrak{N}_{\underline{(\tilde{\psi})}}$ is a straightforward corollary of \ref{psitildetheta}(ii). Let us take $x$ in $\mathfrak{N}_\psi$, $\xi$ in $D(_\alpha H, \nu)\cap\mathcal D(\Delta_{\widehat{\Phi}}^{-1/2})$, such that $\Delta_{\widehat{\Phi}}^{-1/2}\xi$ belongs to $D(_\alpha H, \nu)$, $\eta$ in $D(_\alpha H, \nu)$, and $a$ in $\mathfrak{N}_{\widehat{\Phi}^c}\cap\mathfrak{N}_{\hat{T}^c}$. Then, by \ref{psibarre3}(i), we get that $\rho_\eta^{b, \alpha}x(\rho^{b, \alpha}_\xi)^*$ belongs to $\mathfrak{N}_{\underline{\psi}}$, and, by (\cite{E5} 13.3), $(1\underset{N}{_b\otimes_\alpha}a)\underline{\mathfrak{a}}(\rho_\eta^{b, \alpha}x(\rho^{b, \alpha}_\xi)^*)$ belongs to $\mathfrak{N}_{\widetilde{(\underline{\psi})}}$.
Moreover, we have, where $(e_i)_{i\in I}$ is an orthogonal $(\alpha, \nu)$-basis of $H$ : \begin{align*} \Lambda_{\underline{(\tilde{\psi})}}(\tilde{\Theta}((1\underset{N}{_b\otimes_\alpha}a)\underline{\mathfrak{a}}(\rho_\eta^{b, \alpha}x(\rho^{b, \alpha}_\xi)^*)) &= \Lambda_{\underline{(\tilde{\psi})}}(\tilde{\Theta}(1\underset{N}{_b\otimes_\alpha}a)\tilde{\Theta}\underline{\mathfrak{a}}(\rho_\eta^{b, \alpha}x(\rho^{b, \alpha}_\xi)^*))\\ &= \Lambda_{\underline{(\tilde{\psi})}}(\tilde{\Theta}(1\underset{N}{_b\otimes_\alpha}a)(\mathfrak{a}\underset{N}{_b*_\alpha}id)(\rho_\eta^{b, \alpha}x(\rho^{b, \alpha}_\xi)^*))\\ &= \Lambda_{\underline{(\tilde{\psi})}}(\tilde{\Theta}(1\underset{N}{_b\otimes_\alpha}a)\rho_\eta^{\beta, \alpha}\mathfrak{a}(x)(\rho^{\beta, \alpha}_\xi)^*)\\ &= \sum_i \Lambda_{\underline{(\tilde{\psi})}}(\rho_{e_i}^{\beta, \alpha}(\rho_{e_i}^{\beta, \alpha})^*\tilde{\Theta}(1\underset{N}{_b\otimes_\alpha}a)\rho_\eta^{\beta, \alpha}\mathfrak{a}(x)(\rho^{\beta, \alpha}_\xi)^*) \end{align*} Then, using \ref{tildeTheta}, we get that $\tilde{\Theta}(1\underset{N}{_b\otimes_\alpha}a)=1\underset{N}{_b\otimes_\alpha}(1\underset{N^o}{_\beta\otimes_\alpha}J_\Phi J_{\widehat{\Phi}})\widehat{\Gamma}^{oc}(a)(1\underset{N^o}{_\beta\otimes_\alpha}J_{\widehat{\Phi}}J_\Phi )$, and, therefore, that : \[(\rho_{e_i}^{\beta, \alpha})^*\tilde{\Theta}(1\underset{N}{_b\otimes_\alpha}a)\rho_\eta^{\beta, \alpha} =1\underset{N}{_b\otimes_\alpha}(id\underset{N^o}{_\beta*_{\hat{\alpha}}}\omega_{J_{\widehat{\Phi}}J_\Phi \eta, J_{\widehat{\Phi}}J_\Phi e_i})\widehat{\Gamma}^{oc}(a)\] and, we get then, applying \ref{psibarre3}(i) to the weight $\tilde{\psi}$, that : \[\Lambda_{\underline{(\tilde{\psi})}}(\tilde{\Theta}((1\underset{N}{_b\otimes_\alpha}a)\underline{\mathfrak{a}}(\rho_\eta^{b, \alpha}x(\rho^{b, \alpha}_\xi)^*)) = \sum_i\Lambda_{\underline{(\tilde{\psi})}}(\rho_{e_i}^{\beta, \alpha} 
(1\underset{N}{_b\otimes_\alpha}(id\underset{N^o}{_\beta*_{\hat{\alpha}}}\omega_{J_{\widehat{\Phi}}J_\Phi \eta, J_{\widehat{\Phi}}J_\Phi e_i})\widehat{\Gamma}^{oc}(a)) \mathfrak{a}(x)(\rho^{\beta, \alpha}_\xi)^*)\] is equal to : \[\sum_i J_{\widehat{\Phi}}\Delta_{\widehat{\Phi}}^{-1/2}\xi\underset{\nu}{_\beta\otimes_{\tilde{a}}}\Lambda_{\tilde{\psi}}((1\underset{N}{_b\otimes_\alpha}(id\underset{N^o}{_\beta*_{\hat{\alpha}}}\omega_{J_{\widehat{\Phi}}J_\Phi \eta, J_{\widehat{\Phi}}J_\Phi e_i})\widehat{\Gamma}^{oc}(a))\mathfrak{a}(x))\underset{\nu}{_\beta\otimes_\alpha}e_i\] where, for all $n\in N$, we put $\tilde{a}(n)=J_{\tilde{\psi}}(1\underset{N}{_b\otimes_\alpha}\beta(n^*))J_{\tilde{\psi}}$. We then get, by \ref{crossed}, that $\tilde{a}(n)=U^\mathfrak{a}_\psi(1\underset{N^o}{_a\otimes_\beta}\alpha(n))(U^\mathfrak{a}_\psi)^*=a(n)\underset{N}{_b\otimes_\alpha}1$. And, therefore, using now (\cite{E5} 13.3), we get that $\Lambda_{\underline{(\tilde{\psi})}}(\tilde{\Theta}((1\underset{N}{_b\otimes_\alpha}a)\underline{\mathfrak{a}}(\rho_\eta^{b, \alpha}x(\rho^{b, \alpha}_\xi)^*)) $ is equal to : \[\sum_i J_{\widehat{\Phi}}\Delta_{\widehat{\Phi}}^{-1/2}\xi\underset{\nu}{_\beta\otimes_{a}}\Lambda_\psi(x)\underset{\nu}{_b\otimes_\alpha}\Lambda_{\widehat{\Phi}^c}((id\underset{N^o}{_\beta*_{\hat{\alpha}}}\omega_{J_{\widehat{\Phi}}J_\Phi \eta, J_{\widehat{\Phi}}J_\Phi e_i})\widehat{\Gamma}^{oc}(a))\underset{\nu}{_\beta\otimes_\alpha}e_i\] which, thanks to \ref{lemW}, is equal to : \[(1_H\underset{N}{_\beta\otimes_a}1_{H_\psi}\underset{N}{_b\otimes_\alpha}W^*\sigma_\nu) (J_{\widehat{\Phi}}\Delta_{\widehat{\Phi}}^{-1/2}\xi\underset{\nu}{_\beta\otimes_a}\Lambda_\psi (x)\underset{\nu}{_b\otimes_\alpha}\eta\underset{\nu}{_{\hat{\beta}}\otimes_\alpha}\Lambda_{\widehat{\Phi}^c}(a))\] which, using \ref{psibarre3}(i) again, is equal to : \[(1_H\underset{N}{_\beta\otimes_a}1_{H_\psi}\underset{N}{_b\otimes_\alpha}W^*\sigma_\nu) (\Lambda_{\underline{\psi}}((\rho_\eta^{b, \alpha}x(\rho^{b,
\alpha}_\xi)^*))\underset{\nu}{_{\hat{\beta}}\otimes_\alpha}\Lambda_{\widehat{\Phi}^c}(a))\] and, by (\cite{E5} 13.3) again, to : \[(1_H\underset{N}{_\beta\otimes_a}1_{H_\psi}\underset{N}{_b\otimes_\alpha}W^*\sigma_\nu) \Lambda_{\widetilde{(\underline{\psi})}}((1\underset{N}{_b\otimes_\alpha}a)\underline{\mathfrak{a}}(\rho_\eta^{b, \alpha}x(\rho^{b, \alpha}_\xi)^*))\] Using now \ref{psibarre3}(i), we get that, for any $a$ in $\mathfrak{N}_{\widehat{\Phi}^c}\cap\mathfrak{N}_{\hat{T}^c}$ and $Y$ in $\mathfrak{N}_{\underline{\psi}}$ : \[\Lambda_{\underline{(\tilde{\psi})}}(\tilde{\Theta}((1\underset{N}{_b\otimes_\alpha}a)\underline{\mathfrak{a}}(Y)) =(1_H\underset{N}{_\beta\otimes_a}1_{H_\psi}\underset{N}{_b\otimes_\alpha}W^*\sigma_\nu) \Lambda_{\widetilde{(\underline{\psi})}}((1\underset{N}{_b\otimes_\alpha}a)\underline{\mathfrak{a}}(Y))\] and, using now (\cite{E5} 13.3), we finish the proof of (i). \newline Let's suppose now that $X$ is analytic with respect to $\widetilde{(\underline{\psi})}$, such that $\sigma_{-i/2}^{\widetilde{(\underline{\psi})}}(X^*)$ belongs to $\mathfrak{N}_{\widetilde{(\underline{\psi})}}$. Then, using \ref{psitildetheta}(ii) and (i), we get that $\tilde{\Theta}(X)$ is analytic with respect to $\underline{(\tilde{\psi})}$, and that $\sigma_{-i/2}^{\underline{(\tilde{\psi})}}(\tilde{\Theta}(X^*))$ belongs to $\mathfrak{N}_{\underline{(\tilde{\psi})}}$. 
More precisely, we then get : \begin{align*} J_{\underline{(\tilde{\psi})}}(1_H\underset{N}{_\beta\otimes_a}1_{H_\psi}\underset{N}{_b\otimes_\alpha}W^*\sigma_\nu)\Lambda_{\widetilde{(\underline{\psi})}}(X) &= J_{\underline{(\tilde{\psi})}}\Lambda_{\underline{(\tilde{\psi})}}(\tilde{\Theta}(X))\\ &= \Lambda_{\underline{(\tilde{\psi})}}(\sigma_{-i/2}^{\underline{(\tilde{\psi})}}(\tilde{\Theta}(X^*)))\\ &= \Lambda_{\underline{(\tilde{\psi})}}(\tilde{\Theta}(\sigma_{-i/2}^{\widetilde{(\underline{\psi})}}(X^*)))\\ &= (1_H\underset{N}{_\beta\otimes_a}1_{H_\psi}\underset{N}{_b\otimes_\alpha}W^*\sigma_\nu)\Lambda_{\widetilde{(\underline{\psi})}}(\sigma_{-i/2}^{\widetilde{(\underline{\psi})}}(X^*))\\ &= (1_H\underset{N}{_\beta\otimes_a}1_{H_\psi}\underset{N}{_b\otimes_\alpha}W^*\sigma_\nu)J_{\widetilde{(\underline{\psi})}}\Lambda_{\widetilde{(\underline{\psi})}}(X) \end{align*} which, by density, gives (ii). \end{proof} \subsection{Proposition} \label{Upsibarre} {\it Let $\mathfrak{G}=(N, M, \alpha, \beta, \Gamma, T, T', \nu)$ be a measured quantum groupoid; let $A$ be a von Neumann algebra, $(b, \mathfrak a)$ a weighted action of $\mathfrak{G}$ on $A$, and $(1\underset{N}{_b\otimes_\alpha}\hat{\beta}, \underline{\mathfrak a})$ be the action of $\mathfrak{G}$ on $A\underset{N}{_b*_\alpha}\mathcal L(H)$ introduced in \ref{crossed}; let $\psi$ be a lifted weight on $A$, and $\underline{\psi}$ be the normal semi-finite faithful weight on $A\underset{N}{_b*_\alpha}\mathcal L(H)$ introduced in \ref{psibarre}.
Then, the unitary $U^{\underline{\mathfrak{a}}}_{\underline{\psi}}$ satisfies : \[U^{\underline{\mathfrak{a}}}_{\underline{\psi}}= (1_H\underset{N}{_\beta\otimes_a}1_{H_\psi}\underset{N}{_b\otimes_\alpha}\sigma W\sigma)(1_H\underset{N}{_\beta\otimes_\alpha}(id\underset{N^o}{_a*_\beta}\varsigma_N)(U_\psi^\mathfrak{a}\underset{N^o}{_a\otimes_\beta}1_H))\sigma_1^{\beta, \alpha}(W^{o*}\underset{N}{_\beta\otimes_a}1_{H_\psi}\underset{N}{_b\otimes_\alpha}1_H)(\sigma_1^{\beta, \hat{\alpha}})^*\] where $\sigma_1^{\beta, \alpha}$ is the flip from $(H\underset{\nu^o}{_\alpha\otimes_\beta}H)\underset{\nu}{_\beta\otimes_a}H_\psi\underset{\nu}{_b\otimes_\alpha}H$ onto $H\underset{\nu}{_\beta\otimes_\alpha}((H_\psi\underset{\nu}{_b\otimes_\alpha}H)\underset{\nu^o}{_a\otimes_\beta}H)$, and $\sigma_1^{\beta, \hat{\alpha}}$ is the flip from $H\underset{\nu}{_\beta\otimes_{\hat{\alpha}}}H\underset{\nu}{_\beta\otimes_a}H_\psi\underset{\nu}{_b\otimes_\alpha}H$ onto $(H\underset{\nu}{_\beta\otimes_a}H_\psi\underset{\nu}{_b\otimes_\alpha}H)\underset{\nu^o}{_{\hat{\alpha}}\otimes_\beta}H$. } \begin{proof} Let us recall (\ref{crossed}) that $(1\underset{N}{_b\otimes_\alpha}\hat{\beta}, \underline{\mathfrak{a}})$ is an action of $\mathfrak{G}$ on $A\underset{N}{_b*_\alpha}\mathcal L(H)$. 
Let $\underline{a}$ be the representation of $N$ on $H_{\underline{\psi}}$ defined, for all $n\in N$ by : \[\underline{a}(n)=J_{\underline{\psi}}\pi_{\underline{\psi}}(1\underset{N}{_b\otimes_\alpha}\hat{\beta}(n^*))J_{\underline{\psi}}\] Using \ref{psibarre3} (iii) and (ii), we get that : \[\underline{a}(n)=J_{\widehat{\Phi}}\hat{\beta}(n^*)J_{\widehat{\Phi}}\underset{N}{_\beta\otimes_a}1_{H_\psi}\underset{N}{_b\otimes_\alpha}1_{H}=\hat{\alpha}(n)\underset{N}{_\beta\otimes_a}1_{H_\psi}\underset{N}{_b\otimes_\alpha}1_{H}\] and, therefore, $U^{\underline{\mathfrak{a}}}_{\underline{\psi}}$ is a unitary from $(H\underset{\nu}{_\beta\otimes_a}H_\psi\underset{\nu}{_b\otimes_\alpha}H)\underset{\nu^o}{_{\hat{\alpha}}\otimes_\beta}H$ onto $H\underset{\nu}{_\beta\otimes_a}H_\psi\underset{\nu}{_b\otimes_\alpha}H\underset{\nu}{_{\hat{\beta}}\otimes_\alpha}H$ given by the formula : \[U^{\underline{\mathfrak{a}}}_{\underline{\psi}}=J_{\widetilde{(\underline{\psi})}}(J_{\underline{\psi}}\underset{\nu^o}{_{\hat{\alpha}}\otimes_\beta}J_{\widehat{\Phi}})\] We have, using \ref{propW*sigma} : \[(1_H\underset{N}{_\beta\otimes_a}1_{H_\psi}\underset{N}{_b\otimes_\alpha}W^*\sigma_\nu)U^{\underline{\mathfrak{a}}}_{\underline{\psi}}\sigma_1^{\beta, \hat{\alpha}}= J_{\underline{(\tilde{\psi})}}(1_H\underset{N}{_\beta\otimes_a}1_{H_\psi}\underset{N}{_b\otimes_\alpha}W^*\sigma_\nu)(J_{\underline{\psi}}\underset{\nu^o}{_{\hat{\alpha}}\otimes_\beta}J_{\widehat{\Phi}})\sigma_1^{\beta, \hat{\alpha}}\] Let $\xi_1$, $\xi_2$ in $D(H_\beta, \nu^o)$, $\xi_3$ in $D(_\alpha H, \nu)$, $\eta\in H_\psi$; then $J_{\widehat{\Phi}}\xi_1$ belongs to $D(_\alpha H, \nu)$, and let us define $\zeta_i\in D(H_\beta, \nu^o)$ and $\zeta'_i\in D(_\alpha H, \nu)$ such that : \[W^*(J_{\widehat{\Phi}}\xi_1\underset{\nu^o}{_\alpha\otimes_{\hat{\beta}}}J_{\widehat{\Phi}}\xi_2)=lim_J\sum_{i\in J}(\zeta_i\underset{\nu}{_\beta\otimes_\alpha}\zeta'_i)\] the limit being taken on the filter of finite subsets $J\subset I$. 
Let us look at the image of the vector $\xi_1\underset{\nu}{_\beta\otimes_{\hat{\alpha}}}\xi_2\underset{\nu}{_\beta\otimes_a}\eta\underset{\nu}{_b\otimes_\alpha}\xi_3$ under the unitary : \[J_{\underline{(\tilde{\psi})}}(1_H\underset{N}{_\beta\otimes_a}1_{H_\psi}\underset{N}{_b\otimes_\alpha}W^*\sigma_\nu)(J_{\underline{\psi}}\underset{\nu^o}{_{\hat{\alpha}}\otimes_\beta}J_{\widehat{\Phi}})\sigma_1^{\beta, \hat{\alpha}}\] This vector is first sent by $\sigma_1^{\beta, \hat{\alpha}}$ on $(\xi_2\underset{\nu}{_\beta\otimes_a}\eta\underset{\nu}{_b\otimes_\alpha}\xi_3)\underset{\nu^o}{_{\hat{\alpha}}\otimes_\beta}\xi_1$, then $(J_{\underline{\psi}}\underset{\nu^o}{_{\hat{\alpha}}\otimes_\beta}J_{\widehat{\Phi}})$ sends it on $J_{\widehat{\Phi}}\xi_3\underset{\nu}{_\beta\otimes_a}J_\psi\eta\underset{\nu}{_b\otimes_\alpha}J_{\widehat{\Phi}}\xi_2\underset{\nu}{_{\hat{\beta}}\otimes_\alpha}J_{\widehat{\Phi}}\xi_1$, then $(1_H\underset{N}{_\beta\otimes_a}1_{H_\psi}\underset{N}{_b\otimes_\alpha}W^*\sigma_\nu)$ sends it on \[J_{\widehat{\Phi}}\xi_3\underset{\nu}{_\beta\otimes_a}J_\psi\eta\underset{\nu}{_b\otimes_\alpha}W^*(J_{\widehat{\Phi}}\xi_1\underset{\nu^o}{_\alpha\otimes_{\hat{\beta}}}J_{\widehat{\Phi}}\xi_2)= lim_J\sum_{i\in J}(J_{\widehat{\Phi}}\xi_3\underset{\nu}{_\beta\otimes_a}J_\psi\eta\underset{\nu}{_b\otimes_\alpha} \zeta_i\underset{\nu}{_\beta\otimes_\alpha}\zeta'_i)\] and $J_{\underline{(\tilde{\psi})}}$ sends it then on : \[lim_J\sum_{i\in J}(J_{\widehat{\Phi}}\zeta'_i\underset{\nu}{_\beta\otimes_a}J_{\tilde{\psi}}(J_\psi\eta\underset{\nu}{_b\otimes_\alpha}\zeta_i)\underset{\nu}{_\beta\otimes_\alpha}\xi_3) = lim_J\sum_{i\in J}(J_{\widehat{\Phi}}\zeta'_i\underset{\nu}{_\beta\otimes_a}U^\mathfrak{a}_\psi(\eta\underset{\nu^o}{_a\otimes_\beta}J_{\widehat{\Phi}}\zeta_i)\underset{\nu}{_\beta\otimes_\alpha}\xi_3)\] which is equal to : 
\[(1_H\underset{N}{_\beta\otimes_a}U^\mathfrak{a}_\psi\underset{N}{_b\otimes_\alpha}1_H)(1_H\underset{N}{_\beta\otimes_\alpha}\sigma_\nu\underset{N}{_b\otimes_\alpha}1_H)(\sigma W^{o*}\underset{N}{_\beta\otimes_a}1_{H_\psi}\underset{N}{_b\otimes_\alpha}1_H)(\xi_1\underset{\nu}{_\beta\otimes_{\hat{\alpha}}}\xi_2\underset{\nu}{_\beta\otimes_a}\eta\underset{\nu}{_b\otimes_\alpha}\xi_3)\] from which, using again the density of finite sums of elementary tensors in the relative Hilbert tensor product, we get that : \begin{multline*} (1_H\underset{N}{_\beta\otimes_a}1_{H_\psi}\underset{N}{_b\otimes_\alpha}W^*\sigma_\nu)U^{\underline{\mathfrak{a}}}_{\underline{\psi}}\sigma_1^{\beta, \hat{\alpha}}=\\ (1_H\underset{N}{_\beta\otimes_a}U^\mathfrak{a}_\psi\underset{N}{_b\otimes_\alpha}1_H)(1_H\underset{N}{_\beta\otimes_\alpha}\sigma_\nu\underset{N}{_b\otimes_\alpha}1_H)(\sigma W^{o*}\underset{N}{_\beta\otimes_a}1_{H_\psi}\underset{N}{_b\otimes_\alpha}1_H)=\\ (1_H\underset{N}{_\beta\otimes_a}(id\underset{N^o}{_a*_\beta}\varsigma_N)(U_\psi^\mathfrak{a}\underset{N}{_b\otimes_\alpha}1_H))\sigma_1^{\beta, \alpha}(W^{o*}\underset{N}{_\beta\otimes_a}1_{H_\psi}\underset{N}{_b\otimes_\alpha}1_H) \end{multline*} from which we get the result. 
\end{proof} \subsection{Proposition} \label{Upsicorep} {\it Let $\mathfrak{G}=(N, M, \alpha, \beta, \Gamma, T, T', \nu)$ be a measured quantum groupoid; let $A$ be a von Neumann algebra, and let $(b, \mathfrak a)$ be a weighted action of $\mathfrak{G}$ on $A$; let $\psi$ be a lifted weight on $A$; then, the unitary $U^\mathfrak{a}_\psi$ introduced in \ref{crossed} is a corepresentation of $\mathfrak{G}$.} \begin{proof} With the notations of \ref{Upsibarre}, we get, using \ref{Upsibarre}, that : \[1_H\underset{N}{_\beta\otimes_a}(id\underset{N^o}{_a*_\beta}\varsigma_N)(U_\psi^\mathfrak{a}\underset{N}{_b\otimes_\alpha}1_H)= (1_H\underset{N}{_\beta\otimes_a}1_{H_\psi}\underset{N}{_b\otimes_\alpha}\sigma W^*\sigma)U^{\underline{\mathfrak{a}}}_{\underline{\psi}}\sigma_1^{\beta, \hat{\alpha}}(W^{o}\underset{N}{_\beta\otimes_a}1_{H_\psi}\underset{N}{_b\otimes_\alpha}1_H)(\sigma_1^{\beta, \alpha})^*\] which we shall write, for simplification, with the usual leg numbering notation : \[(U^\mathfrak{a}_{\psi})_{2,4}=\widehat{W}_{3,4}U^{\underline{\mathfrak{a}}}_{\underline{\psi}}(W^o)_{4, 1}\] But $\widehat{W}$ is a corepresentation of $\mathfrak{G}^o$ (\cite{E5}, 5.6), $U^{\underline{\mathfrak{a}}}_{\underline{\psi}}$ is a corepresentation of $\mathfrak{G}$ by \ref{cora}, and $\sigma W^o\sigma$ is a corepresentation of $\mathfrak{G}^o$ by (\cite{E5}, 5.6 and 5.3).
So, we get : \begin{align*} (id*\Gamma)(U^\mathfrak{a}_\psi)_{2,4,5} &= \widehat{W}_{3,5}\widehat{W}_{3,4}(U^{\underline{\mathfrak{a}}}_{\underline{\psi}})_{1,2,3,4}(U^{\underline{\mathfrak{a}}}_{\underline{\psi}})_{1,2,3,5}W^o_{5,1}W^o_{4,1}\\ &=\widehat{W}_{3,5}(U^\mathfrak{a}_{\psi})_{2,4}W^{o*}_{4,1}(U^{\underline{\mathfrak{a}}}_{\underline{\psi}})_{1,2,3,5}W^o_{5,1}W^o_{4,1}\\ &=(U^\mathfrak{a}_{\psi})_{2,4}W^{o*}_{4,1}\widehat{W}_{3,5}(U^{\underline{\mathfrak{a}}}_{\underline{\psi}})_{1,2,3,5} W^o_{5,1}W^o_{4,1}\\ &=(U^\mathfrak{a}_{\psi})_{2,4}W^{o*}_{4,1}(U^\mathfrak{a}_{\psi})_{2,5}W^o_{4,1}\\ &=(U^\mathfrak{a}_{\psi})_{2,4}(U^\mathfrak{a}_{\psi})_{2,5} \end{align*} which shows that $U^\mathfrak{a}_\psi$ is a corepresentation. A more complete proof is a painful exercise we leave to the reader. \end{proof} \subsection{Theorem} \label{standard} {\it Let $\mathfrak{G}=(N, M, \alpha, \beta, \Gamma, T, T', \nu)$ be a measured quantum groupoid; let $A$ be a von Neumann algebra, and let $(b, \mathfrak a)$ be a weighted action of $\mathfrak{G}$ on $A$; then, for any normal semi-finite faithful weight $\psi$ on $A$, the unitary $U^\mathfrak{a}_\psi$ introduced in \ref{crossed} is a standard implementation of $\mathfrak{a}$, in the sense of \ref{action}. } \begin{proof} As the action is weighted, there exists a normal semi-finite faithful weight $\psi$ on $A$ which is lifted from $\nu^o$; we get that $U^\mathfrak{a}_\psi$ is a corepresentation by \ref{Upsicorep}, and is therefore a standard implementation. Using now \ref{propu}, we easily get that it remains true for any normal semi-finite faithful weight on $A$, which is the result. 
\end{proof} \subsection{Remark} \label{remstandard} In \ref{standard}, we had obtained that $U^\mathfrak{a}_\psi$ is a standard implementation of $\mathfrak{a}$, if there exists a normal semi-finite faithful operator-valued weight from $A$ onto $b(N)$; this is true in particular in the following cases : \newline (i) $\mathfrak{G}$ is a locally compact quantum group ($N=\mathbb{C}$); this result was obtained in (\cite{V1} 4.4); \newline (ii) if $N$ is abelian and $b(N)\subset Z(A)$; in particular, if $\mathfrak{G}$ is a measured groupoid; we shall discuss this particular case in \ref{exgd}. More generally, if $\mathfrak{G}$ is a continuous field of locally compact quantum groups (\ref{ex} (iv)), or is De Commer's example (\ref{ex} (v)). \newline (iii) $A$ is a type I factor; if we write $A=\mathcal L(\mathfrak{H})$, starting from any normal semi-finite weight on $b(N)'$, we get a normal faithful semi-finite operator-valued weight from $A$ to $b(N)$. More generally, this remains true if $A$ is a sum of type I factors; \newline (iv) $N$ is a sum of type I factors (in particular, if $N$ is a finite dimensional algebra, which is the case, in particular if $\mathfrak{G}$ is a finite dimensional quantum groupoid); \newline (v) $N$ and $A$ are semi-finite. \newline In \ref{Uinv}, the result was proved if $\mathfrak{a}$ is a dual action. \subsection{Example} \label{exgd} Let $\mathcal G$ be a measured groupoid, with $\mathcal G^{(0)}$ as its set of units, $r$ and $s$ its range and source applications, $(\lambda^u)_{u\in \mathcal G^{(0)}}$ its Haar system, and $\nu$ a quasi-invariant measure; let $\mu=\int_{\mathcal G^{(0)}}\lambda^ud\nu$; let us consider the von Neumann algebra $L^\infty(\mathcal G, \mu)$, which is a $L^\infty(\mathcal G^{(0)})$-bimodule, thanks to the two homomorphisms $r_{\mathcal G}$ and $s_{\mathcal G}$ defined, for $f$ in $L^\infty(\mathcal G^{(0)})$ by $r_{\mathcal G}(f)=f\circ r$ and $s_{\mathcal G}(f)=f\circ s$.
We have shown in (\cite{E5}, 3.1, 3.4 and 3.17) how it is possible to put a measured quantum groupoid structure on this von Neumann bimodule. \newline An action $(b, \mathfrak{a})$ of this measured quantum groupoid on a von Neumann algebra $A$ verifies that $b(L^\infty(\mathcal G^{(0)}))\subset Z(A)$, and, therefore, $A$ can be decomposed as $A=\int_{\mathcal G^{(0)}}^\oplus A^xd\nu(x)$ (\cite{E5}, 6.1); moreover, let $\psi$ be a normal semi-finite faithful weight on $A=\int_{\mathcal G^{(0)}}^\oplus A^xd\nu(x)$. Then $\psi$ is a lifted weight; more precisely, there exists a measurable field $\psi^x$ of normal semi-finite faithful weights, such that $\psi=\int_{\mathcal G^{(0)}}^\oplus \psi^x d\nu(x)$ in the sense of (\cite{T} 4.6), and $H_\psi=\int _{\mathcal G^{(0)}}^\oplus H_{\psi^x}d\nu (x)$. \newline On the other hand, the action $\mathfrak{a}$ is (\cite{E5}, 6.3) an action of $\mathcal G$ in the sense of (\cite{Y3}, 3.1), i.e. for all $g\in\mathcal G$, there exists a family of $*$-isomorphisms $\mathfrak{a}_g$ from $A^{s(g)}$ onto $A^{r(g)}$, such that, if $(g_1, g_2)\in\mathcal G^{(2)}$, we have $\mathfrak{a}_{g_1g_2}=\mathfrak{a}_{g_1}\mathfrak{a}_{g_2}$, and such that, for any normal positive functional $\omega=\int_{\mathcal G^{(0)}}^\oplus \omega^x d\nu(x)$, and any $y=\int_{\mathcal G^{(0)}}^\oplus y^x d\nu (x)$, the function $g\mapsto \omega^{r(g)}(\mathfrak{a}_g(y^{s(g)}))$ is $\mu$-measurable. These $*$-isomorphisms have standard implementations $u_g : H_{\psi^{s(g)}}\to H_{\psi^{r(g)}}$ such that $\mathfrak{a}_g(y^{s(g)})=u_gy^{s(g)}u_g^*$. If $(g_1, g_2)\in\mathcal G^{(2)}$, we have $u_{g_1g_2}=u_{g_1}u_{g_2}$. \newline More precisely, the Hilbert space $H_\psi\underset{\nu}{_b\otimes_{r_\mathcal G}}L^2(\mathcal G, \mu)$ can be identified with $\int_{\mathcal G}^\oplus H_{\psi^{r(g)}}d\mu(g)$.
We then get : \[\mathfrak{a} (\int_{\mathcal G^{(0)}}^\oplus y^x d\nu (x))=\int_{\mathcal G}^\oplus \mathfrak{a}_g(y^{s(g)})d\mu(g)\] In \cite{Y1} and \cite{Y2} is given a construction of the crossed product of $A$ by $\mathcal G$; using (\cite{Y3} 2.14), we see (\cite{E5}, 9.2) that this crossed-product is isomorphic to the definition given in (\cite{E5}, 9.1). Moreover, we get the same notion of dual action (\cite{E5}, 9.6) and of dual weight (\cite{E5}, 13.1). \newline As $b$ is central, we have $a=b$, and the Hilbert space $H_\psi\underset{\nu^o}{_a\otimes_{s_\mathcal G}}L^2(\mathcal G, \mu)$ can be identified with $\int_{\mathcal G}^\oplus H_{\psi^{s(g)}}d\mu(g)$. Using then \cite{Y3}, 2.6, we get that $U^\mathfrak{a}_\psi=\int_\mathcal G^\oplus u_gd\mu(g)$, which is a unitary from $\int_{\mathcal G}^\oplus H_{\psi^{s(g)}}d\mu(g)$ onto $\int_{\mathcal G}^\oplus H_{\psi^{r(g)}}d\mu(g)$. \section{The $(b,\gamma)$ property for weights} \label{gamma} If $\mathfrak{G}=(N, M, \alpha, \beta, \Gamma, T, T', \nu)$ is a measured quantum groupoid, and if $b$ is a normal faithful non degenerate anti-homomorphism from $N$ into a von Neumann algebra $A$, we define the $(b, \gamma)$ property for normal faithful semi-finite weights on $A$ (\ref{defgamma}). We define then, for such a weight, a normal semi-finite faithful weight $\underline{\psi_\delta}$ on $A\underset{N}{_b*_\alpha}\mathcal L(H)$ (\ref{thgamma}). We obtain then several technical results (\ref{cor2}, \ref{deltaDelta}, \ref{psibarre4}) which will be used in chapter \ref{bidualw}.
\subsection{Definition} \label{defgamma} Let $\mathfrak{G}=(N, M, \alpha, \beta, \Gamma, T, T', \nu)$ be a measured quantum groupoid, and let $b$ be a normal faithful non degenerate anti-homomorphism from $N$ into a von Neumann algebra $A$; we shall say that a normal faithful semi-finite weight $\psi$ on $A$ satisfies the $(b,\gamma)$ property if, for all $n\in N$ and $t\in\mathbb{R}$, we have $\sigma_t^\psi (b(n))=b(\gamma_t(n))$, where $\gamma_t$ is the one-parameter automorphism group of $N$ defined by $\sigma_t^T(\beta(n))=\beta(\gamma_t(n))$ (\cite{E5}, 3.8 (v)). \subsection{Example} \label{delta} Let $\mathfrak{G}=(N, M, \alpha, \beta, \Gamma, T, T', \nu)$ be a measured quantum groupoid, $A$ a von Neumann algebra, $(b,\mathfrak{a})$ an action of $\mathfrak{G}$ on $A$, $\psi$ a $\delta$-invariant normal faithful semi-finite weight on $A$ bearing the density property, as defined in (\cite{E5}) and recalled in \ref{action}. Then, $\psi$ satisfies the $(b, \gamma)$ property.
\newline Namely, for any $x\in A$, $t\in\mathbb{R}$, we have : \[\mathfrak a(\sigma_t^\psi(x))=(\Delta_\psi^{it}\underset{\nu}{_b\otimes_\alpha}\delta^{-it}\Delta_{\widehat{\Phi}}^{-it})\mathfrak a(x)(\Delta_\psi^{-it}\underset{\nu}{_b\otimes_\alpha}\delta^{it}\Delta_{\widehat{\Phi}}^{it})\] and, therefore, for any $n\in N$, we get, using (\cite{E5}, 3.8(ii)) : \begin{align*} \mathfrak{a}(\sigma_t^\psi(b(n))) &= (\Delta_\psi^{it}\underset{\nu}{_b\otimes_\alpha}\delta^{-it}\Delta_{\widehat{\Phi}}^{-it})(1\underset{N}{_b\otimes_\alpha}\beta(n))(\Delta_\psi^{-it}\underset{\nu}{_b\otimes_\alpha}\delta^{it}\Delta_{\widehat{\Phi}}^{it})\\ &=1\underset{N}{_b\otimes_\alpha}\delta^{-it}\Delta_{\widehat{\Phi}}^{-it}\beta(n)\delta^{it}\Delta_{\widehat{\Phi}}^{it}\\ &=1\underset{N}{_b\otimes_\alpha}\sigma_{t}^\Phi\sigma_{-t}^{\Phi\circ R}\tau_{-t}(\beta(n))\\ &=1\underset{N}{_b\otimes_\alpha}\sigma_{t}^\Phi(\beta(n))\\ &=1\underset{N}{_b\otimes_\alpha}\beta(\gamma_t(n))\\ &=\mathfrak{a}(b(\gamma_t(n))) \end{align*} from which we get the property, by the injectivity of $\mathfrak{a}$. \subsection{Example} \label{betahat} Let $\mathfrak{G}=(N, M, \alpha, \beta, \Gamma, T, T', \nu)$ be a measured quantum groupoid, and let $(N, b, A)$ be a faithful weighted right von Neumann right-module, in the sense of \ref{defw}; let $\psi$ be a normal faithful semi-finite weight on $A$, lifted from $\nu^o$, and let $\underline{\psi}$ be the normal faithful semi-finite weight on $A\underset{N}{_b*_\alpha}\mathcal L(H)$ defined in \ref{psibarre}. Then, $\underline{\psi}$ satisfies the $(1\underset{N}{_b\otimes_\alpha}\hat{\beta}, \gamma)$ property.
\newline Namely, using \ref{psibarre2} and (\cite{E5} 3.10 (vii)), we get : \[\sigma_t^{\underline{\psi}}(1\underset{N}{_b\otimes_\alpha}\hat{\beta}(n))=1\underset{N}{_b\otimes_\alpha}\Delta_{\widehat{\Phi}}^{-it}\hat{\beta}(n)\Delta_{\widehat{\Phi}}^{it}=1\underset{N}{_b\otimes_\alpha}\sigma_{-t}^{\widehat{\Phi}}(\hat{\beta}(n))= 1\underset{N}{_b\otimes_\alpha}\hat{\beta}(\hat{\gamma}_{-t}(n))=1\underset{N}{_b\otimes_\alpha}\hat{\beta}(\gamma_t(n))\] \subsection{Theorem} \label{thgamma} {\it Let $\mathfrak{G}=(N, M, \alpha, \beta, \Gamma, T, T', \nu)$ be a measured quantum groupoid, and let $b$ be a normal faithful non degenerate anti-homomorphism from $N$ into a von Neumann algebra $A$; let $\psi$ be a normal faithful semi-finite weight on $A$ satisfying the $(b,\gamma)$ property; then : \newline (i) it is possible to define a one-parameter group of unitaries $\Delta_\psi^{it}\underset{\nu}{_b\otimes_\alpha}(\delta\Delta_{\widehat{\Phi}})^{-it}$ on $H_\psi\underset{\nu}{_b\otimes_\alpha}H$, with natural values on elementary tensors. We shall denote $\Delta_\psi^{1/2}\underset{\nu}{_b\otimes_\alpha}(\delta\Delta_{\widehat{\Phi}})^{-1/2}$ its analytic generator.
\newline (ii) there exists a normal semi-finite faithful weight $\underline{\psi}_\delta$ on $A\underset{N}{_b*_\alpha}\mathcal L(H)$ such that : \[\frac{d\underline{\psi}_\delta}{d\psi^o}=\Delta_\psi^{1/2}\underset{\nu}{_b\otimes_\alpha}(\delta\Delta_{\widehat{\Phi}})^{-1/2}\] (iii) for any $a$ in $\mathfrak{N}_\psi\cap\mathfrak{N}_\psi^*$, and $\xi\in D(_\alpha H, \nu)\cap \mathcal D((\delta\Delta_{\widehat{\Phi}})^{-1/2})$, such that $(\delta\Delta_{\widehat{\Phi}})^{-1/2}\xi$ belongs to $D(_\alpha H, \nu)$, we have :} \[\underline{\psi}_\delta(\rho_\xi^{b, \alpha}aa^*(\rho_\xi^{b, \alpha})^*)=\|\Delta_\psi^{1/2}\Lambda_\psi (a)\underset{\nu}{_b\otimes_\alpha}(\delta\Delta_{\widehat{\Phi}})^{-1/2}\xi\|^2\] \begin{proof} Let $\eta\in D(_\alpha H, \nu)$, $n\in\mathfrak{N}_\nu$; then, we get : \begin{align*} \alpha(n)(\delta\Delta_{\widehat{\Phi}})^{-it}\eta &=(\delta\Delta_{\widehat{\Phi}})^{-it}\sigma_t^{\widehat{\Phi}}\sigma_t^{\Phi\circ R}\sigma_{-t}^\Phi(\alpha(n))\eta\\ &=(\delta\Delta_{\widehat{\Phi}})^{-it}\alpha(\sigma^\nu_t\gamma_{-t}\tau_{-t}(n))\eta\\ &=(\delta\Delta_{\widehat{\Phi}})^{-it}R^{\alpha, \nu}(\eta)\Lambda_\nu(\gamma_{-t}(n)) \end{align*} There exists a positive self-adjoint non singular operator $h$ on $H_\nu$ such that : \[\Lambda_\nu(\gamma_t(n))=h^{it}\Lambda_\nu(n)\] We then get that : \[\alpha(n)(\delta\Delta_{\widehat{\Phi}})^{-it}\eta=(\delta\Delta_{\widehat{\Phi}})^{-it}R^{\alpha, \nu}(\eta)h^{-it}\Lambda_\nu(n)\] from which we get that $(\delta\Delta_{\widehat{\Phi}})^{-it}\eta$ belongs to $D(_\alpha H, \nu)$, and that : \[R^{\alpha, \nu}((\delta\Delta_{\widehat{\Phi}})^{-it}\eta)=(\delta\Delta_{\widehat{\Phi}})^{-it}R^{\alpha, \nu}(\eta)h^{-it}\] from which we get that : \[<(\delta\Delta_{\widehat{\Phi}})^{-it}\eta, (\delta\Delta_{\widehat{\Phi}})^{-it}\eta>_{\alpha, \nu}= h^{it}<\eta, \eta>_{\alpha, \nu}h^{-it}\] As we have, for all $m\in N$, $\gamma_t(m)=h^{it}mh^{-it}$, we therefore get that
: \[<(\delta\Delta_{\widehat{\Phi}})^{-it}\eta, (\delta\Delta_{\widehat{\Phi}})^{-it}\eta>_{\alpha, \nu}^o= \gamma_t(<\eta, \eta>_{\alpha, \nu}^o)\] and, therefore, for all $\xi\in H_\psi$ : \begin{align*} \|\Delta_\psi^{it}\xi\underset{\nu}{_b\otimes_\alpha}(\delta\Delta_{\widehat{\Phi}})^{-it}\eta\|^2 &= (b(\gamma_t(<\eta, \eta>_{\alpha, \nu}^o))\Delta_\psi^{it}\xi|\Delta_\psi^{it}\xi)\\ &= (\sigma_t^\psi(b(<\eta, \eta>_{\alpha, \nu}^o))\Delta_\psi^{it}\xi|\Delta_\psi^{it}\xi)\\ &= \|\xi\underset{\nu}{_b\otimes_\alpha}\eta\|^2 \end{align*} which is (i). \newline As $(\Delta_\psi^{it}\underset{\nu}{_b\otimes_\alpha}(\delta\Delta_{\widehat{\Phi}})^{-it})(J_\psi xJ_\psi\underset{N}{_b\otimes_\alpha}1)(\Delta_\psi^{-it}\underset{\nu}{_b\otimes_\alpha}(\delta\Delta_{\widehat{\Phi}})^{it})=J_\psi \sigma_t^{\psi}(x)J_\psi\underset{N}{_b\otimes_\alpha}1$, we get (ii). Result (iii) is just a corollary of (ii) and \ref{subalgebra}(iv). \end{proof} \subsection{Corollary} \label{corthgamma} {\it Let $\mathfrak{G}$ be a measured quantum groupoid, and $(b,\mathfrak a)$ an action of $\mathfrak{G}$ on a von Neumann algebra $A$; let $\psi$ be a $\delta$-invariant weight on $A$, bearing the density condition, as defined in \ref{action}, and $\overline{\psi_\mathfrak{a}}$ the weight constructed on $A\underset{N}{_b*_\alpha}\mathcal L(H)$ by transporting the bidual weight (\ref{crossed}) of $\psi$. Using \ref{delta}, we can use \ref{thgamma} and define the weight $\underline{\psi}_\delta$ on $A\underset{N}{_b*_\alpha}\mathcal L(H)$. Then, we have : $\overline{\psi_\mathfrak{a}}=\underline{\psi}_\delta$. } \begin{proof} We have, in general, $\frac{d\overline{\psi_\mathfrak{a}}}{d\psi^o}=\Delta_{\tilde{\psi}}^{1/2}$ (\ref{crossed}). So, using \ref{Uinv}(ii) and \ref{thgamma}, we get the result.
\end{proof} \subsection{Corollary} \label{cor2} {\it Let $\mathfrak{G}=(N, M, \alpha, \beta, \Gamma, T, T', \nu)$ be a measured quantum groupoid, and let $b$ be a normal faithful non degenerate anti-homomorphism from $N$ into a von Neumann algebra $A$; let $\psi_1$ (resp. $\psi_2$) be a normal faithful semi-finite weight on $A$ satisfying the $(b,\gamma)$ property; then : \newline (i) the cocycle $(D\psi_1:D\psi_2)_t$ belongs to $A\cap b(N)'$; \newline (ii) we have : $(D\underline{\psi_1}_\delta : D\underline{\psi_2}_\delta)_t=(D\psi_1:D\psi_2)_t\underset{N}{_b\otimes_\alpha}1$. } \begin{proof} For any $x\in A$, we have : \[\sigma_t^{\psi_1}(x)=(D\psi_1:D\psi_2)_t\sigma_t^{\psi_2}(x)(D\psi_1:D\psi_2)_t^*\] and, therefore : \[\sigma_t^{\psi_1}\circ\sigma_{-t}^{\psi_2}(x)=(D\psi_1:D\psi_2)_t x(D\psi_1:D\psi_2)_t^*\] In particular, we get, for any $n\in N$ : \[b(n)=(D\psi_1:D\psi_2)_t b(n)(D\psi_1:D\psi_2)_t^*\] from which we get (i). Let $(\mathfrak{H}, \pi, J, \mathcal P)$ be a standard representation of the von Neumann algebra $A$; then $A^o$ is represented on $\mathfrak{H}$ by $JAJ$; for any normal semi-finite faithful weight $\psi$ on $A$, we have $\frac{d\psi}{d\psi^o}=\Delta_\psi^{1/2}$; moreover, we have then : \begin{align*} (\frac{d\psi_1}{d\psi_1^o})^{it}(D\psi_1^o:D\psi_2^o)_t(\frac{d\psi_2^o}{d\psi_2})^{it} &= (\frac{d\psi_1}{d\psi_1^o})^{it}(\frac{d\psi_1^o}{d\psi_1})^{it}(\frac{d\psi_2^o}{d\psi_1})^{-it}(\frac{d\psi_2^o}{d\psi_2})^{it}\\ &= (\frac{d\psi_1}{d\psi_2^o})^{it}(\frac{d\psi_2}{d\psi_2^o})^{-it}\\ &= (D\psi_1: D\psi_2)_{t} \end{align*} and, therefore $(D\psi_1^o:D\psi_2^o)_t=\Delta_{\psi_1}^{-it}(D\psi_1: D\psi_2)_{t}\Delta_{\psi_2}^{it}$.
By similar arguments, we have on $\mathfrak{H}\underset{\nu}{_b\otimes_\alpha}H$ : \begin{align*} (D\underline{\psi_1}_\delta : D\underline{\psi_2}_\delta)_t &= (\frac{d\underline{\psi_1}_\delta}{d\psi_1^o})^{it}(\frac{d\psi_1^o}{d\underline{\psi_2}_\delta})^{it}\\ &=(\frac{d\underline{\psi_1}_\delta}{d\psi_1^o})^{it}(D\psi_1^o:D\psi_2^o)_t(\frac{d\underline{\psi_2}_\delta}{d\psi_2^o})^{-it} \end{align*} As $(D\psi_1^o:D\psi_2^o)_t$ belongs to $JAJ\underset{N}{_b\otimes_\alpha}1_H$ and is therefore equal to : \[\Delta_{\psi_1}^{-it}(D\psi_1:D\psi_2)_{t}\Delta_{\psi_2}^{it}\underset{N}{_b\otimes_\alpha}1_H\] we obtain, using \ref{thgamma}(ii), that $(D\underline{\psi_1}_\delta : D\underline{\psi_2}_\delta)_t$ is equal to : \[(\Delta_{\psi_1}^{it}\underset{\nu}{_b\otimes_\alpha}(\delta\Delta_{\widehat{\Phi}})^{-it})(\Delta_{\psi_1}^{-it}(D\psi_1:D\psi_2)_{t}\Delta_{\psi_2}^{it}\underset{N}{_b\otimes_\alpha}1_H)(\Delta_{\psi_2}^{-it}\underset{\nu}{_b\otimes_\alpha}(\delta\Delta_{\widehat{\Phi}})^{it})\] from which we get the result. 
\end{proof} \subsection{Proposition} \label{deltaDelta} {\it Let $\mathfrak{G}=(N, M, \alpha, \beta, \Gamma, T, T', \nu)$ be a measured quantum groupoid; it is possible to define one parameter groups of unitaries $\Delta_{\widehat{\Phi}}^{it}\underset{\nu}{_\beta\otimes_\alpha}\Delta_{\widehat{\Phi}}^{it}$ and $(\delta\Delta_{\widehat{\Phi}})^{it}\underset{\nu^o}{_\alpha\otimes_{\hat{\beta}}}\Delta_{\widehat{\Phi}}^{it}$, with natural values on elementary tensors, and we have :} \[W(\Delta_{\widehat{\Phi}}^{it}\underset{\nu}{_\beta\otimes_\alpha}\Delta_{\widehat{\Phi}}^{it})W^*= (\delta\Delta_{\widehat{\Phi}})^{it}\underset{\nu^o}{_\alpha\otimes_{\hat{\beta}}}\Delta_{\widehat{\Phi}}^{it}\] \begin{proof} From (\cite{E5} 3.10 (vi)), we get that $\Delta_{\widehat{\Phi}}$ is the closure of $PJ_\Phi\delta^{-1}J_\Phi$, where $P$ is the managing operator of the pseudo-multiplicative unitary $W$, and $\delta$ the modulus of $\mathfrak{G}$; in (\cite{E5} 3.8 (vii)), we had got that it is possible to define one parameter groups of unitaries $P^{it}\underset{\nu}{_\beta\otimes_\alpha}P^{it}$ and $P^{it}\underset{\nu^o}{_\alpha\otimes_{\hat{\beta}}}P^{it}$, with natural values on elementary tensors, and that : \[W(P^{it}\underset{\nu}{_\beta\otimes_\alpha}P^{it})=(P^{it}\underset{\nu^o}{_\alpha\otimes_{\hat{\beta}}}P^{it})W\] On the other hand, it is possible (\cite{E5}, 3.8 (vi)) to define a one parameter group of unitaries $\delta^{it}\underset{\nu}{_\beta\otimes_\alpha}\delta^{it}$, with natural values on elementary tensors, and that : \[\delta^{it}\underset{\nu}{_\beta\otimes_\alpha}\delta^{it}=\Gamma(\delta^{it})=W^*(1\underset{N^o}{_\alpha\otimes_{\hat{\beta}}}\delta^{it})W\] Moreover, we know, from (\cite{E5}, 3.11 (iii)), that : \[W(J_{\widehat{\Phi}}\underset{\nu^o}{_\alpha\otimes_{\hat{\beta}}}J_\Phi)=(J_{\widehat{\Phi}}\underset{\nu^o}{_\alpha\otimes_{\hat{\beta}}}J_\Phi)W^*\] and from (\cite{E5} 3.8 (vi)) that 
$J_{\widehat{\Phi}}\delta^{-it}J_{\widehat{\Phi}}=R(\delta^{it})=\delta^{-it}$. \newline With all these data, we get that it is possible to define $\Delta_{\widehat{\Phi}}^{it}\underset{\nu}{_\beta\otimes_\alpha}\Delta_{\widehat{\Phi}}^{it}$ as : \[\Delta_{\widehat{\Phi}}^{it}\underset{\nu}{_\beta\otimes_\alpha}\Delta_{\widehat{\Phi}}^{it}=(P^{it}\underset{\nu}{_\beta\otimes_\alpha}P^{it})(J_\Phi\delta^{it}J_\Phi\underset{N}{_\beta\otimes_\alpha}J_\Phi\delta^{it}J_\Phi)\] and $(\delta\Delta_{\widehat{\Phi}})^{it}\underset{\nu^o}{_\alpha\otimes_{\hat{\beta}}}\Delta_{\widehat{\Phi}}^{it}$ as : \[(\delta\Delta_{\widehat{\Phi}})^{it}\underset{\nu^o}{_\alpha\otimes_{\hat{\beta}}}\Delta_{\widehat{\Phi}}^{it}=(P^{it}\underset{\nu^o}{_\alpha\otimes_{\hat{\beta}}}P^{it})(J_{\widehat{\Phi}}\underset{\nu}{_\beta\otimes_\alpha}J_\Phi)(\delta^{it}\underset{\nu}{_\beta\otimes_\alpha}\delta^{it})(J_{\widehat{\Phi}}\underset{\nu^o}{_\alpha\otimes_{\hat{\beta}}}J_\Phi)(J_\Phi\delta^{it}J_\Phi\underset{N^o}{_\alpha\otimes_{\hat{\beta}}}1)\] and to verify that : \begin{align*} W(\Delta_{\widehat{\Phi}}^{it}\underset{\nu}{_\beta\otimes_\alpha}\Delta_{\widehat{\Phi}}^{it})W^* &= W(P^{it}\underset{\nu}{_\beta\otimes_\alpha}P^{it})(J_\Phi\delta^{it}J_\Phi\underset{N}{_\beta\otimes_\alpha}J_\Phi\delta^{it}J_\Phi)W^*\\ &=(P^{it}\underset{\nu^o}{_\alpha\otimes_{\hat{\beta}}}P^{it})W(J_\Phi\delta^{it}J_\Phi\underset{N}{_\beta\otimes_\alpha}J_\Phi\delta^{it}J_\Phi)W^*\\ &=(P^{it}\underset{\nu^o}{_\alpha\otimes_{\hat{\beta}}}P^{it})(J_\Phi\delta^{it}J_\Phi\underset{N^o}{_\alpha\otimes_{\hat{\beta}}}1)W(1\underset{N}{_\beta\otimes_\alpha}J_\Phi\delta^{it}J_\Phi)W^* \end{align*} which is equal to : 
\[(P^{it}\underset{\nu^o}{_\alpha\otimes_{\hat{\beta}}}P^{it})(J_\Phi\delta^{it}J_\Phi\underset{N^o}{_\alpha\otimes_{\hat{\beta}}}1)(J_{\widehat{\Phi}}\underset{\nu}{_\beta\otimes_\alpha}J_\Phi)W^*(1\underset{N^o}{_\alpha\otimes_{\hat{\beta}}}\delta^{it})W(J_{\widehat{\Phi}}\underset{\nu^o}{_\alpha\otimes_{\hat{\beta}}}J_\Phi)\] and, therefore, to : \[(P^{it}\underset{\nu^o}{_\alpha\otimes_{\hat{\beta}}}P^{it})(J_\Phi\delta^{it}J_\Phi\underset{N^o}{_\alpha\otimes_{\hat{\beta}}}1)(J_{\widehat{\Phi}}\underset{\nu}{_\beta\otimes_\alpha}J_\Phi)(\delta^{it}\underset{\nu}{_\beta\otimes_\alpha}\delta^{it})(J_{\widehat{\Phi}}\underset{\nu^o}{_\alpha\otimes_{\hat{\beta}}}J_\Phi)\] or to : \[(P^{it}\underset{\nu^o}{_\alpha\otimes_{\hat{\beta}}}P^{it})(J_\Phi\delta^{it}J_\Phi\underset{N^o}{_\alpha\otimes_{\hat{\beta}}}1)(\delta^{it}\underset{\nu^o}{_\alpha\otimes_{\hat{\beta}}}J_{\widehat{\Phi}}\delta^{it}J_{\widehat{\Phi}})=(\delta\Delta_{\widehat{\Phi}})^{it}\underset{\nu^o}{_\alpha\otimes_{\hat{\beta}}}\Delta_{\widehat{\Phi}}^{it}\] which finishes the proof. 
\end{proof} \subsection{Proposition} \label{psibarre4} {\it Let $\mathfrak{G}=(N, M, \alpha, \beta, \Gamma, T, T', \nu)$ be a measured quantum groupoid, $(b,\mathfrak{a})$ a weighted action of $\mathfrak{G}$ on a von Neumann algebra $A$, and $\psi$ a normal semi-finite faithful weight on $A$, lifted from $\nu^o$; then the von Neumann algebra $A\underset{N}{_b*_\alpha}\mathcal L(H)$ is a faithful right $N$-module in two different ways, using $1\underset{N}{_b\otimes_\alpha}\beta$, and $1\underset{N}{_b\otimes_\alpha}\hat{\beta}$; moreover, the weight $\underline{\psi}$ constructed in \ref{psibarre} is a lifted weight from $\nu$, using $1\underset{N}{_b\otimes_\alpha}\beta$, and, on the other hand, satisfies the $(1\underset{N}{_b\otimes_\alpha}\hat{\beta}, \gamma)$ property; therefore, we can define a normal semi-finite faithful weight $\underline{\underline{\psi}}$ on $A\underset{N}{_b*_\alpha}\mathcal L(H)\underset{N}{_\beta*_\alpha}\mathcal L(H)$, and another normal semi-finite faithful weight $\underline{(\underline{\psi})}_\delta$ on $A\underset{N}{_b*_\alpha}\mathcal L(H)\underset{N}{_{\hat{\beta}}*_\alpha}\mathcal L(H)$. As in \ref{tildeTheta}, let us write, for any $Y$ in $\mathcal L(\mathfrak{H}\underset{\nu}{_b\otimes_\alpha}H\underset{\nu}{_{\hat{\beta}}\otimes_\alpha}H)$, \[\tilde{\Theta}(Y)=(1\underset{N}{_b\otimes_\alpha}W)^*(id\underset{N}{_b*_\alpha}\varsigma_N)(Y)(1\underset{N}{_b\otimes_\alpha}W)\] which belongs to $\mathcal L(\mathfrak{H}\underset{\nu}{_b\otimes_\alpha}H\underset{\nu}{_\beta\otimes_\alpha}H)$.
Then, we have :} \[\underline{\underline{\psi}}\circ\tilde{\Theta}=\underline{(\underline{\psi})}_\delta\] \begin{proof} By definition, the weight $\underline{\underline{\psi}}$ is defined on $A\underset{N}{_b*_\alpha}\mathcal L(H)\underset{N}{_\beta*_\alpha}\mathcal L(H)$ by considering on $H_{\underline{\psi}}\underset{\nu}{_\beta\otimes_\alpha}H$ the spatial derivative : \[\frac{d\underline{\underline{\psi}}}{d(\underline{\psi})^o}=\Delta_{\underline{\psi}}\underset{\nu}{_\beta\otimes_\alpha}\Delta_{\widehat{\Phi}}^{-1}\] and, using \ref{psibarre3}, we therefore get, on $H\underset{\nu}{_\beta\otimes_a}H_\psi\underset{\nu}{_b\otimes_\alpha}H\underset{\nu}{_\beta\otimes_\alpha}H$, that : \[\frac{d\underline{\underline{\psi}}}{d(\underline{\psi})^o}=\Delta_{\widehat{\Phi}}^{-1}\underset{\nu}{_\beta\otimes_a}\Delta_\psi\underset{\nu}{_b\otimes_\alpha}\Delta_{\widehat{\Phi}}^{-1}\underset{\nu}{_\beta\otimes_\alpha}\Delta_{\widehat{\Phi}}^{-1}\] On the other hand, the weight $\underline{(\underline{\psi})}_\delta$ is defined on $A\underset{N}{_b*_\alpha}\mathcal L(H)\underset{N}{_{\hat{\beta}}*_\alpha}\mathcal L(H)$ by considering on $H_{\underline{\psi}}\underset{\nu}{_{\hat{\beta}}\otimes_\alpha}H=H\underset{\nu}{_\beta\otimes_a}H_\psi\underset{\nu}{_b\otimes_\alpha}H\underset{\nu}{_{\hat{\beta}}\otimes_\alpha}H$ the spatial derivative : \[\frac{d\underline{(\underline{\psi})}_\delta}{d(\underline{\psi})^o}=\Delta_{\underline{\psi}}\underset{\nu}{_{\hat{\beta}}\otimes_\alpha}(\delta\Delta_{\widehat{\Phi}})^{-1}=\Delta_{\widehat{\Phi}}^{-1}\underset{\nu}{_\beta\otimes_a}\Delta_\psi\underset{\nu}{_b\otimes_\alpha}\Delta_{\widehat{\Phi}}^{-1}\underset{\nu}{_{\hat{\beta}}\otimes_\alpha}(\delta\Delta_{\widehat{\Phi}})^{-1}\] from which we get, using \ref{deltaDelta} and the definition of $\tilde{\Theta}$ that : 
\[\frac{d\underline{\underline{\psi}}}{d(\underline{\psi})^o}=(id\underset{N}{_\beta*_a}\tilde{\Theta})(\frac{d\underline{(\underline{\psi})}_\delta}{d(\underline{\psi})^o})\] The weight $(\underline{\psi})^o$ is defined on $J_{\underline{\psi}}\pi_{\underline{\psi}}(A\underset{N}{_b*_\alpha}\mathcal L(H))J_{\underline{\psi}}$, which, using again \ref{psibarre3}, is equal to $\mathcal L(H)\underset{N}{_\beta*_a}A'\underset{N}{_b\otimes_\alpha}1_H$; we see, therefore, for $X\in \mathcal L(H)\underset{N}{_\beta*_a}A'$, that $(id\underset{N}{_\beta*_a}\tilde{\Theta})$ sends $X\underset{N}{_b\otimes_\alpha}1_H\underset{N}{_{\hat{\beta}}\otimes_\alpha}1_H$ on $X\underset{N}{_b\otimes_\alpha}1_H\underset{N}{_\beta\otimes_\alpha}1_H$, and leaves $(\underline{\psi})^o$ invariant. From which we deduce that : \[\frac{d\underline{\underline{\psi}}\circ\tilde{\Theta}}{d(\underline{\psi})^o}=\frac{d\underline{(\underline{\psi})}_\delta}{d(\underline{\psi})^o}\] from which we get the result. \end{proof} \section{Biduality of weights} \label{bidualw} In that chapter, following what had been done for locally compact quantum groups in \cite{Y4}, \cite{Y5}, and \cite{BV}, starting from an action $\mathfrak{a}$ of a measured quantum groupoid on a von Neumann algebra $A$, we define the Radon-Nikodym derivative of a lifted weight on $A$ with respect to this action (\ref{defder}); this operator is an $\mathfrak{a}$-cocycle (\ref{cocycle}), which measures, in a certain sense, how the weight $\psi$ behaves towards the action. In particular, we prove that this cocycle is equal to $1$ if and only if the weight is invariant by the action (\ref{thinv}, \ref{thinv2}). 
\subsection{Theorem} \label{thbidualw} {\it Let $\mathfrak{G}=(N, M, \alpha, \beta, \Gamma, T, T', \nu)$ be a measured quantum groupoid, $(b,\mathfrak{a})$ a weighted action of $\mathfrak{G}$ on a von Neumann algebra $A$, $\psi$ a normal semi-finite faithful weight on $A$ lifted from $\nu^o$; let $\tilde{\psi}$ be the dual weight on the crossed-product $A\rtimes_\mathfrak{a}\mathfrak{G}$, and let $\overline{\psi_\mathfrak{a}}$ be the normal semi-finite faithful weight on $A\underset{N}{_b*_\alpha}\mathcal L(H)$ obtained from the bidual weight $\tilde{\tilde{\psi}}$ and the isomorphism between $A\underset{N}{_b*_\alpha}\mathcal L(H)$ and the double crossed-product; let $\underline{\psi}$ be normal semi-finite faithful weight on $A\underset{N}{_b*_\alpha}\mathcal L(H)$ constructed in \ref{psibarre}. We have then : \[(D\overline{\psi_\mathfrak{a}}:D\underline{\psi})_t=\Delta_{\tilde{\psi}}^{it}(\Delta_\psi^{-it}\underset{\nu}{_b\otimes_\alpha}\Delta_{\widehat{\Phi}}^{it})\] Moreover, the unitaries $\Delta_{\tilde{\psi}}^{it}(\Delta_\psi^{-it}\underset{\nu}{_b\otimes_\alpha}\Delta_{\widehat{\Phi}}^{it})$ belong to $A\underset{N}{_b*_\alpha}(M\cap\beta(N)')$. } \begin{proof} We have $(D\overline{\psi_\mathfrak{a}}:D\underline{\psi})_t= (\frac{d\overline{\psi_\mathfrak{a}}}{d\psi^o})^{it}(\frac{d\underline{\psi}}{d\psi^o})^{-it}$, from which we get the first result, by \ref{crossed} and \ref{psibarre2}. 
So, we get that the unitaries $\Delta_{\tilde{\psi}}^{it}(\Delta_\psi^{-it}\underset{\nu}{_b\otimes_\alpha}\Delta_{\widehat{\Phi}}^{it})$ belong to $A\underset{N}{_b*_\alpha}\mathcal L(H)$; let's take $x\in M'$; using \ref{corsigma3}, we have $\sigma_t^{\overline{\psi}_\mathfrak{a}}(1\underset{N}{_b\otimes_\alpha}x)=1\underset{N}{_b\otimes_\alpha}\Delta_{\widehat{\Phi}}^{-it}x\Delta_{\widehat{\Phi}}^{it}$, and, using \ref{psibarre2}, we get that $\sigma_t^{\underline{\psi}}(1\underset{N}{_b\otimes_\alpha}x)=1\underset{N}{_b\otimes_\alpha}\Delta_{\widehat{\Phi}}^{-it}x\Delta_{\widehat{\Phi}}^{it}$; therefore, we get that $(\frac{d\overline{\psi_\mathfrak{a}}}{d\psi^o})^{it}(\frac{d\underline{\psi}}{d\psi^o})^{-it}$ commutes with $1\underset{N}{_b\otimes_\alpha}x$, and, therefore, belongs to $A\underset{N}{_b*_\alpha}M$. \newline Let $n\in N$; we have : \[\sigma_t^{\underline{\psi}}(1\underset{N}{_b\otimes_\alpha}\beta(n))=1\underset{N}{_b\otimes_\alpha}\Delta_{\widehat{\Phi}}^{-it}\beta(n)\Delta_{\widehat{\Phi}}^{it}=1\underset{N}{_b\otimes_\alpha}\tau_{-t}(\beta(n))=1\underset{N}{_b\otimes_\alpha}\beta(\sigma_{-t}^\nu(n))\] and, on the other hand : \[\sigma_t^{\overline{\psi}_\mathfrak{a}}(1\underset{N}{_b\otimes_\alpha}\beta(n))=\sigma_t^{\overline{\psi}_\mathfrak{a}}(\mathfrak{a}(b(n)))=\mathfrak{a}(\sigma_t^\psi(b(n)))=\mathfrak{a}(b(\sigma_{-t}^\nu(n))=1\underset{N}{_b\otimes_\alpha}\beta(\sigma_{-t}^\nu(n))\] which proves that both $\underline{\psi}$ and $\overline{\psi}_\mathfrak{a}$ are lifted weights from the weight $\nu^o$, and, therefore, that $(D\overline{\psi_\mathfrak{a}}:D\underline{\psi})_t$ belongs to $A\underset{N}{_b*_\alpha}\beta(N)'$, which finishes the proof. 
\end{proof} \subsection{Definition} \label{defder} Let $\mathfrak{G}=(N, M, \alpha, \beta, \Gamma, T, T', \nu)$ be a measured quantum groupoid, $(b,\mathfrak{a})$ a weighted action of $\mathfrak{G}$ on a von Neumann algebra $A$, $\psi$ a normal semi-finite faithful weight on $A$ lifted from $\nu^o$; we shall call the unitaries $(D\overline{\psi_\mathfrak{a}}:D\underline{\psi})_t\in A\underset{N}{_b*_\alpha}(M\cap\beta(N)')$ the Radon-Nikodym derivative of the weight $\psi$ with respect to the action $(b, \mathfrak{a})$, and denote it, for simplification, $(D\psi\circ\mathfrak{a}:D\psi)_t$, following the notations of (\cite{BV}, 10.2). \subsection{Theorem} \label{cocycle} {\it Let $\mathfrak{G}=(N, M, \alpha, \beta, \Gamma, T, T', \nu)$ be a measured quantum groupoid, $(b,\mathfrak{a})$ a weighted action of $\mathfrak{G}$ on a von Neumann algebra $A$, $\psi$ a normal semi-finite faithful weight on $A$ lifted from $\nu^o$; the Radon-Nikodym derivative $(D\psi\circ\mathfrak{a}:D\psi)_t$ introduced in \ref{defder} is an $\mathfrak{a}$-cocycle, i.e., we have :} \[(id\underset{N}{_b*_\alpha}\Gamma)((D\psi\circ\mathfrak{a}:D\psi)_t)=(\mathfrak{a}\underset{N}{_b*_\alpha}id)((D\psi\circ\mathfrak{a}:D\psi)_t)((D\psi\circ\mathfrak{a}:D\psi)_t\underset{N}{_\beta\otimes_\alpha}1)\] \begin{proof} For all $t\in\mathbb{R}$, $(\mathfrak{a}\underset{N}{_b*_\alpha}id)((D\psi\circ\mathfrak{a}:D\psi)_t)$ belongs to $A\underset{N}{_b*_\alpha}M\underset{N}{_\beta*_\alpha}M$, and the operator $\underline{\mathfrak{a}}((D\psi\circ\mathfrak{a}:D\psi)_t)=\tilde{\Theta}^{-1}(\mathfrak{a}\underset{N}{_b*_\alpha}id)((D\psi\circ\mathfrak{a}:D\psi)_t)$ belongs to $A\underset{N}{_b*_\alpha} M\underset{N}{_{\hat{\beta}}*_\alpha}M$ (where $\tilde{\Theta}$ had been defined in \ref{psibarre4}).
\newline We have, using successively \ref{crossed}, \ref{corthgamma} and \ref{tildeTheta}(iii) : \[\underline{\mathfrak{a}}((D\psi\circ\mathfrak{a}:D\psi)_t) = \underline{\mathfrak{a}}((D\overline{\psi_\mathfrak{a}}:D\underline{\psi})_t) = (D\overline{(\overline{\psi_\mathfrak{a}})_{\underline{\mathfrak{a}}}}:D\overline{(\underline{\psi})_{\underline{\mathfrak{a}}}})_t = (D\underline{(\overline{\psi_\mathfrak{a}})}_\delta :D\underline{(\overline{\psi_\mathfrak{a}})}\circ\tilde{\Theta})_t\] On the other hand, using successively \ref{corunderline}(ii) and \ref{psibarre4} : \begin{align*} \tilde{\Theta}^{-1}((D\psi\circ\mathfrak{a}:D\psi)_t\underset{N}{_\beta\otimes_\alpha}1) &= \tilde{\Theta}^{-1}((D\overline{\psi_\mathfrak{a}}:D\underline{\psi})_t\underset{N}{_\beta\otimes_\alpha}1)\\ &= \tilde{\Theta}^{-1}((D\underline{\overline{\psi_\mathfrak{a}}}:D\underline{\underline{\psi}})_t)\\ &= (D\underline{\overline{\psi_\mathfrak{a}}}\circ\tilde{\Theta}:D \underline{\underline{\psi}}\circ\tilde{\Theta})_t\\ &=(D\underline{\overline{\psi_\mathfrak{a}}}\circ\tilde{\Theta}:D\underline{(\underline{\psi})}_\delta)_t \end{align*} and, therefore, we get that : \begin{multline*} \tilde{\Theta}^{-1}[(\mathfrak{a}\underset{N}{_b*_\alpha}id)((D\psi\circ\mathfrak{a}:D\psi)_t)((D\psi\circ\mathfrak{a}:D\psi)_t\underset{N}{_\beta\otimes_\alpha}1)]\\ = \underline{\mathfrak{a}}((D\psi\circ\mathfrak{a}:D\psi)_t)\tilde{\Theta}^{-1}((D\psi\circ\mathfrak{a}:D\psi)_t\underset{N}{_\beta\otimes_\alpha}1) \end{multline*} is equal, using \ref{cor2}(ii), to : \begin{align*} (D\underline{(\overline{\psi_\mathfrak{a}})}_\delta :D\underline{(\overline{\psi_\mathfrak{a}})}\circ\tilde{\Theta})_t(D\underline{(\overline{\psi_\mathfrak{a}})}\circ\tilde{\Theta}:D\underline{(\underline{\psi})}_\delta)_t &= (D\underline{(\overline{\psi_\mathfrak{a}})}_\delta :D\underline{(\underline{\psi})}_\delta)_t\\ &= (D\overline{\psi_\mathfrak{a}}:D\underline{\psi})_t\underset{N}{_{\hat{\beta}}\otimes_\alpha}1\\ 
&=(D\psi\circ\mathfrak{a}:D\psi)_t\underset{N}{_{\hat{\beta}}\otimes_\alpha}1 \end{align*} from which we get that : \begin{align*} (\mathfrak{a}\underset{N}{_b*_\alpha}id)((D\psi\circ\mathfrak{a}:D\psi)_t)((D\psi\circ\mathfrak{a}:D\psi)_t\underset{N}{_\beta\otimes_\alpha}1) &= \tilde{\Theta}((D\psi\circ\mathfrak{a}:D\psi)_t\underset{N}{_{\hat{\beta}}\otimes_\alpha}1)\\ &= (id\underset{N}{_b*_\alpha}\Gamma)((D\psi\circ\mathfrak{a}:D\psi)_t) \end{align*} which is the result. \end{proof} \subsection{Example} \label{exlcg} Let $\bf{G}$ be a locally compact quantum group, and $\mathfrak{a}$ an action of $\bf{G}$ on a von Neumann algebra $A$; then this result had been obtained in (\cite{Y4}, 4.8 and \cite{Y5}, 3.7 and \cite{BV}, 10.3). \subsection{Example} \label{exgd2} Let $\mathcal G$ be a measured groupoid; let us use all the notations introduced in \ref{exgd}. Let $(\mathfrak{a}_g)_{g\in\mathcal G}$ be an action of $\mathcal G$ on a von Neumann algebra $A=\int_{\mathcal G^{(0)}}^\oplus A^x d\nu(x)$, and $\psi=\int_{\mathcal G^{(0)}}^\oplus \psi^x d\nu(x)$ a normal semi-finite faithful weight on $A$. Then, the Radon-Nikodym derivative of $\psi$ with respect to the action $\mathfrak{a}$, is, using (\cite{Y3}, 2.6), given by : \[(D\psi\circ\mathfrak{a}:D\psi)_t=\int_\mathcal G^\oplus (D\psi^{r(g)}:D\psi^{s(g)}\circ \mathfrak{a}_{g^{-1}})_t d\mu(g)\] which is acting on $\int_\mathcal G^\oplus H_{\psi^{r(g)}} d\mu(g)=H_\psi\underset{\nu}{_b\otimes_{r_\mathcal G}}L^2(\mathcal G, \mu)$. \subsection{Definition} \label{inv} Let $(b, \mathfrak{a})$ be an action of a measured quantum groupoid $\mathfrak{G}$ on a von Neumann algebra $A$.
A normal semi-finite faithful weight $\psi$ on $A$ will be said invariant by $\mathfrak{a}$ if, for all $\eta\in D(_\alpha H, \nu)\cap D(H_\beta, \nu^o)$ and $x\in\mathfrak{N}_\psi$, we have : \[\psi[(id\underset{N}{_b*_\alpha}\omega_\eta)\mathfrak{a}(x^*x)]=\|\Lambda_\psi (x)\underset{\nu^o}{_a\otimes_\beta}\eta\|^2\] We shall always suppose that such weights bear the density property, defined in \ref{action}, as for $\delta$-invariant weights. \subsection{Theorem} \label{thinv} {\it Let $(b, \mathfrak{a})$ be an action of a measured quantum groupoid $\mathfrak{G}$ on a von Neumann algebra $A$, $\psi$ a normal semi-finite faithful weight on $A$, invariant by $\mathfrak{a}$ in the sense of \ref{inv}, and bearing the density property, as defined in \ref{action}. Then, let $(e_i)_{i\in I}$ be an $(\alpha, \nu)$-orthogonal basis of $H$, $x\in\mathfrak{N}_\psi$, $\eta\in D(_\alpha H, \nu)\cap D(H_\beta, \nu^o)$ : \newline (i) for any $\xi\in D(_\alpha H, \nu)$, $(id\underset{N}{_b*_\alpha}\omega_{\eta, \xi})\mathfrak{a}(x)$ belongs to $\mathfrak{N}_\psi$; \newline (ii) the sum $\sum_i\Lambda_\psi((id\underset{N}{_b*_\alpha}\omega_{\eta, e_i})\mathfrak{a}(x))\underset{\nu}{_b\otimes_\alpha}e_i$ is strongly converging; its limit does not depend upon the choice of the $(\alpha, \nu)$-orthogonal basis of $H$, and allows us to define an isometry $V'_\psi$ from $H_\psi\underset{\nu^o}{_a\otimes_\beta}H$ to $H_\psi\underset{\nu}{_b\otimes_\alpha}H$ such that : \[V'_\psi(\Lambda_\psi(x)\underset{\nu^o}{_a\otimes_\beta}\eta)=\sum_i\Lambda_\psi((id\underset{N}{_b*_\alpha}\omega_{\eta, e_i})\mathfrak{a}(x))\underset{\nu}{_b\otimes_\alpha}e_i\] (iii) we have : \[\Lambda_\psi((id\underset{N}{_b*_\alpha}\omega_{\eta, \xi})\mathfrak{a}(x))=(id*\omega_{\eta, \xi})(V'_\psi)\Lambda_\psi (x)\] (iv) for any $y\in A$, $z\in M'$, $n\in N$, we have : \[\mathfrak{a}(y)V'_\psi=V'_\psi (y\underset{N^o}{_a\otimes_\beta}1)\] 
\[(1\underset{N}{_b\otimes_\alpha}z)V'_\psi=V'_\psi(1\underset{N^o}{_a\otimes_\beta}z)\] \[(a(n)\underset{N}{_b\otimes_\alpha}1)V'_\psi=V'_\psi(1\underset{N^o}{_a\otimes_\beta}\alpha(n))\] \[(1\underset{N}{_b\otimes_\alpha}\beta(n))V'_\psi=V'_\psi(b(n)\underset{N^o}{_a\otimes_\beta}1)\] \[(1\underset{N}{_b\otimes_\alpha}\hat{\beta}(n))V'_\psi=V'_\psi(1\underset{N^o}{_a\otimes_\beta}\hat{\beta}(n))\] (v) the operator $V'_\psi$ is a unitary; moreover, it is a copresentation of $\mathfrak{G}$ on $_a(H_\psi)_b$ which implements $\mathfrak{a}$; \newline (vi) we have : \[V'_\psi(\Delta_\psi^{it}\underset{N^o}{_a\otimes_\beta}\Delta_{\widehat{\Phi}}^{-it})=(\Delta_\psi^{it}\underset{N}{_b\otimes_\alpha}\Delta_{\widehat{\Phi}}^{-it})V'_\psi\] Moreover, the weight $\psi$ is lifted from $\nu^o$; more precisely, there exists a normal faithful semi-finite operator-valued weight $\mathfrak{T}$ from $A$ onto $b(N)$ such that $\psi=\nu^o\circ b^{-1}\circ \mathfrak{T}$, and, for all $x\in\mathfrak{N}_{\mathfrak{T}}\cap\mathfrak{N}_\psi$, we have : \[(\mathfrak{T}\underset{N}{_b*_\alpha}id)\mathfrak{a}(x^*x)=1\underset{N}{_b\otimes_\alpha}\beta\circ b^{-1}\mathfrak{T}(x^*x)=\mathfrak{a}(\mathfrak{T}(x^*x))\] \[(\psi\underset{\nu}{_b*_\alpha}id)\mathfrak{a}(x^*x)=\beta\circ b^{-1}\mathfrak{T}(x^*x)\] (vii) we have : \[\mathfrak{a}(\sigma_t^\psi(y))=(\sigma_t^\psi\underset{N}{_b*_\alpha}\tau_t)\mathfrak{a}(y)\] (viii) the standard implementation $U^{\mathfrak{a}}_\psi$ is equal to $V'_\psi$; \newline (ix) the dual weight satisfies $\Delta_{\tilde{\psi}}^{it}=\Delta_\psi^{it}\underset{N}{_b\otimes_\alpha}\Delta_{\widehat{\Phi}}^{-it}$; \newline (x) the Radon-Nikodym derivative $(D\psi\circ\mathfrak{a}:D\psi)_t$ is equal to $1$. 
} \begin{proof} Result (i) is identical to (\cite{E5}, 8.3(i)), and (ii) is similar to (\cite{E5}, 8.3(ii) and 8.4(i)); the proof of (iii) is similar to the proof of (\cite{E5}, 8.4(ii) and (iii)), and the proof of (iv) is similar (and somewhat simpler) to the proof of (\cite{E5}, 8.4(iv) and (v)). Now result (v) is obtained in a similar way to (\cite{E5}, 8.5 and 8.6); by similar calculations to (\cite{E5}, 8.7 and 8.8(i)), we obtain that, for all $t\in\mathbb{R}$, we have $\sigma_t^\psi(b(n))=b(\sigma_{-t}^\nu(n))$, which gives the existence of a normal faithful semi-finite operator-valued weight $\mathfrak{T}$ from $A$ onto $b(N)$ such that $\psi=\nu^o\circ b^{-1}\circ \mathfrak{T}$. For any $x\in\mathfrak{N}_\psi\cap\mathfrak{N}_{\mathfrak{T}}$, the vector $\Lambda_\psi(x)$ belongs to $\mathcal D(_\alpha H, \nu)$, and we have, for any $\eta\in H$ : \[\|\Lambda_\psi (x)\underset{\nu^o}{_a\otimes_\beta}\eta\|^2=(\beta\circ b^{-1}\mathfrak{T}(x^*x)\eta|\eta)\] So, using the density property and \ref{inv}, we get, for all $x\in \mathfrak{N}_\psi\cap\mathfrak{N}_{\mathfrak{T}}$, that : \[(\psi\underset{\nu}{_b*_\alpha}id)\mathfrak{a}(x^*x)=\beta\circ b^{-1}\mathfrak{T}(x^*x)\] and, therefore, that : \[(\mathfrak{T}\underset{N}{_b*_\alpha}id)\mathfrak{a}(x^*x)=1\underset{N}{_b\otimes_\alpha}\beta\circ b^{-1}\mathfrak{T}(x^*x)=\mathfrak{a}(\mathfrak{T}(x^*x))\] we finish the proof of (vi) in a similar way to (\cite{E5}, 8.8(ii)). Then (vii) is a straightforward corollary of (vi) and (v), and (viii) and (ix) are obtained in a similar way to \ref{Uinv}(i) and (ii). As $\Delta_{\tilde{\psi}}=\frac{d\overline{\psi_\mathfrak{a}}}{d\psi^o}$ (\cite{E5} 13.6) and $\Delta_\psi\underset{N}{_b\otimes_\alpha}\Delta_{\widehat{\Phi}}^{-1}=\frac{d\underline{\psi}}{d\psi^o}$ by \ref{psibarre}(ii), we infer from (ix) that $\overline{\psi_\mathfrak{a}}=\underline{\psi}$, which, by \ref{defder}, finishes the proof. 
\end{proof} \subsection{Corollary} \label{corinv} {\it Let $(b, \mathfrak{a})$ be an action of a measured quantum groupoid $\mathfrak{G}$ on a von Neumann algebra $A$; let $\psi_1$, $\psi_2$ be two invariant normal faithful semi-finite weights on $A$, as defined in \ref{inv}, and let us suppose that both $\psi_1$ and $\psi_2$ bear the density property, as defined in \ref{action}. Then, for all $t\in\mathbb{R}$, $(D\psi_1:D\psi_2)_t$ belongs to $A^\mathfrak{a}$. } \begin{proof} The proof is similar to (\cite{E5}, 8.11). \end{proof} \subsection{Theorem} \label{thinv2} {\it Let $(b, \mathfrak{a})$ be a weighted action of a measured quantum groupoid $\mathfrak{G}$ on a von Neumann algebra $A$, and $\psi$ a normal semi-finite faithful weight on $A$, lifted from $\nu^o$. If the Radon-Nikodym derivative $(D\psi\circ\mathfrak{a}:D\psi)_t$ is equal to $1$, then the weight $\psi$ is invariant by $\mathfrak{a}$ in the sense of \ref{inv}. } \begin{proof} Let $\xi\in D(_\alpha H, \nu)\cap D(H_\beta, \nu^o)\cap\mathcal D(\Delta_{\widehat{\Phi}}^{-1/2})$ such that $\Delta_{\widehat{\Phi}}^{-1/2}\xi$ belongs to $D(_\alpha H, \nu)$; let us remark first that if $y$ belongs to $\mathfrak{N}_{\widehat{\Phi}}\cap\mathfrak{N}_{\widehat{\Phi}}^*\cap\mathfrak{N}_{\hat{T}}\cap\mathfrak{N}_{\hat{T}}^*$, and is analytic with respect to $\sigma_t^{\widehat{\Phi}}$, and such that $\sigma_z(y)$ belongs to $\mathfrak{N}_{\widehat{\Phi}}\cap\mathfrak{N}_{\widehat{\Phi}}^*\cap \mathfrak{N}_{\hat{T}}\cap\mathfrak{N}_{\hat{T}}^*$, for all $z\in\mathbb{C}$, then $\Lambda_{\widehat{\Phi}}(y)$ satisfies all those conditions, and this gives that the set of such elements $\xi$ is dense in $H$. \newline Let $\eta$ be in $D(_\alpha H, \nu)\cap \mathcal D(\Delta_{\widehat{\Phi}}^{-1/2})$ such that $\Delta_{\widehat{\Phi}}^{-1/2}\eta$ belongs to $D(_\alpha H, \nu)$, and $x\in \mathfrak{N}_\psi$, analytic with respect to $\psi$, such that $\sigma_{-i/2}(x^*)$ belongs to $\mathfrak{N}_\psi$. 
Then, we have, using \ref{psibarre3}(i) applied to $\underline{\nu^o}$ : \begin{multline*} ((\psi\underset{\nu}{_b*_\alpha}id)\mathfrak{a} (x^*x)\xi\underset{N^o}{_\alpha\otimes_\beta}J_{\widehat{\Phi}}\Delta_{\widehat{\Phi}}^{-1/2}\eta|\xi\underset{N^o}{_\alpha\otimes_\beta}J_{\widehat{\Phi}}\Delta_{\widehat{\Phi}}^{-1/2}\eta)=\\ ((\psi\underset{\nu}{_b*_\alpha}id)\mathfrak{a} (x^*x)\Lambda_{\underline{\nu^o}}(\theta^{\alpha, \nu}(\xi, \eta))|\Lambda_{\underline{\nu^o}}(\theta^{\alpha, \nu}(\xi, \eta)))=\\ \underline{\nu^o}(\theta^{\alpha, \nu}(\xi, \eta)^*(\psi\underset{\nu}{_b*_\alpha}id)\mathfrak{a} (x^*x)\theta^{\alpha, \nu}(\xi, \eta)) \end{multline*} which is equal, using \ref{psibarre2} and \ref{ex2}, to : \[\underline{\psi}((1\underset{N}{_b\otimes_\alpha}\theta^{\alpha, \nu}(\xi, \eta))^*\mathfrak{a}(x^*x)(1\underset{N}{_b\otimes_\alpha}\theta^{\alpha, \nu}(\xi, \eta)))\] By hypothesis, as $\overline{\psi_\mathfrak{a}}=\underline{\psi}$ by \ref{defder}, we get, using \ref{action} that $\sigma_t^{\underline{\psi}}(\mathfrak{a}(x))=\sigma_t^{\overline{\psi_\mathfrak{a}}}(\mathfrak{a}(x))=\mathfrak{a}(\sigma_t^\psi(x))$. 
Moreover, we can write, thanks to the hypothesis and to \ref{psibarre3} applied to $\underline{\nu^o}$ : \[J_{\underline{\nu^o}}\Lambda_{\underline{\nu^o}}(\theta^{\alpha, \nu}(\xi, \eta))=J_{\widehat{\Phi}}\xi\underset{\nu}{_\beta\otimes_\alpha}\Delta_{\widehat{\Phi}}^{-1/2}\eta=\Lambda_{\underline{\nu^o}}(\theta^{\alpha, \nu}(\Delta_{\widehat{\Phi}}^{-1/2}\eta, \Delta_{\widehat{\Phi}}^{1/2}\xi))\] from which we get that $[\mathfrak{a}(x)(1\underset{N}{_b\otimes_\alpha}\theta^{\alpha, \nu}(\xi, \eta))]^*$ belongs to $\mathcal D(\sigma_{-i/2}^{\underline{\psi}})$, and, therefore, that: \[((\psi\underset{\nu}{_b*_\alpha}id)\mathfrak{a} (x^*x)\xi\underset{\nu^o}{_\alpha\otimes_\beta}J_{\widehat{\Phi}}\Delta_{\widehat{\Phi}}^{-1/2}\eta|\xi\underset{\nu^o}{_\alpha\otimes_\beta}J_{\widehat{\Phi}}\Delta_{\widehat{\Phi}}^{-1/2}\eta)\] is equal to : \[\|\Lambda_{\underline{\psi}}(\sigma_{-i/2}^{\underline{\psi}}([\mathfrak{a}(x)(1\underset{N}{_b\otimes_\alpha}\theta^{\alpha, \nu}(\xi, \eta))]^*))\|^2= \|\Lambda_{\underline{\psi}}((1\underset{N}{_b\otimes_\alpha}\theta^{\alpha, \nu}(\Delta_{\widehat{\Phi}}^{-1/2}\eta, \Delta_{\widehat{\Phi}}^{1/2}\xi))\mathfrak{a}(\sigma_{-i/2}^\psi(x^*)))\|^2\] which, thanks again to the hypothesis and to \ref{psia}, is equal to : \begin{align*} \|\Lambda_{\psi}(\sigma_{-i/2}^\psi(x^*))\underset{\nu}{_b\otimes_\alpha}J_{\widehat{\Phi}}\xi\underset{\nu}{_\beta\otimes_\alpha}\Delta_{\widehat{\Phi}}^{-1/2}\eta\|^2 &= \|J_\psi\Lambda_\psi(x)\underset{\nu}{_b\otimes_\alpha}J_{\widehat{\Phi}}\xi\underset{\nu}{_\beta\otimes_\alpha}\Delta_{\widehat{\Phi}}^{-1/2}\eta\|^2\\ &= \|\Lambda_\psi(x)\underset{\nu^o}{_a\otimes_\beta}\xi\underset{\nu^o}{_\alpha\otimes_\beta}J_{\widehat{\Phi}}\Delta_{\widehat{\Phi}}^{-1/2}\eta\|^2 \end{align*} So, finally, we get the equality : \[((\psi\underset{\nu}{_b*_\alpha}id)\mathfrak{a} 
(x^*x)\xi\underset{\nu^o}{_\alpha\otimes_\beta}J_{\widehat{\phi}}\Delta_{\widehat{\Phi}}^{-1/2}\eta|\xi\underset{\nu^o}{_\alpha\otimes_\beta}J_{\widehat{\phi}}\Delta_{\widehat{\Phi}}^{-1/2}\eta)= \|\Lambda_\psi(x)\underset{\nu^o}{_a\otimes_\beta}\xi\underset{\nu^o}{_\alpha\otimes_\beta}J_{\widehat{\Phi}}\Delta_{\widehat{\Phi}}^{-1/2}\eta\|^2\] which, by continuity, remains true for any $x\in\mathfrak{N}_\psi$ and $\xi\in D(_\alpha H, \nu)\cap D(H_\beta, \nu^o)$; from which we infer that : \begin{multline*} ((\psi\underset{\nu}{_b*_\alpha}id)\mathfrak{a} (x^*x)\alpha(<J_{\widehat{\Phi}}\Delta_{\widehat{\Phi}}^{-1/2}\eta, J_{\widehat{\Phi}}\Delta_{\widehat{\Phi}}^{-1/2}\eta>_{\beta, \nu^o})\xi|\xi)=\\ (\Lambda_\psi(x)\underset{\nu^o}{_a\otimes_\beta}\alpha(<J_{\widehat{\Phi}}\Delta_{\widehat{\Phi}}^{-1/2}\eta, J_{\widehat{\Phi}}\Delta_{\widehat{\Phi}}^{-1/2}\eta>_{\beta, \nu^o})\xi|\Lambda_\psi(x)\underset{\nu^o}{_a\otimes_\beta}\xi) \end{multline*} from which, by density of the elements of the form $<J_{\widehat{\Phi}}\Delta_{\widehat{\Phi}}^{-1/2}\eta, J_{\widehat{\Phi}}\Delta_{\widehat{\Phi}}^{-1/2}\eta>_{\beta, \nu^o}$ in $N^+$, we get, for any $n\in N^+$ : \[((\psi\underset{\nu}{_b*_\alpha}id)\mathfrak{a} (x^*x)\alpha(n)\xi|\xi)=(\Lambda_\psi(x)\underset{\nu^o}{_a\otimes_\beta}\alpha(n)\xi|\Lambda_\psi(x)\underset{\nu^o}{_a\otimes_\beta}\xi)\] from which we get the result, by density of $D(_\alpha H, \nu)\cap D(H_\beta, \nu^o)$. \end{proof} \subsection{Proposition} \label{propcocycle} {\it Let $\mathfrak{G}$ be a measured quantum groupoid, $(b,\mathfrak{a})$ a weighted action of $\mathfrak{G}$ on a von Neumann algebra $A$, $\psi_1$ and $\psi_2$ two normal semi-finite faithful weights on $A$, lifted from $\nu^o$, and $(D\psi_1\circ\mathfrak{a} :D\psi_1)_t$, $(D\psi_2\circ\mathfrak{a}: D\psi_2)_t$ their Radon-Nikodym derivatives with respect to the action $(b, \mathfrak{a})$, as defined in \ref{defder}. 
Then, the Radon-Nikodym derivative $(D\psi_1:D\psi_2)_t$ belongs to $A\cap b(N)'$, and we have, for all $t\in\mathbb{R}$ :} \[(D\psi_2\circ\mathfrak{a}: D\psi_2)_t=\mathfrak{a}((D\psi_2:D\psi_1)_t)(D\psi_1\circ\mathfrak{a} :D\psi_1)_t((D\psi_2:D\psi_1)_t^*\underset{N}{_b\otimes_\alpha}1)\] \begin{proof} As $\psi_1$ and $\psi_2$ are lifted weights from $\nu^o$, we get that $(D\psi_1:D\psi_2)_t$ belongs to $A\cap b(N)'$ by (\cite{T}, 4.22(iii)); moreover, we have : \[(D\overline{\psi_{2\mathfrak{a}}}:D\underline{\psi_2})_t=(D\overline{\psi_{2\mathfrak{a}}}:D\overline{\psi_{1\mathfrak{a}}})_t(D\overline{\psi_{1\mathfrak{a}}}:D\underline{\psi_1})_t(D\underline{\psi_1}:D\underline{\psi_2})_t\] from which we get the result, using \ref{action}, \ref{defder} and \ref{corunderline}(ii). \end{proof} \subsection{Corollary} \label{corcocycle} {\it Let $\mathfrak{G}$ be a measured quantum groupoid, $(b,\mathfrak{a})$ a weighted action of $\mathfrak{G}$ on a von Neumann algebra $A$; then, the following are equivalent : \newline (i) there exists a normal semi-finite faithful weight on $A$, which is invariant and bears the density condition; \newline (ii) there exists a normal semi-finite faithful weight $\psi$ on $A$, lifted from $\nu^o$, and a $\sigma_t^\psi$-cocycle $u_t$ on $A\cap b(N)'$ such that $(D\psi\circ\mathfrak{a} :D\psi)_t=\mathfrak{a}(u_t^*)(u_t\underset{N}{_b\otimes_\alpha}1)$; \newline (iii) for any normal semi-finite faithful weight $\psi$ on $A$, lifted from $\nu^o$, there exists a $\sigma_t^\psi$-cocycle $u_t$ on $A\cap b(N)'$ such that $(D\psi\circ\mathfrak{a} :D\psi)_t=\mathfrak{a}(u_t^*)(u_t\underset{N}{_b\otimes_\alpha}1)$.} \begin{proof} Let us suppose (i), and let $\varphi$ be an invariant weight on $A$, bearing the density condition; then, by \ref{thinv}(vi), the weight is lifted, and, if $\psi$ is any other lifted weight on $A$, $u_t=(D\varphi :D\psi)_t$ is a $\sigma_t^\psi$-cocycle in $A\cap b(N)'$ by (\cite{T}, 4.22(iii)); moreover, using \ref{propcocycle}, we get 
(iii). \newline Conversely, if we suppose (ii), there exists a normal semi-finite faithful weight $\varphi$ on $A$ such that $u_t=(D\varphi:D\psi)_t$; as $\psi$ is lifted, and $u_t$ belongs to $A\cap b(N)'$, we know, using (\cite{T}, 4.22(iii)), that $\varphi$ is lifted, too. Using now \ref{propcocycle}, we get that $(D\varphi\circ\mathfrak{a}:D\varphi)_t=1$, which, thanks to \ref{thinv2}, gives the result. \end{proof} \end{document}
\begin{document} \title{Bayesian Inference for Generalized Extreme Value Distributions via Hamiltonian Monte Carlo} \date{Dec 2014} \author{Marcelo Hartmann$^{\rm a}$ and Ricardo S. Ehlers$^{\rm a}$ \thanks{$^{\ast}$ Corresponding author. Email: [email protected]} \\ $^{\rm a}${\em Universidade de S\~ao Paulo, S\~ao Carlos, Brazil}} \maketitle \begin{abstract} In this paper we propose to evaluate and compare Markov chain Monte Carlo (MCMC) methods to estimate the parameters in a generalized extreme value model. We employed the Bayesian approach using traditional Metropolis-Hastings methods, Hamiltonian Monte Carlo (HMC) and Riemann manifold HMC (RMHMC) methods to obtain the approximations to the posterior marginal distributions of interest. Applications to real datasets of maxima illustrate how HMC can be much more efficient computationally than traditional MCMC and simulation studies are conducted to compare the algorithms in terms of how fast they get close enough to the stationary distribution so as to provide good estimates with a smaller number of iterations. \vskip .5cm Key words: Extreme value; Bayesian approach; Hamiltonian Monte Carlo; Markov chain Monte Carlo. \end{abstract} \section{Introduction} Extreme Value Theory (EVT) can be seen as a branch of probability theory which studies the stochastic behaviour of extremes associated to a set of random variables with a common probability distribution. In recent years, several statistical techniques capable of better quantifying the probability of occurrence of rare events have grown in popularity, especially in areas such as Finance, Actuaries and Environmental sciences (see for example, \citeNP{colw94}, \citeNP{colt96}). For a good review of both theory and interesting applications of EVT the main reference is still \citeN{coles01}. Natural phenomena like river flows, wind speed and rain are subject to extreme values that can imply great material and financial losses. 
Financial markets where large amounts of money invested can have an impact on the economy of a country need to have their risks of large losses and gains quantified. In risk analysis, estimating future losses by modelling events associated to default is of fundamental importance. In Insurance, the potential risk of high value claims needs to be quantified and associated to possible catastrophic events due to the large amount of money involved in payments. The usual approach for the analysis of extreme data is based on the Generalized Extreme Value (GEV) distribution whose distribution function is given by, \begin{equation} H(y|\mu,\sigma,\xi)= \exp\left\{-\left(1+\xi~\dfrac{y-\mu}{\sigma}\right)_{+}^{-1/\xi}\right\}, \end{equation} where $\mu$, $\sigma$ and $\xi$ are location, scale and shape parameters respectively. The $+$ sign denotes the positive part of the argument. We use the notation $Y\sim GEV(\mu,\sigma,\xi)$. The value of the shape parameter $\xi$ defines the tail behaviour of the distribution. If $\xi=0$ the distribution is defined for $y\in\mathbb{R}$ and is called a Gumbel distribution (exponentially decaying tail). If $\xi>0$ the distribution is defined for values $y > \mu-\sigma/\xi$, has a lower bound and is called a Fr\'echet distribution (slowly decaying tail). If $\xi<0$ the distribution is defined for values $y < \mu-\sigma/\xi$, has an upper bound and is called a negative Weibull distribution (upper bounded tail). The density function of the GEV distribution is given by, \begin{equation} h(y|\xi,\mu,\sigma)= \left\{\begin{array}{l} \dfrac{1}{\sigma}\left(1+\xi~\dfrac{y-\mu}{\sigma}\right)^{-1/\xi-1} \exp\left\{-\left(1+\xi~\dfrac{y-\mu}{\sigma}\right)^{-1/\xi}\right\}, ~\xi\ne 0\\\\ \dfrac{1}{\sigma} \exp\left\{-\left(\dfrac{y-\mu}{\sigma}\right)- \exp\left(-\dfrac{y-\mu}{\sigma}\right)\right\}, ~\xi=0. \end{array} \right. 
\end{equation} which is illustrated in Figure \ref{fig1} for $\mu=0$, $\sigma=1$ and $\xi\in\{1,0,-0.75\}$.\\ \begin{center} Figure \ref{fig1} about here.\\ \end{center} Now suppose that we have observed data $\hbox{\boldmath$y$}=(y_1,\dots,y_n)$ and assume that they are realizations from independent and identically distributed random variables $Y_1,\dots,Y_n$ with $Y_i\sim GEV(\mu,\sigma,\xi)$. We wish to make inferences about the unknown parameters $\mu$, $\sigma$ and $\xi$. The likelihood function is given by, \begin{equation} p(\hbox{\boldmath$y$}|\xi,\mu,\sigma)= \sigma^{-n} \prod_{i=1}^n\left[1+\xi~\dfrac{y_i-\mu}{\sigma}\right]^{-1/\xi-1} \exp\left\{-\sum_{i=1}^n\left(1+\xi~\dfrac{y_i-\mu}{\sigma}\right)^{-1/\xi}\right\} \end{equation} for $\mu-\sigma/\xi > y_{(n)}$ when $\xi<0$ and for $\mu-\sigma/\xi < y_{(1)}$ when $\xi>0$. Otherwise the likelihood function is undefined. A Bayesian analysis is then carried out by assigning prior distributions on $\mu$, $\sigma$ and $\xi$. Simulation methods, in particular Markov chain Monte Carlo (MCMC) methods, are now routinely employed to produce a sample of simulated values from the posterior distribution which can in turn be used to make inferences about the parameters. In GEV models, the random walk Metropolis algorithm is usually employed where a proposal distribution must be chosen and tuned, for which a poor choice will considerably delay convergence towards the posterior distribution. Our main motivation to investigate alternative algorithms is computational and we hope that our findings are useful for the applied user of this class of models. In the next section we describe an alternative algorithm to generate these posterior samples in a much more efficient way. This is compared with the traditional MCMC methods in Section \ref{sec:app} in terms of computational efficiency through a real dataset and a simulation study. 
In Section \ref{sec:ar} a time series ingredient is included in the model to analyse time series of extreme values. Some final comments are given in Section \ref{sec:conclusion}. \section{Hamiltonian Monte Carlo} Hamiltonian Monte Carlo (HMC) was originally proposed by \shortciteN{duane} for simulating molecular dynamics under the name of Hybrid Monte Carlo. In what follows we present the HMC method in a compact form which will be used in the context of GEV models. The reader is referred to \citeN{nea2011} for an up-to-date review of theoretical and practical aspects of Hamiltonian Monte Carlo methods. Let $\hbox{\boldmath$\theta$}\in\mathbb{R}^d$ denote a $d$-dimensional vector of parameters, $\pi(\hbox{\boldmath$\theta$})$ denote the posterior density of $\hbox{\boldmath$\theta$}$ and $\hbox{\boldmath$p$}\in\mathbb{R}^d$ denote a vector of auxiliary parameters independent of $\hbox{\boldmath$\theta$}$ and distributed as $\hbox{\boldmath$p$}\sim N(\hbox{\boldmath$0$},\hbox{\boldmath$M$})$. If $\hbox{\boldmath$\theta$}$ is interpreted as the position of a particle and $-\log\pi(\hbox{\boldmath$\theta$})$ describes its potential energy while $\hbox{\boldmath$p$}$ is the momentum with kinetic energy $\hbox{\boldmath$p$}'\hbox{\boldmath$M$}^{-1}\hbox{\boldmath$p$}/2$ then the total energy of a closed system is the Hamiltonian function, $$ H(\hbox{\boldmath$\theta$},\hbox{\boldmath$p$})=-\mathcal{L}(\hbox{\boldmath$\theta$})+\hbox{\boldmath$p$}'\hbox{\boldmath$M$}^{-1}\hbox{\boldmath$p$}/2. $$ where $\mathcal{L}(\hbox{\boldmath$\theta$})=\log\pi(\hbox{\boldmath$\theta$})$.\\ \noindent The (unnormalized) joint density of $(\hbox{\boldmath$\theta$},\hbox{\boldmath$p$})$ is then given by, \begin{eqnarray*} f(\hbox{\boldmath$\theta$},\hbox{\boldmath$p$}) \propto \pi(\hbox{\boldmath$\theta$}) \exp(-\hbox{\boldmath$p$}'\hbox{\boldmath$M$}^{-1}\hbox{\boldmath$p$}/2) \propto \exp[-H(\hbox{\boldmath$\theta$},\hbox{\boldmath$p$})]. 
\end{eqnarray*} For continuous time $t$, the deterministic evolution of a particle that keeps the total energy constant is given by the Hamiltonian dynamics equations, \begin{eqnarray*} \frac{\partial\hbox{\boldmath$\theta$}}{\partial t} &=& \frac{\partial H(\hbox{\boldmath$\theta$},\hbox{\boldmath$p$})}{\partial\hbox{\boldmath$p$}}=\hbox{\boldmath$M$}^{-1}\hbox{\boldmath$p$}\\ \frac{\partial\hbox{\boldmath$p$}}{\partial t} &=& -\frac{\partial H(\hbox{\boldmath$\theta$},\hbox{\boldmath$p$})}{\partial\hbox{\boldmath$\theta$}}=\nabla_{{\scriptsize \hbox{\boldmath$\theta$}}}\mathcal{L}(\hbox{\boldmath$\theta$}). \end{eqnarray*} where $\nabla_{{\scriptsize \hbox{\boldmath$\theta$}}}\mathcal{L}(\hbox{\boldmath$\theta$})$ is the gradient of $\mathcal{L}(\hbox{\boldmath$\theta$})$ with respect to $\hbox{\boldmath$\theta$}$. So, the idea is that introducing the auxiliary variables $\hbox{\boldmath$p$}$ and using the gradients will lead to a more efficient exploration of the parameter space.\\ \noindent However these differential equations cannot be solved analytically and numerical methods are required. One such method is the St\"ormer-Verlet (or Leapfrog) numerical integrator (\citeNP{leimr04}) which discretizes the Hamiltonian dynamics as the following steps, \begin{eqnarray*} \hbox{\boldmath$p$}^{(\tau+\epsilon/2)} &=& \hbox{\boldmath$p$}^{(\tau)} + \frac{\epsilon}{2}\nabla_{{\scriptsize \hbox{\boldmath$\theta$}}}\mathcal{L}(\hbox{\boldmath$\theta$}^{(\tau)})\\ \hbox{\boldmath$\theta$}^{(\tau+\epsilon)} &=& \hbox{\boldmath$\theta$}^{(\tau)} + \epsilon\hbox{\boldmath$M$}^{-1}\hbox{\boldmath$p$}^{(\tau+\epsilon/2)}\\ \hbox{\boldmath$p$}^{(\tau+\epsilon)} &=& \hbox{\boldmath$p$}^{(\tau+\epsilon/2)} + \frac{\epsilon}{2}\nabla_{{\scriptsize \hbox{\boldmath$\theta$}}}\mathcal{L}(\hbox{\boldmath$\theta$}^{(\tau+\epsilon)}) \end{eqnarray*} for some user specified small step-size $\epsilon>0$. 
After a given number of time steps this results in a proposal $(\hbox{\boldmath$\theta$}^*,\hbox{\boldmath$p$}^*)$. In Appendix \ref{appendix} we provide details on the required expressions of partial derivatives for HMC. A Metropolis acceptance probability must then be employed to correct the error introduced by this discretization and ensure convergence to the invariant distribution. Since the joint distribution of $(\hbox{\boldmath$\theta$},\hbox{\boldmath$p$})$ is our target distribution, the transition to a new proposed value $(\hbox{\boldmath$\theta$}^*,\hbox{\boldmath$p$}^*)$ is accepted with probability, \begin{eqnarray*} \alpha[(\hbox{\boldmath$\theta$},\hbox{\boldmath$p$}),(\hbox{\boldmath$\theta$}^*,\hbox{\boldmath$p$}^*)] &=& \min\left[\frac{f(\hbox{\boldmath$\theta$}^*,\hbox{\boldmath$p$}^*)}{f(\hbox{\boldmath$\theta$},\hbox{\boldmath$p$})},1\right]\\\\ &=& \min\left[\exp[H(\hbox{\boldmath$\theta$},\hbox{\boldmath$p$}) - H(\hbox{\boldmath$\theta$}^*,\hbox{\boldmath$p$}^*)],1\right]. \end{eqnarray*} In the distribution of the auxiliary parameters, $\hbox{\boldmath$M$}$ is a symmetric positive definite mass matrix which is typically diagonal with constant elements, i.e.\linebreak $\hbox{\boldmath$M$}=m\hbox{\boldmath$I$}_d$. 
The HMC algorithm in its simplest form taking $m=1$ is given by, \begin{enumerate} \item Give an initial position $\hbox{\boldmath$\theta$}^{(0)}$ and set $i=1$, \item\label{step2} draw $\hbox{\boldmath$p$}^*\sim N_d(\hbox{\boldmath$0$},\hbox{\boldmath$I$}_d)$ and $u\sim U(0,1)$, \item set $(\hbox{\boldmath$\theta$}^{(I)},\hbox{\boldmath$p$}^{(I)})=(\hbox{\boldmath$\theta$}^{(i-1)},\hbox{\boldmath$p$}^{*})$ and $H_0=H(\hbox{\boldmath$\theta$}^{(I)},\hbox{\boldmath$p$}^{(I)})$, \item repeat the St\"ormer-Verlet solution $L$ times,\vskip .2cm \begin{itemize} \item $\hbox{\boldmath$p$}^* = \hbox{\boldmath$p$}^* + \frac{\epsilon}{2}\nabla_{{\scriptsize \hbox{\boldmath$\theta$}}}\mathcal{L}(\hbox{\boldmath$\theta$}^{(i-1)})$ \item $\hbox{\boldmath$\theta$}^{(i-1)} = \hbox{\boldmath$\theta$}^{(i-1)} + \epsilon\hbox{\boldmath$p$}^{*}$ \item $\hbox{\boldmath$p$}^* = \hbox{\boldmath$p$}^* + \frac{\epsilon}{2}\nabla_{{\scriptsize \hbox{\boldmath$\theta$}}}\mathcal{L}(\hbox{\boldmath$\theta$}^{(i-1)})$ \end{itemize} \vskip .2cm \item set $(\hbox{\boldmath$\theta$}^{(L)},\hbox{\boldmath$p$}^{(L)})=(\hbox{\boldmath$\theta$}^{(i-1)},\hbox{\boldmath$p$}^{*})$ and $H_1=H(\hbox{\boldmath$\theta$}^{(L)},\hbox{\boldmath$p$}^{(L)})$, \item compute $\alpha[(\hbox{\boldmath$\theta$}^{(I)},\hbox{\boldmath$p$}^{(I)}),(\hbox{\boldmath$\theta$}^{(L)},\hbox{\boldmath$p$}^{(L)})]$ = $\min[\exp(H_0-H_1),1]$, \item set $\hbox{\boldmath$\theta$}^{(i)}=\hbox{\boldmath$\theta$}^{(L)}$ if $\alpha[(\hbox{\boldmath$\theta$}^{(I)},\hbox{\boldmath$p$}^{(I)}),(\hbox{\boldmath$\theta$}^{(L)},\hbox{\boldmath$p$}^{(L)})] > u$ and $\hbox{\boldmath$\theta$}^{(i)}=\hbox{\boldmath$\theta$}^{(I)}$ otherwise. \item set $i=i+1$ and return to step \ref{step2} until convergence. 
\end{enumerate} \noindent Since the algorithm is making use of first derivatives of the (unnormalized) log-posterior densities it tends to propose moves to regions of higher probabilities and the chains are expected to reach stationarity faster. Also, in order to employ this algorithm all sampling must be done on an unconstrained space, so we need to implement a transformation of $\hbox{\boldmath$\theta$}$ to the real line. Then prior distributions are assigned and derivatives are taken for the transformed parameters. \subsection{Riemann Manifold Hamiltonian Monte Carlo} \citeN{giro11} developed a modification in the proposal mechanism in which the moves are made according to a Riemann metric instead of the standard Euclidean distance. This procedure explores geometric properties of the posterior distribution and is referred to as Riemann manifold HMC or RMHMC. The idea is to redefine the Hamiltonian function as, $$ H(\hbox{\boldmath$\theta$},\hbox{\boldmath$p$})= -\mathcal{L}(\hbox{\boldmath$\theta$})+\frac{1}{2}\log|\hbox{\boldmath$G$}(\hbox{\boldmath$\theta$})| + \frac{1}{2}\hbox{\boldmath$p$}'\hbox{\boldmath$G$}(\hbox{\boldmath$\theta$})^{-1}\hbox{\boldmath$p$}. $$ where the position dependent matrix $\hbox{\boldmath$G$}(\hbox{\boldmath$\theta$})$ adapts to the local geometry of the posterior distribution (see also \shortciteNP{wang-etal}). In this paper we adopt the form proposed in \citeN{giro11} where, \begin{eqnarray*} \hbox{\boldmath$G$}(\hbox{\boldmath$\theta$}) = -E\left(\frac{d^2\mathcal{L}(\hbox{\boldmath$\theta$})}{d\hbox{\boldmath$\theta$}^\top\hbox{\boldmath$\theta$}}\right)= -E\left(\frac{d^2 \log f(\hbox{\boldmath$y$}|\hbox{\boldmath$\theta$})}{d\hbox{\boldmath$\theta$}^\top\hbox{\boldmath$\theta$}}\right) -\frac{d^2\log f(\hbox{\boldmath$\theta$})}{d\hbox{\boldmath$\theta$}^\top\hbox{\boldmath$\theta$}} \end{eqnarray*} i.e. the expected Fisher information matrix plus the negative Hessian of the log-prior. 
The Hamiltonian dynamics becomes, \begin{eqnarray*} \frac{\partial\hbox{\boldmath$\theta$}}{\partial t} &=& \frac{\partial H(\hbox{\boldmath$\theta$},\hbox{\boldmath$p$})}{\partial\hbox{\boldmath$p$}}=\hbox{\boldmath$G$}(\hbox{\boldmath$\theta$})^{-1}\hbox{\boldmath$p$}\\ \frac{\partial p_i}{\partial t} &=& -\frac{\partial H(\hbox{\boldmath$\theta$},\hbox{\boldmath$p$})}{\partial\theta_i}=\nabla_{{\scriptsize \hbox{\boldmath$\theta$}}_i}\mathcal{L}(\hbox{\boldmath$\theta$}) -\frac{1}{2} tr\left[\hbox{\boldmath$G$}(\hbox{\boldmath$\theta$})^{-1}\frac{\partial\hbox{\boldmath$G$}(\hbox{\boldmath$\theta$})}{\partial\theta_i}\right] +\frac{1}{2}\hbox{\boldmath$p$}'\hbox{\boldmath$G$}(\hbox{\boldmath$\theta$})^{-1}\frac{\partial\hbox{\boldmath$G$}(\hbox{\boldmath$\theta$})}{\partial\theta_i}\hbox{\boldmath$p$}. \end{eqnarray*} and in order to simulate values in discrete time we adopt the generalized St\"ormer-Verlet solution (\citeNP{leimr04}). Expressions for the expected Fisher information matrix and the Hessian of the log-prior are provided in Appendix \ref{appendix}. \section{Applications}\label{sec:app} \subsection{Annual Maximum Sea Levels} This example is taken from \citeN{coles04} page 59 and refers to the annual maximum sea levels (in metres) from 1923 to 1987 at Port Pirie, South Australia (see Figure \ref{fig:data}). The objective is to fit a generalized extreme value distribution to this data. The prior distribution adopted is a trivariate normal on $(\mu,\log(\sigma),\xi)$ with mean vector zero and diagonal variance covariance matrix (i.e. assuming prior independence) with prior variances equal to 25. The complete conditional distributions are not of any standard form and Metropolis steps are used to yield the required realizations from the posterior distribution. \begin{center} Figure \ref{fig:data} about here. 
\end{center} For comparison purposes we also used the {\tt R} package {\tt evdbayes} (\citeNP{stepr06}) which is freely available from the website {\tt http://cran.r-project.org/web/packages/evdbayes} and provides functions for the Bayesian analysis of extreme value models using MCMC methods. This package uses the Metropolis-Hastings algorithm. Figure \ref{fig:mh1} shows the trace plots of the sampled values of $\mu$, $\sigma$ and $\xi$ using the {\tt evdbayes} package with $6000$ simulations discarding the first $1000$ as burn-in. We note that even after discarding the first $1000$ iterations the chains are far from convergence and sample autocorrelations are still high. \begin{center} Figure \ref{fig:mh1} about here. \end{center} The HMC algorithm was implemented in {\tt R}. After some pilot tuning the parameter $\epsilon$ was taken as 0.12 and the St\"ormer-Verlet solution was replicated 27 times. The results appear in Figure \ref{fig:hmc} which shows the trace plots of sampled values of $\mu$, $\sigma$ and $\xi$ using HMC. We note that the HMC algorithm had an acceptance rate around 0.95 and reaches a stationary regime much faster than the Metropolis-Hastings. Besides, there is practically no autocorrelation in the output chains. In order to compare the relative efficiency of these methods we calculate the effective sample size (ESS) using the posterior samples for each parameter. This measure is defined as $ESS=N/(1+2\sum_k\gamma(k))$ where $N$ is the number of posterior samples and $\gamma(k)$ are the monotone lag $k$ sample autocorrelations (\citeNP{gey92}). It can thus be interpreted as the number of effectively independent samples. For a fair comparison, first we discarded another 1500 iterations from the samples generated by MH and HMC algorithms. The ESS is easily obtained from any MCMC output using the functionality from the {\tt R} package {\tt coda} (\shortciteNP{plummer-etal06}) which provides tools for output analysis and diagnostics. 
Table \ref{tab1} shows the effective sample sizes for the parameters using both algorithms based on the last 3500 iterations from which we can see a much lower degree of autocorrelation in the HMC output. \begin{center} Table \ref{tab1} about here. \end{center} \subsection{A Simulation Study} In order to evaluate and compare the performances of HMC and MH algorithms two simulation studies were conducted for parameter estimation in a GEV model. In both studies we generated $m=1000$ replications of $n=15,30,50,100$ observations from a GEV model with parameters $\mu=2$, $\sigma=0.5$ and $\xi=-0.1$. Location and scale parameters are usually not too difficult to estimate but according to \citeN{coles04} the value $\xi=-0.1$ is not common in practice as it leads to distributions with too heavy tails. This makes the inferences for this parameter more problematic. Let $\hat{\theta}^{(i)}$ be the estimate of a parameter $\theta$ for the $i$-th replication, $i=1,\dots,m$. To evaluate the estimation method, two criteria were considered: the bias and the mean square error (mse), which are defined as, \begin{eqnarray} bias &=& \left\{\frac{1}{m} \sum_{i=1}^m \hat{\theta}^{(i)}\right\} - \theta,\label{eq1}\\ mse &=& \frac{1}{m} \sum_{i=1}^m \left \{ \hat{\theta}^{(i)} - \theta \right\}^2.\label{eq2} \end{eqnarray} For each replication and each sample size a GEV model was fitted using the HMC and Metropolis algorithm (using {\tt evdbayes} package) based on 20000 iterations discarding 10000 as burn-in. In this study the posterior modes were taken as parameter point estimates in (\ref{eq1}) and (\ref{eq2}) since the marginal posterior distributions are skewed. The results in terms of bias and mean square errors for each parameter appear in Table \ref{ressim}. Overall, both measures are pretty small for both algorithms although they tend to be slightly smaller for the HMC. 
This was expected since after the 10000 iterations discarded the Metropolis algorithm is as close to the invariant distribution as the HMC algorithm. In a second experiment, we generated only 1100 samples from the posterior distribution discarding the first 100 as burn-in. The main objective here is to see whether the HMC algorithm tends to get close enough to the stationary distribution so as to provide good estimates with such a small number of iterations. The results are shown in Table \ref{ressim1} from which we can see that both bias and mean square error are still relatively small for the HMC algorithm while the Metropolis algorithm appears to be definitely far from the stationary distribution. Therefore, the advantage of adopting the HMC algorithm instead of Metropolis seems clear at least in terms of speed of convergence. This comes at a price of obtaining and evaluating first derivatives which are really easy to obtain and code as shown in Appendix \ref{appendix}. Finally, the computational times for each iteration were not too large in this application after some pilot tuning for the step-size. Of course each iteration of HMC takes more time than in the Metropolis algorithm but this is more than compensated by the faster convergence (we need many fewer iterations). \begin{center} Table \ref{ressim} about here. \end{center} \begin{center} Table \ref{ressim1} about here. \end{center} \section{Modelling Time Dependence}\label{sec:ar} In this section we extend the GEV model by allowing the location parameter to vary across observations through an autoregressive process of order $p$ (AR($p$)). The model is given by, $$ Y_t = \mu + \sum_{j=1}^p\theta_j Y_{t-j} + e_t, ~t=1,\dots,n $$ where $e_t$ are independent identically distributed random errors distributed as $e_t\sim GEV(0,\sigma,\xi)$. 
Assuming second order stationarity and restricting $\xi\in (-0.5,0.5)$ it follows that, \begin{eqnarray} E[Y_t]=\mu_{y_t} &=& \dfrac{\mu_{e_t} + \mu}{1-\sum_{j=1}^p \theta_j}, \forall t \nonumber \\ E[e_t]=\mu_{e_t} &=& - \dfrac{\sigma}{\xi} + \dfrac{\sigma}{\xi}\Gamma(1-\xi), \nonumber \\ Var[e_t]=\sigma^2_{e_t} &=& \dfrac{\sigma^{2}}{\xi^{2}}\left[\Gamma(1-2\xi)-\Gamma^{2}(1-\xi)\right]. \end{eqnarray} The likelihood function is given by, \begin{equation}\label{lik} l(\mu,\hbox{\boldmath$\theta$},\sigma,\xi)= \prod_{t=p+1}^n f(y_t|D_{t-1},\mu,\hbox{\boldmath$\theta$},\sigma,\xi)I_{\Omega_t}(y_t), \end{equation} where $D_{t-1}=(y_{t-1},\dots,y_{t-p})$ and $\hbox{\boldmath$\theta$}=(\theta_1,\dots,\theta_p)$. Denoting $\mu_t = \mu + \sum_{j=1}^p\theta_j Y_{t-j}$ then $\Omega_t=\{y_t:1+\xi(y_t-\mu_t)/\sigma > 0\}$ and $Y_t|\hbox{\boldmath$y$}_{-p},\mu,\hbox{\boldmath$\theta$},\sigma,\xi\sim GEV(\mu_t,\sigma,\xi)$. \vskip .5cm Prior distributions are then assigned to the parameters $\hbox{\boldmath$\theta$}$, $\mu$, $\sigma$ and $\xi$. These are assumed to be a priori independent with relatively vague prior distributions defined in the original parameter space, except for $\xi$ which is constrained to the interval $(-0.5,0.5)$ so that both the mean and the variance of the autoregressive process exist. In what follows, we adopt the prior specifications $\theta_j\sim N(0,25)$, $j=1,\dots,p$, $\mu\sim N(0,25)$, $\sigma\sim IG(0.1,0.001)$ and $\xi\sim U(-0.5,0.5)$. \vskip .5cm \subsection{A Simulation Study for GEV-AR Models} In this simulation study, the main objective is to investigate the behaviour of the HMC and RMHMC algorithms in terms of speed to reach the stationary distribution. Therefore, in this experiment we performed only 600 MCMC iterations discarding the first 100 as burn-in. We generated $m=1000$ replications of $n=60,150,300$ time series observations from GEV-AR($p$) models with $p=1,2,3$. 
The artificial time series were simulated from the following stationary models, \begin{eqnarray*} M_1: Y_t &=& -1 + 0.80Y_{t-1}+ e_t\\ M_2: Y_t &=& -1 + 0.90Y_{t-1}- 0.80Y_{t-2} + e_t\\ M_3: Y_t &=& -1 - 1.56Y_{t-1}- 0.55Y_{t-2} + 0.04Y_{t-3} + e_t \end{eqnarray*} where the error terms $e_t$ are independent and identically distributed as\linebreak $e_t\sim GEV(0,\sigma=1, \xi=0.3)$, $t=1,\dots,n$. For the HMC algorithm we set $\epsilon=0.006$ and repeated the St\"ormer-Verlet solution 13 times. For the RMHMC, we used a fixed metric given by the model information matrix evaluated at the MAP estimate. For the $AR$-$GEV(1)$ and $AR$-$GEV(2)$ models the elements $E[Y_t^2]$ and $E[Y_t Y_{t+1}]$ are determined in closed form for all $t$. For the $AR$-$GEV(3)$ model we used the approximation $E[Y_t Y_{t+i}] \approx \mu_{Y_t}^2 + \widehat{C}(Y_t, Y_{t+i})$, $i=0,1,2$, where $\widehat{C}$ is the sample covariance matrix. We set $\epsilon = 0.15$ and repeated the St\"ormer-Verlet solution 13 times. The simulation results are reported in Table \ref{ressim2} as bias and mean square errors as defined in expressions (\ref{eq1}) and (\ref{eq2}). For models of orders 1 and 2 and the three sample sizes considered the performances in terms of bias are barely similar but these are in general smaller for the RMHMC algorithm. This is also true for the model of order 3 and sample sizes 60 and 150, but for samples of size 300 the HMC algorithm underestimates $\mu$ and $\sigma$ more severely and, except for $\theta_1$, the biases are smaller for the RMHMC algorithm. When we look at the mean square errors, the comparison is in general more favorable to the RMHMC especially for larger sample sizes. In particular, for the $AR$-$GEV(3)$ model the mean square error tends to decrease (sometimes dramatically) for all sample sizes. At this point, an explanation for the large values of mse for $\mu$ and $\sigma$ in the $AR$-$GEV(3)$ model is in order. 
Recall that we are comparing the performances of the two algorithms based on relatively few MCMC iterations. So, for samples of size 300 the initial values were probably far from regions of higher posterior probabilities and the HMC would require more iterations while for the RMHMC these initial values were much less influential. All in all, we consider that this simulation study provides empirical evidence of a better performance of the RMHMC algorithm and we would recommend this approach to the applied user dealing with time series of extreme values. \begin{center} Table \ref{ressim2} about here. \end{center} \subsection{A Real Data Application} In this application, each observation represents the maximum annual level of Lake Michigan, which is obtained as the highest mean monthly level, 1860 to 1955 ($T = 96$ observations). The time series data can be obtained from the Time Series Data Library repository at {\tt https://datamarket.com/data/set/22p3/} \nocite{hyndman} Based on the autocorrelation and partial autocorrelation functions of the data we propose an $AR$-$GEV(1)$ model for this dataset. To assess the quality of predictions, we removed the last three observations from estimation. The predictions are then compared with the actual data. The RMHMC algorithm was applied with a fixed metric evaluated at the MAP estimate to simulate values from the posterior distribution of $(\mu,\theta,\sigma,\xi)$. After a short pilot tuning a step-size $\epsilon=0.06$ was taken and the St\"ormer-Verlet solution was repeated 11 times at each iteration. A total of 21000 values were simulated discarding the first 1000 as burn-in. Table \ref{tableAR1} shows the approximations for the marginal posterior mean, standard deviation, mode, median and credible interval for the model parameters. 
From Table \ref{tableAR1} we note that the estimated model is stationary with high probability and the point estimate of $\xi$ is about $-0.25$ with a small standard deviation thus characterizing a distribution with moderate asymmetry. Convergence of the Markov chains was assessed by visual inspection of trace and autocorrelation plots (not shown) and all indicated that the chains reached stationarity relatively fast with low autocorrelations. In the Bayesian approach, given $\hbox{\boldmath$y$}=(y_1,\dots,y_T)$, the $j$-steps ahead predictions are obtained from the predictive density of $Y_{T+j}$ which is given by, \begin{eqnarray*} \pi(y_{_{T+j}}|\hbox{\boldmath$y$}) &=& \int_{\Theta}f(y_{_{T+j}}|\mu+\theta y_{_{T+j-1}},\sigma,\xi) \pi(\mu, \theta, \sigma, \xi|\hbox{\boldmath$y$}) d(\mu, \theta, \sigma, \xi) \nonumber\\ &=& E_{\mu, \theta, \sigma,\xi|D}[f(y_{_{T+j}}|\mu + \theta y_{_{T+j-1}}, \sigma, \xi)]. \end{eqnarray*} Here we propose to compute a point prediction $\hat{y}_{_{T+j}}$ of $Y_{T+j}$ as a Monte Carlo approximation of the predictive expectation, $E[y_{_{T+j}}|\hbox{\boldmath$y$}] = E[E[y_{_{T+j}}|\mu,\theta,\sigma,\xi,\hbox{\boldmath$y$}]]$. So, given a sample of $N$ simulated parameter values we sample values $y^{(i)}_{_{T+j}}$ given $\mu^{(i)},\theta^{(i)},y^{(i)}_{_{T+j-1}},\sigma^{(i)},\xi^{(i)}$, $i=1,\dots,N$ which allow us to use the following approximation, \begin{equation*} \hat{y}_{_{T+j}}\approx \dfrac{1}{N}\sum\limits_{i=1}^{N} y^{(i)}_{_{T+j}} \end{equation*} for $j=1,2,3$. In Figure \ref{pAR1} we can see how the predictions behave relative to the actual values. All observed values are within the credible intervals of the predictive distributions which tend to follow the time series. \section{Conclusions}\label{sec:conclusion} In this paper we evaluated Bayesian MCMC methods to estimate the parameters in a generalized extreme value model both for independent and time series data. 
We employed the Bayesian approach using both traditional MCMC (Metropolis-Hastings) methods and (Riemann manifold) Hamiltonian Monte Carlo methods to obtain the approximations to the posterior marginal distributions of interest. Applications to real datasets of maxima illustrated how (RM)HMC can be much more efficient computationally than traditional MCMC. In a simulation study for independent data we noticed that parameter estimation is relatively robust to the choice of algorithm for a large number of iterations and discarding a lot of initial values as burn-in although bias and mean square error tend to be slightly smaller for HMC. However, HMC was much faster to reach the stationary distribution and this was observed by repeating the simulations with a small number of iterations. Another simulation study for time series data has shown that RMHMC is to be recommended for the applied user. As in any simulation study, our results are limited to our particular selection of sample sizes, prior distributions and GEV parameters. In particular, the choice $\xi=-0.1$ in Section 3.2 was intended to compare the algorithms in a more difficult scenario in terms of estimation (\citeNP{coles04}). We hope that our findings are useful to the practitioners. \section{Appendix}\label{appendix} In this appendix we present the expressions of gradients needed for the implementation of HMC and RMHMC in the GEV model. In what follows, let $z_t = 1+\xi(y_t-\mu)/\sigma$. Denoting $\hbox{\boldmath$\theta$}=(\mu,\sigma,\xi)$ and $L_{y|\theta}=\log f(\hbox{\boldmath$y$}|\hbox{\boldmath$\theta$})$ then, \begin{equation*} L_{y|\theta}= -n\log\sigma -\left(\frac{1}{\xi}+1\right)\sum_{t=1}^n\log\left[1+\xi~\dfrac{y_t-\mu}{\sigma}\right] -\sum_{t=1}^n\left(1+\xi~\dfrac{y_t-\mu}{\sigma}\right)^{-1/\xi}. 
\end{equation*} \noindent The partial derivatives of this log-density with respect to the transformed parameters $(\mu,\log(\sigma),\xi)$ are given by, \begin{eqnarray*} \frac{dL_{y|\theta}}{d\mu} &=& \frac{1}{\sigma}\left[(1+\xi)\sum_{t=1}^n z_t^{-1} - \sum_{t=1}^n z_t^{-1/\xi-1}\right] \\ \frac{dL_{y|\theta}}{d\delta} &=& -n+(1+\xi)\sum_{t=1}^n \frac{y_t-\mu}{\sigma}z_t^{-1} -\sum_{t=1}^n \frac{y_t-\mu}{\sigma}z_t^{-1/\xi-1}\\ \frac{dL_{y|\theta}}{d\xi} &=& \sum_{t=1}^n\frac{\log z_t}{\xi^2}- \left(\frac{1}{\xi}+1\right)\left(\frac{y_t-\mu}{\sigma}\right)z_t^{-1}+ \frac{1}{\xi}\left(\frac{y_t-\mu}{\sigma}\right)z_t^{-1/\xi-1}- \frac{\log z_t}{\xi^2}z_t^{-1/\xi}. \end{eqnarray*} \noindent Now letting $L_{\theta}=\log\pi(\hbox{\boldmath$\theta$})$ and since the (transformed) parameters are assumed a priori independent and normally distributed with mean zero then, $$ \frac{dL_{\theta}}{d\mu}= -\frac{\mu}{\tau^2_{\mu}}, \quad \frac{dL_{\theta}}{d\delta}=-\frac{\log\sigma}{\tau^2_{\sigma}}, \quad \frac{dL_{\theta}}{d\xi}=-\frac{\xi}{\tau^2_{\xi}}. $$ where $\tau^2_{\mu}$, $\tau^2_{\sigma}$ and $\tau^2_{\xi}$ are the prior variances. 
\vskip .5cm For the GEV-AR model we denote $\hbox{\boldmath$\theta$}=(\mu,\theta_1,\dots,\theta_p,\sigma,\xi)$ and the gradient vector for the logarithm of the likelihood function (\ref{lik}), is a $(p+3)\times 1$ vector which elements are, \begin{eqnarray*} \frac{\partial L_{y|\theta}}{\partial\mu} &=& \sum_{t=p+1}^T\frac{1}{\sigma}z_{t}^{-1}\left( (1 + \xi) - z_{t}^{-1/\xi} \right) \\ \frac{\partial L_{y|\theta}}{\partial\theta_i} &=& \sum_{t=p+1}^T\frac{1}{\sigma}z_{t}^{-1}\left( (1 + \xi) - z_{t}^{-1/\xi} \right)y_{t-i}, ~i=1,\dots,p\\ \frac{\partial L_{y|\theta}}{\partial\sigma} &=& \sum_{t=p+1}^T (1+\xi) \left( \frac{y_t-\mu_t}{\sigma^2} \right) z_{t}^{-1} - \dfrac{1}{\sigma} - z_{t}^{-(1/\xi + 1)} \left( \dfrac{y_t-\mu_t}{\sigma^2} \right)\\ \frac{\partial L_{y|\theta}}{\partial\xi} &=& \sum_{t=p+1}^T \dfrac{\log z_{t}}{\xi^{2}}-\left(\dfrac{1}{\xi} + 1 \right) \left( \dfrac{y_{t} - \mu_t}{\sigma}\right) z_{t}^{-1} + \dfrac{1}{\xi} \left( \dfrac{y_{t}-\mu_t}{\sigma} \right) z_{t}^{-(1/\xi + 1)} - \dfrac{\log z_{t}}{\xi^2}z_{t}^{-1/\xi}. \end{eqnarray*} \noindent To obtain the Fisher information matrix we use the fact that $E[g(Y_t)] = E[E[g(Y_t)|D_{t-1}]], ~\forall t$. 
The nonzero elements are given by, \begin{eqnarray*} -E\left(\dfrac{\partial^2\ell}{\partial \mu^2} \right) &=& -E\left[E\left( \dfrac{\partial^2\ell}{\partial \mu_t^2}\middle| D_{t-1} \right) \right] = (T-p)\dfrac{A}{\sigma^{2}} \\ -E \left( \dfrac{\partial^2 \ell}{\partial \mu \partial \theta_j} \right) &=& (T-p)\dfrac{A}{\sigma^{2}} E[Y_{t-j}] = \mu_{Y_t}(T-p)\dfrac{A}{\sigma^{2}}\\ -E \left(\dfrac{\partial^2 \ell}{\partial \mu \partial \sigma} \right) &=& - E \left[ E\left( \dfrac{\partial^2 \ell}{\partial \sigma \partial \mu_t} \middle| D_{t-1} \right) \right] = -(T-p) \dfrac{1}{\sigma^{2}\xi}[A - \Gamma(2+\xi)] \\ -E \left( \dfrac{\partial^2 \ell}{\partial \mu \partial \xi} \right) &=& - E \left[ E\left( \dfrac{\partial^2 \ell}{\partial \xi \partial \mu_t} \middle| D_{t-1} \right) \right] = - (T-p)\dfrac{1}{\sigma\xi}\left(B - \dfrac{A}{\xi} \right)\\ -E \left( \dfrac{\partial^2 \ell}{\partial \theta_i \partial \theta_j} \right) &=& - E \left[ E\left( \dfrac{\partial^2\ell}{\partial \mu_t^2} Y_{t-i} Y_{t-j} \middle| D_{t-1}\right)\right] = (T-p)\dfrac{A}{\sigma^{2}} E[Y_{t-i}Y_{t-j}] \\ -E \left( \dfrac{\partial^2 \ell}{\partial \sigma \partial \theta_j} \right) &=& - E \left[ E\left( \dfrac{\partial^2\ell}{\partial \sigma \partial \mu_t} Y_{t-j} \middle| D_{t-1} \right) \right]\\ &=& -(T-p) \dfrac{1}{\sigma^{2}\xi}[A - \Gamma(2+\xi)] E[Y_{t-j}]\\ &=& -(T-p) \dfrac{1}{\sigma^{2}\xi}[A - \Gamma(2+\xi)] \mu_{Y_t}\\ -E \left( \dfrac{\partial^2 \ell}{\partial \xi \partial \theta_j} \right) &=& - E \left[ E\left( \dfrac{\partial^2\ell}{\partial \xi \partial \mu_t} Y_{t-j} \middle| D_{t-1} \right) \right]\\ &=& -(T-p) \dfrac{1}{\sigma\xi}\left(B - \dfrac{A}{\xi} \right) E[Y_{t-j}] \\ &=& -(T-p) \dfrac{1}{\sigma\xi}\left(B - \dfrac{A}{\xi} \right)\mu_{Y_t}\\ -E\left( \dfrac{\partial^2 \ell}{\partial \xi \partial \sigma} \right) &=& - (T-p) \dfrac{1}{\sigma\xi^{2}}\left[1 - \gamma + \dfrac{1 - \Gamma(2+\xi)}{\xi} - B + \dfrac{A}{\xi} \right] \end{eqnarray*} where $A 
= (1+\xi)^{2}\Gamma(1+2\xi)$, $B = \Gamma(2+\xi)[\psi(1+\xi) + (1+\xi)\xi^{-1}]$, $\Gamma(\cdot)$ is the gamma function, $\psi(\cdot)$ is the digamma function and $\gamma$ is Euler's constant ($\cong 0.577215$). \setcounter{section}{0} \begin{table}[ht] \begin{center} \caption{Effective sample sizes (ESS) for each parameter using Metropolis-Hastings (MH) and Hamiltonian Monte Carlo (HMC) algorithms.} \label{tab1} \begin{tabular}{rrrr} \hline & $\mu$ & $\sigma$ & $\xi$ \\ \hline MH & 238.94 & 325.45 & 279.86 \\ HMC & 994.11 & 2613.72 & 3427.73 \\ \hline \end{tabular} \end{center} \end{table} \renewcommand{\arraystretch}{1.5} \begin{table}\centering \caption{Bias and mean squared error, based on 1000 replications, for each parameter of the GEV distribution using Metropolis-Hastings (MH) and Hamiltonian Monte Carlo (HMC) algorithms. 20000 iterations discarding 10000 as burn-in.} \label{ressim} \vskip .5cm \begin{tabular}{cccccc} \hline & & \multicolumn{2}{c}{HMC} & \multicolumn{2}{c}{MH}\\ \cline{3-6} $n$ & & bias & MSE & bias & MSE \\ \hline 15 & $\mu$ & -0.0008 & 0.0255 & -0.0028 & 0.0250 \\ & $\sigma$ & -0.0119 & 0.0135 & -0.0121 & 0.0130 \\ & $\xi$ & -0.0352 & 0.0737 & -0.0364 & 0.0727 \\ \cline{2-6} 30 & $\mu$ & 0.0000 & 0.0107 & -0.0005 & 0.0108 \\ & $\sigma$ & -0.0098 & 0.0057 & -0.0084 & 0.0058 \\ & $\xi$ & -0.0090 & 0.0248 & -0.0114 & 0.0256 \\ \cline{2-6} 50 & $\mu$ & -0.0059 & 0.0079 & -0.0045 & 0.0063 \\ & $\sigma$ & 0.0026 & 0.0336 & -0.0028 & 0.0034 \\ & $\xi$ & -0.0124 & 0.0149 & -0.0108 & 0.0127 \\ \cline{2-6} 100& $\mu$ & -0.0012 & 0.0053 & -0.0010 & 0.0033 \\ & $\sigma$ & 0.0022 & 0.0017 & -0.0023 & 0.0016 \\ & $\xi$ & -0.0050 & 0.0058 & -0.0041 & 0.0053 \\ \hline \end{tabular} \end{table} \begin{table}\centering \caption{Bias and mean squared error, based on 1000 replications, for each parameter of the GEV distribution using Metropolis-Hastings (MH) and Hamiltonian Monte Carlo (HMC) algorithms.
1100 iterations discarding 100 as burn-in.} \label{ressim1} \vskip .5cm \begin{tabular}{cccccc} \hline & & \multicolumn{2}{c}{HMC} & \multicolumn{2}{c}{MH}\\ \cline{3-6} $n$ & & bias & MSE & bias & MSE \\ \hline 15 & $\mu$ & 0.5169 & 0.5196 & -1.7424 & 6.0973 \\ & $\sigma$ & 0.4572 & 1.5135 & 5.0180 & 51.007 \\ & $\xi$ & -0.0681 & 0.1867 & -1.0650 & 2.5525 \\ \cline{2-6} 30 & $\mu$ & -0.2183 & 0.3943 & -2.3592 & 8.4136 \\ & $\sigma$ & 0.3655 & 1.0837 & 7.0782 & 78.279 \\ & $\xi$ & -0.0651 & 0.0965 & -1.4178 & 3.3511 \\ \cline{2-6} 50 & $\mu$ & -0.2202 & 0.3505 & -2.6573 & 9.8133 \\ & $\sigma$ & 0.3362 & 0.8232 & 8.5333 & 103.20 \\ & $\xi$ & -0.0582 & 0.0542 & -1.5587 & 3.9191 \\ \cline{2-6} 100& $\mu$ & -0.4297 & 0.6297 & -3.2037 & 12.541 \\ & $\sigma$ & 0.6450 & 1.7392 & 10.203 & 138.04 \\ & $\xi$ & -0.0793 & 0.1241 & -1.7940 & 4.3145 \\ \hline \end{tabular} \end{table} \setlength{\tabcolsep}{1mm} \begin{landscape} {\footnotesize \begin{table}\centering \caption{Bias and mean squared error, based 1000 replications, for each parameter of the GEV-AR model using Hamiltonian Monte Carlo (HMC) and Riemann manifold HMC algorithms. 
600 iterations discarding 100 as burn-in.} \label{ressim2} \vskip .5cm \begin{tabular}{cccccccccccccc} \hline & AR-GEV$(p)$ & \multicolumn{4}{c}{$\mathbf{1}$} & \multicolumn{4}{c}{$\mathbf{2}$} & \multicolumn{4}{c}{$\mathbf{3}$} \\ \cline{1-14} & & \multicolumn{2}{c}{HMC} & \multicolumn{2}{c}{RMHMC} & \multicolumn{2}{c}{HMC} & \multicolumn{2}{c}{RMHMC} & \multicolumn{2}{c}{HMC} & \multicolumn{2}{c}{RMHMC} \\ $n$ & & bias & mse & bias & mse & bias & mse & bias & mse & bias & mse & bias & mse \\ \hline 60 &$\mu$ &-0.0236 & 0.5600 &-0.0292 &0.7966 &-0.0175 &0.3382 & 0.0086 & 0.0829 &-0.0323 &1.1502 &-0.0339 &1.2462\\ &$\sigma$ &-0.0238 & 0.5701 &-0.0269 &0.6677 &-0.0039 &0.0173 &-0.0124 & 0.1506 &-0.0124 &0.1701 &-0.0129 &0.1817\\ &$\xi$ & 0.0276 & 0.7667 & 0.0322 &0.9849 & 0.0111 &0.1300 & 0.0291 & 0.8020 & 0.0290 &0.9294 & 0.0283 &0.8730\\ &$\theta_{1}$ & 0.0233 & 0.5459 & 0.0173 &0.2704 & 0.0058 &0.0381 & 0.0076 & 0.0570 & 0.0021 &0.0048 &-0.0038 &0.0159\\ &$\theta_{2}$ & & & & &-0.0050 &0.0278 &-0.0058 & 0.0332 & 0.0121 &0.1611 & 0.0033 &0.0119\\ &$\theta_{3}$ & & & & & & & & & 0.0095 &0.0995 & 0.0085 &0.0787\\ \cline{2-14} 150 &$\mu$ & 0.0018 &0.0035 & 0.0007 &0.0005 &-0.0100 &0.1115 &-0.0077 &0.0668 &-0.0953 &10.006 &-0.0376 &1.5560 \\ &$\sigma$ &-0.0242 &0.5899 &-0.0135 &0.1843 &-0.0007 &0.0005 &-0.0011 &0.0015 &-0.0863 &8.2037 &-0.0323 &1.1485 \\ &$\xi$ & 0.0053 &0.0282 &-0.0016 &0.0027 & 0.0022 &0.0053 & 0.0025 &0.0071 & 0.0369 &1.5019 & 0.0170 &0.3194 \\ &$\theta_{1}$ & 0.0144 &0.2095 & 0.0009 &0.0926 & 0.0005 &0.0002 & 0.0008 &0.0006 &-0.0082 &0.0745 &-0.0087 &0.0815 \\ &$\theta_{2}$ & & & & &-0.0014 &0.0023 &-0.0018 &0.0038 &-0.0060 &0.0406 &-0.0055 &0.0334 \\ &$\theta_{3}$ & & & & & & & & &-0.0004 &0.0002 & 0.0020 &0.0047 \\ \cline{2-14} 300 &$\mu$ &-0.0009 &0.0008 & -0.0002 & 0.0054 & -0.0051 & 0.0286 & -0.0048 & 0.0257 & -0.3205 & 106.06 & -0.0400 & 1.6533 \\ &$\sigma$ &-0.0293 &0.8555 & -0.0058 & 0.0344 & -0.0073 & 0.0588 & -0.0053 & 0.0315 & -0.3208 & 
106.26 & -0.0444 & 2.0343 \\ &$\xi$ & 0.0225 &0.5082 & -0.0007 & 0.0005 & 0.0005 & 0.0003 & 0.0000 & 0.0000 & -0.0471 & 2.2938 & -0.0136 & 0.1923 \\ &$\theta_{1}$ & 0.0232 &0.5391 & 0.0053 & 0.0289 & 0.0012 & 0.0017 & 0.0015 & 0.0027 & -0.0046 & 2.1750 & -0.0221 & 0.5036 \\ &$\theta_{2}$ & & & & & -0.0011 & 0.0014 & -0.0016 & 0.0028 & -0.0471 & 2.2938 & -0.0136 & 0.1923 \\ &$\theta_{3}$ & & & & & & & & & -0.0136 & 0.1924 & 0.0007 & 0.0005 \\ \hline \end{tabular} \end{table} } \end{landscape} \begin{table} \centering \begin{tabular}{ccccc} \hline N = 20000 & $\mu$ & $\theta$ & $\sigma$ & $\xi$\\ \hline $\widehat{E[.|D]}$ & 5.929 & 0.923 & 0.692 & -0.258 \\ $\widehat{DP[.|D]}$ & 3.350 & 0.041 & 0.055 & 0.058 \\ $\widehat{\mathrm{Moda}}$ & 6.369 & 0.922 & 0.687 & -0.261 \\ $\widehat{\mathrm{Mediana}}$ & 5.945 & 0.923 & 0.689 & -0.259 \\ $IC \ 95\%$ & [0.443, 11.437] & [0.856, 0.991] & [0.609, 0.790] & [-0.351, -0.160]\\ \hline \end{tabular} \caption{\footnotesize Posterior mean, standard deviation, mode, median and credible interval.} \label{tableAR1} \end{table} \setkeys{Gin}{width=3in,height=3in} \begin{figure} \caption{Density functions of the GEV distribution with $\mu=0$, $\sigma=1$ and $\xi=1$ (full line), $\xi=0$ (dashed line) and $\xi=-0.75$ (dotted line).} \label{fig1} \end{figure} \setkeys{Gin}{width=5in,height=4.5in} \begin{figure} \caption{Histogram and plots of maximum sea levels (in metres) from 1923 to 1987 at Port Pirie, South Australia.} \label{fig:data} \end{figure} \setkeys{Gin}{width=5.5in,height=5.5in} \begin{figure} \caption{Trace plots and autocorrelations for the parameter values generated using Metropolis-Hastings (5000 iterations after $1000$ burn-in).} \label{fig:mh1} \end{figure} \setkeys{Gin}{width=5.5in,height=5.5in} \begin{figure} \caption{Trace plots and autocorrelations for the parameter values generated using HMC (5000 iterations after $1000$ burn-in).} \label{fig:hmc} \end{figure} \begin{figure} \caption{\footnotesize Predicted 
values marked with an 'x' and actual observed values as filled circles. Horizontal bars represent the 95\% credible intervals.} \label{pAR1} \end{figure} \end{document}
\begin{document} \title{\LARGE \bf Impacts of Network Topology on the Performance of a Distributed Algorithm Solving Linear Equations } \thispagestyle{empty} \pagestyle{empty} \begin{abstract} Recently a distributed algorithm has been proposed for multi-agent networks to solve a system of linear algebraic equations, by assuming each agent only knows part of the system and is able to communicate with nearest neighbors to update their local solutions. This paper investigates how the network topology impacts exponential convergence of the proposed algorithm. It is found that networks with higher mean degree, smaller diameter, and homogeneous degree distribution tend to achieve faster convergence. Both analytical and numerical results are provided. \end{abstract} \section{Introduction} A major goal in studying networked systems is to understand the impact of network topology within the context of the application of interest, from epidemic spreading \cite{pastor2001epidemic,cohen2000resilience} to synchronization \cite{nishikawa2003heterogeneity,wang2005partial}, controllability \cite{liu2011controllability,jadbabaie2004stability,pasqualetti2014controllability} , observability \cite{liu2013observability}, flocking \cite{vicsek1995novel,jadbabaie2003coordination} and consensus \cite{tsitsiklis1984problems,tsitsiklis1986distributed,murray2003consensus,olfati2007consensus}. Recently, Mou \emph{et al.} proposed a network-based distributed algorithm to solve for $x$ in the linear equation $\mathbf{A}x=b$ \cite{mou2013fixed,mou2014distributed}. In this algorithm it is assumed that each agent is located in a communication network and has partial knowledge of $\mathbf{A}$ and $b$. Under mild conditions on the connectivity of the underlying network, all the agents' states (or local solutions) converge to the exact solution $x=\mathbf{A}^{-1}b$ \cite{mou2013fixed,liu2013asynchronous,mou2014distributed,anderson2015decentralized,mou2015distributed}. 
The proposed algorithm in \cite{mou2014distributed} is distributed, applicable to all linear equations as long as they have solutions, works for time-varying networks, converges exponentially fast, operates asynchronously, and does not involve any small step-size. The aim of this paper is to further characterize the relation between its exponential convergence and the network topology. The main contribution of this work is an analytical bound that connects the convergence rate of the algorithm to the network topology and the linear equation. Both theoretical and numerical results show that networks with higher mean degree, smaller diameter, and homogeneous degree distributions tend to speed up this distributed algorithm. The following notation is used throughout the paper. The $\ell^{2}$-norm is denoted as $\|\cdot\|$. Matrices are denoted by upper case letters in bold such as $\mathbf{A}$ and $\mathbf{P}$. A partition of a matrix is denoted by an upper case letter with a subscript, i.e. $A_i$ is a partition of matrix $\mathbf{A}$, which can also be a row vector. Vectors are denoted by lower case italic letters, such as $x$, $y$, $z$. A network or graph is denoted as $\mathcal{G}(\mathcal{V}, \mathcal{E})$, where $\mathcal{V}$ is the node (or vertex) set and $\mathcal{E}$ is the link (or edge) set. The network topology is represented by the adjacency matrix $\mathcal{A}=\{\alpha_{ij}\}$ of the network. This paper is organized as follows. The network-based distributed algorithm is briefly presented in Section \ref{sec_alg}. The theory of how the network topology impacts the algorithm performance is presented in Section \ref{sec_topo}. The main proof is presented in Section \ref{sec_proof}. Finally, the conclusion is presented in Section \ref{sec_conclu}.
\\ \section{A Distributed Algorithm for Solving Linear Equations} \label{sec_alg} Consider a system of linear algebraic equations \begin{equation} \label{Ax=b} \mathbf{A}x =b, \end{equation} which has a unique solution $x^*$. Here $\mathbf{A} \in \mathbb{R}^{\mathit{n}\times\mathit{n}}$, $b \in \mathbb{R}^{\mathit{n}}$ and $x\in \mathbb{R}^{n}$. The partition of the matrix $\mathbf{A}$ is defined as $\mathbf{A}=\mathrm{col}\left\lbrace A_1, A_2,\cdots, A_m \right\rbrace$, where $\mathrm{col}\{\cdot\}$ is an operator that stacks elements into a column, $A_i\in \mathbb{R}^{n_i\times n}$, and the partition of the vector $b$ is defined as $b=\left[ b_1, b_2, \cdots, b_m \right]^{\mathrm{T}}$, $b_i\in \mathbb{R}^{n_i}$, where $\sum_{i=1}^{m}n_i=n$. Assume that the entire system $\left(\mathbf{A},b\right)$ is unavailable to a single agent; instead different partitions of the system $\left( A_i^{n_i \times n},b_i^{n_i}\right)$ are available to different agents. In this paper we consider the simplest case: $n_i=1$ and $m=n$, i.e. each agent knows exactly one row of $\mathbf{A}$ matrix and one element of the $b$ vector. The distributed algorithm proposed in \cite{mou2014distributed} computes the solution of the linear equation \eqref{Ax=b} through a multi-agent network $\mathcal{G}(\mathcal{V}, \mathcal{E})$, where $\mathcal{V}=\{1,2,\cdots, n\}$ and $\mathcal{E}\subseteq \mathcal{V} \times \mathcal{V}$. The topology of this $n$-agent network is represented by its adjacency matrix $\mathcal{A}(\mathcal{G})=\left[ \alpha_{ij}\right]_{n\times n}$ with \begin{equation*} \alpha_{ij}= \left\lbrace \begin{aligned} & 1\ \mathrm{if}(i,j)\in \mathcal{E} \\ & 0\ \mathrm{otherwise.} \end{aligned}\right. \end{equation*} Agent $i$ in the network is synonymous with vertex $i$ in the graph $\mathcal{G}(\mathcal{V}, \mathcal{E})$. The topology of the multi-agent network is completely independent of the linear equation in \eqref{Ax=b}. 
For simplicity we make the following assumption: \begin{assumption} The graph $\mathcal{G}$ is undirected and connected. Every vertex has a self loop and there are no multiple edges between two vertices. \end{assumption} Consider agent $i$ who knows $\left( A_i,b_i \right)$. It calculates its local solution $x_i\in \mathbb{R}^n$ to $A_ix_i=b_i$ and exchanges the solution $x_i$ with its neighbors, denoted as $\mathcal{N}_{i}=\{j\in \mathcal{V}|(i,j)\in \mathcal{E}\}$. In this work $t$ is the discrete time variable and takes values in $\{0,\ 1,\ 2,\cdots\}$. The exact (or global) solution to $\mathbf{A}x=b$ is obtained when all the local solutions $x_i$'s reach consensus through the following iteration procedure: \begin{equation} \label{x(t+1)=Mx(t)} x_i(t+1) =x_i(t)-\frac{1}{d_i}\mathbf{P}_i\left(d_ix_i(t)-\sum_{j \in \mathcal{N}_{i}}x_j(t)\right), \end{equation} where $\mathbf{P}_i =\mathbf{I}-A_i^{\mathrm{T}}{\left(A_i\cdot A_i^{\mathrm{T}}\right)}^{-1}A_i$ is the orthogonal projection on the kernel of $A_i$, $i=1,\cdots,n$, and $d_i=\sum_{j=1}^n \alpha_{ij}$ is the degree of agent $i$. Let $x^*$ be the true solution to \eqref{Ax=b} and it must satisfy $A_ix^*=b_i$ for $i=1,\cdots,n$. Define the error between $x_i(t)$ and $x^*$ as \begin{equation} \label{y_i} y_i(t)=x_i(t)-x^*, \end{equation} which is in the kernel of $A_i$. In addition, note that $\mathbf{P}_i^2=\mathbf{P}_i$ and $\mathbf{P}_iy_i(t)=y_i(t)$. Replacing $x_i(t+1)$ and $x_i(t)$ by $y_i(t+1)$ and $\mathbf{P}_iy_i(t)$ in \eqref{x(t+1)=Mx(t)}, we get the {\em error updating equation} \begin{equation} \label{y+=my} y_i(t+1) =\frac{1}{d_i}\mathbf{P}_i\sum_{j \in \mathcal{N}_{i}}\mathbf{P}_jy_j(t), \end{equation} for $i=1,\cdots,n$. 
These $n$ equations can be rewritten in the following compact form \begin{equation} \label{y=My} y(t) =\left(\mathbf{P}_{\mathrm{diag}}\left[\left(\mathbf{D}^{-1}\mathcal{A}^{\mathrm{T}}\right)\otimes \mathbf{I}\right]\mathbf{P}_{\mathrm{diag}}\right)^t y(0) =\mathbf{M}^t y(0), \end{equation} where the matrix $\mathbf{M}$ is called the {\em updating matrix} and $y(t)=\mathrm{col}\left\lbrace y_1(t),y_2(t),\cdots,y_n(t)\right\rbrace$. The matrix $\mathbf{P}_{\mathrm{diag}}=\mathrm{diag}\{\mathbf{P}_1,\mathbf{P}_2,\cdots,\mathbf{P}_n\}\in \mathbb{R}^{n^2 \times n^2}$ is a block diagonal matrix with $\mathbf{P}_i \in \mathbb{R}^{n \times n}$ and $\mathbf{D}=\mathrm{diag}\{d_1,d_2,\cdots,d_n\}$ is a diagonal matrix. The operator $\otimes$ is the Kronecker product \cite{neudecker1969note}. This algorithm has been proven to converge by using the mixed norm \cite{russo2013contraction} \cite[Chapter 4.3.1]{mou2014distributed} of $\mathbf{M}$ defined as \begin{equation*} \|\mathbf{M}\|_{\mathrm{mix}}=\| \mathbf{Q} \|_\infty, \end{equation*} where $\mathbf{Q}=\{q_{ij}\}$, $q_{ij}=\frac{\alpha_{ij}}{d_i}\|\mathbf{P}_i\mathbf{P}_j\|$. Indeed, $\mathbf{M}^t$ satisfies $\lim_{t\to \infty}\|\mathbf{M}^t\|_{\mathrm{mix}}=0$ if the undirected multi-agent network is connected \cite{mou2014distributed}. Therefore $y=\mathbf{M}^ty(0)\to 0$ and thus $x_i\to x^*$ for all $i\in \mathcal{V}$. Network properties play important roles in consensus problems. In particular, the second smallest eigenvalue $\lambda_2(\mathcal{L})$ of the graph Laplacian bounds the convergence rate of consensus \cite{fiedler1973algebraic,olfati2007consensus}. Given the fact that projection matrices $\mathbf{P}_i$'s are used in constructing the updating matrix $\mathbf{M}$, it is not clear how the network topology $\mathcal{A}$ impacts the convergence rate of this algorithm. Thus, in this work we approach the proof of convergence from a different angle.
\section{Impacts of Network Topology on the Distributed Algorithm} \label{sec_topo} \subsection{Theoretical Analysis} In this section, we study how network topology impacts the performance of the network-based distributed algorithm. Before we state the main theorem, we introduce the following definitions. \begin{definition}[Walk] In a graph $\mathcal{G}$, a walk $w^{l}\in \mathcal{V}^{l+1}$ \cite{chung1997spectral} of length $l$ is a sequence of vertices $(v_0,v_1,\cdots,v_l)$ with $\{v_{i-1},v_{i}\}\in \mathcal{E}(\mathcal{G})$ for all $1\leqslant i \leqslant l$ when $l \geqslant 1$. If $l=0$, then $w^{0}$ is simply a vertex $v_0$. Specifically, we denote a walk of length $l$ starting at vertex $v_0$ and ending at vertex $v_{l}$ as $w_{v_0 v_l}^{l}$. \end{definition} \begin{definition}[$f(w^l,\beta)$ Product of a Walk] Let $w^{l}$ be a walk of length $l$. Let $\beta_{v_i}\in U$ be a value associated with vertex $v_i$. We can define a function of the walk $w^l$ as \begin{equation*} f(w^l,\beta)=\Pi_{i=0}^{i=l}\beta_{v_i}, \end{equation*} where $\beta$ is indexed by the walk $w^l = (v_0, v_1, \cdots, v_l)$ with values $\beta = (\beta_{v_0}, \beta_{v_1}, \cdots, \beta_{v_l})$. The function $f(w^l,\beta)\in U$ is called the {\em product of walk $w^{l}$}. In this work $U$ is either $\mathbb{R}$ or $\mathbb{R}^{n\times n}$. \end{definition} \begin{definition}[$\mathbb{S}(l)$ and $\mathbb{S}^{1}(l)$ Spaces] In a graph $\mathcal{G}$, all the possible walks of length $l$ form the $\mathbb{S}(l)$ Space. 
Denote a subspace of $\mathbb{S}(l)$ as $\mathbb{S}^{1}(l)$ if and only if \begin{itemize} \item the walk $w^{l}$ starts from an arbitrary vertex $v_0$ and ends at $v_{l}$ and visits all the vertices $v_i\in \mathcal{V}$ of $\mathcal{G}$, \item there does not exist a vertex $v_j\in \mathcal{V}$ that divides $w^{l}$ into two sub-walks, where one walk starts at $v_0$ and ends at $v_j$, the other one starts at $v_j$ and ends at $v_l$, that both of them visit all the vertices $v_i\in \mathcal{V}$ of $\mathcal{G}$. \end{itemize} Note that the end vertex of the previous sub-walk and the starting vertex of the following sub-walk are repeated twice when dividing a walk. It is trivial that for $w^{l}$ walks of length $l\leqslant n-1$, they can't be in the $\mathbb{S}^{1}(l)$ subspace. \end{definition} \begin{definition}[Order $r$] If a walk $w^{l}$ can be divided into several walks $w^{l_1}$, $w^{l_2}$, $\cdots$, $w^{l_r}$, where $l_i\geqslant 1$ and $w^{l_i}\in \mathbb{S}^{1}(l_i)$, then all the walks of the same number $r$ form a subspace $\mathbb{S}^{r}(l)$ where $r$ is called the {\em order} of the space. We also say that $r$ is the {\em order} of the walk $w^{l}$. $\mathbb{S}^r(l) \subsetneq\mathbb{S}(l)$ for any order $r$. If a walk $w^l$ does not visit all the vertices in a graph $\mathcal{G}$, then its order is $r=0$ and it is in $\mathbb{S}^{0}(l)$. This special case means that there exists at least one vertex $v_i\in \mathcal{V}$ which does not appear in the sequence of the walk $w^{l}$. The order of any $w^{l}$ walk is uniquely determined and non-negative, i.e. $r\geqslant 0$. 
\end{definition} Let $\varphi=\frac{1}{\left(\sqrt{n}\tau\|\mathbf{A}^{-1}\|\right)^2}$, $\tau=\underset{i}{\max}\left(\|A_i\|\right)$, $\frac{1}{d}=\left(\frac{1}{d_{i}},\frac{1}{d_{v_1}},\cdots,\frac{1}{d_j}\right)$ be indexed by the walk $w_{ij}^{t}=\left(i,v_1,\cdots,v_{t-1},j\right)$ which starts at agent $i$ and ends at agent $j$ where $w_{ij}^{t}\in \mathcal{V}^{t+1}$, then we have the following theorem \begin{theorem}[Convergence Bound] \label{th_bound} Given a linear equation $\mathbf{A}x=b$, $\mathbf{A}=\mathrm{col}\{A_i\}\in \mathbb{R}^{n \times n}$ and its unique solution $x^*$, let $x_{i}(t)$ be the local solution at agent $i$ located in an undirected network $\mathcal{G}(\mathcal{V},\mathcal{E})$ whose adjacency matrix is $\mathcal{A}=\{\alpha_{ij}\}$, then the error $y_{i}(t)$ defined in \eqref{y_i} is bounded as \begin{equation} \label{bound_of_th} \|y_{i}(t+1)\| \leqslant \sum_{\mathcal{N}_j}\sum_{r=0}^{r_m(t)}\sum_{w_{ij}^{t}\in \mathbb{S}^{r}} f(w_{ij}^t,\frac{1}{d})\left(1-\varphi \right)^{\frac{nr}{2}} \|y_j(0)\| \end{equation} for $i=1,\cdots, n$. Here $r_m(t)\leqslant \lfloor\frac{t}{n}\rfloor$ is the maximum order of the product. Note that $w_{ij}^{0}=w_{ii}^{0}=\left(i\right)$ and $w_{ij}^{1}=\left(i,j\right)$. \end{theorem} Theorem \ref{th_bound} provides another method to prove that the distributed algorithm converges to the true solution $x^*$ besides the mixed norm method in \cite{mou2014distributed}, which is discussed at the end of this work. The bound in \eqref{bound_of_th} connects the network topology with the convergence rate of the algorithm, by the degree $d_i$ of agent $i$ explicitly, and by counting the number of $w^t\in \mathbb{S}^{r}(t)$ walks in every order $r\geqslant 0$ in the network implicitly. Before moving to the detailed proof of this theorem, we first discuss how topology impacts the performance of the algorithm. 
To illustrate the topology impacts, we start with the definition of a walk $w^{t}$, then we discuss the properties of the corresponding ${f}(w^t,\frac{1}{d})$ product. Given a network $\mathcal{G}$ of size $n$, all the possible walks of length $t$ are determined by its adjacency matrix $\mathcal{A}=\{\alpha_{ij}\}$. Let $\frac{1}{d_i}$ be the inverse degree of agent $i$, then the product $\frac{1}{d_{i_0}}\frac{1}{d_{i_1}}\cdots\frac{1}{d_{i_t}}$ can be represented by ${f}^{r}(w_{i_0i_t}^t,\frac{1}{d})$, where we recall that $\frac{1}{d}$ is indexed by the walk $w_{i_0i_t}^{t}$. For simplicity, we let $i=i_0$ and $j=i_t$. Hence given a starting agent $i$, the summation of all products of the walk $w^{1}$ from $i$ to all the agents $j=1,2,\cdots,n$ is represented as $\sum_{j=1}^{n}\frac{\alpha_{ij}}{d_{i}d_{j}}$. In general, we have \begin{equation*} \begin{aligned} \sum_{r=0}^{r_m(t)}\sum_{w_{ij}^{t}} {f}^{r}(w_{ij}^t,\frac{1}{d}) = \sum_{l_{t-1}=1}^{n}\cdots\sum_{l_{1}=1}^{n}\frac{\alpha_{il_1}\alpha_{l_1l_2}}{d_id_{l_1}}\cdots\frac{\alpha_{l_{t-1}j}}{d_{l_{t-1}}}\frac{1}{d_{j}}. \end{aligned} \end{equation*} It is trivial that for any $r$, $i$, $j$ and the walk $w_{ij}^t$, $f(w_{ij}^t,\frac{1}{d})\in (0,1)$. We now explore a scenario when the above mentioned sum remains a constant, even if the walk length increases. Given a network $\mathcal{G}$ and given a starting agent $i$, if all walks $w_{ij}^{t}$, $j=1,2,\cdots,n$ are repeated by walks $w_{ij^{\prime}}^{t+1}$ who visit one more agent $j^{\prime}$ at the end, after reaching agent $j$, then the summation of all ${f}(w_{ij^{\prime}}^{t+1},\frac{1}{d})$ products remains the same. This visit of agent $j^{\prime}$ generates $n$ products based on each $f(w_{ij}^t,\frac{1}{d})$ and each of them equals to $\frac{\alpha_{jj^{\prime}}}{d_{j^{\prime}}}f(w_{ij}^t,\frac{1}{d})$, $j=1,2,\cdots,n$. Only $d_{j}$ out of $n$ products are not zero when $\alpha_{jj^{\prime}}=1$. 
The summation of all newly generated products is unchanged, which is \begin{equation} \label{sum_visit} \sum_{\mathcal{N}_{j^{\prime}}} \frac{1}{d_{j^{\prime}}} \sum_{\mathcal{N}_{j}} {f(w_{ij}^t,\frac{1}{d})} = \sum_{\mathcal{N}_{j}} f(w_{ij}^t,\frac{1}{d}) \end{equation} for $\sum_{\mathcal{N}_{j^{\prime}}} =d_{j^{\prime}}$. In general, the summation of all products of all walks by $t+1$ visits starting from a given agent $i$ to all the neighbors of all the agents $j$ is \begin{equation} \label{sum_walk} \begin{aligned} & \sum_{\mathcal{N}_j}\sum_{r=0}^{r_m(t)}\sum_{w_{ij}^{t}\in \mathbb{S}^{r}(t)} f(w_{ij}^t,\frac{1}{d}) \\ = & \sum_{j^{\prime}=1}^{n}\sum_{j}^{n}\sum_{i_{t-1}}^{n}\cdots \sum_{i_{1}}^{n}\frac{\alpha_{ii_{1}}}{d_{i}}\cdots \frac{\alpha_{i_{t-1}j}}{d_{i_{t-1}}}\frac{\alpha_{jj^{\prime}}}{d_{j}} = 1. \\ \end{aligned} \end{equation} Given a network $\mathcal{G}$ and a starting agent $i$, the summation $\sum_{\mathcal{N}_j}\sum_{w_{ij}^{t}\in \mathbb{S}^{0}(t)}f(w_{ij}^t,\frac{1}{d})$ is never increasing and the order $r$ of the $f(w^t,\frac{1}{d})$ product is never decreasing as the walk length $t$ grows. Given an arbitrary $f(w_{i_0i_t}^t,\frac{1}{d})$ product of the walk $w_{i_0i_t}^t\in \mathbb{S}^{0}(t)$, when the walk $w_{i_0i_t}^t$ makes one more visit from agent $i_{t}$ to the next agent $i_{t+1}$, it forms $d_{i_{t}}$ new products and the summation of all $d_{i_{t}}$ products is unchanged, which is already shown in \eqref{sum_visit}. However, there exists a length $t_1$ at which at least one walk changes from the $\mathbb{S}^{0}(t_1)$ subspace to the $\mathbb{S}^{1}(t_1+1)$ subspace. For every $w^{t_2}$ walk (of order $r\geqslant 1$) of length $t_2$, it never changes to a walk of order $r=0$.
This holds for any walk $w^t\in \mathbb{S}^{0}(t)$, hence the summation of all ${f}(w^t,\frac{1}{d})$, $w^t\in \mathbb{S}^{0}(t)$ products is never increasing, that is \begin{equation*} \sum_{\mathcal{N}_j}\sum_{w_{ij}^{t+1}\in \mathbb{S}^{0}(t+1)}f(w_{ij}^{t+1},\frac{1}{d}) \leqslant \sum_{\mathcal{N}_j} \sum_{w_{ij}^{t}\in \mathbb{S}^{0}(t)}f(w_{ij}^t,\frac{1}{d}) \end{equation*} and given a walk of length $t$ and a starting agent $i$, the bound in \eqref{bound_of_th} decreases when the order of walks increases, due to the exponential factor $\lim_{r\to \infty}\left(1-\varphi \right)^{\frac{nr}{2}}=0$. Since the summation of all $f$ products starting from a chosen agent $i$ is always $1$ \eqref{sum_walk}, the bound in Theorem~\ref{th_bound} can only be decreased by either i) for a fixed length $t$, increasing the percentage of walks with higher $r$, or ii) by increasing the order $r$ for all walks as rapidly as possible. With the above two observations we conclude that given any two networks $\mathcal{G}_1$ and $\mathcal{G}_2$, the distributed algorithm \eqref{x(t+1)=Mx(t)} tends to converge faster on network $\mathcal{G}_1$ if $\mathcal{G}_1$ and $\mathcal{G}_2$ have similar topology properties except any combinations of the following \begin{itemize} \item[1] $\mathcal{G}_1$ has a shorter diameter, \item[2] $\mathcal{G}_1$ has a more homogeneous degree distribution, \item[3] $\mathcal{G}_1$ has a higher mean degree. \end{itemize} Although Theorem \ref{th_bound} has $\frac{1}{d}$ as a factor in the products, it is not trivial to conclude that higher degree makes the products smaller since higher degree decreases each product while increasing the number of products. The summation of all products remains a constant, as shown in \eqref{sum_walk}. However, the bound decreases when the order $r$ of the products increases. We address these three points in order.
\subsubsection{Diameter} For two graphs $\mathcal{G}_1$ and $\mathcal{G}_2$ with the same degree distribution and hence the same mean degree, if $\mathcal{G}_1$ has a shorter diameter \cite{diestel2005graph} than $\mathcal{G}_2$, then for fixed $t$, walks from $\mathcal{G}_1$ will necessarily have a larger minimum order $r$ as compared to those from $\mathcal{G}_2$. This follows from the fact that all the agents can be visited with fewer steps in a network with shorter diameter. Thus, all things being equal between two graphs, if $r(t)$ increases more rapidly for one graph as opposed to another, the exponential factor $\left(1-\varphi \right)^{\frac{nr}{2}}$ will decrease more rapidly. Therefore networks with shorter diameter make the distributed algorithm converge faster. \subsubsection{Degree Distribution} Let $\mathcal{G}_1$ and $\mathcal{G}_2$ be two graphs with the same mean degree but different degree distributions. Let $\mathcal{G}_1$ have a more homogeneous degree distribution than $\mathcal{G}_2$. Walks in $\mathcal{G}_2$ typically have lower order $r$ than the walks of the same length in $\mathcal{G}_1$. This is because walks on $\mathcal{G}_2$ rather than $\mathcal{G}_1$ have to walk through the high degree vertices again and again to reach all the other low degree vertices. Hence for a given length of walks, the order $r$ from the walks on $\mathcal{G}_1$ is higher. Therefore a homogeneous degree distribution makes the algorithm converge faster. \subsubsection{Mean Degree} Adding edges to a graph typically results in a shorter diameter. Given two graphs $\mathcal{G}_1$ and $\mathcal{G}_2$ with similar degree distribution where $\mathcal{G}_1$ has a higher mean degree, the diameter of $\mathcal{G}_1$ is typically no larger than that of $\mathcal{G}_2$. Hence the orders $r$'s from $\mathcal{G}_1$ are typically higher than those in $\mathcal{G}_2$ for walks of fixed length.
Adding a new edge can either make the degree distribution homogeneous or make it heterogeneous, depending on where the new edge is added. The overall change of degree distribution for each newly added edge is difficult to analyze. However, if multiple new edges are added uniformly to a graph, this will typically result in a more homogeneous degree distribution, thus increasing the mean degree of the network makes the distributed algorithm converge faster. \subsection{Simulation Results} To verify our theoretical predictions, we perform extensive numerical simulations. We first quantify the convergence rate of the network-based distributed algorithm. One measure is the solution accuracy of the algorithm, which is the Euclidean distance between the local solution and the exact (or global) one: \begin{equation*} \epsilon_{i}(t)=\|x_i(t)-x^*\|,\ i=1,2,\cdots,n. \end{equation*} Smaller $\epsilon_{i}$ means faster convergence rate and hence better algorithm performance. The impacts of different network topologies are measured by the statistical performances of the distributed algorithm, i.e. $E\left(\sum_{i=1}^n\epsilon_{i}\right)$ on an ensemble of linear equations. We notice that the Euclidean distance defined above needs a reference. For example, if the true solutions of two cases are $\|x^{*,1}\|=100$ and $\|x^{*,2}\|=0.1$ respectively, while the summation of Euclidean distances of all local solutions to $x^{*,j}$ are both $\sum_{i=1}^{n}\epsilon_{i}^{j}=\sum_{i=1}^{n}\|x_i^{j}-x^{*,j}\|=1$, $j=1,2$, it is obvious the accuracy of the former iterative process is much higher than the latter one. Therefore the Euclidean distance should be scaled by the initial error $\sum_{i=1}^n \epsilon_{i}(0)$, yielding the relative error \begin{equation} \label{rel_err} R(t)=\frac{\sum_{i=1}^n \epsilon_{i}(t)}{\sum_{i=1}^n \epsilon_{i}(0)}=\frac{\sum_{i=1}^n \|x_i(t)-x^*\|_2}{\sum_{i=1}^n \|x_i(0)-x^*\|_2}. 
\end{equation} In this way, convergence performances among different systems of linear equations can be compared. \begin{figure} \caption{Relative error $R(t)$ of the distributed algorithm on different network topologies.} \label{fig_cv_box} \end{figure} Figure~\ref{fig_cv_box} shows the relative error changes with different network topologies, including small-world (SW) networks \cite{watts1998collective} with random rewiring probability $p$, scale-free (SF) networks \cite{barabasi1999emergence} with degree exponent $\gamma$, Erd\"{o}s-R\'{e}nyi (ER) random graphs \cite{erdos1960evolution} with connectivity probability $p$ and random regular (RR) graphs \cite{wormald1999models} with mean degree $\langle k \rangle$. The networks in each subfigure are the same in their mean degree and they are different in only one parameter. Small-world networks (a-c) are different in rewiring probabilities $p$, which determines network diameters. Scale-free networks, ER graphs and RR graphs are drastically different in their degree distributions: scale-free networks are most heterogeneous and random regular graphs are most homogeneous. \begin{figure} \caption{Box-and-whisker plots of the relative error $R(t)$ at $t=2000$ for different network models.} \label{fig_cv_last_all} \end{figure} The numerical results shown in Figure~\ref{fig_cv_box} clearly verify our theoretical predictions, i.e. if two networks share similar topological properties, the one with smaller diameter (or more homogeneous degree distribution, or higher mean degree) performs better than the other. To further demonstrate the topology impacts, consider $R(t)$ at $t=2000$ shown as box-and-whisker plots in Figure~\ref{fig_cv_last_all}. The smaller relative error $R(t)$ means higher convergence rate. It is clear from Figure~\ref{fig_cv_last_all}a-c and Figure~\ref{fig_cv_last_all}d-f that the upper bound of relative errors decreases as the mean degree increases for a given network model. In other words, higher mean degree makes the algorithm reach the true solution faster, and is consistent with our theoretical analysis.
Figure~\ref{fig_cv_last_all}a-c display that small-world networks with higher rewiring probability (and hence smaller diameters) have smaller relative errors $R$, confirming our theoretical prediction that smaller diameter contributes to higher convergence rate. As shown in Figure~\ref{fig_cv_last_all}d-f, for any given mean degree, the random regular graphs have the smallest relative errors while scale-free networks perform the worst. This means that the degree heterogeneity degrades the performance of the network-based distributed algorithm in solving linear equations \eqref{Ax=b}. \section{Proof of the Bound Theorem} \label{sec_proof} Before the formal proof of Theorem \ref{th_bound}, we discuss the structure of the matrix $\mathbf{M}^t$ \eqref{y=My} and introduce some technical lemmas. Let $m_{ij}^{(1)}\in \mathbb{R}^{n \times n}$ be the $i,j$-th partition matrix of $\mathbf{M}$, then \begin{equation*} m_{ij}^{(1)}=\frac{\alpha_{ij}}{d_i} \mathbf{P}_i \cdot \mathbf{P}_j, \end{equation*} where we recall that $\mathbf{P}_i$ is an orthogonal projection matrix defined right after \eqref{x(t+1)=Mx(t)}. These block matrices $m_{ij}^{(1)}$ are actually the updating matrices of $y_i(t)$, which means $y_i(t+1)=\sum_{j=1}^{n}m_{ij}^{(1)}y_j(t)$. Similarly, let $m_{ij}^{(t)}$ denote the partition matrix of $\mathbf{M}^t$, then \begin{equation*} \begin{aligned} m_{ij}^{(t)} & = \sum_{l_{t-1}=1}^n \cdots \sum_{l_{1}=1}^n m_{il_1} \cdots m_{l_{t-1}j} \\ & = \sum_{l_{t-1}=1}^n \frac{\alpha_{l_{{t-1}}j}}{d_{l_{t-1}}}\cdots \sum_{l_{1}=1}^n \frac{\alpha_{il_{1}}\cdot \alpha_{l_{1}l_{2}}}{d_i\cdot d_{l_{1}}} \mathbf{P}_i \cdots \mathbf{P}_{l_{t-1}} \mathbf{P}_j. \end{aligned} \end{equation*} Although the expression of $m_{ij}^{(t)}$ is long, it shows that $\mathbf{M}^t$ is simply a weighted sum of projection products. It follows that \eqref{y+=my} can be written as $y_i(t)=\sum_{j=1}^n m_{ij}^{(t)}y_j(0)$. 
Define $\mu_{ij}=\frac{\alpha_{ij}}{d_i}\in \left[0,0.5\right]$, then we have \begin{equation} \label{y=mu_p_y} y_{i}(t) = \sum_{j=1}^n \cdots \sum_{l_{1}=1}^n \mu_{il_{1}} \cdots \mu_{l_{t-1}j} \mathbf{P}_{i} \cdots \mathbf{P}_{l_{t-1}} \mathbf{P}_j y_j(0). \end{equation} Note that it is a summation of $n^t$ products. We now separate $\mu_{il_{1}} \mu_{l_{1}l_{2}} \cdots \mu_{l_{t-1}j} \mathbf{P}_{i} \mathbf{P}_{l_{1}} \cdots \mathbf{P}_{l_{t-1}} \mathbf{P}_jy_j(0)$ into a {\em $\mu$ product} \begin{equation} \label{lmdlmd} \mu_{il_{1}} \mu_{l_{1}l_{2}} \cdots \mu_{l_{t-1}j} \end{equation} and its corresponding projection product with $y_j(0)$, which is called {\em error sequence}, \begin{equation} \label{pppy} \mathbf{P}_{i} \mathbf{P}_{l_1} \cdots \mathbf{P}_{l_{t-1}} \mathbf{P}_j y_j(0). \end{equation} From \eqref{sum_visit} the summation of all $\mu$ products \eqref{lmdlmd} satisfies the following equality \begin{equation} \label{sum_lmd} \sum_{j=1}^n\sum_{l_{t-1}=1}^n \cdots \sum_{l_{1}=1}^n \mu_{il_{1}} \mu_{l_{1}l_{2}} \cdots \mu_{l_{t-1}j} = 1. \end{equation} The construction of $\mathbf{M}^t$ as a $\mu$ product and an error sequence of projections allows us to separate the topological features from the part of the algorithm that is specific to a particular linear equation. We first analyse each product in the error updating equation \eqref{y=mu_p_y} by bounding the error sequences of \eqref{pppy}. Define a sequence of vectors $z(t)\in \mathbb{R}^{n}$ as follows \begin{equation} \label{kacz} z^{(j)}(t+1)=z(t)+\frac{b_j-A_jz(t)}{\|A_j\|^2}A_j^{\mathrm{T}}, \end{equation} where $t\geqslant 0$ and the superscript $(j)$ corresponds to its row vector $A_j$ and its scalar $b_j$. Then \begin{equation*} \begin{aligned} \mathbf{P}_{i}\left(z(0)-x^*\right) & = z(0)-\frac{A_iz(0)}{\|A_i\|^2}A_i^{\mathrm{T}}-x^*+ \frac{b_i}{\|A_i\|^2}A_i^{\mathrm{T}}\\ & = z(0)+\frac{b_i-A_iz(0)}{\|A_i\|^2}A_i^{\mathrm{T}}-x^* \\ & = z^{(i)}(1)-x^*. 
\end{aligned} \end{equation*} Let $z^{\left(j\right)}(0)=x_j(0)$, then each error sequence in \eqref{pppy} can be written as \begin{equation} \label{pppy=z-x} \begin{aligned} & \mathbf{P}_{i} \mathbf{P}_{l_1} \cdots \mathbf{P}_{l_{t-2}} \mathbf{P}_{l_{t-1}} \mathbf{P}_j y_j(0) \\ = & \mathbf{P}_{i} \mathbf{P}_{l_1} \cdots \mathbf{P}_{l_{t-2}} \mathbf{P}_{l_{t-1}} \left(z^{(j)}(0)-x^*\right) \\ = & \mathbf{P}_{i} \cdots \mathbf{P}_{l_{t-2}} \left(z^{(j)}(0)+\frac{b_{l_{t-1}}-A_{l_{t-1}}z^{(j)}(0)}{\|A_{l_{t-1}}\|^2}A_{l_{t-1}}^{\mathrm{T}}-x^* \right) \\ = & \mathbf{P}_{i} \cdots \mathbf{P}_{l_{t-2}} \left(z^{(jl_{t-1})}(1)-x^*\right) \\ = & z^{(il_{1}\cdots l_{t-2}l_{t-1}j)}(t)-x^*. \end{aligned} \end{equation} Essentially, $z^{(il_{1}\cdots l_{t-2}l_{t-1}j)}(t)$ forms the sequence of $z(t)$ by taking different combinations of orthogonal projection $\mathbf{P}_i$ at different agents, $i=1,2,\cdots,n$. We now show that sequences $z(t)$ can be bounded, so that the error sequence is bounded as well. We now present two theorems for bounding $z(t)-x^*$, first for the case when the walk $w^t$ is associated with the product $f\left(w^{t}, \mathbf{P}_i\right)$, $w^{t}\in \mathbb{S}^0(t)$, and second for the $f\left(w^{t}, \mathbf{P}_i\right)$ product where $w^{t}\in \mathbb{S}^r(t)$ and $r\geqslant 1$. \begin{theorem}[${f}^{0}$ Bound] \label{th_Sbar} For any $w^t\in \mathbb{S}^{0}(t)$ it follows that $\|\mathbf{P}_i\|\leqslant 1$ for each $i$ and thus $\|f(w^t,\mathbf{P})\|\leqslant 1$. Therefore the dynamics in \eqref{kacz} satisfy the following inequality \begin{equation} \|z(t)-x^*\| \leqslant \|z(0)-x^*\| \end{equation} \end{theorem} \begin{proof} Given that $\mathbf{P}_i$ is a normalized projection matrix it follows that $\|\mathbf{P}_i\|=1$. 
\end{proof} \begin{theorem}[${f}$ Bound] \label{th_S} The sequence $z(t)-x^*$ of the part whose $\mathbf{P}_{i} \mathbf{P}_{l_1} \cdots \mathbf{P}_{l_{t-1}} \mathbf{P}_j$ product is an ${f}\left(w^{t},\mathbf{P}\right)$ product where $w^{t}\in \mathbb{S}^{r}(t)$ and $r\geqslant 1$, then all the sequence $\mathbf{P}_{i_1}\mathbf{P}_{i_2}\cdots y_j(0)$ in this part from \eqref{pppy=z-x} can be written as \begin{equation*} z(t)-x^* = f\left(w^{t},\mathbf{P}\right)y_j(0), \end{equation*} where $z(t)-x^*$ consists of several $f(w^{i},\mathbf{P})$, $w^{i}\in \mathbb{S}^{1}(i)$ products. Then all the sequences $z(t)-x^*$ in this part are bounded by \begin{equation*} \begin{aligned} \|z(t)-x^*\| & \leqslant \left( 1-\frac{1}{\left(\sqrt{n}\tau\|\mathbf{A}^{-1}\|\right)^2} \right)^{\frac{nr}{2}} \|z(0)-x^*\| \\ & < \left(1-\kappa(\mathbf{A})^{-2}\right)^{ \frac{nr}{2} } \|z(0)-x^*\|, \end{aligned} \end{equation*} where $\kappa(\mathbf{A})=\|\mathbf{A}\|\cdot\|\mathbf{A}^{-1}\|$ is the usual condition number of $\mathbf{A}$ and we recall the definition $\tau=\underset{i}{\max}\left(\|A_i\|\right)$. \end{theorem} The proof of Theorem \ref{th_S} requires several technical Lemmas. \begin{lemma}[Orthogonal Projection] \label{lm_OP} Let $z(t)\in \mathbb{R}^{n}$, $\|z(0)\|=0$ be a sequence that follows \begin{equation*} z^{(j)}(t+1)=z(t)+\frac{b_j-A_jz(t)}{\|A_j\|^2}A_j^{\mathrm{T}}, \end{equation*} where $A_j$, $b_j$ are defined as those in linear equation \eqref{Ax=b}, which is the same as \eqref{kacz}. Then the orthogonal projection matrix $\mathbf{P}_{i}^{\star}$ onto the solution space of the linear equation \eqref{Ax=b} is given in \cite{strohmer2009randomized} as \begin{equation*} z(t+1)=\mathbf{P}_i^{\star}z(t). 
\end{equation*} Let $\langle z(t+1),z(t)\rangle$ denotes the inner product of two vectors $z(t+1)$ and $z(t)$, then the above equation can be written as follows by using the updating function \eqref{kacz} \begin{equation*} \begin{aligned} \mathbf{P}_i^{\star}z(t) & =z(t)-\frac{A_iz(t)-b_i}{\|A_i\|^2}A_i^{\mathrm{T}}\\ & =z(t)-\frac{A_iz(t)-A_iz^*}{\|A_i\|}\frac{A_i^{\mathrm{T}}}{\|A_i\|}\\ & =z(t)-\langle z(t)-z^*,Z_i\rangle Z_i^{\mathrm{T}}, \end{aligned} \end{equation*} where $Z_i=\frac{A_i}{\|A_i\|}$, $i=1,2,\cdots, n$, $\|Z_i\|=1$ is a set of normal vectors in the hyperplane $\{z(t): \langle A_i, z(t)\rangle=b_i\}$. \end{lemma} \begin{lemma}[Orthogonality] \label{lm_orthogonality} Consider the linear equation \eqref{Ax=b} and let $x^*$ be the unique solution. The difference of two vectors $z(t+1)$ and $z(t)$ is in the kernel of $\mathbf{P}_i^{\star}$ by Orthogonal Projection Lemma \ref{lm_OP}, which means that it is orthogonal to the solution space. Therefore it is also orthogonal to $z(t+1)-x^*$. In other words, the orthogonality of two vectors $z(t+1)-z(t)$ and $z(t+1)-x^*$ satisfies \begin{equation*} \|z(t+1)-z(t)\|^2+\|z(t+1)-x^*\|^2=\|z(t)-x^*\|^2. \end{equation*} \end{lemma} \begin{lemma}[Inequality] \label{lm_inequal} Let $\mathbf{A}=\mathrm{col}\{A_i\}$, $\mathbf{A}\in \mathbb{R}^{n\times n}$ is full rank. Then the following inequality holds \begin{equation*} \sum_{i=1}^n \|\langle \frac{A_i}{\|A_i\|}, x \rangle\|^2 \geqslant \frac{1}{\left(\tau\|\mathbf{A}^{-1}\|\right)^2}\|x\|^2. \end{equation*} where $\langle A_i, x \rangle$ denotes the inner product of vector $A_i$ and $x$ and we recall the definition $\tau=\underset{i}{\max}\left(\|A_i\|\right)$. 
\end{lemma} \begin{proof}[Proof of Inequality Lemma \ref{lm_inequal}] Consider the linear equation in \eqref{Ax=b} and using the submultiplicative property of the $\ell^2$-norm the following holds \begin{equation*} \|\mathbf{A}^{-1}\|^2\cdot\|\mathbf{A}x\|^2 \geqslant \|\mathbf{A}^{-1}\mathbf{A}x\|^2,\ \forall\ x\in\mathbb{R}^{n}, \end{equation*} where $\mathbf{A}^{-1}$ is defined because $x^*$ is the unique solution of the linear equation in \eqref{Ax=b}. Considering the matrix partition $\mathbf{A}=\mathrm{col}\{A_i\}$, we have \begin{equation*} \sum_{i=1}^n \|\langle A_i, x \rangle\|^2 = \sum_{i=1}^n \|A_i\|^2\|\langle \frac{A_i}{\|A_i\|}, x \rangle\|^2 \geqslant \frac{\|x\|^2}{\|\mathbf{A}^{-1}\|^2}. \end{equation*} Moreover, \begin{equation*} \begin{aligned} \sum_{i=1}^n \tau^2\|\langle \frac{A_i}{\|A_i\|}, x \rangle\|^2 & \geqslant \sum_{i=1}^n \|A_i\|^2\|\langle \frac{A_i}{\|A_i\|}, x \rangle\|^2 \\ & \geqslant \frac{1}{\|\mathbf{A}^{-1}\|^2}\|x\|^2, \end{aligned} \end{equation*} where $\tau>0$ since $\mathbf{A}$ is full rank. Dividing by $\tau$ we arrive at the following inequality \begin{equation*} \sum_{i=1}^n \|\langle \frac{A_i}{\|A_i\|}, x \rangle\|^2 \geqslant \frac{1}{\left(\tau\|\mathbf{A}^{-1}\|\right)^2}\|x\|^2. \end{equation*} \end{proof} \begin{proof}[Proof of ${f}$ Bound \ref{th_S}] Let $x^*$ denote the unique solution to the linear equation \eqref{Ax=b}. 
Let $z(t)-x^*$ be vector sequence from the $f(w^{t},\mathbf{P}_i)$, $w^{t}\in \mathbb{S}^{r}(t)$ product part of the error sequence \eqref{pppy=z-x} where $r\geqslant 1$ and substitute the $z(t+1)$ by the updating function \eqref{kacz} in the the Orthogonality Lemma \ref{lm_orthogonality} then we have \begin{equation*} \begin{aligned} & \|z(t+1)-x^*\|^2 \\ = & -\|z(t+1)-z(t)\|^2+\|z(t)-x^*\|^2 \\ = & -\|\frac{\langle A_i,z(t)-x^*\rangle}{\|A_i\|}\frac{A_i^{\mathrm{T}}}{\|A_i\|}\|^2+\|z(t)-x^*\|^2 \\ = & -\|\langle z(t)-x^*,Z_i \rangle\|^2 +\|z(t)-x^*\|^2, \end{aligned} \end{equation*} where $Z_i=\frac{A_i}{\|A_i\|}$. Since the walk $w^{t}\in \mathbb{S}^{r}(t)$, $r\geqslant 1$, the subscript $i$ in $Z_i=\frac{A_i}{\|A_i\|}$ takes all the values $1,2,\cdots,n$ at least once. There exists $\theta_{i(t)}\geqslant 0$ such that \begin{equation*} \|\langle z(t)-x^*,Z_{i} \rangle\|^2 \geqslant \frac{\theta_{i(t)}}{\left(\tau\|\mathbf{A}^{-1}\|\right)^2}\|z(t)-x^*\|^2 \end{equation*} for ${i(t)}=1,2,\cdots,n$, by the Inequality Lemma \ref{lm_inequal}. Note that \begin{equation*} \begin{aligned} \frac{\theta_{i(t)}}{\left(\tau\|\mathbf{A}^{-1}\|\right)^2}\|z(t)-x^*\|^2 & \leqslant \|\langle z(t)-x^*,Z_{i} \rangle\|^2 \\ & \leqslant \|z(t)-x^*\|^2, \end{aligned} \end{equation*} where $\|Z_{i}\|=1$. Therefore $\frac{\theta_{i(t)}}{\left(\tau\|\mathbf{A}^{-1}\|\right)^2}\leqslant 1$ for $i(t)=1,\cdots, n$, and then $\|z(t)-x^*\|^2$ is bounded as \begin{equation*} \begin{aligned} & \|z(t)-x^*\|^2 \\ \leqslant & \left(1-\frac{\theta_{i(1)}}{\left(\tau\|\mathbf{A}^{-1}\|\right)^2}\right) \cdots \left(1-\frac{\theta_{i(t)}}{\left(\tau\|\mathbf{A}^{-1}\|\right)^2}\right) \|z(0)-x^*\|^2, \end{aligned} \end{equation*} where \begin{equation} \label{>=0} 0 \leqslant \left(1-\frac{\theta_{i(t)}}{\left(\tau\|\mathbf{A}^{-1}\|\right)^2}\right) \leqslant 1. 
\end{equation} Note that the sequence $z(t)-x^*$ forms the $f(w^{t},\mathbf{P})$, $w^{t}\in \mathbb{S}^{r}(t)$, $r\geqslant 1$ product part. Because of the fact $\mathbf{P}_{i}^{r}=\mathbf{P}_{i}$, all ${i(t)}=1,2,\cdots,n$ are present at least once in the each sub-walk of the original walk by definition. Hence the walk $w^{t}$ corresponding to $\left(1-\frac{\theta_{i(1)}}{\left(\tau\|\mathbf{A}^{-1}\|\right)^2}\right) \cdots \left(1-\frac{\theta_{i(t)}}{\left(\tau\|\mathbf{A}^{-1}\|\right)^2}\right)$ is divided into $r$ sub-walks $w^{t_i}\in \mathbb{S}^{1}(t_i)$ and each sub-walk corresponds to an $f\left(w^{t_i}, 1-\frac{\theta}{\left(\tau\|\mathbf{A}^{-1}\|\right)^2}\right)$ product where $\theta=(\theta_i)$ are the values at all the agents indexed by the walk $w^{t_i}$ and all the agents $i=1,2,\cdots,n$ appear in the walk $w^{t_i}$ at least once. Then each sub-part of the product corresponding to the walk $w^{t_i}$ is denoted as \begin{equation*} \Pi_{w^{t_i}}\left(1-\frac{\theta_{i(t)}}{\left(\tau\|\mathbf{A}^{-1}\|\right)^2}\right) = f\left(w^{t_i},1-\frac{\theta}{\left(\tau\|\mathbf{A}^{-1}\|\right)^2}\right) \end{equation*} where the subscript $w^{t_i}$ denotes the consecutive product corresponding to the walk $w^{t_i}$. Furthermore each product corresponding to a $w^{t_i}\in \mathbb{S}^{1}(t_i)$ is bounded as \begin{equation*} f\left(w^{t_i},1-\frac{\theta}{\left(\tau\|\mathbf{A}^{-1}\|\right)^2}\right) \leqslant \Pi_{i=1}^{n} \left(1-\frac{\theta_i}{\left(\tau\|\mathbf{A}^{-1}\|\right)^2}\right) \end{equation*} since we can always pick $n$ agents $i=1,2,\cdots,n$ in the walk $w^{t_i}$ and keep their values unchanged and let all the left $\theta_i=0$. Since $\left(1-\frac{\theta_{i(t)}}{\left(\tau\|\mathbf{A}^{-1}\|\right)^2}\right)\geqslant 0$ \eqref{>=0} and \begin{equation*} {\Pi_{l=1}^{n}\theta_l} \leqslant \left(\frac{1}{n}{\sum_{l=1}^{n}\theta_l}\right)^{n} \end{equation*} holds when $\theta_l\geqslant 0$. 
Therefore the $\|z(t)-x^*\|^2$ is bounded as \begin{equation*} \begin{aligned} & \|z(t)-x^*\|^2 \\ \leqslant & \Pi_{i=1}^{r} f\left(w^{t_i},1-\frac{\theta}{\left(\tau\|\mathbf{A}^{-1}\|\right)^2}\right) \|z(0)-x^*\|^2 \\ \leqslant & \Pi_{l=1}^{r} \Pi_{i=1}^{n}\left(1-\frac{\theta_{i}}{\left(\tau\|\mathbf{A}^{-1}\|\right)^2}\right) \|z(0)-x^*\|^2 \\ \leqslant & \Pi_{l=1}^{r} \left( \frac{1}{n} \sum_{i=1}^{n}\left( \ 1-\frac{\theta_{i}}{\left(\tau\|\mathbf{A}^{-1}\|\right)^2} \right) \right)^n \|z(0)-x^*\|^2 \\ = & \left( 1-\frac{1}{\left(\sqrt{n}\tau\|\mathbf{A}^{-1}\|\right)^2} \right)^{nr} \|z(0)-x^*\|^2, \end{aligned} \end{equation*} where $\sum_{{i}=1}^{n} \theta_{i} = 1$ by the Inequality Lemma \ref{lm_inequal}. A loose bound given in terms of condition number $\kappa(\mathbf{A})=\|\mathbf{A}\|\cdot\|\mathbf{A}^{-1}\|$ is as follows. Since $\tau=\underset{i}{\mathrm{max}}\left(\|A_i\|\right)$ and $\mathbf{A}$ is full rank, then \begin{equation*} \tau=\underset{i}{\mathrm{max}}\left(\|A_i\|\right) < \|\mathbf{A}\|_F, \end{equation*} where $\|\mathbf{A}\|_F=\sqrt{\sum_{i=1}^{n}\sum_{j=1}^{n}a_{ij}^{2}}$ is the Frobenius norm. The scaled condition number \cite{demmel1988probability} $\kappa_s(\mathbf{A})=\|\mathbf{A}\|_{F}\|\mathbf{A}^{-1}\|$ and the condition number $\kappa(\mathbf{A})$ satisfies the following inequality $1 \leqslant \frac{\kappa_s(\mathbf{A})}{\sqrt{n}} \leqslant \kappa(\mathbf{A})$, then \begin{equation*} \sqrt{n}\tau\|\mathbf{A}^{-1}\| < \sqrt{n}\|\mathbf{A}\|_F\|\mathbf{A}^{-1}\| \leqslant \kappa(\mathbf{A}) \end{equation*} and therefore the loose bound is \begin{equation*} \|z(t)-x^*\|^2 < \left( 1-\kappa(\mathbf{A})^{-2} \right)^{nr} \|z(0)-x^*\|^2. \end{equation*} This concludes the proof of Theorem \ref{th_S}. 
\end{proof} \begin{remark} The ${f}$ Bound Theorem \ref{th_S} is important since it also bounds the convergence rate of Kaczmarz's algorithm \cite{kaczmarz1937angenaherte}, which was not well solved in literature \cite{gower2015randomized}. It gives a tight bound in terms of matrix inverse $\|\mathbf{A}^{-1}\|$ and a loose bound in terms of condition number $\kappa(\mathbf{A})$. The bounds can be easily computed when the iterative sequence of Kaczmarz's algorithm is given, compared to the known estimate \cite{galantai2005rate}. Furthermore the ${f}$ Bound Theorem \ref{th_S} clearly explains the reason that Kaczmarz's algorithm is slower than a randomized Kaczmarz's algorithm \cite{strohmer2009randomized,dai2014randomized,gower2015randomized}. \end{remark} \begin{remark} With the help of ${f}^{0}$ Bound Theorem \ref{th_Sbar} and ${f}$ Bound Theorem \ref{th_S}, each product in the error updating equation \eqref{y=mu_p_y} can be divided into two parts and bounded separately. One corresponding to the ${f}$ product part where $r\geqslant 1$ and all the $1\leqslant i\leqslant n$ are present and the other one corresponding to the ${f}^{0}$ product part where not all $1\leqslant i\leqslant n$ are present. We can now prove the Bound Theorem \ref{th_bound}. 
\end{remark} \begin{proof}[Proof of Bound Theorem \ref{th_bound}] Let $f(w^{t},\mu_{ij})$ denote the corresponding $\mu$ product of the error sequence \eqref{pppy} in \eqref{y=mu_p_y}, then according to the ${f}^{0}$ Bound Theorem \ref{th_Sbar} and ${f}$ Bound Theorem \ref{th_S} the error updating equation \eqref{y=mu_p_y} is bounded as follows \begin{equation*} \begin{aligned} & \|y_i(t+1)\| \\ \leqslant & \left( \sum_{j=1}^n \cdots \sum_{l_{1}=1}^n \|\mu_{il_{1}} \cdots \mu_{l_{t-1}j} \mathbf{P}_i \cdots \mathbf{P}_j y_j(0)\| \right) \\ \leqslant & \sum_{j=1}^{n}\sum_{r=1}^{r_m(t)}\sum_{w_{ij}^{t}\in \mathbb{S}^{r}(t)} f(w_{ij}^{t},\frac{1}{d}) \left(1-\varphi\right)^{\frac{nr}{2}} \|y_j(0)\| \\ & + \sum_{j=1}^{n}\sum_{w_{ij}^{t}\in \mathbb{S}^{0}(t)} f(w_{ij}^{t},\frac{1}{d}) \left(1-\varphi\right)^{\frac{0}{2}} \|y_j(0)\| \\ = & \sum_{j=1}^{n}\sum_{r=0}^{r_m(t)}\sum_{w_{ij}^{t}\in \mathbb{S}^{r}} f(w_{ij}^{t},\frac{1}{d}) \left(1-\varphi\right)^{\frac{nr}{2}} \|y_j(0)\| \end{aligned} \end{equation*} where $\mu_{ij}=\frac{\alpha_{ij}}{d_i}$. Hence the proof is finished. \end{proof} The bound in \eqref{bound_of_th} gives another proof that the distributed algorithm studied in this paper converges to $x^*$ for connected undirected networks, as shown below. \subsection*{Discussion of the Algorithm Convergence} Note that the order of a $w^t$ walk typically increases as the length of walks keeps growing, since $\mathcal{G}$ is a connected network. This implies that for any given order $r$, the total number of $w^{t}\in \mathbb{S}^{r}(t)$ is limited and hence the summation of all corresponding $f(w^{t},\frac{1}{d})$ products is bounded, for the summation of all walks is 1 \eqref{sum_walk}. The number of all walks starting at vertex $v_i$ for any given order $r$ and length $t$ can be estimated by combinatorics. This method is shown when the network topology is a complete graph. 
For any given network, the number of walks can be bounded similarly, but it can become quite involved. For any walk of length $t$ starting at a fixed vertex $v_0$ in a complete network $\mathcal{G}\in \mathbb{R}^{n\times n}$, the total number of all walks is $n^{t}$. Let $t\gg n$. In order to count the maximum number of $w^{t}\in \mathbb{S}^{0}(t)$ walks, we first choose subsets of vertices $\mathcal{V}_{k}^{0}\subsetneq \mathcal{V}$ by picking $k\leqslant n-2$ vertices out of $n$ and $\mathcal{V}_{n-1}^{0}\subsetneq \mathcal{V}$ by picking $n-1$ vertices except the case $v_0$ is not picked, which results in walks in $\mathbb{S}^{1}(t)$ space rather than $\mathbb{S}^{0}(t)$. There are a total $C_{n}^{k}$ of $\mathcal{V}_{k}^{0}$ sets where $C_{n}^{k}=\frac{n!}{k!\left(n-k\right)!}$, $k\leqslant n-2$ and $C_{n}^{n-1}-1$ of $\mathcal{V}_{n-1}^{0}$ sets. Then we choose a vertex with replacement each time from $\mathcal{V}_{k}^{0}$ and $\mathcal{V}_{n-1}^{0}$ and put it into the sequence of walks to generate all possible walks. The total number $c^{0}(t)$ of $w^{0}(t)$ walks is \begin{equation*} c^{0}(t)=\sum_{k=1}^{n-1}C_{n}^{k}k^{t}-\left(n-1\right)^{t}. \end{equation*} The $w^{t}\in \mathbb{S}^{1}(t)$ walks are regarded as combinations of $w^{n}\in \mathbb{S}^{1}(n)$ walks and $w^{t-n}\in \mathbb{S}^{0}(t-n)$ walks. We first choose $n$ positions out of $t$ in the sequences of walks and make these $n$ positions form $w^{n}\in \mathbb{S}^{1}(n)$ walks. There are $C_{t}^{n}$ ways to choose $n$ vertices to form a $\mathcal{V}_{n}^{1}$ set and the number $w^{1}(n)$ walks is exactly $n!$ for each set, so the number of different sub-sequences in $w^{1}(t)$ walks is $P_{t}^{n}=n!C_{t}^{n}$. The number of walks $w^{0}(t-n)$ is simply $c^{0}(t-n)$. Hence the number of $w^{1}(t)$ walks is bounded by \begin{equation*} c^{1}(t)= P_{t}^{n}c^{0}(t-n). 
\end{equation*} In general cases where $r\geqslant 2$, we pick $n$ positions out of $1,\cdots,t-(r-1)n$ locations to form the first $w^{n}\in \mathbb{S}^{1}(n)$ walk sequence and pick the left-over locations till $t-(r-2)n$ to form the second $w^{n}\in \mathbb{S}^{1}(n)$ walk and so on. Let $t_1$ denote the start position and $t_2-1$ be the end position picked out by the first $w^{n}\in \mathbb{S}^{1}(n)$ walk sequence, then the total number of sub-sequences is $P_{t_2-t_1}^{n}$. Define $t_3,\cdots t_r$ similarly, then the second $w^{n}\in \mathbb{S}^{1}(n)$ walk sequence can pick from position $t_2$ till $t_3-1$ in the original $w^{t}\in \mathbb{S}^{r}(t)$ sequence. The total number of sub-sequences for the second $w^{n}\in \mathbb{S}^{1}(n)$ walk is $P_{t_3-t_2}^{n}$. The total number of $w^{r}(t)$ walks is bounded by \begin{equation*} c^{r}(t)=\Pi_{i=1}^{r} P_{t_{i+1}-t_i}^{n}c^{0}\left(t-rn\right). \end{equation*} The number of total walks then satisfies \begin{equation*} \lim_{t\to \infty} \frac{c^{r}(t)}{n^{t}} = \lim_{t\to \infty} \frac{t^{rn}c^{0}(t-rn)}{n^{rn}n^{t-rn}}= 0 \end{equation*} since $\frac{c^{0}(t-rn)}{n^{t-rn}}$ reduces exponentially to 0. This means for any given order $r$, the corresponding walks account for only a minor portion of all walks. In other words, the order of all products keeps growing when the length of walks increases. Since the summation of all $f\left(w^{t},\frac{1}{d}\right)$ products is 1 \eqref{sum_walk} and the limit of the portion of walks among all walks of a given order $r_c$ is $0$, therefore the following two limits exist \begin{equation*} \begin{aligned} & \lim_{t\to \infty} \rho_1=\lim_{t\to \infty} \sum_{j=1}^{n}\sum_{r=r_c+1}^{r_m(t)}\sum_{w_{ij}^{t}\in \mathbb{S}^{r}}f(w_{ij}^{t},\mu) = 1,\\ & \lim_{t\to \infty} \rho_2=\lim_{t\to \infty} \sum_{j=1}^{n}\sum_{r=0}^{r_c}\sum_{w_{ij}^{t}\in \mathbb{S}^{r}(t)}f(w_{ij}^{t},\mu) = 0. \end{aligned} \end{equation*} for any finite $r_c$. 
Furthermore, the limit of the error in \eqref{y_i} satisfies the following \begin{equation*} \begin{aligned} & \lim_{t\to \infty} \|y_i(t+1)\| \\ \leqslant & \lim_{t\to \infty} \rho_1\left(1-\varphi\right)^{\frac{nr}{2}} \|y_j(0)\| +\lim_{t\to \infty} \rho_2 \left(1-\varphi\right)^{\frac{nr}{2}} \|y_j(0)\| \\ = & 0, \end{aligned} \end{equation*} where $\lim_{t\to \infty}r_m(t) = \infty$. Therefore the algorithm converges, i.e., $\lim_{t\to \infty}x_i(t)= x^*$ for all $i$, for regular networks. Future work will look to analyze the combinatorics of more general network topologies. \section{CONCLUSIONS} \label{sec_conclu} In this work, we systematically study the impact of network topology on the performance of a network-based distributed algorithm in solving linear algebraic equations. Both theoretical analysis and simulation results show that networks with higher mean degree, smaller diameter, and more homogeneous degree distribution make the algorithm converge faster. Interestingly, $k$-regular random networks with small mean degree could have a comparable performance to degree-heterogeneous networks with very high mean degree. Hence, it is possible to reduce the communication cost (i.e. by designing sparser networks) and simultaneously keep the fast convergence rate. Besides classical consensus problems, we expect that more complicated problems can also be solved with network-based distributed algorithms. Our results presented here provide a method to analyze the topology impacts on a network-based distributed algorithm. It may shed light on the design of better network topologies to improve the performance of general multi-agent distributed algorithms in solving more challenging real-world problems. \end{document}
\begin{document} \title{Definable Closure in Randomizations} \author{Uri Andrews, Isaac Goldbring, and H. Jerome Keisler} \address{University of Wisconsin-Madison, Department of Mathematics, Madison, WI 53706-1388} \email{[email protected]} \urladdr{www.math.wisc.edu/~andrews} \email{[email protected]} \urladdr{www.math.wisc.edu/~keisler} \address {University of Illinois at Chicago, Department of Mathematics, Statistics, and Computer Science, Science and Engineering Offices (M/C 249), 851 S. Morgan St., Chicago, IL 60607-7045, USA} \email{[email protected]} \urladdr{www.math.uic.edu/~isaac} \begin{abstract} The randomization of a complete first order theory $T$ is the complete continuous theory $T^R$ with two sorts, a sort for random elements of models of $T$, and a sort for events in an underlying probability space. We give necessary and sufficient conditions for an element to be definable over a set of parameters in a model of $T^R$. \end{abstract} \subjclass[2010]{Primary 03C40. Secondary 03B48, 03B50, 03C35} \maketitle \section{Introduction} A randomization of a first order structure $\cu M$, as introduced by Keisler [Kei1] and formalized as a metric structure by Ben Yaacov and Keisler [BK], is a continuous structure $\cu N$ with two sorts, a sort for random elements of $\cu M$, and a sort for events in an underlying atomless probability space. Given a complete first order theory $T$, the theory $T^R$ of randomizations of models of $T$ forms a complete theory in continuous logic, which is called the randomization of $T$. In a model $\cu N$ of $T^R$, for each $n$-tuple $\vec{ a}$ of random elements and each first order formula $\varphi(\vec v)$, the set of points in the underlying probability space where $\varphi(\vec{ a})$ is true is an event denoted by $\l\varphi(\vec{ a})\rr$. 
In a first order structure $\cu M$, an element $b$ is \emph{definable over} a set $A$ of elements of $\cu M$ (called parameters) if there is a tuple $\vec a$ in $A$ and a formula $\varphi(u,\vec a)$ such that $$\cu M\models (\forall u)(\varphi(u,\vec a)\leftrightarrow u=b).$$ In a general metric structure $\cu N$, an element $ b$ is said to be \emph{definable over} a set of parameters $ A$ if there is a sequence of tuples $\vec{ a}_n$ in $ A$ and continuous formulas $\Phi_n(x,\vec{ a}_n)$ whose truth values converge uniformly to the distance from $x$ to $ b$. In this paper we give necessary and sufficient conditions for definability in a model of the randomization theory $T^R$. These conditions can be stated in terms of sequences of first order formulas. The results in this paper will be applied in a forthcoming paper about independence relations in randomizations. In Theorem \ref{t-definableB}, we show that an event $\sa E$ is definable over a set $ A$ of parameters if and only if it is the limit of a sequence of events of the form $\l\varphi_n(\vec{ a}_n)\rr$, where each $\varphi_n$ is a first order formula and each $\vec{ a}_n$ is a tuple from $ A$. In Theorem \ref{t-separable}, we show that a random element $ b$ is definable over a set $ A$ of parameters if and only if $ b$ is the limit of a sequence of random elements $ b_n$ such that for each $n$, $$\l(\forall u)(\varphi_n(u,\vec{ a}_n)\leftrightarrow u= b_n)\rr$$ has probability one for some first order formula $\varphi_n(u,\vec v)$ and a tuple $\vec{ a}_n$ from $ A$. In Section 4 we give some consequences in the special case that the underlying first order theory $T$ is $\aleph_0$-categorical. Continuous model theory in its current form is developed in the papers [BBHU] and [BU]. The papers [Go1], [Go2], [Go3] deal with definability questions in metric structures. Randomizations of models are treated in [AK], [Be], [BK], [EG], [GL], [Ke1], and [Ke2]. 
\section{Preliminaries} We refer to [BBHU] and [BU] for background in continuous model theory, and follow the notation of [BK]. We assume familiarity with the basic notions about continuous model theory as developed in [BBHU], including the notions of a theory, structure, pre-structure, model of a theory, elementary extension, isomorphism, and $\kappa$-saturated structure. In particular, the universe of a pre-structure is a pseudo-metric space, the universe of a structure is a complete metric space, and every pre-structure has a unique completion. In continuous logic, formulas have truth values in the unit interval $[0,1]$ with $0$ meaning true, the connectives are continuous functions from $[0,1]^n$ into $[0,1]$, and the quantifiers are $\sup$ and $\inf$. A \emph{tuple} is a finite sequence, and $A^{<\mathbb{N}}$ is the set of all tuples of elements of $A$. \subsection{The theory $T^R$} We assume throughout that $L$ is a finite or countable first order signature, and that $T$ is a complete theory for $L$ whose models have at least two elements. The \emph{randomization signature} $L^R$ is the two-sorted continuous signature with sorts $\mathbb{K}$ (for random elements) and $\mathbb{B}$ (for events), an $n$-ary function symbol $\l\varphi(\cdot)\rr$ of sort $\mathbb{K}^n\to\mathbb{B}$ for each first order formula $\varphi$ of $L$ with $n$ free variables, a $[0,1]$-valued unary predicate symbol $\mu$ of sort $\mathbb{B}$ for probability, and the Boolean operations $\top,\bot,\sqcap, \sqcup,\neg$ of sort $\mathbb{B}$. The signature $L^R$ also has distance predicates $d_\mathbb{B}$ of sort $\mathbb{B}$ and $d_\mathbb{K}$ of sort $\mathbb{K}$. In $L^R$, we use ${\sa B},{\sa C},\ldots$ for variables or parameters of sort $\mathbb{B}$. ${\sa B}\doteq{\sa C}$ means $d_\mathbb{B}({\sa B},{\sa C})=0$, and ${\sa B}\sqsubseteq{\sa C}$ means ${\sa B}\doteq{\sa B}\sqcap{\sa C}$. 
A pre-structure for $T^R$ will be a pair $\cu P=(\cu K,\cu B)$ where $\cu K$ is the part of sort $\mathbb{K}$ and $\cu B$ is the part of sort $\mathbb{B}$. The \emph{reduction} of $\cu P$ is the pre-structure $\cu N=(\widehat{\cu K},\widehat{\cu B})$ obtained from $\cu P$ by identifying elements at distance zero, and the associated mapping from $\cu P$ onto $\cu N$ is called the \emph{reduction map}. The \emph{completion} of $\cu P$ is the structure obtained by completing the metrics in the reduction of $\cu P$. A pre-structure $\cu P$ is called \emph{pre-complete} if the reduction of $\cu P$ is already the completion of $\cu P$. In [BK], the randomization theory $T^R$ is defined by listing a set of axioms. We will not repeat these axioms here, because it is simpler to give the following model-theoretic characterization of $T^R$. \begin{df} \label{d-nice} Given a model $\cu M$ of $T$, a \emph{nice randomization of} $\cu M$ is a pre-complete structure $(\cu K,\cu B)$ for $L^R$ equipped with an atomless probability space $(\Omega,\cu B,\mu )$ such that: \begin{enumerate} \item $\cu B$ is a $\sigma$-algebra with $\top,\bot,\sqcap, \sqcup,\neg$ interpreted by $\Omega,\emptyset,\cap,\cup,\setminus$. \item $\cu K$ is a set of functions $a\colon\Omega\to M$. \item For each formula $\psi(\vec{x})$ of $L$ and tuple $\vec{a}$ in $\cu K$, we have $$\l\psi(\vec{a})\rr=\{\omega\in\Omega:\cu M\models\psi(\vec{a}(\omega))\}\in\cu B.$$ \item $\cu B$ is equal to the set of all events $ \l\psi(\vec{a})\rr$ where $\psi(\vec{v})$ is a formula of $L$ and $\vec{a}$ is a tuple in $\cu K$. 
\item For each formula $\theta(u, \vec{v})$ of $L$ and tuple $\vec{b}$ in $\cu K$, there exists $a\in\cu K$ such that $$ \l \theta(a,\vec{b})\rr=\l(\exists u\,\theta)(\vec{b})\rr.$$ \item On $\cu K$, the distance predicate $d_\mathbb{K}$ defines the pseudo-metric $$d_\mathbb{K}(a,b)= \mu \l a\neq b\rr .$$ \item On $\cu B$, the distance predicate $d_\mathbb{B}$ defines the pseudo-metric $$d_\mathbb{B}({\sa B},{\sa C})=\mu ( {\sa B}\triangle {\sa C}).$$ \end{enumerate} \end{df} \begin{df} For each first order theory $T$, the \emph{randomization theory} $T^R$ is the set of sentences that are true in all nice randomizations of models of $T$. \end{df} It follows that for each first order sentence $\varphi$, if $T\models\varphi$ then $T^R\models \l\varphi\rr\doteq \top$. The following basic facts are from [BK], Theorem 2.1 and Proposition 2.2, Example 3.4 (ii), Proposition 2.7, and Theorem 2.9. \begin{fact} \label{f-complete} For every complete first order theory $T$, the randomization theory $T^R$ is complete. \end{fact} \begin{fact} \label{f-T^R} Every model $\cu M$ of $T$ has nice randomizations. \end{fact} \begin{fact} \label{f-perfectwitnesses} (Fullness) Every pre-complete model $\cu P=(\cu K,\cu B)$ of $T^R$ has perfect witnesses, i.e., \begin{enumerate} \item For each first order formula $\theta(u,\vec v)$ and each $\vec{b }$ in $\cu K^n$ there exists $a \in\cu K$ such that $$ \l\theta(a,\vec b)\rr \doteq \l(\exists u\,\theta)(\vec{b })\rr;$$ \item For each ${\sa B}\in\cu B$ there exist $a ,b \in\cu K$ such that ${\sa B}\doteq\l a=b \rr$. \end{enumerate} \end{fact} \begin{cor} \label{c-two} Every model $\cu N$ of $T^R$ has a pair of elements $ c, d$ such that $\l c\ne d\rr=\top$. \end{cor} \begin{proof} Every model of $T$ has at least two elements, so $T\models(\exists u)(\exists v)u\ne v$. The result follows by applying Fullness twice.
\end{proof} \begin{fact} \label{f-qe} (Strong quantifier elimination) Every formula $\Phi$ in the continuous language $L^R$ is $T^R$-equivalent to a formula with the same free variables and no quantifiers of sort $\mathbb{K}$ or $\mathbb{B}$. \end{fact} \begin{lemma} \label{l-glue} Let $\cu P=(\cu K,\cu B)$ be a pre-complete model of $T^R$ and let $a ,b \in\cu K$ and ${\sa B}\in\cu B$. Then there is an element $c \in\cu K$ that agrees with $a $ on ${\sa B}$ and agrees with $b $ on $\neg{\sa B}$, that is, ${\sa B}\sqsubseteq\l c =a \rr$ and $(\neg{\sa B})\sqsubseteq\l c =b \rr$. \end{lemma} \begin{df} In Lemma \ref{l-glue}, we will call $c$ a \emph{characteristic function of $\sa B$ with respect to $a,b$}. \end{df} Note that the distance between any two characteristic functions of an event $\sa B$ with respect to elements $a,b$ is zero. In particular, in a model of $T^R$, the characteristic function is unique. \begin{proof}[Proof of Lemma \ref{l-glue}] By Fact \ref{f-perfectwitnesses} (2), there exist $d,e\in\cu K$ such that ${\sa B}\doteq\l d=e\rr$. The first order sentence $$(\forall u)(\forall v)(\forall x)(\forall y)(\exists z)[(x=y\rightarrow z=u)\wedge( x\neq y\rightarrow z=v)]$$ is logically valid, so we must have $$\l (\exists z)[(d=e\rightarrow z=a)\wedge (d\ne e\rightarrow z=b)]\rr\doteq \top.$$ By Fact \ref{f-perfectwitnesses} (1) there exists $c\in\cu K$ such that $$\l d=e\rightarrow c=a\rr\doteq\top,\quad \l d\ne e\rightarrow c=b\rr\doteq \top,$$ so $\l d=e\rr\sqsubseteq \l c=a\rr$ and $\l d\ne e\rr\sqsubseteq\l c=b\rr$. \end{proof} We will need the following result, which is a consequence of Theorem 3.11 of [Be]. Since the setting in [Be] is quite different from the present paper, we give a direct proof here. \begin{prop} \label{p-representation} Every model of $T^R$ is isomorphic to the reduction of a nice randomization of a model of $T$. 
\end{prop} \begin{proof} Let $\cu N=(\widehat{\cu K},\widehat{\cu B})$ be a model of $T^R$ of cardinality $\kappa$. Let $\Omega$ be the Stone space of the Boolean algebra $\widehat{\cu B}=(\widehat{\cu B},\top,\bot,\sqcap, \sqcup,\neg)$. Thus $\Omega$ is a compact topological space, the points of $\Omega$ are ultrafilters, we may identify $\widehat{\cu B}$ with the Boolean algebra of clopen sets of $\Omega$, and $\mu^\cu N$ is a finitely additive probability measure on $\widehat{\cu B}$. We next show that $\mu$ is $\sigma$-additive on $\widehat{\cu B}$. To do this, we assume that ${\sa A}_0\supseteq{\sa A}_1\supseteq\cdots$ in $\widehat{\cu B}$ and ${\sa C} =\bigcap_n{\sa A}_n\in\widehat{\cu B}$, and prove that $\mu(\sa C)=\lim_{n\to\infty}\mu({\sa A}_n)$. Indeed, the family $\{\sa C\cup(\Omega\setminus{\sa A}_n)\colon n\in\mathbb{N}\}$ is an open covering of $\Omega$, so by the topological compactness of $\Omega$, we have $\Omega=\bigcup_{k=0}^n (\sa C\cup(\Omega\setminus{\sa A}_k))$ for some $n\in\mathbb{N}$. Then $\sa C={\sa A}_n$, so $\mu(\sa C)=\mu({\sa A}_n)=\lim_{n\to\infty}\mu({\sa A}_n)$. By the Carath\'{e}odory theorem, there is a complete probability space $(\Omega,\cu B,\mu)$ such that $\cu B\supseteq\widehat{\cu B}$, $\mu$ agrees with $\mu^\cu N$ on $\widehat{\cu B}$, and for each ${\sa B}\in\cu B$ and $m>0$ there is a countable sequence ${\sa A}_{m0}\subseteq {\sa A}_{m1}\subseteq\cdots$ in $\widehat{\cu B}$ such that \begin{equation} \label{eq-rep} {\sa B}\subseteq \bigcup_n {\sa A}_{mn} \mbox{ and } \mu\left(\bigcup_n {\sa A}_{mn}\right)\le\mu({\sa B})+1/m. \end{equation} Note that since the probability space $(\Omega,\cu B,\mu)$ is complete, every subset of $\Omega$ that contains a set in $\cu B$ of measure one also belongs to $\cu B$ and has measure one. We claim that for each $\sa B\in\cu B$ there is a unique event $f(\sa B)\in\widehat{\cu B}$ such that $\mu(f(\sa B)\triangle{\sa B})=0$.
The uniqueness of $f(\sa B)$ follows from the fact that the distance function $d_\mathbb{B}(\sa C,\sa D)=\mu(\sa C\triangle\sa D)$ is a metric on $\widehat{\cu B}$. To show the existence of $f(\sa B)$, for each $m>0$ let ${\sa A}_{m0}\subseteq {\sa A}_{m1}\subseteq\cdots$ be as in (\ref{eq-rep}). Note that $({\sa A}_{m0},{\sa A}_{m1},\ldots)$ is a Cauchy sequence of events in the model $\cu N$, so there is an event ${\sa C}_m\in\widehat{\cu B}$ such that ${\sa C}_m=\lim_{n\to\infty} {\sa A}_{mn}$. Hence $\lim_{n\to\infty}\mu({\sa A}_{mn}\triangle {\sa C}_m)=0$, so $\mu((\bigcup_n {\sa A}_{mn})\triangle {\sa C}_m)=0$. Then $({\sa C}_1,{\sa C}_2,\ldots)$ is a Cauchy sequence, so there is an event $f(\sa B)=\lim_{m\to\infty} {\sa C}_m$ in $\widehat{\cu B}$ with $\mu(f(\sa B)\triangle{\sa B})=0$. We make some observations about the mapping $f\colon\cu B\to\widehat{\cu B}$. If $\sa B, \sa C\in\cu B$ and $d_\mathbb{B}(\sa B,\sa C)=0$, then $f(\sa B)=f(\sa C)$. For each $\sa B, \sa C\in\cu B$, we have $$f(\sa B\cup\sa C)=f(\sa B)\cup f(\sa C),\qquad f(\sa B\cap\sa C)=f(\sa B)\cap f(\sa C),$$ $$\Omega\setminus f(\sa B)=f(\Omega\setminus\sa B), \qquad \mu(\sa B)=\mu(f(\sa B)).$$ Moreover, the mapping $f$ sends $\cu B$ onto $\widehat{\cu B}$, because if $\sa C\in\widehat{\cu B}$ then $\sa C\in\cu B$ and $f(\sa C)=\sa C$. Therefore the mapping $\widehat f$ that sends the equivalence class of each $\sa B\in\cu B$ under $d_\mathbb{B}$ to $f(\sa B)$ is well defined and is an isomorphism from the reduction of the pre-structure $(\cu B,\sqcup,\sqcap,\neg,\top,\bot,\mu)$ onto the measured algebra $(\widehat{\cu B},\sqcup,\sqcap,\neg,\top,\bot,\mu)$. A model $\cu M$ of $T$ is \emph{$\kappa^+$-universal} if every model of $T$ of cardinality $\le\kappa$ is elementarily embeddable in $\cu M$. By Theorem 5.1.12 in [CK], every $\kappa$-saturated model of $T$ is $\kappa^+$-universal, so $\kappa^+$-universal models of $T$ exist.
We now assume that $\cu M$ is a $\kappa^+$-universal model of $T$, and prove that $\cu N$ is isomorphic to the reduction of a nice randomization of $\cu M$ with the underlying probability space $(\Omega,\cu B,\mu)$. In the following paragraphs, we will use boldface letters $\bo b,\bo d,\ldots$ for elements of $\widehat{\cu K}$. Let $L_{\widehat{\cu K}}$ be the first order signature formed by adding a constant symbol for each element $\bo b\in\widehat{\cu K}$. For each $\omega\in\Omega$, the set of $L_{\widehat{\cu K}}$-sentences $$ U(\omega)=\{\psi(\vec{\bo b})\colon \omega\in\l\psi(\vec{\bo b})\rr\}$$ is consistent with $T$ and has cardinality $\le\kappa$. By the Compactness and L\"{o}wenheim-Skolem theorems, each $U(\omega)$ has a model $(\cu M_\omega,{\bo b}_\omega)_{\bo b\in\widehat{\cu K}}$ of cardinality $\le\kappa$. Since $\cu M$ is $\kappa^+$-universal, for each $\omega\in\Omega$ we may choose an elementary embedding $h_\omega\colon\cu M_\omega\prec\cu M$. Then $(\cu M,h_\omega({\bo b}_\omega))_{\bo b\in\widehat{\cu K}}\models U(\omega)$ for every $\omega\in\Omega$. 
It follows that for each formula $\psi(\vec v)$ of $L$ and each tuple $\vec {\bo b}\in\widehat{\cu K}^{<\mathbb{N}}$, $$ \l\psi(\vec{\bo b})\rr=\{\omega\in\Omega\colon\cu M_\omega\models\psi(\vec {\bo b}_\omega)\}= \{\omega\in\Omega\colon \cu M\models\psi(h_\omega(\vec {\bo b}_\omega))\}\in\widehat{\cu B}.$$ For each formula $\psi(\vec v)$ of $L$ and tuple $\vec c$ of functions in $M^\Omega$, define $$ \l\psi(\vec c)\rr:=\{\omega\in\Omega\colon \cu M\models\psi(\vec c(\omega))\}.$$ Let $\cu K$ be the set of all functions $a\colon\Omega\to M$ such that for some element $\bo b\in\widehat{\cu K}$, we have $$\mu(\{\omega\in\Omega\colon a(\omega)=h_\omega({\bo b}_\omega)\})=1.$$ We claim that for each $a\in\cu K$ there is a unique element $f(a)\in\widehat{\cu K}$ such that $$\mu(\{\omega\in\Omega\colon a(\omega)=h_\omega(f(a)_\omega)\})=1.$$ The existence of $f(a)$ is guaranteed by the definition of $\cu K$. To prove uniqueness, suppose $\bo b, \bo d\in\widehat{\cu K}$ and $$\mu(\{\omega\in\Omega\colon a(\omega)=h_\omega({\bo b}_\omega)\})=\mu(\{\omega\in\Omega\colon a(\omega)=h_\omega({\bo d}_\omega)\})=1.$$ Then $$\mu(\{\omega\in\Omega\colon h_\omega({\bo b}_\omega)=h_\omega({\bo d}_\omega) \})=1,$$ so $$\mu(\l\bo b=\bo d\rr)=\mu(\{\omega\in\Omega\colon {\bo b}_\omega={\bo d}_\omega\})=1,$$ and hence $d_\mathbb{K}(\bo b,\bo d)=0$. Since $d_\mathbb{K}$ is a metric on $\widehat{\cu K}$, it follows that $\bo b=\bo d$. We now make some observations about the mapping $f\colon\cu K\to\widehat{\cu K}$. This mapping sends $\cu K$ onto $\widehat{\cu K}$, because for each $\bo b\in\widehat{\cu K}$, we have $f(a)=\bo b$ where $a$ is the element of $\cu K$ such that $a(\omega)=h_\omega({\bo b}_\omega)$ for all $\omega\in\Omega$. Suppose $\vec c\in{\cu K}^{<\mathbb{N}}$ and $\vec {\bo d}=f(\vec c)$. 
We have $\vec {\bo d}\in\widehat{\cu K}^{<\mathbb{N}}$ and $$\l\psi(\vec {\bo d})\rr=\{\omega\in\Omega\colon \cu M\models\psi(h_\omega(\vec {\bo d}_\omega))\}\doteq \{\omega\in\Omega\colon \cu M\models\psi(\vec c(\omega))\}=\l\psi(\vec c)\rr.$$ Since the probability space $(\Omega,\cu B,\mu)$ is complete, $\l\psi(\vec {\bo d})\rr\in\widehat{\cu B}\subseteq\cu B$, and $\l\psi(\vec {\bo d})\rr\doteq\l\psi(\vec c)\rr$, we have $\l\psi(\vec c)\rr\in\cu B$ and $\l\psi(\vec {\bo d})\rr=f(\l\psi(\vec c)\rr)$. Therefore, if $a,c\in\cu K$ and $d_\mathbb{K}(a,c)=0$, then $d_\mathbb{K}(f(a),f(c))=0$, and hence $f(a)=f(c)$. This shows that $\cu P=(\cu K,\cu B)$ is a well-defined pre-complete structure for $L^R$, and that the mapping $\widehat f$ that sends the equivalence class of each ${\sa B}\in\cu B$ to $f(\sa B)$, and the equivalence class of each $a\in\cu K$ to $f(a)$, is an isomorphism from the reduction of $\cu P$ to $\cu N$. It remains to show that $\cu P$ is a nice randomization of $\cu M$. It is clear that $\cu P$ satisfies conditions (1)-(3) in Definition \ref{d-nice}. Proof of (4): We have already shown that $\l\psi(\vec c)\rr\in\cu B$ for each formula $\psi(\vec v)$ of $L$ and each tuple $\vec c$ in $\cu K$. For the other direction, let $\sa B\in\cu B$. By Corollary \ref{c-two}, there exist $a,e\in\cu K$ such that $\l a\ne e\rr\doteq\Omega$. We may choose a function $b\in M^\Omega$ such that $b(\omega)=e(\omega)$ whenever $a(\omega)\ne e(\omega)$, and $b(\omega)\ne a(\omega)$ for all $\omega\in\Omega$. Then $b\in\cu K$ and $\l a\ne b\rr=\Omega$. By Lemma \ref{l-glue}, there exists $c\in\cu K$ which is a characteristic function of $\sa B$ with respect to $a,b$. Then $\l c=a\rr\doteq\sa B$. Let $d\in M^\Omega$ be the function such that $d(\omega)=a(\omega)$ for $\omega\in\sa B$, and $d(\omega)=b(\omega)$ for $\omega\in\neg\sa B$. Then $\mu(\l c=d\rr)=1$, so $d\in\cu K$, and $\l a=d\rr=\sa B$. Thus (4) holds with $\psi$ being the sentence $a=d$. 
Proof of (5): Consider a formula $\theta(u,\vec v)$ of $L$ and a tuple $\vec b$ in $\cu K$. By Fullness, there exists $c\in\cu K$ such that $$\l\theta(c,\vec b)\rr\doteq\l(\exists u)\theta(u,\vec b)\rr.$$ We may choose a function $a\in M^\Omega$ such that for all $\omega\in\Omega$, $$\cu M\models [\theta(c(\omega),\vec b(\omega))\leftrightarrow (\exists u)\theta(u,\vec b(\omega))] \mbox{ implies } a(\omega)=c(\omega),$$ and $$\cu M\models [(\exists u)\theta(u,\vec b(\omega))\rightarrow\theta(a(\omega),\vec b(\omega))].$$ Then $\mu(\l a=c\rr)=1$, so $a\in\cu K$ and $$\l\theta(a,\vec b)\rr=\l(\exists u)\theta(u,\vec b)\rr,$$ as required. Proof of (6) and (7): By Fact \ref{f-T^R}, the properties $$ (\forall x)(\forall y) d_\mathbb{K}(x,y)=\mu(\l x\ne y\rr),\quad (\forall \sa U)(\forall \sa V)d_\mathbb{B}(\sa U,\sa V)=\mu(\sa U\triangle\sa V)$$ hold in some model of $T^R$. By Fact \ref{f-complete}, these properties hold in all models of $T^R$, and thus in $\cu N$. Therefore (6) and (7) hold for $\cu P$. \end{proof} \subsection{Types and Definability} For a first order structure $\cu M$ and a set $A$ of elements of $\cu M$, $\cu M_A$ denotes the structure formed by adding a new constant symbol to $\cu M$ for each $a\in A$. The \emph{type realized by} a tuple $\vec b$ over the parameter set $A$ in $\cu M$ is the set $\tp^\cu M(\vec b/A)$ of formulas $\varphi(\vec u,{\vec a})$ with $\vec a\in A^{<\mathbb{N}}$ satisfied by $\vec b$ in $\cu M_A$. We call $\tp^{\cu M}(\vec b/A)$ an \emph{$n$-type} if $n=|\vec b|$. In the following, let $\cu N$ be a continuous structure and let $ A$ be a set of elements of $\cu N$. $\cu N_{ A}$ denotes the structure formed by adding a new constant symbol to $\cu N$ for each $ a\in A$.
The \emph{type} $\tp^\cu N(\vec{ b}/ A)$ \emph{realized} by $\vec { b}$ over the parameter set $ A$ in $\cu N$ is the function $p$ from formulas to $[0,1]$ such that for each formula $\Phi(\vec{x},\vec { a})$ with $\vec{ a}\in { A}^{<\mathbb{N}}$, we have $\Phi(\vec{x},\vec { a})^p=\Phi(\vec { b},\vec { a})^\cu N$. We now recall the notions of definable element and algebraic element from [BBHU]. An element ${ b}$ is \emph{definable over} $ A$ in $\cu N$, in symbols $ b\in\dcl^\cu N( A)$, if there is a sequence of formulas $\langle\Phi_k(x,\vec{ a}_k)\rangle$ with $\vec{ a}_k\in{ A}^{<\mathbb{N}}$ such that the sequence of functions $\langle\Phi_k(x,\vec{ a}_k)^\cu N\rangle$ converges uniformly in $x$ to the distance function $d(x, b)^\cu N$ of the corresponding sort. $ b$ is \emph{algebraic over $ A$} in $\cu N$, in symbols $ b\in\acl^\cu N( A)$, if there is a compact set $C$ and a sequence of formulas $\langle\Phi_k(x,\vec { a}_k)\rangle$ with $\vec{ a}_k\in{ A}^{<\mathbb{N}}$ such that $b\in C$ and the sequence of functions $\langle\Phi_k(x,\vec{ a}_k)^\cu N\rangle$ converges uniformly in $x$ to the distance function $d(x,C)^\cu N$ of the corresponding sort. If the structure $\cu N$ is clear from the context, we will sometimes drop the superscript and write $\tp, \dcl, \acl$ instead of $\tp^\cu N, \dcl^\cu N, \acl^\cu N$. \begin{fact} \label{f-definable} ([BBHU], Exercises 10.7 and 10.10) For each element $ b$ of $\cu N$, the following are equivalent, where $p=\tp^\cu N( b/ A)$: \begin{enumerate} \item $ b$ is definable over $ A$ in $\cu N$; \item in each model $\cu N'\succ\cu N$, $ b$ is the unique element that realizes $p$ over $ A$; \item $ b$ is definable over some countable subset of $ A$ in $\cu N$.
\end{enumerate} \end{fact} \begin{fact} \label{f-algebraic} ([BBHU], Exercise 10.8 and 10.11) For each element $ b$ of $\cu N$, the following are equivalent, where $p=\tp^\cu N( b/ A)$: \begin{enumerate} \item $ b$ is algebraic over $ A$ in $\cu N$; \item in each model $\cu N'\succ\cu N$, the set of elements $ b$ that realize $p$ over $ A$ in $\cu N'$ is compact. \item $ b$ is algebraic over some countable subset of $ A$ in $\cu N$. \end{enumerate} \end{fact} \begin{fact} \label{f-definableclosure} (Definable Closure, Exercises 10.10 and 10.11 in [BBHU]) \begin{enumerate} \item If $ A\subseteq\cu N$ then $\dcl( A)=\dcl(\dcl( A))$ and $\acl( A)=\acl(\acl( A))$. \item If $ A$ is a dense subset of $ B$ and $ B\subseteq\cu N$, then $\dcl(A)=\dcl( B)$ and $\acl(A)=\acl( B)$. \end{enumerate} \end{fact} It follows that for any $ A\subseteq\cu N$, $\dcl( A)$ and $\acl( A)$ are closed with respect to the metric in $\cu N$. We now turn to the case where $\cu N$ is a model of $T^R$. In that case, a set of elements of $\cu N$ may contain elements of both sorts $\mathbb{K}, \mathbb{B}$. But as we will now explain, we need only consider definability over sets of parameters of sort $\mathbb{K}$. \begin{rmk} \label{r-sortK-definability} Let $\cu N=(\widehat{\cu K},\widehat{\cu B})$ be a model of $T^R$. Since every model of $T$ has at least two elements, $\cu N$ has a pair of elements $ a, b$ of sort $\mathbb{K}$ such that $\cu N\models\l a= b\rr=\bot$. For each event ${\sa D}\in\widehat{\cu B}$, let $1_{\sa D}$ be the characteristic function of ${\sa D}$ with respect to $ a, b$. Then in the model $\cu N$, ${\sa D}$ is definable over $\{ a, b,1_{\sa D}\}$, and $1_{\sa D}$ is definable over $\{ a, b,{\sa D}\}$. \end{rmk} \begin{proof} By Fact \ref{f-definable}. 
\end{proof} In view of Remark \ref{r-sortK-definability} and Fact \ref{f-definableclosure}, if $C$ is a set of parameters in $\cu N$ of both sorts, and there are elements $a,b\in C$ such that $\cu N\models\l a= b\rr=\bot$, then an element of either sort is definable over $C$ if and only if it is definable over the set of parameters of sort $\mathbb{K}$ obtained by replacing each element of $C$ of sort $\mathbb{B}$ by its characteristic function with respect to $ a, b$. For this reason, in a model $\cu N$ of $T^R$ we will only consider definability over sets of parameters of sort $\mathbb{K}$. We write $\dcl_\mathbb{B}( A)$ for the set of elements of sort $\mathbb{B}$ that are definable over $ A$ in $\cu N$, and write $\dcl( A)$ for the set of elements of sort $\mathbb{K}$ that are definable over $ A$ in $\cu N$. Similarly for $\acl_\mathbb{B}( A)$ and $\acl( A)$. \subsection{Conventions and Notation} We will assume hereafter that $\cu N=(\widehat{\cu K},\widehat{\cu B})$ is a model of $T^R$, $\cu P=(\cu K,\cu B)$ is a nice randomization of a model $\cu M\models T$ with probability space $(\Omega,\cu B,\mu)$, and $\cu N$ is the reduction of $\cu P$. The existence of $\cu P$ is guaranteed by Proposition \ref{p-representation}. We will use boldfaced letters $\bo a,\bo b,\ldots$ for elements of $\widehat{\cu K}$. For each element $\bo a\in\widehat{\cu K}$, we will choose once and for all an element $a\in\cu K$ such that the image of $a$ under the reduction map is $\bo a$. It follows that for each first order formula $\varphi(\vec v)$, $\l\varphi(\vec{\bo a})\rr$ is the image of $\l\varphi(\vec a)\rr$ under the reduction map. For any countable set $ A\subseteq\widehat{\cu K}$ and each $\omega\in\Omega$, we define $$ A(\omega)=\{a(\omega)\colon \bo a\in A\}.$$ When $ A\subseteq\widehat{\cu K}$, $\cl( A)$ denotes the closure of $ A$ in the metric $d_\mathbb{K}$. 
When $ B\subseteq\widehat{\cu B}$, $\cl( B)$ denotes the closure of $ B$ in the metric $d_\mathbb{B}$, and $\sigma( B)$ denotes the smallest $\sigma$-subalgebra of $\widehat{\cu B}$ containing $ B$. \section{Randomizations of Arbitrary Theories} \label{s-arb} \subsection{Definability in Sort $\mathbb{B}$} We characterize the set of elements of $\widehat{\cu B}$ that are definable in $\cu N$ over a set of parameters $ A\subseteq\widehat{\cu K}$. \begin{df} For each $A\subseteq \widehat{\cu K}$, we say that an event ${\sa E}$ is \emph{first order definable} over $A$, in symbols ${\sa E}\in\fo_\mathbb{B}(A)$, if $\sa E=\l\varphi(\vec{\bo a})\rr$ for some first order formula $\varphi(\vec v)$ and tuple $\vec{\bo a}$ in ${A}^{<\mathbb{N}}$. \end{df} \begin{thm} \label{t-definableB} For each $ A\subseteq \widehat{\cu K}$, $\dcl_\mathbb{B}( A)=\cl(\fo_\mathbb{B}( A))=\sigma(\fo_\mathbb{B}( A))$. \end{thm} \begin{proof} By quantifier elimination (Fact \ref{f-qe}), in any elementary extension $\cu N'\succ\cu N$, two events have the same type over $ A$ if and only if they have the same type over $\fo_\mathbb{B}( A)$. Then by Fact \ref{f-definable}, $\dcl_\mathbb{B}( A)=\dcl_\mathbb{B}(\fo_\mathbb{B}( A))$. Moreover, $\dcl_\mathbb{B}(\fo_\mathbb{B}( A))$ is equal to the definable closure of $\fo_\mathbb{B}( A)$ in the pure measured algebra $(\widehat{\cu B},\mu)$. By Observation 16.7 in [BBHU], the definable closure of $\fo_\mathbb{B}( A)$ in $(\widehat{\cu B},\mu)$ is equal to $\sigma(\fo_\mathbb{B}( A))$, so $\dcl_\mathbb{B}( A)=\sigma(\fo_\mathbb{B}( A))$. Since $\fo_\mathbb{B}( A)$ is a Boolean subalgebra of $\widehat{\cu B}$, $\cl(\fo_\mathbb{B}( A))$ is a Boolean subalgebra of $\widehat{\cu B}$. By metric completeness, $\cl(\fo_\mathbb{B}( A))$ is a $\sigma$-algebra and $\sigma(\fo_\mathbb{B}( A))$ is closed, so $\cl(\fo_\mathbb{B}( A))=\sigma(\fo_\mathbb{B}( A))$. 
\end{proof} \begin{cor} \label{c-event-noparameters} The only events that are definable without parameters in $\cu N$ are $\top$ and $\bot$. \end{cor} \begin{proof} For every first order sentence $\varphi$, either $T\models\varphi$ and $T^R\models\l\varphi\rr=\top$, or $T\models\neg\varphi$ and $T^R\models\l\varphi\rr=\bot$. So $\fo_\mathbb{B}(\emptyset)=\{\top,\bot\}$. \end{proof} \subsection{First Order and Pointwise Definability} To prepare the way for a characterization of the definable elements of sort $\mathbb{K}$, we introduce two auxiliary notions, one that is stronger than definability in sort $\mathbb{K}$ and one that is weaker than definability in sort $\mathbb{K}$. We will work in the nice randomization $\cu P=(\cu K,\cu B)$ of $\cu M$, and let $A$ be a subset of $\widehat{\cu K}$ and $\bo b$ be an element of $\widehat{\cu K}$. \begin{df} A first order formula $\varphi(u,\vec v)$ is \emph{functional} if $$T\models(\forall \vec v)(\exists ^{\le 1} u)\varphi(u,\vec v).$$ We say that $\bo b$ is \emph{first order definable on ${\sa E}$ over $A$} if there is a functional formula $\varphi(u,\vec v)$ and a tuple $\vec{\bo a}\in {A}^{<\mathbb{N}}$ such that $\sa E=\l \varphi(\bo b,\vec{\bo a})\rr$. We say that $\bo b$ is \emph{first order definable over $ A$}, in symbols $\bo b\in\fo( A)$, if $\bo b$ is first order definable on $\top$ over $A$. \end{df} \begin{rmks} \label{r-definableover} $\bo b$ is first order definable over $ A$ if and only if there is a first order formula $\varphi(u,\vec v)$ and a tuple $\vec{\bo a}$ from $ A$ such that $$\mu(\l(\forall u)(\varphi(u,\vec{\bo a})\leftrightarrow u=\bo b)\rr)=1.$$ First order definability has finite character, that is, $\bo b$ is first order definable over $ A$ if and only if $\bo b$ is first order definable over some finite subset of $ A$. If $\bo b$ is first order definable on ${\sa E}$ over $A$, then $\sa E$ is first order definable over $A\cup\{\bo b\}$. 
If $\bo b$ is first order definable on ${\sa D}$ over $A$, and $\sa E$ is first order definable over $A\cup\{\bo b\}$, then $\bo b$ is first order definable on ${\sa D}\sqcap\sa E$ over $A$. \end{rmks} \begin{lemma} \label{l-firstorderdefinable} If $\bo b$ is first order definable over $ A$ then $\bo b$ is definable over $ A$ in $\cu N$. Thus $\fo( A)\subseteq\dcl( A)$. \end{lemma} \begin{proof} Let $\cu N'\succ\cu N$ and suppose that $\tp^{\cu N'}(\bo b/ A)=\tp^{\cu N'}(\bo d/ A)$. Then $$\l \varphi(\bo b,\vec{\bo a})\rr=\l \varphi(\bo d,\vec{\bo a})\rr=\top.$$ Since $\varphi$ is functional, $$ \l(\forall t)(\forall u) (\varphi(t,\vec{\bo a})\wedge\varphi(u,\vec{\bo a})\rightarrow t=u)\rr=\top.$$ Then $\l \bo b = \bo d\rr=\top$, so $\bo b=\bo d$, and by Fact \ref{f-definable}, $\bo b\in\dcl( A)$. \end{proof} \begin{df} When $A$ is countable, we define $$\l b\in\dcl^{\cu M}(A)\rr:=\{\omega\in\Omega\colon b(\omega)\in \dcl^\cu M(A(\omega))\}.$$ \end{df} \begin{lemma} \label{l-pointwisemeasurable} If $A$ is countable, then $$\l b\in\dcl^{\cu M}(A)\rr= \bigcup\{\l\theta(b,\vec a)\rr\colon\theta(u,\vec v) \mbox{ functional, } \vec{\bo a}\in A^{<\mathbb{N}}\},$$ and $\l b\in\dcl^{\cu M}(A)\rr\in\cu B$. \end{lemma} \begin{proof} Note that for every first order formula $\theta(u,\vec v)$, the formula $$\theta(u,\vec v)\wedge(\exists^{\le 1}u)\,\theta(u,\vec v)$$ is functional. Therefore $\omega\in\l b\in\dcl^{\cu M}(A)\rr$ if and only if $b(\omega)\in \dcl^\cu M(A(\omega))$, and this holds if and only if there is a functional formula $\theta(u,\vec v)$ and a tuple $\vec {\bo a}\in A^{<\mathbb{N}}$ such that $\cu M\models \theta(b(\omega),\vec a(\omega)).$ Since $A$ and $L$ are countable, $\l b\in\dcl^{\cu M}(A)\rr$ is the union of countably many events in $\cu B$, and thus belongs to $\cu B$.
\end{proof} \begin{df} When $A$ is countable, we say that $\bo b$ is \emph{pointwise definable over $A$} if $$\mu(\l b\in\dcl^{\cu M}(A)\rr)=1.$$ \end{df} \begin{cor} \label{c-pointwisedefinable} If $A$ is countable, then $\bo b$ is pointwise definable over $A$ if and only if there is a function $f$ on $\Omega$ such that: \begin{enumerate} \item For each $\omega\in \Omega$, $f(\omega)$ is a pair $\<\theta_\omega(u,\vec v),\vec a_\omega\>$ where $\theta_\omega(u,\vec v)$ is functional and $\vec a_\omega\in A^{|\vec v|}$; \item $f$ is $\sigma(\fo_\mathbb{B}(A))$-measurable (i.e., the inverse image of each point belongs to $\sigma(\fo_\mathbb{B}(A))$); \item $\cu M\models \theta_\omega(b(\omega),\vec a_\omega(\omega))$ for almost every $\omega\in\Omega$. \end{enumerate} \end{cor} \begin{proof} If $\omega\in\l b\in\dcl^{\cu M}(A)\rr$, let $f(\omega)$ be the first pair $\<\theta_\omega,\vec a_\omega\>$ such that $\theta_\omega(u,\vec v)$ is functional, $\vec a_\omega\in A^{|\vec v|}$, and $\cu M\models \theta_\omega(b(\omega),\vec a_\omega(\omega))$. Otherwise let $f(\omega)=\<\bot,\emptyset\>$. The result then follows from Lemma \ref{l-pointwisemeasurable}. \end{proof} \begin{lemma} \label{l-pointwisedefinable} If $\bo b$ is definable over $ A$ in $\cu N$, then $\bo b$ is pointwise definable over some countable subset of $A$. \end{lemma} \begin{proof} By Fact \ref{f-definable} (3), we may assume that $A$ is countable. By Lemma \ref{l-pointwisemeasurable}, the measure $r:= \mu(\l b\in\dcl^{\cu M}(A)\rr)$ exists. Suppose $\bo b$ is not pointwise definable over $A$. Then $r<1$.
For each finite collection $\chi_1(u,\vec v),\ldots,\chi_n(u,\vec v)$ of first order formulas, each tuple $\vec{\bo a}\in A^{<\mathbb{N}}$, and each $\omega\in\Omega\setminus \l b\in\dcl^{\cu M}(A)\rr$, the sentence $$ (\exists u)[u\ne b(\omega)\wedge\bigwedge_{i=1}^n [\chi_i(b(\omega),\vec a(\omega))\leftrightarrow \chi_i(u,\vec a(\omega))]] $$ holds in $\cu M$, because $b(\omega)$ is not definable over $A(\omega)$. Therefore in $\cu P$ we have $$ \mu\l (\exists u)[u\ne b\wedge\bigwedge_{i=1}^n [\chi_i(b,\vec a)\leftrightarrow \chi_i(u,\vec a)]]\rr\ge 1-r.$$ By condition (5) of Definition \ref{d-nice}, there is an element $\bo d\in\widehat{\cu K}$ such that $$ \mu\l d\ne b\wedge\bigwedge_{i=1}^n [\chi_i(b,\vec a)\leftrightarrow \chi_i(d,\vec a)]\rr\ge 1-r.$$ It follows that $ \mu(\l d\ne b\rr)\ge 1-r $, and $\l \chi_i(b,\vec a)\rr\doteq\l \chi_i(d,\vec a)\rr$ for each $i\le n$. By compactness, in some elementary extension of $\cu N$ there is an element $\bo d$ such that $\mu(\l\bo d\ne\bo b\rr)\ge 1-r$, and $\l\chi(\bo b,\vec{\bo a})\rr=\l\chi(\bo d,\vec{\bo a})\rr$ for each first order formula $\chi(u,\vec v)$. Then $\bo d\ne\bo b$, and by quantifier elimination, $\tp(\bo d/A)=\tp(\bo b/A)$. Hence by Fact \ref{f-definable} (2), $\bo b\notin\dcl( A)$. \end{proof} The following example shows that the converse of Lemma \ref{l-pointwisedefinable} fails badly. \begin{ex} Let $\cu M$ be a finite structure with a constant symbol for every element. Then every element of $\cu K$ is pointwise definable without parameters, but the only elements of $\widehat{\cu K}$ that are definable without parameters are the equivalence classes of constant functions $b\colon\Omega\to\cu M$. \end{ex} \subsection{Definability in Sort $\mathbb{K}$} We will now give necessary and sufficient conditions for an element $\bo b\in \widehat{\cu K}$ to be definable over a parameter set $ A\subseteq\widehat{\cu K}$ in $\cu N$.
\begin{thm} \label{t-dcl} $\bo{b}$ is definable over $ A$ if and only if there exist pairwise disjoint events $\{\sa E_n\colon n\in\mathbb{N}\}$ such that $\sum_{n\in\mathbb{N}}\mu( \sa E_n)=1$, and for each $n$, ${\sa E}_n$ is definable over $ A$, and $\bo b$ is first order definable on $\sa E_n$ over $A$. \end{thm} \begin{proof} $(\Rightarrow)$: Suppose $\bo{b}\in \dcl(A)$. By Lemma \ref{l-pointwisedefinable}, $\bo b$ is pointwise definable over some countable subset $A_0$ of $A$. The set of all events $\sa C$ such that $\bo b$ is first order definable on $\sa C$ over $A_0$ is countable, and may be arranged in a list $\{\sa C_n\colon n\in\mathbb{N}\}$. Let $\sa E_0=\sa C_0$, and $$\sa E_{n+1}=\sa C_{n+1}\sqcap\neg(\sa C_0\sqcup\cdots\sqcup\sa C_n).$$ The events $\sa E_n$ are pairwise disjoint, and for each $n$ we have $$\sa E_0\sqcup\cdots\sqcup\sa E_n=\sa C_0\sqcup\cdots\sqcup\sa C_n.$$ By Remarks \ref{r-definableover}, for each $n$, $\bo b$ is first order definable on $\sa E_n$ over $A$. By Lemma \ref{l-pointwisemeasurable} and pointwise definability, $$ \sum_{n\in\mathbb{N}}\mu(\sa E_n)=\lim_{n\to\infty}\mu(\sa C_0\sqcup\cdots\sqcup\sa C_n)=\mu(\l b\in\dcl^{\cu M}(A_0)\rr)=1.$$ By Remarks \ref{r-definableover}, ${\sa E}_n$ is definable over $ A\cup\{\bo b\}$, and since $\bo b$ is definable over $ A$, ${\sa E}_n$ is definable over $ A$ by Fact \ref{f-definableclosure}. \ $(\Leftarrow)$: Let $\sa E_n$ be as in the theorem. For each $n$, we have $\sa E_n=\l\theta_n(\bo b,\vec{\bo a}_n)\rr$ for some functional formula $\theta_n$ and tuple $\vec {\bo a}_n\in A^{<\mathbb{N}}$.
Since $\sa E_n$ is definable over $A$, by Theorem \ref{t-definableB} there is a sequence of formulas $\psi_k(\vec v)$ and tuples $\vec{\bo a}_k\in A^{<\mathbb{N}}$ such that $$\lim_{k\to\infty}d_\mathbb{B}(\l\psi_k(\vec{\bo a}_k)\rr,\l\theta_n(\bo b,\vec{\bo a}_n)\rr)=0.$$ Suppose $\bo d$ has the same type over $ A$ as $\bo b$ in some elementary extension ${\cu N}'$ of ${\cu N}$. Then $$\lim_{k\to\infty}d_\mathbb{B}(\l\psi_k(\vec{\bo a}_k)\rr,\l\theta_n(\bo d,\vec{\bo a}_n)\rr)=0.$$ Hence $$\l\theta_n(\bo d,\vec{\bo a}_n)\rr = \l\theta_n(\bo b,\vec{\bo a}_n)\rr={\sa E}_n$$ in ${\cu N}'$. Since $\theta_n(u,\vec v)$ is functional, we have $\l\theta_n(\bo b,\vec{\bo a}_n)\rr\sqsubseteq\l\bo d=\bo b\rr$ for each $n$. Then $$\mu(\l \bo d=\bo b\rr)\ge\sum_{n\in\mathbb{N}}\mu({\sa E}_n)=1,$$ so $\bo d =\bo b$. Then by Fact \ref{f-definable}, $\bo b\in\dcl( A)$. \end{proof} \begin{cor} \label{c-definableK-noparameters} An element $\bo b \in\widehat{\cu K}$ is definable without parameters if and only if $\bo b$ is first order definable without parameters. Thus $\dcl(\emptyset)=\fo(\emptyset)$. \end{cor} \begin{proof} $(\Rightarrow)$: Suppose $\bo b\in\dcl(\emptyset)$. By Theorem \ref{t-dcl}, there is an event $\sa E$ such that $\mu(\sa E)>0$, ${\sa E}$ is definable without parameters, and $\bo b$ is first order definable on $\sa E$ without parameters. By Corollary \ref{c-event-noparameters} we have ${\sa E}=\top$, so $\bo b$ is first order definable without parameters. $(\Leftarrow)$: By Lemma \ref{l-firstorderdefinable}. \end{proof} \begin{cor} \label{c-definable-finite} If $\fo_\mathbb{B}( A)$ is finite, then $\dcl_\mathbb{B}( A)=\fo_\mathbb{B}( A)$ and $\dcl( A)=\fo( A)$. \end{cor} \begin{proof} $\dcl_\mathbb{B}( A)=\fo_\mathbb{B}( A)$ follows from Theorem \ref{t-definableB}. Lemma \ref{l-firstorderdefinable} gives $\dcl( A)\supseteq\fo( A)$. For the other inclusion, suppose $\bo b\in\dcl( A)$.
By Theorem \ref{t-dcl}, there is a finite partition $\sa E_0,\ldots,\sa E_k$ of $\top$, a tuple $\vec {\bo a}\in A^{<\mathbb{N}}$, and first order formulas $\psi_i(\vec v)$ such that $\sa E_i=\l\psi_i(\vec{\bo a})\rr$ and $\bo b$ is first order definable on $\sa E_i$. Then there are functional formulas $\varphi_i(u,\vec v)$ such that $\sa E_i\doteq\l\varphi_i(\bo b,\vec{\bo a})\rr$. We may take the formulas $\psi_i(\vec v)$ to be pairwise inconsistent and such that $T\models\bigvee_{i=0}^k\psi_i(\vec v)$. Then $\bigwedge_{i=0}^k (\psi_i(\vec v)\rightarrow\varphi_i(u,\vec v))$ is a functional formula such that $$\l\bigwedge_{i=0}^k (\psi_i(\vec{\bo a})\rightarrow\varphi_i(\bo b,\vec {\bo a}))\rr=\top,$$ so $\bo b$ is first order definable over $ A$. \end{proof} \begin{cor} \label{c-dcl2} $\bo{b}$ is definable over $ A$ if and only if: \begin{enumerate} \item $\bo b$ is pointwise definable over some countable subset of $A$; \item for each functional formula $\varphi(u,\vec v)$ and tuple $\vec {\bo a}\in A^{<\mathbb{N}}$, $\l \varphi (\bo b, \vec{\bo a})\rr$ is definable over $ A$. \end{enumerate} \end{cor} \begin{proof} $(\Rightarrow)$: Suppose $\bo b \in\dcl( A)$. Then (1) holds by Lemma \ref{l-pointwisedefinable}. $\l \varphi (\bo b, \vec{\bo a})\rr$ is obviously definable over $ A\cup\{\bo b\}$, so $\l \varphi (\bo b, \vec{\bo a})\rr$ is definable over $ A$ by Fact \ref{f-definableclosure}, and thus (2) holds. $(\Leftarrow)$: Assume conditions (1) and (2). By (1) and Lemma \ref{l-pointwisemeasurable}, there is a sequence of functional formulas $\theta_n(u,\vec v)$ and tuples $\vec{\bo a}_n\in A^{<\mathbb{N}}$ such that $$\l b\in\dcl^{\cu M}(A)\rr= \bigcup_{n\in\mathbb{N}}\l\theta_n(b,\vec a_n)\rr\doteq\Omega.$$ Let $\sa E_n=\l\theta_n(\bo b,\vec{\bo a}_n)\rr$, so $\bo b$ is first order definable on $\sa E_n$ over $A$. 
By Remark \ref{r-definableover}, we may take the $\sa E_n$ to be pairwise disjoint, and thus $\sum_{n\in\mathbb{N}}\mu(\sa E_n)=1$. By (2), ${\sa E}_n$ is definable over $ A$ for each $n$. Then by Theorem \ref{t-dcl}, $\bo b \in\dcl( A)$. \end{proof} \begin{cor} \label{c-dcl3} $\bo{b}$ is definable over $ A$ if and only if: \begin{enumerate} \item $\bo b$ is pointwise definable over some countable subset of $A$; \item $\fo_\mathbb{B}( A\cup\{\bo b\})\subseteq\dcl_\mathbb{B}( A)$. \end{enumerate} \end{cor} \begin{thm} \label{t-separable} $\bo b$ is definable over $ A$ if and only if $\bo b=\lim_{m\to\infty} \bo b_m$, where each $\bo b_m$ is first-order definable over $ A$. Thus $\dcl( A)=\cl(\fo( A))$. \end{thm} \begin{proof} $(\Rightarrow)$: Suppose that $\bo b\in \dcl( A)$. If $A$ is empty, then $\bo b$ is already first order definable from $ A$ by Corollary \ref{c-definableK-noparameters}. Assume $A$ is not empty and let $\bo c\in A$. Let $\{\sa E_n\colon n\in\mathbb{N}\}$ be as in Theorem \ref{t-dcl}, and fix an $\varepsilon>0$. Then for some $n$, $\sum_{k=0}^n\mu( \sa E_k)>1-\varepsilon$. For each $k$, ${\sa E}_k$ is definable over $ A$, so by Theorem \ref{t-definableB}, there is an event $\sa D_k\in\fo_\mathbb{B}(A)$ such that $\mu(\sa D_k\triangle\sa E_k)<\varepsilon/n$. Since the events $\sa E_k$ are pairwise disjoint, we may also take the events $\sa D_k$ to be pairwise disjoint. We have $\sa E_k=\l\theta_k(\bo b,\vec{\bo a}_k)\rr$ for some functional $\theta_k(u,\vec v)$, so we may assume that $\sa D_k$ has the additional properties that $\sa D_k\sqsubseteq\l(\exists ! u)\theta_k(u,\vec{\bo a}_k)\rr$, and that $\sa D_k=\l\psi_k(\vec{\bo a}_k)\rr$ for some formula $\psi_k(\vec v)$. 
Then there is a unique element $\bo d\in\widehat{\cu K}$ such that $$\begin{cases} \cu M\models\theta_k(d(\omega),\vec a_k(\omega)) & \mbox{ if } k \le n \mbox{ and } \omega\in\l\psi_k(\vec{a}_k)\rr,\\ d(\omega) = c(\omega) & \mbox{ if } \omega\in\Omega\setminus\bigcup_{k=0}^n \l\psi_k(\vec{a}_k)\rr. \end{cases} $$ Then $\bo d$ is first order definable over $ A$, and $d_\mathbb{K}(\bo b,\bo d)<\varepsilon$. $(\Leftarrow)$: This follows because first order definability implies definability (Lemma \ref{l-firstorderdefinable}) and the set $\dcl( A)$ is metrically closed (Fact \ref{f-definableclosure} (2)). \end{proof} The following result was proved in [Be] by an indirect argument using Lascar types. We give a simple direct proof here. \begin{prop} For any model $\cu N=(\widehat{\cu K},\widehat{\cu B})$ of $T^R$ and set $ A\subseteq\widehat{\cu K}$, $\acl_\mathbb{B}( A)=\dcl_\mathbb{B}( A)$ and $\acl( A)=\dcl( A)$. \end{prop} \begin{proof} By Facts \ref{f-definable} and \ref{f-algebraic}, we may assume $\cu N$ is $\aleph_1$-saturated and $ A$ is countable. Suppose an event ${\sa E}\in\widehat{\cu B}$ is not definable over $ A$. By Fact \ref{f-definable} and $\aleph_1$-saturation there exists $\sa D\in\widehat{\cu B}$ such that $\tp(\sa D/ A)=\tp(\sa E/ A)$ but $d_\mathbb{B}(\sa D,\sa E)>0$. 
By $\aleph_1$-saturation again, there is a countable sequence of events $\<{\sa F}_n\colon n\in\mathbb{N}\>$ in $\widehat{\cu B}$ such that $$\mu(\sa C\cap{\sa F}_n)=\mu(\sa C\setminus{\sa F}_n)=\mu(\sa C)/2$$ for each $n$ and each event $\sa C$ in the Boolean algebra generated by $$\fo_\mathbb{B}(A)\cup\{\sa D,\sa E\}\cup\{{\sa F}_k\colon k<n\}.$$ For each $n$, let $${\sa D}_n=(\sa D\cap{\sa F}_n)\cup(\sa E\setminus{\sa F}_n).$$ Then for each $\sa C\in\fo_\mathbb{B}(A)$ and $n\in\mathbb{N}$, we have $$\mu({\sa D}_n\cap\sa C)=\mu(\sa D\cap \sa C)/2 + \mu(\sa E\cap \sa C)/2 =\mu(\sa E\cap \sa C).$$ By quantifier elimination, $\tp(\sa D_n/ A)=\tp(\sa E/ A)$ for each $n\in\mathbb{N}$. Moreover, whenever $k< n$ we have $${\sa D}_n\setminus{\sa D}_k=((\sa D\setminus{\sa D}_k)\cap{\sa F}_n)\cup((\sa E\setminus{\sa D}_k)\setminus{\sa F}_n),$$ so $$\mu({\sa D}_n\setminus{\sa D}_k)=\mu(\sa D\setminus{\sa D}_k)/2+\mu(\sa E\setminus{\sa D}_k)/2.$$ Note that whenever $\tp(\sa D'/A)=\tp(\sa D''/A)$, we have $\mu(\sa D')=\mu(\sa D'')$, and hence $$\mu(\sa D'\setminus\sa D'')=\mu(\sa D''\setminus\sa D')=d_\mathbb{B}(\sa D',\sa D'')/2.$$ Therefore $$d_\mathbb{B}({\sa D}_n,{\sa D}_k)=d_\mathbb{B}(\sa D,{\sa D}_k)/2 + d_\mathbb{B}(\sa E,{\sa D}_k)/2\ge d_\mathbb{B}(\sa D,\sa E)/2.$$ It follows that the set of realizations of $\tp(\sa E/A)$ is not compact, and $\sa E$ is not algebraic over $ A$. This shows that $\acl_\mathbb{B}( A)=\dcl_\mathbb{B}( A)$. Now suppose $\bo b\in \acl( A)\setminus \dcl( A)$. There is an element $\bo c\in\widehat{\cu K}$ such that $\tp(\bo b/ A)=\tp(\bo c/ A)$ but $d_\mathbb{K}(\bo b,\bo c)>0$. For each first order formula $\psi(u,\vec v)$ and $\vec{\bo a}\in A^{<\mathbb{N}}$, $\l\psi(\bo b,\vec{\bo a})\rr\in \acl_\mathbb{B}(\{\bo b\}\cup A)\subseteq\acl_\mathbb{B}(\acl(A))$. By Fact \ref{f-definableclosure}, $\l\psi(\bo b,\vec{\bo a})\rr\in \acl_\mathbb{B}(A)$. By the preceding paragraph, $\l\psi(\bo b,\vec{\bo a})\rr\in \dcl_\mathbb{B}( A)$. 
Since $\tp(\bo b/ A)=\tp(\bo c/ A)$, we have $\tp(\l\psi(\bo b,\vec{\bo a})\rr/A)=\tp(\l\psi(\bo c,\vec{\bo a})\rr/A)$. By Fact \ref{f-definable}, it follows that $\l\psi(\bo b,\vec{\bo a})\rr=\l\psi(\bo c,\vec{\bo a})\rr$ for every first order formula $\psi(u,\vec v)$. Then $\tp(b(\omega)/A(\omega))=\tp(c(\omega)/A(\omega))$ for $\mu$-almost all $\omega$. By $\aleph_1$-saturation, there are countably many independent events $\sa D_n\in\widehat{\cu B}$ such that $\sa D_n\sqsubseteq\l\bo b \ne\bo c\rr$ and $\mu(\sa D_n)=d_\mathbb{K}(\bo b,\bo c)/2$. Let $\bo c_n$ agree with $\bo c$ on $\sa D_n$ and agree with $\bo b$ elsewhere. We have $\tp(\bo c_n/A)=\tp(\bo b/A)$ for every $n\in\mathbb{N}$, and $d_\mathbb{K}(\bo c_n,\bo c_k)=d_\mathbb{K}(\bo b,\bo c)/2$ whenever $k< n$. Thus the set of realizations of $\tp(\bo b/A)$ is not compact, contradicting the fact that $\bo b\in \acl( A)$. \end{proof} \section{A Special Case: $\aleph_0$-categorical theories} \subsection{Definability and $\aleph_0$-Categoricity} We use our preceding results to characterize $\aleph_0$-categorical theories in terms of definability in randomizations. \begin{thm} \label{t-categorical} The following are equivalent: \begin{enumerate} \item $T$ is $\aleph_0$-categorical; \item $\fo_\mathbb{B}( A)$ is finite for every finite $ A$; \item $\dcl_\mathbb{B}( A)$ is finite for every finite $ A$; \item $\fo_\mathbb{B}(A)=\dcl_\mathbb{B}(A)$ for every finite $A$; \item $\fo( A)$ is finite for every finite $ A$; \item $\dcl( A)$ is finite for every finite $ A$. \item $\fo(A)=\dcl(A)$ for every finite $A$; \end{enumerate} \end{thm} \begin{proof} By the Ryll-Nardzewski Theorem (see [CK], Theorem 2.3.13), (1) is equivalent to (0) For each $n$ there are only finitely many formulas in $n$ variables up to $T$-equivalence. Assume (0) and let $A\subseteq\widehat{\cu K}$ be finite. Then (2) holds. Moreover, there are only finitely many functional formulas in $|A|+1$ variables, so (5) holds. 
Then by Corollary \ref{c-definable-finite}, (3), (4), (6), and (7) hold. Now assume that (0) fails. \emph{Proof that (2) and (3) fail}: For some $n$ there are infinitely many formulas in $n$ variables that are not $T$-equivalent. Hence there is an $n$-type $p$ in $T$ without parameters that is not isolated. So there are formulas $\varphi_1(\vec v), \varphi_2(\vec v),\ldots$ in $p$ such that for each $k>0$, $T\models \varphi_{k+1}\rightarrow\varphi_k$ but the formula $\theta_k=\varphi_k\wedge\neg\varphi_{k+1}$ is consistent with $T$. The formulas $\theta_k$ are consistent but pairwise inconsistent. By Fullness, for each $k>0$ there exists an $n$-tuple $\vec {\bo b}_k\in\widehat{\cu K}^n$ such that $\l\theta_k(\vec{\bo b}_k)\rr=\top$. Since the measured algebra $(\widehat{\cu B},\mu)$ is atomless, there are pairwise disjoint events $\sa E_1,\sa E_2,\ldots$ in $\widehat{\cu B}$ such that $\mu(\sa E_k)=2^{-k}$ for each $k>0$. By applying Lemma \ref{l-glue} $k$ times, we see that for each $k>0$ there is an $n$-tuple $\vec{\bo a}_k\in\widehat{\cu K}^n$ that agrees with $\vec{\bo b}_i$ on $\sa E_i$ whenever $0<i\le k$. Whenever $0<k\le j$, we have $\mu(\l \vec {\bo a}_k=\vec{\bo a}_j\rr)\ge 1-2^{-k}$. So $\<\vec {\bo a}_1,\vec{\bo a}_2,\ldots\>$ is a Cauchy sequence, and by metric completeness the limit $\vec {\bo a}=\lim_{k\to\infty}\vec{\bo a}_k$ exists in $\widehat{\cu K}^n$. Let $A=\range(\vec{\bo a})$. For each $k>0$ we have $\sa E_k=\l\vec{\bo a}=\vec{\bo b}_k\rr=\l\theta_k(\vec{\bo a})\rr$, so $\sa E_k\in\fo_\mathbb{B}(A)$. Thus $\fo_\mathbb{B}(A)$ is infinite, so (2) fails and (3) fails. \emph{Proof that (4) fails}: Let $\sa E_k$ be as in the preceding paragraph. The set $\fo_\mathbb{B}(A)$ is countable. But the closure $\cl(\fo_\mathbb{B}(A))$ is uncountable, because for each set $S\subseteq\mathbb{N}\setminus\{0\},$ the supremum $\bigsqcup_{k\in S}\sa E_k$ belongs to $\cl(\fo_\mathbb{B}(A))$. 
Thus by Theorem \ref{t-definableB}, $$\dcl_\mathbb{B}(A)=\cl(\fo_\mathbb{B}(A))\ne\fo_\mathbb{B}(A),$$ and (4) fails. \emph{Proof that (5), (6), and (7) fail}: By Corollary \ref{c-two}, there exist $\bo c, \bo d\in\cu K$ such that $\l \bo c\ne\bo d\rr=\top$. Let $C$ be the finite set $C=A\cup\{\bo c,\bo d\}$. By Remark \ref{r-sortK-definability}, for any event $\sa D\in\fo_\mathbb{B}( A)$, the characteristic function $1_{\sa D}$ of $\sa D$ with respect to $\bo c,\bo d$ is definable over $C$. Moreover, we always have $d_\mathbb{K}(1_{\sa D},1_{\sa E})=d_\mathbb{B}(\sa D,\sa E)$. It follows that $\fo(C)$ is infinite, so (5) and (6) fail. To show that (7) fails, we take an event $\sa D\in \dcl_\mathbb{B}(A)\setminus\fo_\mathbb{B}(A)$. By Theorem \ref{t-definableB} we have $\sa D\in\cl(\fo_\mathbb{B}(A))$. It follows that $1_{\sa D}\in\cl(\fo(C))$, so by Theorem \ref{t-separable}, $1_{\sa D}\in\dcl(C)$. Hence $\dcl(C)$ is uncountable. But $\fo(C)$ is countable, so (7) fails. \end{proof} By the Ryll-Nardzewski Theorem, if $T$ is $\aleph_0$-categorical then for each $n$, $T$ has finitely many $n$-types; so each type $p$ in the variables $(u,\vec v)$ has an \emph{isolating formula}, that is, a formula $\varphi(u,\vec v)$ such that $T\models \varphi(u,\vec v)\leftrightarrow \bigwedge p$. We now characterize the definable closure of a finite set $ A\subseteq \widehat{\cu K}$ in the case that $T$ is $\aleph_0$-categorical. Hereafter, when $A$ is a finite subset of $\widehat{\cu K}$, $\vec{\bo a}$ will denote a finite tuple whose range is $A$. \begin{cor} \label{c-cat-definable1} Suppose that $T$ is $\aleph_0$-categorical, $\bo b\in\widehat{\cu K}$, and $A$ is a finite subset of $\widehat{\cu K}$. 
Then $\bo{b}\in \dcl( A)$ if and only if: \begin{enumerate} \item $\bo b$ is pointwise definable over $A$; \item for every isolating formula $\varphi(u,\vec v)$, if $\mu(\l\varphi(\bo b,\vec{\bo a})\rr)>0$ then $$\l\varphi(\bo b,\vec{\bo a})\rr=\l(\exists u)\varphi(u,\vec{\bo a})\rr.$$ \end{enumerate} \end{cor} \begin{proof} $(\Rightarrow)$: Suppose $\bo b\in \dcl({ A})$. (1) holds by Lemma \ref{l-pointwisedefinable}. Suppose $\varphi(u,\vec v)$ is isolating and $\mu(\l\varphi(\bo b,\vec{\bo a})\rr)>0$. We have $\l\varphi(\bo b,\vec{\bo a})\rr\in\fo_\mathbb{B}(\{\bo b\}\cup{ A})$, so by Corollary \ref{c-dcl3}, $\l\varphi(\bo b,\vec{\bo a})\rr\in\dcl_\mathbb{B}(A)$. By Theorem \ref{t-categorical}, $\l\varphi(\bo b,\vec{\bo a})\rr\in\fo_\mathbb{B}(A)$. We note that $(\exists u)\varphi(u,\vec v)$ is an isolating formula, so $\l(\exists u)\varphi(u,\vec{\bo a})\rr$ is an atom of $\fo_\mathbb{B}(A)$. Therefore (2) holds. $(\Leftarrow)$: Assume (1) and (2). By (2), for every isolating formula $\varphi(u,\vec v)$ such that $\mu(\l\varphi(\bo b,\vec{\bo a})\rr)>0$, we have $$\l\varphi(\bo b,\vec{\bo a})\rr\in\fo_\mathbb{B}(A).$$ Every formula $\theta(u,\vec v)$ is $T$-equivalent to a finite disjunction of isolating formulas in the variables $(u,\vec v)$. It follows that $\fo_\mathbb{B}(A\cup\{\bo b\})\subseteq\fo_\mathbb{B}(A)$. Therefore by Corollary \ref{c-dcl3}, $\bo b\in\dcl(A)$. \end{proof} \begin{cor} \label{c-cat-definable2} Suppose that $T$ is $\aleph_0$-categorical, $\bo b\in\widehat{\cu K}$, and $A$ is a finite subset of $\widehat{\cu K}$. Then $\bo{b}\in \dcl({ A})$ if and only if for every isolating formula $\psi(\vec v)$ there is a functional formula $\varphi(u,\vec v)$ such that $\l\psi(\vec{\bo a})\rr\sqsubseteq\l\varphi(\bo b,\vec {\bo a})\rr.$ \end{cor} \begin{proof} $(\Rightarrow)$: Suppose $\bo b\in \dcl({ A})$. 
By Theorem \ref{t-categorical}, $\bo b$ is first order definable over $\vec{\bo a}$, so there is a functional formula $\varphi(u,\vec v)$ such that $\l\varphi(\bo b,\vec{\bo a})\rr=\top$. Then for every isolating $\psi(\vec v)$ we have $\l\psi(\vec{\bo a})\rr\sqsubseteq\l\varphi(\bo b,\vec{\bo a})\rr.$ $(\Leftarrow)$: There is a finite set $\{\psi_0(\vec v),\ldots,\psi_k(\vec v)\}$ that contains exactly one isolating formula for each $|\vec{\bo a}|$-type of $T$. By hypothesis, for each $i\le k$ there is a functional formula $\varphi_i(u,\vec v)$ such that $\l\psi_i(\vec{\bo a})\rr\sqsubseteq\l\varphi_i(\bo b,\vec{\bo a})\rr.$ Since the formulas $\psi_i(\vec v)$ are pairwise inconsistent, the formula $\bigvee_{i=0}^k (\psi_i(\vec v)\wedge\varphi_i(u,\vec v))$ is functional, and $$\l \bigvee_{i=0}^k (\psi_i(\vec{\bo a})\wedge\varphi_i(\bo b,\vec{\bo a}))\rr=\top.$$ Hence $\bo b$ is first order definable over $\vec{\bo a}$, so by Lemma \ref{l-firstorderdefinable} we have $\bo b\in \dcl({ A})$. \end{proof} \subsection{The Theory $\DLO^R$} We will use Corollary \ref{c-cat-definable2} to give a more natural characterization of the definable closure of a finite parameter set in a model of $\DLO^R$, where $\DLO$ is the theory of dense linear order without endpoints. Note that in $\DLO$, every type in $(v_1,\ldots,v_n)$ has an isolating formula of the form $\bigwedge_{i=1}^{n-1} u_i\alpha_i u_{i+1}$ where $\{u_1,\ldots u_n\}=\{v_1,\ldots,v_n\}$ and each $\alpha_i\in\{<,=\}$. (This formula linearly orders the equality-equivalence classes). \begin{cor} \label{c-DLO-definable} Let $T=\DLO$, $\bo b\in\widehat{\cu K}$, and $A$ be a finite subset of $\widehat{\cu K}$. Then $\bo{b}\in \dcl({ A})$ if and only if for every isolating formula $\psi(v_1,\ldots,v_n)$ there is an $i\in\{1,\ldots,n\}$ such that $\l\psi(\vec {\bo a})\rr\sqsubseteq\l \bo b= \bo a_i\rr.$ \end{cor} \begin{proof} For any $\cu M\models\DLO$ and parameter set $A$, we have $\dcl^{\cu M}(A)=A$. 
Therefore for every isolating formula $\psi(v_1,\ldots,v_n)$ and functional formula $\varphi(u,v_1,\ldots,v_n)$ there exists $i\in\{1,\ldots,n\}$ such that $$\DLO\models(\psi(v_1,\ldots,v_n)\wedge\varphi(u,v_1,\ldots,v_n))\rightarrow u=v_i.$$ The result now follows from Corollary \ref{c-cat-definable2}. \end{proof} In the theory $\DLO$, we define $\min(u,v)$ and $\max(u,v)$ in the usual way. For $\bo a,\bo b\in\widehat{\cu K}$, we let $\min(\bo a,\bo b)$ be the unique element $\bo e\in\widehat{\cu K}$ such that $$\l e=\min(a,b)\rr=\top,$$ and similarly for $\max$. For finite subsets $A$ of $\widehat{\cu K}$, $\min(A)$ and $\max(A)$ are defined by repeating the two-variable functions $\min$ and $\max$ in the natural way. We next show that in $\DLO^R$, the definable closure of a finite set can be characterized as the closure under a ``choosing function'' of four variables. \begin{df} In the theory $\DLO$, let $\ell$ be the function of four variables defined by the condition $$ \ell(u,v,x,y)=x \mbox{ if } u < v, \mbox{ and } \ell(u,v,x,y) = y \mbox{ if not } u < v.$$ For $\bo a,\bo b,\bo c,\bo d\in\cu K$, let $\ell(\bo a,\bo b,\bo c,\bo d)$ be the unique element $\bo e\in\widehat{\cu K}$ such that $\l e=\ell(a,b,c,d)\rr=\top$. Given a set $ A\subseteq\widehat{\cu K}$, let $\lcl( A)$ be the closure of $ A$ under the function $\ell$. \end{df} Note that in $\DLO$, the function $\ell$ is definable without parameters. In both $\DLO$ and $\DLO^R$, $\min(u,v)=\ell(u,v,u,v)$, and $\max(u,v)=\ell(u,v,v,u)$. \begin{prop} \label{p-DLO} Let $T=\DLO$. Then for every finite subset $A$ of $\widehat{\cu K}$, $\dcl( A)=\lcl( A)$. \end{prop} \begin{proof} It is clear that $\lcl( A)\subseteq\dcl( A)$. We prove the other inclusion. If $A$ is empty, the result is trivial, so we assume $A$ is non-empty. Let $\bo 0=\min(A), \bo 1=\max(A)$. We have $\bo 0, \bo 1\in\lcl( A)$. Let $\Omega_0=\l 0<1\rr$. Note that $\Omega\setminus\Omega_0=\l 0=1\rr$. 
If $\mu(\Omega_0)=0$, then $ A$ is a singleton, and we trivially have $\lcl( A)=\dcl( A)= A$. We may therefore assume that $\mu(\Omega_0)>0$. To simplify notation we will instead assume that $\Omega_0=\Omega$; the argument in the general case is similar. In the following, all characteristic functions are understood to be with respect to $\bo 0, \bo 1$. Note that $\ell(\bo a,\bo b,\bo 0,\bo 1)$ is the characteristic function of the event $\l \bo a <\bo b\rr$. If $\bo d$ is the characteristic function of an event $\sa D$ and $\bo e$ is the characteristic function of an event $\sa E$, then $\ell(\bo d,\bo 1,\bo 1,\bo 0)$ is the characteristic function of $\neg\sa D$, $\min(\bo d,\bo e)$ is the characteristic function of $\sa D\sqcap\sa E$, and $\max(\bo d,\bo e)$ is the characteristic function of $\sa D\sqcup\sa E$. It follows that for every quantifier-free first order formula $\varphi(\vec v)$ of $\DLO$ with $|\vec v|=|\vec{\bo a}|$, the characteristic function of the event $\l\varphi(\vec{\bo a})\rr$ belongs to $\lcl(A)$. Since $\DLO$ admits quantifier elimination, the characteristic function of every event that is first order definable over $A$ belongs to $\lcl( A)$. Hence by Theorem \ref{t-categorical}, the characteristic function of every event in $\dcl_\mathbb{B}(A)$ belongs to $\lcl(A)$. Moreover, for every $\bo c\in A$ and event $\sa D\in\dcl_\mathbb{B}(A)$ with characteristic function $\bo d$, $\bo c\upharpoonright{\sa D}:=\ell(\bo d,\bo 1,\bo 0,\bo c)$ is the element that agrees with $\bo c$ on $\sa D$ and agrees with $\bo 0$ on the complement of $\sa D$, so $\bo c\upharpoonright{\sa D}$ belongs to $\lcl( A)$. Let $\{\sa D_1,\ldots,\sa D_n\}$ be the set of atoms of $\dcl_\mathbb{B}( A)$ (which is finite because $\DLO$ is $\aleph_0$-categorical). By Corollary \ref{c-DLO-definable}, every element of $\dcl( A)$ has the form $$ \max(\bo c_1\upharpoonright\sa D_1,\ldots,\bo c_n\upharpoonright\sa D_n)$$ for some $\bo c_1,\ldots,\bo c_n\in A$. 
Therefore $\dcl( A)\subseteq\lcl( A)$. \end{proof} \begin{ex} In this example we show that the exchange property fails for $\DLO^R$, even though it holds for $\DLO$. Thus the exchange property is not preserved under randomizations. Let $T=\DLO$. By Fullness, there exist elements $\bo a, \bo b\in\widehat{\cu K}$ such that $\max(\bo a,\bo b)\notin\{\bo a,\bo b\}$. Let $\bo c=\max(\bo a,\bo b), \bo d=\min(\bo a,\bo b)$. It is easy to check that $$\dcl(\{\bo a,\bo b\})=\{\bo a,\bo b,\bo c,\bo d\}, \quad \dcl(\{\bo a,\bo c\})=\{\bo a,\bo c\},\quad \dcl(\{\bo a\})=\{\bo a\}.$$ Thus $\bo c\in\dcl(\{\bo a,\bo b\})\setminus\dcl(\{\bo a\})$ but $\bo b\notin\dcl(\{\bo a,\bo c\})$. \end{ex} \section*{References} [AK] Uri Andrews and H. Jerome Keisler. Randomizations of Theories with Countably Many Countable Models. To appear. Available online at www.math.wisc.edu/$\sim$Keisler. [Be] Ita\"i{} Ben Yaacov. On Theories of Random Variables. To appear, Israel J. Math. ArXiv:0901.1584v3 (2001). [BBHU] Ita\"i{} Ben Yaacov, Alexander Berenstein, C. Ward Henson and Alexander Usvyatsov. Model Theory for Metric Structures. To appear, Lecture Notes of the London Math. Society. [BK] Ita\"i{} Ben Yaacov and H. Jerome Keisler. Randomizations of Models as Metric Structures. Confluentes Mathematici 1 (2009), pp. 197-223. [BU] Ita\"i{} Ben Yaacov and Alexander Usvyatsov. Continuous first order logic and local stability. Transactions of the American Mathematical Society 362 (2010), no. 10, 5213-5259. [CK] C.C.Chang and H. Jerome Keisler. Model Theory. Dover 2012. [EG] Clifton Early and Isaac Goldbring. Thorn-Forking in Continuous Logic. Journal of Symbolic Logic 77 (2012), 63-93. [Go1] Isaac Goldbring. Definable Functions in Urysohn's Metric Space. To appear, Illinois Journal of Mathematics. [Go2] Isaac Goldbring. An Approximate Herbrand's Theorem and Definable Functions in Metric Structures. Math. Logic Quarterly 50 (2012), 208-216. [Go3] Isaac Goldbring. Definable Operators on Hilbert Spaces. 
Notre Dame Journal of Formal Logic 53 (2012), 193-201. [GL] Isaac Goldbring and Vinicius Lopes. Pseudofinite and Pseudocompact Metric Structures. To appear, Notre Dame Journal of Formal Logic. Available online at www.homepages.math.uic.edu/$\sim$isaac. [Ke1] H. Jerome Keisler. Randomizing a Model. Advances in Math 143 (1999), 124-158. [Ke2] H. Jerome Keisler. Separable Randomizations of Models. To appear. Available online at www.math.wisc.edu/$\sim$Keisler. \end{document}
\begin{document} \title{Quantum optimization using variational algorithms on near-term quantum devices} \author{Nikolaj~Moll$^1$, Panagiotis~Barkoutsos$^1$, Lev~S.~Bishop$^2$, Jerry~M.~Chow$^2$, Andrew Cross$^2$, Daniel~J.~Egger$^1$, Stefan~Filipp$^1$, Andreas~Fuhrer$^1$, Jay~M.~Gambetta$^2$, Marc~Ganzhorn$^1$, Abhinav~Kandala$^2$, Antonio~Mezzacapo$^2$, Peter~M\"uller$^1$, Walter~Riess$^1$, Gian~Salis$^1$, John~Smolin$^2$, Ivano~Tavernelli$^1$, and Kristan~Temme$^2$} \address{$^1$ IBM Research -- Zurich, S\"aumerstrasse 4, 8803 R\"uschlikon, Switzerland} \address{$^2$ IBM T.J. Watson Research Center, Yorktown Heights, NY 10598, USA} \date{\today} \begin{abstract} Universal fault-tolerant quantum computers will require error-free execution of long sequences of quantum gate operations, which is expected to involve millions of physical qubits. Before the full power of such machines will be available, near-term quantum devices will provide several hundred qubits and limited error correction. Still, there is a realistic prospect to run useful algorithms within the limited circuit depth of such devices. Particularly promising are optimization algorithms that follow a hybrid approach: the aim is to steer a highly entangled state on a quantum system to a target state that minimizes a cost function via variation of some gate parameters. This variational approach can be used both for classical optimization problems as well as for problems in quantum chemistry. The challenge is to converge to the target state given the limited coherence time and connectivity of the qubits. In this context, the \emph{quantum volume} as a metric to compare the power of near-term quantum devices is discussed. With focus on chemistry applications, a general description of variational algorithms is provided and the mapping from fermions to qubits is explained. Coupled-cluster and heuristic trial wave-functions are considered for efficiently finding molecular ground states. 
Furthermore, simple error-mitigation schemes are introduced that could improve the accuracy of determining ground-state energies. Advancing these techniques may lead to near-term demonstrations of useful quantum computation with systems containing several hundred qubits. \end{abstract} \pacs{quantum computation, quantum chemistry, quantum algorithms} \maketitle \tableofcontents \section{Introduction} Recent advances in the field of quantum computing have boosted the hope that one day complex problems can be solved efficiently on quantum computers. The ultimate goal is a universal fault-tolerant quantum computer that runs arbitrary algorithms much faster than on a classical computer. However, millions of physical qubits and high-fidelity gate operations are required to implement a universal fault-tolerant quantum computer, a system that currently cannot be built. Yet, quantum devices with a couple of hundred physical qubits with limited or no error correction are likely to become available in the near future. With it comes the question of how to exploit these devices for useful calculations. In this paper, we discuss how the variational quantum eigensolver can be run on near-term quantum devices to tackle optimization problems that are exponentially hard on classical computers. We differentiate between two types of optimization problems. The first kind are quantum optimization problems, such as finding the ground state of a complex molecule or the simulation of its dynamics. In this case, optimization typically involves minimization of the total energy as described by the energy expectation value of a non-trivial Hamiltonian as a function of some molecular parameters, such as interatomic distances. The second kind are classical optimization problems which can usually be mapped onto a relatively simple Ising-type Hamiltonian. 
In both cases, exponential scaling of the required computational resources with the problem size can make the problems hard to solve or even intractable on classical computers. Generally, optimization problems are solved by finding the extremum of an objective function, such as cost, energy, profit or error. As the cost function typically depends on a large set of parameters, finding a solution involves searching a high-dimensional parameter space, which quickly makes a brute-force approach unfeasible. A quantum computer operates on Hilbert space, which grows exponentially as $2^N$ with the number of qubits $N$. The idea is to use this vast state space with the help of quantum entanglement, and thus boost the efficiency in finding the right solution, ideally with exponential speed-up~\cite{lloyd_universal_1996, abrams_simulation_1997, abrams_quantum_1999, aspuru-guzik_simulated_2005, harrow_quantum_2009}. A more careful analysis shows, however, that the speed-up for classical optimization problems is in many cases rather modest~\cite{denchev_whatis_2016, albash_evidence_2017, smolin_classical_2014}. In contrast, one can benefit from quantum speed-up in problems that are directly related to the quantum-mechanical description of nature itself. A prominent example is finding the many-electron wavefunction of a molecular system. Classical computers fail to solve such problems exactly for more than a few tens of electrons because of the exponential increase of Hilbert space with the number of electrons. The large state space of a quantum computer can be used to simulate a chemical system and calculate its properties, including correlations and reaction rates, once the challenge of efficiently mapping the fermionic problem to the available qubit hardware is overcome. 
In fact, on a quantum device the natural way is to solve the chemical system in second quantization~\cite{abrams_quantum_1999, aspuru-guzik_simulated_2005, buluta_quantum_2009, lanyon_towards_2010, brown_using_2010, temme_quantum_2011, kassal_simulating_2011, whitfield_simulation_2011, aspuru-guzik_photonic_2012, whitfield_computational_2012, yung_quantumquantum_2012, jones_faster_2012, toloui_quantum_2013, wecker_gate-count_2014, georgescu_quantum_2014, hastings_improving_2015, poulin_trotter_2015, mueck_quantum_2015, garcia-alvarez_quantum_2015, whitfield_unified_2015, reiher_elucidating_2017, babbush_exponentially_2016, whitfield_local_2016, wendin_quantum_2016, popkin_quest_2016, shen_quantum_2017, colless_robust_2017} formulated in terms of fermionic annihilation and creation operators. Because of the different statistics there is no direct one-to-one mapping: each fermion operator must be represented by a string of qubit operators, which induces long-range qubit-qubit correlations in the system and places demanding requirements on the connectivity and the number of gates (see Section~\ref{sec:mappingqubits}). To compute the quantum evolution of chemical systems on a digital quantum computer, decomposition into discrete time steps is required and accordingly long gate sequences \cite{abrams_quantum_1999, whitfield_simulation_2011, omalley_scalable_2016}. On current quantum devices, gate errors and decoherence restrict the number of sequential gate operations that can be performed while keeping a meaningful, coherent quantum state. Moreover, connectivity between qubits is limited by the physical routing of the wires on a qubit chip. 
This is why a new class of hybrid classical-quantum algorithms, called the variational quantum eigensolver (VQE)~\cite{li_solving_2011, tempel_quantum_2012, peruzzo_variational_2014, omalley_scalable_2016, wecker_progress_2015, li_efficient_2017, mcclean_theory_2016, colless_robust_2017,farhi_quantum_2014, farhi_quantum_2014-1, farhi_quantum_2016}, holds a lot of prospects for near-term quantum-computing systems (see Fig.~\ref{fig:hybrid}). \begin{figure} \caption{Schematic of a hybrid quantum classical computing architecture.} \label{fig:hybrid} \end{figure} These algorithms work with short-depth circuits and will result in approximate results when the number of qubits, their coherence and the connectivity are large enough. These requirements on the quantum system can be quantified by the \emph{quantum volume}~\cite{bishop_quantum_2017}, a hardware-independent figure of merit for the power of a quantum computer. The VQE can be used both for classical optimization problems as well as for fermionic Hamiltonians describing, e.~g., quantum chemistry. In quantum chemistry the variational quantum eigensolver is used to calculate ground states~\cite{li_solving_2011, tempel_quantum_2012, peruzzo_variational_2014, omalley_scalable_2016, wecker_progress_2015, li_efficient_2017, colless_robust_2017} of chemical systems. The high-dimensional trial wavefunctions, which are costly to represent on a classical computer, are generated on the quantum computer using parametrized single-qubit and entangling gates. The optimization of the gate parameters is performed on a classical computer by summing expectation values of the qubit operators measured on the quantum device and thereby calculating the total energy as a cost function. This can in principle lead to very short-depth circuits which ideally run in a time that is shorter than the coherence time of the quantum computer. 
The same variational quantum eigensolver can be applied to other physical systems in condensed matter such as the Fermi-Hubbard model~\cite{abrams_simulation_1997, verstraete_mapping_2005, temme_quantum_2011, yung_quantumquantum_2012, barends_digital_2015, reiner_emulating_2016, havlicek_operator_2017} and spin systems~\cite{heras_digital_2014, you_quantum_2010, biamonte_nonperturbative_2008, zintchenko_local_2015}. Hybrid algorithms are, however, not resilient against decoherence and gate errors, which may lead to inaccurate estimates of the expectation values. Currently available error-correction schemes, such as those based on surface codes \cite{fowler_surface_2012}, require a significant number of qubits, rendering quantum simulations of practical systems challenging in the near future. Still, novel schemes that do not require ancillas or code qubits can help mitigate induced errors, enabling longer and bigger quantum computations. Such error mitigation schemes \cite{schwenk_reconstructing_2017, temme_error_2016} need to be developed further and tested to improve accuracy without the full overhead of error-correction codes for universal quantum computing. The paper is structured as follows: The quantum volume is discussed in Section~\ref{sec:qv} before we explain the variational quantum eigensolver in Section~\ref{sec:vir} and its application to quantum chemistry problems in Section~\ref{sec:chemistry}. After a brief discussion of the prospects of solving classical optimization problems with near-term quantum devices in Section~\ref{sec:qaoa}, we elaborate on the choice of suitable optimizers for the classical feedback in the VQE in Section~\ref{sec:opt} and discuss the prospects of fighting back decoherence in near-term quantum devices without full error correction in Section~\ref{sec:error}. Finally, we conclude in Section~\ref{sec:conclusion}. 
\section{Quantum volume, a metric for near-term quantum devices} \label{sec:qv} For current quantum processors, various architectures and physical qubit realizations are being considered. While quantum systems based on superconducting qubits~\cite{corcoles_demonstration_2015, takita_demonstration_2016, riste_detecting_2015, omalley_scalable_2016, barends_superconducting_2014, ofek_extending_2016, qx_ibm_quantum_2016} at the moment seem to be leading the way, ion-trap-based systems~\cite{debnath_demonstration_2016, monz_realization_2016} are close competitors. Furthermore, semiconductor-based spin qubits~\cite{veldhorst_two-qubit_2015, zajac_quantum_2017, nichol_high-fidelity_2017} and other quantum architectures~\cite{mourik_signatures_2012, sarma_majorana_2015, obrien_optical_2007} may still become important in the future. Given the different hardware implementations it is often difficult to benchmark the usefulness or power of quantum systems, which is why a hardware-independent measure is required. To define a suitable metric, we first note that a quantum computer's performance depends on five main hardware parameters: \begin{enumerate} \item Number of physical qubits $N$ \item Connectivity between qubits \item Number of gates that can be applied before errors or decoherence mask the result \item Available hardware gate set \item Number of operations that can be run in parallel \end{enumerate} With the goal to quantify a quantum computer's power with a single parameter, we would like to consider a metric based on the question `can this device run a given algorithm?'. For any given instance of a quantum algorithm, there is a lower bound on the number of qubits $N$ required to run the algorithm, as well as the necessary number of steps (or circuit depth) $d$. 
We therefore define a {\it quantum volume} $V_{\rm Q}$~\cite{bishop_quantum_2017} that takes into account both the number of qubits $N$ and the allowable depth $d$ of quantum circuits that can be run on a near-term quantum device. In the simplest case, we could just choose the quantum volume to be $d\cdot N$; however, this has some undesirable properties in that it can be gamed in various ways. For example, in many cases the smallest error rates and therefore the largest circuit depth will result from very few qubits, even $N = 2$, as in this case there will be less connectivity and parallelization overhead and fewer issues with crosstalk between qubits. However, clearly $N = 2$ is a completely uninteresting limit. Also the other extreme, where a device has many qubits but little coherence, i.e.\ $d \approx 1$, is not interesting because such a system cannot use entanglement as a resource and calculations become effectively classical. We therefore conceptually define the quantum volume as \begin{equation} \label{eqn:qv1} \tilde V_{\rm Q} = \min\left[N, d(N)\right]^2 \,. \end{equation} Here, the number of qubits $N$ is an easily accessible hardware parameter; however, the achievable circuit depth $d(N)$ needs further specification in terms of the hardware parameters given in the list above. We start by considering one step of a quantum algorithm (a depth-one circuit) on a number of $N$ qubits. Such a step is expressed as a unitary operator that can be written as a tensor product of randomly chosen arbitrary two-qubit gates on disjoint pairs of qubits (see step 1 in Fig.~\ref{fig:fig2}(a)). Here, we allow any unitary two-qubit operation in the SU(4) group, which may consist of a combination of one- and two-qubit gates on the actual hardware. Then an effective error rate $\epsilon_{\rm eff}$ is defined as the error rate per two-qubit gate averaged over many realizations of such depth-one circuits. 
Therefore, $\epsilon_{\rm eff}$ depends on the gate overhead required when all-to-all connectivity, full parallelism and a suitable gate set are not available. Thereby, it also encapsulates both the errors of single- and two-qubit gates. If the hardware supports all possible two-qubit gates directly (requiring an all-to-all connectivity) with identical error rate $\epsilon$, and in addition allows unlimited gate parallelism, then $\epsilon_{\rm eff} = \epsilon$. If the connectivity is limited, then it will be necessary to insert additional SWAP gates to permute the qubits in order to implement the random two-qubit gates, leading to an increase of $\epsilon_{\rm eff} > \epsilon$. A planar nearest-neighbor qubit coupling would lead to an effective error rate of $\epsilon_{\rm eff} \propto \sqrt{N} \epsilon$, and a linear chain of qubits would yield an effective error rate of $\epsilon_{\rm eff} \propto N \epsilon$. On the other hand, hardware that supports more complex gates such as the Toffoli gate directly or the use of a compiler which efficiently compresses the gates of a test circuit could also lead to a situation with $\epsilon_{\rm eff} < \epsilon$. Other special features and limitations of the hardware must be dealt with in a similar manner. The error rate of a single circuit step scales with the number of simultaneous two-qubit gates $\epsilon_{\rm 1step}\propto N \epsilon_{\rm eff}$. In other words, we can estimate the circuit depth in which, on average, a single error occurs as $d \simeq 1/(N \epsilon_{\rm eff})$, linking the effective error $\epsilon_{\rm eff}$ to the previous definition of the quantum volume using the circuit depth. As an example, if an effective error rate $\epsilon_{\rm eff} = 10^{-4}$ is experimentally achievable, depth $d = 10$ algorithms could be run on a 1000-qubit device, and $d=100$ algorithms on a 100-qubit device.
However, the effective error rate $\epsilon_{\rm eff}$ will depend not only on the gate error rates and the connectivity but, more generally, on the complexity of the quantum system which grows with the number of qubits, for example, because of crosstalk. The effective error rate $\epsilon_{\rm eff}(N)$ will therefore likely be a function of $N$ even if full connectivity is available. Moreover, $\epsilon_{\rm eff}$ also depends on the sophistication of the scheduling algorithm responsible for mapping the quantum algorithm considered to the hardware. Both hardware and software improvements will thus impact the effective error rate $\epsilon_{\rm eff}(N)$. Finally, we note that with this definition the allowable circuit depth $d \simeq 1/(N \epsilon_{\rm eff})$ decreases with $N$ at constant effective error $\epsilon_{\rm eff}$, which means that a system's quantum volume decreases if more qubits with the same fidelity are made available on the hardware. However, a given algorithm does not necessarily need all $N$ available qubits. It could even be beneficial for an algorithm that requires $n<N$ qubits to run on an $N$-qubit machine when a subset of qubits with good connectivity is selected. We therefore further refine the definition of the quantum volume in Eq.~(\ref{eqn:qv1}): \begin{equation} V_{\rm Q} = \max\limits_{n < N}\left(\min\left[n,\frac{1}{n\epsilon_{\rm eff}(n)}\right]^2\right) \, , \end{equation} where the maximum is taken over an arbitrary choice of $n$ qubits to maximize the quantum volume that can be obtained with such a subset. To illustrate this, we plot an example quantum circuit with two circuit steps and the functional dependence of the quantum volume on the number of qubits and an effective two-qubit error rate in Fig.~\ref{fig:fig2}. \begin{figure} \caption{(a) Example quantum circuit with two circuit steps.
Step 2 requires different connectivity and would lead to an increased gate count on quantum hardware with only nearest neighbor interactions. This is illustrated to the right of step 2. (b) Quantum volume as a function of the effective error rate $\epsilon_{\rm eff}$ and the number of qubits $N$.} \label{fig:fig2} \end{figure} The dashed line denotes the tipping point where $d = 1/(N \epsilon_{\rm eff}) = N$. From any point on this line, a significant increase in $V_{\rm Q}$ requires improvements in both $\epsilon_{\rm eff}$ and $N$. We also see that the usefulness of current quantum devices is likely limited by the typical effective error rates, which are $\epsilon_{\rm eff} > 10^{-3}$. To improve $\epsilon_{\rm eff}$ we will have to start encoding quantum states in logical qubits with an overhead in the number of physical qubits. This will eventually lead to fault tolerant quantum computing. The quantum volume is therefore an architecture-neutral metric that characterizes the capability of a chosen quantum computing architecture to run useful quantum circuits. It enables the comparison of hardware with widely different performance characteristics and quantifies the complexity of algorithms that can be run on such a system. An important conclusion that we can draw for the usefulness of near-term quantum devices is that when increasing the number of qubits the power of the quantum device will increase only if the effective error rate is improved at the same time. \section{Exploring Hilbert space with the variational quantum eigensolver} \label{sec:vir} To exploit near-term quantum devices, applications and algorithms have to be tailored to current quantum hardware with only tens or hundreds of qubits and without full quantum error correction. One main constraint is the limited quantum volume that restricts the depth of meaningful quantum circuits. Still, a small-scale quantum computer with a hundred qubits can process quantum states that cannot even be stored in any classical memory.
A natural way to make use of this quantum advantage is via a hybrid quantum-classical architecture: A quantum co-processor prepares multi-qubit quantum states $|\Psi(\bm{\theta})\rangle$ parametrized by control parameters $\bm{\theta}$. The subsequent measurement of a cost function $E_q(\bm{\theta}) = \langle\Psi(\bm{\theta}) | H_q | \Psi(\bm{\theta})\rangle$, typically the energy of a problem Hamiltonian $H_q$, enables a classical computer to find new values $\bm{\theta}$ in order to minimize $E_q(\bm{\theta})$ and find the ground-state energy \begin{equation} \label{eq:Hq} E_q^{\rm{min}} = \min_{\bm{\theta}} \left(\langle\Psi(\bm{\theta}) | H_q | \Psi(\bm{\theta})\rangle\right) \, . \end{equation} This variational quantum eigensolver approach to Hamiltonian-problem solving has been recently applied in different contexts \cite{Barrett2013, peruzzo_variational_2014, omalley_scalable_2016, mcclean_theory_2016, eichler_exploring_2015, kandala_hardware-efficient_2017}. In fact, the Hamiltonian $H_q$ can take many forms, the only requirement being that it can be mapped to a system of interacting qubits with a non-exponentially increasing number of terms. Here we distinguish two relevant cases: Hamiltonians that describe fermionic condensed-matter or molecular systems (Section \ref{sec:chemistry}) and Hamiltonians that describe a classical optimization problem (Section \ref{sec:qaoa}). \subsection{Variational quantum eigensolver method} In detail, the variational quantum eigensolver method consists of four main steps as shown in Figure \ref{fig:algorithm}. \begin{figure} \caption{Variational quantum eigensolver method. The trial states, which depend on a few classical parameters $\bm{\theta}$, are prepared on the quantum processor.} \label{fig:algorithm} \end{figure} First, on the quantum processor a tentative variational eigenstate, a trial state, $| \Psi(\bm{\theta}) \rangle$ is generated by a sequence of gates parameterized by a set of control parameters $\bm{\theta}$.
In the ideal case, this trial state depends on a small number of classical parameters $\bm{\theta}$, whereas the set of gates is chosen to efficiently explore Hilbert space. In particular, the class of states forming the solution to the minimization problem in Eq.~(\ref{eq:Hq}) has to lie within the set of possible trial states. Suitable gate sets which provide a good approximation to the wanted target state, which minimizes the cost function, have been found for both classical optimization problems \cite{farhi_quantum_2014} (Section \ref{sec:qaoa}) and quantum chemistry problems (Section \ref{sec:chemistry}). Aside from these considerations, it is also essential that hardware constraints be taken into account. As not all gates are directly realizable in hardware, decomposing them into those available in the quantum hardware adds extra overhead in circuit depth. An alternative is, therefore, to use a heuristic approach based on gates that are readily available in hardware \cite{kandala_hardware-efficient_2017} as discussed below. Second, once the trial state has been prepared, the expectation value of the problem Hamiltonian $H_q$ is determined. The problem Hamiltonian can be decomposed into Pauli strings $P_\alpha = \sigma_1^{\alpha_1}\otimes \sigma_2^{\alpha_2}\otimes \ldots \sigma_N^{\alpha_N}$ with single-qubit Pauli operators $\sigma_i^j \in \{\mathds{1},\sigma_i^x,\sigma_i^y,\sigma_i^z\}$ and the identity operator $\mathds{1}$, \begin{equation} H_q= \sum_\alpha h_\alpha P_\alpha. \label{eq:Hqpauli} \end{equation} $N$ denotes the number of qubits. To determine the expectation value of each Pauli operator in $P_\alpha$, each single qubit's population is measured repeatedly for a given number of experiments with identical trial state preparation $|\Psi(\bm{\theta})\rangle$.
This corresponds to measuring $\sigma_j^z$ for each qubit; other Pauli operators can be determined by applying a pre-rotation on the qubit before the measurement that effectively rotates the measurement axis. To determine the expectation value of the Pauli strings, the measurement outcomes are multiplied for each run of the experiment and then averaged. In a third step, the cost function $E_q(\bm{\theta}) = \langle \Psi(\bm{\theta})| H_q | \Psi(\bm{\theta})\rangle = \sum\limits_\alpha h_\alpha \langle \Psi(\bm{\theta})| P_\alpha | \Psi(\bm{\theta})\rangle $ is calculated by summing up the expectation values of $P_\alpha$ with corresponding coefficients $h_\alpha$. Finally, the value of $E_q(\bm{\theta})$ is minimized as a function of the parameters $\bm{\theta}$. A classical optimization algorithm processes $E_q(\bm{\theta})$ and provides new parameters $\bm{\theta}$. For each parameter set, a new set of gates for trial state preparation has to be loaded onto the quantum processor. As this requires rather time-consuming re-programming of the quantum hardware, it is important that only a minimal number of queries should be made to the quantum processor. Moreover, the calculated expectation values will be noisy because of the limited sampling statistics of the qubit state. Therefore, robust classical optimizers have to be used that can handle the noise on the measured expectation values and scale favorably with the number of parameters as described in Section~\ref{sec:opt}. The procedure ends when the minimum of $E_q(\bm{\theta})$ in Eq.~(\ref{eq:Hq}) is reached within a given accuracy and the optimal parameters $\bm\theta^*$ are found. \section{Quantum chemistry with qubits} \label{sec:chemistry} To demonstrate the potential of a quantum processor with limited quantum volume, one needs to consider quantum algorithms that provide a large scaling advantage compared with their classical counterparts.
The solution of the electronic structure problem in quantum chemistry belongs to this class: Because of the exponential scaling of the problem, it is impossible to find an exact solution to the Schr\"odinger equation of systems with more than a few tens of electrons on a classical computer. Several approximations have been introduced to access the properties of large-scale systems with more than 1000 electrons on high-performance computers. The aim is to reach the required accuracy for chemical energies ($\sim 50~\mathrm{meV}$). One approach is to approximate the many-electron Hamiltonian itself using, for example, density-functional theory~\cite{kohn_self-consistent_1965}. There, the original system of interacting electrons is replaced by a fictitious one of non-interacting electrons moving in a modified external potential that allows, at least \textit{in principle}, the original exact solution to be recovered. An alternative approach starts from the exact Hamiltonian and attempts to find suitable approximations for the system wavefunction in the many-electron Hilbert space. This calculation can, in principle, be performed either within the first or the second quantization formalism. In first quantization, all spatial integrals have to be evaluated on the quantum computer. For this reason, approaches based on second quantization are more suited for first-generation quantum devices. In this case, all spatial integrals are evaluated beforehand on a classical computer, whereas the sampling of the Hilbert space is performed in the orbital configuration space spanned by molecular Slater determinants. This approach maps naturally to the variational method described above (Section \ref{sec:vir}). It starts from the one-electron basis states that are obtained by solving the Hartree-Fock equation.
These Hartree-Fock orbitals are then used to construct an anti-symmetrized product wavefunction, the Slater determinant, which is used as a starting point for a perturbative expansion. In this expansion a controlled series of {\it excited} configurations is added until a sufficiently accurate approximation of the ground state is found. \subsection{Mapping fermions to qubits} \label{sec:mappingqubits} The electronic Hamiltonian in second quantization is given by \begin{equation} H_{F} = \sum_{ij} t_{ij} a^{\dagger} _i a_j+ \sum_{ijkl} u_{ijkl} \, \, a^{\dagger}_i a^{\dagger}_k a_l a_j \, , \label{eq:Second_quant_Ham} \end{equation} where the operators $a_i^{\dagger}$ and $a_i$ create and annihilate electrons in the $i$-th orbital. The parameters $t_{ij}$ and $u_{ijkl}$ describe the one- and two-electron interactions and can be efficiently computed classically as the overlap integrals of the orbitals in the basis set~\cite{jorgensen_second_1981}. The two-electron term scales at most with the number of orbitals to the fourth power~\cite{aspuru-guzik_simulated_2005, babbush_low_2017} and does not grow exponentially, which would prohibit efficient computation even on a quantum computer. Because $a_i$ and $a_i^{\dagger}$, unlike the Pauli spin operators, follow fermionic commutation rules $\{a_i, a_j \} = 0$, $\{a_i^{\dagger},a_j^{\dagger} \} = 0$, $\{a_i ,a_j^{\dagger} \} = \delta_{ij}$, a direct implementation of Eq.~(\ref{eq:Second_quant_Ham}) on a qubit-based quantum processor is not feasible without a mapping from fermionic to Pauli operators. The fermionic nature of electrons implies that many-electron wavefunctions must be anti-symmetric with respect to particle exchange. 
This is reflected in the way fermionic creation and annihilation operators act on state vectors: \begin{eqnarray} \hspace{-2cm} a_i^{\dagger} | f_0, \dots, f_{i-1}, f_i, f_{i+1}, \dots, f_n \rangle &= \delta_{f_i,0} \, p_i \, | f_0, \dots, f_{i-1}, 1, f_{i+1}, \dots, f_n \rangle \label{eq:creat}\\ \hspace{-2cm}a_i | f_0, \dots, f_{i-1}, f_i, f_{i+1}, \dots, f_n \rangle &=\delta_{f_i,1} \, p_i \, | f_0, \dots, f_{i-1}, 0, f_{i+1}, \dots, f_n \rangle. \label{eq:anhil} \end{eqnarray} Here $p_i=(-1)^{\sum_{k=0}^{i-1}f_{k}}$ denotes the parity and $f_i \in \{0,1\}$ the occupation number of the fermionic orbital $i$. The naive replacement of the fermionic operators $a_i^{(\dagger)}$ by Pauli ladder operators $\sigma_i^{\pm} = (\sigma^x\pm i\sigma^y)/2$ does, however, not reproduce Eqs.~(\ref{eq:creat}) and (\ref{eq:anhil}) because $\sigma_i^{\pm}$ describe distinguishable {\it particles} with no special symmetries. A variety of mappings have been developed that guarantee that the fermion statistics are captured on a system of qubits \cite{bravyi_fermionic_2002, tranter_bravyikitaev_2015, bravyi_tapering_2017}. Among those, the Jordan-Wigner mapping \cite{jordan_uber_1928} is particularly intuitive: It is based on a one-to-one mapping of fermionic to qubit occupations, i.e. the occupancy information is stored \emph{locally}. To take into account the parity information $p_i$ in Eqs.~(\ref{eq:creat}) and (\ref{eq:anhil}), fermionic operators are translated as \begin{eqnarray} a_i^{\dagger} \rightarrow \mathds{1}^{\otimes i-1} \otimes \sigma^- \otimes (\sigma^{z})^{\otimes N-i} \\ a_i \rightarrow \mathds{1}^{\otimes i-1} \otimes \sigma^+ \otimes (\sigma^z)^{\otimes N-i} \, , \label{eq:JW} \end{eqnarray} where $N$ is the total number of qubits considered. It is obvious that calculating the parity when acting on qubit $i$ requires the knowledge of all state occupations $j<i$, which is accomplished by the $\sigma^z$ terms in Eq.~(\ref{eq:JW}).
However, this introduces a non-locality in the mapping and, when inserted into the Hamiltonian in Eq.~(\ref{eq:Second_quant_Ham}), gives rise to long sequences of $\sigma^z$ operators intercalating between $\sigma^{\pm}$ operators of length $k$, known as $k$-local terms. This means that a fermionic wavefunction is spread out over $\mathcal{O}(N)$ qubits, posing fidelity issues in the readout process of the expectation value of the Hamiltonian. Recent schemes for tapering off qubits in mapped fermionic Hamiltonians~\cite{moll_optimizing_2016, bravyi_tapering_2017}, based on fermionic symmetries, can partially alleviate the hardware requirements necessary for performing simulations of fermionic systems. These second-quantized tapering schemes exploit symmetries in the mapped qubit Hamiltonian to reduce the simulation space needed to host the mapped fermionic system. The Jordan-Wigner transformation~\cite{jordan_uber_1928} consists of a local occupancy map and a non-local, $\mathcal{O}(N)$, parity function, whereas the binary-tree transformation encodes both operations on maps that scale $\mathcal{O}(\log(N))$ with the number of qubits~\cite{bravyi_fermionic_2002, tranter_bravyikitaev_2015, bravyi_tapering_2017}, which is a clear advantage compared with the Jordan-Wigner transformation. \subsection{Coupled cluster trial wavefunctions} \label{Section:ucc} Once a mapping of fermions to qubit has been chosen, suitable trial states for the VQE have to be prepared on the quantum processor. At best, these trial states incorporate the structure of the problem Hamiltonian and known properties of the solution state, such as the total number $N$ of fermions. While one could aim to find a gate set that allows one to generate all possible excited Slater determinant configurations, which is known as the full configuration interaction (FCI) approach, the number of states scales factorially with the number of electrons, a clear obstacle for computing larger molecules. 
One way to improve the efficiency is to use a coupled-cluster approach for creating the trial states, which allows a systematic sampling of all relevant excited Slater determinants up to a given excitation degree. In conventional quantum chemistry, these coupled-cluster expansions are used as a benchmark for all other approaches. In the unitary coupled-cluster (UCC) approach \cite{taube_new_2006}, which is a variational version of the commonly used coupled-cluster method \cite{stanton_equation_1993}, the unitary operator $U(\bm{\theta})$ that is used to generate a trial wavefunction $|\Psi(\bm{\theta})\rangle$ from the reference state $|\Phi\rangle$ is given by \begin{equation} |\Psi(\bm{\theta})\rangle = U(\bm{\theta}) |\Phi\rangle = e^{T(\bm{\theta}) - T^{\dagger}(\bm{\theta})} |\Phi\rangle. \label{UCC_equation1} \end{equation} It is constructed by exponentiation of the cluster operator $T(\bm{\theta})$ defined as \begin{equation} \hspace{-2.4cm} T(\bm{\theta}) = \sum_k T_{(k)}(\bm{\theta}) \, , \quad T_{(1)}(\bm{\theta}) = \sum_{i \in {\rm occ} \atop j \in {\rm unocc}} \theta_{(i)}^{(j)} a_j^\dagger a_i \, , \quad T_{(2)}(\bm{\theta}) = \sum_{i, j \in {\rm occ} \atop l, k \in {\rm unocc}} \theta_{(i,j)}^{(k, l)} a_l^\dagger a_k^\dagger a_j a_i, ... \quad. \label{UCC_equation4} \end{equation} Here, the coefficients $\bm{\theta}$ describes a vector of parameters that will be optimized using VQE. A common choice for the reference state $|\Phi\rangle$ is the ground-state Slater-determinant made up of the lowest-energy molecular orbitals obtained from the solution of the Hartree-Fock equation. The coefficients $\bm{\theta}$ of the cluster operators are not independent and their value decreases with the order of the excitation. Therefore, this expansion is typically truncated at the double (UCCSD) or triple level (UCCSDT) of excitation without significantly reducing the accuracy. 
In fact, the exponentiation of the cluster operator $T(\bm{\theta})$ introduces higher uncorrelated excitations at each level of truncation, e.~g., for $T(\bm{\theta})=T_{(1)}(\bm{\theta}) + T_{(2)}(\bm{\theta})$ \begin{equation} e^{T(\bm{\theta})} = 1 + T_{(1)}(\bm{\theta}) + T_{(2)}(\bm{\theta}) + \frac{T_{(1)}^2(\bm{\theta})}{2!} + T_{(1)}(\bm{\theta}) T_{(2)}(\bm{\theta}) + \frac{T_{(2)}^2(\bm{\theta})}{2!} + ... \, , \label{UCC_expT} \end{equation} the expansion produces triple and quadruple excitations in the first few terms of the expansion (fifth and sixth terms, respectively). Despite the compactness of this expansion, the number of coefficients $\bm{\theta}$ increases already in UCCSD with the number of orbitals to the fourth power, which impacts the efficiency of the classical optimization of the trial state $|\Psi(\bm{\theta})\rangle$. In practice, in the case of large molecular systems the limited achievable circuit depth in current quantum devices requires a further truncation of the series in Eq.~(\ref{UCC_expT}). Thus, while the coupled cluster method guarantees in principle an efficient convergence towards the exact ground state, its implementation in state-of-the-art quantum computers requires further studies in terms of how different approximations (truncations) affect the accuracy of the solution. \subsection{Hardware-efficient trial states suitable for near-term quantum hardware} \label{sec:heuristic} A much simpler approach is, therefore, the heuristic generation of the trial state with unitary operations that are more suited to the available quantum hardware~\cite{kandala_hardware-efficient_2017}. Independently of the particular problem to be solved, one may choose trial states that can be efficiently generated in current quantum hardware and at the same time allow the generation of highly entangled states that are close to the target state.
This approach is showcased in the examples provided in Sections \ref{sec:chemistryexample} and \ref{sec:qaoaexample}. As shown in Fig.~\ref{fig:heuristiccircuit}, the preparation of the heuristic trial states comprises two types of quantum gates, single-qubit Euler rotations $U(\bm{\theta})$ determined by the rotation angles $\bm{\theta}$ and an entangling \emph{drift} operation $U_{\rm{ent}}$ acting on pairs of qubits. \begin{figure} \caption{Quantum circuit for the preparation of hardware-efficient heuristic trial states.} \label{fig:heuristiccircuit} \end{figure} The $N$-qubit trial states are obtained by applying a sequence of $D$ entanglers $U_{\rm ent}$ alternating with the Euler rotations on the $N$ qubits to the initial ground state $|00\ldots0\rangle$, \begin{equation} | \Phi(\bm{\theta}) \rangle = \overbrace{ U^{D}(\bm{\theta}) U_{\rm ent}\ldots U^{1}(\bm{\theta})U_{\rm ent}}^{\rm{D-times}} U^{0}(\bm{\theta}) |00\ldots0\rangle \end{equation} This gate sequence has a total number of $p = N (3 D + 2)$ independent angles. To be more specific, the single-qubit operations are decomposed into rotations about the $x-$ and the $z-$axes, $U^{q,i}(\bm{\theta}) = Z^q_{\theta^{q,i}_1}X^q_{\theta^{q,i}_2}Z^q_{\theta^{q,i}_3}$, with $X^q(\theta^{q,i}_j) = \exp\left[-i\theta^{q,i}_j\sigma^x_q/2\right]$ (and similarly for $Z^q(\theta^{q,i}_j)$, $Y^q(\bm\theta)$) denoting the unitary operation acting on qubit $q$ at the $i$-th position in the gate sequences. The heuristic approach does not rely on the accurate implementation of specific two-qubit gates and can be used with any $U_{\rm ent}$ that generates sufficient entanglement. A natural choice can be the cross-resonance gate~\cite{chow_simple_2011, rigetti_fully_2010} as a two-qubit gate suited for the fixed-frequency superconducting qubit architecture as used, for example, for the IBM Q experience \cite{qx_ibm_quantum_2016}.
\subsection{Small molecules calculated with the variational quantum eigensolver} \label{sec:chemistryexample} As an application of the method described above, we present the calculation of the ground-state energy of simple molecules such as the hydrogen molecule: The starting point is the Hamiltonian in second quantization in Eq.~(\ref{eq:Second_quant_Ham}) with the one-body terms, $t_{i j}$, representing the kinetic energy of the electrons and the potential energy that they experience in the presence of the nuclei, \begin{equation} \label{eq:t} t_{i j}=\int d\boldsymbol{x}_1\,\phi_i^*(\boldsymbol{x}_1) \, \left(-\frac{\boldsymbol\nabla_1^2}{2}-\sum_{n=1}^2 \frac{Z_n}{|\boldsymbol{r}_{1}-\boldsymbol{R}_n |}\right)\phi_j (\boldsymbol{x}_1), \end{equation} and the Coulomb repulsion terms \begin{equation} \label{eq:u} u_{i j k l}=\int\int d \boldsymbol{r}_1 d \boldsymbol{r}_2 \, \phi_i^*(\boldsymbol{r}_1)\phi_j(\boldsymbol{r}_1)\frac{1}{|\boldsymbol{r}_{1}- \boldsymbol{r}_{2}|}\phi_k^*(\boldsymbol{r}_2)\phi_l(\boldsymbol{r}_2). \end{equation} Here $Z_n$ ($n=1,2$) are the nuclear charges, and each wavefunction $\phi_i(\boldsymbol{x}_1)$ is a $1s$ orbital centered at one of the two hydrogen atoms. We assume that the system is in its spin singlet state. After reduction~\cite{bravyi_tapering_2017} a two-qubit Hamiltonian is obtained \begin{equation} H_{H_2}= f_0 \, \mathds{1} \otimes \mathds{1}+ f_1 \, \sigma_z \otimes \sigma_z + f_2 \, \sigma_z \otimes \mathds{1} + f_3 \, \mathds{1} \otimes \sigma_z + f_4 \, \sigma_x \otimes \sigma_x \label{eq:Ham_H2_qubits} \end{equation} with $f_0=-1.0524$, $f_1=0.01128$, $f_2=0.3979$, $f_3=0.3979$, and $f_4=0.1809$. These coefficients are calculated at the equilibrium distance of 0.74~\AA\ using Eqs.~(\ref{eq:t}) and (\ref{eq:u}).
We evaluate the ground state of the Hamiltonian in (\ref{eq:Ham_H2_qubits}) on an ideal quantum simulator~\cite{qx_ibm_quantum_2016} using a heuristic trial wavefunction approach (Section~\ref{sec:heuristic}) with an increasing number of entangling steps (one, two and four). Here, the single qubit rotations of heuristic trial wavefunctions were implemented as $U^i(\theta) = Y(\theta^i_0) Z(\theta^i_1)$ and the entanglement was introduced via control phase gates~\cite{ibm_qiskit_2017}. Figure~\ref{fig:qchem} shows that a single entangling step is not sufficient to converge towards the correct energy value, whereas two or more entanglers can reproduce the expected results within a few tens of optimization steps in the rotation-angle space $\bm{\theta}$. \begin{figure} \caption{Convergence of the variational quantum eigensolver for the hydrogen molecule with an increasing number of entangling steps.} \label{fig:qchem} \end{figure} This method can be extended to larger molecules. For lithium hydride (LiH) and beryllium dihydride (BeH$_2$) the second-quantized fermionic Hamiltonian is constructed using a minimal set of atomic orbitals ~\cite{kandala_hardware-efficient_2017} (labelled by the conventional hydrogenic quantum numbers). In beryllium dihydride the basis is composed of the $1s$, $2s$, $2p_x$ orbitals associated with beryllium, and the $1s$ orbital associated with each hydrogen atom. This results in a total of ten spin orbitals. The two innermost $1s$ spin orbitals of beryllium are assumed to be completely filled. The remaining eight spin-orbitals of beryllium dihydride are reduced to six by exploiting spin-parity symmetries~\cite{bravyi_tapering_2017}. Similarly, lithium hydride is mapped onto four qubits. It is demonstrated numerically that in the absence of noise, a number of entangling steps $D = 8$ and $D= 28$ are required to achieve chemical accuracy for lithium hydride and beryllium dihydride, respectively, for the given experimental connectivity.
However, the combined effect of decoherence and finite sampling limits the optimal depth for optimizations on current quantum hardware to between zero and two entanglers, which results in deviations of the simulated bond-dissociation energies from the real values. Decreasing the effective error rates or applying error-mitigation schemes as discussed in Section~\ref{sec:error} will improve the accuracy of the simulations. \section{Classical optimization with qubits} \label{sec:qaoa} The complex Hamiltonians of quantum chemistry problems give quantum computers an inherent advantage over classical hardware. For classical optimization the advantage is not as obvious because many of the relevant problems can be mapped to a relatively simple Ising-spin Hamiltonian. It is diagonal in the computational basis and can be tackled by a range of classical methods. One of the issues with classical solvers is to avoid solutions in local minima of the cost function. In this context simulated annealing~\cite{kirkpatrick_optimization_1983} is an approach that makes use of thermal fluctuations to escape such local minima. Quantum annealing~\cite{kadowaki_quantum_1998} additionally exploits quantum tunneling and can potentially reach a ground state faster especially for problems with very corrugated cost functions~\cite{denchev_whatis_2016, albash_evidence_2017}. The potential for quantum speed-up with this approach is heavily debated in the community; however, because of the tremendous application space even a modest speed-up for a selected number of problems might have a significant impact. Moreover, understanding the detailed evolution of the optimization process and the potential role of entanglement is critical even for improving algorithms that run on classical hardware. This is why the application of the VQE for solving classical optimization problems on gate-based near-term quantum devices is especially interesting. 
To run the variational quantum eigensolver we again consider two different ways to create trial wavefunctions. First, the quantum approximate optimization algorithm (QAOA)~\cite{farhi_quantum_2014} is discussed, which is a polynomial-time algorithm for finding an approximate solution to a classical optimization problem with a desired accuracy. It is related to the quantum adiabatic algorithm~\cite{farhi_quantum_2000}, but has shorter circuit-depth requirements. Second, we give a short example how heuristic trial states can be used to solve a \emph{MaxCut} problem on a real quantum device using the variational quantum eigensolver. \subsection{Quantum approximate optimization algorithm with short depth} \label{sec:qaoaexample} Similarly to the approach described in Section~\ref{sec:heuristic} the trial wavefunction in the QAOA is guided towards the solution by repeated unitary evolution according to two Hamiltonians. The first one is the Hamiltonian $H_C$, which encodes the classical cost function $C(\mathbf x)$ of a binary constrained optimization problem. The second one is a mixing Hamiltonian $H_M$, which helps guide the optimization in Hilbert space towards the ground state of $H_C$. The number of times that both Hamiltonians are applied in the optimization process defines the level $D$ of the circuit and determines the complexity of the algorithm. Without loss of generality, it is assumed that an optimal solution $\mathbf x$ \emph{minimizes} the cost function $C(\mathbf x)$ which is a polynomial in the binary components $x_i \in \{ 0,1\}$ of the variable $\mathbf x$. Encoding of the cost function $C(\mathbf x)$ into a Hamiltonian $H_C$ requires shifting the binary variables $x_i\to(1-z_i )/2$ with $z_i \in \{ -1,1\}$ and then substituting $z_i\to\sigma_i^z$ to obtain an Ising-type Hamiltonian. 
We chose the same notation as in Eq.~(\ref{eq:Hqpauli}) but consider only diagonal terms $\sigma_i^j \in \{\mathds{1},\sigma_i^z\}$ which gives \begin{equation} H_C = \sum_\alpha h_\alpha P_\alpha = \sum\limits_{\alpha} h_\alpha \bigotimes_{i_\alpha} \sigma_{i_\alpha}^z \, . \label{eq:QAOA_Hamil} \end{equation} Here the index $i_\alpha$ runs over all $\sigma_{i_\alpha}^z$ in $P_\alpha$, which constitutes a $k$-local term (many-body interaction term among $k\leq N$ qubits), matching the polynomial terms in the cost function $C$ with corresponding real coefficients $h_\alpha$. The second Hamiltonian $H_M$ is just a global transverse field, i.e.\ $H_M = -\sum\limits_i \sigma^x_i$. To find the ground state of the problem Hamiltonian $H_C$, one proceeds by applying the evolution operator \begin{equation} U(\bm{\beta}, \bm\gamma)=\prod\limits_{l=1}^{D}e^{-i\beta_l H_M}e^{-i\gamma_l H_C} \end{equation} to a starting state $|\psi_0\rangle$ that can easily be generated on the quantum computer, e.~g.\ a uniform superposition state. Using the VQE, the parameters of the final state $| \bm{\beta}, \bm\gamma \rangle = U(\bm{\beta}, \bm\gamma)|\psi_0\rangle$ are then adjusted such as to minimize the expectation value $\langle \bm{\beta}, \bm\gamma|H_C | \bm{\beta}, \bm\gamma \rangle$. Measurement of the final state $| \bm{\beta}, \bm\gamma \rangle$ directly yields the solution of the classical optimization problem with a probability that approaches unity as $D$ increases. However, with increasing $D$ the circuit depth required will reach the decoherence limits of available quantum hardware, and the fidelity of the result will again decrease. Also, the number of classical parameters that need to be optimized for large $D$ will result in a slower convergence. 
Instead of using the VQE, choosing a fine interpolation $(\beta_l,\gamma_l)=(1-l/D,l/D)$ with $l= 0,\dots,D$ would be equivalent, to first order, to a trotterized version of the adiabatic quantum algorithm~\cite{lloyd_universal_1996, farhi_quantum_2000}. By letting the VQE select optimal parameters $(\gamma_l,\beta_l)$, a more direct path to the target state becomes possible and the algorithm can reach the ground state with high accuracy even for relatively small values of $D$. The QAOA has been generalized and successfully applied to MaxCut with analytical and numerical studies~\cite{farhi_quantum_2017}. \subsection{Variational quantum eigensolver applied to the MaxCut problem} \label{Section:maxcut} To give an example of a classical optimization problem, we discuss an instance of the maximum-cut (MaxCut) problem with four qubits. Instead of generating trial states with the QAOA, we again use the hardware-efficient approach explained in Section~\ref{sec:heuristic} to run the algorithm on a real quantum device. The MaxCut problem is an NP-complete binary optimization problem, with applications in clustering, network science, and statistical physics. It aims at grouping the nodes of a graph into two subgroups by cutting across the links between them. The cut is to be made in such a way that the added weights of the links (edges) that were cut are maximized. The formal definition of this problem is the following: Consider an $n$-node non-directed graph with edge weights $w_{ij} > 0$, $w_{ij} = w_{ji}$, where $(i, j)$ enumerate the nodes linked by the corresponding edge~\cite{lucas_ising_2014}. The profit function to be maximized is therefore the sum of edge weights connecting points in the two different subsets. By assigning a subset label $x_i=0$ or $x_i=1$ to each node $i$, one tries to maximize \begin{equation} C(\textbf{x}) = \sum_{i,j} w_{ij} x_i (1 - x_j) \, .
\end{equation} We can then use the mapping described in Section~\ref{sec:qaoaexample} to obtain the Ising Hamiltonian \begin{eqnarray} H_I & = & \sum_{i<j} \frac{w_{ij}}{2} (1-\sigma^z_i \sigma^z_j) \\ & = & -\frac{1}{2}\sum_{i<j} w_{ij} \sigma^z_i \sigma^z_j+\mathrm{const} \, . \end{eqnarray} In other words, the weighted MaxCut problem is equivalent to finding the ground state of the Ising Hamiltonian \begin{equation} H_C = \sum_{i<j} w_{ij} \sigma^z_i \sigma^z_j \, . \end{equation} For exploring the solution space of $H_C$ we use the approach from Section~\ref{sec:heuristic} to define a hardware-efficient heuristic trial wave function \begin{equation} |\psi(\theta)\rangle = [U(\boldsymbol\theta) U_\mathrm{ent}]^D |\psi_0 \rangle \, , \end{equation} where $U_\mathrm{ent}$ is a collection of fully entangling gates that are diagonal and the number of entanglers $D$ defines the level of the quantum circuit. The single-qubit rotations are chosen to be $U(\theta) = \prod\limits_{i=1}^N Y(\theta_{i})$, where $N$ is the number of qubits. For a classical problem this choice allows a search over the space of quantum states with only real coefficients, while still exploiting entanglement to potentially converge faster to the solution. Evaluation of the energy expectation value for a specific trial wavefunction is especially simple in this case as it is sufficient to measure all four qubits and extract the pairwise $\sigma^z_i \sigma^z_j$ correlators. Figure~\ref{fig:maxcut}(a) shows two different cuts through a problem instance with four nodes (qubits). The lower of the two solves the problem if all non-zero weights in $w_{ij}$ are assumed to be equal. \begin{figure} \caption{(a) Two different cuts through a MaxCut problem instance with four nodes. (b) State probabilities obtained after optimizing the trial-state parameters with the VQE on an ideal quantum simulator.} \label{fig:maxcut} \end{figure} When we implement this on an ideal quantum simulator~\cite{qx_ibm_quantum_2016, ibm_qiskit_2017} and use the VQE to optimize the parameters of the trial state in 100 trial steps, we get the state probabilities shown in Fig.~\ref{fig:maxcut}(b).
For this simple simulation, the solution is found with a probability that is higher than $95\%$. \section{Classical robust optimizers for measured expectation values} \label{sec:opt} The optimization cycle of the VQE (see Section~\ref{sec:vir}) involves evaluation of the cost function on a real quantum device, e.~g., a superconducting quantum processor, and adjustment of the variational parameters using classical optimization algorithms. In the latter, several important aspects need to be considered for successful application of the VQE. First, the optimization could get stuck in a local minimum that would correspond to an excited state of the system. Using a suitable optimization routine can prevent finding such false minima. Gradient-descent methods may be combined with simulated annealing steps or strategies that involve starting from multiple initial points. In this context, in~\cite{wecker_progress_2015} a greedy search with multiple starting points is alternated with a Powell search, showing good performances on Hubbard lattices of up to twelve sites. Second, because of the limited number of samples of the Hamiltonian terms on the quantum computer one only has access to a noisy energy (cost) value. The error in the energy estimation goes as $\mathcal{O}(1/\sqrt{s})$, with $s$ the number of samples taken. Grouping Pauli operators into commuting sets~\cite{mcclean_theory_2016, kandala_hardware-efficient_2017} that can be measured with the same state preparation and post-rotations reduces the number of separate measurements and enables more averages and better sampling statistics. Still, the choice of the optimizer must take into account that the cost function is affected by stochastic fluctuations.
In fact, while unitary coupled-cluster methods and other analytical variational circuits in principle support the use of gradient-based methods that increase the efficiency of the optimization~\cite{romero_strategies_2017}, an imperfect knowledge of the unitary gates implemented in a given quantum device and statistical noise render gradient-based approaches less useful. Derivative-free methods, such as Nelder-Mead and the TOMLAB method, have been tested for optimization of the hydrogen molecule, resulting in a superior performance of the latter method in the presence of stochastic noise~\cite{mcclean_theory_2016}. Third, time overheads due to repeated sampling and the number of function evaluations to update the variational parameters will affect the performance of the optimization. In this spirit, the use of a simultaneous perturbation stochastic approximation (SPSA)~\cite{spall_multivariate_1992}, used in~\cite{kandala_hardware-efficient_2017} for molecular structure problems, provides both a constant overhead in terms of the number of variational parameters and robustness with respect to stochastic fluctuations. Extensions of the SPSA method that include approximations to the Hessian matrix can be explored to improve the speed of the optimization in the final steps, where estimating second derivatives helps achieve faster convergence~\cite{spall_adaptive_2000}. In contrast, additional savings in time overhead in SPSA optimizations that rely on just one evaluation of the cost function per update step~\cite{spall_one-measurement_1997} could further improve the performance in large-scale quantum problems where sampling is particularly difficult. While simultaneous perturbation methods can be very useful in the optimization of fermionic problems, for classical problems, such as instances of MaxCut, the ease of evaluating the cost function may favor standard gradient-descent or derivative-free routines. 
Another critical aspect is the improvement of the classical control hardware for running the VQE on a quantum device: measurement of the cost function with sufficient accuracy requires repeated sampling of the output state and thereby also repeated cycles of qubit initialization, application of the quantum gates and qubit measurement. The speed of the execution of the optimization can be improved on the hardware side by using integrated active reset techniques. In the case of superconducting qubits this is true for both qubits and resonators~\cite{mcclure_rapid_2016, bultink_active_2016}. Moreover, the costly time overhead in synthesizing and loading control pulses onto the quantum processor for trial-state preparation can be reduced by short-latency field-programmable gate-array-based control and measurement architectures such that time overheads are solely related to the execution of the quantum gates and the readout of the qubits. \section{Prospects of fighting decoherence without full error correction} \label{sec:error} The hardest challenge for practical near-term quantum devices is their sensitivity to noise. Any computation that has the potential to leverage quantum effects and to provide a quantum speed-up over classical algorithms needs sufficiently coherent qubits. It was realized early on~\cite{unruh_maintaining_1995} that the coupling to the environment sets both a time and size limit for a quantum computation. Hence, the strength of this coupling determines how large a computation can be performed. This constant limit has to be contrasted to the improvements that are gained from the asymptotic scaling advantages of quantum algorithms. This limitation was, at least in theory, remedied with the advent of quantum error correction~\cite{shor_scheme_1995, steane_error_1996, calderbank_good_1996}. 
However, in spite of rapid experimental progress, the resource requirements for fully fault-tolerant operations with current codes~\cite{fowler_surface_2012} seem prohibitively large~\cite{jones_layered_2012, devitt_requirements_2013}. In turn, hopes were raised that non-error-corrected devices will soon become available that reach a regime of reasonably long coherence times and give rise to dynamics too complex to be simulated on a classical computer~\cite{boixo_characterizing_2016, farhi_quantum_2016}. In light of these developments, the question arises which computational tasks can be accomplished with quantum devices that have only limited or no error correction. Depending on the form of the actual physical noise, it is expected that the production of entropy in any quantum circuit that is subject to noise will set a limit to this approach~\cite{aharonov_limitations_1996}, and error correction is indispensable for any advanced form of quantum information processing. However, the full computational power of even short-depth circuits is not yet fully understood, and based on complexity-theoretic grounds, it can be argued, that even finite-depth circuits lie beyond the computational power of a classical computer~\cite{terhal_adaptive_2002, farhi_quantum_2016}. Recent experiments in which the quantum simulation of small molecules was performed~\cite{kandala_hardware-efficient_2017} showed that even for very short-depth circuits the effects of decoherence become apparent. For the simulation to be of value, the effect of this error needs to be mitigated, and several proposals have been made to deal with the effects of decoherence in short-depth quantum computation~\cite{mcclean_hybrid_2017, li_efficient_2017, temme_error_2016, schwenk_reconstructing_2017}. For a large fraction of applications, the computational task can be abstracted to estimate the expectation value of some observable after the application of a short-depth quantum circuit. 
This estimation must be accurate enough to achieve a simulation precision that outperforms approximate classical simulation tasks. Techniques to mitigate the error in the estimation of expectation values were introduced in \cite{temme_error_2016}. It is shown that the estimate can be improved in the presence of noise with only a modest time overhead. This approach requires no additional hardware resources such as fresh ancilla or code qubits. In this scheme, the estimation of an expectation value is improved by an \textit{extrapolation to the limit of zero noise} as originally proposed by Richardson~\cite{richardson_deferred_1927}. The method requires no \textit{a priori} knowledge about the noise source, except that the noise is weak and time-independent. To understand this approach it is useful to choose a more physically motivated description of the computation rather than the gate-based quantum circuits. It is more convenient to consider a time-dependent Hamiltonian dynamics $H(t)= \sum_{\alpha}{J_{\alpha}(t) P_{\alpha}}$ that implements the circuit, where $J_{\alpha}(t)$ are coupling coefficients and $P_{\alpha}$ are $N$-qubit Pauli operators. In this model the coherent evolution is subject to a noise contribution $\mathcal{L}$ that is effectively constant in time and acts on a time scale much larger than the time-dependent Hamiltonian implementing the quantum circuit. The time evolution up to some time $T$ of the open system with initial state $\rho_0$ can be described by a Lindblad master equation \begin{equation} \frac{\partial}{\partial t} \rho(t) = -i[H(t),\rho(t)] + \lambda \mathcal{L}(\rho(t)) \, . \end{equation} The expectation value $E(\lambda)$ of some observable $A$ is then obtained from the final state $\rho_\lambda(T)$ and can be written as a power series of the noise rate $\lambda$ \begin{equation} E(\lambda)= E^{*}(0)+ \sum_{i=1}^{n}{a_i \lambda^{i}} + O(\lambda^{n+1}) \end{equation} where $E^{*}(0)$ corresponds to the noise-free expectation value.
Richardson proposed a so-called {\em deferred approach to the limit} to estimate an expectation value such as $E^{*}(0)$ with high accuracy~\cite{richardson_deferred_1927, sidi_practical_2003}. For this purpose, the expectation value $E(\lambda_j)$ is measured for different noise rates $\lambda_j=c_j \lambda$, where $c_j$ is a rescaling factor and $\lambda$ the actual noise rate in the experiment. The noise-free expectation value can then be estimated by~\cite{temme_error_2016} \begin{equation} E^{*}(0) = \sum_{j=0}^{n}{\gamma_j E(\lambda_j)} + O(\lambda^{n+1}) \end{equation} where $\sum_{j=0}^{n}{\gamma_j} = 1$ and $\sum_{j=0}^{n}{\gamma_j c_j^k} = 0$ for $k=1...n$. In this way the largest terms in the error up to $O(\lambda^n)$ are cancelled, thus leading to an estimation of the noise-free expectation value with very high accuracy. In practice however, the noise rate $\lambda$ is fixed. To still obtain an experimental estimate of the expectation values $E(\lambda_j)$, the following trick can be applied: the quantum circuit $H(t)$ can be run for a time $c_j T$ and with a reduced coupling $J_{\alpha}/c_j$. As the noise ${\cal L}$ is assumed to be constant in time, it can be shown that the state resulting from a rescaled dynamics is identical to the state obtained from the dynamics with an effectively rescaled noise parameter. Depending on the nature of the noise, relative errors for the noise-free expectation value range from $10^{-6}$ to $10^{-11}$~\cite{temme_error_2016}. \section{Conclusion} \label{sec:conclusion} Current and near-term quantum processors will most likely be limited to a few hundred, maybe a thousand qubits, and operate without quantum error correction. If the qubits and their control were ideal, the computational power of quantum devices with a couple hundred qubits would already dwarf that of any classical computer and could show \emph{quantum advantage}. However, errors in the quantum operations reduce their computational power. 
In this paper it is argued that a proper metric, such as the \emph{quantum volume}, should be used to assess the computing power of a quantum processor and to compare different prototypes on a fair basis. With this metric, it becomes clear that not only the qubit number has to be increased, but also, and even more importantly, the effective error rate needs to be significantly reduced before practical applications come within reach. Simple estimates show that to run an algorithm of depth one hundred on a hundred-qubit device requires an effective error rate of 0.01~\%. This number is not completely unrealistic, but shows the necessity to construct algorithms with short depth. Moreover, error-mitigation schemes using no or only a small number of extra ancilla qubits will be important to compensate for systematic deviations in the computed result. Besides enlarging the quantum volume and reducing the effect of errors, it is essential to find suitable methods and algorithms to use quantum effects efficiently. We have discussed that a promising way forward is to consider hybrid quantum-classical architectures in which the quantum processor is used to generate trial quantum states that could not be stored in conventional memory. The variational quantum eigensolver method can be used to solve any type of problem that can be cast into a physical Hamiltonian. Constrained binary optimization problems can be described by an Ising-type Hamiltonian, whereas problems from the field of quantum chemistry or material science map into a more general spin Hamiltonian with more than longitudinal interactions among the spins. For Ising-type Hamiltonian problems, it is not clear how much quantum speed-up can be expected, because many fast classical algorithms have already been developed~\cite{farhi_quantum_2014}.
In contrast, the Hamiltonian for chemistry and materials-related problems contains so-called non-stoquastic terms, which makes it difficult to solve these problems exactly on a classical computer. It is, therefore, believed that using a quantum processor will lead to exponential speed-up. The current state of the art encompasses proof of concept simulations of small molecules: In the context of superconducting qubits the hydrogen molecule has been simulated with two qubits~\cite{omalley_scalable_2016, colless_robust_2017, kandala_hardware-efficient_2017} and larger molecules such as lithium hydride and beryllium dihydride have been simulated with seven qubits~\cite{kandala_hardware-efficient_2017}. As the size of the systems under study grows in electron number so does the required number of qubits, for example, the simulation of the electronic structure of small organic molecules such as benzene and ethane~\cite{kassal_simulating_2011} already requires tens to hundreds of qubits. In the case of strongly correlated electrons, even the simplest systems made of a few atoms, like for instance the chromium dimer~\cite{booth_linear_2014}, quickly become intractable for classical computers when accurate numerical solutions are required. To address strongly correlated problems of practical relevance such as the nitrogen fixation catalytic center in bacteria~\cite{reiher_elucidating_2017} or the iron-sulphur clusters in the respiratory chain protein complexes~\cite{zhang_diphthamide_2010, zhou_thiabendazole_2011} (see Figure~\ref{fig:QCqubits}) quantum processors with a significantly increased quantum volume are needed. \begin{figure} \caption{Qubit resources needed for quantum chemistry. Qubit numbers up to ten are based on existing experiments, whereas the resources for larger molecules are estimates. 
From left to right: hydrogen molecule, lithium hydride, beryllium hydride, iron sulphur (Fe-S) cluster in DPH2 complex of Pyrococcus Horikoshii (PDB entry code 3LZD), and Fe-S clusters sequence in cytochrome B560 subunit of mitochondria (PDB entry code 3SFD).} \label{fig:QCqubits} \end{figure} To achieve this, the capabilities of next-generation quantum processors have to improve along several directions: \begin{enumerate} \item \label{item:errors} Improvement of coherence and qubit control, as well as development of error-mitigation schemes. \item \label{item:trialstate} Hardware-efficient and problem-specific trial state preparation when using the variational quantum eigensolver. \item \label{item:fermion} Efficient circuit optimization by code optimizers and improved mappings from fermions to qubits. \item \label{item:optimization} Classical parameter optimization methods suited for the variational quantum eigensolver. \end{enumerate} As for (\ref{item:errors}), current best error rates of $\sim10^{-4}$ for single-qubit and $\sim10^{-3}$ for two-qubit gates in the case of superconducting qubit architectures do not provide sufficient accuracy for more complex quantum calculations. The coherence time of qubits has to be improved, e.~g., by improving fabrication techniques or chip designs. The control pulses for qubits and their interaction have to be optimized to avoid systematic gate errors. Any remaining errors have to be compensated by error-mitigation strategies. As for (\ref{item:trialstate}), trial states that require only a variation of a few parameters to prepare the targeted solution state are required. It is an open question how to construct suitable trial states for a general problem set.
One may speculate that some combination of heuristic and problem-specific approaches is best suited for the variational quantum eigensolver, e.~g., hardware-efficient trial wavefunctions which obey certain physical constraints, for example, to conserve the particle number in the quantum chemistry context. Moreover, enlarging the set of available gates, e.~g.\ by exploring coupling primitives that allow different types of interactions between two or more qubits to be realized~\cite{mckay_universal_2016, roth_analysis_2017} is considered to create problem-specific trial states and render the VQE efficient. As for (\ref{item:fermion}), different fermions-to-qubits maps have been proposed which do not require the creation of entanglement over the entire qubit space. Among the different variants of the Jordan-Wigner and binary-tree methods, one can envisage approaches that perform better in the presence of system-specific noise. Moreover, it may be possible to identify new maps into qubits, which are especially suited for variational quantum eigensolvers and that can exploit, for instance, the use of additional ancilla qubits to further reduce the number and the complexity of the gates. Of particular interest is also the possibility to optimize quantum circuits using {\em post-processing} tools at compilation~\cite{reiher_elucidating_2017}. The use of high-level languages for the generation and the manipulation of quantum circuits will indeed offer the possibility to rationalize the qubits resources, thus reducing the circuit depth and therefore the time to solution. As for (\ref{item:optimization}), specialized classical optimizers that can deal with large stochastic fluctuations resulting from queries to the quantum processor in the VQE are required. The possibility that optimization routines get trapped in false local minima or the effect of high noise render the robustness of optimizers of critical importance for near-term applications. 
Even the use of quantum-enhanced optimization schemes may be envisaged. In conclusion, several promising approaches to make use of near-term devices with hundreds of qubits and limited coherence times have been developed. Overcoming the remaining challenges will allow us to solve tangible problems, most likely in quantum chemistry, material science or classical optimization. \section*{References} \end{document}
\begin{document} \title{Uniform Martin's conjecture, locally} \begin{abstract} We show that part I of uniform Martin's conjecture follows from a local phenomenon, namely that if a non-constant Turing invariant function goes from the Turing degree $\boldsymbol x$ to the Turing degree $\boldsymbol y$, then $\boldsymbol x \le_T \boldsymbol y$. Besides improving our knowledge about part I of uniform Martin's conjecture (which turns out to be equivalent to Turing determinacy), the discovery of such local phenomenon also leads to new results that did not look strictly related to Martin's conjecture before. In particular, we get that computable reducibility $\le_c$ on equivalence relations on $\mathbb{N}$ has a very complicated structure, as $\le_T$ is Borel reducible to it. We conclude raising the question \emph{Is part II of uniform Martin's conjecture implied by local phenomena, too?} and briefly indicating a possible direction. \end{abstract} \section{Introduction to Martin's conjecture} Providing evidence for the intricacy of the structure $(\mathcal D,\le_T)$ of Turing degrees has been arguably one of the main concerns of computability theory since the mid '50s, when the celebrated priority method was discovered. However, some have pointed out that if we restrict our attention to those Turing degrees that correspond to relevant problems occurring in mathematical practice, we see a much simpler structure: such \virg{natural} Turing degrees appear to be well-ordered by $\le_T$, and there seems to be no \virg{natural} Turing degree strictly between a \virg{natural} Turing degree and its Turing jump. Martin's conjecture is a long-standing open problem whose aim was to provide a precise mathematical formalization of the previous insight. The leading idea is to formalize the notion of \virg{natural} Turing degree as a suitable equivalence class of \virg{definable} functions over Turing degrees. 
A function $f:A\to{2^\NN}$, where $A\subseteq{2^\NN}$, is said to be \textbf{Turing invariant} (abbreviated \textbf{TI}) if, for all $x,y\in A$, one has \[ x\equiv_T y \implies f(x)\equiv_T f(y), \] whereas it is said to be \textbf{order-preserving} (abbreviated \textbf{OP}) if, for all $x,y\in A$, \[ x\le_T y \implies f(x)\le_T f(y). \] The intuition behind Martin's conjecture is that \virg{natural} Turing degrees are supposed to induce, by relativization, \virg{definable} TI functions and, vice versa, \virg{definable} TI functions are supposed to come from this process of relativizing some \virg{natural} Turing degree. Precisely, \apo{definable} is formalized by setting Martin's conjecture under the Axiom of Determinacy (\ax{AD}). Recall that, in this context, upward Turing cones, i.e.\ sets of the form \[ \Set{x\in{2^\NN}|x\ge_T z}, \] are usually referred to just as \textbf{cones}. Also recall that $A\subseteq{2^\NN}$ is said to be Turing-invariant if it is closed under $\equiv_T$. Turing Determinacy (\ax{TD}) denotes the statement that every Turing-invariant subset of ${2^\NN}$ either contains a cone or is disjoint from a cone. Martin's celebrated cone theorem \cite{martin1968} states that \ax{TD} follows from \ax{AD}. The importance of \ax{TD} lies in the fact that it makes it possible to define a natural notion of \emph{largeness} of Turing-invariant sets: the map \[ \mu(A)=\begin{cases} 1 &\text{if $A$ contains a cone}\\ 0 &\text{otherwise} \end{cases} \] defines, under \ax{TD}, a measure on the $\sigma$-algebra of Turing-invariant subsets of ${2^\NN}$. Recall the definition of the Turing jump of $x\in{2^\NN}$: \[ x'(n)=\begin{cases} 1&\text{if $n\in\dom(\varphi_n^x)$,}\\ 0&\text{otherwise.} \end{cases} \] Finally, given two TI functions $f,g:{2^\NN}\to{2^\NN}$, one defines \[ f\le_M g\iff f(x)\le_T g(x)\text{ on a cone.} \] \begin{conj}[Martin]\thlabel{mc} In \ax{ZF+DC+AD}, the following are conjectured: \begin{enumerate}[\upshape I.
] \item if $f:{2^\NN}\to{2^\NN}$ is Turing invariant, then either $f(x)\ge_T x$ on a cone or there exists $y\in{2^\NN}$ such that $f(x)\equiv_T y$ on a cone; \item the set of TI functions $f$ such that $f\ge_M\id_{{2^\NN}}$ is pre-well-ordered by $\le_M$; moreover, if such an $f$ has $\le_M$-rank $\alpha$, then $f'$ (defined by $f'(x)= f(x)'$) has $\le_M$-rank $\alpha+1$. \end{enumerate} \end{conj} On a side note, part I of Martin's conjecture, in particular, has been in vogue since the discovery of its profound consequences in the theory of countable Borel equivalence relations (see \cite{thomas}). Despite still being open, Martin's conjecture was proved true when restricted to a particular class of functions --- namely the UTI functions --- by Slaman and Steel in \cite{steel, ss}. Let us recall what UTI functions are. Let $(\varphi_i^x)_{i\in\mathbb{N}}$ be the standard numbering of partial unary computable-in-$x$ functions, where the oracle $x$ is a function from $\mathbb{N}$ to $\mathbb{N}$. Given $x,y\in{2^\NN}$ and $i,j\in\mathbb{N}$, we say that $x\le_T y$ via $i$ if $x=\varphi_i^y$, and we say that $x\equiv_T y$ via $(i,j)$ if $x\ge_T y$ via $i$ and $x\le_T y$ via $j$. A function $f:A\to{2^\NN}$, with $A\subseteq{2^\NN}$, is said to be \textbf{uniformly order-preserving} (abbreviated \textbf{UOP}) if every time we have $x,y\in A$ such that $x\le_T y$ via $i$, we can choose \emph{uniformly in $x$ and $y$} an index $j$ such that $f(x)\le_T f(y)$ via $j$. In other words, $f$ is UOP if there is a function $u:\mathbb{N}\to\mathbb{N}$ such that \[ x\le_T y\text{ via $i$}\implies f(x)\le_T f(y)\text{ via $u(i)$} \] for all $x,y\in A$. Similarly, $f$ is said to be \textbf{uniformly Turing invariant} (abbreviated \textbf{UTI}) if there is a function $u:\mathbb{N}^2\to\mathbb{N}^2$ such that, for all $x,y\in A$, \[ x\equiv_T y\text{ via $(i,j)$}\implies f(x)\equiv_T f(y)\text{ via $u(i,j)$}. 
\] \emph{Uniform} Martin's conjecture refers to Martin's conjecture for \emph{UTI} functions only, \emph{projective} Martin's conjecture refers to Martin's conjecture for \emph{projective} TI functions only, and so on. \section{Part I of uniform Martin's conjecture, from local to global} In Steel's paper \cite{steel} and in Slaman and Steel's paper \cite{ss}, it was proved respectively that part II and part I of uniform Martin's conjecture hold. We shall present a soft proof of the following slight improvement of the latter result. Recall that Turing Determinacy (\ax{TD}) denotes the statement that every $A\subseteq{2^\NN}$ which is closed under $\equiv_T$ either contains a cone or is disjoint from a cone, and also recall this follows from \ax{AD} by Martin's cone theorem. \begin{thm}\thlabel{umc1} Assume \ax{TD} and let $f:{2^\NN}\to{2^\NN}$ be UTI on a cone. Then, either $f(x)\ge_T x$ on a cone, or there exists $y\in{2^\NN}$ such that $f(x)=y$ on a cone. \end{thm} Let us stress the differences between the results: Slaman and Steel showed that, under \ax{AD}, UTI functions are either increasing or constant \emph{up to Turing equivalence} on a cone. By contrast, \thref{umc1} tells us that, under the sole assumption of \ax{TD}, UTI functions are either increasing or \emph{literally} constant on a cone. The interesting thing about our proof of \thref{umc1} is that it shows us how the global dichotomy in such theorem actually arises from an analogous local dichotomy, i.e.\ a dichotomy that UTI functions exhibit on each single Turing degree. \begin{thm}\thlabel{lmc1} Let $x\in{2^\NN}$ and $f:[x]_{\equiv_T}\to{2^\NN}$ be UTI. Then, either $f(x)\ge_T x$ or $f$ is constant. \end{thm} This \thnameref{lmc1}, which could be called a local version of Slaman and Steel's theorem, is the main result of this paper. Before we prove it, let us show how easily \thref{umc1} descends from it. \begin{proof}[Proof of \thref{umc1}] Suppose $f$ is UTI in the cone above $z$. 
Consider \[ A=\Set{x\in{2^\NN} | \text{$f\upharpoonright [x]_{\equiv_T}$ is constant}}. \] $A$ is Turing invariant, so by \ax{TD} either ${2^\NN}\setminus A$ or $A$ contains a cone. In the former case --- say ${2^\NN}\setminus A$ contains the cone above $w$ --- given any $x\ge_T z\oplus w$, we can apply \thref{lmc1} and deduce $f(x)\ge_T x$. Otherwise, if $A$ contains a cone, next folklore \thnameref{lemcost} applies. \end{proof} \begin{fact}\thlabel{lemcost} Suppose $f:{2^\NN}\to{2^\NN}$ is such that the following holds for all $x,y$ in a cone: \[ x\equiv_T y\implies f(x)=f(y). \] Then, assuming \ax{TD}, $f$ is literally constant on a cone. \end{fact} The easy yet classic argument for \thref{lemcost} is probably found for the first time in \cite{ss}, in the form of a remark that \ax{AD} implies there is no choice function on Turing degrees. We present it here for the reader's convenience. \begin{proof}[Proof of \thref{lemcost}] Suppose that the hypothesis holds in the cone based in $z$. Then, the sets of the form \[ \Set{x\ge_T z| f(x)(i)=j} \] are Turing invariant, and so \ax{TD} implies that the $i$-th digit of $f(x)$ is constant for all $x$ in a cone $C_i$. Hence, $f$ is constant on the intersection of the $C_i$'s (which trivially contains a cone). \end{proof} \thref{lmc1} enables us to calibrate the strength of the statement of part I of uniform Martin's conjecture over \ax{ZF+DC}. We actually have two different statements for uniform Martin's conjecture part I, namely the original one and the statement of \thref{umc1}. However, our calibration holds for both. 
\begin{coroll}\thlabel{metac} The following statements are equivalent over \ax{ZF+DC}: \begin{enumerate}[$(a)$] \item \ax{TD}; \item for all $f:{2^\NN}\to{2^\NN}$ which is UTI on a cone, either $f(x)\ge_T x$ on a cone, or $f$ is literally constant on a cone; \item for all $f:{2^\NN}\to{2^\NN}$ which is UTI on a cone, either $f(x)\ge_T x$ on a cone, or $f$ is constant up to $\equiv_T$ on a cone. \end{enumerate} \end{coroll} \begin{proof} $(a)\implies (b)$ is precisely \thref{umc1}, whereas $(b)\implies(c)$ is trivial. Let us prove $(a)$ from $(c)$. Fix $A\subseteq{2^\NN}$ which is Turing invariant (i.e.\ closed under $\equiv_T$), and define \[ f(x)=\begin{cases} \underline0=000\dots&\text{if $x\in A$,}\\ \underline0'&\text{if $x\not\in A$.} \end{cases} \] Of course, $f$ is UTI, so by $(c)$ we get that $f$ is constant on a cone up to $\equiv_T$. Then, either $f(x)\equiv_T\underline0$ on a cone, or $f(x)\equiv_T\underline0'$ on a cone. In the former case, $A$ contains a cone, in the latter one, the complement of $A$ does. \end{proof} In \cite{chong}, the authors calibrated the strength of part II of uniform Martin's conjecture for projective functions. \begin{thm}[Chong, Wang and Yu, \cite{chong}]\thlabel{thmchong} Over \ax{ZFC}, part II of projective uniform Martin's conjecture is equivalent to Projective Determinacy (which, by unpublished work by Woodin, is equivalent over \ax{ZFC} to Projective Turing Determinacy). \end{thm} Putting together \thref{metac} and \thref{thmchong}, and observing that our assumption of \ax{TD} in the proof of \thref{metac} ``localizes'', we get: \begin{thm} The following are equivalent over \ax{ZFC}: \begin{itemize} \item Projective Determinacy; \item Projective Turing Determinacy; \item part I of projective uniform Martin's conjecture; \item part II of projective uniform Martin's conjecture. \end{itemize} \end{thm} \section{The proof} We now address the proof of \thref{lmc1}.
The argument itself is very short and easy, but we need a few preliminaries and notation first. Recall that the join (or merge) of $x,y\in{2^\NN}$, is the element of ${2^\NN}$ denoted by $x\oplus y$ and defined by \[ (x\oplus y)(n)=\begin{cases} x\left(\frac n2\right)&\text{if $n$ is even,}\\ y\left(\frac{n-1}2\right)&\text{if $n$ is odd.} \end{cases} \] Moreover, the join (or merge) of a sequence $(x_n)_{n\in\mathbb{N}}$ of elements of ${2^\NN}$ is the element of ${2^\NN}$ denoted by $\bigoplus_{n }x_n$ defined by \[ \left( \bigoplus_{n }x_n\right) (\braket{i,j})= x_j(i), \] where $\braket{\cdot,\cdot}$ is a computable bijection between $\mathbb{N}^2$ and $\mathbb{N}$ chosen once for all. We shall say that $x_j$ is the $j$-th column of $\bigoplus_n x_n$, while $n\mapsto x_n(i)$ is its $i$-th row. The following fact easily descends from the existence of a universal oracle Turing machine. \begin{fact}\thlabel{complem} Fix $x\in{2^\NN}$ and a computable $t:\mathbb{N}\to\mathbb{N}$. If \begin{equation*} \bigoplus_{n } \varphi_{t(n)}^x \end{equation*} is in ${2^\NN}$, then it is Turing reducible to $x$. \end{fact} \begin{defi} For $e\in\mathbb{N}$ and $x\in{2^\NN}$, set \[ e\odot_T x= \begin{cases} \varphi_e^x &\text{if $\varphi_e^x\in{2^\NN}$}\\ \text{undefined}&\text{otherwise.} \end{cases} \] \end{defi} The graph of $\odot_T$ is the set of $(e,x,y)$ in $\mathbb{N}\times{2^\NN}\times{2^\NN}$ such that $y\le_T x$ via $e$; for this reason, we call $\odot_T$ ``\textbf{Turing reducibility via}''. \begin{notation} Let $\simeq$ denote Kleene's equality: $\varphi\simeq \psi$ holds exactly when, if either $\varphi$ or $\psi$ is defined, then the other is defined as well and the two are equal. 
\end{notation} Rephrasing the definition of UOP, given $A \subseteq{2^\NN}$, we have that $f:A\to {2^\NN}$ is UOP when there is $u:\mathbb{N}\to\mathbb{N}$ such that, for all $e\in\mathbb{N}$ and $x,y\in A$, \[ e\odot_T x\simeq y\implies u(e)\odot_T f(x)\simeq f(y), \] or equivalently, \begin{equation}\label{uop} e\odot_{T}x\text{ is defined and is in $A$}\implies u(e)\odot_T f(x)\simeq f(e\odot_T x). \end{equation} A function $u$ as above is called \textbf{uniformity function} for $f$. Also define, for $(i,j)\in\mathbb{N}^2$ and $x\in{2^\NN}$, \[ (i,j)\mathbin{^s\odot_T} x= \begin{cases} \varphi_i^x&\text{if $\varphi_i^x\in{2^\NN}$ and $\varphi_j^{\varphi_i^x}=x$,}\\ \text{undefined}&\text{otherwise.} \end{cases} \] The symbol $^s$ stands for `symmetrization': $\mathbin{^s\odot_T}$ can be viewed as some kind of symmetrization of $\odot_T$, as we have \begin{align*} (i,j)\mathbin{^s\odot_T} x\simeq y \iff \begin{cases} i\odot_T x\simeq y \\ j\odot_T y\simeq x. \end{cases} \end{align*} We call $\mathbin{^s\odot_T}$ \textbf{Turing equivalence via}, since its graph is the set of $\bigl((i,j),x,y\bigr)$ in $\mathbb{N}^2\times{2^\NN}\times{2^\NN}$ such that $x\equiv_T y$ via $(i,j)$. Similarly as above, note that $f:A\to {2^\NN}$ is UTI when there is $u:\mathbb{N}^2\to\mathbb{N}^2$ such that, for all $(i,j)\in\mathbb{N}^2$ and $x,y\in A$, \[ (i,j)\mathbin{^s\odot_T} x\simeq y\implies u(i,j)\mathbin{^s\odot_T} f(x)\simeq f(y), \] or equivalently, \begin{equation*}\label{uti} (i,j)\mathbin{^s\odot_T} x\text{ is defined and is in $A$}\implies u(i,j)\odot_T f(x)\simeq f\bigl((i,j)\mathbin{^s\odot_T} x\bigr). \end{equation*} Also in this case, $u$ is called a uniformity function for $f$. \begin{lem}\thlabel{comvar} Let $A\subseteq{2^\NN}$ be such that for all $x\in A$, the concatenations $0^\frown x$ and $1^\frown x$ are in $A$, too. Let $f:A\to{2^\NN}$ be either UOP or UTI. In either case, there is a computable uniformity function for $f$. 
\end{lem} \begin{proof} First, suppose $f$ is UOP and $u$ is a uniformity function for it. Consider the obvious binary operation $*_T$ on $\mathbb{N}$ such that \begin{equation*} \varphi_i^{\varphi_j^x}=\varphi_{i*_T j}^x, \end{equation*} so that we have \begin{equation}\label{act} j\odot_T x\text{ is defined}\implies i\odot_T (j\odot_T x)\simeq (i*_Tj)\odot_T x. \end{equation} Observe that $*_T$ is defined, at least implicitly, when showing that $\le_T$ is transitive. The crucial thing to note here is that $*_T$ is computable. Let $a,b,c\in\mathbb{N}$ be such that $\varphi_c^x=1^\frown x$, $\varphi_b^x=0^\frown x$ and \[ \varphi_a^{0^e 1^\frown x}=\varphi_e^x \] for all $x\in{2^\NN}$ ($0^e1$ is shorthand for $\underbrace{0\dots0}_e1$). Also, let $ij$ be shorthand for $i*_Tj$, $ijk$ for $i*_T(j*_T k)$, and so on.\footnote{In fact, $*_T$ is associative and \eqref{act} tells us that $\odot_T$ resembles an action of the semi-group $(\mathbb{N},*_T)$.} Now, fix $x\in A$ and $e\in\mathbb{N}$ such that $e\odot_T x$ is defined and is in $A$ and notice that we have \[ e\odot_T x \simeq (ab^ec)\odot_T x. \] Therefore: \begin{align*} f(e\odot_T x)\simeq f\bigl((ab^ec)\odot_T x\bigr) &\simeq u(a)\odot_T f\bigl((b^ec)\odot_T x\bigr)\\ &\;\;\vdots\\ &\simeq u(a)u(b)^eu(c)\odot_T f(x), \end{align*} where we used \eqref{act}, \eqref{uop} and the hypothesis that $(b^kc)\odot_T x$ is defined and is in $A$ for all $k\le e$.\footnote{The author wishes to thank Kirill Gura for pointing out the necessity of the hypothesis that $A$ is closed under initial appending of $0$'s and $1$'s in order to carry on this argument.} Thus, setting \[ v(e)= u(a)u(b)^e u(c) \] we get that $v$ is a uniformity function for $f$, and since $u(a)$, $u(b)$, $u(c)$ are three fixed natural numbers and $*_T$ is computable, $v$ is computable, too. When $f$ is UTI, the argument is analogous. This time, define \[ (i,j)\mathbin{^s*_T}(k,l)= (i*_T k,l*_T j).
\] Abbreviate $(i,j)\mathbin{^s*_T} (k,l)$ as $(i,j)(k,l)$ and $(i,j)\bigl( (k,l)(m,n)\bigr)$ as $(i,j)(k,l)(m,n)$.\footnote{Again, $^s*_T$ is associative, but this is unnecessary for the scope of this proof.} Observe that we have \[ \text{$(k,l)\mathbin{^s\odot_T} x$ is defined}\implies (i,j)\mathbin{^s\odot_T}\bigl((k,l)\mathbin{^s\odot_T} x\bigr)\simeq (i,j)(k,l)\mathbin{^s\odot_T} x. \] Pick $m\in\mathbb{N}$ such that $\varphi_m^x(n)=x(n+1)$ for all $n$, and notice that, with $b,c\in\mathbb{N}$ as before, we have, for all $x\in{2^\NN}$: \begin{align*} (c,m)\mathbin{^s\odot_T} x&\simeq 1^\frown x & (b,m)\mathbin{^s\odot_T} x&\simeq 0^\frown x ; \\ (m,c)\mathbin{^s\odot_T} (1^\frown x)&\simeq x & (m,b)\mathbin{^s\odot_T} (0^\frown x)&\simeq x . \end{align*} Also let $d\in\mathbb{N}$ be such that, for all $x\in{2^\NN}$, \[ \varphi_d^{0^i10^j1^\frown x}=0^j10^i1^\frown\varphi_i^x. \] Now observe that, for $x,y\in{2^\NN}$: \begin{gather*} (i,j)\mathbin{^s\odot_T} x\simeq y \implies (d,d)\mathbin{^s\odot_T} (0^i10^j1^\frown x) \simeq 0^j10^i1^\frown y \\ \implies (d,d)(b,m)^i(c,m)(b,m)^j(c,m)\mathbin{^s\odot_T} x\simeq (b,m)^j(c,m)(b,m)^i(c,m)\mathbin{^s\odot_T} y,\\ \implies (m,c)(m,b)^i(m,c)(m,b)^j(d,d)(b,m)^i(c,m)(b,m)^j(c,m)\mathbin{^s\odot_T} x\simeq y. \end{gather*} Thus, if $u$ is a uniformity function for $f$, we can set \[ v(i,j)= u(m,c)u(m,b)^iu(m,c)u(m,b)^ju(d,d)u(b,m)^iu(c,m)u(b,m)^ju(c,m) \] and the same argument as before gives us that $v$ is a computable uniformity function for $f$. \end{proof} \begin{proof}[Proof of \thref{lmc1}] Suppose $f$ is not constant, so that there is $z\equiv_T x$ such that $f(x)\ne f(z)$. Obviously, there is a computable function $r$ such that \[ \varphi_{r(n)}^x= \begin{cases} x &\text{if $x(n)=1$,}\\ z &\text{if $x(n)=0$.} \end{cases} \] Also obviously, there is $e\in\mathbb{N}$ such that \[ \varphi_e^x=\varphi_e^z=x.
\] Setting $t:\mathbb{N}\to\mathbb{N}^2,n\mapsto(r(n),e)$, we get that $t$ is computable and \[ t(n)\mathbin{^s\odot_T} x\simeq \begin{cases} x &\text{if $x(n)=1$,}\\ z &\text{if $x(n)=0$.} \end{cases} \] We thus have \[ f\bigl(t(n)\mathbin{^s\odot_T} x\bigr)= \begin{cases} f(x) &\text{if $x(n)=1$,}\\ f(z)&\text{if $x(n)=0$.} \end{cases} \] This means the columns of $\bigoplus_n f(t(n)\mathbin{^s\odot_T} x)$ are either $f(x)$ or $f(z)$, and they alternate exactly as the bits of $x$ do. So, supposing that $f(x)$ and $f(z)$ differ on the $k$-th digit, the $k$-th row of $\bigoplus_{n}f\bigl(t(n)\mathbin{^s\odot_T} x\bigr)$ is either $x$ or $i\mapsto1-x(i)$, and hence \begin{equation*} \bigoplus_{n}f\bigl(t(n)\mathbin{^s\odot_T} x\bigr)\ge_T x. \end{equation*} But also, if we let $u$ be a computable uniformity function for $f$ (which exists by \thref{comvar}) and $\pi:\mathbb{N}^2\to\mathbb{N}$ be the projection on the first coordinate, we get \[ \bigoplus_{n }f(t(n)\mathbin{^s\odot_T} x)=\bigoplus_{n }\Bigl(u(t(n))\mathbin{^s\odot_T} f(x)\Bigr)=\bigoplus_{n }\varphi^{f(x)}_{\pi\circ u\circ t(n)}, \] so \thref{complem} tells us that \[ f(x)\ge_T \bigoplus_{n}f\bigl(t(n)\mathbin{^s\odot_T} x\bigr).\qedhere \] \end{proof} \section{Applications} \subsection{Comparing Turing degrees as structures} Although Turing degrees are usually viewed as the ``atoms'' of the main structure investigated in computability theory, namely $(\mathcal D,\le_T)$, Turing reductions provide each Turing degree with a structure, so we might study Turing degrees as structures themselves. 
Given $A\subseteq{2^\NN}$, we call Turing reducibility on $A$ the following two-sorted relation: \[ (\odot_T^A)=\Set{(e,x,y)\in\mathbb{N}\times A\times A| y\le_Tx\text{ via }e}, \] or, with an abuse of language, the underlying two-sorted structure $(A,\mathbb{N};\odot_T^A)$.\footnote{Note that $\mathbb{N}$ does not carry any structure with it in $(A,\mathbb{N};\odot_T^A)$.} Turing equivalence via on $A$ is defined analogously. Even though single Turing degrees are trivial structures when equipped with Turing reducibility or equivalence, they are \emph{not} trivial when endowed with Turing reducibility \emph{via} or Turing equivalence \emph{via}. So, for instance, we might want to understand whether the complexity of $\bm x$ as a structure depends on the computational complexity of $\bm x$ as a Turing degree, or how the structure on $\bm x$ relates to the structure on a different $\bm y$. An \textbf{embedding} of $(\bm x,\mathbb{N};\odot_T^{\bm x})$ into $(\bm y,\mathbb{N};\odot_T^{\bm y})$ (or, more briefly, of $\odot_T^{\bm x}$ into $\odot_T^{\bm y}$) is a pair of functions $(f,u)$, with $f:\bm x\to\bm y$ and $u:\mathbb{N}\to\mathbb{N}$ preserving the truth of atomic formulas in both directions, which means \begin{align*} i=j&\iff u(i)=u(j)\\ x=z&\iff f(x)=f(z)\\ i\odot_T^{\bm x} x\simeq z&\iff u(i)\odot_T^{\bm y} f(x)\simeq f(z), \end{align*} for all $i,j\in\mathbb{N}$ and $x,z\in\bm x$. In other words, $f$ and $u$ are injective and preserve Turing reducibility via in both directions.
\begin{thm}\thlabel{thm2} For all Turing degrees $\bm x$ and $\bm y$, the following are equivalent: \begin{enumerate} \item the structure on $\bm x$ is embeddable in the structure on $\bm y$, when the structure is given by Turing reducibility via;\label{emb} \item the structure on $\bm x$ is embeddable in the structure on $\bm y$, when the structure is given by Turing equivalence via;\label{emb2} \item $\bm x\le_T\bm y$.\label{red} \end{enumerate} \end{thm} \begin{proof} $\mathit{\ref{emb}}\implies\mathit{\ref{emb2}}$: if $(f,u)$ is an embedding of $\odot_T^{\bm x}$ into $\odot_T^{\bm y}$, then $(f,u\times u)$ is an embedding of $\mathbin{^s\odot_T^{\bm x}}$ into $\mathbin{^s\odot_T^{\bm y}}$, where $u\times u$ is the map $(i,j)\mapsto(u(i),u(j))$. $\mathit{\ref{emb2}}\implies\mathit{\ref{red}}$: if $(f,u)$ is an embedding of $\mathbin{^s\odot_T^{\bm x}}$ into $\mathbin{^s\odot_T^{\bm y}}$, then $f$ is an injective (hence non-constant) UTI function from $\bm x$ to $\bm y$, so we get $\bm x\le_T\bm y$ from \thref{lmc1}. $\mathit{\ref{red}}\implies\mathit{\ref{emb}}$: choose $y\in\bm y$ and define $f:\bm x\to\bm y,z\mapsto z\oplus y$. Observe that $f$ is injective and its range is indeed included in $\bm y$ because $\bm x\le_T\bm y$. It is easy to see that there is an injective $u:\mathbb{N}\to\mathbb{N}$ such that, for all $z_1,z_2,z_3\in{2^\NN}$ and all $i\in\mathbb{N}$, we have \[ i\odot_T z_1=z_2\iff u(i)\odot_T (z_1\oplus z_3)=z_2\oplus z_3. \] Thus, $(f,u)$ is an embedding of $\odot_T^{\bm x}$ into $\odot_T^{\bm y}$. \end{proof} \begin{oss} The clearness of \thref{thm2} seems rather peculiar to the Turing case: for example, we can formulate a \emph{via} version for arithmetic reducibility and equivalence, too, but the analog of \thref{thm2} would fail in the arithmetic case.
Indeed, taking $g:{2^\NN}\to{2^\NN}$ to be a counter-example of the arithmetic uniform Martin's conjecture, part I (proved to exist by Slaman and Steel, see \cite{mss}), we get that there is $z\in{2^\NN}$ such that, for all $x\ge_A z$, arithmetic reducibility via on $[x]_{\equiv_A}$ is embeddable into arithmetic reducibility via on $[g(x)]_{\equiv_A}$ even though $x\not\le_A g(x)$. Indeed, although almost every part of the proof remains valid in the arithmetic case, \thref{complem} does not, as there is no universal arithmetic reduction. Hence, the best that we can get from the analog of \thref{lmc1} is that every non-constant uniformly arithmetically invariant $f:[x]_{\equiv_A}\to{2^\NN}$ satisfies $x\le_T \aj{f(x)}$. However, this is not enough to characterize those pairs of arithmetic degrees $(\bm x,\bm y)$ such that there is an embedding (or a homomorphism) from $\bm x$ to $\bm y$. \end{oss} \subsection{Reducing $\le_T$ to computable reducibility}\label{sec5} Recall that, given two binary relations $R$ and $S$ on sets $X$ and $Y$ respectively, a \textbf{homomorphism} from $R$ to $S$ is a function $f:X\to Y$ such that \[ x\mathbin R y\implies f(x)\mathbin S f(y),\quad\forall x,y\in X. \] Furthermore, such $f$ is a \textbf{reduction} if \[ x\mathbin R y\iff f(x)\mathbin S f(y),\quad\forall x,y\in X. \] When $X$ and $Y$ are standard Borel spaces and there is a Borel reduction from $R$ to $S$, one says that $R$ is \textbf{Borel reducible} to $S$, and writes $R\le_B S$. On the other hand, when $X=Y=\mathbb{N}$ and there is a computable reduction from $R$ to $S$, one says that $R$ is \textbf{computably reducible} to $S$, and writes $R\le_c S$. When $R\le_c S$ and $S\le_c R$, one says that $R$ and $S$ are \textbf{computably bi-reducible}, and writes $R\sim_c S$. Analogously, one defines Borel bi-reducibility $\sim_B$ as the symmetrization of $\le_B$. 
Such reducibility notions are well-established tools to compare the complexity of equivalence relations, and thus measure the difficulty of the classification problems that equivalence relations embody (see, for example, \cite{Gao} for $\le_B$ and \cite{coskey} for $\le_c$). Borel reducibility is frequently used to compare quasi-orders, too. Computable reducibility and bi-reducibility are themselves a Borel quasi-order and a Borel equivalence relation respectively, whether they are considered on the Polish space $2^{\NN\times\NN}$ of all binary relations, or they are considered on the closed subset (hence, Polish space itself) $\mathrm{ER}\subseteq2^{\NN\times\NN}$ of all equivalence relations on $\mathbb{N}$. For the rest of the paper, we refer to $\le_c$ and $\sim_c$ as being defined on $\mathrm{ER}$. \begin{defi} When a Borel quasi-order (resp.\ equivalence relation) has countable downward cones (resp.\ equivalence classes) it is called a \textbf{countable Borel quasi-order} (resp.\ \textbf{countable Borel equivalence relation}). \end{defi} These are well-studied classes of Borel relations (see, for instance, \cite{williams} and \cite{cber}). For instance, $\le_T$ and $\le_c$ are countable Borel quasi-orders and $\equiv_T$ and $\sim_c$ are countable Borel equivalence relations. As we are going to show, essentially the same argument that led to \thref{lmc1} entails that $\le_T$ is Borel reducible to ${\le_c}$, and hence $\equiv_T$ is Borel reducible to ${\sim_c}$. \begin{defi} For $x\in{2^\NN}$, define $\ap x$ to be the equivalence relation on $\mathbb{N}$ given by \begin{align*} i\ap x j \iff \varphi_i^x=\varphi_j^x. \end{align*} \end{defi} \begin{thm}\thlabel{redu} The map $x\mapsto{\ap x}$ is a Borel reduction from $\le_T$ to $\le_c$ (and hence from $\equiv_T$ to $\sim_c$).
\end{thm} \begin{proof} If $x\le_T y$, say $x=\varphi_k^y$, then recall the definition of $*_T$ from the proof of \thref{comvar} and note that \[ i\ap x j\iff i \ap{\varphi_k^y} j \iff (i*_T k)\ap y (j*_T k), \] so the map $i\mapsto i*_T k$ is a computable reduction from $\approx_T^x$ to $\approx_T^y$. Vice versa, suppose $(\ap x)\le_c(\ap y)$ as witnessed by the computable reduction $v$. We exploit the same idea as in \thref{lmc1}. Choose any two $a,b\in\mathbb{N}$ such that $a\not\ap x b$ and hence, since $v$ is a reduction, $v(a)\not\ap y v(b)$. This means there is some $k$ such that $\varphi_{v(a)}^y(k)\not\simeq\varphi_{v(b)}^y(k)$. Thus, $\varphi_{v(a)}^y(k)$ and $\varphi_{v(b)}^y(k)$ cannot both be undefined, so suppose, for example, that $\varphi_{v(a)}^y(k)$ is defined and equals, say, $m$; then, whether it is defined or not, $\varphi_{v(b)}^y(k)$ does not equal $m$. Take now a computable function $r$ such that, for all $n$, \begin{align*} \varphi_{r(2n)}^x= \begin{cases} \varphi_a^x&\text{if $x(n)=1$}\\ \varphi_b^x&\text{if $x(n)=0$} \end{cases} \qquad \varphi_{r(2n+1)}^x= \begin{cases} \varphi_b^x&\text{if $x(n)=1$}\\ \varphi_a^x&\text{if $x(n)=0$.} \end{cases} \end{align*} Using the fact that $v$ is a reduction (in particular, a homomorphism), we get \begin{align*} \varphi_{v(r(2n))}^y= \begin{cases} \varphi_{v(a)}^y&\text{if $x(n)=1$}\\ \varphi_{v(b)}^y&\text{if $x(n)=0$} \end{cases} \qquad \varphi_{v(r(2n+1))}^y= \begin{cases} \varphi_{v(b)}^y&\text{if $x(n)=1$}\\ \varphi_{v(a)}^y&\text{if $x(n)=0$.} \end{cases} \end{align*} Now, to know if $x(n)$ equals $1$ or $0$, it suffices for $y$ to compute $\varphi_{v(r(2n))}^y(k)$ and $\varphi_{v(r(2n+1))}^y(k)$ in parallel and wait for $m$ to come out as the output of either computation. If $m$ comes from $\varphi_{v(r(2n))}^y(k)$, then $x(n)=1$, otherwise, if it comes from $\varphi_{v(r(2n+1))}^y(k)$, then $x(n)=0$.
Since $v$ and $r$ are computable and there exists a universal oracle Turing machine, the function $n\mapsto\varphi_{v(r(n))}^y$ is computable in $y$, and hence the procedure above describes a program that computes $x$ from $y$. Thus, we have \[ x\le_T y\iff (\ap x)\le_c(\ap y) \] and the Borelness of the map is clear. \end{proof} \begin{oss} The connection between \thref{lmc1} and \thref{redu} is the following. If we examine the proof of the former, we can observe that it holds not only when $f:[x]_{\equiv_T}\to{2^\NN}$ is UTI, but it suffices that $f$ admits a \emph{computable} function $u$ such that \[ f((i,j)\mathbin{^s\odot_T} x)=u(i,j)\mathbin{^s\odot_T} f(x),\text{ for all $(i,j)$ s.t.\ $(i,j)\mathbin{^s\odot_T} x$ is defined.} \] Note that such $u$ need not be a uniformity function for $f$, as the previous formula need not hold for all elements of $[x]_{\equiv_T}$, but just for $x$. On the other hand, if we define $\mathop{\downarrow_T} x=\set{\varphi_e^x|e\in\mathbb{N}}$, then a homomorphism $v$ from $\ap x$ to $\ap y$ defines a function $f:(\mathop{\downarrow_T} x)\to (\mathop{\downarrow_T} y)$ by \[ f(\varphi_e^x)=\varphi_{v(e)}^y,\quad\forall e\in\mathbb{N} \] and vice versa. When $v$ is not just a homomorphism, but a reduction, then $f$ is injective, in particular non-constant. Thus, we can view the proof that $(\ap x)\le_c(\ap y)$ implies $x\le_T y$ as an argument in the style of \thref{lmc1} applied to such $f$. \end{oss} \begin{oss} In \cite{coskey}, the authors indicated a way to turn an equivalence relation $E$ on ${2^\NN}$ into an equivalence relation $E^{\mathit{ce}}$ on $\mathbb{N}$, defined by \[ i\mathbin{E^{\mathit{ce}}}j\iff W_i\mathbin E W_j, \] where $W_i$ denotes the $i$-th computably enumerable set, i.e.\ $\dom(\varphi_i)$. In particular, they studied $=^{\mathit{ce}}$. Of course, the same process can be done relative to any oracle $x\in{2^\NN}$: we could define \[ i\mathbin{E^{\mathit{ce},x}}j\iff W_i^x\mathbin E W_j^x.
\] Then, it is easy to see that $(=^{\mathit{ce},x})\sim_c(\ap x)$ for all $x$, so the map $x\mapsto(=^{\mathit{ce},x})$ is another Borel reduction from $\le_T$ to $\le_c$. It would be interesting to understand the behavior of the map $T$ that takes a countable Borel equivalence relation $E$ to the Borel equivalence relation $T(E)$ that makes the map $x\mapsto E^{\mathit{ce},x}$ a reduction from $T(E)$ to $\sim_c$. \end{oss} In the theory of countable Borel equivalence relations, a fundamental result by Adams and Kechris revealed the intricacy of the structure of $\le_B$ on countable Borel equivalence relations. \begin{thm}[Adams-Kechris, \cite{ak:2000}] The partial order of Borel sets under inclusion can be embedded in the quasi-order of Borel reducibility of countable Borel equivalence relations, i.e., there is a map $A\mapsto E_A$ from the Borel subsets of $\mathbb R$ to countable Borel equivalence relations such that $A\subseteq B\iff E_A\le_B E_B$. In particular it follows that any Borel partial order can be embedded in the quasi-order of Borel reducibility of countable Borel equivalence relations. \end{thm} This theorem disclosed at once many features of $\le_B$ on countable Borel equivalence relations, like --- for instance --- that it features antichains of size $2^{\aleph_0}$ and chains of size $\aleph_1$. \thref{redu} can be viewed as something similar for the theory of equivalence relations on $\mathbb{N}$. Indeed, we know from computability theory that there are many orders that we can embed into the Turing degrees. \begin{thm}[Sacks, \cite{sacks:1961}] Every partial order of cardinality $\le\aleph_1$ in which every downward cone is countable can be embedded into the Turing degrees. \end{thm} \begin{coroll} Let $\mathrm{ER}$ be the set of equivalence relations on $\mathbb{N}$. Every partial order of cardinality $\le\aleph_1$ in which every downward cone is countable can be embedded into $(\mathrm{ER}/{\sim_c},{\le_c})$.
\end{coroll} We also know that there are antichains of Turing degrees of size $2^{\aleph_0}$ (for example, the one given by minimal Turing degrees). \begin{coroll} There are $2^{\aleph_0}$ equivalence relations on $\mathbb{N}$ that are mutually $\le_c$-incomparable. \end{coroll} \section{Part II, locally?} After showing that part I of uniform Martin's conjecture is the consequence of a local phenomenon, it is natural to ask whether this is also the case for part II. In \cite{becker}, Becker reproved part II of uniform Martin's conjecture in a particularly perspicuous way: he used the descriptive set-theoretic notion of \virg{reasonable pointclass} and proved that, under \ax{AD}, every UTI $f>_M\id_{{2^\NN}}$ is Turing equivalent on a cone to a $\Gamma$-jump operator \[ J_\Gamma:x\mapsto\text{a universal $\Gamma(x)$ subset of $\mathbb{N}$} \] for some reasonable pointclass $\Gamma$. Reasonable pointclasses are indeed lightface pointclasses that can be relativized to arbitrary $x\in{2^\NN}$ and admit universal sets. For example, the Turing jump $x\mapsto x'$ is a $\Sigma^0_1$-jump operator, the relativization of Kleene's $\mathcal O$, $x\mapsto\mathcal O^x$, is a $\Pi^1_1$-jump operator, and so on. Part II of uniform Martin's conjecture then follows from the link between the ordering $\le_M$ on pointclass jump operators and Wadge reducibility $\le_W$ on ${2^\NN}$. Recent work by Kihara and Montalb\'an \cite{kihara} improved Becker's result, pushing this connection even further. Thus, we might ask whether these results arise locally. In fact, Becker's theorem, and \emph{a fortiori} Kihara and Montalb\'an's, tell us that, up to Turing equivalence on a cone, there exist no other UTI functions besides constant functions, the identity function and pointclass jump operators (under \ax{AD}), so it is natural to ask whether any UTI functions that have nothing to do with constant functions, the identity function and pointclass jump operators can exist locally.
\begin{question} Fix a Turing degree $\bm x$, and consider the smallest family $\mathcal J_{\bm x}$ of functions $f:\bm x\to{2^\NN}$ that contains \begin{itemize} \item all constant functions from $\bm x$ to ${2^\NN}$ \item $\id_{\bm x}$ and $\bm x\ni x\mapsto \bar x$, where $\bar x:i\mapsto\bigl(1-x(i)\bigr)$ \item all pointclass jump operators defined on $\bm x$ \end{itemize} and such that, if $f_0, f_1,\dots$ are in $\mathcal J_{\bm x}$, then $f_0\oplus f_1$, $\bigoplus_n f_n$ and $(i,j)\mathbin{^s\odot_T} f_0$ are in $\mathcal J_{\bm x}$, too, for all $(i,j)$ such that $(i,j)\mathbin{^s\odot_T} f_0(x)$ is defined for each $x\in\bm x$.\footnote{Of course, these operations are meant to be pointwise, e.g.\ $(f_0\oplus f_1)(x)=f_0(x)\oplus f_1(x)$.} Every function in $\mathcal J_{\bm x}$ is UTI. Is $\mathcal J_{\bm x}$ the set of \emph{all} UTI functions from $\bm x$ to ${2^\NN}$? \end{question} \printbibliography \end{document}
\begin{document} \title{A convenient category for directed homotopy} \author[L. Fajstrup and J. Rosick\'{y}] {L. Fajstrup and J. Rosick\'{y}$^*$} \thanks{ $^*$ Supported by the Ministry of Education of the Czech republic under the projects MSM 0021622409 and 1M0545. The hospitality of the Aalborg University is gratefully acknowledged.} \address{\newline L. Fajstrup\newline Department of Mathematics\newline University of Aalborg\newline Fredrik Bajers Vej 7G, DK9220 Aalborg {\O}st, Denmark\newline Denmark\newline [email protected] \newline\newline J. Rosick\'{y}\newline Department of Mathematics and Statistics\newline Masaryk University, Faculty of Sciences, \newline Jan\'{a}\v{c}kovo n\'{a}m. 2a, 60200 Brno, Czech Republic\newline [email protected] } \begin{abstract} We propose a convenient category for directed homotopy consisting of preordered topological spaces generated by cubes. Its main advantage is that, like the category of topological spaces generated by simplices suggested by J. H. Smith, it is locally presentable. \end{abstract} \keywords{simplex-generated spaces, directed homotopy, dicovering} \maketitle \section{Introduction} We propose a convenient category for doing directed homotopy whose main advantage is its local presentability. It is based on the suggestion of J. H. Smith to use $\Delta$-generated topological spaces as a convenient category for usual homotopy. His suggestion was written down by D. Dugger \cite{D} but it turns out that it is not clear how to prove that the resulting category is locally presentable. We will present the missing proof and, in fact, we prove a more general result saying that for each fibre-small topological category $\ck$ and each small full subcategory $\ci$, the category $\ck_\ci$ of $\ci$-generated objects in $\ck$ is locally presentable. In the case of J. H. 
Smith, we take as $\ck$ the category $\Top$ of topological spaces and continuous maps and as $\ci$ the full subcategory consisting of simplices $\Delta_n$, $n=0,1,2,\dots$. Recall that a category $\ck$ is topological if it is equipped with a faithful functor $U:\ck\to\Set$ to the category of sets such that one can mimic the formation of ``initially generated topological spaces'' (see \cite{AHS}). The category $\dSpace$ of d-spaces (in the sense of \cite{G}) is topological and its full subcategory generated by suitably ordered cubes is our proposed convenient category for directed homotopy. The idea of suitably generated topological spaces is quite old and goes back to \cite{W} and \cite{V} where the aim was to get a cartesian closed replacement of $\Top$. The classical choice of $\ci$ is the category of compact Hausdorff spaces. The insight of Smith is that the smallness of $\ci$ makes $\Top_\ci$ locally presentable. By \cite{W} 3.3, $\Top_\Delta$ is even cartesian closed. \section{Locally presentable categories} A category $\ck$ is \textit{locally} $\lambda$-\textit{presentable} (where $\lambda$ is a regular cardinal) if it is cocomplete and has a set $\ca$ of $\lambda$-presentable objects such that every object of $\ck$ is a $\lambda$-directed colimit of objects from $\ca$. A category which is locally $\lambda$-presentable for some regular cardinal $\lambda$ is called \textit{locally pre\-sen\-tab\-le}. Recall that an object $K$ is $\lambda$-presentable if its hom-functor $\hom(K,-):\ck\to\Set$ preserves $\lambda$-filtered colimits. We will say that $K$ is \textit{presentable} if it is $\lambda$-presentable for some regular cardinal $\lambda$. A useful characterization is that a category $\ck$ is locally presentable if and only if it is cocomplete and has a small dense full subcategory consisting of presentable objects (see \cite{AR}, 1.20). Distinguished advantages of locally presentable categories are the following two results.
Recall that, given morphisms $f:A\to B$ and $g:C\to D$ in a category $\ck$, we write $$ f\square g\quad\quad (f\perp g) $$ if, in each commutative square $$ \xymatrix{ A \ar [r]^{u} \ar [d]_{f}& C \ar [d]^{g}\\ B\ar [r]_{v}& D } $$ there is a (unique) diagonal $d:B\to C$ with $df=u$ and $gd=v$. For a class $\ch$ of morphisms of $\ck$ we put \begin{align*} \ch^{\square}&=\{g| f\square g \mbox{ for each } f\in \ch\},\\ {}^{\square}\ch&= \{f| f\square g \mbox{ for each } g\in \ch\},\\ \ch^{\perp}&=\{g| f\perp g \mbox{ for each } f\in \ch\},\\ {}^{\perp}\ch&= \{f| f\perp g \mbox{ for each } g\in \ch\}.\\ \end{align*} The smallest class of morphisms of $\ck$ containing isomorphisms and being closed under transfinite compositions and pushouts of morphisms from $\ch$ is denoted as $\cof(\ch)$ while the smallest class of morphisms of $\ck$ closed under all colimits (in the category $\ck^\to$ of morphisms of $\ck$) and containing $\ch$ is denoted as $\colim(\ch)$. Given two classes $\cl$ and $\crr$ of morphisms of $\ck$, the pair $(\cl,\crr)$ is called a \textit{weak factorization system} if \begin{enumerate} \item $\crr = \cl^\square$, $\cl = {}^\square \crr$ \end{enumerate} and \begin{enumerate} \item[(2)] any morphism $h$ of $\ck$ has a factorization $h= gf$ with $f\in\cl$ and $g\in\crr$. \end{enumerate} The pair $(\cl,\crr)$ is called a \textit{factorization system} if condition (1) is replaced by \begin{enumerate} \item[(1')] $\crr = \cl^\perp$, $\cl = {}^\perp \crr$. \end{enumerate} While the first result below can be found in \cite{B} (or \cite{AHRT}), we are not aware of any published proof of the second one. \begin{theo}\label{th2.1} Let $\ck$ be a locally presentable category and $\cc$ a set of morphisms of $\ck$. Then $(\cof(\cc),\cc^\square)$ is a weak factorization system in $\ck$. \end{theo} \begin{theo}\label{th2.2} Let $\ck$ be a locally presentable category and $\cc$ a set of morphisms of $\ck$. 
Then $(\colim(\cc),\cc^\perp)$ is a factorization system in $\ck$. \end{theo} \begin{proof} It is easy to see (and well known) that $$ \colim(\cc)\subseteq{}^\perp(\cc^\perp). $$ It is also easy to see that $g:C\to D$ belongs to $\cc^\perp$ if and only if it is orthogonal in $\ck\downarrow D$ to each morphism $f:(A,vf)\to (B,v)$ with $f\in\cc$. By \cite{AR}, 4.4, it is equivalent to $g$ being injective to a larger set of morphisms of $\ck\downarrow D$. Since this larger set is constructed using pushouts and pushouts in $\ck\downarrow D$ are given by pushouts in $\ck$, $g:C\to D$ belongs to $\cc^\perp$ if and only if it is injective in $\ck\downarrow D$ to each morphism $f:(A,vf)\to (B,v)$ with $f\in\bar{\cc}$, where $\bar{\cc}$ is given as follows. Given $f\in\cc$, we form the pushout of $f$ and $f$ and consider the unique morphism $f^\ast$ making the following diagram commutative $$ \xymatrix@C=4pc@R=4pc{ A\ar[r]^f \ar[d]_f & B\ar[d]^{p_2}\ar[ddr]^{\id_B}&\\ B\ar[r]_{p_1}\ar[drr]_{\id_B}& A^\ast \ar[dr]^{f^\ast}&\\ &&B } $$ Then $f^\ast$ belongs to $\colim(\cc)$ because it is the pushout of $f:f\to\id_B$ and $f:f\to\id_B$ in $\ck^\to$ and $f,\id_B\in\colim(\cc)$: $$ \xymatrix@C=2pc@R=2pc{ B\ar[rrr]^{\id_B} \ar[ddd]_{\id_B} & & & B\ar[ddd]^{\id_B} \\ & A\ar[ul]_f \ar[r]^f \ar[d]_f & B\ar[ur]_{\id_B} \ar[d]^{p_2} & \\ & B \ar[dl]^{\id_B}\ar[r]_{p_1} & A^\ast \ar[dr]^{f^\ast} & \\ B \ar[rrr]_{\id_B} &&& B } $$ Since $\bar{\cc}$ is a set, $(\cof(\bar{\cc}),\bar{\cc}^\square)$ is a weak factorization system (by \ref{th2.1}). We have shown that $$ \bar{\cc}^\square=\cc^\perp $$ and $$ \bar{\cc}\subseteq\colim(\cc). $$ The consequence is that $$ \cof(\bar{\cc})\subseteq\colim(\cc). $$ It follows from the fact that each pushout of a morphism $f$ belongs to $\colim(\{f\})$ (see \cite{IK}, (the dual of) M13) and a transfinite composition of morphisms belongs to their colimit closure.
In fact, given a smooth chain of morphisms $(f_{ij}: K_i\to K_j)_{i<j<\lambda}$ (i.e., $\lambda$ is a limit ordinal, $f_{jk}f_{ij}=f_{ik}$ for $i<j<k$ and $f_{ij}: K_i\to K_j$ is a colimit cocone for any limit ordinal $j<\lambda$), let $f_i:K_i \to K$ be a colimit cocone. Then $f_0$, which is the transfinite composition of $f_{ij}$, is a colimit in $\ck^\to$ of the chain $$ \xymatrix@C=3pc@R=3pc{ K_0 \ar[r]^{\id_{K_0}}\ar[d]_{f_{00}}& K_0 \ar[r] \ar[d]^{f_{01}}&\ar @{.}[r] & K_0\ar[d]^{f_{0}}\\ K_0\ar[r]_{f_{01}}& K_1 \ar[r]& \ar@{.}[r] & K } $$ Thus we have $$ \cof(\bar{\cc})\subseteq{}^\perp(\cc^\perp). $$ Conversely, $$ {}^\perp(\cc^\perp)\subseteq{}^\square{}(\cc^\perp)={}^\square(\bar{\cc}^\square)=\cof(\bar{\cc}). $$ We have proved that $(\colim(\cc),\cc^\perp)$ is a factorization system. \end{proof} \section{Generated spaces} A functor $U:\ck\to\Set$ is called \textit{topological} if each cone $$ (f_i:X\to UA_i)_{i\in I} $$ in $\Set$ has a unique $U$-initial lift $(\bar{f}_i:A\to A_i)_{i\in I}$ (see \cite{AHS}). It means that \begin{enumerate} \item $UA=X$ and $U\bar{f}_i=f_i$ for each $i\in I$ and \item given $h:UB\to X$ with $f_ih=U\bar{h}_i$, $\bar{h}_i:B\to A_i$ for each $i\in I$ then $h=U\bar{h}$ for $\bar{h}:B\to A$. \end{enumerate} Each topological functor is faithful and thus the pair $(\ck,U)$ is a concrete category. Such concrete categories are called topological. The motivating example of a topological category is $\Top$. \begin{exam}\label{ex.3.1} { \em (1) A preordered set $(A,\leq)$ is a set $A$ equipped with a reflexive and transitive relation $\leq$. It means that it satisfies the formulas $$ (\forall x)(x\leq x) $$ and $$ (\forall x,y,z)(x\leq y\wedge y\leq z\to x\leq z). $$ Morphisms of preordered sets are isotone maps, i.e., maps preserving the relation $\leq$. The category of preordered sets is topological.
The $U$-initial lift of a cone $(f_i:X\to UA_i)_{i\in I}$ is given by putting $a\leq b$ on $X$ if and only if $f_i(a)\leq f_i(b)$ for each $i\in I$. (2) An ordered set is a preordered set $(A,\leq)$ where $\leq$ is also antisymmetric, i.e., if it satisfies $$ (\forall x,y)(x\leq y\wedge y\leq x\to x=y). $$ The category of ordered sets is not topological because the underlying functor to sets does not preserve colimits. } \end{exam} All three formulas from the example are strict universal Horn formulas and the difference between the first two and the third one is that antisymmetry uses the equality. It was shown in \cite{R} that this situation is typical. But one has to use the logic $L_{\infty,\infty}$ (see \cite{Di}). It means that one has a class of relation symbols whose arities are arbitrary cardinal numbers and one uses conjunctions of an arbitrary set of formulas and quantifications over an arbitrary set of variables. A relational universal strict Horn theory $T$ without equality then consists of formulas $$ (\forall x)(\varphi(x)\to\psi(x)) $$ where $x$ is a set of variables and $\varphi,\psi$ are conjunctions of atomic formulas without equality. The category of models of a theory $T$ is denoted by $\Mod(T)$. \begin{theo}\label{th3.2} Each fibre-small topological category $\ck$ is isomorphic (as a concrete category) to a category of models of a relational universal strict Horn theory $T$ without equality. \end{theo} This result was proved in \cite{R}, 5.3. A theory $T$ can consist of a proper class of formulas. When $T$ is a set, $\Mod(T)$ is locally presentable (see \cite{AR}, 5.30). The theory for $\Top$ is given by an ultrafilter convergence (see \cite{R}, 5.4) and it was presented by Manes \cite{M}. This theory is not a set of formulas. The category $\Top$ is far from being locally presentable because it does not have a small dense full subcategory (see \cite{AR}, 1.24(7)) and no non-discrete space is presentable (\cite{AR}, 1.14(6)). 
A cone $(\bar{f}_i:A\to A_i)_{i\in I}$ is $U$-\textit{initial} if it satisfies condition (2) above. Topological functors can be characterized as functors $U$ such that each cocone $(f_i:UA_i\to X)_{i\in I}$ has a unique $U$-final lift $(\bar{f}_i:A_i\to A)_{i\in I}$ (see \cite{AHS}, 21.9). It means that \begin{enumerate} \item[(1')] $UA=X$ and $U\bar{f}_i=f_i$ for each $i\in I$ and \item[(2')] given $h:X\to UB$ with $hf_i=U\bar{h}_i$, $\bar{h}_i:A_i\to B$ for each $i\in I$ then $h=U\bar{h}$ for $\bar{h}:A\to B$. \end{enumerate} A cocone $(\bar{f}_i:A_i\to A)_{i\in I}$ is called $U$-\textit{final} if it satisfies the condition (2'). \begin{defi}\label{def3.3} { \em Let $(\ck,U)$ be a topological category and $\ci$ a full subcategory of $\ck$. An object $K$ of $\ck$ is called $\ci$-\textit{generated} if the cocone $(C\to K)_{C\in\ci}$ consisting of all morphisms from objects of $\ci$ to $K$ is $U$-final. } \end{defi} Let $\ck_\ci$ denote the full subcategory of $\ck$ consisting of $\ci$-generated objects. Using the terminology of \cite{AHS}, $\ck_\ci$ is the \textit{final closure} of $\ci$ in $\ck$ and $\ci$ is \textit{finally dense} in $\ck_\ci$. \begin{rem}\label{re3.4} { \em Let $\ci$ be a full subcategory of $\Top$. A topological space $X$ is $\ci$-\textit{generated} if it has the property that a subset $S\subseteq X$ is open if and only if $f^{-1}(S)$ is open for every continuous map $f:Z\to X$ with $Z\in\ci$. Thus we get $\ci$-generated spaces of \cite{D} in this case. We follow the terminology of \cite{D} although it is somewhat misleading because, in the classical case of $\ci$ consisting of compact Hausdorff spaces, the resulting $\ci$-generated spaces are called $k$-spaces. A compactly generated space should also be weakly Hausdorff (see, e.g., \cite{H}). } \end{rem} \begin{propo}\label{prop3.5} Let $(\ck,U)$ be a topological category and $\ci$ a full subcategory. Then $\ck_\ci$ is coreflective in $\ck$ and contains $\ci$ as a dense subcategory. 
\end{propo} \begin{proof} By \cite{AHS}, 21.31, $\ck_\ci$ is coreflective in $\ck$. Since $\ci$ is finally dense in $\ck_\ci$, it is dense. \end{proof} The coreflector $R:\ck\to\ck_\ci$ assigns to $K$ the smallest $\ci$-generated object on $UK$. A concrete category $(\ck,U)$ is called \textit{fibre-small} provided that, for each set $X$, there is only a set of objects $K$ in $\ck$ with $UK=X$. \begin{theo}\label{th3.6} Let $(\ck,U)$ be a fibre-small topological category and let $\ci$ be a full small subcategory of $\ck$. Then the category $\ck_\ci$ is locally presentable. \end{theo} \begin{proof} By \ref{th3.2}, $\ck$ is concretely isomorphic to $\Mod(T)$ where $T$ is a relational universal strict Horn theory without equality. We can express $T$ as a union of an increasing chain $$ T_0\subseteq T_1\subseteq\dots T_i\subseteq\dots $$ of subsets $T_i$ indexed by all ordinals. The inclusions $T_i\subseteq T_j$, $i\leq j$, induce functors $H_{ij}:\Mod(T_j)\to\Mod(T_i)$ given by reducts. Analogously, we get functors $H_i:\Mod(T)\to\Mod(T_i)$ for each $i$. All these functors are concrete (i.e., preserve underlying sets) and have left adjoints $$ F_{ij}:\Mod(T_i)\to\Mod(T_j) $$ and $$ F_i:\Mod(T_i)\to\Mod(T). $$ These left adjoints are also concrete and $F_i(A)$ is given by the $U$-initial lift of the cone $$ f:U_i(A)\to U(B) $$ consisting of all maps $f$ such that $f:A\to H_i(B)$ is a morphism in $\Mod(T_i)$. The functors $F_{ij}$ are given in the same way. Since these left adjoints are concrete, they are faithful and it immediately follows from their construction that they are also full. Thus we have expressed $\Mod(T)$ as a union of an increasing chain of full coreflective subcategories $$ \Mod(T_0)\subseteq\Mod(T_1)\subseteq\dots\Mod(T_i)\subseteq\dots $$ indexed by all ordinals. Moreover, all these coreflective subcategories are locally presentable. Let $\ci$ be a full small subcategory of $\ck$. Then there is an ordinal $i$ such that $\ci\subseteq\Mod(T_i)$.
Consequently, $\ck_\ci\subseteq\Mod(T_i)$ and thus $\ck_\ci$ is a full coreflective subcategory of a locally presentable $\Mod(T_i)$ having a small dense full subcategory $\ci$. Since $\ci$ is a set, there is a regular cardinal $\lambda$ such that all objects from $\ci$ are $\lambda$-presentable in $\Mod(T_i)$ (see \cite{AR}, 1.16). Since $\ck_\ci$ is closed under colimits in $\Mod(T_i)$, each object from $\ci$ is $\lambda$-presentable in $\ck_\ci$. Hence $\ck_\ci$ is locally $\lambda$-presentable. \end{proof} \begin{coro}\label{cor3.7} Let $\ci$ be a small full subcategory of $\Top$. Then the category $\Top_\ci$ is locally presentable. \end{coro} \begin{rem}\label{re3.8} { \em Let $\ck$ be a category such that the coreflective closure $\ck_\ci$ of each small full subcategory $\ci$ of $\ck$ is locally presentable. Then $\ck$ is a union of a chain $$ \ck_0\subseteq\ck_1\subseteq\dots\ck_i\subseteq $$ of full coreflective subcategories which are locally presentable. It suffices to express $\ck$ as a union of a chain $$ \ci_0\subseteq\ci_1\subseteq\dots\ci_i\subseteq $$ of small full subcategories and pass to $$ \ck_{\ci_0}\subseteq\ck_{\ci_1}\subseteq\dots\ck_{\ci_i}\subseteq $$ } \end{rem} \begin{theo}\label{th3.9} Let $\ci$ be a full subcategory of $\Top$ containing discs $D_n$ and spheres $S_n$, $n=0,1,\dots$. Then the category $\Top_\ci$ admits a cofibrantly generated model structure, where cofibrations and weak equivalences are the same as in $\Top$. \end{theo} \begin{proof} Analogous to \cite{H}, 2.4.23. \end{proof} \section{Generated ordered spaces} In order to get our convenient category for directed homotopy, we have to replace $\Top$ by a suitable category of ordered topological spaces. We have considered two such categories: \begin{itemize} \item The category $\PTop$ of preordered topological spaces. Its objects are topological spaces whose underlying set is preordered. Morphisms are continuous maps $f$ s.t. $x\leq y\Rightarrow f(x)\leq f(y)$. 
\item The category $\dSpace$ of topological spaces $X$ with a set of paths $\vec{P}(X)\subset X^I$ (see \ref{def4.1}). \end{itemize} These are all topological categories, i.e., the forgetful functor to $\Set$ is topological, and they are directed. We would like to have directed loops in the category, i.e., the circle $S^1$ with counterclockwise direction. In $\PTop$ we require transitivity, and hence, a relation relating pairs of points on the circle $e^{i\theta}\leq e^{i\phi}$ when $\theta\leq \phi$, will be the trivial relation in $\PTop$. In $\dSpace$ \cite{G}, the directions are represented in the allowed paths and not as a relation on the space itself. On a d-space $(X,\vec{P}(X))$, the relation $x\leq y$ if there is $\gamma\in \vec{P}(X)$ s.t. $\gamma(0)=x$ and $\gamma(1)=y$ gives a functor from $\dSpace$ to $\PTop$. In the other direction, the increasing continuous maps from $\vec{I}$ to a space in $\PTop$ will give a set of dipaths, hence a functor to $\dSpace$. \begin{defi}\label{def4.1} { \em The objects in $\dSpace$ are pairs $(X,\vec{P}(X))$, where $X$ is a topological space and $\vec{P}(X)\subset X^I$ satisfies \begin{itemize} \item All constant paths are in $\vec{P}(X)$. \item $\vec{P}(X)$ is closed under concatenation and increasing reparametrization. \end{itemize} $\vec{P}(X)$ is called the set of dipaths or directed paths. A morphism $f:(X,\vec{P}(X))\to (Y,\vec{P}(Y))$ is a continuous map $f:X\to Y$ s.t. $\gamma\in\vec{P}(X)$ implies $f\circ\gamma\in\vec{P}(Y)$. } \end{defi} In $\dSpace$ we do have directed circles. \begin{theo}\label{th4.2} $\dSpace$ is a topological category. \end{theo} \begin{proof} Let $T$ be a relational universal strict Horn theory without equality giving $\Top$ and using relation symbols $R_j$, $j\in J$. We add a new continuum-ary relation symbol $R$ whose interpretation is the set of directed paths.
We add to $T$ the following axioms: \begin{enumerate} \item[(1)] $(\forall x)R(x)$ where $x$ is a constant path, \item[(2)] $(\forall x,y,z)(\bigwedge\limits_{0<i\leq\frac{1}{2}} z_i=x_{2i} \wedge\bigwedge\limits_{0<i\leq\frac{1}{2}} z_{\frac{1}{2}+i}=y_{2i} \wedge x_1= y_0\wedge R(x)\wedge R(y)\to R(z)),$ \item[(3)] $(\forall x)(R(x)\to R(xt))$ where $t$ is an increasing reparametrization, \item[(4)] $(\forall x) (R(x)\to R_j(xa))$ where $j\in J$ and $I$ satisfies $R_j$ for $a$. \end{enumerate} The resulting relational universal strict Horn theory axiomatizes d-spaces. In fact, (1) makes each constant path directed, (2) says that directed paths are closed under concatenation, (3) says that they are closed under increasing reparametrization and (4) says that they are continuous. \end{proof} \begin{rem}\label{re4.3} { \em (i) A d-space is called \textit{saturated} if it satisfies the converse implication to (3): (5) $(\forall x)(R(xt)\to R(x))$ where $t$ is an increasing reparametrization. \noindent It means that a path is directed whenever some of its increasing re\-pa\-ra\-met\-ri\-za\-ti\-ons is directed. Thus saturated d-spaces also form a topological category. (ii) There is, of course, a direct proof of \ref{th4.2}. By \cite{AHS}, 21.9, it suffices to see that the forgetful functor $U:\dSpace \to \Set$ satisfies: For any cocone $(f_i:UA_i\to X)$ there is a unique \emph{$U$-final lift} $(\bar{f}_i:A_i\to A)$, i.e., there is a unique $\dSpace$ structure on $X$ such that $h:X\to UB$ is a d-morphism whenever $h\circ f_i$ is a d-morphism for all $i$. The topology is defined by $V$ open if and only if $f_i^{-1}(V)$ open for all $i$. Let $\vec{P}(A)$ be the closure under concatenation and increasing reparametrization of the set of all constant paths and all $f_i\circ\gamma$ where $\gamma\in\vec{P}(A_i)$. It is not hard to see that this is a $U$-final lift. } \end{rem} \begin{coro}\label{cor4.4} Let $\ci$ be a small full subcategory of $\dSpace$.
Then the category $\dSpace_\ci$ is locally presentable. \end{coro} \begin{defi}\label{def4.5} { \em Let $\cB$ be the full subcategory of $\dSpace$ with objects all cubes $I_1\times I_2\times \ldots \times I_n$ where $I_k$ is either the unit interval with the trivial order (i.e., $a\leq b$ for all $a,b$) or the unit interval with the standard order. The (pre)order on $I_1\times I_2\times \ldots \times I_n$ is the product relation. The dipaths are the increasing paths wrt. this relation. } \end{defi} \begin{nota}\label{not4.6} { \em Let $I$ denote the unit interval with the trivial order and let $\di{I}$ denote the unit interval with the standard order. } \end{nota} \begin{coro}\label{cor4.7} The category ${\dSpace}_{\cB}$ is locally presentable. \end{coro} We consider the category ${\dSpace}_{\cB}$ a suitable framework for studying the directed topology problems arising in concurrency. One reason for this is, that the geometric realization of a cubical complex is in ${\dSpace}_{\cB}$. These are geometric models of Higher Dimensional Automata, see \cite{FGR}. In \cite{FGR}, the directions on the spaces are given via a {\em{local partial order}} and not as d-spaces, but the increasing paths wrt. the local partial order are precisely the dipaths in the d-space structure. For directed homotopy theory, this category is also suitable: \begin{defi}\label{def4.8} { \em Let $f,g:X\to Y$ be d-maps. A d-homotopy \cite{G} is a d-map $H:X\times \vec{I}\to Y$ s.t. $H(x,0)=f(x)$ and $H(x,1)=g(x)$; the d-homotopy equivalence relation is the reflexive transitive hull of this relation. A d-homotopy of dipaths $\gamma$, $\mu$ with common initial and final points is a d-map $H:\vec{I}\times \vec{I}\to Y$ s.t. $H(t,0)=\gamma(t)$, $H(t,1)=\mu(t)$ and $H(0,s)=\gamma(0)=\mu(0)$ and $H(1,s)=\gamma(1)=\mu(1)$. A dihomotopy \cite{FGR} is unordered along the homotopy coordinate: $H:X\times I\to Y$. This gives an equivalence relation without closing off. 
Dihomotopic dipaths are defined as above---with fixed endpoints. } \end{defi} Since we allowed both the trivially ordered interval and the naturally ordered interval in $\cB$, the category $\dSpace_{\cB}$ is convenient for both kinds of directed homotopy. Globes have been considered as models for higher dimensional automata in \cite{GG}. A globe on a non-empty (d-)space $X$ is the unreduced suspension $X\times \vec{I}/(x,1)\sim *_1, (x,0)\sim *_0$. If $X$ is in $\dSpace_{\cB}$, then clearly so is the globe of $X$ as a coequalizer. The globe of the empty set is the d-space of two disjoint points, which is also in $\dSpace_{\cB}$. The elementary globes, the globe of an unordered ball, are equivalent to the globe of an unordered cube, which is in our category. \section{Dicoverings} In \cite{F1}, dicoverings, i.e., coverings of directed topological spaces, are introduced as a counterpart of coverings in the undirected case. The categorical framework there is (subcategories of) locally partially ordered spaces. It turns out that it is not obvious which category one should choose to get universal dicoverings. With the framework here, we have a setting which on the one hand is much more general than the almost combinatorial one of cubical sets, and on the other hand is not as general as locally partially ordered topological spaces, where dicovering theory is certainly not well behaved. In \cite{F1} we consider dicoverings with respect to a basepoint, a fixed initial point. \begin{defi}\label{def5.1} { \em Let $p:Y\to X$ be a morphism in $\dSpace$, let $x_0\in X$. Then $p$ is a dicovering wrt.
$x_0$ if for all $y_0\in p^{-1}(x_0)$ and all $\gamma\in\vec{P}(X)$ with $\gamma(0)=x_0$, there is a unique lift $\hat{\gamma}$ with $\hat{\gamma}(0)=y_0$: $$\xymatrix{{\{0\}}\ar[r]\ar@{^{(}->}[d]&Y\ar[d]^{p}\\\di{I}\ar@{-->}[ur]^{\hat{\gamma}}\ar[r]^\gamma &X}$$ And for all $H:I\times\vec{I}\to X$ with $H(s,0)=x_0$, there is a unique lift $\hat{H}$: $$\xymatrix{(I\times\{0\},I\times\{0\})\ar[r]\ar@{^{(}->}[d]&(Y,y_0)\ar[d]^{p}\\(I\times\di{I},I\times\{0\})\ar@{-->}[ur]^{\hat{H}}\ar[r]^H &(X,x_0)}$$ } \end{defi} In the present framework, however, we will consider lifting properties wrt. all initial points: Let $J$ be the coequalizer $$\xymatrix{I\ar@/^/[r]^g\ar@/_/[r]_f &I\times\vec{I}\ar[r] & J}$$ where $f(x)=(0,0)$ and $g(x)=(x,0).$ \begin{defi}\label{def5.2} { \em Let $p:Y\to X$ be a morphism in $\dSpace$. Then $p$ is a dicovering, if for all $\gamma\in\vec{P}(X)$ there is a unique lift $\hat{\gamma}$ $$\xymatrix{{\{0\}}\ar[r]\ar@{^{(}->}[d]&Y\ar[d]^{p}\\\di{I}\ar@{-->}[ur]^{\hat{\gamma}}\ar[r]^\gamma &X}$$ and for all $H:J\to X$ there is a unique lift $$\xymatrix{{*}\ar[r]\ar@{^{(}->}[d]&Y\ar[d]^{p}\\ J\ar@{-->}[ur]^{\hat{H}}\ar[r]^H &X}$$ where $*$ is the point $ (x,0)\in J.$ } \end{defi} Hence, a dicovering is a morphism $p:Y\to X$ which has the unique right lifting property with respect to the inclusions $\mathcal{C}=\{0\to \vec{I}, *\to J\}$. Hence \begin{propo}\label{prop5.3} A morphism $p:Y\to X$ in $\dSpace$ is a dicovering if and only if it is in $\mathcal{C}^{\perp}.$ \end{propo} \begin{defi}\label{def5.4} { \em A \textit{universal dicovering} of $X\in \dSpace_{\cB}$ is a morphism $\pi:\tilde{X}\to X$ such that for any dicovering $p:Y\to X$ in $\dSpace_{\cB}$, there is a unique morphism $\phi:\tilde{X}\to Y$ such that $\pi=p\circ\phi.$ } \end{defi} \begin{coro}\label{cor5.5} Let $X\in \dSpace_{\cB}$. Then there is a universal dicovering $\pi:\tilde{X}\to X$, and it is unique.
\end{coro} \begin{proof} This follows from \ref{th2.2}, since $\dSpace_{\cB}$ is locally presentable. Let $$ 0 \xrightarrow{\ w \ } \tilde{X}\xrightarrow{\ u \ } X $$ be the $(\colim(\mathcal C),\mathcal C^\bot)$ factorization of the unique morphism from the initial object $0$ (the empty set) to $X$. Then $u:\tilde{X}\to X$ is a universal dicovering of $X$. In fact, each dicovering $$ v:Y\to X $$ has a unique factorization through $u$. It suffices to apply the unique right lifting property to $$ \xymatrix@C=3pc@R=3pc{ \tilde{X} \ar[r]^{u} & X \\ 0 \ar [u]^{} \ar [r]_{} & Y \ar[u]_{v} } $$ \end{proof} In \cite{F1}, we construct a ``universal'' dicovering $\pi:\tilde{X}\to X$ by endowing the set of dihomotopy classes of dipaths initiating in a fixed point $x_0$ with a topology and a local partial order. If all points in $X$ are reachable by a directed path from $x_0$ and if $X$ is in $\dSpace_{\cB}$ the construction here and the underlying d-space of the locally partially ordered space $\tilde{X}$ in \cite{F1} should coincide, but we do not have a proof of this yet. \end{document}
\begin{document} \begin{abstract} We prove lower large deviations for geometric functionals in sparse, critical and dense regimes. Our results are tailored for functionals with nonexisting exponential moments, for which standard large deviation theory is not applicable. The primary tool of the proofs is a sprinkling technique that, adapted to the considered functionals, ensures a certain boundedness. This substantially generalizes previous approaches to tackle lower tails with sprinkling. Applications include subgraph counts, persistent Betti numbers and edge lengths based on a sparse random geometric graph, power-weighted edge lengths of a $k$-nearest neighbor graph as well as power-weighted spherical contact distances in a critical regime and volumes of $k$-nearest neighbor balls in a dense regime. \end{abstract} \title{Lower large deviations for geometric functionals in sparse, critical and dense regimes} \section{Introduction} The theory of large deviations is a central research topic in probability theory which aims to quantify and understand large fluctuations in systems affected by randomness. As it becomes increasingly important to understand the behavior of random systems not only in typical situations but also in unlikely scenarios, large deviations theory has become a central element in a broad range of application domains, such as telecommunications, rare-event simulations, insurance mathematics and information theory \cite{dembozeitouni}. While classical large deviations theory predominantly investigates sequences of random variables or time-varying processes, more recently there has been vigorous research activity in investigating large deviations properties of random geometric and topological structures \cite{SchreiberYukich}. One of the key characteristics of these spatial systems is that we frequently observe a distinctively different behavior in the lower and in the upper large deviation tails.
More precisely, for upper large deviations, we often observe \varepsilonmph{condensation}. That is, the rare events are caused by a highly pathological structure localized in a small part of the sampling window, while the rest of the system behaves essentially as in the typical regime \cite{chatterjeeharel,hirschwill,kerriou}. In contrast, in the lower large deviations, we are typically in a homogenization phase. That means the large deviations are caused by consistent changes away from the typical regime throughout the sampling window. The classical techniques to deal with large deviations are predominantly designed to deal with situations where the lower and the upper tails are of the same nature \cite{SchreiberYukich,georgii}. Hence, it is often unclear how to apply them in the geometric situations outlined above. On a mathematical level, the reason for this difficulty is the lack of suitable exponential moments. To address these problems, recently \cite{hirsch} proposed a sprinkling method. Loosely speaking, this method is based on the idea that it is often possible to eliminate pathological configurations through a small modification of the underlying Poisson process. On a technical level, this sprinkling is implemented through a carefully devised coupling construction. The benefit of this sprinkling step is that after this modification, the pathological configurations are removed and become amenable to an analysis with classical tools. However, while the examples described in \cite{hirsch} provide a first idea of the feasibility of the sprinkling approach, the assumptions that are imposed prevent the method from being applied to a broad class of models. For instance, while the method in \cite{hirsch} can deal with power-weighted edge lengths of $k$-nearest neighbor graphs, the power is restricted to be smaller than the dimension. In particular, it does not yield the lower-tail complement of the upper tail analysis in \cite{hirschwill}. 
More generally, the approach in \cite{hirsch} only deals with the critical regime, where the number of relevant Poisson points is proportional to the size of the sampling window. However, in the context of topological data analysis also, different regimes characterized by either much sparser or much denser configurations of points have gained substantial interest \cite{kahlemeckes,owadathomas}. In the present paper, we address the shortcomings described above. More precisely: \begin{enumerate} \item In the critical regime, we describe an extension of the sprinkling approach that allows us to deal with large deviations of distance-based functionals to a high power. \item In the sparse regime, we describe the lower large deviations of a large class of additive functionals, including persistent Betti numbers. \item In the particularly challenging dense regime, we are able to deal with the lower large deviations of large power-weighted $k$-nearest neighbor distances. \end{enumerate} On a methodological level, the key contribution of our work is a substantial improvement of the sprinkling construction from \cite{hirsch}. In that work, the coupling was relatively basic in the sense that it typically was enough to add a sparsely distributed process of sprinkled points homogeneously throughout the window. In the present paper, we describe sprinkling strategies that are far more adapted to the actual pathological configurations. In particular, in the dense regime, we show that it is even possible to implement a desired coupling in a sequential manner where the distribution of the sprinkling in the next step is allowed to depend on the configuration of the sprinkling constructed so far. The rest of the present paper is structured as follows. Section \ref{section_model} begins with an introduction of the model and an explanation of how to interpret the different regimes and distinguish them.
Next, in Sections \ref{section_model_sparse}, \ref{section_model_thermodynamic} and \ref{section_model_dense}, we give a much more detailed view into every regime, the sparse, critical and dense one, respectively. Each of these subsections also contains requirements for the specific regimes that allow a functional to fit within our frameworks for the lower large deviations and in each subsection a theorem is stated. Afterwards, we give a small overview of the literature that our results build on and identify in which way ours differ from and extend these. Sections \ref{section_applications_thermodynamic} and \ref{section_applications_sparse} then consist of examples of functionals that fit within the frameworks of the critical and sparse regimes, respectively. Due to the complexity of the dense regime, we restrict ourselves to the case of volumes of large $k$-nearest neighbor balls. The rest of the paper is devoted to the proofs of the three main theorems for each regime. Section \ref{section_proof_thermodynamic} deals with the proof within the critical regime, Section \ref{section_proof_sparse} with the proof within the sparse regime and Section \ref{section_proof_dense} with the proof within the dense regime. \section{Model}\label{section_model} For $d\in\mbb N$, let $\mbb PP_n \subseteq [0,1]^d$ be a Poisson point process with intensity $n$. The unit cube is equipped with the torus distance given by $$|x-y| := \min_{z\in\mbb Z^d} \|x-y+z\|$$ for $x,y\in[0,1]^d$, where $\|\cdot\|$ represents the Euclidean norm in $\mbb R^d$. For $x\in[0,1]^d$ and $r>0$, we express the closed ball of radius $r$ with respect to the Euclidean or toroidal metric by $B_r(x)$. Which metric is meant will be clear from the context, and we use $\kappa_d$ to denote the volume of the $d$-dimensional unit ball. First, we demonstrate how geometric functionals on the vertex set $\mbb PP_n$ are commonly set up and how to categorize them into one of the three regimes.
In general, most geometric functionals, such as subgraph counts of a random geometric graph or power-weighted edge length of the $k$-nearest neighbor graph, can be encoded by a functional of the form \begin{equation}\label{equation_general_functional} H_n(\mbb PP_n) := \frac1{s_n} \sum_{X\in\mbb PP_n} \xi_n(X,\mbb PP_n), \end{equation} where \begin{equation}\label{equation_general_score} \xi_n\colon \mbb R^d \times \mbb NNNN \rightarrow [0,\infty] \end{equation} represents the score function, i.e., the contribution of each single vertex of a set of nodes to the whole functional, where by $\mbb NNNN$, we denote the space of locally finite subsets of $\mbb R^d$. Since $\mbb PP_n$ almost surely contains only a finite amount of points, in most cases, it will be sufficient to only define the score function on finite subsets of $\mbb R^d$, which we denote by $\mbb NNN$. In some cases, we desire to only consider such configurations on the torus for which we write $\mbb NNN^{(1)} = \{\varphi\in\mbb NNN\colon \varphi\subseteq [0,1]^d\}$. Further, informally expressed, the normalizing factor $s_n$ corresponds to the expected number of nodes in $\mbb PP_n$ that admit a positive score. We call such points \emph{relevant}. Throughout the paper, we will use the expression $\varphi(A)$ for a configuration $\varphi\in\mbb NNNN$ and a measurable set $A\subseteq\mbb R^d$ to denote the number of points of $\varphi$ that are located within $A$. We distinguish between three regimes, the sparse, the critical, sometimes also called thermodynamic, and the dense regime. From a heuristic point of view, this distinction comes from the typical amount of Poisson points in the range that determines the score of a relevant point. Loosely speaking, for many score functions, the score of vertices can be determined locally by only looking at a small neighborhood around the considered point.
More precisely, the regimes are distinguished by a sequence $(r_n)_n$ such that for a relevant point $X\in\mbb PP_n$, typically \begin{equation}\label{equation_heuristics_regimes} \xi_n(X,\mbb PP_n) = \xi_n(X,\mbb PP_n\cap B_{r_n}(X)). \end{equation} The simplest case are functionals that represent features of the random geometric graph, in which $r_n$ corresponds to (the order of) the connectivity radius. For this specific example, the asymptotic behavior of the expected degree of a vertex in the random geometric graph characterizes the respective regime. We emphasize that for other functionals, the distinction into the regimes can be more complicated and refer to Sections \ref{section_model_sparse}, \ref{section_model_thermodynamic} and \ref{section_model_dense} for more details about the particular regimes. Sticking with the heuristic explanation and \eqref{equation_heuristics_regimes}, the expected number of Poisson points within the typical range of the score function is consequently of order $n r_n^d$. Hence, there are three possible scenarios for the asymptotics. \begin{enumerate} \item Sparse regime: $n r_n^d\overset{n\to\infty}{\longrightarrow} 0$; \item Critical regime: $n r_n^d\overset{n\to\infty}{\longrightarrow} c >0$; \item Dense regime: $n r_n^d\overset{n\to\infty}{\longrightarrow} \infty$. \end{enumerate} \begin{figure}[H] \centering \input{Tikz_pictures/GeometricGraphSparse} \hspace{.5cm} \input{Tikz_pictures/GeometricGraphThermodynamic} \hspace{.5cm} \input{Tikz_pictures/GeometricGraphDense} \caption{Illustrations of a random geometric graph in a sparse, critical and dense regime.} \label{fig:simulations} \end{figure} The next sections give details about our results in the three different regimes. \subsection{Sparse regime}\label{section_model_sparse} In the sparse regime, we investigate functionals for the random geometric graph.
We study score functions given by $$\xi\colon \mbb NNN \rightarrow [0,\infty)$$ defined on finite point configurations in $\mbb R^d$. We also set \begin{equation}\label{equation_minimum_non_zero} k_0 := \inf\{m\ge 0\colon \xi(\varphi) >0 \text{ for some } \varphi\subseteq\mbb R^d \text{ with }\#\varphi = m\} \end{equation} as the smallest size of a configuration that can yield a positive functional value. We are going to plug configurations of $\mbb PP_n$ into the functional that are rescaled using a sequence of connectivity radii $(r_n)_n\subseteq(0,\infty)$ that will tend to zero. Configurations that have vertices close to the boundary of the torus, which we denote by $\partial[0,1]^d$, might lead to ambiguities if plugged into $\xi$ because the functional itself is not allowed to depend on $n$ and therefore, carries no information about the size of the underlying rescaled torus. For this reason, we generalize the functional to some extent and for $n\in\mbb N$, let $$\xi_n\colon\mbb NNN^{(1)} \rightarrow [0,\infty)$$ be a functional such that for all configurations $\varphi\in\mbb NNN^{(1)}$ with $\mathrm{dist}(\varphi,\partial[0,1]^d) > r_n$ $$\xi_n(\varphi) = \xi(r_n^{-1} \varphi),$$ where $\mathrm{dist}(\cdot,\cdot)$ denotes the Euclidean distance between two subsets of $\mbb R^d$. We require $(\xi_n)_n$ and $\xi$ to satisfy the following conditions that are related to the requirements in \cite[Section 3]{hirschowada}. \begin{enumerate} \item $\xi$ is translation invariant. That means for all $\varphi\in\mbb NNN$ and $y\in\mbb R^d$, a shift of the configuration $\varphi$ with the vector $y$ does not affect its value, i.e., \begin{equation}\label{INV}\tag{\textbf{INV}} \xi(\varphi+y)=\xi(\varphi).
\end{equation} \item $\xi$ is locally determined for configurations of size $k_0$, which means for all $\varphi\in\mbb NNN$ with $\#\varphi=k_0$ \begin{equation}\label{LOC}\tag{\textbf{LOC}} \xi(\varphi) = 0 \qquad\text{if } \mathrm{diam}(\varphi) > k_0, \end{equation} where $\mathrm{diam}(\varphi) := \max_{y\neq z\in \varphi} \|y-z\|$ denotes the maximal Euclidean distance between points in $\varphi$. \item For each $m>0$ there exists $b:=b(m)>0$ such that for every $n\in\mbb N$ and every configuration $\varphi\in\mbb NNN^{(1)}$, it holds that \begin{equation}\label{BND}\tag{\textbf{BND}} \xi_n(\varphi)\le b \qquad\text{when } \#\varphi \le m. \end{equation} \item It holds that \begin{equation}\label{POS}\tag{\textbf{POS}} \int_{\mbb R^{(k_0-1)d}} \xi(\{0,x_2,\dots,x_{k_0}\}) \,\mathrm{d}(x_2,\dots,x_{k_0}) > 0. \end{equation} \end{enumerate} Requiring \eqref{INV} does not exclude any common functionals that represent statistics of random geometric graphs. \eqref{LOC} can be interpreted as a condition that validates $k_0$ as smallest size of a connected component with positive score and is implied if $\xi$ is additive, see Remark \ref{remark_sparse_functional}. Condition \eqref{BND} yields that the score of finite-sized components is finite and \eqref{POS} is a technical condition needed for the result in \cite[Section 3]{hirschowada} that we are going to invoke. Henceforth, $\mathsf{GG}_n(\varphi)$ denotes the geometric graph with respect to $|\cdot|$ and with connectivity radius $r_n$ on $\varphi\in\mbb NNN^{(1)}$. Now, we consider the lower large deviations of the functional \begin{equation}\label{equation_functional_RGG} H_n^{\mathrm{sp}} := H_n^{\mathrm{sp}}(\mbb PP_n) := \frac{1}{\rho_{n,k_0}^{\mathrm{sp}}} \sum_{\varphi\subseteq \mbb PP_n} \xi_n(\varphi) s_n(\varphi,\mbb PP_n).
\end{equation} Here, $s_n(\varphi,\mbb PP_n)$ is an indicator function, taking value $1$ if $\varphi$ is a connected component of $\mathsf{GG}_n(\mbb PP_n)$, i.e., for $\varphi\subseteq\psi\in\mbb NNN^{(1)}$ that indicator is given by \begin{equation}\label{equation_indicator_torus} s_n(\varphi,\psi) := \mathbbmss{1}\{\varphi\text{ is a connected component of }\mathsf{GG}_n(\psi)\}. \end{equation} The configuration $r_n^{-1} \varphi$ for $\varphi\subseteq\mbb PP_n$ is considered as a subset of the torus $[0,r_n^{-1}]^d / \sim$ and the normalizing factor has the form $$\rho_{n,k_0}^{\mathrm{sp}} := n^{k_0} r_n^{d(k_0-1)},$$ which can be interpreted as the order of the expected number of points that are part of some connected component of size $k_0$. \begin{remark}\label{remark_sparse_functional} \begin{enumerate} \item Note that the functional in \eqref{equation_functional_RGG} is stated in a more general form than suggested in \eqref{equation_general_functional} and \eqref{equation_general_score}. We could recover the representation that sums over all nodes of $\mbb PP_n$ with an indicator that is only nonzero for one vertex of each connected component. \item Most examples of such functionals, such as subgraph counts, Betti numbers and edge lengths, also fulfill that $\xi_n$ is additive for all $n\in\mbb N$, which means $$\xi_n(\varphi_1 \cup \varphi_2) = \xi_n(\varphi_1) + \xi_n(\varphi_2)$$ whenever the distance between $\varphi_1\in\mbb NNN^{(1)}$ and $\varphi_2\in\mbb NNN^{(1)}$ with respect to the toroidal metric is larger than $r_n$. For such functionals we could also write the functional $H_n^{\mathrm{sp}}$ as $\xi_n(\mbb PP_n)/\rho_{n,k_0}^{\mathrm{sp}}$.
\end{enumerate} \end{remark} Next, along the lines of \cite{hirschowada}, we define a measure on the set $(0,\infty)$ by $$\tau_{k_0}^{\mathrm{sp}}(A) := \frac1{k_0!} \lambda_{k_0-1}(\{(y_2,\dots,y_{k_0})\in \mbb R^{d(k_0-1)} \colon \xi(\{0,y_2,\dots,y_{k_0}\})\in A\}),$$ for a measurable $A\subseteq (0,\infty)$, where $\lambda_{k_0-1}$ corresponds to the Lebesgue measure on $\mbb R^{d(k_0-1)}$. Additionally, define the relative entropy of a Radon measure $\rho$ on $(0,\infty)$ by $$h^{\mathrm{sp}}(\rho \mid \tau_{k_0}^{\mathrm{sp}}) := \begin{cases} \int_{(0,\infty)} \log \frac{\mathrm{d} \rho}{\mathrm{d} \tau_{k_0}^{\mathrm{sp}}}(x) \,\rho(\mathrm{d} x) - \rho((0,\infty)) + \tau_{k_0}^{\mathrm{sp}}((0,\infty)) &\text{if }\rho \ll \tau_{k_0}^{\mathrm{sp}} \\ \infty &\text{otherwise} \end{cases},$$ where $\rho \ll \tau_{k_0}^{\mathrm{sp}}$ denotes absolute continuity of $\rho$ with respect to $\tau_{k_0}^{\mathrm{sp}}$. Note that in accordance with \cite[Remark 3.6]{hirschowada}, under some circumstances, some simplifications of the rate function are possible. We refer to the examples in Section \ref{section_applications_sparse} for details. The first main theorem states that $H_n^{\mathrm{sp}}$ admits lower large deviations with rate function $h^{\mathrm{sp}}(\cdot \mid \tau_{k_0}^{\mathrm{sp}})$. \begin{theorem}[Lower large deviations in the sparse regime]\label{theorem_main_sparse} Assume that \eqref{INV}, \eqref{LOC}, \eqref{BND} and \eqref{POS} are satisfied and assume that $k_0\in[1,\infty)$.
If $n r_n^d\rightarrow 0$ and $\rho_{n,k_0}^{\mathrm{sp}}\rightarrow\infty$, then, for $a\in\mbb R$ \begin{equation}\label{upper_bound_sparse} \limsup_{n\to\infty} \frac{1}{\rho_{n,k_0}^{\mathrm{sp}}} \log \mbb P(H_n^{\mathrm{sp}} \leq a) \leq -\inf_{\rho\colon T^{\mathrm{sp}}(\rho) \le a} h^{\mathrm{sp}}(\rho \mid \tau_{k_0}^{\mathrm{sp}}) \end{equation} and \begin{equation}\label{lower_bound_sparse} \liminf_{n\to\infty} \frac{1}{\rho_{n,k_0}^{\mathrm{sp}}} \log \mbb P(H_n^{\mathrm{sp}} < a) \geq -\inf_{\rho\colon T^{\mathrm{sp}}(\rho) < a} h^{\mathrm{sp}}(\rho \mid \tau_{k_0}^{\mathrm{sp}}), \end{equation} where $T^{\mathrm{sp}}(\rho) := \int_{(0,\infty)} x \,\mathrm{d}\rho(x)$. \end{theorem} If we assume that $n r_n^d \overset{n\to\infty}{\longrightarrow} 0$, we are indeed in a sparse random geometric graph. But, also using our characterization of the regimes, this setting deserves to be labeled sparse. To verify this, we give a small outlook on the proof of the lower large deviations in this case. First, the typical range to determine the score of a node corresponds to the typical size of a connected component. As it turns out, connected components of size $k_0+1$ or larger do not significantly contribute to the lower large deviations. Therefore, typically the range we have to consider to determine the score of a node or rather the volume occupied by a typical component size is bounded by $k_0^d n r_n^d$, which tends to $0$. \subsection{Critical regime}\label{section_model_thermodynamic} For the critical regime, we let $\xi$ be a measurable function $$\xi \colon \mbb R^d\times\mbb NNNN \rightarrow [0,\infty].$$ Its desired properties are specified later. To turn $\xi$ into the score function we scale everything with the factor $n^{1/d}$ and define \begin{equation}\label{equation_score_thermo} \xi_n \colon \mbb R^d\times\mbb NNNN \rightarrow [0,\infty],\ (x,\varphi) \mapsto\xi(n^{1/d} x,n^{1/d} \varphi).
\end{equation} Here, unlike the sparse regime, we give two different forms of the functional of interest. \noindent\textbf{Representation A:} We can sum up the scores of each node of the Poisson point process, which is encoded by \begin{subequations} \begin{equation}\label{equation_functional_thermo1} H_n^{\mathrm{th}} := H_n^{\mathrm{th}}(\mbb PP_n) := \frac1{n} \sum_{X\in\mbb PP_n} \xi_n(X,\mbb PP_n). \end{equation} \textbf{Representation B:} It is also possible to integrate the scores of all space points in $[0,1]^d$, which can be represented by \begin{equation}\label{equation_functional_thermo2} H_n^{\mathrm{th}} := H_n^{\mathrm{th}}(\mbb PP_n) := \int_{[0,1]^d} \xi_n(x,\mbb PP_n) \,\mathrm{d} x. \end{equation} \end{subequations} Power-weighted edge lengths of $k$-nearest neighbor graphs is an example of a functional that can be displayed using representation \textbf{A}. Spherical contact distances of space points can be encoded with representation \textbf{B}. See, Section \ref{section_applications_thermodynamic} for details. \begin{remark} It is possible to express every functional in representation \textbf{A} in terms of representation \textbf{B} and treat \eqref{equation_functional_thermo1} as a special case of \eqref{equation_functional_thermo2} by using that \begin{equation}\label{equation_sum_integral_rep} \frac1{n} \sum_{X\in\mbb PP_n} \xi_n(X,\mbb PP_n) = \int_{[0,1]^d} \sum_{y\in\mbb PP_n\cap B_{(n\kappa_d)^{-1/d}}(x)} \xi_n(y,\mbb PP_n) \,\mathrm{d} x, \end{equation} which can be verified by an application of Fubini's theorem. If all our requirements for a score function would directly translate to the sum of the score function over nodes in a small volume, we could solely consider representation \textbf{B}. However, we aim to study the lower large deviations of functionals for which some of the requirements for the score function do not translate.
In particular, the sum over scores of nodes in a small space can be excessively large if there are many nodes, even if the individual scores are bounded. For this reason, we chose to use two different representations. \end{remark} In the critical regime, the notion of \emph{stabilization} plays an important role in many frameworks that deal with limit theory for geometric functionals, see, for example, \cite{weakLLN} or \cite{SchreiberYukich}. Namely, let a function $$\mathcal R\colon \mbb R^d\times\mbb NNN \rightarrow [0,\infty]$$ be homogeneous of degree $1$, which means that for all $m>0$, $\varphi\in\mbb NNN$ and $x\in\varphi$ it holds that \begin{equation}\label{equation_stabilization_radius_homogeneity} \mathcal R(mx, m\varphi) = m \mathcal R(x, \varphi). \end{equation} Further, we ask for events of the form $\{\mathcal R(x,\mbb PP_n) \le r\}$ to be measurable with respect to $\mbb PP_n\cap B_r(x)$ for each $x\in[0,1]^d$ and $r>0$. We call $\mathcal R$ stabilization radius for $\xi$ if for every $n\in\mbb N$ and $x\in[0,1]^d$ \begin{equation}\label{equation_stabilization_radius} \mbb P\big(\xi_n(x,\mbb PP_n) = \xi_n(x,\mbb PP_n\cap B_{\mathcal R(x,\mbb PP_n)}(x))\big) = 1. \end{equation} To be able to apply sprinkling to couple two Poisson processes, for an $M>0$ and each $n\in\mbb N$, we introduce $\mbb PP_n^{-,M}$ as a thinning of $\mbb PP_n$ with survival probability $1-M^{-1}$, as well as $\mbb PP_n^{+,M}$ as a Poisson point process on $[0,1]^d$ with intensity $n M^{-1}$ that is independent of $\mbb PP_n$ and the thinning. Then, \begin{equation}\label{equation_union_poisson_prcesses_thermo} \mbb PP_n^M := \mbb PP_n^{-,M}\cup \mbb PP_n^{+,M}, \end{equation} is a Poisson point process $\mbb PP_n^M$ on $[0,1]^d$ with the same distribution as $\mbb PP_n$.
The goal for the applications will be to let $\mbb PP_n^{-,M}$ fully cover $\mbb PP_n$ and to sprinkle in additional nodes using $\mbb PP_n^{+,M}$ to control the stabilization radii while at the same time $H_n^{M}(\mbb PP_n^M)$ approximates $H_n^{M}(\mbb PP_n)$. For this purpose, we define an event that is supposed to be the goal of the sprinkling. Here, we need to distinguish between the two representations \eqref{equation_functional_thermo1} and \eqref{equation_functional_thermo2} because in the former, only the nodes of the Poisson point process need to stabilize after the sprinkling. \noindent\textbf{Representation A:} In the first case, we define the event \begin{subequations} \begin{equation}\label{equation_stabilization_event1} E_n^M := \Big\{\sup_{X\in\mbb PP_n} \mathcal R(X,\mbb PP_n\cup \mbb PP_n^{+,M})\leq M/n^{1/d}\Big\} \end{equation} that the maximal stabilization radius of a node of $\mbb PP_n\cup\mbb PP_n^{+,M}$ is bounded by $M/n^{1/d}$. \noindent\textbf{Representation B:} In the second case, we let \begin{equation}\label{equation_stabilization_event2} E_n^M := \Big\{\sup_{x\in [0,1]^d} \mathcal R(x,\mbb PP_n\cup \mbb PP_n^{+,M})\leq M/n^{1/d}\Big\} \end{equation} \end{subequations} be the set that the maximal stabilization radius of a space point in $[0,1]^d$ with respect to $\mbb PP_n\cup\mbb PP_n^{+,M}$ is bounded by $M/n^{1/d}$. We note that here, $E_n^M$ might not be measurable. But this is of no concern because we only have to deal with subsets of $E_n^M$ later that certainly will be measurable. Next, for a functional $\xi$ to fit in our framework for lower large deviations in the critical regime, we require additional conditions. Condition \eqref{STA} limits the magnitude of a score function conditioned on a bounded stabilization radius. \eqref{INC} makes tools such as monotone convergence available to use in the proof.
\eqref{STA} and \eqref{INC} are satisfied by most examples of score functions in the literature. \eqref{SPR1}, \eqref{SPR2} and \eqref{SPR3} are more restrictive. They make sure that it is possible to find a strategy for sprinkling that bounds the maximal stabilization radius without creating too much excess in the functional. Details about the specific strategies are given in Section \ref{section_applications_thermodynamic}. \begin{enumerate} \item Let there exist a stabilization radius $\mathcal R$ for $\xi$ such that for $x\in[0,1]^d$ and $M>0$ large enough and $n\in\mbb N$ \begin{equation}\label{STA}\tag{\textbf{STA}} \mbb P\big(\mathcal R(x,\mbb PP_n) \leq M/n^{1/d}, \xi_n(x, \mbb PP_n) > g(M)\big) = 0 \end{equation} for some function $g\colon(0,\infty)\rightarrow (0,\infty)$. In particular, $\mathcal R$ has to satisfy \eqref{equation_stabilization_radius_homogeneity} and \eqref{equation_stabilization_radius}. \item For each $r>0$, there exists a functional $\xi^r\colon\mbb R^d\times\mbb NNNN \rightarrow [0,\infty]$ bounded by some $r$-dependent constant such that for each $\varphi\in\mbb NNNN$ and $x\in\mbb R^d$ it holds that $\xi^r(x,\varphi)=\xi^r(x,\varphi\cap B_r(x))$ and \begin{equation}\label{INC}\tag{\textbf{INC}} \xi^r(x,\varphi) \to \xi(x,\varphi), \end{equation} as $r\rightarrow\infty$. In words, $\xi^r$ is nondecreasing with pointwise limit $\xi$. \end{enumerate} \noindent Before the last set of requirements, for each $n\in\mbb N$ and $M>0$, we introduce two cut-off versions of the score function using the map $g$ from \eqref{STA} by $$\xi_n^{M', M}(x, \varphi) = \xi(n^{1/d}x, (n^{1/d}\varphi\cap B_{M'}(n^{1/d} x))) \wedge g(M)$$ and $\xi_n^{M}(x,\varphi) := \xi_n^{M, M}(x, \varphi)$ where $x\in\varphi\in\mbb NNN$.
Then, for representation \textbf{A}, we write \begin{subequations} \begin{equation} H^{M',M}_n := H_n^{M',M}(\mbb PP_n) := \frac1{n} \sum_{X\in\mbb PP_n} \xi_n^{M',M}(X,\mbb PP_n) \end{equation} and for representation \textbf{B}, \begin{equation} H^{M',M}_n := H_n^{M',M}(\mbb PP_n) := \int_{[0,1]^d} \xi_n^{M',M}(x,\mbb PP_n) \,\mathrm{d} x \end{equation} \end{subequations} as well as $H^{M}_n := H^{M,M}_n$ in both cases for the respective functionals. \begin{enumerate}[resume] \item Define the event $$F_n^{M,(1)} := \{\mbb PP_n = \mbb PP_n^{+,M}\}$$ and for a collection of positive integers $m$ and $I_n^M(\mbb PP_n)$, and a family of disjoint balls in $[0,1]^d/\sim$ that may depend on the Poisson point process $$(B_{n,i}^M(\mbb PP_n))_{i\in\{1,\dots, I_n^M(\mbb PP_n)\}}$$ with volume $V / n$ for some $V>0$, we set \begin{equation}\label{equation_sprinkling_event1} F_n^{M,(2)} := \big\{\mbb PP_n^{+,M}\big([0,1]^d \setminus (\cup_{i = 1}^{I_n^M(\mbb PP_n)} B_{n,i}^M(\mbb PP_n))\big) = 0\big\} \end{equation} and \begin{equation}\label{equation_sprinkling_event2} F_n^{M,(3)} := \bigcap_{i = 1}^{I_n^M(\mbb PP_n)} \{\mbb PP_n^{+,M}(B_{n,i}^M(\mbb PP_n)) = m\}.
\end{equation} We assume that the functional allows for such a collection such that \begin{enumerate} \item for $M$ sufficiently large, we have \begin{equation}\label{SPR1}\tag{\textbf{SPR1}} F_n^M := F_n^{M,(1)} \cap F_n^{M,(2)} \cap F_n^{M,(3)} \subseteq E_n^M; \end{equation} \item under $\{H_n^{M',M}<a\}$, for $a\in\mbb R$, there exists $c_M^{(1)} \in o(1/\log M)$ as $M\rightarrow\infty$ satisfying that almost surely \begin{equation}\label{SPR2}\tag{\textbf{SPR2}} I_n^M(\mbb PP_n) \le c_M^{(1)} n; \end{equation} \item there exists $c_M^{(2)} \in o(1)$ as $M\rightarrow\infty$ satisfying that under $\{H_n^{M',M}<a\}\cap F_n^M$, for $a\in\mbb R$, almost surely \begin{equation}\label{SPR3}\tag{\textbf{SPR3}} H_n^M(\mbb PP_n^M) \le H_n^{M',M}(\mbb PP_n) + c_M^{(2)} \end{equation} if $M$ is sufficiently large. \end{enumerate} \end{enumerate} Similar to \cite{hirsch}, we give the rate function in its entropy-based formulation. For a stationary point process $\mathcal Q$ defined on $\mbb R^d$, we let $\mbb Q$ be its law, $\mbb Q_n$ be the law $\mbb Q$ restricted to the cube $[0,n^{1/d}]^d$ and $\mbb P_n$ be the law of $n^{1/d}\mbb PP_n$. This lets us set $$h^{\mathrm{th}}(\mbb Q) := \begin{cases}\lim_{n\to\infty} \frac{1}{n} \int_{\mbb NNNN} \log \frac{\mathrm{d}\mbb Q_n}{\mathrm{d}\mbb P_n}(\varphi) \,\mathrm{d}\mbb Q_n(\varphi) &\text{if }\mbb Q_n \ll \mbb P_n \\ \infty &\text{otherwise} \end{cases}.$$ Further, for any measure $\widetilde{\mbb Q}$ on $\mbb NNNN$, we use $\widetilde{\mbb Q}[\xi]$ to denote $\int_{\mbb NNNN} \xi(0,\varphi) \,\mathrm{d}\widetilde{\mbb Q}(\varphi)$. Next, we need to introduce the Palm version of $\mbb Q$.
As it is stated in \cite{georgii}, $\mbb Q$ with finite intensity has a unique finite measure on $\mbb NNNN$ that we denote by $\mbb Q^o$, the Palm version, with the property that for all measurable functions $f\colon\mbb R^d\times\mbb NNNN \rightarrow [0,\infty)$ the equation $$\mbb E_{\mbb Q} \Big[\sum_{x\in\varphi} f(x,\varphi - x)\Big] = \int_{\mbb R^d} \int_{\mbb NNNN} f(x,\varphi\cup\{x\}) \,\mathrm{d}\mbb Q^o(\varphi) \,\mathrm{d} x$$ is fulfilled. This lets us state the theorem dealing with the lower large deviations for the critical regime. \begin{theorem}[Lower large deviations in the critical regime]\label{theorem_main_thermodynamic} Let $a>0$. \begin{itemize} \item[a)] Assume that $\xi$ satisfies \eqref{INC}. Then, \begin{equation}\label{upper_bound} \limsup_{n\to\infty} \frac1{n} \log \mbb P(H_n^{\mathrm{th}} \leq a) \leq -\inf_{\mbb Q} h^{\mathrm{th}}(\mbb Q), \end{equation} where the infimum is taken over $\{\mbb Q\colon\mbb Q^o[\xi]\le a\}$ or $\{\mbb Q\colon\mbb Q[\xi]\le a\}$ for representation \textbf{A} and representation \textbf{B}, respectively. \item[b)] Let $H_n^{\mathrm{th}}$ be given either in representation \textbf{A} or representation \textbf{B}. Assume that $\xi$ satisfies \eqref{STA}, \eqref{SPR1}, \eqref{SPR2} and \eqref{SPR3} for the respective form of $H_n^{\mathrm{th}}$. Then, \begin{equation}\label{lower_bound} \liminf_{n\to\infty} \frac1{n} \log \mbb P(H_n^{\mathrm{th}} < a) \geq -\inf_{\mbb Q} h^{\mathrm{th}}(\mbb Q), \end{equation} where the infimum is taken over $\{\mbb Q\colon\mbb Q^o[\xi]<a\}$ or $\{\mbb Q\colon\mbb Q[\xi]<a\}$ for representation \textbf{A} and representation \textbf{B}, respectively. \end{itemize} \end{theorem} To see that this coincides with our characterization of the critical regime, we first point out that in order to categorize functionals in representation \textbf{B} into a regime, the characterization via relevant nodes needs to be extended.
When dealing with an integral instead of a sum it is sensible to consider any space point $x\in[0,1]^d$ in terms of relevance. Assuming the integral representation for now, we recall that the stabilization radius $\mathcal R$ is homogeneous of order $1$. In particular, for any relevant $x\in[0,1]^d$ we observe that $$n^{1/d}\mathcal R(x,\mbb PP_n) = \mathcal R(n^{1/d}x,n^{1/d}\mbb PP_n).$$ Note that $n^{1/d}\mbb PP_n$ is a Poisson point process on $[0,n^{1/d}]^d$ with intensity $1$, and thus, for large $n$ typically $\mathcal R(n^{1/d}x,n^{1/d}\mbb PP_n)$ does not depend on $n$ anymore. Thus, typically $\mathcal R(x,\mbb PP_n)$ should be of order $n^{-1/d}$, and therefore also the typical range that we need to consider to determine a score of a relevant point, which justifies classifying this framework as critical. If we only consider relevant nodes $X\in\mbb PP_n$, we can repeat the same steps for representation \textbf{A}. Before continuing with the dense case, we briefly elaborate on the representation of the score function in \eqref{equation_score_thermo}. If a score function is homogeneous of degree $\beta$, that is, there exists $\beta\in\mbb R$ such that for all $m>0$ and $\varphi\in\mbb NNN$ and $x\in\varphi$ it holds that $\xi(m x, m \varphi) = m^\beta \xi(x, \varphi)$, then, the rescaling by $n^{1/d}$ in the arguments of the score function could be replaced by a different normalizing factor for the functional. Power-weighted edge lengths of $k$-nearest neighbor graphs are such an example. \subsection{Dense regime}\label{section_model_dense} Since the case of dense spatial networks requires much finer technical argumentation, we focus only on one type of functional for a $k$-nearest neighbor graph for an arbitrary $k\in\mbb N$. In particular, we associate the $k$-nearest neighbor graph with the functional representing large volumes of $k$-nearest neighbor balls.
For $x\in\varphi\in\mbb NNN$, this is encoded in \begin{equation}\label{equation_stabilization_dense} R_k(x, \varphi) := \inf\{r > 0 \colon \varphi( B_r(x) \setminus\{x\}) \ge k\}. \end{equation} This lets us define the according functional by \begin{equation}\label{equation_functional_kNN} H_n^{\mathrm{de}} := H_n^{\mathrm{de}}(\mbb PP_n) := \frac{1}{\rho_{n,k}^{\mathrm{de}}} \sum_{X\in \mbb PP_n} (n\kappa_d R_k(X, \mbb PP_n)^d - a_n - s_0)_+, \end{equation} where $s_0\in\mbb R$ and $(a_n)_n\subseteq\mbb R$ is a sequence that tends to infinity slower than $n$. The normalizing factor has the form $$\rho_{n,k}^{\mathrm{de}} := n a_n^{k-1} e^{-a_n}.$$ This factor is derived from the computation $$\mbb P\big(R_k(X, \mbb PP_n)^d \ge a_n/(n\kappa_d)\big) = \sum_{i=0}^{k-1} \frac{a_n^{i}}{i!} e^{-a_n}$$ and represents the expected number of points for which the maximum in \eqref{equation_functional_kNN} is nonzero. We proceed as in \cite{hirschowadakang} and define a measure on $E_0 := [s_0,\infty)$ by $$\mathrm{d}\tau_k^{\mathrm{de}}(x) := \frac{e^{-x}}{(k - 1)!} \,\mathrm{d} x$$ and denote the relative entropy of a Radon measure $\rho$ on $E_0$ with respect to $\tau_k^{\mathrm{de}}$ by $$h^{\mathrm{de}}(\rho \mid \tau_k^{\mathrm{de}}) = \begin{cases} \int_{E_0} \log \frac{\mathrm{d} \rho}{\mathrm{d} \tau_k^{\mathrm{de}}}(x) \,\mathrm{d}\rho(x) - \rho(E_0) + \tau_k^{\mathrm{de}}(E_0) &\text{if }\rho \ll \tau_k^{\mathrm{de}} \\ \infty &\text{otherwise} \end{cases}.$$ This lets us state the lower large deviations for the functional in \eqref{equation_functional_kNN}. \begin{theorem}[Lower large deviations in the dense regime]\label{theorem_main_dense} Let $(a_n)_n$ be a sequence such that $a_n\rightarrow\infty$ and $a_n - \log n - (k - 1) \log\log n \rightarrow -\infty$.
Then, for $a\in\mbb R$ \begin{equation}\label{upper_bound_dense} \limsup_{n\to\infty} \frac{1}{\rho_{n,k}^{\mathrm{de}}} \log \mbb P(H_n^{\mathrm{de}} \leq a) \leq -\inf_{\rho\colon T_k^{\mathrm{de}}(\rho) \le a} h^{\mathrm{de}}(\rho \mid \tau_k^{\mathrm{de}}) \end{equation} and \begin{equation}\label{lower_bound_dense} \liminf_{n\to\infty} \frac{1}{\rho_{n,k}^{\mathrm{de}}} \log \mbb P(H_n^{\mathrm{de}} < a) \geq -\inf_{\rho\colon T_k^{\mathrm{de}}(\rho) < a} h^{\mathrm{de}}(\rho \mid \tau_k^{\mathrm{de}}), \end{equation} where $T_k^{\mathrm{de}}(\rho) := \int_{E_0} (x-s_0) \,\mathrm{d}\rho(x)$. \end{theorem} We point out that for a node to have a positive score within any configuration, we have to consider a range of at least $r_n :=((a_n+s_0)/(n\kappa_d))^{1/d}$. Then, $nr_n^d$ diverges if $a_n\rightarrow\infty$. Therefore, typically we would expect to consider a diverging number of points in the volume within range, and thus, calling this regime dense is indeed sensible. \subsection{Outline} Lower large deviations or even large deviation principles for geometric functionals have been derived for sparse, critical and dense regimes in \cite{hirschowada}, \cite{hirsch} and \cite{hirschowadakang}. To achieve an extension of those results, we rely on the technique of sprinkling \cite{sprinkling}, which was already successfully used as a main tool to prove lower large deviations in \cite{hirsch}. In general, it means that we carefully perform small changes to the underlying process at locations that we deem as not suitable in a way such that the functional applied to the adapted configuration still approximates the one with the original point configuration. Mathematically speaking, the idea behind it is to couple two Poisson point processes such that conditioned on one of them, applying the functional to the other one guarantees some additional properties of the score function that allow us to invoke general large deviations theory.
In the following paragraphs, we give an overview of the extensions of the sprinkling technique derived in the present work compared to the results from \cite{hirsch}, \cite{hirschowada} and \cite{hirschowadakang}. \begin{enumerate} \item \emph{Critical regime:} For the critical case, \cite{hirsch} applies sprinkling on a macroscopic level to control the maximal stabilization radius of any node without significantly altering the functional. A coupled Poisson point process retains all nodes from the original process and consistently inserts additional points across the observation window. The results in \cite{hirsch} are limited to certain functionals for which the magnitude of the score function is comparable to the $d$th power of the stabilization radius. For instance, power-weighted edge lengths for a power as large as or larger than $d$ do not meet the requirements for their results. This restriction substantially simplifies the analysis because in that case regularly inserting points does not alter the functional by a big margin. We will examine some functionals that violate this condition, which requires a much finer adaptation of the sprinkling to the studied functional as we will demonstrate in Section \ref{section_applications_thermodynamic}. \item \emph{Sparse regime:} For a sparse random geometric graph, \cite{hirschowada} derives a large deviations principle for empirical measures counting potentially connected components of a fixed size and certain statistics derived from these. Their strategy builds on weak dependencies of scores assigned to relatively distant connected components in the sparse setting. This lets them approximate functionals restricted to each single box with i.i.d.\ Poisson random measures and apply well-established large deviations theory. However, for their proof to work, it is necessary that considered components cannot be too big. Otherwise, the exponential moments cannot be handled anymore.
Using sprinkling, we extend their results. The framework that we present in Section \ref{section_model_sparse} for the sparse regime also focuses on functionals for the random geometric graph but allows us to consider connected components of arbitrary size. \item \emph{Dense regime:} For an empirical measure counting large $k$-nearest neighbor distances, \cite{hirschowadakang} provides a large deviation principle. It proceeds similarly to \cite{hirschowada} by introducing a grid and by approximating the restricted functionals. In our extension, presented in Section \ref{section_model_dense}, we aim to leave the empirical measure setting and use \cite[Theorem 2.1]{hirschowadakang} combined with a sprinkling argument to derive lower large deviations for the functional that directly represents the sum of large distances to the $k$-closest point. The general way sprinkling is applied here is similar to the sparse case. However, due to the finer dependencies between adjacent boxes that have to be resampled, the procedure becomes much more complicated. For this reason, we go sequentially through the boxes, deciding whether to resample them and also making sure that each box, if resampled or not, does not affect the potential resampling of the next boxes negatively. \end{enumerate} \section{Examples} \subsection{Functionals for critical spatial random networks}\label{section_applications_thermodynamic} \subsubsection{Power-weighted edge lengths of the directed $k$-nearest neighbor graph}\label{section_applications_thermodynamic_kNN} Let $k\in\mbb N$ and $\alpha\geq 0$ be arbitrary. In the directed $k$-nearest neighbor graph, there is a directed edge from each node to its $k$ closest neighbors. We aim to represent the statistic of the power-weighted edge lengths using representation \textbf{A}.
To achieve that, for each $n\in\mbb N$, we let the score function $\xi_n$ be given by $$\xi(x,\varphi) := \sum_{y\in\varphi\cap B_{R_k(x,\varphi)}(x)} \|x-y\|^\alpha$$ for $x\in\mbb R^d$ and $\varphi\in\mbb NNNN$, where we recall $R_k$ from \eqref{equation_stabilization_dense} in the dense case, which simultaneously acts as stabilization radius here. For formality reasons, we set $\xi(x,\varphi) := \infty$ if $\#\varphi<k$. Note that when we plug $\mbb PP_n$ into the functional $\xi_n$, we replace the Euclidean norm $\|\cdot\|$ with the toroidal distance on $[0,n^{1/d}]^d/\sim$. Further, the case $\alpha<d$ was already covered in \cite{hirsch}. This functional satisfies \eqref{STA} with the choice $g(m):=e^m$ and also \eqref{INC} is satisfied when choosing $\xi^r(x,\varphi) := \xi(x,\varphi\cap B_r(x))\wedge r$ for $r>0$, $x\in\mbb R^d$ and $\varphi\in\mbb NNNN$. In order to show that the sprinkling requirements \eqref{SPR1}, \eqref{SPR2} and \eqref{SPR3} hold as well, we denote all nodes with exceptionally large stabilization radii by $$\mathcal J_n^M := \mathcal J_n^M(\mbb PP_n) := \{X\in \mbb PP_n \colon \mathcal{R}(X,\mbb PP_n) > M_n\},$$ where we use the abbreviation $M_n := M/n^{1/d}$. We point out that the number of vertices in $\mbb PP_n$ on the torus $[0,1]^d/\sim$ with a stabilization radius larger than $M_n$ is bounded, i.e., \begin{equation}\label{inequality_nodes_nearest_neighbor_large_stabilization} \#\mathcal J_n^M \le k 2^d n / (\kappa_d M^d). \end{equation} This can be seen by going through a configuration from $\mbb PP_n$ node by node and assigning the labels essential and inessential to some of them. Each considered node $X\in\mbb PP_n$ with stabilization radius larger than $M_n$ that has not been labeled yet, is labeled as essential and each of its $k-1$ closest neighbors is labeled as inessential if it has not been labeled as essential before.
After the procedure, all essential nodes cannot have any other essential points within distance $M_n$. Consequently, balls with radius $M_n/2$ around the essential nodes cannot intersect. The bound in \eqref{inequality_nodes_nearest_neighbor_large_stabilization} is derived by bounding the number of these balls in $[0,1]^d$ through the volume each occupies and multiplying with $k$ to adjust for the inessential points. An issue that can arise when it comes to the sprinkling requirements is the presence of relatively close nodes in $\mathcal J_n^M$, due to potentially non-disjoint sets in the sprinkling event. To make sure that such scenarios cannot occur, we aim to thin out the set of these bad vertices. We say a node $X\in\mbb PP_n$ is \emph{distinguished} if $X$ is the smallest node in the lexicographic order of $(\mbb PP_n\cap B_{n^{-1/d}}(X))-X$. Then, we define $$ \widetilde{\mathcal J}_n^M := \widetilde{\mathcal J}_n^M(\mbb PP_n) := \{X\in\mathcal J_n^M\colon X\text{ is distinguished}\} $$ as a subset of $\mathcal J_n^M$ that only keeps distinguished nodes. This guarantees that the distance between two nodes in $\widetilde{\mathcal J}_n^M$ is at least $n^{-1/d}$ and therefore balls with radius $n^{-1/d}/2$ centered in each node in $\widetilde{\mathcal J}_n^M$ are disjoint. With this in mind, we can define the sprinkling event by setting $$F_n^{M,(2)} := \big\{\mbb PP_n^{+,M}\big([0,1]^d \setminus \cup_{X\in\widetilde{\mathcal J}_n^M} B_{n^{-1/d}/2}(X)\big) = 0\big\}$$ and $$F_n^{M,(3)} := \bigcap_{X\in\widetilde{\mathcal J}_n^M} \{\mbb PP_n^{+,M}(B_{n^{-1/d}/2}(X)) = k\}.$$ Further, if we assume that $M$ is large, it follows that each node $X\in\mathcal J_n^M$ can only have $k-1$ other nodes in $B_{k n^{-1/d}}(X)$. Otherwise, $X$ would have a stabilization radius bounded by $k n^{-1/d}$. Thus, one of the nodes in $B_{k n^{-1/d}}(X)$ has to be distinguished.
Subsequently, after adding $k$ points to $B_{n^{-1/d}/2}(X)$ for each $X\in\widetilde{\mathcal J}_n^M$, the stabilization radius for each $X\in\mathcal J_n^M$ is bounded by $(k+1)n^{-1/d}$ and the same bound holds for the stabilization radii of the additionally inserted points. Hence, \eqref{SPR1} is fulfilled and the bound from \eqref{inequality_nodes_nearest_neighbor_large_stabilization} with the definition of $\widetilde{\mathcal J}_n^M$ implies \eqref{SPR2} with $I_n^M(\mbb PP_n) := \#\widetilde{\mathcal J}_n^M$, $V:= 2^{-d}$ and $m:=k$. Finally, to verify \eqref{SPR3}, we see that the $k$ nodes put in $B_{n^{-1/d}/2}(X)$ for every $X\in\widetilde{\mathcal J}_n^M$, each come with an additional score that is bounded by $k$ after the rescaling with $n^{1/d}$. All scores of vertices that already existed can only decrease when inserting the new nodes and the same holds for the cut-off score. This means we arrive at $$H_n^{M',M} (\mbb PP_n^M) \leq H_n^{M',M}(\mbb PP_n) + \frac1{n} k^2 \#\mathcal J_n^M \le H_n^{M',M}(\mbb PP_n) + \frac{\log M}{M^d}$$ under $F_n^M$, for large $M$ and $M'>M$, confirming \eqref{SPR3}. \subsubsection{Power-weighted spherical contact distances} A basic characteristic of a point pattern is the distribution of the spherical contact distances \cite[Section 4.2]{illian}. Loosely speaking, it describes the distance to the nearest point of the given point pattern measured from a space point that is selected at random. A basic approach to estimate this quantity is the point-count method \cite[Section 4.2]{illian}. Here, the window is discretized, and then for each subcube, the distance of its center to the closest point is recorded. A natural way to formulate an estimator that is independent of the discretization, is to replace the discretization with an integral.
Following this setup, in the present example, we describe the large deviation behavior of estimators of the $\alpha$th moment of the spherical contact distances for $\alpha > d$. For this, we aim to use the integral form representation \textbf{B}. We define the score function by $$\xi(x,\varphi) := \inf_{y\in\varphi} \|y - x\|^\alpha,$$ where, as in the previous section, we replace the Euclidean norm with the toroidal distance when applying the score function to a configuration on a torus. \begin{figure}[H] \centering \begin{tikzpicture}[scale = 5.85] \input{Tikz_pictures/ContactDistances} \end{tikzpicture} \hspace{.3cm} \begin{tikzpicture}[scale = .95] \begin{axis}[ colorbar, colormap name = whiteblack, view={30}{70}, ] \addplot3[surf, mesh/rows=51] table [x index=0, y index=1, z index=2] {Tikz_pictures/contact_distances.txt}; \end{axis} \end{tikzpicture} \caption{Simulation of spherical contact distances based on a Poisson point process on a two-dimensional torus. The lighter the shade, the smaller the distance of a space point to its closest node in the configuration.}\label{fig:contact_distances} \end{figure} We can set the stabilization radius to be $$\mathcal R(x,\varphi) := \inf\{r > 0 \colon \varphi(B_r(x)) \ge 1\}.$$ With this stabilization radius and the choice $g(M):=M^\alpha$, \eqref{STA} is satisfied. Also, \eqref{INC} holds with the choice $\xi^r(x,\varphi) := \xi(x,\varphi\cap B_r(x))\wedge r$ for $r>0$, $x\in\mbb R^d$ and $\varphi\in\mbb NNNN$. In order to construct the sprinkling event, we divide $[0,1]^d$ into a grid of cubes of side length $n^{-1/d} M/\log M$, denote this collection by $\mathcal Q_n^M$ and call a box $Q\in\mathcal Q_n^M$ bad if it does not contain any Poisson points, i.e., if $Q\cap\mbb PP_n = \emptyset$. Let $$\mathcal J_n^M := \mathcal J_n^M(\mbb PP_n) := \{Q\in\mathcal Q_n^M\colon \mbb PP_n\cap Q=\emptyset\}$$ be the set of bad boxes.
If a box is bad, all points in a cube of volume $n^{-1/d}$ in the center of the bad box must have a distance to the closest node of at least $n^{-1/d} M/(\log M)^2$ for sufficiently large $M$. Thus, a bad subcube contributes with a value of at least $M^\alpha /(\log M)^{2\alpha}$ to the total functional after resolving the rescaling with factor $n^{1/d}$. Hence, under the event $\{H_n^{M',M} < a\}$, for $M'>M$, such bad boxes can only occur a limited number of times. More precisely, due to our choice of $g$, we find that \begin{equation}\label{equation_bad_boxes_contact_distance1} \#\mathcal J_n^M \le n a /(M^\alpha /(\log M)^{2\alpha}) = n a \frac{(\log M)^{2\alpha}}{M^\alpha}. \end{equation} Now, to define the sprinkling event, we introduce an additional sub grid. First, without explicitly stating it, in the following, we will assume that $M$ is sufficiently large for some properties to hold and that we can manage the assignment of the subcubes without having to deal with fractions of subcubes. A negligible adjustment of the side length of the boxes would assure the latter. Divide $Q\in\mathcal Q_n^M$ into subcubes of side length $n^{-1/d} \log M$ and call this collection $\mathcal W_n^M(Q)$. With the observation in \eqref{equation_bad_boxes_contact_distance1}, the number of subcubes in bad boxes is bounded by \begin{equation}\label{equation_bad_boxes_contact_distance2} \#\{W\in \mathcal W_n^M(Q) \colon Q\in \mathcal J_n^M\} \le n a \frac{(\log M)^{2\alpha}}{M^\alpha} \frac{(n^{-1/d} M/\log M)^d}{(n^{-1/d} \log M)^d} = n a \frac{(\log M)^{2(\alpha-d)}}{M^{\alpha-d}}. \end{equation} For $Q\in\mathcal J_n^M$ and $W\in\mathcal W_n^M(Q)$, let $B_W\subseteq W$ be the ball with radius $n^{-1/d}$ that is located around the center of $W$. Now, we can define the sprinkling event by inserting a node in each subcube of every bad box.
Thus, according to \eqref{equation_sprinkling_event1} and \eqref{equation_sprinkling_event2}, we get the events $$F_n^{M,(2)} := \big\{\mbb PP_n^{+,M}\big([0,1]^d \setminus (\cup_{W\in\{W'\in \mathcal W_n^M(Q) \colon Q\in \mathcal J_n^M\}} B_W)\big) = 0\big\}$$ and $$F_n^{M,(3)} := \bigcap_{W\in\{W'\in \mathcal W_n^M(Q) \colon Q\in \mathcal J_n^M\}} \{\mbb PP_n^{+,M}(B_W) = 1\}.$$ If all bad boxes contain at least one vertex, the stabilization radius of any space point can be at most of order $n^{-1/d} M/\log M$, and is therefore, less than or equal to $M/n^{1/d}$ for large enough $M$, verifying \eqref{SPR1}. Further, since we assumed $\alpha>d$, \eqref{equation_bad_boxes_contact_distance2} confirms \eqref{SPR2} with $I_n^M(\mbb PP_n) := \#\{W\in \mathcal W_n^M(Q) \colon Q\in \mathcal J_n^M\}$, $V:= 1$ and $m:=1$. For \eqref{SPR3}, we point out that inserting an additional node cannot increase the contact distance of any point. Additionally, every space point in a good box has a contact distance of order $n^{-1/d} M /\log M$ and, thus, cannot be affected by the cut-off of the score in the functional and thus, also with respect to the cut-off functional the contact distance of a space point in a good box after the sprinkling can only decrease. This observation yields that only the added points in bad boxes have to be considered to bound the increase of the cut-off functional under the sprinkling event. But under $F_n^{M,(3)}$, the distance to the closest node of every space point in a bad box is of order $\log M$ and thus, bounded by $(\log M)^2$ for large $M$. Hence, we get that under $F_n^M\cap \{H_n^{M',M} < a\}$ $$ H_n^M (\mbb PP_n^M) = H_n^{M',M}(\mbb PP_n^M) \le H_n^{M',M}(\mbb PP_n) + \frac{(\log M)^{2\alpha}}{n} I_n^M(\mbb PP_n) \le H_n^M(\mbb PP_n) + a \frac{(\log M)^{4\alpha-2d}}{M^{\alpha-d}}, $$ also verifying \eqref{SPR3}.
\subsection{Functionals for the sparse random geometric graph}\label{section_applications_sparse} \subsubsection{Subgraph counts} Let $G_0 := (V,E)$, where $V$ represents a set of vertices and $E$ a set of edges, be an arbitrary fixed finite connected graph. With this, we define $$\xi(\varphi) := \#\big\{(\varphi', E') \colon \varphi'\subseteq\varphi,\; E'\subseteq\{\{x,y\}\subseteq \varphi'\colon \|x-y\|\le 1\},\; (\varphi', E') \cong G_0\big\}$$ for a configuration $\varphi\in\mbb NNN$, to count the occurrence of the graph $G_0$ in the geometric graph with connectivity radius $1$ on $\varphi$. For $n\in\mbb N$ and $\varphi\in\mbb NNN^{(1)}$ with ${\rm dist}(\varphi,\partial[0,1]^d) > r_n$, we define $\xi_n(\varphi)$ similar to $\xi(r_n^{-1}\varphi)$ but replace the Euclidean distance with the toroidal metric of $[0,r_n^{-1}]^d/\sim$. These functionals fulfill all requirements stated in Theorem \ref{theorem_main_sparse}. If used as a score function, as displayed in \eqref{equation_functional_RGG}, it represents occurrences of $G_0$ in a random geometric graph with connectivity radius $r_n$ in a sparse regime. Additionally, sometimes it is possible to simplify the rate function further. More precisely, assume that we count the occurrences of a $k_0$-clique. Then, with Mecke's formula, it can be computed that \begin{equation}\label{equation_example_subgraph_count_expectation1} \mbb E[H_n^{\rm sp}] \overset{n\to\infty}{\longrightarrow} \frac{v_{d,k_0}(G_0)}{k_0!} =: \mu_{d,k_0}. \end{equation} The right-hand side is given by \begin{align}\label{equation_example_subgraph_count_expectation2} \begin{split} v_{d,k_0}(G_0) &:= \int_{\mbb R^{d(k_0-1)}} \prod_{\{i, j\}\subseteq \{1,\dots,k_0\}} \mathbbmss{1}\big\{\|x_i-x_j\| \le 1 \big\} {\rm d}(x_2,\dots,x_{k_0}), \end{split} \end{align} where $x_1:=0$ and $v_{d,k_0}(G_0):=1$ if $k_0=1$.
Intuitively, \eqref{equation_example_subgraph_count_expectation2} represents the volume of all possible locations to place $k_0-1$ points around a fixed point such that the generated geometric graph with connectivity radius $1$ is isomorphic to $G_0$. Now, from our proof for the sparse regime, it follows that we can also write $\xi$ directly as an indicator that triggers for complete connected components of size $k_0$. Then, \cite[Remark 3.6]{hirschowada} implies that \begin{align*} -\inf_{\rho\colon T^{\rm sp}(\rho) \le a} h_{k_0}^{\rm sp}(\rho \mid \tau_{k_0}^{\rm sp}) &= -\inf_{x \le a} x\log(x/\mu_{d,k_0}) - x + \mu_{d,k_0} \\ &= \begin{cases} -a\log(a/\mu_{d,k_0}) + a - \mu_{d,k_0} &\text{if } a<\mu_{d,k_0} \\ 0 &\text{otherwise}\end{cases}, \end{align*} and an analogous simplification could be achieved but would require substantial additional computations and is therefore omitted. \subsubsection{Betti numbers and persistent Betti numbers} Simply expressed, Betti numbers count holes of a certain dimension in simplicial complexes. \cite[Section 4.1]{hirschowada} gives a short overview of literature dealing with the basic concepts behind Betti numbers and the more general persistent Betti numbers. They can be built upon the \v{C}ech complex. For a set $\varphi\in\mbb NNN$ and $r\ge0$, the \v{C}ech complex is defined by $$\check{C}_r(\varphi) := \big\{\psi\subseteq\varphi\colon \cap_{x\in\psi} B_r(x) \neq \emptyset\big\}.$$ Now, we can define the $k$th persistent Betti number for $0\le s\le t\le \infty$ by $$\beta_k(\varphi,s,t) = \dim \frac{Z_k(\check{C}_s(\varphi))}{Z_k(\check{C}_s(\varphi))\cap \widetilde B_k(\check{C}_t(\varphi))},$$ where $\varphi\in\mbb NNN$, $Z_k$ is the $k$th cycle group of the \v{C}ech complex and $\widetilde B_k$ represents the $k$th boundary group.
For configurations close to $\partial[0,1]^d$, we define $\xi_n$ similar to $\xi(r_n^{-1}\,\cdot\,)$, using balls with respect to the torus $[0,r_n^{-1}]^d/\sim$ to set up the \v{C}ech complex. The requirements for Theorem \ref{theorem_main_sparse} are satisfied and we recover the ordinary Betti numbers by setting $s=t$. As in the case of subgraph counts, also here, a simplification of the rate function according to \cite[Remark 3.6]{hirschowada} is achievable. However, to keep this section at a reasonable size, we omit the explicit computations. \subsubsection{Edge lengths} For a point set $\varphi\in\mbb NNN$, we define $$\xi(\varphi) := \sum_{x,y\in\varphi} \|x-y\| \mathbbmss{1}\{\|x-y\| \le 1\}$$ and $\xi_n$, for $n\in\mbb N$, is defined analogously to the subgraph counts or Betti numbers examples, using the toroidal metric of $[0,r_n^{-1}]^d/\sim$ instead of the Euclidean distance. Then, all requirements of Theorem \ref{theorem_main_sparse} are satisfied. Note that here, $k_0=2$ and thus, as the proof of Theorem \ref{theorem_main_sparse} shows, only isolated edges will be relevant for the lower large deviations. \section{Proof of Theorem \ref{theorem_main_thermodynamic} (critical)}\label{section_proof_thermodynamic} For bounded and local score functions, \cite{georgii} provides a large deviation principle for associated functionals. We recall that our strategy is to use a coupling consisting of a thinned Poisson point process and another independent Poisson process. We let the thinning fully replicate $\mbb PP_n$ while using the independent Poisson point process to sprinkle in additional points following a specific pattern to guarantee locality and boundedness of the score function such that the general large deviations theory becomes invokable. First, we let $\widetilde{\mbb PP}_n$ be a Poisson point process with intensity $1$ on the torus $[0,n^{1/d}]^d / \sim$.
Note that $n^{1/d}\mbb PP_n$ and $\widetilde{\mbb PP}_n$ have the same distribution. Now, we can replicate the proof of \cite[Theorem 1.1]{hirsch} to get Theorem \ref{theorem_main_thermodynamic} a), the upper bound for the lower large deviations. \begin{proof}[Proof of Theorem \ref{theorem_main_thermodynamic} a)] We recall $\xi^r$ from \eqref{INC}. Further, for the next steps, we assume that we are in representation \textbf{A} and indicate that the other case works analogously. The functional $\xi^r$ is bounded and local, and thus, we can use \cite[Theorem 3.1]{georgii} (or \cite[Corollary 3.2]{georgii} in the case of representation \textbf{B}) to get that \begin{align*} \limsup_{n\to\infty} \frac1{n} \log\mbb P(H_n^{\rm th} \le a) &\le \limsup_{n\to\infty} \frac1{n} \log\mbb P\bigg(\frac1{n}\sum_{X\in\mbb PP_n} \xi^r(n^{1/d}X, n^{1/d}\mbb PP_n) \le a\bigg) \\ &= \limsup_{n\to\infty} \frac1{n} \log\mbb P\bigg(\frac1{n}\sum_{X\in\widetilde{\mbb PP}_n} \xi^r(X, \widetilde{\mbb PP}_n) \le a\bigg) \le -\inf_{\mbb Q\colon\mbb Q^o[\xi^r] \le a} h^{\rm th}(\mbb Q). \end{align*} By \eqref{INC}, $\xi^r(x,\varphi)$ increases, as $r$ grows, towards $\xi(x,\varphi)$ for each $x\in\varphi\in\mbb NNNN$. Proceeding, using monotone convergence, as in the proof of \cite[Theorem 1.1]{hirsch}, it follows that $$-\limsup_{r\to\infty} \inf_{\mbb Q\colon\mbb Q^o[\xi^r] \le a} h^{\rm th}(\mbb Q) \le -\inf_{\mbb Q\colon\mbb Q^o[\xi] \le a} h^{\rm th}(\mbb Q),$$ which concludes the upper bound. \end{proof} In order to prove the lower bound, it is necessary to examine the event $F_n^M$ from \eqref{SPR1} in detail. For this, we denote the number of Poisson points of $\mbb PP_n$ by $N_n := \mbb PP_n([0,1]^d)$. The next lemma gives a lower bound for the probability of the sprinkling event.
\begin{lemma}[Sprinkling regularizes with high probability]\label{lemma_sprinkling} For $n\geq M\geq 1$ sufficiently large, we get that almost surely $$\mbb P(F_n^M\mid\mbb PP_n) \geq (1-M^{-1})^{N_n} e^{-n/M} \big( \tfrac{(V/M)^m}{m!} e^{-V/M}\big)^{I_n^M(\mbb PP_n)}.$$ \end{lemma} \begin{proof}[Proof of Lemma \ref{lemma_sprinkling}] Looking at the probabilities of each single event of $F_n^M$ gives \begin{align*} &\mbb P(\mbb PP_n^{-,M}= \mbb PP_n \mid \mbb PP_n) = (1-M^{-1})^{N_n}, \\ &\mbb P\big(\mbb PP_n^{+,M}\big([0,1]^d \setminus (\cup_{i = 1}^{I_n^M(\mbb PP_n)} B_{n,i}^M(\mbb PP_n))\big) = 0 \mid \mbb PP_n\big) \geq e^{-n/M} \intertext{and} &\mbb P\bigg(\bigcap_{i = 1}^{I_n^M(\mbb PP_n)} \{\mbb PP_n^{+,M}(B_{n,i}^M(\mbb PP_n)) = m\} \biggm\vert \mbb PP_n\bigg) = \big(\tfrac{(V/M)^m}{m!} e^{-V/M}\big)^{I_n^M(\mbb PP_n)} \end{align*} almost surely, where we used that the survival probability of the thinning is $1-M^{-1}$ and the intensity of $\mbb PP_n^{+,M}$ was assumed to be $n/M$. Using independence between all three events conditioned on $\mbb PP_n$ yields the desired statement. \end{proof} Now, we conclude the proof of Theorem \ref{theorem_main_thermodynamic}. \begin{proof}[Proof of Theorem \ref{theorem_main_thermodynamic} b)] In the following, assume that $M>0$ is large and $M'>M$. Because of $F_n^M\subseteq E_n^M$, which was assumed in \eqref{SPR1}, it follows that under the event $F_n^M$ the radius of stabilization with respect to $\mbb PP_n^M$ of each node in $\mbb PP_n^M$ or space point in $[0,1]^d$, depending on whether we consider a functional given in representation \textbf{A} or representation \textbf{B}, is at most $M n^{-1/d}$.
Hence, we can invoke \eqref{STA} from which follows that under $F_n^M$ we can replace $H_n(\mbb PP_n^M)$ by $H_n^M(\mbb PP_n^M)$, and get $$\mbb P(H_n(\mbb PP_n)<a) = \mbb P(H_n(\mbb PP_n^M)<a) \geq \mbb P(\{H_n^M(\mbb PP_n^M) < a\} \cap F_n^M\cap\{H_n^{M',M}(\mbb PP_n) < a\}).$$ Due to \eqref{SPR3} it holds that under $F_n^M\cap\{H_n^{M',M}(\mbb PP_n) < a\}$ almost surely \begin{equation}\label{inequality_added_points} H_n^M (\mbb PP_n^M) \leq H_n^{M',M}(\mbb PP_n) + c_M^{(2)}, \end{equation} as $M\rightarrow\infty$ and thus, since $\{H_n^{M',M}(\mbb PP_n) < a - c_M^{(2)}\} \subseteq \{H_n^{M',M}(\mbb PP_n) < a\}$, it follows that, $$\mbb P(\{H_n^M(\mbb PP_n^M) < a\} \cap F_n^M \cap\{H_n^{M',M}(\mbb PP_n) < a\}) \geq \mbb P(\{H_n^{M',M}(\mbb PP_n) + c_M^{(2)} < a\} \cap F_n^M).$$ By conditioning on $\mbb PP_n$ and applying Lemma \ref{lemma_sprinkling} for sufficiently large $n$, we arrive at \begin{align*} &\mbb E\big[\mathbbmss{1}\{H_n^{M',M}(\mbb PP_n) < a - c_M^{(2)}\} \mbb P(F_n^M\mid\mbb PP_n)\big] \\ &\ge \mbb E\big[\mathbbmss{1}\{H_n^{M',M}(\mbb PP_n) < a - c_M^{(2)}\} (1-M^{-1})^{N_n} \big(\tfrac{(V/M)^m}{m!} e^{-V/M}\big)^{I_n^M(\mbb PP_n)}\big] e^{-n/M} \\ &= \mbb E\Big[\mathbbmss{1}\{H_n^{M',M}(\mbb PP_n) < a - c_M^{(2)}\} \exp\Big(N_n\log(1-M^{-1}) + I_n^M(\mbb PP_n)\log\big(\tfrac{(V/M)^m}{m!} e^{-V/M}\big)\Big)\Big] e^{-n/M}. \end{align*} Moreover, invoking \eqref{SPR2} and introducing a bound for $N_n$ yields for any $c>0$, \begin{align*} &\mbb E\Big[\mathbbmss{1}\{H_n^{M',M}(\mbb PP_n) < a - c_M^{(2)}\} \exp\Big(N_n\log(1-M^{-1}) + I_n^M(\mbb PP_n)\log\big(\tfrac{(V/M)^m}{m!} e^{-V/M}\big)\Big)\Big] \\ &\ge \mbb P\big(H_n^{M',M}(\mbb PP_n) < a - c_M^{(2)}, N_n < cn\big) \exp\big(c n\log(1-M^{-1}) + c_M^{(1)} n \log(\tfrac{(V/M)^m}{m!} e^{-V/M})\big).
\end{align*} To convince ourselves that the exponential factors are not relevant, we recall that $c_M^{(1)}/\log M \rightarrow 0$ as $M\rightarrow\infty$ was assumed, which yields \begin{align*} &\frac1{n}\log\Big(\exp\big(cn \log(1-M^{-1}) + c_M^{(1)} n \log(\tfrac{(V/M)^m}{m!} e^{-V/M}) - n/M\big)\Big) \\ &= c \log(1-M^{-1}) + c_M^{(1)} \big(\log(\tfrac{(V/M)^m}{m!}) - V/M\big) - M^{-1} \overset{M\to\infty}{\longrightarrow} 0. \end{align*} Now, for the other factor, \begin{align*} &\mbb P\big(H_n^{M',M}(\mbb PP_n) < a - c_M^{(2)}, N_n < c n\big) \ge \mbb P\big(H_n^{M',M}(\mbb PP_n) < a - c_M^{(2)}\big) - \mbb P\big(N_n \ge c n\big), \end{align*} where for large $c$, \cite[Lemma 1.2]{poisson_conc} can be used to show that the second term does not affect the large deviations. For the next computations, we assume that $H_n^{\rm th}$ has representation \textbf{A}. The other case works analogously. We define $\xi^{M',M}(x,\varphi) := \xi(x, \varphi \cap B_{M'}(x))\wedge g(M)$ for $x\in\mbb R^d$ and $\varphi\in\mbb NNNN$, and point out that $\xi^{M',M}$ can be locally determined and is bounded by $g(M)$. Besides that, recall that $\widetilde{\mbb PP}_n$ is equal in distribution to $n^{1/d}\mbb PP_n$. Then, applying \cite[Theorem 3.1]{georgii} (or \cite[Corollary 3.2]{georgii} in the case of representation \textbf{B}), we can proceed as in the proof of the upper bound, and we arrive at \begin{align*} &\liminf_{n\to\infty} \frac1{n} \log \mbb P\big(H_n^{M',M}(\mbb PP_n) < a - c_M^{(2)}\big) \\ &= \liminf_{n\to\infty} \frac1{n} \log\mbb P\Big(\frac1{n} \sum_{X\in\mbb PP_n} \xi(n^{1/d}X, n^{1/d}\mbb PP_n \cap B_{M'}(n^{1/d} X))\wedge g(M) \le a - c_M^{(2)}\Big) \\ &= \liminf_{n\to\infty} \frac1{n} \log\mbb P\Big(\frac1{n} \sum_{X\in\widetilde{\mbb PP}_n} \xi^{M',M}(X,\widetilde{\mbb PP}_n) \le a - c_M^{(2)}\Big) \ge -\inf_{\mbb Q\colon\mbb Q^o[\xi^{M',M}]<a - c_M^{(2)}} h^{\rm th}(\mbb Q).
\end{align*} Finally, we assert that $$\liminf_{M\rightarrow\infty} \liminf_{M'\rightarrow\infty} \Big(-\inf_{\mbb Q\colon\mbb Q^o[\xi^{M',M}]<a - c_M^{(2)}} h^{\rm th}(\mbb Q)\Big) \ge -\inf_{\mbb Q\colon\mbb Q^o[\xi]<a} h^{\rm th}(\mbb Q),$$ which yields the desired result. To prove this assertion, let $\mbb Q$ be an arbitrary point process that satisfies $\mbb E_{\mbb Q^o}[\xi(0,\cdot\, )]<a$. This lets us find some $\delta>0$ such that $\mbb E_{\mbb Q^o}[\xi(0,\cdot\, )]<a-\delta$. Next, for any $M>0$, it also holds that $\mbb E_{\mbb Q^o}[\xi(0,\cdot\, ) \wedge g(M)]<a-\delta$ due to monotonicity. Further, dominated convergence yields that $\lim_{M'\to\infty} \mbb E_{\mbb Q^o}[\xi(0,\cdot\, \cap B_{M'}(0)) \wedge g(M)] = \mbb E_{\mbb Q^o}[\xi(0,\cdot\, ) \wedge g(M)]$ from which we deduce the existence of $M_0(\delta,M)$ such that for all $M'>M_0(\delta,M)$ $$\mbb E_{\mbb Q^o}[\xi(0,\cdot\, \cap B_{M'}(0)) \wedge g(M)] < a - \delta/2.$$ In particular, from $c_M^{(2)} \rightarrow 0$ as $M\rightarrow\infty$ we get that for some $M_0(M)>0$ and all $M'>M_0(M)$ $$\mbb E_{\mbb Q^o}[\xi(0,\cdot\, \cap B_{M'}(0)) \wedge g(M)] < a - c_M^{(2)}$$ if $M$ is large enough. Therefore, $$\{\mbb Q\colon\mbb Q^o[\xi]<a\} \subseteq \{\mbb Q\colon\mbb Q^o[\xi^{M',M}]<a - c_M^{(2)} \text{ for all }M'>M_0(M) \text{ and }M\text{ large}\}$$ which implies that $$\limsup_{M\rightarrow\infty} \limsup_{M'\rightarrow\infty} \Big(\inf_{\mbb Q\colon\mbb Q^o[\xi^{M',M}]<a - c_M^{(2)}} h^{\rm th}(\mbb Q)\Big) \le \inf_{\mbb Q\colon\mbb Q^o[\xi]<a} h^{\rm th}(\mbb Q).$$ \end{proof} \section{Proof of Theorem \ref{theorem_main_sparse} (sparse)}\label{section_proof_sparse} For the sparse case, we would like to apply the large deviation principle for empirical measures counting potentially connected components of a fixed size of a random geometric graph from \cite[Theorem 2.1]{hirschowada}.
Using sprinkling, we would ideally like to create a coupled Poisson point process that, when serving as nodes for a geometric graph, only contains fixed-sized components. A simple replication of the procedure in the critical case for the sparse case is not possible as we will desire for the thinning to keep most of the points, which will be with very high probability an amount of order $n$, thus, resulting in costs for the thinning of magnitude $e^{-c n}$ for some $c>0$. But the speed for the sparse regime satisfies $$\frac{n}{\rho_{n,k_0}^{\rm sp}} = \frac{n}{n^{k_0} r_n^{d(k_0-1)}} = (n r_n^d)^{-(k_0-1)} \rightarrow \infty,$$ if $k_0>1$. Instead, as in \cite{hirschowada}, we will divide $[0,1]^d$ into a grid and resample an entire box of the grid if we deem the configuration in it as not feasible and additionally bound the inevitable error in the functional that this process creates. This then results in a coupled Poisson process as a foundation for a geometric graph for which all significant connected components are of a fixed size, and therefore, we can invoke the large deviation principle from \cite[Theorem 2.1]{hirschowada}. Now, to give more details after this overview, as announced, we start by dividing $[0,1]^d$ into a grid of cubes with side length $(\rho_{n,k_0}^{\rm sp})^{-1/d}$ each, where to keep the notation simpler, we assume that $\rho_{n,k_0}^{\rm sp}$ is a natural number and denote this collection by $\mathcal{Q}_n$. We define $\mbb PP'_n$ as a Poisson point process on $[0,1]^d$ with intensity $n$ independent of $\mbb PP_n$. Further, for all cubes $Q\in\mathcal{Q}_n$, let $X_{Q, \varepsilon}$ be Bernoulli random variables with parameter $\varepsilon\in(0,1)$, independent of each other and all introduced Poisson random measures.
Using this, we define $$\mbb PP_n^Q := \begin{cases} Q\cap\mbb PP'_n &\text{if } X_{Q, \varepsilon} = 1 \\ Q\cap\mbb PP_n &\text{if } X_{Q, \varepsilon} = 0 \end{cases},$$ which yields a Poisson point process on $Q$ with intensity $n$ for each $n\in\mbb N$. Consequently, $\mbb PP''_n := \cup_{Q\in\mathcal{Q}_n} \mbb PP_n^Q$ is a Poisson point process on $[0,1]^d$ with intensity $n$. The idea is to use the Bernoulli random variables to control $\mbb PP''_n$ in such a way that we resample $\mbb PP_n$ using $\mbb PP'_n$ in each box that has a node with $k_0$ relatively close other vertices while keeping $\mbb PP_n$ in all other boxes. To achieve this, let $$\mathcal{J}_n := \mathcal{J}_n(\mbb PP_n) := \big\{Q \in \mathcal{Q}_n\colon \max_{X\in Q\cap\mbb PP_n} \mbb PP_n(B_{2^d k_0 r_n}(X)) \ge k_0+1\big\}$$ be the boxes that contain a vertex with at least $k_0$ other vertices within distance $2^d k_0 r_n$ and that we would therefore like to resample. To further ease notation, we also denote the number of bad boxes by $$J_n := \#\mathcal{J}_n(\mbb PP_n)$$ and we point out that we can consider $k_0$ as fixed from now on, which lets us write $$\rho_n^{\mathrm{sp}} := \rho_{n,k_0}^{\mathrm{sp}}.$$ We first make sure that these bad boxes do not occur too many times with a probability that is too high. \begin{lemma}[Bad boxes are exponentially negligible]\label{lemma_probability_bad_boxes_sparse} Let $\delta>0$. Assume that $n r_n^d\rightarrow 0$ and $\rho_n^{\mathrm{sp}} \rightarrow\infty$. Then, $$\limsup_{n\to\infty} \frac1{\rho_n^{\mathrm{sp}}} \log \mbb P(J_n \ge \delta\rho_n^{\mathrm{sp}}) = -\infty.$$ \end{lemma} Next, we determine what happens within a box that was resampled and ignore effects of adjacent boxes for now. Preferably we would like the sprinkled process not to create any new components consisting of $k_0$ or more vertices within a resampled cube.
The next lemma states that for each $n\in\mbb N$, conditioned on $\mbb PP_n$, the probability of not having $k_0$ close points within a resampled box $Q\in\mathcal{J}_n$ is bounded from below. \begin{lemma}[With positive probability, a resampled box does not contain $k_0$ close nodes]\label{lemma_prob_new_edges_sparse} Assume that $n r_n^d\rightarrow 0$ and $\rho_n^{\mathrm{sp}}\rightarrow\infty$. Then, for any $M > 2 \kappa_d^{k_0-1} 2^{k_0(d^2+1)} k_0^{k_0}$ it holds that $$\mbb P\bigg(\bigcap_{Q\in\mathcal{J}_n} \Big\{\max_{X\in Q\cap\mbb PP'_n} \mbb PP'_n\big(B_{2^d k_0 r_n}(X)\cap Q\big) \le k_0-1\Big\} \biggm\vert \mbb PP_n\bigg) \ge \alpha_M^{J_n},$$ where $\alpha_M := 2^{-M-1}$. \end{lemma} One issue that we cannot prevent is that there can be large connected components between two adjacent boxes when at least one of them is resampled. But we can show that the number of these components will, with high enough probability, not be significant. More precisely, the next lemma will control the number of large components that can occur between boxes when resampling. To ease notation, for every $Q\in\mathcal{Q}_n$, we let $$\partial_n Q := \{x\in Q\colon \operatorname{dist}(\{x\},\partial Q) \le 2^d k_0 r_n\}$$ denote the set of all points in $Q$ within distance $2^d k_0 r_n$ of the boundary of $Q$. The factor $2^d k_0$ appears here to be able to deal with boxes that share a face, which allows for large connected components to exist that span over multiple boxes. We also let $$\ms{CC}_{n,k_0} := \{X\in\mbb PP''_n\colon s_n(\{X\}\cup\varphi,\mbb PP''_n)=1 \text{ for some }\varphi\subseteq\mbb PP''_n \text{ with } \#(\{X\}\cup\varphi)\in\{k_0,\dots, 2^d k_0\}\}$$ be the vertices in $\mbb PP''_n$ that are part of a connected component of size between $k_0$ and $2^d k_0$, where we recall the definition of $s_n$ from \eqref{equation_indicator_torus}.
\begin{lemma}[The number of large connected components between boxes is negligible]\label{lemma_prob_resampling_between_boxes_sparse} Let $\delta>0$. Assume that $n r_n^d\rightarrow 0$ and $\rho_n^{\mathrm{sp}}\rightarrow\infty$. Then, $$\limsup_{n\to\infty} \frac1{\rho_n^{\mathrm{sp}}} \log \mbb P\big(\ms{CC}_{n,k_0}(\cup_{Q\in \mathcal{Q}_n} \partial_n Q) \ge \delta\rho_n^{\mathrm{sp}}\big) = -\infty.$$ \end{lemma} With these lemmas and preliminaries, we can prove the lower large deviations in the sparse regime. \begin{proof}[Proof of Theorem \ref{theorem_main_sparse}] We point out that \cite{hirschowada} worked with the Euclidean distance on $[0,1]^d$ instead of the toroidal metric. For this reason, we need some additional notation to deal with this subtle difference. Also, recall that $H_n^{\mathrm{sp}} = \frac{1}{\rho_n^{\mathrm{sp}}} \sum_{\varphi\subseteq \mbb PP_n} \xi_n(\varphi) s_n(\varphi,\mbb PP_n)$, where $s_n$ checks for connected components with respect to the toroidal metric. We define the restriction to components of size $k_0$ by $$H_{n,k_0}^{\mathrm{sp}} := H_{n,k_0}^{\mathrm{sp}}(\mbb PP_n) :=\frac{1}{\rho_n^{\mathrm{sp}}} \sum_{\varphi\subseteq \mbb PP_n, \#\varphi = k_0} \xi_n(\varphi) s_n(\varphi,\mbb PP_n).$$ To also incorporate the Euclidean metric, we define $\mathsf{GG}'_n(\varphi)$ as the geometric graph on $\varphi\in\mbb NNN$ with connectivity radius $r_n$ and with respect to the Euclidean distance. With this, for $\varphi\subseteq\psi\in\mbb NNN$, we set $$s'_n(\varphi,\psi) := \mathbbmss{1}\{\varphi\text{ is a connected component of }\mathsf{GG}'_n(\psi)\}$$ to be the counterpart of $s_n$ in terms of the Euclidean distance.
Further, along the lines of \cite[Theorem 3.3]{hirschowada}, we define $$\widetilde{H}_{n,k_0}^{\mathrm{sp}} := \widetilde{H}_{n,k_0}^{\mathrm{sp}}(\mbb PP_n) := \frac1{\rho_n^{\mathrm{sp}}} \sum_{\varphi\subseteq\mbb PP_n, \#\varphi=k_0} \xi(r_n^{-1}\varphi) t_n(\varphi,\mbb PP_n),$$ where $$t_n(\varphi,\mbb PP_n) := \mathbbmss{1}\{\|y-z\| \ge r_n \text{ for all }y\in \varphi \text{ and } z\in \mbb PP_n\setminus \varphi\} \mathbbmss{1}\{\operatorname{diam}(\varphi)\le k_0 r_n\}$$ is the indicator assuring that $\varphi$ is isolated and locally concentrated within $\mbb PP_n$. Our goal is to apply \cite[Theorem 3.3]{hirschowada} to $\widetilde{H}_{n,k_0}^{\mathrm{sp}}$. One main step for the upper bound of this proof will be to show that the error between $H_{n,k_0}^{\mathrm{sp}}$ and $\widetilde{H}_{n,k_0}^{\mathrm{sp}}$ that occurs close to the boundary is negligible. Thus, we define $$H_{n,k_0}^{\text{err},1}(\mbb PP_n) := \frac{1}{\rho_n^{\mathrm{sp}}} \sum_{\substack{\varphi\subseteq\mbb PP_n\colon \#\varphi=k_0, \\ \operatorname{dist}(\varphi,\partial[0,1]^d) \le r_n}} \xi(r_n^{-1}\varphi) s'_n(\varphi,\mbb PP_n)$$ and compute \begin{align}\label{inequality_fixed_size_error_sparse} \begin{split} H_n^{\mathrm{sp}} &\ge H_{n,k_0}^{\mathrm{sp}} \ge \frac{1}{\rho_n^{\mathrm{sp}}} \sum_{\substack{\varphi\subseteq\mbb PP_n\colon \#\varphi=k_0, \\ \operatorname{dist}(\varphi,\partial[0,1]^d) > r_n}} \xi(r_n^{-1}\varphi) s'_n(\varphi,\mbb PP_n) \\ &= \frac{1}{\rho_n^{\mathrm{sp}}} \sum_{\varphi\subseteq\mbb PP_n, \#\varphi=k_0} \xi(r_n^{-1}\varphi) t_n(\varphi,\mbb PP_n) - \frac{1}{\rho_n^{\mathrm{sp}}} \sum_{\substack{\varphi\subseteq\mbb PP_n\colon \#\varphi=k_0, \\ \operatorname{dist}(\varphi,\partial[0,1]^d) \le r_n}} \xi(r_n^{-1}\varphi) s'_n(\varphi,\mbb PP_n) \\ &= \widetilde{H}_{n,k_0}^{\mathrm{sp}} - H_{n,k_0}^{\text{err},1}(\mbb PP_n), \end{split} \end{align} where we used that from \eqref{LOC} it follows that for all $\varphi\subseteq\mbb PP_n$ with
$\#\varphi=k_0$ \begin{equation}\label{equation_distances_sparse} \xi(r_n^{-1}\varphi) t_n(\varphi,\mbb PP_n) = \xi(r_n^{-1}\varphi) s'_n(\varphi,\mbb PP_n). \end{equation} Further, we introduce the event \begin{align*} G_n := \big\{\#\{X\in\mbb PP_n\setminus[r_n,1-r_n]^d\colon &s'_n(\{X\}\cup\varphi,\mbb PP_n)=1 \text{ for some }\varphi\subseteq\mbb PP_n \\ &\text{with } \#(\{X\}\cup\varphi)=k_0\} < \delta\rho_n^{\mathrm{sp}}\big\}, \end{align*} which implies that the number of connected components of size $k_0$ with respect to the Euclidean distance that are located close to the boundary of $[0,1]^d$ is negligible. To deal with the probability of $G_n$, note that it is possible to replace the event in Lemma \ref{lemma_prob_resampling_between_boxes_sparse} with the complement of $G_n$ and we still get that \begin{equation}\label{inequality_event_points_close_to_boundary_of_torus} \limsup_{n\to\infty} \frac1{\rho_n^{\mathrm{sp}}} \log \mbb P(G_n^c) = -\infty. \end{equation} To show this, the proof of Lemma \ref{lemma_prob_resampling_between_boxes_sparse} can be repeated with only one modification that arises from switching from the toroidal to the Euclidean metric. In \eqref{inequality_probability_bad_subbox_sparse} one has to consider that it is possible that only a fraction of the ball intersects the box.
Under $G_n$ the number of components summed over in $H_{n,k_0}^{\text{err},1}$ is bounded by $\delta\rho_n^{\mathrm{sp}}$ and with \eqref{inequality_fixed_size_error_sparse}, we can compute \begin{align*} \mbb P(H_n^{\mathrm{sp}} \le a) &\le \mbb P(\widetilde{H}_{n,k_0}^{\mathrm{sp}} - H_{n,k_0}^{\text{err},1}(\mbb PP_n) \le a) \le \mbb P(\widetilde{H}_{n,k_0}^{\mathrm{sp}} \le a + H_{n,k_0}^{\text{err},1}(\mbb PP_n), G_n) + \mbb P(G_n^c) \\ &\le \mbb P\Big(\widetilde{H}_{n,k_0}^{\mathrm{sp}} \le a + \delta \sup_{\varphi\subseteq[0,1]^d, \#\varphi = k_0} \xi(r_n^{-1}\varphi), G_n\Big) + \mbb P(G_n^c) \\ &\le \mbb P(\widetilde{H}_{n,k_0}^{\mathrm{sp}} \le a + \delta b) + \mbb P(G_n^c), \end{align*} where we used \eqref{INV}, \eqref{LOC} and \eqref{BND} to get for sufficiently large $n$ $$\sup_{\varphi\subseteq[0,1]^d, \#\varphi = k_0} \xi(r_n^{-1}\varphi) = \sup_{\varphi\subseteq[0,1]^d\setminus[r_n,1-r_n]^d, \#\varphi = k_0} \xi(r_n^{-1}\varphi) \le \sup_{\varphi\subseteq[0,1]^d, \#\varphi = k_0} \xi_n(\varphi) \le b.$$ By \eqref{inequality_event_points_close_to_boundary_of_torus}, the probability of the complement of $G_n$ does not significantly contribute to the large deviations. From this point, \eqref{INV}, \eqref{LOC}, \eqref{BND} and \eqref{POS} let us apply \cite[Theorem 3.3]{hirschowada} to $\widetilde{H}_{n,k_0}^{\mathrm{sp}}$, which yields $$\limsup_{n\to\infty} \frac1{\rho_n^{\mathrm{sp}}} \log \mbb P(H_n^{\mathrm{sp}} \le a) \le \limsup_{n\to\infty} \frac1{\rho_n^{\mathrm{sp}}} \log \mbb P(\widetilde{H}_{n,k_0}^{\mathrm{sp}} \le a + \delta b) \le -\inf_{\rho\colon T^{\mathrm{sp}}(\rho) \le a+\delta b} h^{\mathrm{sp}}(\rho\mid \tau_{k_0}^{\mathrm{sp}}),$$ and therefore, the asserted upper bound, after letting $\delta\rightarrow0$. Note that the rate function in \cite[Theorem 3.3]{hirschowada} is given as a Legendre transform.
Arguing as in \cite[Corollary 3.2]{hirschowada}, this can be equivalently written in the relative entropy form. For the lower bound, as a first step, with the same reasoning, we get for any $\delta>0$ that \begin{equation}\label{inequality_LDP_sparse} \liminf_{n\to\infty} \frac{1}{\rho_n^{\mathrm{sp}}} \log \mbb P(\widetilde{H}_{n,k_0}^{\mathrm{sp}} < a-\delta) \geq -\inf_{\rho\colon T^{\mathrm{sp}}(\rho) < a-\delta} h^{\mathrm{sp}}(\rho \mid \tau_{k_0}^{\mathrm{sp}}). \end{equation} The next part of this proof is dedicated to show that in terms of large deviations, also for the lower bound, $H_n^{\mathrm{sp}}$ can be replaced with $\widetilde{H}_{n,k_0}^{\mathrm{sp}}$. For this, let $$ E_n^\text{good} := \Big\{\max_{X\in \{Y\in Q\cap\mbb PP'_n\colon Q\in\mathcal{J}_n\}} \mbb PP'_n\big(B_{2^d k_0 r_n}(X)\cap Q\big) \le k_0-1\Big\} $$ and for $\varepsilon>0$ serving as parameter for the Bernoulli random variables, $$ E_n := E_n^\text{good} \cap \bigcap_{Q\in \mathcal{J}_n} \{X_{Q, \varepsilon} = 1\}\cap \bigcap_{Q\not\in \mathcal{J}_n} \{X_{Q, \varepsilon} = 0\}. $$ We start the computations with $$\mbb P(H_n^{\mathrm{sp}} < a) = \mbb P(H_n^{\mathrm{sp}}(\mbb PP''_n) < a) \ge \mbb P(E_n, H_n^{\mathrm{sp}}(\mbb PP''_n) < a).$$ Next, we can divide the functional into contributions that come from components intersecting the volume close to the boundary of a cube, denoted by $$H_n^{\text{err},2}(\mbb PP''_n) := \frac{1}{\rho_n^{\mathrm{sp}}} \sum_{\varphi\subseteq\mbb PP''_n\colon \varphi \cap (\cup_{Q\in\mathcal{Q}_n} \partial_n Q) \neq \emptyset} \xi_n(\varphi) s_n(\varphi,\mbb PP''_n),$$ and those that do not.
Under the event $E_n$, we then have that \begin{align*} H_n^{\mathrm{sp}}(\mbb PP''_n) &= \frac{1}{\rho_n^{\mathrm{sp}}} \sum_{\varphi\subseteq\mbb PP''_n\colon \varphi \cap (\cup_{Q\in\mathcal{Q}_n} \partial_n Q) = \emptyset} \xi(r_n^{-1}\varphi) s_n(\varphi,\mbb PP''_n) + H_n^{\text{err},2}(\mbb PP''_n) \\ &\le \widetilde{H}_{n,k_0}^{\mathrm{sp}}(\mbb PP_n) + H_n^{\text{err},2}(\mbb PP''_n). \end{align*} We were able to bound the first term by $\widetilde{H}_{n,k_0}^{\mathrm{sp}}$ applied to $\mbb PP_n$ instead of $\mbb PP''_n$ because under the sprinkling event, if we disregard the space close to the boundaries of the cubes, the coupled process $\mbb PP''_n$ replaces $\mbb PP_n$ in each cube that contained at least a part of a connected component of size $k_0+1$, without creating any new connected components of size $k_0$ or bigger. Further, we made use of \eqref{equation_distances_sparse} as in the proof of the upper bound. This lets us proceed with $$\mbb P(E_n, H_n^{\mathrm{sp}}(\mbb PP''_n) < a) \ge \mbb P(E_n, \widetilde{H}_{n,k_0}^{\mathrm{sp}}(\mbb PP_n) + H_n^{\text{err},2}(\mbb PP''_n) < a).$$ Further, to ease notation, let $$F_n := \{\ms{CC}_{n,k_0}(\cup_{Q\in \mathcal{Q}_n} \partial_n Q) < \delta\rho_n^{\mathrm{sp}}\}$$ denote the complement of the event from Lemma \ref{lemma_prob_resampling_between_boxes_sparse} for some $\delta>0$, which gives us $$\mbb P(E_n, H_{n,k_0}^{\mathrm{sp}}(\mbb PP_n) + H_n^{\text{err},2}(\mbb PP''_n) < a) \ge \mbb P(E_n, F_n, \widetilde{H}_{n,k_0}^{\mathrm{sp}}(\mbb PP_n) + H_n^{\text{err},2}(\mbb PP''_n) < a).$$ Now, conditioned on $E_n$ and for sufficiently large $n$, the random geometric graph on $\mbb PP''_n$ with connectivity radius $r_n$ cannot have a connected component of more than $2^d k_0$ nodes, since in that case if $n$ is large, a box $Q\in\mathcal{J}_n$ would exist that contains $k_0+1$ vertices of $\mbb PP''_n$ with diameter less than or equal to $2^d k_0 r_n$. This contradicts $E_n^\text{good}$.
Note that $2^d$ occurs here because it is the maximal number of boxes that can share a face. Thus, due to the nonnegativity of $\xi$, under $E_n\cap F_n$, it holds that $$H_n^{\text{err},2}(\mbb PP''_n)< \delta \sup_{\varphi\subseteq[0,1]^d, \#\varphi\le 2^d k_0} \xi_n(\varphi) \le \delta b,$$ where we recall that $b$ depending only on $d$ and $k_0$ arises from \eqref{BND}. This leads to \begin{align*} \mbb P(E_n, F_n, \widetilde{H}_{n,k_0}^{\mathrm{sp}}(\mbb PP_n) + H_n^{\text{err},2}(\mbb PP''_n) < a) &\ge \mbb P(E_n, F_n, \widetilde{H}_{n,k_0}^{\mathrm{sp}}(\mbb PP_n) + \delta b < a) \\ &\ge \mbb P(E_n, \widetilde{H}_{n,k_0}^{\mathrm{sp}}(\mbb PP_n) + \delta b < a) - \mbb P(F_n^c). \end{align*} Summarizing these steps and applying the tower property of the conditional expectation, we arrive at $$ \mbb P(H_n^{\mathrm{sp}} < a) \ge \mbb E[\mbb P(E_n | \mbb PP_n) \mathbbmss{1}\{\widetilde{H}_{n,k_0}^{\mathrm{sp}}(\mbb PP_n) < a-\delta b\}] - \mbb P(F_n^c).$$ Now, using Lemma \ref{lemma_prob_new_edges_sparse} and independence of the events intersected in $E_n$ under $\mbb PP_n$, we get that $$ \mbb P(E_n | \mbb PP_n) = \mbb P(E_n^\text{good} | \mbb PP_n) \varepsilon^{J_n} (1-\varepsilon)^{\rho_n^{\mathrm{sp}}-J_n} \ge (\alpha_M \varepsilon)^{J_n} (1-\varepsilon)^{\rho_n^{\mathrm{sp}}} $$ for an arbitrary $M > 2 \kappa_d^{k_0-1} 2^{k_0(d^2+1)} k_0^{k_0}$.
This lets us proceed with \begin{align*} \mbb P(H_n^{\mathrm{sp}} < a) &\ge \mbb E\big[(\alpha_M \varepsilon)^{J_n} (1-\varepsilon)^{\rho_n^{\mathrm{sp}}} \mathbbmss{1}\{\widetilde{H}_{n,k_0}^{\mathrm{sp}}(\mbb PP_n) < a-\delta b\}\big] - \mbb P(F_n^c) \\ &\ge \mbb E\big[(\alpha_M \varepsilon)^{\delta\rho_n^{\mathrm{sp}}} (1-\varepsilon)^{\rho_n^{\mathrm{sp}}} \mathbbmss{1}\{\widetilde{H}_{n,k_0}^{\mathrm{sp}}(\mbb PP_n) < a-\delta b\} \mathbbmss{1}\{J_n < \delta\rho_n^{\mathrm{sp}}\}\big] - \mbb P(F_n^c) \\ &\ge (\alpha_M \varepsilon)^{\delta\rho_n^{\mathrm{sp}}} (1-\varepsilon)^{\rho_n^{\mathrm{sp}}} \big(\mbb P(\widetilde{H}_{n,k_0}^{\mathrm{sp}}(\mbb PP_n) < a-\delta b) -\mbb P(J_n \ge \delta\rho_n^{\mathrm{sp}})\big) - \mbb P(F_n^c). \end{align*} From this inequality and Lemma \ref{lemma_prob_resampling_between_boxes_sparse}, it follows that $\mbb P(F_n^c)$ does not contribute significantly to the lower bound for the lower large deviations. Therefore, we arrive at \begin{align*} &\liminf_{n\to\infty} \frac{1}{\rho_n^{\mathrm{sp}}} \log \mbb P(H_n^{\mathrm{sp}} < a) \\ \ge\ &\liminf_{n\to\infty} \frac{1}{\rho_n^{\mathrm{sp}}} \log\big((\alpha_M \varepsilon)^{\delta\rho_n^{\mathrm{sp}}} (1-\varepsilon)^{\rho_n^{\mathrm{sp}}} (\mbb P(\widetilde{H}_{n,k_0}^{\mathrm{sp}}(\mbb PP_n) < a-\delta b) -\mbb P(J_n \ge \delta\rho_n^{\mathrm{sp}}))\big) \\ \ge\ &\delta \log (\alpha_M \varepsilon) + \log(1-\varepsilon) + \liminf_{n\to\infty} \frac{1}{\rho_n^{\mathrm{sp}}} \log\big(\mbb P(\widetilde{H}_{n,k_0}^{\mathrm{sp}}(\mbb PP_n) < a-\delta b) -\mbb P(J_n \ge \delta\rho_n^{\mathrm{sp}})\big).
\end{align*} Now, Lemma \ref{lemma_probability_bad_boxes_sparse} implies that $\mbb P(J_n \ge \delta\rho_n^{\mathrm{sp}})$ does not affect the lower bound of the lower tails in this situation, and thus, plugging in \eqref{inequality_LDP_sparse}, we get \begin{align*} \liminf_{n\to\infty} \frac1{\rho_n^{\mathrm{sp}}} \log \mbb P(H_n^{\mathrm{sp}} < a) &\ge \delta \log(\alpha_M \varepsilon) + \log(1-\varepsilon) + \liminf_{n\to\infty} \frac{1}{\rho_n^{\mathrm{sp}}} \log \mbb P\big(\widetilde{H}_{n,k_0}^{\mathrm{sp}}(\mbb PP_n) < a-\delta b\big) \\ &\ge \delta \log(\alpha_M \varepsilon) + \log(1-\varepsilon) - \inf_{\rho\colon T^{\mathrm{sp}}(\rho) < a-\delta b} h^{\mathrm{sp}}(\rho \mid \tau_{k_0}^{\mathrm{sp}}). \end{align*} Letting $\delta\rightarrow 0$ and then $\varepsilon\rightarrow 0$ gives the lower bound $$\liminf_{n\to\infty} \frac{1}{\rho_n^{\mathrm{sp}}} \log \mbb P(H_n^{\mathrm{sp}} < a) \ge -\inf_{\rho\colon T^{\mathrm{sp}}(\rho) < a} h^{\mathrm{sp}}(\rho \mid \tau_{k_0}^{\mathrm{sp}}).$$ \end{proof} What follows are the proofs of the previously introduced lemmas. But, since we come across the task of bounding a similar quantity in the proofs of Lemmas \ref{lemma_probability_bad_boxes_sparse}, \ref{lemma_prob_new_edges_sparse} and \ref{lemma_prob_resampling_between_boxes_sparse}, we insert a short lemma that helps with this first. \begin{lemma}[Bound for the probability of many Poisson points in a ball]\label{lemma_bound_Poisson_points_in_ball} For $Q\subseteq[0,1]^d$ and $m,l,r\in\mbb N$, it holds that $$\mbb E[\#\{X\in Q\cap\mbb PP_m\colon \mbb PP_m(B_r(X)) \ge l\}] \le m^l \kappa_d^{l-1} r^{(l-1)d} |Q|.$$ \end{lemma} \begin{proof}[Proof of Lemma \ref{lemma_probability_bad_boxes_sparse}] We are going to categorize boxes to create independence and use a binomial concentration inequality from \cite[Lemma 1.1]{poisson_conc}.
We use the set $\mathcal{L} := \{1,2\}^d$ to label each box in $\mathcal{Q}_n$ in a certain way to achieve that between two boxes of the same label, there will always be a box with a different label. To guarantee that this is possible on the torus, we assume that the number of boxes along each axis is divisible by $2$. For $l\in\mathcal{L}$, we denote the boxes of label $l$ by $\mathcal{Q}_n^{(l)}$. Then, $$ \mbb P(J_n \ge \delta\rho_n^{\mathrm{sp}}) \le \sum_{l\in\mathcal{L}} \mbb P(\#(\mathcal{Q}_n^{(l)}\cap \mathcal{J}_n) \ge \delta\rho_n^{\mathrm{sp}}/2^d). $$ For $n$ large enough, the labeling guarantees that the events $\{Q\in\mathcal{J}_n\}$ are independent for different $Q\in\mathcal{Q}_n^{(l)}$. Thus, we are in a binomial setting and to use the mentioned binomial concentration inequality, we first bound the probability of one box being bad by using Lemma \ref{lemma_bound_Poisson_points_in_ball} to get that for an arbitrary $Q\in\mathcal{Q}_n$ \begin{align}\label{inequality_probability_bad_box_sparse} \begin{split} \mbb P(Q\in\mathcal{J}_n) &= \mbb P(\mbb PP_n(B_{2^d k_0 r_n}(X)) \ge k_0+1 \text{ for some } X\in Q\cap\mbb PP_n) \\ &\le \mbb E\bigg[\sum_{X\in Q\cap\mbb PP_n} \mathbbmss{1}\{\mbb PP_n(B_{2^d k_0 r_n}(X)) \ge k_0+1\}\bigg] \\ &\le n^{k_0+1} \kappa_d^{k_0}(2^d k_0 r_n)^{k_0d} |Q| = \kappa_d^{k_0} (2^d k_0)^{k_0d} n r_n^d.
\end{split} \end{align} Next, using \cite[Lemma 1.1]{poisson_conc} for $n$ large, we get for every $l\in\mathcal{L}$ and every $\delta>0$, if $n$ is large enough, that \begin{align*} \mbb P(\#(\mathcal{Q}_n^{(l)}\cap \mathcal{J}_n) \ge \delta\rho_n^{\mathrm{sp}}/2^d) &\le \exp\bigg(-\frac{\delta\rho_n^{\mathrm{sp}}/2^d}{2} \log\Big(\frac{\delta\rho_n^{\mathrm{sp}}/2^d}{\rho_n^{\mathrm{sp}} \kappa_d^{k_0} (2^d k_0)^{k_0d} n r_n^d}\Big)\bigg) \\ &= \exp\bigg(-\frac{\delta\rho_n^{\mathrm{sp}}}{2^{d+1}} \log\Big(\frac{\delta}{\kappa_d^{k_0} 2^d (2^d k_0)^{k_0d} n r_n^d}\Big)\bigg). \end{align*} The assumption $n r_n^d\rightarrow 0$ yields the assertion. \end{proof} \begin{proof}[Proof of Lemma \ref{lemma_prob_new_edges_sparse}] First, we let $M> 2 \kappa_d^{k_0-1} 2^{k_0(d^2+1)} k_0^{k_0}$ as well as $Q\in\mathcal{Q}_n$ be arbitrary and start by examining the probability that $\mbb PP_{2n}$ has some amount of close points within $Q$ by invoking Markov's inequality and Lemma \ref{lemma_bound_Poisson_points_in_ball} to get \begin{align*} &\mbb P\big(\#\{X\in Q\cap\mbb PP_{2n} \colon \mbb PP_{2n}(B_{2^d k_0 r_n}(X)\cap Q) \ge k_0\} \ge M\big) \\ &\le \frac1{M} \mbb E\Big[\sum_{X\in Q\cap\mbb PP_{2n}} \mathbbmss{1}\{\mbb PP_{2n}(B_{2^d k_0 r_n}(X)\cap Q) \ge k_0\}\Big] \\ &\le \frac1{M} (2n)^{k_0} \kappa_d^{k_0-1} (2^d k_0 r_n)^{(k_0-1)d} |Q| = \frac{\kappa_d^{k_0-1} 2^{k_0(d^2+1)} k_0^{k_0 d}}{M} \le \frac12. \end{align*} Note that a thinning of $\mbb PP_{2n}$, where we keep each point independently with probability $1/2$ has the same distribution as $\mbb PP_n$. Denote the thinned process by $\mbb PP_{2n}^{\text{thin}}$.
We proceed by deleting unwanted points in the thinning and get \begin{align*} &\mbb P\Big(\max_{X\in Q\cap\mbb PP'_n} \mbb PP'_n(B_{2^d k_0 r_n}(X)\cap Q) \le k_0-1\Big) \\ &= \mbb P\Big(\max_{X\in Q\cap\mbb PP_{2n}^{\text{thin}}} \mbb PP_{2n}^{\text{thin}}(B_{2^d k_0 r_n}(X)\cap Q) \le k_0-1\Big) \\ &= \mbb E\big[(1/2)^{\#\{X\in Q\cap\mbb PP_{2n} \colon \mbb PP_{2n}(B_{2^d k_0 r_n}(X)\cap Q) \ge k_0\}}\big] \\ &\ge \mbb E[2^{-M} \mathbbmss{1}\{\#\{X\in Q\cap\mbb PP_{2n} \colon \mbb PP_{2n}(B_{2^d k_0 r_n}(X)\cap Q) \ge k_0\} < M\}] \ge 2^{-M-1}. \end{align*} Now, we can use independence of the above events when considering different boxes to get \begin{align*} &\mbb P\bigg(\bigcap_{Q\in\mathcal{J}_n} \Big\{\max_{X\in Q\cap\mbb PP'_n} \mbb PP'_n(B_{2^d k_0 r_n}(X)\cap Q) \le k_0-1\Big\} \biggm\vert \mbb PP_n\bigg) \\ &= \prod_{Q\in\mathcal{J}_n} \mbb P\Big(\max_{X\in Q\cap\mbb PP'_n} \mbb PP'_n(B_{2^d k_0 r_n}(X)\cap Q) \le k_0-1\Big) \ge (2^{-M-1})^{J_n}. \end{align*} \end{proof} \begin{proof}[Proof of Lemma \ref{lemma_prob_resampling_between_boxes_sparse}] For a box $Q\in\mathcal{Q}_n$, we divide $\partial_n Q$ into a grid consisting of boxes with side length $r_n$ and call this collection of boxes $\mathcal{W}_n(\partial_n Q)$. We denote the total collection of these boxes by $\overline{\mathcal{W}}_n := \cup_{Q\in\mathcal{Q}_n} \mathcal{W}_n(\partial_n Q)$. Next, we can proceed with the same strategy that was already successfully applied in the proof of Lemma \ref{lemma_probability_bad_boxes_sparse}, but use more labels this time to achieve that between two boxes $W_1,W_2\in\overline{\mathcal{W}}_n$ of the same label, there are always $2^{d+1} k_0$ boxes labeled differently. We choose the label set $\mathcal{L} := \{1,2,\dots,2^{d+1} k_0 + 1\}^d$ and we reuse the notation $\overline{\mathcal{W}}_n^{(l)}$ for the boxes of label $l\in\mathcal{L}$.
Again, we assume that the number of boxes along each axis is divisible by $2^{d+1}k_0+1$. This construction lets us search for connected components of at most $2^d k_0$ nodes in boxes with the same label independently. Our aim is to apply the already encountered binomial concentration bound \cite[Lemma 1.1]{poisson_conc} to the number of subcubes of a fixed label that contain a large connected component. This requires two things, a bound for the number of subcubes in $\overline{\mathcal{W}}_n^{(l)}$ and a bound for the probability of a subcube $W\in\overline{\mathcal{W}}_n$ containing at least one node in $\ms{CC}_{n,k_0}$. For the latter, i.e., the probability that $W\in\overline{\mathcal{W}}_n$ contains vertices that are part of a connected component of size between $k_0$ and $2^d k_0$, we compute, using Markov's inequality and Lemma \ref{lemma_bound_Poisson_points_in_ball}, that \begin{align}\label{inequality_probability_bad_subbox_sparse} \begin{split} \mbb P\big(W\cap\ms{CC}_{n,k_0} \neq \emptyset\big) &\le \mbb E[\#(W\cap\mbb PP''_n\cap\ms{CC}_{n,k_0})] \le \mbb E[\#\{X\in W\cap\mbb PP''_n\colon \mbb PP''_n(B_{k_0 r_n}(X)) \ge k_0\}] \\ &\le n^{k_0} \kappa_d^{k_0-1} (k_0 r_n)^{d(k_0-1)} |W| = \kappa_d^{k_0-1} k_0^{(k_0-1)d} (n r_n^d)^{k_0}. \end{split} \end{align} To find a bound for the number of subcubes, note that the volume of $\partial_n Q$ for $Q\in\mathcal{Q}_n$ is of order $$(\rho_n^{\mathrm{sp}})^{-(d-1)/d} r_n = n^{-k_0(d-1)/d} r_n^{-(k_0-1)(d-1)+1}.$$ Consequently, the number of boxes in $\mathcal{W}_n(\partial_n Q)$ can be bounded by dividing the above by the volume of a subcube $r_n^d$, which yields \begin{equation}\label{inequality_number_bad_subboxes_sparse} \#\mathcal{W}_n(\partial_n Q) \le c_1 n^{-k_0(d-1)/d} r_n^{-k_0(d-1)} = c_1 (n r_n^d)^{-k_0(d-1)/d} \end{equation} for some $c_1 := c_1(d, k_0) > 0$.
Thus, there are at most $\rho_n^{\mathrm{sp}} c_1 (n r_n^d)^{-k_0(d-1)/d}$ subcubes in $\overline{\mathcal{W}}_n$. Before we invoke \cite[Lemma 1.1]{poisson_conc}, we can union over all labels and combine this with the union bound to get \begin{align*} \mbb P\big(\ms{CC}_{n,k_0}(\cup_{Q\in \mathcal{Q}_n} \partial_n Q) \ge \delta\rho_n^{\mathrm{sp}} \big) &\le \mbb P\bigg(\bigcup_{l\in\mathcal{L}} \big\{\ms{CC}_{n,k_0}(\cup_{W\in \overline{\mathcal{W}}_n^{(l)}} W) \ge \delta\rho_n^{\mathrm{sp}}/(\#\mathcal L)\big\}\bigg) \\ &\le \sum_{l\in\mathcal{L}} \mbb P\big(\ms{CC}_{n,k_0}(\cup_{W\in \overline{\mathcal{W}}_n^{(l)}} W) \ge \delta\rho_n^{\mathrm{sp}}/(\#\mathcal L)\big). \end{align*} At this point, let $W\in\overline{\mathcal{W}}_n$ be arbitrary. An important observation is that $\ms{CC}_{n,k_0}(W)$ is bounded by a constant that does not depend on $n$. More precisely, a connected component occupies a ball of radius at least $r_n$ that cannot intersect any other connected component. Consequently, when choosing $r_n/2$ as radius instead, that ball cannot intersect any ball of radius $r_n/2$ that is centered at a node that belongs to another connected component. When considering connected components with a vertex in $W$, at least $1/2^d$ of the volume of a ball with radius $r_n/2$ centered at that vertex has to be contained in $W$. The factor $1/2^d$ adjusts for the possibility that the center of the ball is in a corner of $W$.
Therefore, we can bound the available space by $|W|$ and the maximal component size by $2^d k_0$ and arrive at $$\ms{CC}_{n,k_0}(W) \le \frac{2^d k_0 |W|}{\kappa_d (r_n/2)^d /2^d} = 8^d \kappa_d^{-1} k_0,$$ which implies that for a fixed $l\in\mathcal{L}$ $$ \mbb P\big(\ms{CC}_{n,k_0}(\cup_{W\in \overline{\mathcal{W}}_n^{(l)}} W) \ge \delta\rho_n^{\mathrm{sp}}/(\#\mathcal L)\big) \le \mbb P\big(\#\{W\in \overline{\mathcal{W}}_n^{(l)} \colon W\cap\ms{CC}_{n,k_0} \neq \emptyset\} \ge \delta\rho_n^{\mathrm{sp}}/c_2 \big), $$ where $c_2 := c_2(d,k_0) := 8^d\kappa_d^{-1} k_0 \#\mathcal L$. Next, the independence guaranteed by the labeling and the bounds derived in \eqref{inequality_probability_bad_subbox_sparse} and \eqref{inequality_number_bad_subboxes_sparse} let us apply the binomial bound \cite[Lemma 1.1]{poisson_conc} for sufficiently large $n$ to arrive at \begin{align*} &\mbb P\big(\#\{W\in \overline{\mathcal{W}}_n^{(l)} \colon W\cap\ms{CC}_{n,k_0} \neq \emptyset\} \ge \delta\rho_n^{\mathrm{sp}}/c_2\big) \\ \le\ &\exp\bigg(-\frac{\delta\rho_n^{\mathrm{sp}}}{2 c_2} \log\Big(\frac{\delta\rho_n^{\mathrm{sp}}/c_2}{\rho_n^{\mathrm{sp}} c_1 (n r_n^d)^{-k_0(d-1)/d} \kappa_d^{k_0-1} k_0^{(k_0-1)d} (n r_n^d)^{k_0}}\Big)\bigg) \\ =\ &\exp\bigg(-\frac{\delta\rho_n^{\mathrm{sp}}}{2 c_2} \log\Big(\frac{\delta}{c_2 c_1 \kappa_d^{k_0-1} k_0^{(k_0-1)d} (n r_n^d)^{k_0/d}}\Big)\bigg), \end{align*} yielding the assertion, since $n r_n^d \rightarrow 0$. \end{proof} \begin{proof}[Proof of Lemma \ref{lemma_bound_Poisson_points_in_ball}] If $l=1$, we get \begin{align*} &\mbb E[\#\{X\in Q\cap\mbb PP_m\colon\mbb PP_m(B_r(X)) \ge 1\}] = \mbb E[\mbb PP_m(Q)] = m |Q|.
\end{align*} For $l>1$, an application of Mecke's equation and Markov's inequality yields \begin{align*} &\mbb E[\#\{X\in Q\cap\mbb PP_m\colon\mbb PP_m(B_r(X)) \ge l\}] = m \int_{Q} \mbb E[\mathbbmss{1}\{\mbb PP_m(B_r(x)) \ge l-1\}] \,\mathrm{d} x \\ &= m \int_{Q} \mbb P(\{Y_1,\dots,Y_{l-1}\}\subseteq B_r(x) \text{ for some } \{Y_1,\dots,Y_{l-1}\}\subseteq\mbb PP_m) \,\mathrm{d} x \\ &\le m \int_Q \mbb E\bigg[\sum_{\{Y_1,\dots,Y_{l-1}\}\subseteq \mbb PP_m} \mathbbmss{1}\{Y_1,\dots,Y_{l-1}\in B_r(x)\}\bigg] \,\mathrm{d} x \\ &\le m^l \int_{Q} \int_{[0,1]^{(l-1)d}} \mathbbmss{1}\{y_1,\dots,y_{l-1}\in B_r(x)\} \,\mathrm{d} (y_1,\dots,y_{l-1}) \,\mathrm{d} x = m^l \kappa_d^{l-1} r^{(l-1)d} |Q|. \end{align*} \end{proof} \section{Proof of Theorem \ref{theorem_main_dense} (dense)}\label{section_proof_dense} The general outline of the proof of the dense regime follows the ideas for the sparse case. Here, we aim to apply a contraction principle using the large deviation asymptotics with respect to the weak topology from \cite{hirschowadakang}. In order to apply the contraction principle directly, $T_k^{\mathrm{de}}$ must be continuous with respect to the weak topology, meaning that the integrand needs to be bounded. However, this condition is not immediately satisfied. To overcome this, using the technique of sprinkling, we aim to artificially introduce a bound for the score function that will translate to the integrand of $T_k^{\mathrm{de}}$. As in the sparse regime, we divide $[0,1]^d$ into a grid of cubes with side length $(\rho_{n,k}^{\mathrm{de}})^{-1/d}$, assuming that $\rho_{n,k}^{\mathrm{de}}$ is a natural number and denote this collection by $\mathcal{Q}_n$. We are going to use the same objects that were introduced in the sparse regime.
As a reminder, $\mbb PP'_n$ is a Poisson point process on $[0,1]^d$ with intensity $n$ independent of $\mbb PP_n$, and for Bernoulli random variables with parameter $\varepsilon\in(0,1)$, independent of each other and all introduced Poisson random measures, for every $Q\in\mathcal Q_n$, we defined $$\mbb PP_n^{Q} := \begin{cases} Q\cap\mbb PP'_n &\text{if } X_{Q, \varepsilon} = 1 \\ Q\cap\mbb PP_n &\text{if } X_{Q, \varepsilon} = 0 \end{cases}.$$ Finally, we denoted the union $\cup_{Q\in\mathcal{Q}_n} \mbb PP_n^Q$ by $\mbb PP''_n$. In the dense regime, we aim to use the Bernoulli random variables to control $\mbb PP''_n$ in such a way that we resample $\mbb PP_n$ using $\mbb PP'_n$ in each box that makes it too likely that there is an $X$ with a large edge while keeping $\mbb PP_n$ in all other boxes. Mathematically expressed, for a random configuration $\eta\in\mbb NNN$, we want to avoid boxes that foster the existence of an $X\in\eta$ with \begin{equation}\label{equation_indicator} \xi_n(X, \eta) := (n\kappa_d R_k(X, \eta)^d - a_n - s_0)_+ > M \end{equation} for $M>0$. If a box $Q\in \mathcal Q_n$ has no such point within $Q\cap\eta$, we will refer to it as $(\eta,M)$\emph{-bounded}. To achieve this goal, we need to ensure that the resampling is done in such a way that adjacent boxes remain compatible in the sense that even after the resampling, the conditional probability that a box fulfills the boundedness property remains high. To that end, we fix an arbitrary ordering of the boxes in $\mathcal Q_n$ such that $Q_n^{(i)}$ denotes the $i$th box in $\mathcal Q_n$ and then impose conditions recursively.
More precisely, for $\eta\in\{\mbb PP_n,\mbb PP'_n\}$, we denote $\bar{\mathcal S}_n^{(1)}(\eta) := (Q_n^{(1)}\cap\eta)\cup(\mbb PP_n\setminus Q_n^{(1)})$, where outside of the box $Q_n^{(1)}$ we could have used an arbitrary Poisson point process with intensity $n$ in the definition of $\bar{\mathcal S}_n^{(1)}$. Next, for an arbitrary $j\in\{1,\dots,\rho_{n,k}^{\mathrm{de}}\}$, let \begin{enumerate} \item $\mathcal N(j)$ denote the ordering indices of the boxes adjacent to box $Q_n^{(j)}$; \item $\mathcal N_+(j) := \mathcal N(j)\cup\{j\}$ be the above unioned with $\{j\}$; \item $\eta^{(j)} := \cup_{s\le j} (Q_n^{(s)}\cap\eta)$ be $\eta$ restricted to the first $j$ boxes. \end{enumerate} Then, by setting $$a_{i, 1}(\eta) := \mbb P\big(Q_n^{(i)}\text{ is $(\bar{\mathcal S}_n^{(1)}(\eta),M)$-bounded}\mid \mbb PP_n^{(1)}, (\mbb PP'_n)^{(1)}\big), \quad i\in\mathcal N_+(1),$$ we label the box $Q_n^{(1)}$ as \emph{$(\eta,M)$-good} if \begin{align*} \min_{i \in \mathcal N_+(1)}a_{i, 1}(\eta) \ge 1 - e^{-M/2}. \end{align*} Then, we proceed step by step and for $2\le j\le \rho_{n,k}^{\mathrm{de}}$ set $$\mathcal S_n^{(j-1)} := \begin{cases} Q_n^{(j-1)}\cap\mbb PP'_n \quad\text{if } Q_n^{(j-1)} \text{ is }(\mbb PP_n,M)\text{-bad} \\ Q_n^{(j-1)}\cap\mbb PP_n \quad\text{if } Q_n^{(j-1)} \text{ is }(\mbb PP_n,M)\text{-good} \end{cases},$$ to be able to define $$ \bar{\mathcal S}_n^{(j)}(\eta) := (Q_n^{(j)}\cap\eta) \cup (\cup_{s\le j-1} \mathcal S_n^{(s)}) \cup (\mbb PP_n\setminus \cup_{s\le j} Q_n^{(s)}).
$$ Additionally, for $i\in\mathcal N_+(j)$, we define the conditional probabilities $$a_{i, j}(\eta) := \mbb P\big(Q_n^{(i)}\text{ is $(\bar{\mathcal S}_n^{(j)}(\eta),M)$-bounded}\mid \mbb PP_n^{(j)}, (\mbb PP'_n)^{(j)}\big)$$ and note that $a_{i, j}(\eta)$ only depends on the configurations of $\mbb PP_n,\mbb PP'_n$ in $Q_n^{(s)}$ for $s\in\mathcal N_+(i)\cap\{1,\dots,j\}$. We then say that the box $Q_n^{(j)}$ is \emph{$(\eta, M)$-good} if \begin{align} \label{eq:mgood} \min_{i \in \mathcal N_+(j)} a_{i, j}(\eta) \ge 1 - b_{i,j}^{(M)} \end{align} holds, where \begin{equation}\label{equation_probability_Mgood} b_{i,j}^{(M)} := e^{-M 2^{-1-\#\{s\in\mathcal N_+(i) \colon s\le j\}}}. \end{equation} In words, we consider a configuration $\eta$ within the box $Q_n^{(j)}$ suitable if the probability of any adjacent box $Q_n^{(i)}$ being $(\bar{\mathcal S}_n^{(j)}(\eta),M)$-bounded is large, conditioned on the configurations in the boxes that have already been considered in a step $s < j$ and the configuration $Q_n^{(j)}\cap\eta$ in the current box. Next, let $$\mathcal{J}_n^M := \mathcal{J}_n^M(\mbb PP_n, \mbb PP_n') := \{Q_n^{(i)} \in \mathcal{Q}_n\colon\text{ $Q_n^{(i)}$ is $(\mbb PP_n,M)$-bad}\}$$ be the collection of $(\mbb PP_n,M)$-bad boxes and we abbreviate its cardinality by $$J_n^M := \#\mathcal{J}_n^M.$$ Since $k$ can be considered as fixed now, we can write $$\rho_n^{\mathrm{de}} := \rho_{n,k}^{\mathrm{de}}$$ to ease notation. We first make sure that those bad boxes do not occur too many times with a probability that is too high.
\begin{lemma}[Bad boxes are exponentially negligible]\label{lemma_probability_bad_boxes} Let $\delta, M>0$. Then, $$\mbb P(J_n^M \ge \delta\rho_n^{\mathrm{de}}) \le \exp\Big(-\frac{\delta\rho_n^{\mathrm{de}}}{5^d 2} \log\Big(\frac{\delta e^{M/2+s_0}}{15^d 2^{3^d+1} k}\Big)\Big).$$ In particular, $$\limsup_{M\to\infty}\limsup_{n\to\infty} \frac1{\rho_n^{\mathrm{de}}} \log\mbb P(J_n^M \ge \delta\rho_n^{\mathrm{de}}) = -\infty.$$ \end{lemma} Furthermore, we do not desire that a resampled box is still deemed bad. To achieve this, for $j\in\{1,\dots,\rho_n^{\mathrm{de}}\}$ and $\eta$ equal to either $\mbb PP_n$ or $\mbb PP'_n$, we let \begin{equation}\label{equation_good_condition1} E_j^\text{good}(\eta) := E_{j,n}^\text{good}(\eta) := \{Q_n^{(j)}\text{ is }(\eta,M)\text{-good}\} \end{equation} be the event that $Q_n^{(j)}$ is $(\eta,M)$-good. In addition, let $E_j^\text{bad}(\eta)$ be the event's complement and for $M_0>0$, let \begin{equation}\label{equation_good_condition2} E_j^{b} := E_{j,n}^{b} := \{Q_n^{(j)}\setminus\partial_n Q_n^{(j)}\text{ is }(\mbb PP'_n, M_0)\text{-bounded}\} \end{equation} be the event that $\mbb PP'_n$ not close to the boundary of a box $Q_n^{(j)}$ fulfills an additional boundedness condition. Here, for every $Q\in\mathcal{Q}_n$, we denoted by $$\partial_n Q := \big\{x\in Q\colon \mathrm{dist}(\{x\},\partial Q) \le t_n \big\}$$ the set of all points in $Q$ within distance $$t_n := \Big(\frac{a_n + w_n}{n \kappa_d }\Big)^{1/d}$$ of the complement of $Q$, where $(w_n)_n$ is a sequence with $w_n \rightarrow \infty$ and $w_n \in o(a_n)$ that we henceforth fix. The next lemma states that for each $n\in\mbb N$, conditioned on $\mbb PP_n$, the probability that a box is either good, or we can resample it in a beneficial way otherwise, is positive.
\begin{lemma}[Lower bound for probability of a good box or resampling a good box]\label{lemma_prob_new_edges} For any $M , M_0 > 0$ it holds that \begin{align*} \mbb P\bigg(\bigcap_{j=1}^{\rho_n^{\mathrm{de}}} E_j^\text{good}(\mbb PP_n) \cup \big(E_j^\text{bad}(\mbb PP_n) \cap E_j^\text{good}(\mbb PP'_n) \cap E_j^b \big) \biggm\vert \mbb PP_n\bigg) \ge q_{M_0,M}^{\rho_n^{\mathrm{de}}}, \end{align*} where $q_{M_0,M} := 1 - 3^d 2 k e^{|s_0|} (e^{-M_0} + e^{-M/2^{4^d}})$. \end{lemma} Then, for $\varepsilon>0$ serving as parameter for the Bernoulli random variables, we define $$ E_n^* := \bigcap_{j=1}^{\rho_n^{\mathrm{de}}}\big(E_j^\text{good}(\mbb PP_n) \cap \{X_{Q_n^{(j)},\varepsilon} = 0\}\big) \cup \big(E_j^\text{bad}(\mbb PP_n) \cap E_j^\text{good}(\mbb PP'_n) \cap E_j^b \cap \{X_{Q_n^{(j)},\varepsilon} = 1\}\big). $$ Recalling the definition of the mixed Poisson point process $\mbb PP''_n$, this means that, using the Bernoulli random variables, we resample all boxes that are bad with respect to $\mbb PP_n$ and ask for $\mbb PP'_n$ to satisfy the goodness as in the event in \eqref{equation_good_condition1} and the additional condition described in \eqref{equation_good_condition2} in the boxes where the sprinkling triggered. \begin{lemma}[Lower bound for probability of resampling bad boxes]\label{lemma_lower_bound_good_event} For $\varepsilon\in(0,1)$ and arbitrary $M,M_0>0$, the event $E_n^*$ satisfies that \begin{equation}\label{subset_good_event} E_n^* \subseteq \{Q_n^{(i)} \text{ is } (\mbb PP''_n,M)\text{-bounded for every }i\le\rho_n^{\mathrm{de}}\}. \end{equation} Further, it holds that \begin{equation}\label{inequality_good_event} \mbb P(E_n^* \mid \mbb PP_n) \ge \varepsilon^{\delta\rho_n^{\mathrm{de}}} (1-\varepsilon)^{\rho_n^{\mathrm{de}}} \big(q_{M_0,M}^{\rho_n^{\mathrm{de}}} - \mbb P(J_n^M \ge \delta\rho_n^{\mathrm{de}} \mid \mbb PP_n)\big).
\end{equation} \end{lemma} Recalling the definition of $\xi_n$ in \eqref{equation_indicator}, we introduce the error terms \begin{equation}\label{equation_dense_error1} H_{n,M}^{\text{err},\partial}(\mbb PP''_n) := \frac1{\rho_n^{\mathrm{de}}}\sum_{ X \in \mbb PP_n'' \cap (\cup_{Q\in\mathcal{Q}_n} \partial_n Q) } M \wedge \xi_n(X,\mbb PP''_n) \end{equation} and \begin{equation}\label{equation_dense_error2} H_{n,M,M_0}^{\text{err}, \mathcal J}(\mbb PP_n, \mbb PP'_n, \mbb PP''_n) := \frac1{\rho_n^{\mathrm{de}}}\sum_{ X \in \mbb PP_n'' \cap (\cup_{Q\in\mathcal{J}_n^M} Q ) } M_0 \wedge \xi_n(X,\mbb PP''_n) \end{equation} that will denote potential deviations introduced by the sprinkling. The following lemma is devoted to showing that these errors are insignificant. \begin{lemma}[$H_{n,M}^{\text{err},\partial}(\mbb PP''_n)$ and $H_{n,M,M_0}^{\text{err}, \mathcal J}(\mbb PP_n, \mbb PP'_n, \mbb PP''_n)$ are negligible]\label{lemma_prob_resampling_between_boxes} Let $\delta>0$. Then, for any $M>0$ $$\limsup_{n\to\infty} \frac1{\rho_n^{\mathrm{de}}} \log \mbb P\big(H_{n,M}^{\text{err},\partial}(\mbb PP''_n) \ge \delta\big) = -\infty$$ and additionally, for any $M_0>0$, $$ \limsup_{M\to\infty}\limsup_{n\to\infty} \frac1{\rho_n^{\mathrm{de}}} \log \mbb P\big(H_{n,M,M_0}^{\text{err}, \mathcal J}(\mbb PP_n, \mbb PP'_n, \mbb PP''_n) \ge \delta\big) = -\infty. $$ \end{lemma} These lemmas allow us to prove the main theorem. \begin{proof}[Proof of Theorem \ref{theorem_main_dense}] Let $M>0$. We start by defining the functional $$H_{n,M} := H_{n,M}(\mbb PP_n) :=\frac{1}{\rho_n^{\mathrm{de}}} \sum_{X \in \mbb PP_n} \xi_n(X, \mbb PP_n) \mathbbmss{1}\{n\kappa_d R_k(X, \mbb PP_n)^d - a_n -s_0 \le M\},$$ where we only add up scores of vertices for which the distance to the $k$-closest node satisfies an additional bound, with the goal of applying \cite[Theorem 2.1]{hirschowada} to it.
Along these lines, we define $$L_{n,k} := \frac1{\rho_n^{\mathrm{de}}} \sum_{X\in\mbb PP_n} \delta_{n\kappa_d R_k(X,\mbb PP_n)^d - a_n}$$ as a random Radon measure on $\mbb R$, which we henceforth restrict to a random Radon measure on $E_0$, denoted by $L_{n,k}^{E_0}$. Next, defined on the domain of Radon measures on $E_0$, the map given by $$T_M(\rho) := \int_{E_0} (x-s_0) \wedge M \,\mathrm{d}\rho(x)$$ is continuous with respect to the weak topology and applied to $L_{n,k}^{E_0}$ yields $T_M(L_{n,k}^{E_0}) = H_{n,M}$. Now, for the upper bound, note that $$H_{n,M} \le H_n^{\mathrm{de}}.$$ From this point, \cite[Theorem 2.1]{hirschowada} and the contraction principle yield $$\limsup_{n\to\infty} \frac{1}{\rho_n^{\mathrm{de}}} \log \mbb P(H_n^{\mathrm{de}} \le a) \le \limsup_{n\to\infty} \frac{1}{\rho_n^{\mathrm{de}}} \log \mbb P(H_{n,M} \le a) \le -\inf_{\rho\colon T_M(\rho) \le a} h^{\mathrm{de}}(\rho \mid \tau_k^{\mathrm{de}})$$ and therefore, $$\limsup_{n\to\infty} \frac{1}{\rho_n^{\mathrm{de}}} \log \mbb P(H_n^{\mathrm{de}} \le a) \le - \limsup_{M\to\infty} \inf_{\rho\colon T_M(\rho) \le a} h^{\mathrm{de}}(\rho \mid \tau_k^{\mathrm{de}}).$$ Using monotone convergence of $T_M(\rho)$ towards $T_k^{\mathrm{de}}(\rho)$ for every Radon measure $\rho$ on $E_0$ as $M\rightarrow\infty$ gives the assertion. For the lower bound, with the same reasoning we get for any $\delta>0$ that \begin{equation}\label{inequality_LDP} \liminf_{n\to\infty} \frac{1}{\rho_n^{\mathrm{de}}} \log \mbb P(H_{n,M} < a-\delta) \geq -\inf_{\rho\colon T_M(\rho) < a-\delta} h^{\mathrm{de}}(\rho \mid \tau_k^{\mathrm{de}}). \end{equation} Next, as in the proof of the sparse regime, we need to show that $H_n^{\mathrm{de}}$ can be replaced with $H_{n,M}$ when it comes to the lower large deviations. We start the computations with $$\mbb P(H_n^{\mathrm{de}} < a) = \mbb P(H_n^{\mathrm{de}}(\mbb PP''_n) < a) \ge \mbb P(E_n^*, H_n^{\mathrm{de}}(\mbb PP''_n) < a).$$ Next, let $M_0>0$.
Then, under the event $E_n^*$ we assert that \begin{equation}\label{inequality_good_event_errors} H_n^{\mathrm{de}}(\mbb PP''_n) \le H_{n,M}(\mbb PP_n) + H_{n,M}^{\text{err},\partial}(\mbb PP''_n) + H_{n,M,M_0}^{\text{err}, \mathcal J}(\mbb PP_n, \mbb PP'_n, \mbb PP''_n), \end{equation} where we recall the definitions of the error terms $H_{n,M}^{\text{err},\partial}(\mbb PP''_n)$ and $H_{n,M,M_0}^{\text{err}, \mathcal J}(\mbb PP_n, \mbb PP'_n, \mbb PP''_n)$ from \eqref{equation_dense_error1} and \eqref{equation_dense_error2}. To show this claim, we partition $[0,1]^d$ into three subsets. Let \begin{enumerate} \item $S_1 := \cup_{Q\in\mathcal Q_n} \partial_n Q$ be the space close to the boundary of each box; \item $S_2 := \cup_{Q\in\mathcal{J}_n^M} Q\setminus\partial_n Q$ be the union of all bad boxes without the space close to their boundaries; \item $S_3 := \cup_{Q\in\mathcal Q_n\setminus\mathcal{J}_n^M} Q\setminus\partial_n Q$ be the union of all good boxes without the space close to their boundaries. \end{enumerate} Then, $$ H_n^{\mathrm{de}}(\mbb PP''_n) = \underbrace{ \frac1{\rho_{n, k}^{\mathrm{de}}}\sum_{ X \in \mbb PP_n'' \cap S_1} \xi_n(X,\mbb PP''_n)}_{=: (\star)} + \underbrace{ \frac1{\rho_{n, k}^{\mathrm{de}}}\sum_{ X \in \mbb PP_n'' \cap S_2} \xi_n(X,\mbb PP''_n)}_{=: (\star\star)} + \underbrace{ \frac1{\rho_{n, k}^{\mathrm{de}}}\sum_{ X \in \mbb PP_n'' \cap S_3} \xi_n(X,\mbb PP''_n)}_{=: (\star\star\star)}. $$ Under $E_n^*$, for all $X\in\mbb PP''_n\cap S_1$ it is satisfied that the box in which $X$ is located is $(\mbb PP''_n,M)$-bounded by Lemma \ref{lemma_lower_bound_good_event}, which means that $\xi_n(X,\mbb PP''_n) \le M$.
Thus, $$(\star) \le H_{n,M}^{\text{err},\partial}(\mbb PP''_n).$$ Further, for all boxes $Q\in\mathcal Q_n\setminus\mathcal{J}_n^M$, i.e., those that are already $(\mbb PP_n,M)$-good, we stress that the distance of $\partial_n Q$ to the boundary of $Q$ was set to be at least $t_n$, and thus, we can assume that this distance is larger than $((M+a_n+s_0)/(n\kappa_d))^{1/d}$. Therefore, points in $Q\setminus\partial_n Q$ for a $(\mbb PP_n,M)$-good box $Q$ are not affected by the potential replacement of $\mbb PP_n$ with $\mbb PP'_n$ in adjacent boxes, which means that due to the $(\mbb PP_n,M)$-boundedness of $Q$, all nodes $X\in (Q\setminus\partial_n Q)\cap\mbb PP_n$ satisfy that $\xi_n(X,\mbb PP_n)\le M$. This yields that for large enough $n$ $$(\star\star\star) \le H_{n,M}(\mbb PP_n).$$ Finally, under $E_n^*$, for all boxes $Q$ that were initially $(\mbb PP_n,M)$-bad, the sprinkling assures that $Q\setminus\partial_n Q$ is $(\mbb PP''_n,M_0)$-bounded, which results in $$(\star\star) \le H_{n,M,M_0}^{\text{err}, \mathcal J}(\mbb PP_n, \mbb PP'_n, \mbb PP''_n)$$ and confirms \eqref{inequality_good_event_errors}.
This lets us proceed with $$\mbb P(E_n^*, H_n^{\mathrm{de}}(\mbb PP''_n) < a) \ge \mbb P(E_n^*, H_{n,M}(\mbb PP_n) + H_{n,M}^{\text{err},\partial}(\mbb PP''_n) + H_{n,M,M_0}^{\text{err}, \mathcal J}(\mbb PP_n, \mbb PP'_n, \mbb PP''_n) < a).$$ Further, to ease notation, let $$F_n := \{H_{n,M}^{\text{err},\partial}(\mbb PP''_n)< \delta\} \cap \{H_{n,M,M_0}^{\text{err}, \mathcal J}(\mbb PP_n, \mbb PP'_n, \mbb PP''_n) < \delta\}$$ denote the intersection of the complements of the events from Lemma \ref{lemma_prob_resampling_between_boxes} for some $\delta>0$, which gives us \begin{align*} &\mbb P(E_n^*, H_{n,M}(\mbb PP_n) + H_{n,M}^{\text{err},\partial}(\mbb PP''_n) + H_{n,M,M_0}^{\text{err}, \mathcal J}(\mbb PP_n, \mbb PP'_n, \mbb PP''_n) < a) \\ &\ge \mbb P(E_n^*, F_n, H_{n,M}(\mbb PP_n) + 2\delta < a) \\ &\ge \mbb P(E_n^*, H_{n,M}(\mbb PP_n) < a - 2\delta) - \mbb P(F_n^c). \end{align*} Summarizing these steps and applying the tower property of the conditional expectation, we arrive at $$ \mbb P(H_n^{\mathrm{de}} < a) \ge \mbb E[\mbb P(E_n^* \mid \mbb PP_n) \mathbbmss{1}\{H_{n,M}(\mbb PP_n) < a-2\delta\}] - \mbb P(F_n^c).$$ Now, due to Lemma \ref{lemma_lower_bound_good_event}, we get that \begin{align}\label{inequality_ldp_with_errors} \begin{split} &\mbb P(H_n^{\mathrm{de}} < a) \\ &\ge \varepsilon^{\delta\rho_n^{\mathrm{de}}} (1-\varepsilon)^{\rho_n^{\mathrm{de}}} \Big(q_{M_0,M}^{\rho_n^{\mathrm{de}}} \mbb P(H_{n,M}(\mbb PP_n) < a-2\delta) \\ &\qquad\qquad\qquad\qquad\quad - \mbb E[\mbb P(J_n^M \ge \delta\rho_n^{\mathrm{de}} \mid \mbb PP_n) \mathbbmss{1}\{H_{n,M}(\mbb PP_n) < a-2\delta\}]\Big) - \mbb P(F_n^c) \\ &\ge \varepsilon^{\delta\rho_n^{\mathrm{de}}} (1-\varepsilon)^{\rho_n^{\mathrm{de}}} q_{M_0,M}^{\rho_n^{\mathrm{de}}} \mbb P(H_{n,M}(\mbb PP_n) < a-2\delta) - \mbb P(J_n^M \ge \delta\rho_n^{\mathrm{de}}) - \mbb P(F_n^c).
\end{split} \end{align} From here, Lemmas \ref{lemma_probability_bad_boxes} and \ref{lemma_prob_resampling_between_boxes} assert that neither $\mbb P(J_n^M \ge \delta\rho_n^{\mathrm{de}})$ nor $\mbb P(F_n^c)$ contributes significantly to the lower bound for the lower large deviations. Thus, we focus on the first term of the sum in the last line of \eqref{inequality_ldp_with_errors} and examine it under the assumption that $M$ and $M_0$ are large enough such that $q_{M_0,M}>0$ by computing \begin{align*} &\liminf_{M\to\infty} \liminf_{n\to\infty} \frac1{\rho_n^{\mathrm{de}}} \log\big(\varepsilon^{\delta\rho_n^{\mathrm{de}}} (1-\varepsilon)^{\rho_n^{\mathrm{de}}} q_{M_0,M}^{\rho_n^{\mathrm{de}}} \mbb P(H_{n,M}(\mbb PP_n) < a-2\delta)\big) \\ &\ge \delta \log \varepsilon + \log(1-\varepsilon) + \log q_{M_0,\infty} + \liminf_{M\to\infty} \liminf_{n\to\infty} \frac1{\rho_n^{\mathrm{de}}} \log \mbb P(H_{n,M}(\mbb PP_n) < a-2\delta), \end{align*} where $q_{M_0,\infty} := 1 - 3^d 2 k e^{|s_0|} e^{-M_0}$. Now, after plugging in \eqref{inequality_LDP}, we arrive at \begin{align*} &\liminf_{M\to\infty} \liminf_{n\to\infty} \frac1{\rho_n^{\mathrm{de}}} \log\big(\varepsilon^{\delta\rho_n^{\mathrm{de}}} (1-\varepsilon)^{\rho_n^{\mathrm{de}}} q_{M_0,M}^{\rho_n^{\mathrm{de}}} \mbb P(H_{n,M}(\mbb PP_n) < a-2\delta)\big) \\ &\ge \delta \log \varepsilon + \log(1-\varepsilon) + \log q_{M_0,\infty} - \limsup_{M\to\infty} \inf_{\rho\colon T_M(\rho) < a-2\delta} h^{\mathrm{de}}(\rho \mid \tau_k^{\mathrm{de}}) \\ &\ge \delta \log \varepsilon + \log(1-\varepsilon) + \log q_{M_0,\infty} - \inf_{\rho\colon T_k^{\mathrm{de}}(\rho) < a-2\delta} h^{\mathrm{de}}(\rho \mid \tau_k^{\mathrm{de}}), \end{align*} where in the last line we used that $T_M(\rho)\le T_k^{\mathrm{de}}(\rho)$.
Letting $\delta\rightarrow 0$, $\varepsilon\rightarrow 0$ and then $M_0\rightarrow \infty$ gives the lower bound $$\liminf_{n\to\infty} \frac{1}{\rho_n^{\mathrm{de}}} \log \mbb P(H_n^{\mathrm{de}} < a) \ge -\inf_{\rho\colon T_k^{\mathrm{de}}(\rho) < a} h^{\mathrm{de}}(\rho \mid \tau_k^{\mathrm{de}}).$$ \end{proof} What follows are the proofs of the previously introduced lemmas. \begin{proof}[Proof of Lemma \ref{lemma_probability_bad_boxes}] We claim that for some $c := c(d,k) >0$ \begin{equation}\label{inequality_prob_bad_box} \mbb P(Q_n^{(j)} \text{ is } (\mbb PP_n,M)\text{-bad}) \le c e^{-M/2-s_0} \end{equation} if we choose $n$ sufficiently large. Once the claim in \eqref{inequality_prob_bad_box} is established, we conclude the proof as follows. For each $n\in\mbb N$, we will categorize the boxes in $\mathcal Q_n$ to create independence and use the already encountered binomial concentration inequality from \cite[Lemma 1.1]{poisson_conc}. We can use $5^d$ labels, for instance the set $\mathcal{L} := \{1,2,3,4,5\}^d$, to label each box in $\mathcal{Q}_n$ in a certain way to achieve that between two boxes of the same label, there will always be four boxes with different labels. Here, we assumed that the number of boxes along each axis is divisible by $5$. For $l\in\mathcal{L}$, we denote the boxes of label $l$ by $\mathcal{Q}_n^{(l)}$. Then, $$ \mbb P(J_n^M \ge \delta\rho_n^{\mathrm{de}}) \le \sum_{l\in\mathcal{L}} \mbb P(\#(\mathcal{Q}_n^{(l)}\cap \mathcal{J}_n^M) \ge \delta\rho_n^{\mathrm{de}}/5^d). $$ For $n$ large enough, the labeling guarantees that the events $\{Q\in\mathcal{J}_n^M\}$ are independent for different $Q\in\mathcal{Q}_n^{(l)}$.
Thus, we are in a binomial setting and can invoke \cite[Lemma 1.1]{poisson_conc} with success probability given by the bound in \eqref{inequality_prob_bad_box}, to get for every $l\in\mathcal{L}$ and $\delta>0$ that \begin{align*} \mbb P(\#(\mathcal{Q}_n^{(l)}\cap \mathcal{J}_n^M) \ge \delta\rho_n^{\mathrm{de}}/5^d) &\le \exp\bigg(-\frac{\delta\rho_n^{\mathrm{de}}/5^d}{2} \log\Big(\frac{\delta\rho_n^{\mathrm{de}}/5^d}{\rho_n^{\mathrm{de}} ce^{-M/2-s_0}}\Big)\bigg) \\ &= \exp\bigg(-\frac{\delta\rho_n^{\mathrm{de}}}{5^d 2} \log\Big(\frac{\delta e^{M/2+s_0}}{5^d c}\Big)\bigg) \end{align*} if $n$ is large enough. From this point, we see that $$\frac1{\rho_n^{\mathrm{de}}} \log\mbb P(J_n^M \ge \delta\rho_n^{\mathrm{de}}) \le -\frac{\delta}{5^d 2} \log\Big(\frac{\delta e^{M/2+s_0}}{5^d c}\Big),$$ and the right-hand side does not depend on $n$ anymore. Furthermore, it satisfies that $$-\frac{\delta}{5^d 2} \log\Big(\frac{\delta e^{M/2+s_0}}{5^d c}\Big) \overset{M\to\infty}{\longrightarrow} -\infty.$$ It remains to show \eqref{inequality_prob_bad_box}. For this, let $j\le \rho_n^{\mathrm{de}}$ and $i\in\mathcal N_+(j)$ be arbitrary.
Then, the tower property yields \begin{align*} &\mbb P(Q_n^{(i)} \text{ is }(\bar{\mathcal S}_n^{(j)}(\mbb PP_n),M)\text{-bounded}) = \mbb E[a_{i,j}(\mbb PP_n)] \\ &= \mbb E[a_{i,j}(\mbb PP_n) \mathbbmss{1}\{a_{i, j}(\mbb PP_n) \ge 1 - b_{i,j}^{(M)}\}] + \mbb E[a_{i,j}(\mbb PP_n) \mathbbmss{1}\{a_{i, j}(\mbb PP_n) < 1 - b_{i,j}^{(M)}\}] \\ &\le \mbb P(a_{i, j}(\mbb PP_n) \ge 1 - b_{i,j}^{(M)}) + (1 - b_{i,j}^{(M)}) \mbb P(a_{i, j}(\mbb PP_n) < 1 - b_{i,j}^{(M)}) = 1 - b_{i,j}^{(M)} \mbb P(a_{i, j}(\mbb PP_n) < 1 - b_{i,j}^{(M)}) \end{align*} and therefore, \begin{align*} \mbb P(a_{i, j}(\mbb PP_n) < 1 - b_{i,j}^{(M)}) &\le \mbb P(Q_n^{(i)} \text{ is not }(\bar{\mathcal S}_n^{(j)}(\mbb PP_n),M)\text{-bounded}) / b_{i,j}^{(M)} \\ &\le \mbb P(Q_n^{(i)} \text{ is not }(\bar{\mathcal S}_n^{(j)}(\mbb PP_n),M)\text{-bounded}) e^{M/2}. \end{align*} Whether $Q_n^{(i)}$ is $(\bar{\mathcal S}_n^{(j)}(\mbb PP_n),M)$-bounded depends only on the configurations in the boxes $Q_n^{(s)}$ for $s\in\mathcal N_+(i)$. For each of them, $Q_n^{(s)} \cap \bar{\mathcal S}_n^{(j)}(\mbb PP_n) \in\{Q_n^{(s)}\cap\mbb PP_n, Q_n^{(s)}\cap\mbb PP'_n\}$, i.e., there are less than $2^{\#\mathcal N_+(i)}\le 2^{3^d}$ possibilities. With the union bound, this leads to \begin{equation}\label{inequality_poisson_union} \mbb P(Q_n^{(i)}\text{ is not }(\bar{\mathcal S}_n^{(j)}(\mbb PP_n),M)\text{-bounded}) \le 2^{3^d} \mbb P(Q_n^{(i)}\text{ is not }(\mbb PP_n,M)\text{-bounded}). \end{equation} From here, we can continue by using Markov's inequality and Mecke's formula.
To simplify the notation, we set $m_n := M+a_n + s_0$ and get \begin{align}\label{inequality_probability_bad_box} \begin{split} &\mbb P(Q_n^{(i)}\text{ is not }(\mbb PP_n,M)\text{-bounded}) = \mbb P\Big(\min_{X\in Q_n^{(i)}\cap\mbb PP_n} \mbb PP_n\big(B_{(\frac{m_n}{n\kappa_d})^{1/d}}(X)\big) \le k \Big) \\ &\le \mbb E\bigg[\sum_{X\in Q_n^{(i)}\cap\mbb PP_n} \mathbbmss{1}\big\{\mbb PP_n\big(B_{(\frac{m_n}{n\kappa_d})^{1/d}}(X)\big) \le k\big\}\bigg] = n \int_{Q_n^{(i)}} \mbb E\big[\mathbbmss{1}\big\{\mbb PP_n\big(B_{(\frac{m_n}{n\kappa_d})^{1/d}}(x)\big) \le k-1\big\}\big] \,\mathrm{d} x \\ &= n |Q_n^{(i)}| \sum_{s=0}^{k-1} \frac{m_n^s}{s!} e^{-m_n} \le \rho_n^{\mathrm{de}} |Q_n^{(i)}| k(1+M/a_n+s_0/a_n)^{k-1} e^{-M-s_0} \le 2k e^{-M-s_0} \end{split} \end{align} for large enough $n$. With this, for the $j$th box of the arbitrary ordering, $Q_n^{(j)}$, we compute that \begin{align*} \mbb P(Q_n^{(j)} \text{ is } (\mbb PP_n,M)\text{-bad}) &= \mbb P\Big(\bigcup_{i\in\mathcal N_+(j)} \{a_{i,j}(\mbb PP_n) < 1 - b_{i,j}^{(M)}\}\Big) \le \sum_{i\in\mathcal N_+(j)} \mbb P(a_{i,j}(\mbb PP_n) < 1 - b_{i,j}^{(M)}) \\ &\le 3^d 2^{3^d+1} e^{M/2} ke^{-M-s_0} = 3^d 2^{3^d+1} k e^{-M/2-s_0}, \end{align*} and thus, choosing $c:=3^d 2^{3^d+1} k$ suffices for the claim to hold. \end{proof} \begin{proof}[Proof of Lemma \ref{lemma_prob_new_edges}] First, we recall the events $E_j^\text{good}(\eta)$, $E_j^\text{bad}(\eta)$ and $E_j^b$ from \eqref{equation_good_condition1} and \eqref{equation_good_condition2} for $\eta$ equal to either $\mbb PP_n$ or $\mbb PP'_n$.
Then, as a first step, we point out that by the tower property \begin{align}\label{equality_recursive1} \begin{split} &\mbb P\bigg(\bigcap_{j=1}^{\rho_n^{\mathrm{de}}} E_j^\text{good}(\mbb PP_n)\cup \big(E_j^\text{bad}(\mbb PP_n)\cap E_j^\text{good}(\mbb PP'_n) \cap E_j^b\big) \biggm\vert \mbb PP_n\bigg) \\ &= \mbb E\bigg[\Big(\mathbbmss{1}_{E_{\rho_n^{\mathrm{de}}}^\text{good}(\mbb PP_n)} + \mathbbmss{1}_{E_{\rho_n^{\mathrm{de}}}^\text{bad}(\mbb PP_n)} \mbb E\big[ \mathbbmss{1}_{E_{\rho_n^{\mathrm{de}}}^\text{good}(\mbb PP'_n) \cap E_{\rho_n^{\mathrm{de}}}^b} \bigm\vert \mbb PP_n, (\mbb PP'_n)^{(\rho_n^{\mathrm{de}}-1)}\big]\Big) \\ &\qquad\ \prod_{j=1}^{\rho_n^{\mathrm{de}}-1} \big(\mathbbmss{1}_{E_j^\text{good}(\mbb PP_n)} + \mathbbmss{1}_{E_j^\text{bad}(\mbb PP_n)} \mathbbmss{1}_{E_j^\text{good}(\mbb PP'_n) \cap E_j^b}\big) \biggm\vert \mbb PP_n\bigg]. \end{split} \end{align} This gives an indication of the recursive approach to this proof. We start by working towards a bound of the inner conditional expectation after the equals sign of \eqref{equality_recursive1}. Fixing an arbitrary $j\in\{1,\dots,\rho_n^{\mathrm{de}}\}$, note that the $(\mbb PP'_n,M)$-goodness of $Q_n^{(j)}$ does not depend on $\cup_{s\ge j} Q_n^{(s)}\cap\mbb PP_n$ and therefore \begin{equation}\label{equality_recursion2} \mbb P\big(E_j^\text{good}(\mbb PP'_n), E_j^b \mid \mbb PP_n, (\mbb PP'_n)^{(j-1)}\big) = \mbb P\big(E_j^\text{good}(\mbb PP'_n), E_j^b \mid \mbb PP_n^{(j-1)}, (\mbb PP'_n)^{(j-1)}\big).
\end{equation} Now, we can use the definition of goodness to arrive at \begin{align}\label{inequality_recursion3} \begin{split} &\mbb P(E_j^\text{good}(\mbb PP'_n), E_j^b \mid \mbb PP_n^{(j-1)}, (\mbb PP'_n)^{(j-1)}) \\ &= \mbb P\big(\cap_{i\in\mathcal N_+(j)} \{a_{i,j}(\mbb PP'_n) \ge 1-b_{i,j}^{(M)}\} \cap E_j^b \mid \mbb PP_n^{(j-1)}, (\mbb PP'_n)^{(j-1)}\big) \\ &\ge 1 - \sum_{i\in\mathcal N_+(j)} \big(1-\mbb P\big(a_{i,j}(\mbb PP'_n) \ge 1-b_{i,j}^{(M)}, E_j^b \mid \mbb PP_n^{(j-1)}, (\mbb PP'_n)^{(j-1)}\big)\big). \end{split} \end{align} Subsequently, the key step is to show that under $\cap_{s\le j-1} \big(E_s^\text{good}(\mbb PP_n)\cup (E_s^\text{bad}(\mbb PP_n)\cap E_s^\text{good}(\mbb PP'_n))\big)$ for sufficiently large $n$ \begin{equation}\label{inequality_assertion_dense_proof} \mbb P\big(a_{i,j}(\mbb PP'_n) \ge 1-b_{i,j}^{(M)}, E_j^b \mid \mbb PP_n^{(j-1)}, (\mbb PP'_n)^{(j-1)}\big) \ge 1 - 2 k e^{|s_0|} (e^{-M_0} + e^{-M/2^{4^d}}). \end{equation} Once \eqref{inequality_assertion_dense_proof} is established, we conclude the proof as follows. Continuing at \eqref{equality_recursion2} and \eqref{inequality_recursion3}, using that $\#\mathcal N_+(j) = 3^d$, yields that $$ \mbb P\big(E_j^\text{good}(\mbb PP'_n), E_j^b \mid \mbb PP_n^{(j-1)}, (\mbb PP'_n)^{(j-1)}\big) \ge 1 - 3^d 2 k e^{|s_0|} (e^{-M_0} + e^{-M/2^{4^d}}) = q_{M_0,M}.
$$ This lets us proceed at \eqref{equality_recursive1} to arrive at \begin{align*} &\mbb P\bigg(\bigcap_{j=1}^{\rho_n^{\mathrm{de}}} \Big(E_j^\text{good}(\mbb PP_n)\cup \big(E_j^\text{bad}(\mbb PP_n)\cap E_j^\text{good}(\mbb PP'_n) \cap E_j^b\big)\Big) \biggm\vert \mbb PP_n\bigg) \\ &\ge \mbb E\bigg[\Big(\mathbbmss{1}_{E_{\rho_n^{\mathrm{de}}}^\text{good}(\mbb PP_n)} + \mathbbmss{1}_{E_{\rho_n^{\mathrm{de}}}^\text{bad}(\mbb PP_n)} q_{M_0,M}\Big) \prod_{j=1}^{\rho_n^{\mathrm{de}}-1} \big(\mathbbmss{1}_{E_j^\text{good}(\mbb PP_n)} + \mathbbmss{1}_{E_j^\text{bad}(\mbb PP_n)} \mathbbmss{1}_{E_j^\text{good}(\mbb PP'_n) \cap E_j^b}\big) \biggm\vert \mbb PP_n\bigg] \\ &\ge q_{M_0,M} \mbb E\bigg[ \prod_{j=1}^{\rho_n^{\mathrm{de}}-1} \big(\mathbbmss{1}_{E_j^\text{good}(\mbb PP_n)} + \mathbbmss{1}_{E_j^\text{bad}(\mbb PP_n)} \mathbbmss{1}_{E_j^\text{good}(\mbb PP'_n) \cap E_j^b}\big) \biggm\vert \mbb PP_n\bigg] \ge q_{M_0,M}^{\rho_n^{\mathrm{de}}}, \end{align*} where the last inequality follows from repeating the previous steps $\rho_n^{\mathrm{de}}$ times. It remains to prove the assertion stated in \eqref{inequality_assertion_dense_proof}. In order to do so, let $i\in\mathcal N_+(j)$ be fixed. If $\{1,\dots,j-1\}\cap\mathcal N_+(i)\neq\emptyset$, we can denote the largest index of an adjacent box of the box $Q_n^{(i)}$ that comes before $j$ in the ordering by $j_0 := \max(\{1,\dots,j-1\}\cap\mathcal N_+(i))$. Note that $Q_n^{(j_0)}\cap\bar{\mathcal S}_n^{(j)}(\mbb PP'_n)$ can either be equal to $Q_n^{(j_0)}\cap\mbb PP_n$ or $Q_n^{(j_0)}\cap\mbb PP'_n$, resulting in two options that we can include in a similar way as was done in \eqref{inequality_poisson_union}.
Then, we have that under $\cap_{s\le j-1} \big(E_s^\text{good}(\mbb PP_n)\cup (E_s^\text{bad}(\mbb PP_n)\cap E_s^\text{good}(\mbb PP'_n))\big)$ \begin{align}\label{inequality_proof_with_recursion1} \begin{split} &\mbb P(Q_n^{(i)}\text{ is $(\bar{\mathcal S}_n^{(j)}(\mbb PP'_n),M)$-bounded}, E_j^b \mid \mbb PP_n^{(j-1)}, (\mbb PP'_n)^{(j-1)}) \\ &\ge \mbb P(E_j^b \mid \mbb PP_n^{(j-1)}, (\mbb PP'_n)^{(j-1)}) - \mbb P(Q_n^{(i)}\text{ is not }(\bar{\mathcal S}_n^{(j)}(\mbb PP'_n),M)\text{-bounded} \mid \mbb PP_n^{(j-1)}, (\mbb PP'_n)^{(j-1)}) \\ &= \mbb P(E_j^b) - \mbb P(Q_n^{(i)}\text{ is not }(\bar{\mathcal S}_n^{(j)}(\mbb PP'_n),M)\text{-bounded} \mid \mbb PP_n^{(j_0)}, (\mbb PP'_n)^{(j_0)}) \\ &\ge \mbb P(E_j^b) - 2\mbb P(Q_n^{(i)}\text{ is not }(\bar{\mathcal S}_n^{(j_0)}(\mbb PP'_n),M)\text{-bounded} \mid \mbb PP_n^{(j_0)}, (\mbb PP'_n)^{(j_0)}) \\ &\ge \mbb P(E_j^b) - 2 b_{i,j_0}^{(M)} = \mbb P(E_j^b) - 2 b_{i,j-1}^{(M)}. \end{split} \end{align} In the other case, i.e., if $\{1,\dots,j-1\}\cap\mathcal N_+(i)=\emptyset$, we get \begin{align}\label{inequality_proof_with_recursion2} \begin{split} &\mbb P(Q_n^{(i)}\text{ is $(\bar{\mathcal S}_n^{(j)}(\mbb PP'_n),M)$-bounded}, E_j^b \mid \mbb PP_n^{(j-1)}, (\mbb PP'_n)^{(j-1)}) \\ &= \mbb P(Q_n^{(i)}\text{ is $(\mbb PP'_n,M)$-bounded}, E_j^b) \ge \mbb P(E_j^b) - 2ke^{-M-s_0} \end{split} \end{align} for large $n$, where the last inequality follows from \eqref{inequality_probability_bad_box}. For completeness, note that we viewed $\mbb PP_n^{(0)}$ and $(\mbb PP'_n)^{(0)}$ as $\emptyset$.
Additionally, with the tower property, it follows that \begin{align*} &\mbb P(Q_n^{(i)}\text{ is $(\bar{\mathcal S}_n^{(j)}(\mbb PP'_n),M)$-bounded}, E_j^b \mid \mbb PP_n^{(j-1)}, (\mbb PP'_n)^{(j-1)}) \\ &= \mbb E\big[\mbb P(Q_n^{(i)}\text{ is $(\bar{\mathcal S}_n^{(j)}(\mbb PP'_n),M)$-bounded} \mid \mbb PP_n^{(j)}, (\mbb PP'_n)^{(j)}) \mathbbmss{1}\{E_j^b\} \bigm\vert \mbb PP_n^{(j-1)}, (\mbb PP'_n)^{(j-1)}\big] \\ &=\mbb E\big[\mbb P(Q_n^{(i)}\text{ is $(\bar{\mathcal S}_n^{(j)}(\mbb PP'_n),M)$-bounded} \mid \mbb PP_n^{(j)}, (\mbb PP'_n)^{(j)}) \mathbbmss{1}\{E_j^b\} \\ &\qquad\ (\mathbbmss{1}\{a_{i,j}(\mbb PP'_n) \ge 1 - b_{i,j}^{(M)}\} + \mathbbmss{1}\{a_{i,j}(\mbb PP'_n) < 1 - b_{i,j}^{(M)}\}) \bigm\vert \mbb PP_n^{(j-1)}, (\mbb PP'_n)^{(j-1)}\big] \\ &\le\mbb E\big[\mathbbmss{1}\{E_j^b\} \big(\mathbbmss{1}\{a_{i,j}(\mbb PP'_n) \ge 1-b_{i,j}^{(M)}\} + (1 - b_{i,j}^{(M)}) \mathbbmss{1}\{a_{i,j}(\mbb PP'_n) < 1 - b_{i,j}^{(M)}\}\big) \bigm\vert \mbb PP_n^{(j-1)}, (\mbb PP'_n)^{(j-1)}\big] \\ &\le (1 - b_{i,j}^{(M)}) \mbb P(E_j^b) + b_{i,j}^{(M)} \mbb P(a_{i,j}(\mbb PP'_n) \ge 1 - b_{i,j}^{(M)}, E_j^b \mid \mbb PP_n^{(j-1)}, (\mbb PP'_n)^{(j-1)}). \end{align*} Note that, similar to \eqref{inequality_probability_bad_box}, we can also show that $\mbb P(E_j^b)\ge 1-2ke^{-M_0-s_0}$.
Using this, \eqref{inequality_proof_with_recursion1} and \eqref{inequality_proof_with_recursion2} as well as the definition of $b_{i,j}^{(M)}$ from \eqref{equation_probability_Mgood}, we arrive at \begin{align*} &\mbb P(a_{i,j}(\mbb PP'_n) \ge 1 - b_{i,j}^{(M)}, E_j^b \mid \mbb PP_n^{(j-1)}, (\mbb PP'_n)^{(j-1)}) \\ &\ge \frac{\mbb P(E_j^b) - \max\{2 b_{i,j-1}^{(M)}, 2 k e^{-M-s_0}\} - (1 - b_{i,j}^{(M)}) \mbb P(E_j^b)}{b_{i,j}^{(M)}} \ge \frac{b_{i,j}^{(M)} \mbb P(E_j^b) - 2 k b_{i,j-1}^{(M)} e^{|s_0|}}{b_{i,j}^{(M)}} \\ &\ge \frac{b_{i,j}^{(M)}(1-2 k e^{-M_0-s_0}) - 2 k b_{i,j-1}^{(M)} e^{|s_0|}}{b_{i,j}^{(M)}} = 1-2 k e^{-M_0-s_0} - 2 k b_{i,j-1}^{(M)} e^{|s_0|} / b_{i,j}^{(M)} \\ &= 1 - 2 k e^{-M_0-s_0} - 2 k e^{-M (2^{-1-(\#\{s\in\mathcal N_+(i) \colon s\le j\}-1)} - 2^{-1-\#\{s\in\mathcal N_+(i) \colon s\le j\}})} e^{|s_0|} \\ &= 1 - 2 k e^{-M_0-s_0} - 2 k b_{i,j}^{(M)} e^{|s_0|} \ge 1 - 2 k e^{-M_0-s_0} - 2 k e^{-M 2^{-1-3^d}} e^{|s_0|} \\ &\ge 1 - 2 k e^{|s_0|} (e^{-M_0} + e^{-M/2^{4^d}}). \end{align*} \end{proof} \begin{proof}[Proof of Lemma \ref{lemma_lower_bound_good_event}] For the first part, note that given $E_n^*$ the events $\{a_{i,\rho_n^{\mathrm{de}}}(\mbb PP_n'') \ge 1-b_{i,\rho_n^{\mathrm{de}}}^{(M)}\}$ occur for all $i\in\mathcal N_+(\rho_n^{\mathrm{de}})$.
Thus, for sufficiently large $M$ and every $i\in\mathcal N_+(\rho_n^{\rm de})$ \begin{align}\label{inequality_recursion_dense} \begin{split} 0 &< 1 - b_{i,\rho_n^{\rm de}}^{(M)} \le a_{i, \rho_n^{\rm de}}(\mbb PP''_n) = \mbb P\big(Q_n^{(i)}\text{ is $(\bar{\mathcal S}_n^{(\rho_n^{\rm de})}(\mbb PP''_n),M)$-bounded}\bigm\vert \mbb PP_n^{(\rho_n^{\rm de})}, (\mbb PP'_n)^{(\rho_n^{\rm de})}\big) \\ &= \mathbbmss{1}\{Q^{(i)}\text{ is $(\bar{\mathcal S}_n^{(\rho_n^{\rm de})}(\mbb PP''_n),M)$-bounded}\} = \mathbbmss{1}\{Q_n^{(i)}\text{ is $(\mbb PP''_n,M)$-bounded}\} \end{split} \end{align} by measurability with respect to $\mbb PP_n^{(\rho_n^{\rm de})}, (\mbb PP'_n)^{(\rho_n^{\rm de})}$ and therefore, $Q_n^{(i)}$ is $(\mbb PP''_n,M)$-bounded. Next, repeating this argument, it follows that $Q_n^{(i')}$ is $(\mbb PP''_n,M)$-bounded for all $i'\in\mathcal N_+(\rho_n^{\rm de}-1) \setminus \mathcal N_+(\rho_n^{\rm de})$. Note that $i'\not\in\mathcal N_+(\rho_n^{\rm de})$ is an important requirement to be able to replicate the last two equalities in \eqref{inequality_recursion_dense} in this case. Afterwards, we consider $i''\in\mathcal N_+(\rho_n^{\rm de}-2) \setminus \big(\mathcal N_+(\rho_n^{\rm de}-1) \cup \mathcal N_+(\rho_n^{\rm de})\big)$. We can repeat this until all boxes have been dealt with and we conclude the first part of the proof of Lemma \ref{lemma_lower_bound_good_event} by deducing from this that given $E_n^*$, the event that $Q_n^{(j)}$ is $(\mbb PP''_n,M)$-bounded holds for all $j\in\{1,\dots,\rho_n^{\rm de}\}$. 
For the second part, the tower property yields \begin{align}\label{inequality_bernoulli_separation} \begin{split} \mbb P(E_n^* \mid \mbb PP_n) &= \mbb E\bigg[\prod_{j=1}^{\rho_n^{\rm de}} \big(\mathbbmss{1}_{\{E_j^\text{good}(\mbb PP_n)\}} \mathbbmss{1}_{\{X_{Q_n^{(j)},\varepsilon} = 0\}} + \mathbbmss{1}_{\{E_j^\text{bad}(\mbb PP_n)\}} \mathbbmss{1}_{\{E_j^\text{good}(\mbb PP'_n)\cap E_j^b\}} \mathbbmss{1}_{\{X_{Q_n^{(j)},\varepsilon} = 1\}}\big) \biggm\vert \mbb PP_n\bigg] \\ &= \mbb E\bigg[\prod_{j=1}^{\rho_n^{\rm de}} \mbb E\Big[\mathbbmss{1}_{\{E_j^\text{good}(\mbb PP_n)\}} \mathbbmss{1}_{\{X_{Q_n^{(j)},\varepsilon} = 0\}} \\ &\qquad\qquad\qquad + \mathbbmss{1}_{\{E_j^\text{bad}(\mbb PP_n)\}} \mathbbmss{1}_{\{E_j^\text{good}(\mbb PP'_n)\cap E_j^b\}} \mathbbmss{1}_{\{X_{Q_n^{(j)},\varepsilon} = 1\}} \Bigm\vert \mbb PP_n, \mbb PP'_n\Big] \biggm\vert \mbb PP_n\bigg]. \end{split} \end{align} From this point, using the independence of $(X_{Q_n^{(j)},\varepsilon})_j$ of all Poisson point processes and the measurability of $E_j^\text{good}(\mbb PP_n)$, $E_j^\text{good}(\mbb PP'_n)$ and $E_j^b$ with respect to $\sigma(\mbb PP_n,\mbb PP'_n)$, we can compute that it almost surely holds that \begin{align*} &\mbb E\Big[\mathbbmss{1}_{\{E_j^\text{good}(\mbb PP_n)\}} \mathbbmss{1}_{\{X_{Q_n^{(j)},\varepsilon} = 0\}} + \mathbbmss{1}_{\{E_j^\text{bad}(\mbb PP_n)\}} \mathbbmss{1}_{\{E_j^\text{good}(\mbb PP'_n)\cap E_j^b\}} \mathbbmss{1}_{\{X_{Q_n^{(j)},\varepsilon} = 1\}} \Bigm\vert \mbb PP_n, \mbb PP'_n\Big] \\ &= \varepsilon^{\mathbbmss{1}_{\{E_j^\text{good}(\mbb PP_n)\}}} (1-\varepsilon)^{\mathbbmss{1}_{\{E_j^\text{bad}(\mbb PP_n)\}} \mathbbmss{1}_{\{E_j^\text{good}(\mbb PP'_n)\cap E_j^b\}}} (\mathbbmss{1}_{\{E_j^\text{good}(\mbb PP_n)\}} + \mathbbmss{1}_{\{E_j^\text{bad}(\mbb PP_n)\}} \mathbbmss{1}_{\{E_j^\text{good}(\mbb PP'_n)\cap E_j^b\}}) \\ &\ge \varepsilon^{\mathbbmss{1}_{\{E_j^\text{good}(\mbb 
PP_n)\}}} (1-\varepsilon) (\mathbbmss{1}_{\{E_j^\text{good}(\mbb PP_n)\}} + \mathbbmss{1}_{\{E_j^\text{bad}(\mbb PP_n)\}} \mathbbmss{1}_{\{E_j^\text{good}(\mbb PP'_n)\cap E_j^b\}}). \end{align*} Next, we revisit \eqref{inequality_bernoulli_separation} and continue with \begin{align*} &\mbb P(E_n^* \mid \mbb PP_n)\\ &\ge \mbb E\bigg[\varepsilon^{J_n^M} (1-\varepsilon)^{\rho_n^{\rm de}} \prod_{j=1}^{\rho_n^{\rm de}} \big(\mathbbmss{1}_{\{E_j^\text{good}(\mbb PP_n)\}} + \mathbbmss{1}_{\{E_j^\text{bad}(\mbb PP_n)\}} \mathbbmss{1}_{\{E_j^\text{good}(\mbb PP'_n)\cap E_j^b\}} \big) \biggm\vert \mbb PP_n\bigg] \\ &\ge \mbb E\bigg[\varepsilon^{\delta\rho_n^{\rm de}} (1-\varepsilon)^{\rho_n^{\rm de}} \mathbbmss{1}_{\{J_n^M < \delta\rho_n^{\rm de}\}} \prod_{j=1}^{\rho_n^{\rm de}} \big(\mathbbmss{1}_{\{E_j^\text{good}(\mbb PP_n)\}} + \mathbbmss{1}_{\{E_j^\text{bad}(\mbb PP_n)\}} \mathbbmss{1}_{\{E_j^\text{good}(\mbb PP'_n)\cap E_j^b\}} \big) \biggm\vert \mbb PP_n\bigg] \\ &\ge \varepsilon^{\delta\rho_n^{\rm de}} (1-\varepsilon)^{\rho_n^{\rm de}} \bigg(\mbb E\bigg[\prod_{j=1}^{\rho_n^{\rm de}} \big(\mathbbmss{1}_{\{E_j^\text{good}(\mbb PP_n)\}} + \mathbbmss{1}_{\{E_j^\text{bad}(\mbb PP_n)\}} \mathbbmss{1}_{\{E_j^\text{good}(\mbb PP'_n)\cap E_j^b\}} \big) \biggm\vert \mbb PP_n\bigg] \\ &\qquad\qquad\qquad\qquad\quad - \mbb P(J_n^M \ge \delta\rho_n^{\rm de} \mid \mbb PP_n)\bigg). \end{align*} Here, Lemma \ref{lemma_prob_new_edges} yields \begin{equation}\label{inequality_good_event2} \mbb P(E_n^* \mid \mbb PP_n) \ge \varepsilon^{\delta\rho_n^{\rm de}} (1-\varepsilon)^{\rho_n^{\rm de}} \big(q_{M_0,M}^{\rho_n^{\rm de}} - \mbb P(J_n^M \ge \delta\rho_n^{\rm de} \mid \mbb PP_n)\big). 
\end{equation} \end{proof} \begin{proof}[Proof of Lemma \ref{lemma_prob_resampling_between_boxes}] For a box $Q\in\mathcal{Q}_n$, we divide $\partial_n Q$ into a grid consisting of boxes with side length $u_n := (\frac{a_n+s_0}{n\kappa_d})^{1/d}$ and call this collection of boxes $\mathcal{W}_n(\partial_n Q)$. We denote the total collection of these boxes by $\overline{\mathcal{W}}_n := \cup_{Q\in\mathcal{Q}_n} \mathcal{W}_n(\partial_n Q)$. The volume of $\partial_n Q$ for $Q\in\mathcal{Q}_n$ can be bounded by $5 d (\rho_n^{\rm de})^{-(d-1)/d} t_n$ for large $n$ and therefore, \begin{equation}\label{limit_zero_boxes_partial} |\partial_n Q|/ |Q| \le 5 d (\rho_n^{\rm de})^{-(d-1)/d} t_n \rho_n^{\rm de} = 5 d (\rho_n^{\rm de})^{1/d} t_n = e^{-a_n/d} a_n^{(k-1)/d} (a_n+w_n)^{1/d} / \kappa_d^{1/d} \overset{n\to\infty}{\longrightarrow} 0. \end{equation} Next, we can proceed with the same strategy that was previously employed to prove Lemma \ref{lemma_probability_bad_boxes}. We use the label set $\mathcal{L} := \{1,2,3\}^d$ to achieve that between two boxes of the same label, there are always two boxes labeled differently, where for simplicity, we assume that the number of boxes along each axis is divisible by $3$. We reuse the notation $\overline{\mathcal{W}}_n^{(l)}$ for the boxes of label $l\in\mathcal{L}$. Let $M,\delta>0$. Now, we can union over all labels and combine this with the union bound to arrive at \begin{align*} &\mbb P\bigg(\sum_{ X \in \mbb PP_n \cap (\cup_{Q\in\mathcal{Q}_n} \partial_n Q) } M \wedge \xi_n(X, \mbb PP_n) \ge \delta\rho_n^{\rm de}\bigg) \\ &\le \sum_{l\in\mathcal{L}} \mbb P\bigg(\sum_{ X \in \mbb PP_n \cap (\cup_{W\in\overline{\mathcal{W}}_n^{(l)}} W) } M \wedge \xi_n(X, \mbb PP_n) \ge \delta\rho_n^{\rm de}/3^d\bigg). 
\end{align*} For each $W\in\overline{\mathcal{W}}_n^{(l)}$, we assert that the maximal number of Poisson points $X\in W\cap\mbb PP_n$ with $R_k(X,\mbb PP_n) \ge u_n$ is bounded by some $c := c(d,k)>0$. This follows similarly as in \eqref{inequality_nodes_nearest_neighbor_large_stabilization}. We go through nodes $W\cap\mbb PP_n$ one by one and label some of them in the same manner as in Section \ref{section_applications_thermodynamic_kNN}. The only difference is that we can only argue that a fraction of $1/2^d$ of the volume of each constructed disjoint ball is in $W$, to account for vertices close to the boundary of $W$. This means the bound is computed by \[\frac{k|W|}{\kappa_d (u_n/2)^d / 2^d} = k4^d/\kappa_d =: c.\] Using this, for each $l\in\mathcal{L}$, we compute \begin{align*} &\mbb P\bigg(\sum_{ X \in \mbb PP_n \cap (\cup_{W\in\overline{\mathcal{W}}_n^{(l)}} W) } M \wedge \xi_n(X, \mbb PP_n) \ge \delta\rho_n^{\rm de}/3^d\bigg) \\ &\le \mbb P\bigg(M \sum_{ X \in \mbb PP_n \cap (\cup_{W\in\overline{\mathcal{W}}_n^{(l)}} W) } \mathbbmss{1}\{R_k(X, \mbb PP_n) \ge u_n\} \ge \delta\rho_n^{\rm de}/3^d\bigg) \\ &\le \mbb P\bigg( \sum_{W\in\overline{\mathcal{W}}_n^{(l)}} \mathbbmss{1}\{\max_{X\in W\cap\mbb PP_n} R_k(X, \mbb PP_n) \ge u_n\} \ge \delta\rho_n^{\rm de}/(3^d c M)\bigg). 
\end{align*} With the goal of using the spatial independence to invoke a binomial concentration bound, we combine Markov's inequality and Mecke's equation, which yields for each $W\in\overline{\mathcal{W}}_n^{(l)}$ and $n$ large \begin{align*} \mbb P\big(\max_{X\in W\cap\mbb PP_n} R_k(X, \mbb PP_n) \ge u_n\big) &\le n \int_W \mbb P(R_k(x, \mbb PP_n\cup\{x\}) \ge u_n) {\rm d} x = n \int_W \mbb P(\mbb PP_n(B_{u_n}(x)) < k) {\rm d} x \\ &= n\int_W \sum_{i=0}^{k-1} e^{- n u_n^d \kappa_d} \frac{(n u_n^d \kappa_d)^i}{i!} {\rm d} x \le n\int_W k e^{- (a_n + s_0)} (a_n + s_0)^{k-1} {\rm d} x \\ &= n|W| k e^{- (a_n + s_0)} (a_n + s_0)^{k-1} = |W| \rho_n^{\rm de} k e^{-s_0} (1 + s_0/a_n)^{k-1}. \end{align*} With the binomial bound from \cite[Lemma 1.2]{poisson_conc} and the computations from \eqref{limit_zero_boxes_partial} we arrive at \begin{align*} &\mbb P\Big( \sum_{W\in\overline{\mathcal{W}}_n^{(l)}} \mathbbmss{1}\{\max_{X\in W\cap\mbb PP_n} R_k(X, \mbb PP_n) \ge u_n \} \ge \delta\rho_n^{\rm de}/(3^d c M)\Big) \\ &\le \exp\bigg(- \frac{\delta\rho_n^{\rm de}}{3^d 2 c M} \log\Big(\frac{\delta\rho_n^{\rm de}/(3^d c M)}{|W| \rho_n^{\rm de} k e^{-s_0} (1 + s_0/a_n)^{k-1} \rho_n^{\rm de} |\partial_n Q_n^{(1)}|/|W| }\Big)\bigg) \\ &= \exp\bigg(- \frac{\delta\rho_n^{\rm de}}{3^d 2 c M} \log\Big(\frac{\delta / (3^d c M)}{k e^{-s_0} (1 + s_0/a_n)^{k-1} \underbrace{|\partial_n Q_n^{(1)}| / |Q_n^{(1)}|}_{\overset{n\to\infty}{\longrightarrow}0}}\Big)\bigg). \end{align*} Thus, $\limsup_{n\to\infty} \frac1{\rho_n^{\rm de}} \log\mbb P(H_{n,M}^{\text{err},\partial}(\mbb PP''_n) \ge \delta) = -\infty$. For the second part, we proceed roughly in the same fashion. 
But first, we note that for additional $M_0,\tilde\delta>0$, \begin{align*} &\mbb P(H_{n,M,M_0}^{\text{err}, \mathcal J}(\mbb PP_n, \mbb PP'_n, \mbb PP''_n) \ge \delta) \\ &\le \mbb P(H_{n,M,M_0}^{\text{err}, \mathcal J}(\mbb PP_n, \mbb PP'_n, \mbb PP''_n) \ge \delta, J_n^M < \tilde\delta \rho_n^{\rm de}) + \mbb P(J_n^M \ge \tilde\delta \rho_n^{\rm de}). \end{align*} In the following computations, we will use the upper bound for the binomial coefficient $\binom{a}{b} \le (ea/b)^b$, see \cite[Section 1.2.6 Exercise 67]{knuth1997}. Applied here, it yields \[\binom{\rho_n^{\rm de}}{\tilde\delta\rho_n^{\rm de}} \le (e\rho_n^{\rm de} / (\tilde\delta \rho_n^{\rm de}))^{\tilde\delta \rho_n^{\rm de}} = (e / \tilde\delta)^{\tilde\delta \rho_n^{\rm de}},\] where we assume that $\tilde\delta\le 1/2$ and that the pair of numbers occurring in the binomial coefficient are both positive integers. Now, we continue with \begin{align*} &\mbb P(H_{n,M,M_0}^{\text{err}, \mathcal J}(\mbb PP_n, \mbb PP'_n, \mbb PP''_n) \ge \delta, J_n^M < \tilde\delta \rho_n^{\rm de}) \\ &\le \mbb P\bigg(\bigcup_{\mathcal A\subseteq \mathcal Q_n, \#\mathcal A < \tilde\delta \rho_n^{\rm de}} \bigg\{\sum_{ X \in \mbb PP_n'' \cap (\cup_{Q\in\mathcal A} Q ) } M_0 \wedge \xi_n(X, \mbb PP_n'') \ge \delta\rho_{n, k}^{\rm de}\bigg\}\bigg) \\ &\le \sum_{i=1}^{\tilde\delta \rho_n^{\rm de}} \sum_{\mathcal A\subseteq \mathcal Q_n, \#\mathcal A = i} \mbb P\bigg(\sum_{ X \in \mbb PP_n'' \cap (\cup_{Q\in\mathcal A} Q ) } M_0 \wedge \xi_n(X, \mbb PP_n'') \ge \delta\rho_{n, k}^{\rm de}\bigg) \\ &\le \tilde\delta \rho_n^{\rm de} (e / \tilde\delta )^{\tilde\delta \rho_n^{\rm de}} \mbb P\bigg(\sum_{ X \in \mbb PP_n'' \cap (\cup_{i=1}^{\tilde\delta \rho_n^{\rm de}} Q_n^{(i)} ) } M_0 \wedge \xi_n(X, \mbb PP_n'') \ge 
\delta\rho_{n, k}^{\rm de}\bigg). \end{align*} Next, we cover $\cup_{i=1}^{\tilde\delta \rho_n^{\rm de}} Q_n^{(i)}$ with cubes of side length $u_n$ and consistently with prior convention, denote this collection by $\overline{W}_n$. Then, $\#\overline{W}_n = \tilde\delta \rho_n^{\rm de} |Q_n^{(1)}| u_n^{-d} = \tilde\delta u_n^{-d}$, which we assume to be an integer. Next, we can simply introduce the same labeling as for the first part of this proof and by the same calculations as in the first part, we get \begin{align*} &\mbb P\bigg(\sum_{ X \in \mbb PP_n'' \cap (\cup_{i=1}^{\tilde\delta \rho_n^{\rm de}} Q_n^{(i)} ) } M_0 \wedge \xi_n(X,\mbb PP''_n) \ge \delta\rho_{n, k}^{\rm de}\bigg) \\ &\le \sum_{l\in\mathcal{L}} \exp\Big(- \frac{\delta\rho_n^{\rm de}}{3^d 2 c M_0} \log\Big(\frac{\delta\rho_n^{\rm de}/(3^d c M_0)}{|W| \rho_n^{\rm de} k e^{-s_0} (1 + s_0/a_n)^{k-1} \tilde\delta u_n^{-d}}\Big)\Big) \\ &\le 3^d \exp\Big(- \frac{\delta\rho_n^{\rm de}}{3^d 2 c M_0} \log\Big(\frac{\delta}{\tilde\delta 3^d c M_0 k e^{- s_0} (1 + s_0/a_n)^{k-1}}\Big)\Big) \end{align*} for large $n$. When choosing $\tilde\delta=\delta/\log M$, this yields \begin{align*} &\frac1{\rho_n^{\rm de}} \log \mbb P(H_{n,M,M_0}^{\text{err}, \mathcal J}(\mbb PP_n, \mbb PP'_n, \mbb PP''_n) \ge \delta, J_n^M < \tilde\delta \rho_n^{\rm de}) \\ &\le \frac1{\rho_n^{\rm de}}\log\Big(\frac{\delta \rho_n^{\rm de} 3^d}{ \log M}\Big) + \frac{\delta\log(e (\log M) / \delta)}{\log M} - \frac{\delta}{3^d 2 c M_0} \log\Big(\frac{\log M}{ 3^d c M_0 k e^{- s_0} (1 + s_0/a_n)^{k-1}}\Big) \\ &\overset{n\to\infty}{\longrightarrow} \frac{\delta\log(e (\log M) / \delta)}{\log M} - \frac{\delta}{3^d 2 c M_0} \log\Big(\frac{\log M}{ 3^d c M_0 k e^{- s_0} }\Big) \overset{M\to\infty}{\longrightarrow} -\infty. 
\end{align*} On the other hand, from Lemma \ref{lemma_probability_bad_boxes}, for $\tilde\delta=\delta/\log M$, we can deduce that \begin{align*} &\frac1{\rho_n^{\rm de}} \log\mbb P(J_n^M \ge \tilde\delta\rho_n^{\rm de}) \le -\frac{\delta/\log M}{5^d 2} \log\Big(\frac{\delta e^{M/2} / \log M}{15^d 2^{3^d}}\Big) \\ &= \frac{-M/2 + \log\log M}{\log M} \frac{\delta}{5^d 2} - \frac{\delta/\log M}{5^d 2} \log\Big(\frac{\delta}{15^d 2^{3^d}}\Big) \overset{M\to\infty}{\longrightarrow} -\infty \end{align*} and conclude the assertion. \end{proof} \subsection*{Acknowledgment.} The authors thank T.\ Owada for very fruitful discussions about the lower large deviations in the sparse regime. Further, DW would like to acknowledge the financial support of the CogniGron research center and the Ubbo Emmius Funds (Univ.\ of Groningen). \end{document}
\begin{document} \title{A property of algebraic univoque numbers} \author{Martijn de Vries} \address{Delft University of Technology, Mekelweg 4, 2628 CD Delft, the Netherlands} \email{[email protected]} \subjclass[2000]{Primary:11A63, Secondary:11B83} \date{\today} \thanks{} \begin{abstract} Consider the set $\mathcal U$ of real numbers $q \ge 1$ for which only one sequence $(c_i)$ of integers $0 \le c_i \le q$ satisfies the equality $\sum_{i=1}^{\infty} c_i q^{-i} = 1$. In this note we show that the set of algebraic numbers in $\mathcal U$ is dense in the closure $\mathcal Uu$ of $\mathcal U$. \end{abstract} \maketitle \section{Introduction}\label{s1} Given a real number $q \ge 1$, a $q-${\it expansion} (or simply {\it expansion}) is a sequence $(c_i)=c_1 c_2 \ldots$ of integers satisfying $0 \le c_i \le q$ for all $i \geq 1$ such that \begin{equation*} \frac{c_1}{q} + \frac{c_2}{q^2} + \frac{c_3}{q^3} + \cdots = 1. \end{equation*} One such expansion, denoted by $(\gamma_i(q))= (\gamma_i)$, is obtained by performing the {\it greedy algorithm} of R\'enyi (\cite{R}): if $\gamma_i$ is already defined for $ i < n$, then $\gamma_n$ is the largest integer satisfying \begin{equation*} \sum_{i=1}^{n} \frac{\gamma_i}{q^i} \leq 1. \end{equation*} Equivalently, $(\gamma_i)$ is the largest expansion in lexicographical order. If $q >1$, then another such expansion, denoted by $(\alpha_i(q))= (\alpha_i)$, is obtained by performing the {\it quasi-greedy algorithm}: if $\alpha_i$ is already defined for $i < n$, then $\alpha_n$ is the largest integer satisfying \begin{equation*} \sum_{i=1}^{n} \frac{\alpha_i}{q^i} < 1. \end{equation*} An expansion is called {\it infinite} if it contains infinitely many nonzero terms; otherwise it is called {\it finite}. Observe that there are no infinite expansions if $q = 1$: the only 1-expansions are given by $10^{\infty}, 010^{\infty}, 0010^{\infty}, \ldots$. 
On the other hand, if $q >1$, then $(\alpha_i)$ is the largest infinite expansion in lexicographical order. For any given $q >1$, the following relations between the quasi-greedy expansion and the greedy expansion are straightforward. The greedy expansion is finite if and only if $(\alpha_i)$ is periodic. If $(\gamma_i)$ is finite and $\gamma_m$ is its last nonzero term, then $m$ is the smallest period of $(\alpha_i)$, and \begin{equation*} \alpha_i = \gamma_i \quad \mbox{for } i = 1, \ldots, m-1, \quad \mbox{and } \alpha_m = \gamma_m -1. \end{equation*} Erd\H{o}s, Horv\'ath and Jo\'o (\cite{EHJ}) discovered that for some real numbers $q >1 $ there exists only one $q-$expansion. Subsequently, the set $\mathcal U$ of such {\it univoque numbers} was characterized in \cite{EJK1}, \cite{EJK2}, \cite{KL3} (see Theorem~\ref{t21}). Using this characterization, Komornik and Loreti showed in \cite{KL1} that $\mathcal U$ has a smallest element $q' \approx 1.787$ and the corresponding expansion $(\tau_i)$ is given by the truncated Thue-Morse sequence, defined by setting $\tau_{2^N}=1$ for $N=0,1,\ldots$ and \begin{equation*} \tau_{2^N + i} = 1 - \tau_i \quad \mbox{for }1 \le i < 2^N, \, N=1,2, \ldots. \end{equation*} Allouche and Cosnard (\cite{AC}) proved that the number $q'$ is transcendental. This raised the question whether there exists a smallest algebraic univoque number. Komornik, Loreti and Peth\H{o} (\cite{KL2}) answered this question in the negative by constructing a decreasing sequence $(q_n)$ of algebraic univoque numbers converging to $q'$. It is the aim of this note to show that for each $q \in \mathcal U$ there exists a sequence of algebraic univoque numbers converging to $q$: \begin{theorem}\label{t11} The set $\mathcal{A}$ consisting of all algebraic univoque numbers is dense in $\mathcal Uu$. 
\end{theorem} Our proof of Theorem \ref{t11} relies on a characterization of the closure $\mathcal Uu$ of $\mathcal U$, recently obtained by Komornik and Loreti in \cite{KL3} (see Theorem~\ref{t22}). \section{Proof of Theorem~\ref{t11}}\label{s2} In the sequel, a sequence always means a sequence of nonnegative integers. We use systematically the lexicographical order between sequences; we write $(a_i) < (b_i)$ if there exists an index $n \geq 1$ such that $a_i=b_i$ for $i < n$ and $a_n < b_n$. This definition extends in the obvious way to sequences of finite length. The following algebraic characterization of the set $\mathcal U$ can be found in \cite{EJK1}, \cite{EJK2}, \cite{KL3}: \begin{theorem}\label{t21} The map $q \mapsto (\gamma_i(q))$ is a strictly increasing bijection between the set $\mathcal U$ and the set of all sequences $(\gamma_i)$ satisfying \begin{equation}\label{21} \gamma_{j+1} \gamma_{j+2} \ldots < \gamma_1 \gamma_2 \ldots \quad \mbox{for all }\, j \geq 1 \end{equation} and \begin{equation}\label{22} \overline{\gamma_{j+1} \gamma_{j+2} \ldots} < \gamma_1 \gamma_2 \ldots \quad \mbox{for all }\, j \geq 1 \end{equation} where we use the notation $\overline{\gamma_n} := \gamma_1 - \gamma_n$. \end{theorem} \begin{remark} It was essentially shown by Parry (see \cite{P}) that a sequence $(\gamma_i)$ is the greedy $q$-expansion for some $q \ge 1$ if and only if $(\gamma_i)$ satisfies the condition \eqref{21}. \end{remark} Using the above result, Komornik and Loreti (\cite{KL3}) investigated the topological structure of the set $\mathcal U$. In particular they showed that $\mathcal Uu \setminus \mathcal U$ is dense in $\mathcal Uu$. Hence the set $\mathcal Uu$ is a perfect set. 
Moreover, they established an analogous characterization of the closure $\mathcal Uu$ of $\mathcal U$: \begin{theorem}\label{t22} The map $q \mapsto (\alpha_i(q))$ is a strictly increasing bijection between the set $\mathcal Uu$ and the set of all sequences $(\alpha_i)$ satisfying \begin{equation}\label{23} \alpha_{j+1} \alpha_{j+2} \ldots \le \alpha_1 \alpha_2 \ldots \quad \mbox{for all }\, j \geq 1 \end{equation} and \begin{equation}\label{24} \overline{\alpha_{j+1} \alpha_{j+2} \ldots} < \alpha_1 \alpha_2 \ldots \quad \mbox{for all }\, j \geq 1 \end{equation} where we use the notation $\overline{\alpha_n} := \alpha_1 - \alpha_n$. \end{theorem} \begin{remarks} \mbox{} \begin{itemize} \item It was shown in \cite{BK} that a sequence $(\alpha_i)$ is the quasi-greedy $q$-expansion for some $q >1$ if and only if $(\alpha_i)$ is infinite and satisfies \eqref{23}. Note also that a sequence satisfying \eqref{23} and \eqref{24} is automatically infinite. \item If $q \in \mathcal Uu \setminus \mathcal U$, then we must have equality in \eqref{23} for some $j \geq 1$, i.e., the greedy $q$-expansion is finite for each $q \in \mathcal Uu \setminus \mathcal U$. On the other hand, it follows from Theorems~\ref{t21} and \ref{t22} that a sequence of the form $(1^n0)^{\infty}$ $(n \ge 2)$ is the quasi-greedy $q$-expansion for some $q \in \mathcal Uu \setminus \mathcal U$. Hence the set $\mathcal Uu \setminus \mathcal U$ is countably infinite. \end{itemize} \end{remarks} The following technical lemma is a direct consequence of Theorem~\ref{t22} and Lemmas 3.4 and 4.1 in \cite{KL3}: \begin{lemma}\label{l23} Let $(\alpha_i)$ be a sequence satisfying \eqref{23} and \eqref{24}. 
Then \begin{itemize} \item[\rm (i)] there exist arbitrarily large integers $m$ such that \begin{equation}\label{25} \overline{\alpha_{j+1} \ldots \alpha_m} < \alpha_1 \ldots \alpha_{m-j} \quad \mbox{for all }\, 0 \le j < m; \end{equation} \item[\rm (ii)] for all positive integers $m \geq 1$, \begin{equation}\label{26} \overline{\alpha_{1} \ldots \alpha_{m}} < \alpha_{m+1} \ldots \alpha_{2m}. \end{equation} \end{itemize} \end{lemma} \begin{proof}[Proof of Theorem~\ref{t11}] Since the set $\mathcal Uu \setminus \mathcal U$ is dense in $\mathcal Uu$, it is sufficient to show that $\overline{\mathcal{A}} \supset \mathcal Uu \setminus \mathcal U$. In order to do so, fix $q \in \mathcal Uu \setminus \mathcal U$. Then, according to Theorem~\ref{t22}, the quasi-greedy $q$-expansion $(\alpha_i)$ satisfies \eqref{23} and \eqref{24}. Let $k$ be a positive integer for which equality holds in \eqref{23}, i.e., \begin{equation*} (\alpha_i)= (\alpha_1 \ldots \alpha_k)^{\infty}. \end{equation*} According to Lemma~\ref{l23} there exists an integer $m \geq k$ such that \eqref{25} is satisfied. Let $N$ be a positive integer such that $kN \geq m $ and consider the sequence \[(\gamma_i)= (\gamma_i^N)=( \alpha_1 \ldots \alpha_k)^N (\alpha_1 \ldots \alpha_m \overline{\alpha_1 \ldots \alpha_m})^{\infty}.\] For ease of exposition we suppress the dependence of $(\gamma_i)$ on $N$. Note that $\gamma_i=\alpha_i$ for $ 1 \leq i \leq m+kN$. In particular, we have \begin{equation}\label{27} \gamma_i=\alpha_i \quad \mbox{for} \quad 1 \leq i \leq 2m. \end{equation} Since $(\gamma_i)$ has a periodic tail, the number $q_N$ determined by \[1 = \sum_{i=1}^{\infty} \frac{\gamma_i}{q_{N}^i}\] is an algebraic number and $q_N \to q$ as $N \to \infty$. According to Theorem~\ref{t21} it remains to verify the inequalities \eqref{21} and \eqref{22}. First we verify \eqref{21} and \eqref{22} for $j \geq kN$. 
For those values of $j$ the inequality \eqref{21} for $j+m$ is equivalent to \eqref{22} for $j$ and \eqref{22} for $j+m$ is equivalent to \eqref{21} for $j$. Therefore it suffices to verify the inequalities \eqref{21} and \eqref{22} for $kN \le j < kN +m$. Fix $kN \le j < kN +m$. From \eqref{23}, \eqref{26} and \eqref{27} we have \begin{eqnarray*} \gamma_{j+1} \ldots \gamma_{kN+2m}&=& \alpha_{j-kN +1} \ldots \alpha_m \overline{\alpha_1 \ldots \alpha_m} \\ & < & \alpha_{j-kN +1} \ldots \alpha_m \alpha_{m+1} \ldots \alpha_{2m} \\ &\leq& \alpha_1 \ldots \alpha_{kN + 2m - j}\\ &=& \gamma_1 \ldots \gamma_{kN + 2m - j} \end{eqnarray*} and from inequality \eqref{25} we have \begin{eqnarray*} \overline{\gamma_{j+1} \ldots \gamma_{kN+m}} &=& \overline{\alpha_{j-kN+1} \ldots \alpha_m} \\ &<& \alpha_1 \ldots \alpha_{kN+m-j} \\ &=& \gamma_1 \ldots \gamma_{kN+m-j}. \end{eqnarray*} Now we verify \eqref{21} for $j < kN$. If $m \leq j < kN$, then by \eqref{23} and \eqref{26}, \begin{eqnarray*} \gamma_{j+1} \ldots \gamma_{kN+2m} &<& \alpha_{j+1} \ldots \alpha_{kN+2m} \\ & \leq& \alpha_1 \ldots \alpha_{kN+2m-j} \\ & = & \gamma_1 \ldots \gamma_{kN+2m-j}. \end{eqnarray*} If $1 \leq j < m$, then by \eqref{23} and \eqref{25}, \begin{eqnarray*} \gamma_{j+1} \ldots \gamma_{kN+m+j} &=& \alpha_{j+1} \ldots \alpha_{kN+m} \overline{\alpha_1 \ldots \alpha_j}\\ & \leq & \alpha_1 \ldots \alpha_{kN+m-j}\overline{\alpha_1 \ldots \alpha_j}\\ & < & \alpha_1 \ldots \alpha_{kN+m-j} \alpha_{m-j+1} \ldots \alpha_m \\ &=& \gamma_1 \ldots \gamma_{kN+m}.\\ \end{eqnarray*} Finally, we verify \eqref{22} for $j < kN$. Write $j=k \ell +i \, , 0 \leq \ell < N$ and $0 \leq i < k$. If $i=0$, then \eqref{22} follows from the relation \begin{equation*} \overline{\gamma_{j+1}}=\overline{\alpha_1}= 0 < \alpha_1 = \gamma_1. \end{equation*} If $ 1 \le i < k$, then applying Lemma~\ref{l23}(ii) we get \begin{equation*} \overline{\alpha_{i+1} \ldots \alpha_{2i}} < \alpha_1 \ldots \alpha_i. 
\end{equation*} Hence \begin{eqnarray*} \overline{\gamma_{j+1} \ldots \gamma_{j+k}} &=& \overline{\alpha_{j+1} \ldots \alpha_{j+k}} \\ &=& \overline{\alpha_{i+1} \ldots \alpha_{i+k}} \\ &<& \alpha_1 \ldots \alpha_k \\ &=& \gamma_1 \ldots \gamma_k.\\ \end{eqnarray*} (In order for the first equality to hold in case $\ell = N-1$, we need the condition $m \geq k$.) \end{proof} \begin{remarks} \mbox{} \begin{itemize} \item Since the set $\mathcal Uu$ is a perfect set and $\mathcal Uu \setminus \mathcal U$ is countable, each neighborhood of $q \in \mathcal U$ contains uncountably many elements of $\mathcal U$. Hence the set of transcendental univoque numbers is dense in $\mathcal Uu$ as well. \item Recently, Allouche, Frougny and Hare (\cite{AHF}) proved that there also exist univoque Pisot numbers. In particular they determined the smallest three univoque Pisot numbers. \end{itemize} \end{remarks} \end{document}
\begin{document} \title{On the expected diameter of planar Brownian motion} \begin{abstract} Known results show that the diameter $d_1$ of the trace of planar Brownian motion run for unit time satisfies $1.595 \leq \Exp d_1 \leq 2.507$. This note improves these bounds to $1.601 \leq \Exp d_1 \leq 2.355$. Simulations suggest that $\Exp d_1 \approx 1.99$. \end{abstract} \noindent {\em Keywords:} Brownian motion; convex hull; diameter. \noindent {\em 2010 Mathematics Subject Classifications:} 60J65 (Primary) 60D05 (Secondary). Let $(b_t, t \in [0,1])$ be standard planar Brownian motion, and consider the set $b[0,1] = \{ b_t : t \in [0,1] \}$. The Brownian convex hull ${\mathcal{H}}_1 := \mathop{{\rm hull}} b [0,1]$ has been well-studied from L\'evy~\cite[\S52.6, pp.~254--256]{levy} onwards; the expectations of the perimeter length $\ell_1$ and area $a_1$ of ${\mathcal{H}}_1$ are given by the exact formulae $\Exp \ell_1 = \sqrt{8\pi}$ (due to Letac and T\'akacs \cite{letac,takacs}) and $\Exp a_1 = \pi /2$ (due to El Bachir \cite{bachir}). Another characteristic is the \emph{diameter} \[d_1 := \mathop{{\rm diam}} {\mathcal{H}}_1 = \mathop{{\rm diam}} b [0,1]=\sup_{x,y\in b[0,1]}\|x-y\|,\] for which, in contrast, no explicit formula is known. The exact formulae for $\Exp \ell_1$ and $\Exp a_1$ rest on geometric integral formulae of Cauchy; since no such formula is available for $d_1$, it may not be possible to obtain an explicit formula for $\Exp d_1$. However, one may get bounds. By convexity, we have the almost-sure inequalities $2 \leq \ell_1/ d_1 \leq \pi$, the extrema being the line segment and shapes of constant width (such as the disc). In other words, \[ \frac{\ell_1}{\pi} \leq d_1 \leq \frac{\ell_1}{2} .\] The formula of Letac and Tak\'acs \cite{letac,takacs} says that $\Exp \ell_1 = \sqrt{ 8 \pi}$, so we get: \begin{proposition} \label{prop1} $\sqrt{ 8 / \pi } \leq \Exp d_1 \leq \sqrt{2 \pi}$. 
\end{proposition} Note that $\sqrt{8 / \pi} \approx 1.5958$ and $\sqrt{2\pi} \approx 2.5066$. In this note we improve both of these bounds. For the lower bound, we note that $b[0,1]$ is compact and thus, as a corollary of Lemma \ref{lem:diam} below, we have the formula \begin{equation} \label{eq:d-formula} d_1 = \sup_{0 \leq \theta \leq \pi} r (\theta) , \end{equation} where $r$ is the parametrized range function given by \[ r (\theta) = \sup_{0 \leq s \leq 1} \left(b_s \cdot \be_\theta\right) - \inf_{0 \leq s \leq 1} \left(b_s \cdot \be_\theta\right), \] with $\be_\theta$ being the unit vector $(\cos \theta, \sin \theta)$. Feller~\cite{feller} established that \begin{equation} \label{eq:feller-bounds} \Exp r (\theta) = \sqrt{ 8 /\pi } \quad \text{ and } \quad \Exp ( r(\theta)^2 ) = 4 \log 2 , \end{equation} and the density of $r(\theta)$ is given explicitly as \begin{equation} \label{eq:density} f (r ) = \frac{8}{\sqrt{2 \pi}} \sum_{k=1}^\infty (-1)^{k-1} k^2 \exp \{ - k^2 r^2 /2 \} , ~ ( r \geq 0). \end{equation} Combining~\eqref{eq:d-formula} with~\eqref{eq:feller-bounds} gives immediately $\Exp d_1 \geq \Exp r (0) = \sqrt{ 8 /\pi }$, which is just the lower bound in Proposition~\ref{prop1}. For a better result, a consequence of~\eqref{eq:d-formula} is that $d_1 \geq \max \{ r(0), r(\pi/2) \}$. Observing that $r(0)$ and $r(\pi/2)$ are independent, we get: \begin{lemma} \label{lem1} $\Exp d_1 \geq \Exp \max \{ X_1, X_2 \}$, where $X_1$ and $X_2$ are independent copies of $X := r(0)$. \end{lemma} It seems hard to explicitly compute $\Exp \max \{ X_1, X_2 \}$ in Lemma~\ref{lem1}, because although the density given at~\eqref{eq:density} is known explicitly, it is not very tractable. Instead we obtain a lower bound. Since \[ \max \{ x , y \} = \frac{1}{2} \left( x + y + | x -y | \right) \] we get \begin{equation} \label{eq:max} \Exp \max \{ X_1, X_2 \} = \Exp X + \frac{1}{2} \Exp | X_1 - X_2 | . 
\end{equation} Thus with Lemma~\ref{lem1}, the lower bound in Proposition~\ref{prop1} is improved given any non-trivial lower bound for $\Exp | X_1 - X_2 |$. Using the fact that for any $c \in {\mathbb{R}}$, if $m$ is a median of $X$, $\Exp | X - c | \geq \Exp | X - m |$, we see that \begin{align*} \Exp | X_1 - X_2 | \geq \Exp | X - m | . \end{align*} Again, the intractability of the density at~\eqref{eq:density} makes it hard to exploit this. Instead, we provide the following as a crude lower bound on $\Exp | X_1 - X_2 |$. \begin{lemma} \label{lem:ineq1} For any $a,h >0$, \begin{equation} \Exp | X_1 - X_2 | \geq 2 h \,\Pr ( X \leq a )\, \Pr ( X \geq a + h) . \nonumber \label{eqn:ineq1} \end{equation} \end{lemma} \begin{proof} We have \begin{align*} \Exp | X_1 - X_2 | & \geq \Exp\left[ |X_1-X_2|\1\{X_1\leq a, X_2 \geq a+h\} \right]\\ &{}\qquad{}+\Exp\left[ |X_1-X_2|\1\{X_2\leq a, X_1 \geq a+h\} \right] \\ & \geq h \,\Pr(X_1 \leq a)\,\Pr(X_2\geq a+h)+h \,\Pr(X_2\leq a)\,\Pr(X_1\geq a+h) \\ & = 2h \,\Pr(X\leq a)\,\Pr(X\geq a+h), \end{align*} which proves the statement. \end{proof} This lower bound yields the following result. \begin{proposition} For $a, h > 0$ define \[ g ( a, h) := h \left( \frac{4}{\pi} \exp \left\{ - \frac{\pi^2}{2a^2 } \right\} - \frac{4}{3\pi} \exp \left\{ - \frac{9\pi^2}{2a^2 } \right\} \right) \left( 1 - \frac{4}{\pi} \exp \left\{ - \frac{\pi^2}{8(a+h)^2 } \right\} \right) .\] Then $\Exp d_1 \geq \sqrt{ 8 / \pi} + g ( 1.492, 0.337) \approx 1.6014$. \end{proposition} \begin{proof} Consider \[ Z := \sup_{0 \leq s \leq 1} | b_s \cdot \be_0 | . 
\] Then it is known (see \cite{jp}) that for $x > 0$, \begin{equation} \label{eq:jp} \frac{4}{\pi} \exp \left\{ - \frac{\pi^2}{8x^2 } \right\} - \frac{4}{3\pi} \exp \left\{ - \frac{9\pi^2}{8x^2 } \right\} \leq \Pr ( Z < x ) \leq \frac{4}{\pi} \exp \left\{ - \frac{\pi^2}{8x^2 } \right\} .\end{equation} Moreover, we have \[ Z \leq X \leq 2 Z .\] Since $X \leq 2Z$, we have \[ \Pr ( X \leq a ) \geq \Pr ( Z \leq a/2) \geq \frac{4}{\pi} \exp \left\{ - \frac{\pi^2}{2a^2 } \right\} - \frac{4}{3\pi} \exp \left\{ - \frac{9\pi^2}{2a^2 } \right\} ,\] by the lower bound in~\eqref{eq:jp}. On the other hand, \[ \Pr ( X \geq a +h ) \geq \Pr ( Z \geq a+h ) \geq 1 - \frac{4}{\pi} \exp \left\{ - \frac{\pi^2}{8(a+h)^2 } \right\} ,\] by the upper bound in~\eqref{eq:jp}. Combining these two bounds and applying Lemma \ref{lem:ineq1} we get $\Exp | X_1 - X_2 | \geq 2 g (a , h)$. So from~\eqref{eq:max} and the fact that $\Exp X = \sqrt{ 8 / \pi}$ by~\eqref{eq:feller-bounds} we get $\Exp d_1 \geq \sqrt{ 8 / \pi} + g (a,h)$. Numerical evaluation using MAPLE suggests that $(a,h) = (1.492,0.337)$ is close to optimal, and this choice gives the statement in the proposition. \end{proof} We also improve the upper bound in Proposition~\ref{prop1}. \begin{proposition} \label{prop3} $\Exp d_1 \leq \sqrt{ 8 \log 2 } \approx 2.3548$. \end{proposition} \begin{proof} First, we claim that \begin{equation} \label{eq:rectangle-claim} d_1^2 \leq r(0)^2 + r ( \pi/2)^2 . \end{equation} It follows from~\eqref{eq:rectangle-claim} and~\eqref{eq:feller-bounds} that \[ \Exp ( d_1^2 ) \leq \Exp ( X_1^2 + X_2^2 ) = 2 \Exp ( X^2 ) = 8 \log 2 . \] The result now follows by Jensen's inequality. It remains to prove the claim~\eqref{eq:rectangle-claim}. Note that the diameter is an increasing function, that is, if $A\subseteq B$ then $\mathop{{\rm diam}} A \leq \mathop{{\rm diam}} B$. 
Note also, that by the definition of $r(\theta)$, $b[0,1] \subseteq \bz + [0,r(0)]\times[0,r(\pi/2)]=: R_\bz$ for some $\bz\in {\mathbb{R}}^2$. Since the diameter of the set $R_\bz$ is attained at the diagonal, \begin{equation*} \mathop{{\rm diam}} R_\bz = \sqrt{r(0)^2 + r(\pi/2)^2}, \end{equation*} for all $\bz \in {\mathbb{R}}^2$, and we have $\mathop{{\rm diam}} b[0,1] \leq \mathop{{\rm diam}} R_\bz$, the result follows. \end{proof} We make one further remark about second moments. In the proof of Proposition~\ref{prop3}, we saw that $\Exp ( d_1^2 ) \leq 8 \log 2 \approx 5.5452$. A bound in the other direction can be obtained from the fact that $d_1^2 \geq \ell_1^2 / \pi^2$, and we have (see~\cite[\S 4.1]{wx2}) that \[ \Exp ( \ell_1^2 ) = 4 \pi \int_{-\pi/2}^{\pi/2} {\mathrm{d}} \theta \int_0^\infty {\mathrm{d}} u \cos \theta \frac{ \cosh (u \theta )} { \sinh ( u \pi /2 ) } \tanh \left( \frac{ (2 \theta + \pi) u }{4} \right) \approx 26.1677 ,\] which gives $\Exp (d_1^2 ) \geq 2.651$. Finally, for completeness, we state and prove the lemma which was used to obtain equation \eqref{eq:d-formula}. \begin{lemma} \label{lem:diam} Let $A \subset {\mathbb{R}}^d$ be a nonempty compact set, and let $r_A(\theta) = \sup_{x \in A} ( x \cdot \be_\theta ) - \inf_{x \in A} (x \cdot \be_\theta)$. Then \[ \mathop{{\rm diam}} A = \sup_{0 \leq \theta \leq \pi} r_A (\theta ). \] \end{lemma} \begin{proof} Since $A$ is compact, for each $\theta$ there exist $x, y \in A$ such that \begin{align*} r_A (\theta) & = x \cdot \be_\theta - y \cdot \be_\theta \\ & = ( x - y) \cdot \be_\theta \leq \| x - y \| .\end{align*} So $\sup_{0 \leq \theta \leq \pi} r_A (\theta) \leq \sup_{x, y \in A} \| x - y \| = \mathop{{\rm diam}} A$. It remains to show that $\sup_{0 \leq \theta \leq \pi} r_A (\theta) \geq \mathop{{\rm diam}} A$. This is clearly true if $A$ consists of a single point, so suppose that $A$ contains at least two points. 
Suppose that the diameter of $A$ is achieved by $x, y \in A$ (such points exist since $A$ is compact), and let $z = y -x$; after relabeling $x$ and $y$ if necessary, we may assume that $\hat z := z/\|z\|= \be_{\theta_0}$ for some $\theta_0 \in [0,\pi]$. Then \begin{align*} \sup_{0 \leq \theta \leq \pi} r_A (\theta) & \geq r_A (\theta_0 ) \geq y \cdot \be_{\theta_0} - x\cdot \be_{\theta_0} \\ & = z \cdot \hat z = \| z \| = \mathop{{\rm diam}} A ,\end{align*} as required. \end{proof} \end{document}
\begin{document} \title[Cohomology of cong. subgroups, Steinberg modules, and real quad. fields]{Cohomology of congruence subgroups of $\SL_3(\mathbb{Z})$, Steinberg modules, and real quadratic fields} \author{Avner Ash} \address{Boston College, Chestnut Hill, MA 02467} \email{[email protected]} \author{Dan Yasaki} \address{UNCG, Greensboro, NC 27412} \email{d\[email protected]} \date{\today~\now} \keywords{arithmetic homology, Steinberg representation, real quadratic field, general linear group, arithmetic group} \subjclass[2010]{Primary 20J06; Secondary 11F67, 11F75} \begin{abstract} We investigate the homology of a congruence subgroup $\Gamma$ of $\SL_3(\mathbb{Z})$ with coefficients in the Steinberg modules $\St(\mathbb{Q}^3)$ and $\St(E^3)$, where $E$ is a real quadratic field and the coefficients are $\mathbb{Q}$. By Borel-Serre duality, $H_0(\Gamma, \St(\mathbb{Q}^3))$ is isomorphic to $H^3(\Gamma,\mathbb{Q})$. Taking the image of the connecting homomorphism $H_1(\Gamma, \St(E^3)/\St(\mathbb{Q}^3))\to H_0(\Gamma, \St(\mathbb{Q}^3))$, followed by the Borel-Serre isomorphism, we obtain a naturally defined Hecke-stable subspace $H(\Gamma,E)$ of $H^3(\Gamma,\mathbb{Q})$. We conjecture that $H(\Gamma,E)$ is independent of $E$ and consists of the cuspidal cohomology $H_\mathrm{cusp}^3(\Gamma,\mathbb{Q})$ plus a certain subspace of $H^3(\Gamma, \mathbb{Q})$ that is isomorphic to the sum of the cuspidal cohomologies of the maximal faces of the Borel-Serre boundary. We report on computer calculations of $H(\Gamma,E)$ for various $\Gamma$, $E$ which provide evidence for the conjecture. We give a partial heuristic for the conjecture. \end{abstract} \maketitle \section{Introduction}\label{intro} Let $\Gamma$ be a congruence subgroup of $\SL_n(\mathbb{Z})$. For $K$ an extension of $\mathbb{Q}$, denote the Steinberg module with $\mathbb{Q}$-coefficients of the vector space $K^n$ by $\St(K^n)$. 
When $K=\mathbb{Q}$ and $n$ is understood from the context, set $\St=\St(\mathbb{Q}^n)$. By filtering the inclusion $\St \subset \St(K^n)$ as in~\cite{A}, we can compare the homology groups $H_*(\Gamma,\St)$ and $H_*(\Gamma,\St(K^n))$. Now specialize to $n=2$ or $3$ and $E$ a real quadratic field. In these cases, the filtration has only one step, and the comparison boils down to the connecting homomorphisms $\partial\colon H_k(\Gamma,C)\to H_{k-1}(\Gamma,\St)$, where $C=\St(E^n)/ \St$. It turns out that we can describe $C$ in terms of ${\mathcal O}_E^\times$, the units in the ring of integers of $E$. In~\cite{AY} we studied the case $n=2$ and $k=1$. In this paper we study $n=3$ and $k=1$. We are interested in the image of $\partial$, which we call $H(\Gamma,E)$. By Borel-Serre duality we identify $H_{0}(\Gamma,\St)$ with $H^\nu(\Gamma,\mathbb{Q})$, where $\nu=1$ if $n=2$, and $\nu=3$ if $n=3$. Then $H(\Gamma,E)$ is the span of a set of modular symbols built in a certain way from those $\gamma\in\Gamma$ which are images of elements of ${\mathcal O}_E^\times$ embedded into $\Gamma$. The units of $E$ are reflected in the cohomology of $\Gamma$. When $n = 2$ it is obvious that $H(\Gamma,E)$ is contained in the cuspidal cohomology $H^1_\mathrm{cusp}(\Gamma,\mathbb{Q})$, and in~\cite{AY} we conjectured that it is equal to $H^1_\mathrm{cusp}(\Gamma,\mathbb{Q})$. We presented numerical evidence for the conjecture, and we also proved the conjecture under the assumption of the Generalized Riemann Hypothesis. When $n=3$ it is no longer obvious what the relationship should be between $H(\Gamma,E)$ and cuspidal cohomology. 
After some numerical experimentation, we conjectured that $H(\Gamma,E)$ is the sum of $H^3_\mathrm{cusp}(\Gamma,\mathbb{Q})$ and a subspace of $H^3(\Gamma,\mathbb{Q})$ that maps isomorphically to the part of the cohomology of the Borel-Serre boundary of the locally symmetric space for $\Gamma$ that consists of cuspidal cohomology of the maximal boundary faces. We then tested the conjecture with further computation, without finding any counter-examples. Thus $H(\Gamma,E)$ provides an interesting way of constructing cuspidal cohomology, which we hope may enable progress in the study of $H^3_\mathrm{cusp}(\Gamma,\mathbb{Q})$. \footnote{We plan to study the analogue of $H(\Gamma,E)$ for other fields $E$ in future work.} The subspace $H(\Gamma,E)$ is stable under the action of the Hecke operators. We use this fact as a check on our computations: Our computations identify a candidate space $H$ for $H(\Gamma,E)$, which we cannot prove is actually all of $H(\Gamma,E)$ unless $H$ happens to be all of $H^3(\Gamma,\mathbb{Q})$. However, we do verify that $H$ is Hecke stable, \footnote{Of course, in practice we can only verify this for a finite number of Hecke operators.} and this gives the check. If we tensor with $\mathbb{C}$, $H(\Gamma,E)\otimes_\mathbb{Q}\mathbb{C}$ can be viewed as a subspace of arithmetic cohomology, again via the Borel-Serre duality isomorphism: \[ H_0(\Gamma,\St(\mathbb{Q}^3)\otimes_\mathbb{Q}\mathbb{C})\xrightarrow{\sim} H^{3}(\Gamma,\mathbb{C}). \] Thus $H(\Gamma,E)$ is connected to automorphic representations. We use the Hecke action computationally to identify the automorphic constituents of $H(\Gamma,E)$. The rest of this introduction summarizes in more detail the contents of the paper. First we define the congruence subgroups for which we have carried out our computations. \begin{definition}\label{N1} Let $N$ be a positive integer.
Writing matrices in block notation with respect to the partition $(1,n-1)$, define \begin{itemize} \item $\Gamma_0(N,n)^\pm$ to be the subgroup of $\GL_n(\mathbb{Z})$ consisting of matrices congruent to \[ \mat{ *&*\\ 0&* } \pmod N. \] \item $\Gamma_0(N,n)=\Gamma_0(N,n)^\pm \cap \SL_n(\mathbb{Z})$. \end{itemize} If $n$ is understood from the context, we suppress it from the notation. \end{definition} In this paper we take $\Gamma=\Gamma_0(N,3)$. When we do the actual computations, we run them for $\Gamma_0(N,3)^\pm$ because that speeds them up. This is not a problem, because $H_i(\Gamma_0(N,3),\St)$ is isomorphic to $H_i(\Gamma_0(N,3)^\pm,\St)$ for all $i$. \footnote{ Lemma: Let $n$ be an odd integer, and let $R$ be a ring in which $2$ is invertible. Let $M$ be an $R[\Gamma_0(N,n)^\pm]$-module, and suppose that $-I$ acts trivially on $M$. Then the inclusion of $\Gamma_0(N,n)$ into $\Gamma_0(N,n)^\pm$ induces an isomorphism $H_i(\Gamma_0(N,n),M)\simeq H_i(\Gamma_0(N,n)^\pm,M)$ for all $i$. Proof: $\Gamma_0(N,n)^\pm$ contains $\Gamma_0(N,n)$ as a subgroup of index two and is generated by $\Gamma_0(N,n)$ and $-I$. Let $J$ be the group $\{I,-I\}$. Because 2 is invertible, transfer shows that $H_i(\Gamma_0(N,n),M)\simeq H_i(\Gamma_0(N,n)^\pm,M)_J$. But $J$ is central so it acts trivially on $H_i(\Gamma_0(N,n)^\pm,M)$. } We perform computations to find $H(\Gamma,E)$ for \begin{itemize} \item all $N\le 50$, all prime $N$ with $51\le N\le 100$, and for $N=11^2$ and $13^2$. \item the real quadratic fields $E=\mathbb{Q}(\sqrt\Delta)$ with squarefree $\Delta\le 10$. \end{itemize} On the basis of some of these computations, we make a conjecture about $H(\Gamma,E)$, which is borne out by the rest of our computations. A numerical consequence of the conjecture is that the codimension of $H(\Gamma,E)$ in $H^3(\Gamma,\mathbb{Q})$ should equal $b=\dim H_1(T/\Gamma)$, where $T$ is the Tits building of $\GL_3(\mathbb{Q})$ (Definition~\ref{tits}).
This was the first thing we noticed in our computational results. To state the full conjecture, we introduce some objects which are explained in greater detail in Sections~\ref{bound} and~\ref{conj}. Let $\mathcal{S}$ be the symmetric space for $\Gamma$, and let $X$ be the Borel-Serre compactification of the locally symmetric space $\Gamma\backslash \mathcal{S}$. There is a natural isomorphism $H^*(\Gamma,\mathbb{Q})\simeq H^*(X,\mathbb{Q})$. The interior cohomology $H_!^*(\Gamma,\mathbb{Q})$ of $\Gamma$ is defined to be what corresponds under this isomorphism to the kernel of the restriction map $H^*(X,\mathbb{Q})\to H^*(\partial X,\mathbb{Q})$ \footnote{Tensored with $\mathbb{C}$ we obtain the cuspidal cohomology of $\Gamma$: $H_!^*(\Gamma,\mathbb{Q})\otimes_\mathbb{Q} \mathbb{C}= H_\mathrm{cusp}^*(\Gamma,\mathbb{C})$. This is not true in general if $n\ge4$.}. Lee and Schwermer~\cite{lee-schwermer} show that $H^3(\Gamma,\mathbb{Q})$ is the direct sum of three pieces, $H_!^*(\Gamma,\mathbb{Q})$, $A$, and $B$. The subspace $A$ comes from the cuspidal cohomology of the maximal faces of $\partial X$ and is describable in terms of holomorphic cuspforms of weight 2 for subgroups of $\SL_2(\mathbb{Z})$. The subspace $B$ has dimension equal to $b$, defined in the paragraph above. \begin{cnj}\label{conj-intro} $H(\Gamma,E)= H_!^*(\Gamma,\mathbb{Q})+A$. \end{cnj} We do not have a good explanation for why the conjecture should be true. In Section~\ref{conj} we give a heuristic argument for why $H(\Gamma,E)$ should contain $A$. Our computations are not definitive as long as $H(\Gamma,E) \ne H^3(\Gamma, \mathbb{Q})$, because there is the possibility that further computation with additional real quadratic units could conceivably discover more elements in $H(\Gamma,E)$. The facts that the space we compute to be putatively equal to $H(\Gamma,E)$ is Hecke-stable and that the computations agree with our conjecture give us confidence in the results reported here.
Section~\ref{mod} recalls basic facts about the Steinberg module and about modular symbols. Section~\ref{conn} derives a formula for the connecting homomorphism $\partial$ in terms of modular symbols. Section~\ref{stuff} determines the elements of $\Gamma$ corresponding to units in the ring of integers of $E$. Section~\ref{2} describes, with proof, an algorithm for computing the image of $\partial$. In Section~\ref{cusps}, we offer a discussion of the $\Gamma$-orbits of parabolic subgroups of $\GL_3(\mathbb{Q})$, which is necessary to determine the faces of the Borel-Serre boundary $\partial X$. Also we compute the Euler characteristic of the Tits building modulo $\Gamma$, which is the nerve of $\partial X$. Section~\ref{bound} describes $\partial X$ in detail, following~\cite{lee-schwermer}. The conjecture itself is contained and discussed in Section~\ref{conj}. In Section~\ref{hecke} we review the Hecke operators. Section~\ref{what} describes our method of computation, which uses the Voronoi cellulation of the symmetric space. We also prove in Section~\ref{what} a result of independent interest: for $n\le 4$, the Voronoi homology in its lowest degree is isomorphic to $H_0(\Gamma,\St)$. In fact, we prove this for the Voronoi homology and the Steinberg module with coefficients in a general ring $R$ (not just $\mathbb{Q}$). For this reason, we define the Steinberg module in Section~\ref{mod} over a general ring $R$. Section~\ref{comp} explains the methods used in our computations, and Section~\ref{results} discusses the computational results. Thanks to P.~Gunnells for helpful comments. Also to D.~Doud and independently to M.~Masdeu for informing us of relevant computations of theirs. \section{ The Steinberg module and Steinberg homology}\label{mod} For more information about the Steinberg module than is given here, see the introduction to~\cite{APS} and its references. Let $K$ be a field, $R$ a ring, and $n\ge2$ an integer. 
Let $K^n$ be the vector space of column vectors. \begin{definition}\label{tits} The Tits building $T(K^n)$ is the simplicial complex with one vertex for each subvector space $V\subset K^n$ with $0\ne V\ne K^n$, where the vertices $V_1,V_2,\dots,V_k$ span a simplex if and only if they can be arranged into a flag. \end{definition} \begin{definition}\label{stein} The Steinberg module $\St(K^n;R)$ is the reduced homology of the Tits building: \[ \St(K^n;R)=\widetilde H_{n-2}(T(K^n),R). \] \end{definition} Since $\GL_n(K)$ acts on the Tits building, it also acts on $\St(K^n;R)$, making the Steinberg module a left-module for the group ring $R\GL_n(K)$. The Steinberg module $\St(K^n;\mathbb{Z})$ is a free $\mathbb{Z}$-module, and $\St(K^n;R)\simeq\St(K^n;\mathbb{Z})\otimes_\mathbb{Z} R$. \begin{definition} Let $\set{v_1,v_2, \dots,v_n}$ be a basis for $K^n$. The modular symbol $[v_1,v_2,\dots,v_n]$ denotes the element in $\St(K^n;R)$ which is the fundamental class of the $(n-2)$-sphere whose vertices are the subspaces of $K^n$ generated by the proper non-empty subsets of $\set{v_1,v_2,\dots,v_n}$. We may and do fix orientations on these spheres in such a way that the action of an element $g\in \GL_n(K)$ on the symbol $[v_1,v_2,\dots,v_n]$ satisfies \[ g[v_1,v_2,\dots,v_n]=[gv_1,gv_2,\dots,gv_n]. \] We extend the notation to all $n$-tuples of vectors by setting $[w_1,w_2,\dots,w_n]=0$ when $w_1,w_2,\dots,w_n$ are linearly dependent vectors in $K^n$. If $m$ is the matrix with columns $a_1,a_2,\dots,a_n$, we write \[ [m]=[a_1,a_2,\dots,a_n], \] so that $g[m]=[gm]$ for any $g\in \GL_n(K)$. \end{definition} We recall some standard facts about modular symbols and the Steinberg module. See \cite{ash-rudolph}. \begin{theorem}\label{thm:mod-props} Let $K$ be any field. \begin{enumerate} \item \label{it:gens} As abelian group, $\St(K^n;\mathbb{Z})$ is generated by $[v_1,v_2,\dots,v_n]$ as $v_1,v_2,\dots,v_n$ range over all elements of $K^n$. 
\item \label{it:relations} The following relations hold: \begin{enumerate} \item $[v_1,v_2,\dots,v_n]=0$ if $v_1,v_2,\dots,v_n$ do not span $K^n$. \item $[v_1,v_2,\dots,v_n]=[kv_1,v_2,\dots,v_n]$ for any nonzero $k\in K$; \item $[v_1,v_2,\dots,v_n]=(-1)^s[v_{s(1)},v_{s(2)}\dots,v_{s(n)}]$ for any permutation $s \in S_n$; \item $[v_1,v_2,\dots,v_n]=[x,v_2,\dots,v_n]+\cdots+ [v_1,\dots,v_{i-1},x,v_{i+1},\dots, v_n]+\newline\quad \dots+[v_1,v_2,\dots,v_{n-1},x]$ for any nonzero $x\in K^n$. \end{enumerate} \item\label{it:unipotent} $\St(K^n;\mathbb{Z})$ has as a free $\mathbb{Z}$-basis the symbols $[u]$, where $u$ runs over all upper triangular unipotent matrices in $\GL_n(K)$. \end{enumerate} \end{theorem} We call the fourth relation ``passing through $x$''. We need the following theorem in the form stated, in order to apply it to the Voronoi cellulation. It differs only slightly from an equivalent theorem proved by Bykovskii \cite[Theorem~1]{By} and follows immediately from it. See~\cite{CFP} for a different proof of this theorem and also related results for the Steinberg module of vector spaces over fields other than $\mathbb{Q}$. \begin{theorem}\label{by} The Steinberg module $\St(\mathbb{Q}^n;\mathbb{Z})$ is isomorphic to the quotient of the free abelian group generated by symbols $\gen{a_1,a_2,\dots,a_n}$ for all ordered $\mathbb{Z}$-bases $\set{a_1,a_2,\dots,a_n}$ of $\mathbb{Z}^n$ modulo the following relations: \begin{enumerate} \item $\gen{a_1,a_2,\dots,a_n}=(-1)^s\gen{a_{s(1)},a_{s(2)},\dots,a_{s(n)}}$ for any permutation $s \in S_n$; \item $ \gen{-a_1,a_2,\dots,a_n}=\gen{a_1,a_2,\dots,a_n}$, for all $\mathbb{Z}$-bases $\set{a_1,a_2,\dots,a_n}$ of $\mathbb{Z}^n$; \item $\gen{a,b,a_3,\dots,a_n}+\gen{-b,a+b,a_3,\dots,a_n}+\gen{a+b,-a,a_3,\dots,a_n}=0$, for all $\mathbb{Z}$-bases $\set{a,b,a_3,\dots,a_n}$ of $\mathbb{Z}^n$. \end{enumerate} The isomorphism is given by $\gen{a_1,a_2,\dots,a_n} \mapsto [a_1,a_2,\dots,a_n]$. 
\end{theorem} \begin{definition} If $\Gamma$ is any subgroup of $\GL_n(\mathbb{Q})$, we define the Steinberg homology of $\Gamma$ over $R$ to be $H_*(\Gamma, \St(\mathbb{Q}^n; R))$. \end{definition} The zero-th Steinberg homology group, $H_0(\Gamma, \St(\mathbb{Q}^n; R))$, will be identified with the group of co-invariants $\St(\mathbb{Q}^n; R)_\Gamma$. \begin{definition} If $\Gamma$ is a subgroup of $\GL_n(\mathbb{Q})$, and $m\in M_n(\mathbb{Q})$, we denote the image of $[m]$ in $\St(\mathbb{Q}^n; R)_\Gamma$ by $[m]_\Gamma$. \end{definition} \begin{corollary} The Steinberg homology $H_0(\Gamma, \St(\mathbb{Q}^n;R))$ is isomorphic to the free $R$-module generated by symbols $\gen{a_1,a_2,\dots,a_n}_\Gamma$ for all ordered $\mathbb{Z}$-bases $\set{a_1,a_2,\dots,a_n}$ of $\mathbb{Z}^n$ modulo the following relations: \begin{enumerate} \item $\gen{a_1,a_2,\dots,a_n}_\Gamma=(-1)^s\gen{a_{s(1)},a_{s(2)},\dots,a_{s(n)}}_\Gamma$ for any permutation $s \in S_n$; \item $ \gen{-a_1,a_2,\dots,a_n}_\Gamma=\gen{a_1,a_2,\dots,a_n}_\Gamma$, for all $\mathbb{Z}$-bases $\set{a_1,a_2,\dots,a_n}$ of $\mathbb{Z}^n$; \item $\gen{a,b,a_3,\dots,a_n}_\Gamma+\gen{-b,a+b,a_3,\dots,a_n}_\Gamma+\gen{a+b,-a,a_3,\dots,a_n}_\Gamma=0$, for all $\mathbb{Z}$-bases $\set{a,b,a_3,\dots,a_n}$ of $\mathbb{Z}^n$; \item $ \gen{a_1,a_2,\dots,a_n}_\Gamma=\gen{\gamma a_1,\gamma a_2,\dots, \gamma a_n}_\Gamma$, for all $\mathbb{Z}$-bases $\set{a_1,a_2,\dots,a_n}$ of $\mathbb{Z}^n$ and all $\gamma\in\Gamma$. \end{enumerate} \end{corollary} One reason for the importance of the Steinberg module is the Borel-Serre duality theorem~\cite[Theorem~11.4.2.]{BS}. We quote the case of it needed for this paper: \begin{theorem}\label{BS} Let $\Gamma$ be a subgroup of finite index in $\SL_n(\mathbb{Z})$, and let $k$ be a field whose characteristic is prime to the order of all torsion elements of $\Gamma$. 
Then for any $i$, there is an isomorphism \[ \lambda \colon H_i(\Gamma, \St(\mathbb{Q}^n;k)) \to H^{\nu-i}(\Gamma, k), \] where $\nu=n(n-1)/2$. \end{theorem} \section{The connecting homomorphism} \label{conn} Let $G$ be a subgroup of $\GL_n(\mathbb{Q})$, and let $s\in G$. Let $M$ be a left $\mathbb{Q} G$-module, and let $m\in M^s$, the set of elements of $M$ fixed by $s$. Let $F_\bullet\to\mathbb{Q}$ be the standard resolution of the group $G$ by free $\mathbb{Q} G$-modules. So $F_i$ is the $\mathbb{Q}$-vector space with basis $(g_0,g_1,\dots,g_i)\in G^{i+1}$, and the action is given by \[g(g_0,g_1,\dots,g_i)=(gg_0,gg_1,\dots,gg_i).\] We also use the ``bar'' notation, $[h_1|h_2|\cdots|h_i]=(1,h_1,h_1h_2,\dots,h_1h_2\cdots h_i)$. In this paper, we only need to deal with $1$-cycles and their boundaries. The cycles we require are of the form $z=(1,s)\otimes_G m=[s]\otimes_G m$. The boundary of $z$ is $((s)-(1))\otimes_G m = (1)\otimes_G (s^{-1}-1)m$, which vanishes since $sm=m$. Let $\partial$ denote the connecting homomorphism in the long exact sequence of homology derived from the short exact sequence of left $\mathbb{Q} G$-modules \[ 0\to D\to N\to M \to 0. \] We compute $\partial(z)$ as follows: Lift $m$ to $n\in N$ so that $s^{-1} n=n+d$, for some $d\in D$. Lift the cycle to the chain $[s]\otimes_G n$. The boundary of this chain is a cycle with coefficients in $D$, and its image in $H_0(G,D)$ is equal to $\partial(z)$. So \[\partial(z)= (1)\otimes_G (s^{-1}-1)n = (1)\otimes_G d\in D_G.\] \section{Unital matrices}\label{stuff} The algorithm to be developed in Section~\ref{2} requires us to begin with a supply of ``unital'' matrices. We define them in this section. If $v$ is a vector in affine space, $\hat v$ denotes the image of $v$ in the corresponding projective space. First, let $n$ be any positive integer, and let $E/\mathbb{Q}$ be a finite extension. Let $b\in E^n\setminus\mathbb{Q}^n$, and assume that the entries of $b$ span $E$ over $\mathbb{Q}$.
Suppose $g\in\GL_n(\mathbb{Q})$ stabilizes $\hat b$ in $\mathbb{P}(E^n)$. Then there exists $x(g)\in E^\times$ such that \[ g b = x(g) b. \] The map $g\mapsto x(g)$ is a homomorphism from the stabilizer of $\hat b$ in $\GL_n(\mathbb{Q})$ to $E^\times$. The minimal polynomial of $x(g)$ over $\mathbb{Q}$ divides the characteristic polynomial of $g$. Therefore, if $g$ is also in $\GL_n(\mathbb{Z})$, then $x(g)$ must be a unit in ${\mathcal O}_E^\times.$ \begin{definition}\label{unital} Let $n=2$. Let $\beta\in E\setminus\mathbb{Q}$. We say that $\gamma\in\GL_2(\mathbb{Z})$ is \emph{$\beta$-unital} if $\gamma(\beta:1)=(\beta:1)$. If $\gamma$ is $\beta$-unital for some $\beta\in E\setminus\mathbb{Q}$, we say $\gamma$ is \emph{unital}. \end{definition} If $n=2$ and $E$ is a real quadratic field, then $\gamma$ is $\beta$-unital if and only if there exists an integer $k$ such that \[ \gamma\begin{bmatrix} \beta\\ 1 \end{bmatrix} =\pm\epsilon^k\begin{bmatrix} \beta\\ 1 \end{bmatrix}, \] where $\epsilon$ is the fundamental unit of $E$. \section{\texorpdfstring{$H(\Gamma,E)$}{H(Gamma,E)}}\label{2} Let $E$ be a real quadratic field, and let $e$ be the column vector $\trans{(1,0,0)}$. For any $n\ge2$, from~\cite{A} we have a filtration of $\St(E^n)$ that is stable under the natural action of $\GL_n(\mathbb{Q})$: \[ 0\subset \St= \mathcal{F}_0 \subseteq \mathcal{F}_1 \subseteq \cdots \subseteq \mathcal{F}_n= \St(E^n). \] \begin{definition} $\mathcal{F}_m$ is the $\mathbb{Q}$-span of all modular symbols $[a_1,\dots,a_{n-m},b_1,\dots,b_m]\in \St(E^n)$ where $a_i\in \mathbb{Q}^n$ for all $i$ and $b_j\in E^n$ for all $j$. \end{definition} Now set $n=3$. From \cite[Section~6]{A}, we know the following: \begin{itemize} \item It turns out that $\mathcal{F}_1=\mathcal{F}_2=\mathcal{F}_3$, so that the filtration has only one real step: $\St=\mathcal{F}_0\subset\mathcal{F}_1=\St(E^3)$.
We denote the image of a modular symbol $[x,y,z]\in\mathcal{F}_1$ in $\mathcal{F}_1/\mathcal{F}_0$ by $[x,y,z]'$. \item The quotient $C=\mathcal{F}_1/\mathcal{F}_0$ is isomorphic to a direct sum of induced modules $\mathbb{I}(a,b)$, for $b\in\Omega$ and $a\in A(b)$, where: we choose a set of representatives $\Omega'$ of the $\SL_3(\mathbb{Z})$-orbits of $\mathbb{P}^2(E)\setminus\mathbb{P}^2(\mathbb{Q})$; for each $b'\in\Omega'$ we choose a nonzero $b\in E^3$ such that $\hat b=b'$ and let $\Omega$ be the set of these $b$'s; for each $b$, $\SL_3(\mathbb{Z})_b$ denotes the stabilizer in $\SL_3(\mathbb{Z})$ of $b'$; let $A(b)'$ be a set of representatives of the $\SL_3(\mathbb{Z})_b$-orbits of $\mathbb{P}^2(\mathbb{Q})$; for each $a'\in A(b)'$ we choose a nonzero $a\in\mathbb{Q}^3$ such that $\hat a = a'$ and let $A(b)$ be the set of these $a$'s; $\SL_3(\mathbb{Z})_{a,b}$ denotes the stabilizer in $\SL_3(\mathbb{Z})_b$ of $a'$; and \[ \mathbb{I}(a,b)=\operatorname{Ind}(\SL_3(\mathbb{Z})_{a,b},\SL_3(\mathbb{Z}),\mathbb{Q}_{a,b}), \] where $\mathbb{Q}_{a,b}$ is the 1-dimensional $\mathbb{Q}$-vector space spanned by $[e,a,b]$. \item In the isomorphism of $\SL_3(\mathbb{Z})$-modules \[ C\simeq\bigoplus_{b\in\Omega} \bigoplus_{a\in A(b)} \mathbb{I}(a,b) \] we may assume that each $b$ has the form $b=\trans{(\beta, 1, 0)}$ for some $\beta\in E\setminus\mathbb{Q}$, and each $a$ has the form $a=\trans{(a_1,a_2,1)}$ for some $a_1, a_2\in\mathbb{Q}$. \end{itemize} To determine $C$ as a $\Gamma$-module, restrict each $\mathbb{I}(a,b)$ to $\Gamma$ and use the double coset formula for the restriction of an induced module (e.g., \cite[page 69]{B}).
We obtain the following theorem: \begin{theorem} As a $\Gamma$-module, \[ C \simeq \bigoplus_{b\in\Omega} \bigoplus_{a\in A(b)} \bigoplus_{d\in E(a,b)} \mathbb{I}(a,b,d), \] where $E(a,b)$ is a set of representatives of the double cosets $\Gamma\backslash \SL_3(\mathbb{Z})/ \SL_3(\mathbb{Z})_{a,b}$ and \[ \mathbb{I}(a,b,d)=\operatorname{Ind}(\Gamma\cap d\SL_3(\mathbb{Z})_{a,b}d^{-1},\Gamma,d\mathbb{Q}_{a,b}). \] Here, $d\mathbb{Q}_{a,b}$ denotes the module $\mathbb{Q}_{a,b}$ where $dgd^{-1}\in d\SL_3(\mathbb{Z})_{a,b}d^{-1}$ acts via the formula $dgd^{-1}(r)=gr$. \end{theorem} Trace back $d\mathbb{Q}_{a,b}$ via the isomorphism $ C \simeq \bigoplus_{b\in\Omega} \bigoplus_{a\in A(b)} \bigoplus_{d\in E(a,b)} \mathbb{I}(a,b,d) $ in order to view it as a subspace of $C$. When we do that, we find that it becomes the $1$-dimensional $\mathbb{Q}$-vector space spanned by $m=d[e,b,a]'$, and the action of $dgd^{-1}\in d\SL_3(\mathbb{Z})_{a,b}d^{-1}$ is given by $dgd^{-1} \cdot m=dgm$ (which is the way $dgd^{-1}$ naturally acts on $d[e,b,a]'$.) This proves: \begin{corollary}\label{c} As a $\Gamma$-module, \[ C= \bigoplus_{b\in\Omega} \bigoplus_{a\in A(b)} \bigoplus_{d\in E(a,b)} \mathbb{I}'(a,b,d), \] where $E(a,b)$ is a set of representatives of the double cosets $\Gamma\backslash \SL_3(\mathbb{Z})/ \SL_3(\mathbb{Z})_{a,b}$ and \[ \mathbb{I}'(a,b,d)=\operatorname{Ind}(\Gamma\cap d\SL_3(\mathbb{Z})_{a,b}d^{-1},\Gamma,\mathbb{Q} d[e,b,a]'). \] \end{corollary} We now compute $H_1(\Gamma, C)$. \begin{theorem}\label{ab} The group $\Gamma\cap d\SL_3(\mathbb{Z})_{a,b}d^{-1}$ is infinite cyclic modulo a subgroup of order $2$. \end{theorem} \begin{proof} We first work out $\SL_3(\mathbb{Z})_{a,b}$.
Since $b=\trans{(\beta,1,0)}$ and $\beta,1$ are linearly independent over $\mathbb{Q}$, an easy computation shows that \[ \SL_3(\mathbb{Z})_{b} = \{M(h,u)\} \] where in $(2,1)$ block form \[ M(h,u)=\begin{bmatrix} h&u\\ 0&\epsilon \end{bmatrix}\in\SL_3(\mathbb{Z}), \] $u\in\mathbb{Z}^2$, $\epsilon=\pm1$ is chosen to make the determinant equal to $1$, and \[ h\begin{bmatrix}\beta\\1\end{bmatrix} = \eta\begin{bmatrix}\beta\\1\end{bmatrix} \] for some $\eta\in E^\times$. Because $h\in\SL_2(\mathbb{Z})$, $\eta\in {\mathcal O}_E^\times$ is a unit. In other words, $h$ is ``$\beta$-unital''. Note that $h\mapsto\eta$ is one-to-one, so that the $h$'s form a cyclic group modulo $\pm I_2$. Now suppose that $M(h,u)$ also stabilizes the $\mathbb{Q}$-line through $a$. Then $a$ must be an eigenvector of $M(h,u)$ with eigenvalue $\epsilon$. This happens if and only if \[ h\begin{bmatrix}a_1\\a_2\end{bmatrix} + u = \epsilon\begin{bmatrix}a_1\\a_2\end{bmatrix}. \] Therefore $h$, $a$, and $\epsilon$ determine $u$ uniquely. It follows that $\SL_3(\mathbb{Z})_{a,b}$ is infinite cyclic modulo a subgroup of order $2$. To finish the proof, note that $d^{-1} \Gamma d\cap \SL_3(\mathbb{Z})_{a,b}$ has finite index in $\SL_3(\mathbb{Z})_{a,b}$ because $d^{-1} \Gamma d$ has finite index in $\SL_3(\mathbb{Z})$. \end{proof} By Shapiro's lemma, $H_1(\Gamma, \mathbb{I}'(a,b,d))$ is isomorphic to $H_1( \Gamma\cap d\SL_3(\mathbb{Z})_{a,b}d^{-1}, \mathbb{Q} d[e,b,a]')$.
By Theorem~\ref{ab}, if $\gamma$ is any non-torsion element of the group $\Gamma\cap d\SL_3(\mathbb{Z})_{a,b}d^{-1}$, then $H_1( \Gamma\cap d\SL_3(\mathbb{Z})_{a,b}d^{-1}, \mathbb{Q} d[e,b,a]')$ is the one-dimensional $\mathbb{Q}$-vector space generated by \[[\gamma]\otimes_{\Gamma\cap d\SL_3(\mathbb{Z})_{a,b}d^{-1}} d[e,b,a]'.\] \begin{definition}\label{hge} $H(\Gamma,E)$ is the image of the connecting homomorphism \[\psi\colon H_1(\Gamma,C)\to H_0(\Gamma,\St).\] \end{definition} \begin{theorem} Let $\Gamma$ be a subgroup of finite index in $\SL_3(\mathbb{Z})$, and let $E$ be a real quadratic field. Then $H(\Gamma,E)$ is the $\mathbb{Q}$-span of all $[f,\gamma f, da]_\Gamma$, where $f,a\in\mathbb{Z}^3$, $d\in\SL_3(\mathbb{Z})$, and $\gamma\in \Gamma$ arise from all possible choices in the following steps: \begin{description} \item[Step 1] Choose a unital $h$, choose $u\in\mathbb{Z}^2$ and set $\epsilon=\det(h)$. \item[Step 2] Form $M(h,u)$, and find an eigenbasis for it of the form $\set{b,b',a}$, where $b'$ is the Galois conjugate of $b$. \item[Step 3] Choose $d\in E(a,b)$. \item[Step 4] Find the smallest positive power of $dM(h,u)d^{-1}$ which lies in $\Gamma$. Call this power $\gamma$. \item[Step 5] Set $f=de$, where $e$ is the first standard basis vector of $\mathbb{Q}^3$. \end{description} \end{theorem} \begin{proof} The notation in the first three steps has already been explained. For Step 2, note that after we have chosen $h$ and $u$ and formed $M(h,u)$, we see that $M(h,u)$ has an eigenvector of the form $b=\trans{(\beta,1,0)}$, for some $\beta \in E\setminus \mathbb{Q}$. Since $M(h,u)$ is rational, $b'$ is another eigenvector. Since $\epsilon = \pm 1$ is another eigenvalue of $M(h,u)$, it has a third eigenvector which is rational and which we take to be $a$. In Step 4, note that some power of $dM(h,u)d^{-1}$ lies in $\Gamma$ because in fact any element of $\SL_3(\mathbb{Z})$ has some positive power in the finite index subgroup $\Gamma$.
To prove the theorem, take all possible elements of $H_1(\Gamma,C)$, and apply the connecting homomorphism $\psi\colon H_1(\Gamma,C)\to H_0(\Gamma,\St)$. Applying the map induced by inclusion of groups \[ H_1(\Gamma\cap d\SL_3(\mathbb{Z})_{a,b}d^{-1},\mathbb{Q}[de,db,da]')\to H_1(\Gamma,\mathbb{Q}[de,db,da]'), \] the class of $[\gamma]\otimes_{\Gamma\cap d\SL_3(\mathbb{Z})_{a,b}d^{-1}} [de,db,da]'$ maps to the class of $[\gamma]\otimes_\Gamma [de,db,da]'$. From Corollary~\ref{c} and the paragraph before the statement of the theorem, it follows that the cycles $[\gamma]\otimes_\Gamma [de,db,da]'$, as $d,a,b,\gamma$ run over all ways of choosing them in Steps 1 through 4, span $H_1(\Gamma,C)$ over $\mathbb{Q}$. Therefore their images under $\psi$ will generate $H(\Gamma,E)$ over $\mathbb{Q}$. To compute $\psi([\gamma]\otimes_\Gamma [de,da,db]')$ lift this cycle to the chain $[\gamma]\otimes_\Gamma [de,da,db]$, recalling that $[de,da,db]$ is a modular symbol in $\St(E^3)$. Note that $\gamma^{-1} da$ is a multiple of $da$, and $\gamma^{-1} db$ is a multiple of $db$. Therefore \[[\gamma^{-1} de,\gamma^{-1} da,\gamma^{-1} db]= [\gamma^{-1} de,da,db].\] Take the boundary of this chain to obtain \[ (\gamma^{-1}-1)[de,da,db]=[\gamma^{-1} de, da, db] - [de, da, db]= - [\gamma^{-1} de,de,da]. \] The last equality follows by passing $de$ through $[\gamma^{-1} de, da, db]$ and noting that $[\gamma^{-1} de,de,db]=0$ because $\gamma^{-1} de,de,db$ are not linearly independent over $E$.\footnote{They span a plane in $E^3$ for the following reason. Note that $\gamma= dM(h,u)^kd^{-1}$ for some $k>0$. Therefore $\gamma da=\lambda da$, $\gamma db=\mu db$, and $\gamma de = d\theta e$ for some $\lambda,\mu \in E$ where we have set $\theta=M(h,u)^k$. Then $e$,$\theta e$, and $b$ span a plane because $b=\trans{(*,*,0)}$. 
Therefore the columns of $\gamma[\gamma^{-1} de,de,db]= [de, d\theta e, \mu db] = d[e, \theta e, \mu b]$ also span a plane.} Projecting to the coinvariants, we find that \[-\psi([\gamma]\otimes_\Gamma [de,da,db]') = [\gamma^{-1} de,de,da]_\Gamma= [de,\gamma de,da]_\Gamma,\] since $\gamma da$ is a multiple of $da$. If we set $f=de$, we can write the final answer as $[f,\gamma f, da]_\Gamma$. \end{proof} \begin{remark} Actually, we need not take $\gamma$ to be the smallest positive power that lies in $\Gamma$. Any positive power which does so will give a result which is a nonzero rational multiple of what is obtained with the smallest power because of the following lemma. \end{remark} \begin{lemma} For any $m>0$, $[f,\gamma^m f, da]_\Gamma=m [f,\gamma f, da]_\Gamma$. \end{lemma} \begin{proof} Note that for some $k>0$, $[f,\gamma f, da]_\Gamma= [\gamma f,\gamma^2 f, \gamma da]_\Gamma= [\gamma f,\gamma^2 f, dM(h,u)^kd^{-1} da]_\Gamma$. This in turn equals $[\gamma f,\gamma^2 f, da]_\Gamma$ because $a$ is an eigenvector for $M(h,u)$ with eigenvalue $\epsilon=\pm1$, and replacing a column in a modular symbol by a multiple of itself does not change the value of the symbol. Then by passing $f$ through $[\gamma f,\gamma^2 f, da]_\Gamma$, we find it is equal to $[f,\gamma^2 f,da]_\Gamma+[\gamma f, f, da]_\Gamma+ [\gamma f,\gamma^2 f,f]_\Gamma$. The last term is 0 because $f,\gamma f$ and $\gamma^2f$ all lie in a plane. To see this, let $H$ be the plane in $\mathbb{Q}^3$ consisting of vectors whose third coordinate is 0. Then $M(h,u)$ stabilizes $H$. So $\gamma=dM(h,u)^kd^{-1}$ stabilizes $dH$. But $f\in dH$. So $f, \gamma f$ and $\gamma^2 f$ are all in $dH$. We conclude that $[f,\gamma^2 f, da]_\Gamma=2[f,\gamma f, da]_\Gamma$. Similarly, one proves that \begin{align*} [f,\gamma^3 f, da]_\Gamma&= [\gamma^2 f,\gamma^3 f, da]_\Gamma+[f,\gamma^2 f, da]_\Gamma\\ &= [f,\gamma f, da]_\Gamma+2[f,\gamma f, da]_\Gamma\\ &=3[f,\gamma f, da]_\Gamma,\end{align*} and so on. 
\end{proof} We can simplify the algorithm by avoiding the need to compute the set of double coset representatives $E(a,b)$, as follows: In Step 3, $d$ is a ``random'' double coset representative, so a priori it can be any element of $\SL_3(\mathbb{Z})$. So replace Step 3 with \begin{description} \item[Step $3'$] Choose $d\in\SL_3(\mathbb{Z})$. \end{description} But now we can limit the choice of $d$ as follows. If we multiply $d$ on the left by an element $x$ of $\Gamma$ then the new $d$ is $xd$, the new $f$ is $xf$, the new $\gamma$ is $x\gamma x^{-1}$, and we get the output $[xf,x\gamma x^{-1} (xf), xda]_\Gamma = [f,\gamma f, da]_\Gamma$. So the output doesn't change if we replace $d$ by $xd$. On the other hand, let $P_3$ be the subgroup of $\SL_3(\mathbb{Z})$ that stabilizes the plane ${}^t(*,*,0)$. Let $p\in P_3$ and multiply $d$ on the right by $p=\begin{bmatrix} A&V\\ 0&E \end{bmatrix}$. Let \[ pM(h,u)p^{-1}=\begin{bmatrix} A&V\\ 0&E \end{bmatrix} \begin{bmatrix} h&u\\ 0&\epsilon \end{bmatrix} \begin{bmatrix} A&V\\ 0&E \end{bmatrix}^{-1}= \begin{bmatrix} h'&u'\\ 0&\epsilon \end{bmatrix}. \] Now $h'=AhA^{-1}$ is again unital, and $u'$ is still in $\mathbb{Z}^2$. Let $U$ denote the set of unital matrices. As $(h,u)$ ranges over $U\times \mathbb{Z}^2$, so does $(h',u')$ range over $U\times \mathbb{Z}^2$. We claim that the output of the algorithm for $(dp,h,u)$ is equal to the output for $(d,h',u')$. Assuming the claim, it follows that without loss of generality we can change Step $3'$ to \begin{description} \item[Step $3''$] Let $d$ range over a (finite) set of representatives of the double cosets $\Gamma\backslash \SL_3(\mathbb{Z})/P_3$. \end{description} This is the form of Step 3 that we use in our computations. To check the claim: The output for $(dp,h,u)$ is \[\mu_1=[dpe,\gamma dpe,dpa]_\Gamma,\] where $\gamma$ is the smallest power of $dpM(h,u)p^{-1} d^{-1}$ which lies in $\Gamma$, and $a$ is the rational eigenvector of $M(h,u)$. 
The output for $(d,h',u')$ is \[\mu_2=[de,\gamma' de,da']_\Gamma,\] where $\gamma'$ is the smallest power of $dM(h',u')d^{-1}$ which lies in $\Gamma$, and $a'$ is the rational eigenvector of $M(h',u')$. The claim follows if we can show that $\mu_1=\mu_2$. Now $pM(h,u)p^{-1}=M(h',u')$ so that $a'=pa$ and $\gamma'=\gamma$. Therefore \[\mu_1=[dpe,\gamma dpe,dpa]_\Gamma \quad \text{and}\quad \mu_2 = [de,\gamma de,dpa]_\Gamma. \] The equality of $\mu_1$ and $\mu_2$ follows from the following lemma applied to $V=dH$, $x=dpe$, $y=de$, and $z=dpa$, where $H$ is the plane spanned by the first two standard basis vectors of $\mathbb{Q}^3$. (Note that $\gamma$ stabilizes $V$ because $M(h',u')$ stabilizes $H$, and $\gamma(dpa)=\pm(dpa)$ because $a$ is an eigenvector for $M(h,u)$ with eigenvalue $\epsilon=\pm1$.) \begin{lemma} Let $x$, $y$, and $z$ be nonzero vectors in $\mathbb{Q}^3$. Let $V \subset \mathbb{Q}^3$ be a plane with $x, y \in V$ and $z \not \in V$. Let $\gamma \in \Gamma$ such that $\gamma V=V$ and $\gamma z = \pm z$. Then \[ [x,\gamma x, z]_\Gamma=[y,\gamma y, z]_\Gamma. \] \end{lemma} \begin{proof} Passing $y$ through $[x,\gamma x, z]_\Gamma$, we have \[ [x,\gamma x, z]_\Gamma = [y,\gamma x, z]_\Gamma+ [x,y, z]_\Gamma+ [x,\gamma x, y]_\Gamma. \] Since $x,\gamma x$ and $y$ all lie in the plane $V$, the last symbol on the right is 0. Passing $\gamma y$ through $[y,\gamma x, z]_\Gamma$, we have \[ [y,\gamma x, z]_\Gamma = [\gamma y,\gamma x, z]_\Gamma+ [y,\gamma y, z]_\Gamma+ [y,\gamma x, \gamma y]_\Gamma. \] Since $y$, $\gamma x$, and $\gamma y$ all lie in the plane $V$, the last symbol on the right is $0$. Putting this together we obtain \[ [x,\gamma x, z]_\Gamma = [\gamma y,\gamma x, z]_\Gamma+ [y,\gamma y, z]_\Gamma+ [x,y, z]_\Gamma. \] However, since $\gamma\in\Gamma$ and $\gamma z=\pm z$, we have \[[\gamma y,\gamma x, z]_\Gamma =[\gamma y,\gamma x,\gamma z]_\Gamma=[y, x,z]_\Gamma=-[x,y,z]_\Gamma,\] and the desired result follows. 
\end{proof} In summary, we use the following algorithm to find $H(\Gamma,E)$: \begin{algorithm}\label{algo} Let $\Gamma$ be a subgroup of finite index in $\SL_3(\mathbb{Z})$ and $E$ a real quadratic field. Let $P_3$ be the subgroup of $\SL_3(\mathbb{Z})$ that stabilizes the plane ${}^t(*,*,0)$. Then $H(\Gamma,E)$ is the $\mathbb{Q}$-span of all $[f,\gamma f, da]_\Gamma$ where $f,a\in\mathbb{Z}^3$, $d\in\SL_3(\mathbb{Z})$, and $\gamma\in \Gamma$ are found by the following algorithm: \begin{description} \item[Step 1] Choose a unital $h$, choose $u\in\mathbb{Z}^2$, and set $\epsilon=\det(h)$. \item[Step 2] Form $M(h,u)$ and find an eigenbasis for it of the form $\set{b,b',a}$, where $b'$ is the Galois conjugate of $b$. \item[Step $3''$] Choose $d$ in a set of representatives of the double cosets $\Gamma\backslash \SL_3(\mathbb{Z})/P_3$. \item[Step 4] Find the smallest positive power of $dM(h,u)d^{-1}$ which lies in $\Gamma$. Call this power $\gamma$. \item[Step 5] Set $f=de$, where $e$ is the first standard basis vector of $\mathbb{Q}^3$. \item[Step 6] Repeat Steps 1--5, going through all possible choices. \end{description} \end{algorithm} Of course, in practice we cannot go through an infinite number of choices. We explain how we deal with this in Section~\ref{comp}. \section{Maximal cusps and their stabilizers}\label{cusps} Let $n\ge2$, and fix a level $N$. Let $\Gamma=\Gamma_0(N,n)$, and \[\Gamma(N)=\set{g\in\SL_n(\mathbb{Z}) \given g \equiv I \pmod N}.\] \begin{lemma}\label{lemma1} Let $u,v\in\mathbb{Z}^n$ be primitive column vectors such that $u\equiv v \pmod N$. Then there exists $g\in\Gamma(N)$ such that $gu=v$. \end{lemma} \begin{proof} First, assume that $u=e_1=\trans{(1,0,\dots,0)}$. Let $h\in\SL_n(\mathbb{Z})$ have first column equal to $v$. So $hu=v$. Let an overline denote reduction modulo $N$. Then $\overline h^{-1} \in\SL_n(\mathbb{Z}/N\mathbb{Z})$ in $(1,n-1)$ block diagonal form looks like \[ \begin{bmatrix} 1&x\\ 0&y \end{bmatrix}. 
\] Since $y\in\SL_{n-1}(\mathbb{Z}/N\mathbb{Z})$, there is a matrix $Y\in \SL_{n-1}(\mathbb{Z})$ such that $\overline Y=y$. Choose $X\in\mathbb{Z}^{n-1}$ such that $\overline X=x$, and set \[ k=\begin{bmatrix} 1&X\\ 0&Y \end{bmatrix}. \] Then $\overline k = \overline h^{-1}$ and $k e_1=e_1$. Hence $hku=v$ and $\overline{hk}=\overline I$, which implies that $hk\in\Gamma(N)$. Now let $u$ be general, and choose $A\in\SL_n(\mathbb{Z})$ such that $Au = e_1$. Now $Au\equiv Av \pmod N$, so applying what we have already proved to $Au$ and $Av$, we obtain $g'\in\Gamma(N)$ such that $g'Au=Av$. Take $g=A^{-1} g' A$. \end{proof} Next, we describe the $\Gamma$-orbits of maximal parabolic subgroups of $\GL_3(\mathbb{Q})$. We have to correct \cite[Theorem~6]{ash-direct-sum}. (Although that theorem is wrong in general, it is correct for square-free $N$, so the rest of that paper, which applies only to square-free $N$, remains correct.) First, consider the case of parabolic subgroups that are stabilizers of lines. Each is determined by the line it stabilizes, so it is equivalent to find the $\Gamma$-orbits of primitive column vectors $v\in\mathbb{Z}^3$ modulo $\pm1$. \begin{lemma}\label{lemma2} The $\Gamma$-orbits of lines in $\mathbb{Q}^3$ are in 1-1 correspondence with the set of positive divisors of $N$. If $d$ is such a divisor, the corresponding line is generated by $\trans{(1,d,0)}$. \end{lemma} \begin{proof} Given a line in $\mathbb{Q}^3$, let it be generated by the primitive vector $v=\trans{(x,y,z)}$. A general element of $\Gamma$ looks like \[ \gamma= \begin{bmatrix} a&b&c\\ D&e&f\\ g&h&i \end{bmatrix}\in \SL_3(\mathbb{Z}) \] with $D\equiv g\equiv 0 \pmod N$. If $(y,z)=(0,0)$, we may multiply $v$ by some $\gamma$ with $D\ne0$, so without loss of generality $(y,z)\ne(0,0)$. Then multiplying $v$ by a suitable $\gamma$ with $b=c=D=g=0$, we may replace $(y,z)$ with $(d,0)$ where $d=\gcd(y,z)$. So now $v=\trans{(x,d,0)}$ and $x,d$ are relatively prime. 
Let $s$ be an integer such that $x+sd$ is prime, which exists by Dirichlet's theorem. (If $x=0$ then $d=\pm1$ so we can take $s$ to be any prime.) Multiplying $v$ by \[ \begin{bmatrix} 1&s&0\\ 0&1&0\\ 0&0&1 \end{bmatrix} \] allows us to assume that $x$ is prime to $N$. Let $\gamma\in\SL_3(\mathbb{Z})$ be a matrix that reduces modulo $N$ to \[ \begin{bmatrix} \overline x^{-1}&0&0\\ 0&1&0\\ 0&0&\overline x \end{bmatrix}. \] Then $\gamma v \equiv \trans{(1,d,0)} \pmod N$. By Lemma~\ref{lemma1} there exists $g\in\Gamma(N)$ such that $g\gamma v = \trans{(1,d,0)}$, and $g\gamma\in\Gamma$. If $\gamma \ \trans{(1,d,0)}=\trans{(1,d',0)}$ for some $\gamma\in\Gamma$, then the ideal generated by $d$ in $\mathbb{Z}/N\mathbb{Z}$ equals the ideal generated by $d'$ in $\mathbb{Z}/N\mathbb{Z}$. If $d$ and $d'$ are both positive divisors of $N$, it follows that $d=d'$. 
Then multiplying $v$ by a suitable $\gamma$ with $b=c=D=g=0$, we may assume $z=0$. So now $v=\trans{(x,y,0)}$ and $x,y$ generate the unit ideal in $\mathbb{Z}$. Let $s\in\mathbb{Z}$ such that $y+sx$ is prime. Multiplying $v$ by $$ \begin{bmatrix} 1&0&0\\ s&1&0\\ 0&0&1 \end{bmatrix} $$ allows us to assume that $y$ is prime to $N$. If $x=0$, multiplying $v$ by $$ \begin{bmatrix} 1&N&0\\ 0&1&0\\ 0&0&1 \end{bmatrix} $$ allows us to assume instead that $x\ne0$. Write $x=wd$ where $d$ is a positive divisor of $N$ and $w$ is prime to $N$. Now let $\gamma\in\SL_3(\mathbb{Z})$ be a matrix that reduces modulo $N$ to \[ \begin{bmatrix} \overline w^{-1}&0&0\\ 0&\overline y^{-1}&0\\ 0&0&\overline{wy} \end{bmatrix}. \] Then $\gamma v \equiv \trans{(d,1,0)} \pmod N$. By Lemma~\ref{lemma1} there exists $g\in\Gamma(N)$ such that $g\gamma v = \trans{(d,1,0)}$, and $g\gamma\in\Gamma'$. If $\gamma\in\Gamma'$ and $\gamma \ \trans{(d,1,0)}=\trans{(d',1,0)}$, then the ideal generated by $d$ in $\mathbb{Z}/N\mathbb{Z}$ is the same as the ideal generated by $d'$. If $d$ and $d'$ are both positive divisors of $N$, it follows that $d=d'$. \end{proof} Next, we have to determine $\Gamma\cap P$ where $P$ is a maximal parabolic subgroup. Consider the exact sequence \[ 1\to U\to P\to P/U\to 1, \] where $U$ is the unipotent radical of $P$ and $P/U$ is isomorphic to any Levi-component $L$ of $P$. Let $\Gamma_P=\Gamma\cap P$, $\Gamma_U=\Gamma\cap U$ and $\Gamma_L=\Gamma_P/\Gamma_U$. (Note that it is not necessarily true that $\Gamma_L$ is isomorphic to $\Gamma\cap L$.) To compute the homology or cohomology of $\Gamma_P$, we need to identify $\Gamma_U$ and $\Gamma_L$. First, we deal with stabilizers of lines. Let $P_0$ denote the stabilizer of the line through $e_1$. 
Let $U_0$ be its unipotent radical, and choose a Levi-component $L_0$ as follows: \[ P_0= \begin{bmatrix} *&*&*\\ 0&*&*\\ 0&*&* \end{bmatrix}, \quad U_0= \begin{bmatrix} 1&*&*\\ 0&1&0\\ 0&0&1 \end{bmatrix}, \quad L_0= \begin{bmatrix} *&0&0\\ 0&*&*\\ 0&*&* \end{bmatrix}. \] Let $\pi\colon P_0\to L_0$ be the obvious projection map, and use it to identify $P_0/U_0$ with $L_0$. We have the exact sequence \[ 1\to U_0\to P_0 \xrightarrow{\pi} P_0/U_0\to 1. \] \begin{definition}\label{Gamma*} Let $M$ and $\Delta$ be positive integers such that $\Delta \mid M$, and set \[\Gamma_1(M,\Delta)^*=\set*{g=\begin{bmatrix}a&b\\c&D\end{bmatrix}\in\GL_2(\mathbb{Z}) \given \text{$c\equiv0 \pmod M$, and $a \equiv \det(g) \pmod \Delta$}}.\] \end{definition} \begin{lemma}\label{lines} Let $d$ be a positive divisor of $N$, and let $\Delta=\gcd(d,N/d)$. Let $P_d$ be the stabilizer of the line through $\trans{(1,d,0)}$. Set \[ g_d= \begin{bmatrix} 1&0&0\\ d&1&0\\ 0&0&1 \end{bmatrix}, \] and define $\pi_d$ by \[ \pi_d(\gamma)=\pi(g_d^{-1}\gamma g_d). \] Then we have an exact sequence \[ 1\to \Gamma_{U_d}\to \Gamma_{P_d} \xrightarrow{\pi_d} \Gamma_{L_d}\to 1, \] where (i) $\Gamma_{U_d}$ is isomorphic to $\mathbb{Z}^2$ and (ii) $\Gamma_{L_d}$ is isomorphic to $\Gamma_1(N/d,\Delta)^*$. \end{lemma} \begin{proof} Statement (i) is obvious. Since $g_de_1=\trans{(1,d,0)}$, $P_0=g_d^{-1} P_d g_d$ and the definition of $\pi_d$ makes sense. Let $\gamma\in M_3(\mathbb{Z})$, and write \[ \gamma= \begin{bmatrix} a&b&c\\ D&e&f\\ g&h&i \end{bmatrix}. \] Then \[ g_d^{-1} \gamma g_d= \begin{bmatrix} a+db&b&c\\ -d(a+db)+(D+de)&-db+e&-dc+f\\ g+dh&h&i \end{bmatrix}. \] So $\gamma\in \Gamma_{P_d}=P_d\cap\Gamma$ if and only if \begin{itemize} \item $-d(a+db)+(D+de)=0$; \item $g+dh=0$; \item $D\equiv g\equiv 0 \pmod N$; \item $\det(\gamma)=1$. \end{itemize} Note that in this case, \[ \pi_d(\gamma)= \begin{bmatrix} -db+e&-dc+f\\ h&i \end{bmatrix} \] and $ \det (\pi_d(\gamma)) = a+db = \pm1$. 
Therefore, if $\gamma\in \Gamma_{P_d}$, then $ -(a+db)+(D/d)+e=0, $ and $D/d$ is a multiple of $N/d$, whence $a\equiv e \equiv \det (\pi_d(\gamma)) \pmod \Delta$. Also $h\equiv0\pmod{N/d}$. We conclude that $\pi_d(\gamma) \in \Gamma_1(N/d,\Delta)^*$. Conversely, given $g'=\begin{bmatrix}a'&b'\\c'&d'\end{bmatrix}\in \Gamma_1(N/d,\Delta)^*$, we will construct $\gamma\in\Gamma_{P_d}$ such that $\pi_d(\gamma)=g'$. First, set $g=-dc'$, $h=c'$, $i=d'$, $c=0$, and $f=b'$. Then $b'$, $c'$, and $d'$ are correct, the lower left hand corner of $g_d^{-1}\gamma g_d$ is $0$, and $g \equiv 0 \pmod{N}$. Next, leaving $a$ and $b$ as variables, set $e=a'+db$ and $D=-de+da+d^2b$. This ensures that $a'$ is correct and that the $(2,1)$ coordinate of $g_d^{-1}\gamma g_d$ is $0$. It remains to choose $a$ and $b$ so that $D \equiv 0 \pmod{N}$ and $\det (\gamma) = 1$. First, we need $N$ to divide $D$. Since \[D=-d(a'+db)+da+d^2b=d(a-a'),\] we set $a=a'+X(N/d)$, where $X$ is an unknown integer. Lastly, we need $\det(\gamma)=1$. Now $\det(\gamma)=\det( g')(a+db)$. Since $\det (g')=\pm1$ we just need to ensure that $a+db=\det(g')$. Now \[ a+db=a'+X(N/d) +bd, \] and we are given that $a'\equiv \det(g')\pmod \Delta$. Write $a'=\det(g') +m\Delta$. Then we want \[ \det (g') +m\Delta+X(N/d) +bd =\det (g'), \] so we must choose integers $X$ and $b$ so that $m\Delta+X(N/d) +bd=0$. This can be done because $\Delta=\gcd(d,N/d)$. \end{proof} Now we do the same thing for stabilizers of planes. \begin{lemma} Let $d$ be a positive divisor of $N$, and let $\Delta=\gcd(d,N/d)$. Let $P$ be the stabilizer of a plane such that $\trans{P}$ is the stabilizer of the line through $\trans{(d,1,0)}$. Then we have an exact sequence \[ 1\to \Gamma_{U}\to \Gamma_{P} \to \Gamma_{L}\to 1 \] where (i) $\Gamma_{U}$ is isomorphic to $\mathbb{Z}^2$ and (ii) $\Gamma_{L}$ is isomorphic to $\trans{\Gamma_1(d,\Delta)}^*$. \end{lemma} \begin{proof} Statement (i) is obvious. 
For the rest, as above it suffices to look at the stabilizer of a line in $\Gamma'=\trans{\Gamma}$. Let $d$ be a positive divisor of $N$, and $\Delta=\gcd(d,N/d)$. Let $P'_d$ be the stabilizer of the line through $\trans{(d,1,0)}$. Let $P'_0=L'_0U'_0$ be the stabilizer of the line through $\trans{(0,1,0)}$ where \[ P'_0= \begin{bmatrix} *&0&*\\ *&*&*\\ *&0&* \end{bmatrix}, \ U'_0= \begin{bmatrix} 1&0&0\\ *&1&*\\ 0&0&1 \end{bmatrix}, \ L'_0= \begin{bmatrix} *&0&*\\ 0&*&0\\ *&0&* \end{bmatrix}. \] Let $\pi'\colon P'_0\to L'_0$ be the obvious projection map and use it to identify $P'_0/U'_0$ with $L'_0$. We have the exact sequence: \[ 1\to U'_0 \to P'_0 \xrightarrow{\pi'} P'_0/U'_0 \to 1. \] Set \[ h_d= \begin{bmatrix} 1&d&0\\ 0&1&0\\ 0&0&1 \end{bmatrix}, \] and define $\pi'_d$ by \[ \pi'_d(\gamma)=\pi'(h_d^{-1}\gamma h_d). \] Then we have an exact sequence \[ 1\to \Gamma_{U'_d}\to \Gamma_{P'_d} \xrightarrow{\pi'_d} \Gamma_{L'_d}\to 1. \] Let $\gamma\in M_3(\mathbb{Z})$ and write \[ \gamma= \begin{bmatrix} a&b&c\\ D&e&f\\ g&h&i \end{bmatrix}. \] Then \[ h_d^{-1} \gamma h_d= \begin{bmatrix} a-dD&da+b-d(dD+e)&c-df\\ D&dD+e&f\\ g&dg+h&i \end{bmatrix}. \] So $\gamma\in \Gamma_{P'_d}=P'_d\cap\Gamma'$ if and only if \begin{itemize} \item $da+b-d(dD+e)=0$; \item $dg+h=0$; \item $b\equiv c\equiv 0 \pmod N$; \item $\det(\gamma)=1$. \end{itemize} In that case, \[ \pi'_d(\gamma)= \begin{bmatrix} a-dD&c-df\\ g&i \end{bmatrix}, \] and so \[\det( \pi'_d(\gamma)) = dD+e = \pm1.\] Therefore, if $\gamma\in \Gamma_{P'_d}$, then $ a+b/d-(dD+e)=0 $ and $b/d$ is a multiple of $N/d$, whence $a\equiv e \equiv \det(\pi'_d(\gamma)) \pmod \Delta$. Also $c-df\equiv0\pmod d$. We conclude that $\pi'_d(\gamma) \in \trans{\Gamma_1(d,\Delta)}^*$. Conversely, given $g'=\begin{bmatrix}a'&b'\\c'&d'\end{bmatrix}\in \trans{\Gamma_1(d,\Delta)}^*$, we construct $\gamma\in\Gamma_{P'_d}$ such that $\pi'_d(\gamma)=g'$. First, set $g=c'$, $h=-dc'$, $i=d'$, $c=0$, and $f=-b'/d$. 
Then $b'$, $c'$, and $d'$ are correct, the $(3,2)$-entry of $h_d^{-1}\gamma h_d$ is $0$, and $c \equiv 0 \pmod{N}$. Next, leaving $e$ and $D$ as variables, set $a=a'+dD$ and $b=d^2D+de-da$. This ensures that $a'$ is correct and that the $(1,2)$-entry of $h_d^{-1}\gamma h_d$ is $0$. It remains to choose $e$ and $D$ so that $b \equiv 0 \pmod{N}$ and $\det(\gamma) = 1$. First, we need $N$ to divide $b$. Since \[b=d^2D+de-d(a'+dD)=d(e-a'),\] we set $e=a'+X(N/d)$, where $X$ is an unknown integer. Lastly, we need $\det(\gamma)=1$. Now $\det(\gamma)=\det(g')(dD+e)$. Since $\det(g')=\pm1$ we just need to ensure that $dD+e=\det(g')$. Now \[ dD+e=dD+a'+X(N/d), \] and we are given that $a'\equiv \det(g')\pmod \Delta$. Write $a'=\det(g') +m\Delta$. Then we want \[ \det(g') +m\Delta+X(N/d) +dD =\det(g'). \] So we must choose $X$ and $D$ integers so that $m\Delta+X(N/d) +dD=0$. This can be done because $\Delta=\gcd(d,N/d)$. \end{proof} We do not attempt to describe the $\Gamma$-orbits of minimal parabolic subgroups of $\SL_3(\mathbb{Z})$, but we do need to count them. \begin{definition} For any subgroup $G$ of finite index in $\GL_2(\mathbb{Z})$, let $c(G)$ denote the number of cusps of $H/G$, where $H$ is the upper half plane. \end{definition} Note that this is the same as the number of $G$-orbits of lines in $\mathbb{Q}^2$. We let matrices of negative determinant act on $H$ using the rule that $\begin{bmatrix} 1&0\\0&-1 \end{bmatrix} \cdot z = -\overline z$. \begin{lemma}\label{minimal} The number of $\Gamma_0(N)$-orbits of minimal parabolic subgroups of $\SL_3(\mathbb{Z})$ is given by the formula \[ \sum_{d \mid N, d>0} c(\Gamma_1(d,\Delta)^*), \quad \text{where $\Delta=\gcd(d,N/d)$.} \] \end{lemma} \begin{proof} Let $\Gamma=\Gamma_0(N)$ as before. A minimal parabolic subgroup is the stabilizer of a flag, line $\subset$ plane. We have computed the $\Gamma$-orbits of stabilizers of planes. They are in 1-1 correspondence with positive divisors $d$ of $N$. 
For each such plane, let $P=LU$ be its stabilizer in $\SL_3(\mathbb{Q})$, so that $\Gamma_P$ is its stabilizer in $\Gamma$. Then the number of $\Gamma_P$-orbits of lines in that plane will equal $c(G)$ where $G$ is the quotient of $\Gamma_P$ by $\Gamma_U$. The lemma now follows from the previous lemmas. \end{proof} Let $T$ be the Tits building of $\SL_3(\mathbb{Q})$. This is a graph whose vertices are the maximal parabolic subgroups $P$, the edges are the minimal parabolic subgroups $Q$, and $P$ is a vertex of $Q$ if $P\supset Q$. For any positive integer $M$, let $\tau(M)$ denote the number of positive factors of $M$. \begin{corollary} The dimension of $H_1(T/\Gamma_0(N),\mathbb{Q})$ is given by the formula \[ b = \sum_{d \mid N,d>0} c(\Gamma_1(d,\Delta)^*) - 2\tau(N) + 1. \] \end{corollary} To make it easier to compute the number of cusps, note that since $-I_2$ acts trivially on the upper half-plane, the number of cusps of a subgroup $G$ of $\GL_2(\mathbb{Z})$ will not change if we consider instead the group $\pm G$ generated by $G$ and $-I_2$. \begin{example} Let $N=p$ be a prime. Then $\tau(N) = 2$ and $d=1,p$. In both cases, $\Delta=1$. When $d=1$ we have \[c(\pm\Gamma_1(1,1)^*)=c(\GL_2(\mathbb{Z})) = 1,\] and when $d=p$ we have \[c(\pm\Gamma_1(p,1)^*)= c(\Gamma_0(p)^\pm) = 2.\] We obtain \[b=1+2-2\cdot 2+1=0.\] \end{example} \begin{example}\label{psq} Let $N=p^2$, with $p$ a prime. Then $\tau(N) = 3$ and $d=1,p,p^2$. In the first and third cases, $\Delta=1$ and in the middle case $\Delta=p$. When $d=1$ we have $c(\pm\Gamma_1(1,1)^*) = 1$ as before. When $d=p^2$ we have \[c(\pm\Gamma_1(p^2,1)^*)=c(\Gamma_0(p^2)^\pm) = \frac{p+3}{2}.\] Finally, when $d=p$, we have \[c(\Gamma_1(p,p)^*) = (p-1).\] We obtain \[b=1+(p-1)+\frac{p+3}{2}-2\cdot 3+1=\frac{3p-7}{2}.\] \end{example} \begin{example} Let $N$ be a product of $k$ distinct primes so that $\tau(N)=2^k$. Now for each $d$, $\Delta=1$ so \[ b= \sum_{d\mid N,d>0} c(\Gamma_0(d)^\pm) - 2\cdot 2^k + 1. 
\] Let $M$ be a positive divisor of $N$ so that $M$ is also square-free. From \cite[page 38]{shimura}, the number of $\Gamma_0(M)$-orbits of cusps equals $\sum_{t \mid M,t>0}\phi(\gcd(t,M/t))=\tau(M)$. Since the cusps $1/t$ as $t$ varies over the positive divisors of $M$ are pairwise $\Gamma_0(M)$-inequivalent, they give a complete set of representatives of the orbits. Given any cusp $a/b$, it is in the orbit of $1/t$ where $t=\gcd(M,b)$. To figure out $c(\Gamma_0(M)^\pm)$, we have to impose a further equivalence under the matrix $J=\diag(-1,1)$. Since $J\begin{bmatrix}u&v\\s&t\end{bmatrix}=\begin{bmatrix}-u&-v\\s&t\end{bmatrix}$, $J$ takes the $\Gamma_0(M)$-orbit of the cusp $1/t$ to the $\Gamma_0(M)$-orbit of the cusp $-1/t$, which is the same orbit. So $c(\Gamma_0(M)^\pm)= c(\Gamma_0(M))=\tau(M)$. Next, count the factors of $N$ according to how many prime factors each has. We get \[ \sum_{d\mid N,d>0} {\tau(d)} = 1 + \binom{k}{1}\cdot 2+ \binom{k}{2}\cdot 2^2 +\cdots+ 2^k = (1+2)^k = 3^k. \] Thus \[ b= 3^k - 2^{k+1} + 1. \] For instance, if $N$ has two distinct prime factors, then $b=9-8+1=2$. If $N$ has three distinct prime factors, then $b=27-16+1=12$. \end{example} We did not attempt to find a completely general formula for the number of cusps of $\Gamma_1(d,\Delta)^*$. Instead, to check our computations, we wrote code to compute $c(\Gamma_1(d,\Delta)^*)$ for all positive divisors $d \mid N$, and thus to compute $b$, for all $N\le 50$. Our other computations were for prime and prime-squared $N$, which are covered by the examples above. The output confirms our conjecture and acts as a check on our other computations. 
Let $\Gamma$ be a congruence subgroup of $\SL_3(\mathbb{Z})$, and let $R$ be a ring. We recall the structure of the Borel-Serre compactification~\cite{BS}. Let $\mathcal{S}$ be the set of positive-definite symmetric $3\times 3$ matrices modulo homotheties. It is the symmetric space for $\SL_3(\mathbb{R})$. Let $X$ be the Borel-Serre compactification of the locally symmetric space $\Gamma\backslash \mathcal{S}$. The boundary $\partial X$ is the union of the closure of its maximal ($4$-dimensional) faces $\overline e'(P)$, where $P$ runs over a set of representatives $\mathcal{P}$ of $\Gamma$-orbits of maximal parabolic subgroups of $\SL_3(\mathbb{Q})$. Any two $\overline e'(P)$'s are either disjoint or intersect in a minimal ($3$-dimensional) face. The minimal faces, which are closed, are the nilmanifolds $e'(Q)$, where $Q$ runs over a set of representatives of $\Gamma$-orbits of minimal parabolic subgroups of $\SL_3(\mathbb{Q})$. The boundary of an $\overline e'(P)$ is a union of those minimal faces $e'(Q)$ such that some minimal parabolic subgroup in the $\Gamma$-orbit of $Q$ is contained in $P$. For a maximal parabolic subgroup $P$, let $H^3_!(\overline e'(P),R)$ denote the kernel of the restriction map $H^3(\overline e'(P),R)\to H^3(\partial\overline e'(P),R)$. Given a class $z\in H^3_!(\overline e'(P),R)$, it extends by 0 to a class in $H^3(\partial X,R)$ which we call $z^0$. It follows from the Mayer-Vietoris sequence that the span of all such $z^0$ forms a subspace of $H^3(\partial X,R)$ which restricts isomorphically onto $\bigoplus_P H^3_!(\overline e'(P),R)$. \begin{definition} Let $A'(\Gamma)$ be the $\mathbb{Q}$-span of all $z^0$, where $z\in H^3_!(\overline e'(P),\mathbb{Q})$ and $P$ runs over $\mathcal{P}$. \end{definition} Now assume that 6 is invertible in $R$. 
Because the Borel--Serre bordification of $\mathcal{S}$ is contractible and $\Gamma$ acts on it properly discontinuously with finite stabilizers of cardinalities dividing $6^\infty$, there is a natural isomorphism $H^*(\Gamma,R)\simeq H^*(X,R)$ and we will identify these cohomology groups accordingly. \begin{definition}\label{cuspdef} The kernel of the restriction map $H^3(X,R)\to H^3(\partial X,R)$ is called the ``interior'' cohomology, and denoted by $H_!^3(X,R)$. In the introduction, we denoted the image of $H_!^3(X,\mathbb{Q})$ under the isomorphism $H^3(X,\mathbb{Q})\approx H^3(\Gamma,\mathbb{Q})$ by $H_!^3(\Gamma,\mathbb{Q})$. If $R=\mathbb{C}$, $H^3_\mathrm{cusp}(\Gamma,\mathbb{C})$ is defined to be the subspace of $H^3(\Gamma,\mathbb{C})$ represented by cuspidal automorphic differential 3-forms on $X$. If $R$ is a subring of $\mathbb{C}$, $H^3_\mathrm{cusp}(\Gamma,R)$ is defined to be $H^3_\mathrm{cusp}(\Gamma,\mathbb{C})\cap H^3(\Gamma,R)$. \end{definition} From \cite{lee-schwermer} we know \begin{itemize} \item If $R$ is a field of characteristic zero, the restriction map \[r \colon H^3(\Gamma,R)=H^3(X,R)\to H^3(\partial X,R)\] is surjective. \item $H_!^3(X,\mathbb{Q})$ is equal to $H^3_\mathrm{cusp}(\Gamma,\mathbb{Q})$. \item $A'(\Gamma)\otimes\mathbb{C}$ is isomorphic to the direct sum of the spaces of holomorphic cuspforms of weight 2 for $\Gamma_L$, where $P=LU$ runs over a set of representatives of $\Gamma$-orbits of maximal parabolic subgroups of $\SL_3(\mathbb{Q})$. \item $ H^3(\partial X,\mathbb{Q})= A'(\Gamma)\oplus B'(\Gamma)$, where $B'(\Gamma)$ is isomorphic to $H_1(T/\Gamma,\mathbb{Q})$. \end{itemize} The decomposition in the last bullet is Hecke-equivariant. 
The restriction map in the first bullet is also Hecke-equivariant, and we may then conclude: There is a Hecke-equivariant decomposition \[H^3(\Gamma,\mathbb{Q})=H^3_\mathrm{cusp}(\Gamma,\mathbb{Q})\oplus A(\Gamma)\oplus B(\Gamma),\] where the restriction map $r$ induces isomorphisms $A(\Gamma)\simeq A'(\Gamma)$ and $B(\Gamma)\simeq B'(\Gamma)$. The dimension of $B(\Gamma)$ equals the number of $\Gamma$-orbits of minimal parabolic subgroups of $\SL_3(\mathbb{Q})$ minus the number of $\Gamma$-orbits of maximal parabolic subgroups of $\SL_3(\mathbb{Q})$ plus 1. \begin{remark} What Lee and Schwermer actually provide in their paper is the structure of $H^3(\Gamma(N),\mathbb{C})$ as $\SL_3(\mathbb{Z})/\Gamma(N)$-module, where $\Gamma(N)$ is the principal congruence subgroup of $\SL_3(\mathbb{Z})$ of level $N$. We choose $N$ so that $\Gamma(N)$ is contained in $\Gamma$. Since the $\mathbb{C}$-cohomology is the tensor product of the $\mathbb{Q}$-cohomology with $\mathbb{C}$, the bulleted assertions follow by taking $\Gamma/\Gamma(N)$-invariants in $H^3(\Gamma(N),\mathbb{C})$ and descending to $\mathbb{Q}$. \end{remark} \section{Conjectures}\label{conj} Our computational results give us confidence to make the conjectures in this section. We want to state the conjectures in a way that conforms to our method of computation. For this reason we have to interpret modular symbols in terms of the Voronoi cellulation. This gives a concrete realization of the Borel-Serre isomorphism $H_0(\Gamma,\St) \approx H^3(\Gamma,\mathbb{Q})$. For more on the Voronoi cellulation, see Section~\ref{what}. \begin{definition} \label{vor-real} Fix as basepoint the identity matrix $I_3$ in the symmetric space $\mathcal{S}$. Let $D$ be the closure of the orbit of $I_3$ under the diagonal matrices in the Borel-Serre bordification $\overline{\mathcal{S}}$ of $\mathcal{S}$. It is a hexagon, in the sense that it is $2$-dimensional and has six edges, each in a different boundary face. 
If $m\in\GL_3(\mathbb{Q})$ and $[m]_\Gamma$ is the corresponding modular symbol modulo $\Gamma$, define the ``Voronoi realization'' of $[m]_\Gamma$ to be the projection modulo $\Gamma$ of $mD$. We fix an orientation $o$ on $[I_3]_\Gamma$ and give $[m]_\Gamma$ the orientation induced from $o$ via the action of $m$ on $\overline\mathcal{S}$. Define the ``dual Voronoi realization'' of $[m]_\Gamma$ to be $V(m)\in H^3(\Gamma,\mathbb{Q})$ where $V(m)$ is the Lefschetz dual of the homology class in $H_2(X,\partial X,\mathbb{Q})$ which is the fundamental class of the Voronoi realization of $[m]_\Gamma$. If $Y$ is a $\mathbb{Q}$-subspace of $H_0(\Gamma,\St)$, call $V(Y)$ the ``dual Voronoi realization of $Y$'', where $V(Y)=\set{V(m) \given m\in Y}$. \end{definition} \begin{lemma}\label{V} The dual Voronoi realization map $V \colon H_0(\Gamma,\St) \to H^3(\Gamma,\mathbb{Q})$ is an isomorphism. \end{lemma} \begin{proof} Unwinding the definitions, it is easy to see that $V$ is injective. Since the source and target have the same dimension because of Borel-Serre duality, $V$ is also surjective. \end{proof} Recall that $A(\Gamma)$ and $B(\Gamma)$ are defined in Section~\ref{bound}, $T$ denotes the Tits building for $\GL_3(\mathbb{Q})$, and $H(\Gamma,E)$ is given in Definition~\ref{hge}. \begin{cnj}\label{conj-1} Let $E$ be a real quadratic field, and let $\Gamma\subset\SL_3(\mathbb{Z})$ be a finite index subgroup. Then the dual Voronoi realization of $H(\Gamma,E)$ is $H^3_!(\Gamma,\mathbb{Q})+A(\Gamma)$. \end{cnj} \begin{cnj}\label{conj-2} Let $E$ and $\Gamma$ be as in Conjecture~\ref{conj-1}. Then \[ \dim_\mathbb{Q} (H(\Gamma, E))=\dim_\mathbb{Q} (H^3(\Gamma,\mathbb{Q}))-\dim_\mathbb{Q} (H_1(T/\Gamma,\mathbb{Q})). \] \end{cnj} \begin{theorem}\label{12} Conjecture~\ref{conj-1} implies Conjecture~\ref{conj-2}. \end{theorem} \begin{proof} From Section~\ref{bound} we know that $H^3(X,\mathbb{Q})\simeq H^3_!(X,\mathbb{Q})\oplus A \oplus B$. 
So Lemma~\ref{V} and Conjecture~\ref{conj-1} imply that the codimension of $H(\Gamma,E)$ equals the dimension of $B(\Gamma)$, namely $\dim_\mathbb{Q} (H_1(T/\Gamma,\mathbb{Q}))$. \end{proof} We first formulated Conjecture~\ref{conj-2} on the basis of purely numerical data from our computations. Later we formulated Conjecture~\ref{conj-1} and were able to check it by computing Hecke operators on $H(\Gamma,E)$ and on $H^3(\Gamma,\mathbb{Q})$. See Section~\ref{results}. Beyond the experimental evidence, we do not know why $V(H(\Gamma,E))$ should contain $H^3_!(\Gamma,\mathbb{Q})$ nor why its intersection with $B(\Gamma)$ should be trivial. We have the following heuristic as to why it might contain $A(\Gamma)$, but it is far from being a proof. Consider the commutative diagram (suppressing the $\mathbb{Q}$-coefficients in the notation): \[ \xymatrix{ H^3(X) \ar[r] & H^3(\partial X)\\ H_2(X,\partial X) \ar[r] \ar[u]& H_1(\partial X) \ar[u] } \] where the top map is restriction, the bottom map is the boundary map, the left hand vertical map is Lefschetz duality and the right hand vertical map is Poincar\'e duality. (Since we are using $\mathbb{Q}$-coefficients and the spaces we are considering are orbifolds with finite stabilizers, Lefschetz and Poincar\'e duality apply to their homology and cohomology.) If $\mathcal{P}$ is a set of representatives of $\Gamma$-orbits of maximal parabolic subgroups of $\SL_3(\mathbb{Q})$, then $A(\Gamma)=\bigoplus_{P\in\mathcal{P}}H^3(\overline e'(P),\mathbb{Q})$. By Lefschetz duality $H_!^3(\overline e'(P),\mathbb{Q})$ is isomorphic to a subspace of $H_1(\overline e'(P),\mathbb{Q})$, call it $H_1^!(\overline e'(P),\mathbb{Q})$. Let $m=[f,\gamma f,da]_\Gamma\in H(\Gamma_0(N),E)$, as in Algorithm~\ref{algo}. Let $P=LU$ be the parabolic subgroup which is the stabilizer of the plane spanned by $f$ and $\gamma f$. 
Then the Voronoi realization of $m$ in $H_2(X,\partial X)$ is a hexagon, one edge of which, call it $\eta$, lies in the face $e'(P)$. The fundamental class of $\eta$ lies in $H_1^!(\overline e'(P),\mathbb{Q})$. Let $p\colon P\to P/U$ be the projection, $\Gamma_L=p(\Gamma\cap L)$ and $\Gamma_U=\Gamma\cap U$. Then $e'(P)$ is a fibration with base $B_L$ and fiber $F$, where $B_L$ is isomorphic to the upper half-plane modulo $\Gamma_L$ and $F$ is the torus $U(\mathbb{R})/\Gamma_U$. Let $X_L$ denote the Borel-Serre compactification of $B_L$. Then the projection $\eta'$ to $X_L$ of the edge $\eta$ is the Voronoi realization of the modular symbol $[\gamma f,f]_{\Gamma_L}$ in $X_L$. From the commutative diagram we can see that the Voronoi dual of $m$ equals the Voronoi dual of $\eta$ (extended by 0) plus five other terms coming from the other edges of the hexagon. Now $[\gamma f,f]_{\Gamma_L}$ equals $[\delta f,f]_{\Gamma_L}$, where $\delta$ is a unital element in $\Gamma_L$. By~\cite[Theorem~9.3]{AY} (which assumes the Generalized Riemann Hypothesis) the Voronoi duals of $[\delta f,f]_{\Gamma_L}$ span $H_!^1(X_L,\mathbb{Q})$ as we vary $\delta$ over all unital elements of $\Gamma_L$. It seems likely that as we vary $\gamma$, $f$, and $a$ we could find a linear combination of various $[\gamma f,f]_{\Gamma_L}$'s, whose contribution to $H_!^1(X_L,\mathbb{Q})$ equals any desired element, while the fundamental classes of the other edges of the hexagons cancel out. This would explain why $A(\Gamma)$ is contained in $V(H(\Gamma,E))$. \section{Hecke operators and Galois representations}\label{hecke} The tame Hecke algebra ${\mathcal H}_{n,N}$ is the commutative $\mathbb{Z}$-algebra under convolution generated by the double cosets $T(\ell,k)=\Gamma_0(n,N) D(\ell,k) \Gamma_0(n,N)$ with \[D(\ell,k)=\diag(\underbrace{1,\cdots,1}_{n-k},\underbrace{\ell,\cdots,\ell}_k)\] for all primes $\ell\nmid pN$. 
Let $S_{n,N}$ denote the subgroup of $\GL_n(\mathbb{Q})$ generated by the elements in all these double cosets. A Hecke packet over a ring $R$ is an algebra homomorphism $\phi\colon{\mathcal H}_{n,N}\to R$. If $W$ is an ${\mathcal H}_{n,N}\otimes R$-module and $w\in W$ is a simultaneous eigenvector for all $T\in{\mathcal H}_{n,N}$, then the associated eigenvalues give a Hecke packet. \begin{definition} \label{def:attached} Let $\phi$ be a Hecke packet with $\phi(T(\ell,k))= a(\ell,k)$. We say that the Galois representation $\rho\colon G_\mathbb{Q}\to\GL_n(R)$ is attached to $\phi$ if $\rho$ is unramified outside $pN$ and \[\det(I-\rho(\frob_\ell)X)=\sum_{k=0}^n(-1)^k\ell^{k(k-1)/2}a(\ell,k)X^k\] for all $\ell\nmid pN$. (This is the arithmetic Frobenius: if $\omega$ is the cyclotomic character, then $\omega(\frob_\ell)=\ell$.) When $\phi$ comes from a Hecke eigenvector $w$, we say that $\rho$ is attached to $w$. \end{definition} There is a natural action of a double coset $T(\ell,k)\in{\mathcal H}_{n,N}$ on the homology $H_*(\Gamma_0(n, N),M)$ and on the cohomology $H^*(\Gamma_0(n,N),M)$ for any $S_{n,N}$-module $M$. Now let $n = 3$ and $\Gamma = \Gamma_0(3,N)$. It is known from \cite{HLTT, scholze} that there is a Galois representation attached to each Hecke eigenclass in $H_\mathrm{cusp}^3(\Gamma,\mathbb{C})$. From \cite[Section~3.2]{ash-stevens}, it follows that if $z\in A(\Gamma)\otimes_\mathbb{Q}\mathbb{C}$ is a Hecke eigenclass, then it has an attached Galois representation which is the direct sum of a Dirichlet character of conductor dividing $N$ and an odd two-dimensional representation coming from a holomorphic modular form of weight 2 and level dividing $N$ and trivial nebentype. Adapting the proof of this result to the case of a minimal parabolic subgroup, one shows that if $z\in B(\Gamma)\otimes_\mathbb{Q}\mathbb{C}$ is a Hecke eigenclass, then it has an attached Galois representation which is the direct sum of 3 Dirichlet characters of levels dividing $N$. 
Therefore we can use the attached Galois representations as a quick way of identifying Hecke eigenclasses in $H^3(\Gamma,\mathbb{C})$. Because the short exact sequence \[ 0 \to \St(\mathbb{Q}^3;R) \to \St(E^3;R) \to C\to 0 \] is equivariant for the action of $\GL_3(\mathbb{Q})$, the long exact sequence of homology derived from it is equivariant for ${\mathcal H}_{3,N}$. Therefore the connecting homomorphism $\psi$ is ${\mathcal H}_{3,N}$-equivariant, and $H(\Gamma,E)$ is ${\mathcal H}_{3,N}$-stable. This gives a check on our computations. We diagonalize the Hecke operators on $H^3(\Gamma,\mathbb{Q})$ and $H(\Gamma,E)$, and thereby verify Conjecture~\ref{conj-1} for the range of levels $N$ and fields $E$ specified in the introduction. See Section~\ref{results} for an example of how we do this. \section{Voronoi homology} \label{what} We make the computations using already-existing programs that find the Voronoi homology of arithmetic subgroups of $\GL_n(F)$ for arbitrary number fields $F$. For this reason, we need to know, when $F=\mathbb{Q}$, that the Voronoi homology over $R$ in degree $0$ is isomorphic to $H_0(\Gamma, \St(\mathbb{Q}^n,R))$. We also need to make this isomorphism explicit, so that we know how to express the modular symbols in $H(\Gamma,E)$ in terms of the Voronoi homology. Although in this paper we only need the case $n=3$ and $R=\mathbb{Q}$, for future purposes we take $n\le4$ and work over a more general ring $R$. We do not know if the analogue of Theorem~\ref{whatwhat} for $n>4$ is true. For more background on the Voronoi decomposition and Voronoi homology see \cite{voronoi1, AGM2, PerfFormModGrp}. First, we describe the Voronoi complex. To conform with the notation in \cite{AY}, in this section we use a notation that clashes with that of Section~\ref{bound}. Let $\bar{\Gamma} = \GL_n(\mathbb{Z})$ and let $\Gamma$ be a subgroup of finite index in $\bar{\Gamma}$. 
Let $V$ be the $n(n+1)/2$-dimensional vector space of real symmetric $n\times n$ matrices. Let $C \subset V$ denote the open cone of positive definite symmetric matrices, and $q \colon \mathbb{Z}^n \to V$ be the map $q(v) = v \ \trans{v}$. (All vectors in this section are column vectors.) For each nonzero, proper subspace $W$ of $\mathbb{Q}^n$, let $C(W)\subset V$ denote the cone of positive semi-definite symmetric matrices whose kernel is $W\otimes\mathbb{R}$. Set $\overline C$ to be the union of $C$ and all the $C(W)$'s. Let $X =C/\mathbb{R}_+$ and $\overline X=\overline C/\mathbb{R}_+$, where $\mathbb{R}_+$ acts on $C$ by scaling. The group $\bar{\Gamma}$ acts on $V$: for $\gamma \in \bar{\Gamma}$ and $A \in V$, $\gamma \cdot A = \gamma A \ \trans{\gamma}$. This action restricts to an action on $\overline C$, which descends to an action on $\overline X$. There is a cellular tessellation of $\overline X$ called the Voronoi decomposition of $\overline X$. Each cell is the conical convex hull (modulo $\mathbb{R}_+$) of $q(v_i), i=1,2,\dots,m$, where $\{\pm v_i\}$ are the minimal vectors of some positive definite real quadratic form. Assume now that $n\le 4$. There is one $\bar{\Gamma}$-orbit of cells of dimension $n-1$. As a representative, take $\sigma_0$, the one with minimal vectors given by $\set{\pm e_i \given i = 1, 2, \dots, n}$, where $\set{e_i}$ is the standard basis of $\mathbb{Z}^n$. For $g\in\bar{\Gamma}$, denote by $\sigma(g)$ the Voronoi cell $g\sigma_0$. Let $V^\mathbb{Z}_k$ be the $\mathbb{Z}$-module of oriented $k$-chains in the Voronoi cellulation of $\overline X$ modulo $\partial \overline X=\overline X\setminus X$. It is the $\mathbb{Z}$-module generated by all oriented Voronoi cells of dimension $k$ modulo the subspace generated by those which lie wholly in $\partial \overline X$. If $g\in\bar{\Gamma}$ is an orientation reversing element in the stabilizer of a Voronoi cell $\tau$, then $g\tau=-\tau$. 
We denote the image of $\sigma(g)$ in $V^\mathbb{Z}_{n-1}$ again by $\sigma(g)$. Let $\St_\mathbb{Z}=\St(\mathbb{Q}^n;\mathbb{Z})$ be the Steinberg module with integer coefficients. Define $\chi\colon V^\mathbb{Z}_{n-1}\to\St_\mathbb{Z}$ by \[ \chi(\sigma(g))=[g] \] for any $g\in\bar{\Gamma}$. Then $\chi$ is equivariant for the action of $\bar{\Gamma}$. Let $\partial\colon V^\mathbb{Z}_n\to V^\mathbb{Z}_{n-1}$ be the boundary map. This fits into a resolution of $\St_\mathbb{Z}$: \begin{theorem}[{\cite[Theorem 11]{AGM2}}]\label{agm2} Let $n\le 4$. Then \[ 0\to V^\mathbb{Z}_{n(n+1)/2-1}\to\cdots\to V^\mathbb{Z}_n \xrightarrow{\partial} V^\mathbb{Z}_{n-1} \xrightarrow{\chi} \St_\mathbb{Z}\to0 \] is an exact sequence of $\bar{\Gamma}$-modules. \end{theorem} This is not a free $\mathbb{Z}[\bar{\Gamma}]$-resolution of $\St_\mathbb{Z}$. For each $k$, the module $V^\mathbb{Z}_k$ is isomorphic to a direct sum of induced $\mathbb{Z}[\bar{\Gamma}]$-modules, each of which is obtained by inducing the orientation character from the finite stabilizer of some Voronoi cell up to $\bar{\Gamma}$. Let $d=6$ if $n\le 3$ and $d=30$ if $n=4$. The orders of the stabilizers divide $d^\infty$. Set $V_k=V_k^\mathbb{Z}\otimes_\mathbb{Z} R$ for all $k$. Then the ``Voronoi homology'' of $\Gamma$ over $R$, which by definition is the homology of the complex of coinvariants, \[ 0\to (V_{n(n+1)/2-1})_\Gamma\to\cdots\to (V_n)_\Gamma \xrightarrow{\bar\partial} (V_{n-1})_\Gamma \to0, \] is isomorphic as an $R$-module to the Steinberg homology $H_*(\Gamma,\St(\mathbb{Q}^n,R))$ if $d$ is invertible in $R$. (See~\cite[Corollary~12]{AGM2}.) The theorem we need to justify our computations is the following: \begin{theorem}\label{whatwhat} Let $n\le 4$. Let $d=6$ if $n\le 3$ and $d=30$ if $n=4$. Let $R$ be a ring on which $d$ acts invertibly, and let $\Gamma$ be a subgroup of finite index in $\bar{\Gamma}$. 
Then there is an isomorphism \[ \phi\colon \St(\mathbb{Q}^n,R)_\Gamma=H_0(\Gamma,\St(\mathbb{Q}^n,R))\to (V_{n-1})_\Gamma/\image\bar \partial,\] where $\phi$ may be computed as follows: For any $[g]\in\St$, suppose $[g]=\sum[B_j]$ for some $B_j\in\GL_n(\mathbb{Z})$. Then \[ \phi([g]_\Gamma)=(\sum \sigma(B_j)_\Gamma)', \] where the subscript $\Gamma$ denotes the image in the coinvariants and the prime denotes reduction modulo the image of $\bar\partial$. \end{theorem} \begin{proof} In this proof, we write $\St$ as short for $\St(\mathbb{Q}^n,R)=\St_\mathbb{Z}\otimes_\mathbb{Z} R$. Let $F_\bullet\to R$ be the standard resolution of the trivial module $R$. So $F_i$ is the free $R[\Gamma]$-module with basis $(g_0,\dots,g_i)\in \Gamma^{i+1}$, and the action is given by $g(g_0,\dots,g_i)=(gg_0,\dots,gg_i)$. Then $F_\bullet\otimes_R\St\to \St$ is a free resolution of the $R[\Gamma]$-module $\St$. By the Fundamental Lemma of Homological Algebra (FLHA)~\cite[Chpt.~I, Lemma~7.4]{B} there is an augmentation preserving chain map $f \colon F_\bullet\otimes_R\St\to V_\bullet$, which is unique up to homotopy. (Note that $f$ shifts subscripts, taking $F_i\otimes_R\St$ to $V_{i+n-1}$.) Given any resolution $\Phi_\bullet$ (not necessarily free) of $\St$ by $R\Gamma$-modules, there is a spectral sequence \[ E_{pq}^1 = H_q(\Gamma, \Phi_p) \Rightarrow H_{p+q} (\Gamma, \Phi_\bullet). \] See~\cite[Chpt.~VII, (5.3)]{B}. Suppose that for each $p$, the module $\Phi_p$ satisfies the condition: \begin{itemize} \item[($\ast$)] $\Phi_p$ is a direct sum of induced modules, each induced from a finite subgroup whose cardinality is invertible in $R$. \end{itemize} Then using Shapiro's lemma, we see that $E_{pq}^1=0$ whenever $q>0$. (Compare~\cite[Chpt.~VII, (7.10)]{B}.) It follows that $H_{\bullet}(\Gamma, \St)$ is isomorphic to the homology of the complex $(\Phi_\bullet)_\Gamma$ of $\Gamma$-coinvariants. Note that ($\ast$) holds for both $\Phi=F_\bullet\otimes_R\St$ and $\Phi=V_\bullet$. 
Therefore $H_\bullet(\Gamma, \St)$, which equals the homology of the complex $F_\bullet\otimes_\Gamma \St = (F_\bullet\otimes_R \St)_\Gamma$, is isomorphic to the homology of the complex $(V_\bullet)_\Gamma$. This isomorphism can be induced by the map $f$ on the spectral sequences. It follows that $f$ induces an isomorphism on homology $H_i(\Gamma, \St)\to H_{i+n-1}((V_\bullet)_\Gamma)$. This was proved already in~\cite{AGM2}. We now construct an explicit $f$, and $\phi$ will be $f$ in degree 0. Following the proof of \cite[Chpt.~I, Lemma~7.4]{B}, we construct $f$ one degree at a time, starting with the identity map on the augmentation module $\St$. Since we only need to get to $\phi$, we just have to do the first step. Choose the $R$-basis for $\St$ consisting of $\{[U]\}$ where $U$ runs through the unipotent upper triangular matrices in $\SL_n(\mathbb{Q})$. For each $[U]$, there exists a finite set $\{A_i(U)\}\subset\SL_n(\mathbb{Z})$ such that $[U]=\sum [A_i(U)]$. Then $F_0\otimes_R\St$ has a free $R\Gamma$ basis consisting of $\{I\otimes [U]\}$, where $I$ denotes the identity matrix in $\Gamma$. Define $\Psi(I\otimes [U])=\sum \sigma(A_i(U))\in V_{n-1}$. Extend $\Psi$ to all of $F_0\otimes_R\St$ to make it $\Gamma$-equivariant. Then $\Psi$ commutes with the augmentation maps and we can take $f_0=\Psi$. Let $\phi$ be the map on homology induced by $\Psi$. As shown above, it is an isomorphism. It remains to show that it can be computed as stated in the theorem. Let $g\in\GL_n(\mathbb{Z})$. Write $[g]=\sum c_U[U]$. Then \[ \phi([g]_\Gamma)=\sum\sum c_U\sigma(A_i(U))_\Gamma. \] On the other hand, suppose $[g]=\sum[B_j]$ for some $B_j\in\GL_n(\mathbb{Z})$. Then $\sum[B_j]-\sum\sum c_U[A_i(U)]=0$ in $\St$. 
Therefore by Theorem~\ref{by}, and using its notation, extended to write $\gen{A}$ instead of $\gen{a_1,a_2,\dots,a_n}$ if $A$ is a matrix with the columns $a_1,a_2,\dots,a_n$, there is a finite set of $n$-tuples $S$ such that \begin{multline*} \sum\gen{B_j}-\sum\sum c_U\gen{A_i(U)} = \\ \sum_{(a,b,a_3,\dots,a_n)\in S} \gen{a,b,a_3,\dots,a_n}+\gen{-b,a+b,a_3,\dots,a_n}+\gen{a+b,-a,a_3,\dots,a_n}, \end{multline*} where each $\set{a,b,a_3,\dots,a_n}$ is a $\mathbb{Z}$-basis of $\mathbb{Z}^n$. It follows that \begin{multline*} \sum\sigma(B_j)-\sum\sum c_U\sigma(A_i(U)) =\\ \sum_{(a,b,a_3,\dots,a_n)\in S} \sigma(a,b,a_3,\dots,a_n)+\sigma(-b,a+b,a_3,\dots,a_n)+\sigma(a+b,-a,a_3,\dots,a_n). \end{multline*} There is a Voronoi $n$-cell $\tau_0$ whose boundary is $$\sigma(e_1,e_2,e_3,\dots,e_n)+\sigma(-e_2,e_1+e_2,e_3,\dots,e_n) +\sigma(e_1+e_2,-e_1,e_3,\dots,e_n).$$ ($\tau_0$ has minimal vectors $\pm\{e_1,\dots, e_n, e_1+e_2 \}$). Therefore \[\Sigma\coloneqq \sigma(e_1,e_2,e_3,\dots,e_n)+\sigma(-e_2,e_1+e_2,e_3,\dots,e_n) +\sigma(e_1+e_2,-e_1,e_3,\dots,e_n)\] is in the image of $\partial$. If $h$ is the matrix with columns $a,b,a_3,\dots,a_n$, then \[h\Sigma=\sigma(a,b,a_3,\dots,a_n)+\sigma(-b,a+b,a_3,\dots,a_n)+\sigma(a+b,-a,a_3,\dots,a_n)\] is in the image of $\partial$. Therefore \[ \sum\sigma(B_j)-\sum\sum c_U\sigma(A_i(U)) \] is in the image of $\partial$, and its image in the coinvariants is in the image of $\bar\partial$. We are trying to prove $\phi([g]_\Gamma)= \sum \sigma(B_j)_\Gamma$ modulo the image of $\bar\partial$. We have seen that $\phi([g]_\Gamma)=\sum\sum c_U\sigma(A_i(U))_\Gamma$. So we are finished by the preceding paragraph. \end{proof} \section{Description of the computations} \label{comp} We now give the details of the computation for $n = 3$. Let $v_1, v_2, \dots, v_6$ be the column vectors of the matrix $\mat{ 1& 0& 0& 1& 0& 1\\ 0& 1& 0& 1& 1& 1\\ 0& 0& 1& 0& 1& 1 }$. 
For a subset $S \subseteq \set{1, 2, \dots, 6}$, we abuse notation and let $S$ also denote the conical convex hull (modulo $\mathbb{R}_+$) of $\set{q(v_{a})\given a \in S}$. Then the Voronoi cellulation of $\overline{X}$ comes from the $\bar{\Gamma}$-orbit of the simplicial $5$-cell $\set{1, 2, \dots, 6}$ and its faces. Fix $\Gamma = \Gamma_0(N)$. We need to compute $(V_3)_\Gamma$ and $(V_2)_\Gamma$. There are two $\bar{\Gamma}$-orbits of $3$-cells, and we fix representatives $\sigma_1 = \set{1, 2, 3, 4}$ and $\sigma_2 = \set{1, 3, 4, 5}$. There is one $\bar{\Gamma}$-orbit of $2$-cells, with representative $\sigma_0 = \set{1,2,3}$ as described in Section~\ref{what}. We follow \cite[Section~3]{AGM1} to compute $(V_{3})_\Gamma$ and $(V_{2})_\Gamma$. The $i$-cells in $(V_i)_\Gamma$ are in one-to-one correspondence with the orientable right $\bar{\Gamma}_T$-orbits in $\mathbb{P}^2(\mathbb{Z}/N\mathbb{Z})$, where $T = \sigma_0$ for $i = 2$ and $T = \sigma_1, \sigma_2$ for $i = 3$, and $\bar{\Gamma}_T$ is the stabilizer of $T$. The boundary differential $\bar \partial$ is computed as described in \cite[Section~6.2]{imquadcoh}. After one has the differential, computing the homology is a standard problem in exact linear algebra, since the Voronoi homology in bottom degree is the cokernel $\operatorname{Coker}(\bar \partial)$. We also need to compute the Hecke operators $T(\ell,k)$ for primes $\ell \nmid N$ and $k = 0, 1, 2, 3$. Because the coefficient module is trivial, we have that $T(\ell,0)$ and $T(\ell,3)$ are each the identity map. 
As described in Section~\ref{hecke}, set \[D(\ell,1)=\diag(1,1,\ell) \quad \text{and} \quad D(\ell,2)=\diag(1,\ell,\ell).\] For $k = 1,2$ the action of the Hecke operators comes from expressing the double coset as a disjoint union of left cosets \[\Gamma D(\ell,k) \Gamma = \coprod_{h \in \Omega_k} \Gamma h.\] We can take \[ \Omega_1 = \set*{\mat{\ell \\ & 1 \\ && 1}} \cup \set*{ \mat{1 && a\\ & 1 & b\\ && \ell} \given a, b \in R} \cup \set*{\mat{1 & a &\\ & \ell & \\ && 1} \given a \in R }. \] and \[ \Omega_2 = \set*{\mat{\ell \\ & \ell \\ && 1}} \cup \set*{ \mat{1 & a& b \\ & \ell & \\ && \ell} \given a, b \in R} \cup \set*{\mat{\ell & &\\ & 1 & a \\ && \ell} \given a \in R }. \] Then the action of $T(\ell,k)$ on the symbol $[v_1, v_2, v_3]$ is given by \[T(\ell,k) [v_1, v_2, v_3] = \sum_{h \in \Omega_k}[hv_1, hv_2, hv_3].\] See~\cite[Section~6 (B)]{AGG} for this formula. Note that the matrices $h$ have determinant $\ell^k$, so as $\ell$ increases, the determinants of the modular symbols in this formula increase quickly, and so does the number of terms in the sum. We use the modular symbol algorithm to reduce these modular symbols to ``unimodular" symbols, i.e., those with determinant $1$. The unimodular symbols represent cohomology classes that are immediately interpretable in terms of the Voronoi homology, as in Theorem~\ref{whatwhat}. We now briefly describe the modular symbol algorithm. This is used both to compute $H(\Gamma,E)$ itself, and the action of Hecke operators on it. We just explained why we need it for the Hecke operators. As for $H(\Gamma,E)$, we compute elements of it using Algorithm~\ref{algo}. What emerges from that algorithm are modular symbols $[f,\gamma f,da]_\Gamma$, which generally have large determinant. They must be reduced to unimodular symbols before we can use Theorem~\ref{whatwhat} to interpret them in the Voronoi homology in degree 0. 
The modular symbol algorithm expresses arbitrary modular symbols as sums of unimodular symbols. It suffices to be able to solve the following. Given an integral matrix $A= \mat{v_1 & v_2 & v_3} \in \GL_3(\mathbb{Q}) \setminus \GL_3(\mathbb{Z})$, produce a nonzero vector $x \in \mathbb{Z}^3$ such that $\abs{\det(A_i)} < \abs{\det(A)}$ for $i = 1, 2, 3$, where $A_i$ is the $3 \times 3$ matrix obtained by replacing the $i$th column by $x$. Then passing through $x$ on the symbol $[v_1, v_2, v_3]$ as described in Theorem~\ref{thm:mod-props}, applied repeatedly, gives the desired modular symbol algorithm. To find $x$, we use \cite[Section~2.10]{vanGeemenetal} which uses parts of \cite{ash-rudolph}. We give the main idea here for the convenience of the reader. Since $\abs{\det(A)} > 1$, there exists an integer $m > 1$ such that $\det(A) \equiv 0 \pmod{m}$. Thus the nullspace of $A$ modulo $m$ is nontrivial. It follows that there exists $a_i \in \mathbb{Z}$, $i = 1, 2, 3$ such that $x = \frac{1}{m}(a_1v_1 + a_2v_2 + a_3 v_3)$ has integral entries. Since the $a_i$ are representatives of integers modulo $m$, we can choose $a_i$ such that $\abs{a_i} \leq m/2$. Then \[\abs{\det(A_i)} = \frac{\abs{a_i}}{m}\abs{\det(A)} \leq \frac{1}{2}\abs{\det(A)},\] as desired. We carried out the computations using Magma V2.25-8 \cite{magma} running on Intel(R) Xeon(R) Gold 6148 CPU 2.40GHz. The server had 16 processors and 64\,GB RAM with 100\,GB swap. Our computations were not memory intensive, so the swap was not utilized. For the range of our computations, over various levels $N\le169$ and real quadratic fields $E=\mathbb{Q}(\sqrt\Delta)$ with squarefree $\Delta\le10$, computing the Voronoi homology is not the bottleneck. As described above, orbits of the projective plane over $\mathbb{Z}/N\mathbb{Z}$ are used to parametrize cells, so the cardinality $\#\mathbb{P}^2(\mathbb{Z}/N\mathbb{Z})$ gives a sense of the complexity. 
In the range of computation, this was largest for $N = 13^2 = 169$, and in that case, $\#\mathbb{P}^2(\mathbb{Z}/N\mathbb{Z}) = \num{30927}$. This results in a boundary matrix of size $1266 \times 3768$. The orbit computation, the relations, and resulting Voronoi homology took around 11 seconds total in this case. The computation of the image of $H(\Gamma,E)$ took significantly longer. This is because the determinants of the outputted modular symbols can become very large. Now we describe how we used Algorithm~\ref{algo} to compute elements of $H(\Gamma,E)$. Since $H(\Gamma,E)$ is the image of the connecting homomorphism, we call the $\mathbb{Q}$-span of the elements constructed at any stage of the computation the ``image". With the exception of $N = 121$ and $N = 169$, we conducted an exhaustive search over a rectangular box: In Step 1 of Algorithm~\ref{algo}, we first construct a unital $2 \times 2$ matrix $h$ as described in \cite[Theorem~4.1.]{AY}. For the convenience of the reader, we give additional details here. The task is to generate $\beta$-unital matrices for $\beta \in E\setminus \mathbb{Q}$. A matrix $h$ is $\beta$-unital if and only if there is a unit $\eta \in {\mathcal O}_E^\times$ such that \[h \mat{\beta\\1} = \eta \mat{\beta\\1}.\] Rather than find $h$ for each $\beta$, we instead find $h$ that admit such $\beta$ by running over the units $\eta = \pm \epsilon^k$, $k=1,2,3,\dots$. Let $f(x) = x^2 - tx + n \in \mathbb{Z}[x]$ be the minimal polynomial of $\eta$, so $t$ is the trace of $\eta$ and $n \in \set{1, -1}$ is the norm of $\eta$. Then we take $h \in \Gamma$ of the form \[h = \mat{t - d & -\frac{f(d)}{cN}\\cN & d}\] for some integers $c$ and $d$, since the minimal polynomial of $h$ is equal to the minimal polynomial of $\eta$. Thus we find $h$ by finding integers $c, d$ such that $f(d) \equiv 0 \bmod{N}$ and $c$ is a divisor of $f(d)/N$. We proceed as follows. 
For each unit $\eta = \pm \epsilon^k$, we find $d'\in \mathbb{Z}/N\mathbb{Z}$ such that $f(d') \equiv 0 \pmod N$. For each such $d'$, we find $10$ values of $d\in \mathbb{Z}$ in that class modulo $N$. For each such $d$, we compute $f(d)/N$. Each factor (positive and negative) of $f(d)/N$ yields a value of $c\in \mathbb{Z}$, and each pair $d$, $c$ gives rise to a $\beta$ and an $h$ that is $\beta$-unital. In practice, $f(d)$ becomes large quickly, so factoring $f(d)/N$ to find values of $c$ is computationally expensive. Because of this, if $f(d)$ is too large, we do not try to get a full factorization, instead relying on a partial factorization to get some factors $c$ before just moving on. The box is determined by taking all powers $-15 \leq k \leq 15$ and all $u \in \mathbb{Z}^2$ of the form $\trans(a, b)$ for $a, b \in \set{-1, 0, 1}$ in Step 1, and taking a complete set of representatives $d$ in Step $3''$. For each pair $N,E$, the image stabilized within a few minutes, but it took up to several hours to complete the exhaustive search. For $N = 121$ and $N = 169$ and certain $E$, a different approach was required because it was taking too long to search the whole box exhaustively. This was owing to the large number of huge determinant modular symbols that needed to be reduced. The modular symbol algorithm, which has to be applied repeatedly, became a time bottleneck. For these two levels, we switched to another approach. We selected random elements in the box instead of going through the box systematically. We then applied Hecke operators to construct more elements of the image. Thus, for these two levels, we used the known theoretical fact that $H(\Gamma,E)$ is stable under the Hecke algebra. In these two cases, we lost the ability to check our computations by verifying that the image is Hecke stable, since the method ensured Hecke stability. We still had the check that the image coincided with the prediction of our conjecture. 
With this approach for $N = 121$ and $N = 169$, the image stabilized in a few minutes. \section{Results of computations}\label{results} \subsection{Computed dimensions}\label{range} We compared our computed third betti number for $\Gamma_0(N)$ with Table~1 in \cite{vanGeemenetal}. Marc Masdeu communicated to us an unpublished table he produced in 2016 with Jun Bo Lau that gave the dimension of $H^3(\Gamma_0(N),\mathbb{Q})$ for $N\le371$. Their results disagreed with the table in \cite{vanGeemenetal} for $N=27$, $92$, and $93$. The Bo--Masdeu numbers are $5$, $41$, and $24$, respectively as opposed to the entries of $6$, $24$, and $41$ given in \cite{vanGeemenetal}. Our results for $N= 27$, $92$, and $93$ agree with Bo--Masdeu. The $6$ in \cite{vanGeemenetal} was no doubt a misprint, and the betti numbers for $92$ and $93$ were presumably accidentally interchanged. We then computed the dimension of $H(\Gamma_0(N),E)$ and verified Conjecture~\ref{conj-2} for $N\le 50$, prime $N<100$, and $N=11^2,13^2$, for all $E=\mathbb{Q}(\sqrt \Delta)$ with $\Delta=2,3,5,6,7,10$. \subsection{Cuspidal cohomology} Using the Hecke analysis, explained in the next subsection, we could verify the constituents of $H(\Gamma,E)$, not only its dimension. Perhaps our most surprising finding, before we formulated our conjecture, was that the Voronoi dual of $H(\Gamma,E)$ always included the entire cuspidal cohomology. For the levels $N$ treated in this paper, $H_\mathrm{cusp}^3(\Gamma_0(N),\mathbb{C})$ is nonzero for $N=53$, $61$, $79$, $89$, $121$ (where it has dimension $2$ for each level), and $169$ (where it has dimension $4$). This seems to be enough data to justify the part of Conjecture~\ref{conj-1} dealing with the cuspidal cohomology. \subsection{Hecke analysis and verification of the data} Fix $N$ and a real quadratic field $E$ in the range described in subsection~\ref{range}. Let $\Gamma = \Gamma_0(N)$. 
For $\ell \nmid N$, we computed the Hecke operators $T(\ell,k)$ for prime $\ell < 100$ and $k=0,1,2,3$ on $H^3(\Gamma,\mathbb{Q})$ and on $H(\Gamma,E)$ using modular symbols as described in Section~\ref{what}. In every case, $H(\Gamma,E)$ was found to be Hecke-stable, as it must be. \footnote{As explained in the previous section, when $N=121$ or $169$, the Hecke stability was actually produced by our computations, rather than ``found to be" so.} Here is more detail on the Hecke analysis of our data: First, we simultaneously diagonalized the Hecke operators $\set{T(\ell,k)\given k = 1, 2; \ell < 100}$ acting on $H^3(\Gamma,\mathbb{Q})$. Then for each Hecke eigenclass, with Hecke eigenvalues $a(\ell,k)$ \footnote{Because the coefficient module is trivial, we automatically have that $a(\ell,0)=a(\ell,3) = 1$. } we formed the Hecke polynomials \[P_{\ell}(X) = 1-a(\ell,1)X + a(\ell,2)\ell X^2 - \ell^3 X^3,\] and considered compatible families of Galois representations $\rho \colon G_\mathbb{Q}\to \GL_3(K)$ (for suitable $p$-adic fields $K$) attached to this class as described in Definition~\ref{def:attached}, i.e., \[P_{\ell}(X)=\det(1-\rho(\frob_\ell)X).\] We can determine the constituents of the decomposition \[H^3(\Gamma,\mathbb{Q})=H_\mathrm{cusp}^3(\Gamma,\mathbb{Q})\oplus A(\Gamma) \oplus B(\Gamma)\] from Section~\ref{bound} by means of the Hecke packets, or equivalently in terms of the attached Galois representations. This is explained in Section~\ref{hecke}. Namely, if $z\in A(\Gamma)\otimes_\mathbb{Q}\mathbb{C}$ is a Hecke eigenclass, then its attached Galois representation is the direct sum of a Dirichlet character of conductor dividing $N$ and an odd two-dimensional representation coming from a holomorphic modular form of weight 2 and level dividing $N$ and trivial nebentype. If $z\in B(\Gamma)\otimes_\mathbb{Q}\mathbb{C}$ is a Hecke eigenclass, its attached Galois representation is the direct sum of 3 Dirichlet characters of levels dividing $N$. 
Any other Hecke eigenclasses belong to $H_\mathrm{cusp}^3(\Gamma,\mathbb{C})$. \footnote{Because the Hecke eigenvalues need not lie in $\mathbb{Q}$, individual Hecke eigenclasses will be found in $H^3(\Gamma,F)$ for some (possibly nontrivial) extension $F/\mathbb{Q}$. However, if a Hecke eigenclass belongs to $A(\Gamma)\otimes \mathbb{C}$, for example, all its Galois conjugates will also lie there, so that $A(\Gamma)$ is defined over $\mathbb{Q}$. Similarly the other constituents are also defined over $\mathbb{Q}$, and the decomposition occurs over $\mathbb{Q}$, as implied by the formula $H^3(\Gamma,\mathbb{Q})=H_\mathrm{cusp}^3(\Gamma,\mathbb{Q})\oplus A(\Gamma) \oplus B(\Gamma)$.} In this way, we compared the Hecke polynomials and the attached Galois representations of the eigenclasses in our computed $H(\Gamma,E)$ with our predictions, and we verified Conjecture~\ref{conj-1} in all cases. \subsection{\texorpdfstring{$N = 121$}{N=121} example} Let $\Gamma = \Gamma_0(121)$. The cohomology $H^3(\Gamma,\mathbb{Q})$ is $29$-dimensional. For each real quadratic field $E$ in the range of computation, $H(\Gamma,E)$ is $16$-dimensional. The cohomology $H^3(\Gamma,\mathbb{Q})$ decomposes as follows. \subsubsection{Trivial: \texorpdfstring{$1$}{1}-dimensional contribution}\label{triv} There is a $1$-dimensional space where $a(\ell,1) = a(\ell,2) = \ell^2 + \ell + 1$, so that \[P_{\ell}(X) = (1 - X)(1 - \ell X)(1 - \ell^2 X).\] This class contributes to $B(\Gamma_0(121))$. \subsubsection{\texorpdfstring{$\GL_2$}{GL2}-newforms: \texorpdfstring{$8$}{8}-dimensional contribution} \label{sssec:gl2} The space of weight $2$ holomorphic cuspforms with trivial character at level $121$ has a $4$-dimensional newspace.
The four newforms have LMFDB \cite{lmfdb} labels \href{https://www.lmfdb.org/ModularForm/GL2/Q/holomorphic/121/2/a/a/}{121.2.a.a}, \href{https://www.lmfdb.org/ModularForm/GL2/Q/holomorphic/121/2/a/b/}{121.2.a.b}, \href{https://www.lmfdb.org/ModularForm/GL2/Q/holomorphic/121/2/a/c/}{121.2.a.c}, and \href{https://www.lmfdb.org/ModularForm/GL2/Q/holomorphic/121/2/a/d/}{121.2.a.d}. The Hecke field of each newform is $\mathbb{Q}$. Each newform $f$ contributes a $2$-dimensional space to $H^3(\Gamma,\mathbb{Q})$ in the following way. Let $a_\ell = a_\ell(f)$. There are two Hecke eigenforms in $H^3(\Gamma,\mathbb{Q})$ with rational Hecke eigenvalues $\set{a(\ell,k)}$ and $\set{b(\ell,k)}$ such that \[a(\ell,1) = b(\ell,2) = a_\ell + \ell^2 \quad \text{and} \quad a(\ell,2) = b(\ell,1) = \ell a_\ell + 1. \] Then the associated Hecke polynomials are \[P_{\ell}(X) = (1 - \ell^2X)(1-a_\ell X + \ell X^2)\] and \[P_{\ell}(X) = (1 - X)(1 -\ell a_\ell X+ \ell^3 X^2).\] \subsubsection{\texorpdfstring{$\GL_2$}{GL2}-oldforms: \texorpdfstring{$6$}{6}-dimensional contribution} The space of weight $2$ holomorphic cusp forms with trivial character at level $11$ is $1$-dimensional, and it is new since $11$ is prime. It is generated by the class with LMFDB label \href{https://www.lmfdb.org/ModularForm/GL2/Q/holomorphic/11/2/a/a/}{11.2.a.a}. This $\GL_2$ cuspform $f$ contributes to $H^3(\Gamma,\mathbb{Q})$ with eigenforms as in the $\GL_2$-newform case, but now each with multiplicity $3$. This is consistent with the observation in \cite[Remark~3.2]{vanGeemenetal} who cite \cite{reeder}. These classes from the newforms and the oldforms of level $121$ together span $A(\Gamma_0(121))$, which thus has dimension 14.
\subsubsection{Symmetric squares: \texorpdfstring{$2$}{2}-dimensional contribution} The $\GL_2$-newform of level $121$ with LMFDB label \href{https://www.lmfdb.org/ModularForm/GL2/Q/holomorphic/121/2/a/d/}{121.2.a.d} is a quadratic twist of the newform of level $11$ with LMFDB label \href{https://www.lmfdb.org/ModularForm/GL2/Q/holomorphic/11/2/a/a/}{11.2.a.a}. Then the symmetric square lift of these $\GL_2$-forms agree and give a cuspidal Hecke eigenclass in $H^3(\Gamma,\mathbb{Q})$ with Hecke eigenvalues $\set{a(\ell,k)}$ such that \[a(\ell,1) = a(\ell,2) = a_\ell^2 - \ell, \] where $a_\ell$ is the Hecke eigenvalue of the $\GL_2$-form. The associated Hecke polynomials are \[P_{\ell}(X) = (1 - \ell X)(1-(a_\ell^2 - 2 \ell) X + \ell^2 X^2).\] The two $\GL_2$-newforms of level $121$ with LMFDB labels \href{https://www.lmfdb.org/ModularForm/GL2/Q/holomorphic/121/2/a/a/}{121.2.a.a} and \href{https://www.lmfdb.org/ModularForm/GL2/Q/holomorphic/121/2/a/c/}{121.2.a.c} are quadratic twists of each other, so they have the same symmetric square lift in $H^3_\mathrm{cusp}(\Gamma,\mathbb{Q})$ constructed as above. The remaining $\GL_2$-newform at level $N = 121$ is \href{https://www.lmfdb.org/ModularForm/GL2/Q/holomorphic/121/2/a/b/}{121.2.a.b}. It has CM and its symmetric square lift (which is not cuspidal) has level higher than $121$ and does not appear in $H^3(\Gamma,\mathbb{Q})$. In sum: $H^3_\mathrm{cusp}(\Gamma,\mathbb{Q})$ is made up of symmetric square lifts, and has dimension 2. \subsubsection{Dirichlet characters: \texorpdfstring{$12$}{12}-dimensional contribution} The group of Dirichlet characters of modulus $11$ is a cyclic group of order $10$. There is an index $2$ subgroup of order $5$. The four nontrivial elements of this subgroup each contributes a $3$-dimensional subspace of $H^3(\Gamma,\mathbb{Q})$ as follows. Let $\chi$ be one of the characters of order $5$. Then $\chi$ has codomain the cyclotomic field $\mathbb{Q}(\zeta_5)$. 
The character $\chi$ contributes $3$ Hecke polynomials: \begin{align*} P_\ell(X)& = (1-X)(1-\chi(\ell)\ell X)(1-\chi^{-1}(\ell)\ell^2 X) \\ P_\ell(X)& = (1-\ell X)(1-\chi(\ell) X)(1-\chi^{-1}(\ell)\ell^2 X) \\ P_\ell(X)& = (1-\ell^2 X)(1-\chi(\ell) X)(1-\chi^{-1}(\ell) \ell X). \end{align*} These classes are all in $B(\Gamma_0(121))$ and together with the class in subsection~\ref{triv} give $\dim_\mathbb{Q}(B(\Gamma_0(121))) =13$. Note that $13$ equals $b=(3\cdot 11-7)/2$ as predicted in Example~\ref{psq}. We check that the dimensions add up: \[\dim_\mathbb{Q}(H(\Gamma_0(121),E)) = 29-13=16=14+2.\] \end{document}
\begin{document} \begin{tikzpicture}[scale=0.85] \def \rA{4} \def \rB{3} \def \rC{0.5} \def \thetaA{120} \def \thetaB{20} \draw [thick] (0,0) node[below left] {$O$} -- (\thetaA :\rA) node {$\bullet$} node[above left]{$x$} node[midway, below left] {$r$}; \draw [thick] (0,0) -- (\thetaB :\rB) node {$\bullet$} node[above right]{$x'$} node[midway,below right] {$r'$}; \draw [>=stealth,->] (\thetaA :{0.9*\rC}) arc(\thetaA : \thetaB : {0.9*\rC}); \draw ({0.9*\thetaA + 0.1*\thetaB}:\rC) node[above right]{$\scriptstyle \theta(y,y')$}; \draw [thick, dashed] (\thetaA :\rA) .. controls (\thetaA :{\rA-1.5}) and (\thetaB :{\rB-1.5}) .. (\thetaB :\rB); \end{tikzpicture} \begin{tikzpicture} \draw [>=stealth, ->] (0,0) -- (9,0); \draw (9,0) node [below] {$t$}; \draw [>=stealth, ->] (0,0) -- (0,4); \draw (0,4) node [left] {$\mu(t)$}; \draw (0,0) node [left, below] {$(0,0)$}; \draw [dashed] (6,0) -- (6,3); \draw (6,0) node[below] {$\pi \sinh \rho$}; \draw [dashed] (0,3) -- (6,3); \draw (0,3) node [left] {$2 \rho$}; \draw [thick] (6,3) -- (8.5,3); \draw [thick] (0,0) .. controls +(2,2) and +(-2,0) .. (6,3); \draw [thin,>=stealth, ->] (0,0) -- (2,2); \end{tikzpicture} \begin{tikzpicture} \tkzDefPoint(0,0){A} \tkzDefPoint(60:5){B} \tkzDefPoint(5,0){C} \tkzCentroid(A,B,C) \tkzGetPoint{G} \draw[thick] (A) .. controls (G) and (G) .. (B) node[pos=0.5, above left]{$\bar \gamma_1$}; \draw[thick] (B) .. controls (G) and (G) .. (C) node[pos=0.5, above right]{$\bar \gamma_2$}; \draw[thick] (C) .. controls (G) and (G) .. (A)node[pos=0.5, below]{$\bar \gamma_3$}; \tkzDrawPoints(A,B,C); \tkzLabelPoint[below left](A){$\bar x$} \tkzLabelPoint[above](B){$\bar y$} \tkzLabelPoint[below right](C){$\bar z$} \end{tikzpicture} \begin{tikzpicture} \tkzDefPoint(0,0){A} \tkzDefPoint(60:5){B} \tkzDefPoint(5,0){C} \tkzDefPoint(1.5*5,0.8*5){D} \tkzDefPoint(0.65*5,0.35*5){E} \tkzCentroid(A,B,C) \tkzGetPoint{G} \draw[thick] (A) .. controls (G) and (G) ..
(B) node[pos=0.5, above left]{$ \gamma_1$}; \draw[thick] (B) .. controls (G) and +(-1,-0.2) .. (D) node[pos=0.5, above left]{$ \gamma_2$}; \draw[thick] (C) .. controls (G) and (G) .. (A)node[pos=0.5, below]{$ \gamma_3$}; \draw[thick, dashed] (D) .. controls +(-1,-0.3) and +(0,0.5) .. (E) node[pos=0.6, below right]{$ \gamma$} .. controls +(0,-0.5) and +($0.2*(G) - 0.2*(C)$) .. (C); \tkzDrawPoints(A,B,C,D); \tkzLabelPoint[below left](A){$x$} \tkzLabelPoint[above](B){$y$} \tkzLabelPoint[above right](D){$z$} \tkzLabelPoint[below right](C){$z'$} \end{tikzpicture} \begin{tikzpicture}[>=stealth] \tkzDefPoint(0,0){A} \tkzDefPoint(12,0){B} \tkzDefPoint(0,3){C} \tkzDefPoint(12,3){D} \draw[thick, ->] (A) .. controls + (1,1) and +(-1,1) .. (B) node[pos=0.15, shape=coordinate, name=S-]{} node[pos=0.25, shape=coordinate, name=U-]{} node[pos=0.55, shape=coordinate, name=S]{} node[pos=0.5, shape=coordinate, name=V]{} node[pos=0.75, shape=coordinate, name=U+]{} node[pos=0.85, shape=coordinate, name=S+]{}; \draw[thick, ->] (C) .. controls + (1,-1) and +(-1,-1) .. 
(D) node[pos=0.25, shape=coordinate, name=T-]{} node[pos=0.5, shape=coordinate, name=T]{} node[pos=0.75, shape=coordinate, name=T+]{}; \draw[thick, dashed] (T-) -- (U-); \draw[thick, dashed] (T) -- (V); \draw[thick, dashed] (T+) -- (U+); \draw[thin] (T-) -- (S-); \draw[thin] (T) -- (S); \draw[thin] (T+) -- (S+); \tkzDrawPoints(T-,T,T+,S-,S,S+,U-,V,U+) \tkzLabelPoint[shift={(-0.7,0.7)}](T-){$\gamma(t-\tau)$} \tkzLabelPoint[shift={(-0.5,0.7)}](T){$\gamma(t)$} \tkzLabelPoint[shift={(-0.7,0.7)}](T+){$\gamma(t+\tau)$} \tkzLabelPoint[shift={(-0.7,-0.1)}](U-){$q_-= \gamma'(u_-)$} \tkzLabelPoint[shift={(-1,-0.1)}](V){$p=\gamma'(s)$} \tkzLabelPoint[shift={(-1.4,-0.1)}](U+){$q_+=\gamma'(u_+)$} \tkzLabelPoint[shift={(-2.1,0.6)}](S-){$\gamma'\circ\theta(t-\tau)$} \tkzLabelPoint[shift={(-0.0,0.6)}](S){$\gamma'\circ\theta(t)$} \tkzLabelPoint[shift={(0,0.6)}](S+){$\gamma'\circ\theta(t+\tau)$} \tkzLabelPoint[below right](B){$\gamma'$} \tkzLabelPoint[above right](D){$\gamma$} \end{tikzpicture} \begin{tikzpicture} \tkzDefPoint(0,0){X} \tkzDefPoint(8,0){Y'} \tkzDefPoint(7,4){Y} \tkzDefPoint(4.2,1.2){C} \draw[thin, fill=black!10] (C) circle(1.8); \draw[thick] (X) .. controls +(4,1) and +(-2,-2) .. (Y) node[pos=0.4, shape=coordinate, name=S]{} node[pos=0.6, shape=coordinate, name=T]{} node[pos=0.8, shape=coordinate, name=R]{}; \draw[thick] (X) .. controls +(4,1) and +(-2,1) .. (Y') node[pos=0.4, shape=coordinate, name=S']{} node[pos=0.6, shape=coordinate, name=T']{} node[pos=0.8, shape=coordinate, name=R']{}; \draw[thick, dashed] (S) -- (S'); \tkzDrawPoints(X,Y,Y',S,T,S',T') \tkzLabelPoint[below left](X){$x$} \tkzLabelPoint[above right](Y){$y$} \tkzLabelPoint[below right](Y'){$y'$} \tkzLabelPoint[above left](S){$p$} \tkzLabelPoint[above left](T){$q$} \tkzLabelPoint[below](S'){$p'$} \tkzLabelPoint[below](T'){$q'$} \tkzLabelPoint[above left](R){$\gamma$} \tkzLabelPoint[shift={(0.2,0.5)}](R'){$\gamma'$} \tkzLabelPoint[shift={(-1.5,2)}](C){$B$} \end{tikzpicture} \end{document}
\begin{document} \title{Metric dimension parameterized by treewidth in chordal graphs\thanks{This work was supported by ANR project GrR (ANR-18-CE40-0032)}} \author{Nicolas Bousquet\inst{1} \and Quentin Deschamps\inst{1} \and Aline Parreau\inst{1}} \institute{Univ. Lyon, Universit\'e Lyon 1, CNRS, LIRIS UMR 5205, F-69621, Lyon, France.} \maketitle \begin{abstract} The metric dimension has been introduced independently by Harary, Melter~\cite{harray1975} and Slater~\cite{slater1975} in 1975 to identify vertices of a graph $G$ using its distances to a subset of vertices of $G$. A \emph{resolving set} $X$ of a graph $G$ is a subset of vertices such that, for every pair $(u,v)$ of vertices of $G$, there is a vertex $x$ in $X$ such that the distance between $x$ and $u$ and the distance between $x$ and $v$ are distinct. The metric dimension of the graph is the minimum size of a resolving set. Computing the metric dimension of a graph is NP-hard even on split graphs and interval graphs. Bonnet and Purohit~\cite{bonnet2021} proved that the metric dimension problem is W[1]-hard parameterized by treewidth. Li and Pilipczuk strengthened this result by showing that it is NP-hard for graphs of treewidth $24$ in~\cite{li2022}. In this article, we prove that the metric dimension problem is FPT parameterized by treewidth in chordal graphs. \end{abstract} \section{Introduction}\label{intro} Determining the position of an agent on a network is a central problem. One way to determine its position is to place sensors on nodes of the network; the agents then try to determine their positions using their distances to these sensors. More formally, assume that the agent knows the topology of the graph. Can it, by simply looking at its position with respect to the sensors, determine for sure its position in the network? Conversely, where do sensors have to be placed to ensure that any agent at any possible position can easily determine for sure its position?
These questions received a considerable attention in the last decades and have been studied in combinatorics under different names such as metric dimension, identifying codes, locating dominating sets... Let $G=(V,E)$ be a graph and $s,u,v$ be three vertices of $G$. We say that $s$ \emph{resolves} the pair $(u,v)$ if the distance between $s$ and $u$ is different from the distance between $s$ and $v$. A \emph{resolving set} of a graph $G=(V,E)$ is a subset $S$ of vertices of $G$ such that any vertex of $G$ is identified by its distances to the vertices of the resolving set. In other words, $S$ is a resolving set if for every pair $(u,v)$ of vertices of $G$, there is a vertex $s$ of $S$ such that $s$ resolves $(u,v)$. The \emph{metric dimension} of $G$, denoted by $\dim(G)$, is the smallest size of a resolving set of $G$. This notion has been introduced in 1975 by Slater \cite{slater1975} for trees and by Harary and Melter \cite{harray1975} for graphs to simulate the moves of a sonar. The associated decision problem, called the \textsc{Metric Dimension} problem, is defined as follows: given a graph $G$ and an integer $k$, is the metric dimension of~$G$ at most $k$? The \textsc{Metric Dimension} problem is NP-complete~\cite{garey1979} even for restricted classes of graphs like planar graphs~\cite{Diaz2012}. Epstein et al.~\cite{epstein2015} proved that this problem is NP-complete on split graphs, bipartite and co-bipartite graphs. The problem also is NP-complete on interval graphs~\cite{foucaud2017} or sub-cubic graphs~\cite{hartung2013}. On the positive side, computing the metric dimension is linear on trees~\cite{harray1975,slater1975} and polynomial in outer-planar graphs~\cite{Diaz2012}.
\paragraph{Parameterized algorithms.} In this paper, we consider the \textsc{Metric Dimension} problem from a parameterized point of view. We say a problem $\Pi$ is \emph{fixed parameter tractable} (FPT) for a parameter $k$ if any instance of size $n$ and parameter $k$ can be decided in time $f(k) \cdot n^{O(1)}$. Two types of parameters received a considerable attention in the literature: the size of the solution and the ``width'' of the graph (for various widths, the most classical being the treewidth). Hartung and Nichterlein proved in~\cite{hartung2013} that the \textsc{Metric Dimension} problem is W[2]-hard parameterized by the size of the solution. Foucaud et al. proved that it is FPT parameterized by the solution size in interval graphs in~\cite{foucaud2017}. This result was extended by Belmonte et al. who proved in~\cite{BelmonteFGR16} that \textsc{Metric Dimension} is FPT parameterized by the size of the solution plus the tree-length of the graph. In particular, it implies that computing the metric dimension for chordal graphs is FPT parameterized by the size of the solution. \textsc{Metric Dimension} is FPT parameterized by the modular width~\cite{BelmonteFGR16}. Using Courcelle's theorem, one can also remark that it is FPT parameterized by the treedepth of the graph as observed in~\cite{gima2022}. \textsc{Metric dimension} has been proven W[1]-hard parameterized by the treewidth by Bonnet and Purohit in~\cite{bonnet2021}. Li and Pilipczuk strengthened this result by showing that it is NP-complete for graphs of treewidth, and even pathwidth, $24$ in~\cite{li2022}. While \textsc{Metric dimension} is polynomial on graphs of treewidth~$1$ (forests), its complexity for graphs of treewidth~$2$ is open (even if it is known to be polynomial for outerplanar graphs). Our main result is the following: \begin{theorem}\label{thm:main} \textsc{Metric Dimension} is FPT parameterized by treewidth on chordal graphs.
That is, \textsc{Metric Dimension} can be decided in time $O(n^3+n^2 \cdot f(\omega))$ on chordal graphs of clique number $\omega$. \end{theorem} Recall that, on chordal graphs, the treewidth is equal to the size of a maximum clique minus one. Our proof is based on a dynamic programming algorithm. One of the main difficulties to compute the metric dimension is that a pair of vertices might be resolved by a vertex far from them in the graph. This non-locality implies that it is not simple to use classical algorithmic strategies like divide-and-conquer, induction or dynamic programming since a single edge or vertex modification somewhere in the graph might change the whole solution\footnote{The addition of a single edge in a graph might modify the metric dimension by $\Omega(n)$, see e.g.~\cite{eroh2015}.}. The first ingredient of our algorithm consists in proving that, given a chordal graph, if we are using a clique tree of a desirable form and make some simple assumptions on the shape of an optimal solution, we can ensure that resolving the pairs of vertices close to a separator implies that we resolve all the pairs of vertices in the graph. Using this lemma, we build a dynamic programming algorithm that computes the minimum size of a resolving set containing a given vertex in FPT-time parameterized by treewidth. The special type of clique tree used in the paper, inspired from~\cite{kloks1994}, is presented in Section~\ref{sec:treedec}. We then give some properties of resolving sets in chordal graphs in Section~\ref{sec:chordal}. These properties will be needed to prove the correctness and the running time of the algorithm. Then, we present the definition of the extended problem in Section~\ref{sec:defpb} and the rules of the dynamic programming in Section~\ref{sec:rules} where we also prove the correctness of the algorithm. We end by an analysis of the complexity of the algorithm in Section~\ref{sec:complexity}.
\paragraph{Further work.} The function of the treewidth in our algorithm is probably not optimal and we did not try to optimize it to keep the algorithm as simple as possible. A first natural question is the existence of an algorithm running in time $2^{\omega} \cdot Poly(n)$ for chordal graphs. We know that Theorem~\ref{thm:main} cannot be extended to bounded treewidth graphs since \textsc{Metric Dimension} is NP-hard on graphs of treewidth at most $24$~\cite{li2022}. One can nevertheless wonder if our proof technique can be adapted to design polynomial time algorithms for graphs of treewidth at most $2$ on which the complexity status of \textsc{Metric Dimension} is still open. Our proof nevertheless crucially relies on the fact that a separator $X$ of a chordal graph is a clique and thus the way a vertex in a component of $G\setminus X$ is interacting with vertices in another component of $G \setminus X$ is simple. One can wonder whether, if there is a tree decomposition of $G$ where all the bags have diameter at most $C$, it is true that \textsc{Metric Dimension} is FPT parameterized by the size of the bags plus $C$. Note that, since \textsc{Metric Dimension} is NP-complete on chordal graphs, the problem is indeed hard parameterized by the diameter of the bags only. \section{Preliminaries} \subsection{Clique trees}\label{sec:treedec} Unless otherwise stated, all the graphs considered in this paper are undirected, simple, finite and connected. For standard terminology and notations on graphs, we refer the reader to~\cite{bookgraph}. Let us first define some notations we use throughout the article. Let $G=(V,E)$ be a graph where $V$ is the set of vertices of $G$ and $E$ the set of edges; we let $n=|V|$. For two vertices $x$ and $y$ in $G$, we denote by $d(x,y)$ the length of a shortest path between $x$ and $y$ and call it the \emph{distance between $x$ and $y$}.
For every $x \in V$ and $U \subseteq V$, the \emph{distance between $x$ and $U$}, denoted by $d(x,U)$, is the minimum distance between $x$ and a vertex of $U$. Two vertices $x$ and $y$ are \emph{adjacent} if $xy \in E$. A \emph{clique} is a graph where all the pairs of vertices are adjacent. We denote by $\omega$ the size of a maximum clique. Let $U$ be a set of vertices of $G$. We denote by $G \setminus U$ the subgraph of $G$ induced by the set of vertices $V \setminus U$. We say that $U$ is a \emph{separator} of $G$ if $G \setminus U$ is not connected. If two vertices $x$ and $y$ of $V \setminus U$ belong to two different connected components in $G \setminus U$, we say that $U$ \emph{separates} $x$ and $y$. If the set $U$ induces a clique, we say that $U$ is a \emph{clique separator} of $G$. \begin{defn}\label{def_tree_decom} A \emph{tree-decomposition} of a graph $G$ is a pair $(X , T)$ where $T$ is a tree and $X = \{X_i| i \in V (T)\}$ is a collection of subsets (called bags) of $V (G)$ such that: \begin{itemize} \item $ \bigcup_{i \in V (T)} X_i = V (G)$. \item For each edge $xy \in E(G), x, y \in X_i$ for some $i \in V (T)$. \item For each $x \in V (G)$, the set $\{i | x \in X_i\}$ induces a connected sub-tree of $T$. \end{itemize} \end{defn} Let $G$ be a graph and $(X,T)$ a tree decomposition of $G$. The \emph{width} of the tree-decomposition $(X,T)$ is the biggest size of a bag minus one. The \emph{treewidth} of $G$ is the smallest width of $(X,T)$ amongst all the tree-decompositions $(X,T)$ of $G$. Chordal graphs are graphs with no induced cycle of length at least $4$. A characterization given by Dirac in~\cite{dirac1961} ensures that chordal graphs are graphs where minimal vertex separators are cliques. Chordal graphs admit clique trees which are tree-decompositions such that all the bags are cliques.
Our dynamic programming algorithm is performed in a bottom-up manner on a clique tree of the graph with more properties than the ones given by Definition~\ref{def_tree_decom}. These properties permit us to simplify the analysis of the algorithm. We adapt the decomposition of~\cite[Lemma 13.1.2]{kloks1994} to get this tree-decomposition. \begin{lemme}\label{tree_dec_inter} Let $G=(V,E)$ be a chordal graph. There exists a clique tree $(X,T)$ of $G$ such that, (i) $T$ is a rooted tree that contains at most $4n$ nodes, (ii) for every bag $i \in V(T)$, the set of vertices $X_i$ induces a clique in $G$ and (iii) $T$ contains four types of nodes which are: \begin{itemize} \item Leaf nodes which satisfy $|X_i|= 1$ or, \item Introduce nodes $i$ which have exactly one child $j$, and that child satisfies $X_i = X_j \cup \{v\}$ for some vertex $v \in V (G) \setminus X_j$ or, \item Forget nodes $i$ which have exactly one child $j$, and that child satisfies $X_i = X_j \setminus \{v\}$ for some vertex $v \in X_j$ or, \item Join nodes $i$ which have exactly two children $i_1$ and $i_2$ satisfying $X_i = X_{i_1} = X_{i_2}$. \end{itemize} Moreover, such a clique tree can be found in linear time. \end{lemme} The original proof uses $k$-trees instead of chordal graphs but the proof only needs that the graph contains a simplicial vertex which holds for chordal graphs. Let us define now our clique tree in which the root of the tree is fixed. \begin{lemme}\label{tree_dec} Let $G=(V,E)$ be a chordal graph and $r$ a vertex of $G$, then there exists a clique tree $(X,T)$ such that, $T$ contains at most $7n$ nodes, $T$ is rooted in a node that contains only the vertex $r$, for every bag $i \in V(T)$, the set of vertices $X_i$ induces a clique in $G$ and $T$ contains four types of nodes. \begin{itemize} \item Leaf nodes, $|X_i|= 1$ which have no child. \item Introduce nodes $i$ which have exactly one child $j$, and that child satisfies $X_i = X_j \cup \{v\}$ for some vertex $v \in V (G) \setminus X_j$.
\item Forget nodes $i$ which have exactly one child $j$, and that child satisfies $X_i = X_j \setminus \{v\}$ for some vertex $v \in X_j$. \item Join nodes $i$ which have exactly two children $i_1$ and $i_2$, and those children satisfy $X_i = X_{i_1} = X_{i_2}$. \end{itemize} Moreover, such a clique tree can be found in linear time. \end{lemme} \begin{proof} By Lemma~\ref{tree_dec_inter}, there exists a clique tree $(T,X)$ that satisfies all the properties except that the root of $T$ can be any bag. Let us first modify $(T,X)$ to ensure that the clique tree is rooted on a node that contains only $\{r\}$. If $r$ appears in a bag of a leaf node then it holds. Otherwise, there exists a forget node $i$ with a child $i'$ such that $X_i= X_{i'} \setminus \{r\}$. Let $X_i=\{v_1, \ldots, v_k\}$ with $r=v_1$. We do the following modifications on $T$: delete the edge $ii'$, add two nodes $i''$ and $i_k$ such that $X_{i''}=X_{i_k}=X_i$, $i''$ is a join node with children $i'$ and $i_k$ and $i$ is a forget node with child $i''$. Finally, we add the nodes $i_{k-1}, \ldots, i_1$ such that for any $1 \leq t \leq k-1$, $X_{i_t}=\{v_1, \ldots, v_t\}$ and $i_t$ is the child of the node $i_{t+1}$ (which is a forget node). Then, $r$ appears in a bag of a leaf node by adding at most $n$ nodes in $T$. Let us now root $T$ on the node whose bag is $\{ r \}$. We need to check that the properties on the nodes are preserved. Note that for every edge, the two bags on the extremities differ at most on one vertex. If a node has only one child with the same bag then merge the two nodes. If a node $i$ has two children with different bags, let $X$ be the bag of $i$, then add a new bag with vertex set $X$ between $i$ and its child with a different bag. The tree we get after these modifications satisfies all the properties of the lemma. All these modifications can be performed in linear time.
So finding the clique tree can be performed in time $O(n)$.\qed \end{proof} In the following, a clique tree with the properties of Lemma~\ref{tree_dec} will be called a \emph{nice clique tree} and we will only consider nice clique trees $(X,T)$ of chordal graphs $G$. Given a rooted clique tree $(T,X)$ of $G$, for any node $i$ of $T$, we define the \emph{subgraph of $G$ rooted in $X_i$}, denoted by $T(X_i)$, as the subset of vertices of $G$ contained in at least one of the bags of the subtree of $T$ rooted in $i$ (i.e. in the bag of $i$ or one of its descendants). \subsection{Clique separators and resolving sets.}\label{sec:chordal} In this section, we give some technical lemmas that will permit us to bound by $f(\omega)$ the amount of information we have to remember in the dynamic programming algorithm. \begin{lemme}\label{resolve_far} Let $K$ be a clique separator of $G$ and $G_1$ be a connected component of $G \setminus K$. Let $G_{ext}$ be the subgraph of $G$ induced by the vertices of $G_1 \cup K$ and $G_{int}= G \setminus G_{ext}$. Let $x_1,x_2 \in V(G_{int})$ be such that $|d(x_1,K)-d(x_2,K)| \geq 2$. Then, every vertex $s \in V(G_{ext})$ resolves the pair $(x_1,x_2)$. \end{lemme} \begin{proof} Without loss of generality, assume $d(x_1,K)+2 \leq d(x_2,K)$. By the triangle inequality and since $K$ is a clique, $d(x_1,s) \leq d(x_1,K) + 1 + d(K,s) $ and $d(x_2,s) \geq d(x_2,K)+d(K,s)$. The sum of these inequalities gives $d(x_2,K)+d(x_1,s) \leq d(x_1,K)+1+d(x_2,s)<d(x_2,K)+d(x_2,s)$. Thus, $d(x_1,s) <d(x_2,s)$, meaning that $s$ resolves the pair $(x_1,x_2)$.\qed \end{proof} Before proving Lemma~\ref{resolve_close}, let us extract a technical case. \begin{lemme} \label{tech} Let $G$ be a chordal graph and $T$ be a nice clique tree of $G$. Let $X$ and $Y$ be two bags of $T$ such that $X \cap Y = \emptyset$, and let $x$ and $y$ be two vertices in respectively $X$ and $Y$.
Assume $d(x,y) \geq 2$ and let $z$ be a neighbour of $x$ that appears in the bag the closest to $Y$ in $T$ amongst the bags on the path between $X$ and $Y$. Then $z$ belongs to a shortest path between $x$ and $y$. \end{lemme} \begin{proof} Let $Z$ be the bag containing $z$ and no other vertices of $N[x]$ with $Z$ on the path between $X$ and $Y$. If $Z=Y$ then $z$ is a common neighbour of $x$ and $y$ which gives the result since $d(x,y) \geq 2$. Otherwise, consider a shortest path $x=x_1,x_2,\ldots, x_m=y$ between $x$ and $y$ and let $x_i$ be the first vertex of this path belonging to $Z$. Such a vertex exists since $Z$ separates $x$ and $y$. If $x_i=z$ then the result holds. Otherwise by definition of $z$, $x_i$ is not adjacent to $x$ and is adjacent to $z$ because they both belong to $Z$. Thus, if we replace the sub-path $x_1,\ldots,x_i$ by $x,z,x_i$, it gives a path from $x$ to $y$ whose length is at most the length of the initial path which gives the result.\qed \end{proof} \begin{lemme}\label{resolve_close} Let $S$ be a subset of vertices of $G$. Let $X$, $Y$ and $Z$ be three bags of a nice tree-decomposition $T$ of $G$ such that $Z$ is on the path $P$ between $X$ and $Y$ in $T$. Denote by $P=X_1, \ldots, Z, \ldots, X_p$ the bags of $P$ with $X=X_1$ and $Y=X_p$. Let $x$ be a vertex of $X$ and $y$ a vertex of $Y$ with $d(x,Z) \geq 2$ and $d(y,Z) \geq 2$. Assume that any pair of vertices $(u,v)$ with $u \in X_2 \cup \ldots \cup Z$, $v \in Z \cup \ldots \cup X_p$, $d(u,Z)<d(x,Z)$ and $d(v,Z)<d(y,Z)$ is resolved by $S$. Then the pair $(x,y)$ is resolved by $S$. \end{lemme} \begin{proof} Let $i_1$ be such that $X_{i_1} \cap N[x] \neq \emptyset$ and for every $j>i_1$, $X_j \cap N[x] = \emptyset$ and $i_2$ be such that $X_{i_2} \cap N[y] \neq \emptyset$ and for $j<i_2$, $X_j \cap N[y] = \emptyset$.
Let $x'$ be the only neighbour of $x$ in $X_{i_1}$ and $y'$ be the only neighbour of $y$ in $X_{i_2}$, they are unique by definition of a nice tree-decomposition. Note that $d(x,y) \geq 4$ since $d(x,Z) \geq 2$ and $d(y,Z) \geq 2$. So $N[x]$ is not adjacent to $N[y]$ and then $i_1 <i_2$. By Lemma~\ref{tech}, $x'$ is on a shortest path between $x$ and $Z$ and $y'$ is on a shortest path between $y$ and $Z$. So $d(x',Z)<d(x,Z)$ and $d(y',Z)<d(y,Z)$. By hypothesis, there is a vertex $s \in S$ resolving the pair $(x',y')$. Let us prove that $s$ resolves the pair $(x,y)$. If $s$ is a neighbour of $x$ or $y$ then $s$ resolves the pair $(x,y)$ since $d(x,y) \geq 4$. So we can assume that $d(s,x) \geq 2$ and $d(s,y) \geq 2$. Let $X_s$ be a bag of $T$ containing $s$ and $X_s'$ be the closest bag to $X_s$ on $P$ between $X$ and $Y$. \noindent Case 1: $s \in X_{i_1}$ and $s \in X_{i_2}$. Then, $d(s,x') \leq 1$ and $d(s,y') \leq 1$. The vertex $s$ resolves the pair $(x',y')$ so $d(s,x') \neq d(s,y')$ so $s=x'$ or $s=y'$. Assume by symmetry that $s=x'$, then $d(s,x)=1$ and $d(s,y) \geq 3$ because $d(x,y) \geq 4$. So $s$ resolves the pair $(x,y)$. \noindent Case 2: $s$ belongs to exactly one of $X_{i_1}$ or $X_{i_2}$. By symmetry assume that $s \in X_{i_1}$. By Lemma~\ref{tech}, $y'$ is on a shortest path between $y$ and $s$. So $d(s,y)=d(s,y')+1$. As $s$ belongs to $X_{i_1}$ then $d(x',s)=1$ and $d(x,s) \leq 2$. As $d(y',s) \neq d(x',s)$ we have $d(y',s) \geq 2$, so $d(s,y) \geq 3$. Thus $s$ resolves the pair $(x,y)$. \noindent Case 3: $s \notin X_{i_1}$ and $s \notin X_{i_2}$. First, we consider the case where $X_s'$ is between $X_{i_1}$ and $X_{i_2}$. Then, $d(s,x)=d(s,x')+1$ and $d(s,y)=d(s,y')+1$ by Lemma~\ref{tech} as $X_{i_1}$ separates $x$ and $s$ and $X_{i_2}$ separates $y$ and $s$. Thus, $s$ resolves the pair $(x,y)$. By symmetry, we can now assume that $X_s'$ is between $X$ and $X_{i_1}$. Since $i_1 < i_2$, $X_{i_2}$ separates $s$ and $y$.
So $d(s,y)=d(s,y')+1$ by Lemma~\ref{tech}. To conclude we prove that $d(s,x') < d(s,y')$. Let $Q$ be a shortest path between $s$ and $y$. The bag $X_{i_1}$ separates $s$ and $y$ so $Q \cap X_{i_1} \neq \emptyset$. Let $y_1 \in Q \cap X_{i_1}$. By definition of $Q$, $d(s,y)=d(s,y_1)+d(y_1,y)$. We know $y_1 \neq y$ because $y_1$ is a neighbour of $x'$. So $d(y_1,y) \neq 0$. We also have $d(s,x')\leq d(s,y_1)+1$ because $y_1 \in X_{i_1}$, so $y_1$ is a neighbour of $x'$. As $d(s,x')\neq d(s,y')$, this ensures $d(s,x') < d(s,y')$. So $s$ resolves the pair $(x,y)$ because $d(s,x) \leq d(s,x')+1 < d(s,y')+1=d(s,y)$.\qed \end{proof} The following corollary is essentially rephrasing Lemma~\ref{resolve_close} to get the result on a set of vertices. \begin{cor}\label{resolve_dec} Let $G$ be a chordal graph and $S$ be a subset of vertices of $G$. Let $X_i$ be a bag of $T$ and let $T_1=(X_1,E_1)$ and $T_2=(X_2,E_2)$ be two connected components of $T \setminus X_i$. Assume that any pair of vertices $(u,v)$ of $(X_1 \cup X_i) \times (X_2 \cup X_i)$ with $d(u,X_i)\leq 2$ and $d(v,X_i) \leq 2$ is resolved by $S$. Then any pair of vertices $(u,v)$ of $(X_1,X_2)$ with $|d(u,X_i)-d(v,X_i)| \leq 1$ is resolved by $S$. \end{cor} \begin{proof} Assume by contradiction that there exist some pairs of vertices of $(T_1,T_2)$ with $|d(u,X_i)-d(v,X_i)| \leq 1$ which are not resolved by $S$. Among all these pairs, let $(u,v)$ be one pair minimizing $q:=d(u,X_i)+d(v,X_i)$. If $q \leq 4$ then $d(u,X_i)\leq 2$ and $d(v,X_i)\leq 2$, so the pair $(u,v)$ is resolved by the hypothesis of the corollary. If $q \geq 5$, then $d(u,X_i)\geq 2$ and $d(v,X_i)\geq 2$. By minimality, we know that all pairs $(u',v')$ of $(T_1,T_2)$ with $d(u',X_i)<d(u,X_i)$ and $d(v',X_i)<d(v,X_i)$ are resolved by $S$.
So, by Lemma~\ref{resolve_close}, the pair $(u,v)$ is resolved by $S$.\qed \end{proof} \section{Algorithm description} In this section, we fix a vertex $v$ of a chordal graph $G$ and consider a nice clique tree $(T,X)$ rooted in $v$ which exists by Lemma~\ref{tree_dec}. We present an algorithm computing the smallest size of a resolving set of $G$ containing $v$. \subsection{Generalisation of the problem}\label{sec:defpb} The algorithm is a dynamic programming algorithm that works bottom-up from the leaves of a nice clique tree. Our algorithm computes the solution of a problem more general than the metric dimension but easier to manipulate for combining solutions. Our algorithm consists in a dynamic programming on the clique tree. In this new problem, we will represent some vertices by vectors of distances. We now define some notation for editing vectors. \begin{defn} Given a vector $\mathbf{r}$, the notation $\mathbf{r}_i$ refers to the $i$-th coordinate of~$\mathbf{r}$. \begin{itemize} \item Let $\mathbf{r}=(r_1,\ldots,r_k) \in \mathbb{N}^k$ be a vector of size $k$ and $m \in \mathbb{N}$. The vector $\mathbf{r'}=\mathbf{r|m}$ is the vector of size $k+1$ with $r'_i=r_i$ for $1 \leq i \leq k$ and $r'_{k+1}=m$. \item Let $\mathbf{r}=(r_1,\ldots,r_k) \in \mathbb{N}^k$ be a vector of size $k$. The vector $\mathbf{r^-}$ is the vector of size $k-1$ with $r^-_i=r_i$ for $1 \leq i \leq k-1$. \end{itemize} \end{defn} \begin{defn} Let $i$ be a node of $T$ and let $X_i=\{v_1,\ldots,v_k\}$ be the bag of $i$. For a vertex $x$ of $G$, the \emph{distance vector} $\distvec{X_i}{x}$ of $x$ to $X_i$ is the vector of size $k$ such that, for $1 \leq j \leq k$, $\distvec{X_i}{x}_j=d(x,v_j)$. We define the set $\ddeux{X_i}$ as the set of distance vectors of the vertices of $T(X_i)$ at distance at most $2$ from $X_i$ in $G$ (i.e. one of the coordinates is at most $2$). \end{defn} \begin{defn} Let $G$ be a graph and $K=\{v_1,\ldots,v_k\}$ be a clique of $G$. Let $x$ be a vertex of $G$.
The \emph{trace} of $x$ on $K$, denoted by $\mathbf{Tr_K}(x)$, is the vector $\mathbf{r}$ of $\{0,1\}^k\setminus \{(1,\ldots,1)\}$ such that for every $1 \leq i \leq k$, $d(x,v_i)=a+\mathbf{r}_i$ where $a=d(x,K)$. Let $S$ be a subset of vertices of $G$. The trace $Tr_K(S)$ of $S$ in $K$ is the set of vectors $ \{ \mathbf{Tr_K}(x), {x\in S} \}$. \end{defn} The trace is well-defined because for a vertex $x$ and a clique $K$, the distance between $x$ and a vertex of $K$ is either $d(x,K)$ or $d(x,K)+1$. \begin{defn}\label{def_resolve_vec} Let $\mathbf{r_1}, \mathbf{r_2}$ and $\mathbf{r_3}$ be three vectors of the same size $k$. We say that $\mathbf{r_3}$ \emph{resolves} the pair $(\mathbf{r_1},\mathbf{r_2})$ if \[\min_{1 \leq i \leq k} \mathbf{(r_1+r_3)}_i \neq \min_{1 \leq i \leq k} \mathbf{(r_2+r_3)}_i.\] \end{defn} \begin{lemme} Let $K$ be a clique separator of $G$ and $G_1$ be a connected component of $G \setminus K$. Let $(x,y)$ be a pair of vertices of $G \setminus G_1$ and let $\mathbf{r}$ be a vector of size $|K|$. If $\mathbf{r}$ resolves the pair $(\distvec{K}{x},\distvec{K}{y})$, then any vertex $s \in V(G_1)$ with $\mathbf{Tr_K}(s)=\mathbf{r}$ resolves the pair $(x,y)$. \end{lemme} \begin{proof} Let $s$ be a vertex of $G_1$ such that $\mathbf{Tr_K}(s)=\mathbf{r}$. The clique $K$ separates $s$ and $x$ (resp. $y$) so $d(x,s)=\min_{1 \leq i \leq k} (\distvec{K}{x}+\mathbf{Tr_K}(s))_i+d(K,s)$ (resp. $d(y,s)=\min_{1 \leq i \leq k} (\distvec{K}{y}+\mathbf{Tr_K}(s))_i+d(K,s)$). The vector $\mathbf{r}$ resolves the pair $(\distvec{K}{x},\distvec{K}{y})$. So $d(x,s) \neq d(y,s)$ and $s$ resolves the pair $(x,y)$.\qed \end{proof} \begin{defn} Let $K$ be a clique separator of $G$ and $G_1$, $G_2$ be two (not necessarily distinct) connected components of $G \setminus K$. Let $M$ be a set of vectors and let $u \in V(G_1) \cup K$ and $v \in V(G_2) \cup K$.
If a vector $\mathbf{r}$ resolves the pair $(\distvec{K}{u},\distvec{K}{v})$, we say that $\mathbf{r}$ \emph{resolves} the pair $(u,v)$. We say that the pair of vertices $(u,v)$ \emph{is resolved} by $M$ if there exists a vector $\mathbf{r} \in M$ that resolves the pair $(u,v)$. \end{defn} We can now define the generalised problem our dynamic programming algorithm actually solves. We call it the \textsc{extended metric dimension} problem ({\sc EMD} for short). We first define the instances of this problem. \begin{defn}Let $i$ be a node of $T$. An \emph{instance for a node $i$} of the {\sc EMD} problem is a $5$-tuple $\I {}$ composed of the bag $X_i$ of $i$, a subset $S_I$ of $X_i$ and three sets of vectors satisfying \begin{itemize} \item $\int {I}\subseteq \{0,1\}^{|X_i|}$ and $\ext{I} \subseteq \{0,1\}^{|X_i|}$, \item $\paire{I} \subseteq [|0,3|]^{|X_i|} \times [|0,3|]^{|X_i|}$, \item $\ext{I} \neq \emptyset$ or $S_I \neq \emptyset$, \item For each pair of vectors $(\mathbf{r_1},\mathbf{r_2}) \in \paire I$, there exist two vertices $x \in \T {X_i}$ with $\distvec{X_i}{x}= \mathbf{r_1}$ and $d(x,X_i) \leq 2$ and $y \notin \T {X_i}$ with $\distvec{X_i}{y}= \mathbf{r_2}$ and $d(y,X_i) \leq 2$. \end{itemize} \end{defn} \begin{defn}\label{def_instance} A set $S \subseteq T(X_i)$ is a solution for an instance $I$ of the {\sc EMD} problem if \begin{itemize} \item \textbf{(S1)} Every pair of vertices of $\T {X_i}$ is either resolved by a vertex in $S$ or resolved by a vector of $\ext I$. \item \textbf{(S2)} For each vector $\mathbf{r} \in \int I$ there exists a vertex $s \in S$ such that $\mathbf{Tr_{X_i}}(s)= \mathbf{r}$.
\item \textbf{(S3)} For each pair of vectors $(\mathbf{r_1},\mathbf{r_2}) \in \paire I$, for any vertex $x \in \T {X_i}$ with $\distvec{X_i}{x}= \mathbf{r_1}$ and any vertex $y \notin \T {X_i}$ with $\distvec{X_i}{y}= \mathbf{r_2}$, if $d(x,X_i) \leq 2$ and $d(y,X_i) \leq 2$, the pair $(x,y)$ is resolved by $S$. \item \textbf{(S4)} $S \cap X_i =S_I$. \end{itemize} \end{defn} In the rest of the paper, for shortness, we will refer to an instance of the {\sc EMD} problem simply as an instance. \begin{defn}\label{def_dim} Let $I$ be an instance. We denote by $\dim (I)$ the minimum size of a set $S \subseteq \T{X_i}$ which is a solution of $I$. If such a set does not exist we define $\dimx I= + \infty$. We call this value the \emph{extended metric dimension} of $I$. \end{defn} We now explain the meaning of each element of $I$. Firstly, a solution $S$ must resolve any pair in $\T {X_i}$, possibly with a vector of $\ext{I}$ which represents a vertex of $V \setminus \T {X_i}$ in the resolving set. Secondly, for all $\mathbf{r}$ in $\int I$, we are forced to select a vertex in $T(X_i)$ whose trace is $\mathbf{r}$. This will be useful to combine solutions since it will be a vector of $\ext{I}$ in other instances. The elements in $\paire{I}$ will also be useful for combinations. In some sense $\paire{I}$ is the additional gain of $S$ compared to the main goal to resolve $\T {X_i}$. The set $S_I$ constrains the intersection between $S$ and $X_i$ by forcing a precise subset of $X_i$ to be in $S$. The following lemma is a consequence of Definition~\ref{def_instance}. It connects the definition of the extended metric dimension with the metric dimension. \begin{lemme}\label{correct} Let $G$ be a graph, $T$ be a nice tree-decomposition of $G$ and $r$ be the root of $T$.
Let $I_0$ be the instance ${(\{r\},\{r\},\emptyset,\emptyset,\emptyset)}$, then $\dim(I_0)$ is the smallest size of a resolving set of $G$ containing $r$. \end{lemme} To ensure that our algorithm works well, we will need to use Lemma~\ref{resolve_far} in some subgraphs of $G$. This is possible only if we know that the solution is not included in the subgraph. This corresponds to the condition $\ext{I} \neq \emptyset$ or $S_I \neq \emptyset$ and this is why the algorithm computes the size of a resolving set containing the root of $T$. \subsection{Dynamic programming} \label{sec:rules} We explain how we can compute the metric dimension of an instance $I$ given the metric dimension of the instances on the children of $X_i$ in $T$. The proof is divided according to the different types of nodes. \subsubsection{Leaf node} Computing the dimension of an instance for a leaf node can be done easily with the following lemma. \begin{lemme}\label{calcul_leaf} Let $I$ be an instance for a leaf node $i$ and $v$ be the unique vertex of~$X_i$. Then, $$ \dim(I) = \left\{ \begin{array}{lll} 0 & \text{ if } S_I=\emptyset,\; \int I =\emptyset \text{ and } \paire I = \emptyset \\ 1 & \text{ if } S_I=\{v\} \text{ and } \int {I} \subseteq \{ \mathbf{(0)} \} \\ +\infty & \text{ otherwise} \end{array} \right. $$ \end{lemme} \begin{proof} Let $I$ be an instance for $i$. If $S_I=\emptyset$, only the set $S=\emptyset$ can be a solution for $I$. This set is a solution only if $\int I =\emptyset$ and $\paire I = \emptyset$. If $S_I=\{v\}$, only the set $S=\{v\}$ can be a solution for $I$. This is a solution only if $\int {I}$ is empty or only contains the vector $\mathbf{Tr_{X_i}}(v)$. \qed \end{proof} In the rest of the section, we treat the three other types of nodes.
For each type of nodes we will proceed as follows: define some conditions on the instances on children to be compatible with $I$, and prove an equality between the extended metric dimension on compatible children instances and the extended metric dimension of the instance of the node. \subsubsection{Join node.} Let $I$ be an instance for a join node $i$ and let $i_1$ and $i_2$ be the children of $i$. \begin{defn}\label{compatible_pair} A pair of instances $(I_1, I_2)$ for $(i_1,i_2)$ is \emph{compatible} with~$I$ if \begin{itemize} \item \textbf{(J1)} $S_{I_1}=S_{I_2}=S_I$, \item \textbf{(J2)} $ \ext {I_1} \subseteq \ext{I} \cup \int{I_2} $ and $ \ext {I_2} \subseteq \ext{I} \cup \int{I_1} $, \item \textbf{(J3)} $\int{I} \subseteq \int{I_1} \cup \int {I_2}$, \item \textbf{(J4)} Let $C_1=\{ (\mathbf{r},\mathbf{t}) \in \paire{I}$ such that $\mathbf{r} \notin \ddeux{X_{i_1}}\}$ and $C_2=\{ (\mathbf{r},\mathbf{t}) \in \paire {I}$ such that $\mathbf{r} \notin \ddeux{X_{i_2}}\}$. Let $D_1=\{ (\mathbf{r},\mathbf{t}) \in \ddeux{X_{i_1}} \times \ddeux{G \setminus X_{i_1}}$ such that there exists $\mathbf{u} \in \int{I_2}$ resolving the pair $(\mathbf{r},\mathbf{t}) \}$ and $D_2=\{ (\mathbf{r},\mathbf{t}) \in \ddeux{X_{i_2}} \times \ddeux{G \setminus X_{i_2}}$ such that there exists $\mathbf{u} \in \int{I_1}$ resolving the pair $(\mathbf{r},\mathbf{t})\}$. Then $ \paire{I} \subseteq (C_1 \cup D_1 \cup \paire{I_1}) \cap (C_2 \cup D_2 \cup \paire{I_2}) $, \item \textbf{(J5)} For all $ \mathbf{r_1} \in \ddeux{X_{i_1}}$, for all $\mathbf{r_2} \in \ddeux{X_{i_2}}$, $(\mathbf{r_1},\mathbf{r_2}) \in \paire {I_1}$ or $(\mathbf{r_2},\mathbf{r_1}) \in \paire {I_2}$ or there exists $\mathbf{t} \in \ext{I}$ such that $\mathbf{t}$ resolves the pair $(\mathbf{r_1},\mathbf{r_2})$. \end{itemize} \end{defn} Condition \textbf{(J4)} represents how the pairs of vertices of $V(T(X_{i_1})) \times V(T(X_{i_2}))$ can be resolved.
A pair $(\mathbf{r},\mathbf{t})$ is in $(C_1 \cup D_1 \cup \paire{I_1})$ if all the pairs of vertices $(x,y)$ with $x \in V(T(X_{i_1})) $ and $y \in V(T(X_{i_2}))$ are resolved. If $(\mathbf{r},\mathbf{t})$ is in $C_1$, no such pair of vertices exists, if $(\mathbf{r},\mathbf{t})$ is in $D_1$ the pairs of vertices are resolved by a vertex outside of $V(T(X_{i_1}))$ and if $(\mathbf{r},\mathbf{t})$ is in $\paire{I_1}$ the pairs of vertices are resolved by a vertex of $V(T(X_{i_1}))$. So a pair $(\mathbf{r},\mathbf{t})$ is resolved if the pair is in $(C_1 \cup D_1 \cup \paire{I_1})$ and in $(C_2 \cup D_2 \cup \paire{I_2})$. Let $\mathcal{F}_J(I)$ be the set of pairs of instances compatible with~$I$. We want to prove the following lemma: \begin{lemme}\label{node_pair_main} Let $I $ be an instance for a join node $i$. Then, \[ \dim(I) = \min_{(I_1,I_2) \in \mathcal{F}_J(I)} (\dim(I_1) + \dim(I_2) - |S_I|). \] \end{lemme} We prove the equality by proving the two inequalities in the next lemmas. \begin{lemme}\label{calcul_pair_ineq1} Let $(I_1,I_2)$ be a pair of instances for $(i_1,i_2)$ compatible with $I$ with finite values for $\dim(I_1)$ and $\dim(I_2)$. Let $S_1 \subseteq V(T(X_{i_1}))$ be a solution for $I_1$ and $S_2 \subseteq V(T(X_{i_2}))$ be a solution for $I_2$. Then $S = S_1 \cup S_2$ is a solution for $I$. In particular, $$ \dim(I) \leq \min_{(I_1,I_2) \in \mathcal{F}_J(I)} (\dim(I_1) + \dim(I_2) - |S_I|).$$ \end{lemme} \begin{proof} Let us prove that the conditions of Definition~\ref{def_instance} are satisfied. \noindent \textbf{(S1)} Let $(x,y)$ be a pair of vertices of $T(X_i)$. Assume first that $x \in V(T(X_{i_1}))$ and $y \in V(T(X_{i_1}))$. Either $(x,y)$ is resolved by a vertex of $S_1$ and then by a vertex of $S$ or $(x,y)$ is resolved by a vector $\mathbf{r} \in \ext{I_1}$. By condition \textbf{(J2)}, $\mathbf{r} \in \ext{I}$ or $\mathbf{r} \in \int{I_2}$. If $\mathbf{r} \in \ext{I}$ then $(x,y)$ is resolved by a vector of $\ext{I_1}$. 
Otherwise, there exists a vertex $t \in S_2$ such that $\mathbf{Tr_{X_{i_2}}}(t)=\mathbf{r}$. So $t \in S$ and $t$ resolves the pair $(x,y)$. The case $x \in V(T(X_{i_2}))$ and $y \in V(T(X_{i_2}))$ is symmetric. So we can assume that $x \in V(T(X_{i_1}))$ and $y \in V(T(X_{i_2}))$. If $d(x,X_i) \leq 2$ and $d(y,X_i) \leq 2$, the condition \textbf{(J5)} ensures that the pair $(x,y)$ is resolved by $S$ or by a vector of $\ext{I}$. Otherwise, either $|d(x,X_i) - d(y,X_i)| \leq 1$ and $(x,y)$ is resolved by Corollary~\ref{resolve_dec} or $|d(x,X_i) - d(y,X_i)| \geq 2$ and $(x,y)$ is resolved by Lemma~\ref{resolve_far} because $\ext{I} \neq \emptyset$ or $ S_I \neq \emptyset$. \noindent \textbf{(S2)} Let $\mathbf{r} \in \int{I}$. By compatibility, the condition \textbf{(J3)} ensures that $\mathbf{r} \in \int{I_1}$ or $\mathbf{r} \in \int{I_2}$. As $S=S_1 \cup S_2$, $S$ contains a vertex $s$ such that $\mathbf{Tr_{X_i}}(s)=\mathbf{r}$. \noindent \textbf{(S3)} Let $(\mathbf{r},\mathbf{t}) \in \paire{I}$ and $(x,y)$ with $x \in V(T(X_i))$ such that $\distvec{X_i}{x}=\mathbf{r}$ and $y \notin T(X_i)$ such that $\distvec{X_i}{y}=\mathbf{t}$. Without loss of generality assume that $x \in V(T(X_{i_1}))$. By compatibility, $ (\mathbf{r},\mathbf{t}) \in (C_1 \cup D_1 \cup \paire{I_1}) \cap (C_2 \cup D_2 \cup \paire{I_2})$ so in $C_1 \cup D_1 \cup \paire{I_1}$. If $(\mathbf{r},\mathbf{t}) \in \paire{I_1}$, then there exists $s \in S_1$ that resolves the pair $(x,y)$ so the pair is resolved by $S$. If $(\mathbf{r},\mathbf{t}) \in D_1$, there exists $\mathbf{u} \in \int {I_2}$ such that $\mathbf{u}$ resolves the pair $(\mathbf{r},\mathbf{t})$. By compatibility, there exists $s \in S_2$ such that $\mathbf{Tr_{X_i}}(s)=\mathbf{u}$. So $s$ resolves the pair $(x,y)$. And $(\mathbf{r},\mathbf{t}) \notin C_1$ since $x$ belongs to $T(X_{i_1})$ with distance vector $\mathbf{r}$. \noindent \textbf{(S4)} is clear since $X_{i_1}=X_{i_2}=X_{i}$.
Thus, $\dim(I) \leq \dim(I_1)+\dim(I_2)-|S_I|$ is true for any pair of compatible instances $(I_1,I_2)$ so $ \dim(I) \leq \min_{(I_1,I_2) \in \mathcal{F}_J(I)} (\dim(I_1) + \dim(I_2) - |S_I|)$.\qed \end{proof} \begin{lemme}\label{calcul_pair_ineq2} Let $I $ be an instance for a join node $i$ and let $i_1$ and $i_2$ be the children of $i$. Then, \[ \dim(I) \geq \min_{(I_1,I_2) \in \mathcal{F}_J(I)} (\dim(I_1) + \dim(I_2) - |S_I|). \] \end{lemme} \begin{proof} If $\dim (I)=+ \infty$ then the result indeed holds. So assume $\dim (I)$ is finite. Let $S$ be a solution for $I$ of minimal size. Let $S_1=S \cap T(X_{i_1})$ and $S_2=S \cap T(X_{i_2})$. We define now two instances $I_1$ and $I_2$ for $i_1$ and $i_2$. Let $S_{I_1} = S_{I_2}=S_I$, $\int {I_1}= Tr_{X_i}(S_1)$, $\int {I_2}= Tr_{X_i}(S_2)$, $\ext{I_1}=\ext{I} \cup \int{I_2}$ and $\ext{I_2}=\ext{I} \cup \int{I_1}$. To build the sets $\paire{I_1}$ and $\paire{I_2}$ we make the following process that we explain for $\paire{I_1}$. For all pairs of vectors $(\mathbf{r},\mathbf{t})$ of $(\ddeux{X_{i_1}},\ddeux{G \setminus X_{i_1}})$, consider all the pairs of vertices $(x,y)$ with $x \in V(T(X_{i_1}))$, $y \in V(G \setminus T(X_{i_1}))$, $\mathbf{r} \in \ddeux{X_i}$, $\mathbf{t} \in \ddeux{G \setminus X_{i_1}})$, $\distvec{X_i}{x}=\mathbf{r}$ and $\distvec{X_i}{y}=\mathbf{t}$. If all the pairs are resolved by vertices of $S_1$ (that for each pair, there exists a vertex of $S_1$ that resolves the pair), then add $(\mathbf{r},\mathbf{t})$ to $\paire{I_1}$. \begin{claim} $(I_1,I_2)$ is compatible with $I$. \end{claim} \begin{proof} \textbf{(J1)}, \textbf{(J2)} and \textbf{(J3)} are straightforward. \noindent \textbf{(J4)} Let $(\mathbf{r},\mathbf{t}) \in \paire I$, we want to prove that $ (\mathbf{r},\mathbf{t}) \in (C_1 \cup D_1 \cup \paire{I_1}) \cap (C_2 \cup D_2 \cup \paire{I_2}) $. We prove that $ (\mathbf{r},\mathbf{t}) \in (C_1 \cup D_1 \cup \paire{I_1})$, the other part of the proof is symmetrical. 
If $\mathbf{r} \notin \ddeux{X_{i_1}}$, then $ (\mathbf{r},\mathbf{t}) \in C_1 $. Otherwise, there exists a vertex $x$ in $T(X_{i_1})$ such that $\distvec{X_{i_1}}{x}=\mathbf{r}$ and a vertex $y$ in $G \setminus T(X_i)$ such that $\distvec{X_i}{y}=\mathbf{t}$ (because the pair $(\mathbf{r},\mathbf{t})$ belongs to $\paire I$). The pair $(x,y)$ is resolved by $S$. If there is a vertex $s \in S \cap T(X_{i_2})$ resolving the pair, then $s$ resolves all the pairs with such distance vector and then $(\mathbf{r},\mathbf{t}) \in D_1$. Otherwise, for any pair $(x,y)$ of $ T(X_{i_1}) \times G \setminus T(X_i) $ with $\distvec{X_{i_1}}{x}=\mathbf{r}$ and $\distvec{X_i}{y}=\mathbf{t}$, there is a vertex of $S \cap T(X_{i_1})$ that resolves the pair $(x,y)$, so $(\mathbf{r},\mathbf{t}) \in \paire {I_1}$. \noindent \textbf{(J5)} Let $\mathbf{r_1} \in \ddeux{X_{i_1}}$, $\mathbf{r_2} \in \ddeux{X_{i_2}}$ and two vertices $x \in X_{i_1}$ and $y \in X_{i_2}$ such that $\distvec{X_{i_1}}{x}=\mathbf{r_1}$ and $\distvec{X_{i_2}}{y}=\mathbf{r_2}$. As $S$ is a solution of $I$, either the pair $(x,y)$ is resolved by a vector $\mathbf{r_3} \in \ext I$, or there exists $s \in S$ resolving $(x,y)$. If $(x,y)$ is resolved by $s$, assume by contradiction that $(\mathbf{r_1},\mathbf{r_2}) \not \in \paire{I_1}$ and $(\mathbf{r_2},\mathbf{r_1}) \not \in \paire{I_1}$. Then there exist vertices $x_1,x_1' \in V(T(X_{i_1}))$ with $\distvec{X_{i_1}}{x_1}=\mathbf{r_1}$, $\distvec{X_{i_2}}{x_1'}=\mathbf{r_1}$ and $x_2,x_2' \in V(T(X_{i_2}))$ with $\distvec{X_{i_2}}{x_2}=\mathbf{r_2}$ and $\distvec{X_{i_2}}{x_2'}=\mathbf{r_2}$ such that the pair $(x_1,x_2)$ is not resolved by a vertex of $S_1$ and the pair $(x_1',x_2')$ is not resolved by a vertex of $S_2$. Let $s \in S$ resolving the pair $(x_1,x_2')$. If $s \in S_1$, then $s$ resolves the pair $(x_1,x_2)$ and if $s \in S_2$, then $s$ resolves the pair $(x_1',x_2')$, a contradiction. 
\qed \end{proof} \begin{claim} $S_1$ is a solution of $I_1$ and $S_2$ is a solution of $I_2$. \end{claim} \begin{proof} We only prove that $S_1$ is a solution of $I_1$ as the proof that $S_2$ is a solution of $I_2$ is similar. \noindent \noindent \textbf{(S1)} Let $(x,y)$ be a pair of vertices of $T(X_{i_1})$. As $S$ is a solution of $I$, the pair $(x,y)$ is either resolved by a vertex of $S$ or by a vector of $\ext{I}$. If $(x,y)$ is resolved by a vector of $\ext{I}$, the pair $(x,y)$ is also resolved by a vector of $\ext{I_1}$ since $\ext{I} \subseteq \ext{I_1}$. Otherwise let $s \in S$ resolving the pair $(x,y)$. If $s \in T(X_{i_1})$ then $(x,y)$ is resolved by a vertex of $S_1$. Otherwise $s \in T(X_{i_2})$ and by construction of $I_1$, $\ext {I_1}$ contains the vector $\mathbf{Tr_{X_i}(s)}$ so $(x,y)$ is resolved by a vector of $\ext{I_1}$. \noindent \textbf{(S2)} By definition, $ \int{I_1}=Tr_{X_i}(S_1)$. Hence, for any vector $\mathbf{r} \in \int {I_1}$, there is a vertex $s \in S_1$ with $\mathbf{Tr_{X_i}(s)}=\mathbf{r}$. \noindent \textbf{(S3)} Let $(\mathbf{r},\mathbf{t}) \in \paire{I_1}$, $x \in T(X_{i_1})$ and $y\notin T(X_{i_1})$ such that $\distvec{X_{i}}{x}=\mathbf{r}$ and $\distvec{X_{i}}{y}=\mathbf{y}$. By construction of $\paire{I_1}$ there is a vertex $s \in S_1$ resolving the pair $(x,y)$. \noindent \textbf{(S4)} $S_{I_1}=S_I$ and since $S$ is a solution of $I$, $S_I=S \cap X_i$. \qed \end{proof} Ultimately we get the announced inequality. Since $S$ is a minimal solution for $I$, we have $\dim(I)=|S|$. The sets $S_1$ and $S_2$ are solutions for $S_1$ and $S_2$ so $\dim(I_1) \leq |S_1|$ and $\dim(I_2) \leq |S_2|$. Since $|S|=|S_1|+|S_2|-|S_I|$ we get $\dim(I) \geq \dim(I_1) +\dim(I_2)- |S_I|$. This inequality is true for a specific pair of instances so in particular is true for a pair minimising the amount $\dim(I_1) +\dim(I_2)- |S_I|$, giving the result. 
\qed \end{proof} Lemma~\ref{node_pair_main} is a direct consequence of Lemma~\ref{calcul_pair_ineq1} and Lemma~\ref{calcul_pair_ineq2}. \subsubsection{Introduce node} We now consider an instance $I$ for an introduce node $i$. Let $j$ be the child of $i$ and $v \in V$ be such that $X_i=X_j \cup \{v\}$. Let $X_i=\{v_1,\ldots,v_k\}$ with $v=v_k$. The tree $T(X_i)$ contains one more vertex than its child. The definition of the compatibility is slightly different if we consider the same set as a solution (type $1$) or if we add this vertex to the resolving set (type $2$). \begin{defn}\label{compatible_introduce} An instance $I_1$ is compatible with $I$ of type $1$ (resp. $2$) if \begin{itemize} \item \textbf{(I1)} $S_I=S_{I_1}$ (resp. $=S_{I_1} \cup \{v\}$). \item \textbf{(I2)} For all $ \mathbf{r} \in \ext{I}$, $\mathbf{r^-} \in \ext{I_1}$ (resp. or $\mathbf{r}=(0,\ldots,0)$). \item \textbf{(I3)} For all $\mathbf{r} \in \int{I}, \mathbf{r}_k=1$ and $\mathbf{r^-}\in \int{I_1}$ (resp. or $\mathbf{r}=(1,\ldots,1,0)$). \item \textbf{(I4)} For all $ (\mathbf{r},\mathbf{t}) \in \paire {I}$, $(\mathbf{r^-},\mathbf{t^-}) \in \paire {I_1}$. \item \textbf{(I5)} If $I_1$ is of type $1$, for all $(\mathbf{r},\mathbf{t})$ with $\mathbf{t}=(0,\ldots,0)$, $(\mathbf{r},\mathbf{t}) \in \paire{I_1}$. \end{itemize} \end{defn} We want to prove that the following holds: \begin{lemme}\label{node_introduce_main} Let $I$ be an instance for an introduce node $i$. Let $\mathcal{F}_{1}(I)$ be the set of instances $I_1$ for $i_1$ compatible with $I$ of type 1 and $\mathcal{F}_{2}(I)$ be the set of instances $I_2$ for $i_1$ compatible with $I$ of type~2. Then, \[\dim(I)=\min\;\{ \min_{I_1 \in \mathcal{F}_{1}(I)}\;\{\dim(I_1)\};\min_{I_2 \in \mathcal{F}_{2}(I)}\;\{\dim(I_2)+1\}\}.\] \end{lemme} Let us first prove a technical case. \begin{lemme}\label{resolved_outside} Let $i$ be an introduce node, $j$ be the child of $i$ and $v \in V$ such that $X_i=X_j \cup \{v\}$. 
Let $(x,y)$ be a pair of vertices of $ T(X_j)$. Let $\mathbf{r}$ be a binary vector of size $|X_i|$, then $\mathbf{r}$ resolves $(x,y)$ if and only if $\mathbf{r^-}$ resolves $(x,y)$. \end{lemme} \begin{proof} Let $\mathbf{r_1}=\mathbf{Tr_{X_i}}(x)$ and $\mathbf{r_2}=\mathbf{Tr_{X_i}}(y)$. Note that the set ${X_i\setminus \{v\}}$ separates $v$ from $x$ and $y$ so $\mathbf{(r_1)}_k=\mathbf{(r_2)}_k=1$. Assume first that $\mathbf{r}$ resolves $(x,y)$ and by contradiction that $\mathbf{r^-}$ does not resolve $(x,y)$. Since $\mathbf{r}$ resolves $(x,y)$, $\min_{1 \leq l \leq k} \mathbf{(r_1+r)}_\ell \neq \min_{1 \leq \ell \leq k} \mathbf{(r_2+r)}_\ell$ and $\min_{1 \leq \ell \leq k-1} \mathbf{(r_1+r)}_\ell = \min_{1 \leq \ell \leq k-1} \mathbf{(r_2+r)}_\ell$ by Definition~\ref{def_resolve_vec}. So the minimum change in at least one case. Assume by symmetry that $\min_{1 \leq \ell \leq k} \mathbf{(r_1+r)}_\ell \neq \min_{1 \leq \ell \leq k-1} \mathbf{(r_1+r)}_\ell$. So for $\ell<k$ we have $\mathbf{(r_1+r)}_\ell > \mathbf{(r_1+r)}_k$. Since $(\mathbf{r_1)}_k=1$, it implies that $d(x,v)<d(x,v_j)$ for all $ \ell \leq k-1$. A contradiction since $\{v_1, \ldots, v_{k-1}\}$ separates $x$ from $v$. Assume now that $\mathbf{r^-}$ resolves $(x,y)$ and by contradiction that $\mathbf{r}$ does not resolve $(x,y)$. Then by symmetry we can assume that $\min_{1 \leq j \leq k} \mathbf{(r_1+r)}_j \neq \min_{1 \leq j \leq k-1} \mathbf{(r_1+r)}_j$ meaning $ \mathbf{(r_1+r)}_k < \min_{1 \leq j \leq k-1} \mathbf{(r_1+r)}_j$. Since $(\mathbf{r_1)}_k=1$, $\mathbf{(r_1+r)}_k \geq 1$ and $\mathbf{(r_1+r)}_j =2$ for $1 \leq j \leq k-1$. So $\mathbf{r_1}=(1,\ldots1)$ which contradicts the fact that ${X_i\setminus \{v\}}$ separates $v$ from $x$.\qed \end{proof} \begin{lemme} \label{join_node1} Let $I_1$ be a compatible instance of type $1$ and $S$ be a solution of $I_1$, then $S$ is a solution of $I$. 
\end{lemme} \begin{proof} Let us prove that the conditions of Definition~\ref{def_instance} are satisfied. \noindent \textbf{(S1)} Let $(x,y)$ be a pair of vertices of $T(X_i)$. First assume that $x \neq v$ and $y \neq v$. If the pair $(x,y)$ is not resolved by a vertex of $S$, since $S$ is a solution for $I_1$, $(x,y)$ is resolved by a vector of $\ext I$. Let $\mathbf{r} \in \ext I$ resolving the pair $(x,y)$. As $I_1$ is compatible of type $1$, $\mathbf{r^-} \in \ext{I_1}$. Then $\mathbf{r^-}$ resolves $(x,y)$ by Lemma~\ref{resolved_outside}. So we can assume that $x=v$. The pair $(x,y)$ is also resolved by $S$ since $(\distvec{X_j}{y},(0,\ldots,0)) \in \paire{I_1}$. As $S$ is a solution for $I_1$, there is a vertex $s \in S$ that resolves the pair $(x,y)$. \noindent \textbf{(S2)} Let $\mathbf{r} \in \int {I}$. Since $I_1$ is compatible with $I$, there exists $\mathbf{r_1}\in \int{I_1}$ such that $\mathbf{r}=\mathbf{r_1|1}$. Let $s \in S$ such that $\mathbf{Tr_{X_j}}(s)=\mathbf{r_1}$, then $\mathbf{Tr_{X_i}}(s)=\mathbf{r}$. Indeed, the vertex $v$ is not the closest vertex of $X_i$ from $s$. If $s \in X_j$, that is clear. Otherwise $X_j$ is a separator between $s$ and $v$, so the shortest path between $s$ and $v$ crosses a vertex of $X_j$. Thus, $\mathbf{Tr_{X_i}}(s)=\mathbf{r_1|1}$. \noindent \textbf{(S3)} Let $(\mathbf{r},\mathbf{t}) \in \paire I$. Let $x \in V(T(X_i))$ such that $\distvec{X_i}{x}=\mathbf{r}$ and $y \notin T(X_i)$ such that $\distvec{X_i}{y}=\mathbf{t}$. Then $\distvec{X_j}{x}=\mathbf{r^-}$ and $\distvec{X_j}{y}=\mathbf{t^-}$ so the pair $(x,y)$ is resolved by $S$ because $(\mathbf{r^-},\mathbf{t^-})$ belongs to $\paire{I_1}$. \noindent \textbf{(S4)} As $S_I=S_{I_1}$ we have $S \cap X_i=S_I$.\qed \end{proof} \begin{lemme}\label{join_node2} Let $I_2$ be a compatible instance of type $2$ and $S$ a solution of $I_2$, then $S'=S \cup \{v\}$ is a solution of~$I$.
\end{lemme} \begin{proof} Let us prove that the conditions of Definition~\ref{def_instance} are satisfied. \noindent \textbf{(S1)} Let $(x,y)$ be a pair of vertices of $T(X_i)$ with $x \neq v$ and $y \neq v$. If the pair $(x,y)$ is not resolved by a vertex of $S$, since $S$ is a solution for $I_2$, $(x,y)$ is resolved by a vector $\mathbf{r} \in \ext {I_2}$. By compatibility there exists $\mathbf{r'} \in \ext I$ such that $\mathbf{r'^-}=\mathbf{r}$. By Lemma~\ref{resolved_outside}, $\mathbf{r'}$ resolves the pair $(x,y)$. Ultimately, if $v=x$ or $v=y$, the pair $(x,y)$ is also resolved by $S'$ as $v \in S'$. \noindent \textbf{(S2)} Let $\mathbf{r} \in \int {I}$. If $\mathbf{r}=(1,\ldots,1,0)$, as $\mathbf{Tr_{X_i}}(v)=\mathbf{r}$, there is a vertex in $S'$ with trace $\mathbf{r}$. Otherwise, as $I_1$ is compatible, there exists $\mathbf{r_1}\in \int{I_2}$ such that $\mathbf{r}=\mathbf{r_1|1}$. Let $s \in S$ such that $\mathbf{Tr_{X_j}}(s)=\mathbf{r_1}$, then $\mathbf{Tr_{X_i}}(s)=\mathbf{r}$. Indeed, the vertex $v$ is not the closest vertex of $X_i$ from $s$. If $s \in X_j$, that's clear. Otherwise $X_j$ is a separator between $s$ and $v$, so the shortest path between $s$ and $v$ crosses a vertex of $X_j$. Thus, $\mathbf{Tr_{X_i}}(s)=\mathbf{r_1|1}$. \noindent \textbf{(S3)} Let $(\mathbf{r},\mathbf{t}) \in \paire I$. Let $x \in V(T(X_i))$ such that $\distvec{X_i}{x}=\mathbf{r}$ and $y \notin T(X_i)$ such that $\distvec{X_i}{y}=\mathbf{t}$. Then $\distvec{X_j}{x}=\mathbf{r^-}$ and $\distvec{X_j}{y}=\mathbf{t^-}$ so the pair $(x,y)$ is resolved by $S$ because $(\mathbf{r^-},\mathbf{t^-})$ belongs to $\paire{I_1}$. \noindent \textbf{(S4)} As $S_I=S_{I_1} \cup \{v\}$ we have $S \cap X_i=S_{I_1} \cup \{v\}=S_I$.\qed \end{proof} \begin{lemme}\label{join_node3} Let $I$ be an instance for an introduce node $i$. 
Let $\mathcal{F}_{1}(I)$ be the set of instances $I_1$ for $i_1$ compatible with $I$ of type 1 and $\mathcal{F}_{2}(I)$ be the set of instances $I_2$ for $i_1$ compatible with $I$ of type~2. Then, \[\dim(I) \leq \min\;\{ \min_{I_1 \in \mathcal{F}_{1}(I)}\;\{\dim(I_1)\},\min_{I_2 \in \mathcal{F}_{2}(I)}\;\{\dim(I_2)+1\}\}.\] \end{lemme} \begin{proof} The proof directly follows from the fact that, for any instance $I_1$ for $j$ compatible with $I$, we can get a solution of $I$ of size $\dim(I_1)$ if $I_1 \in \mathcal{F}_1$ by Lemma~\ref{join_node1} and of size $\dim(I_1)+1$ if $I_1 \in \mathcal{F}_2$ by Lemma~\ref{join_node2}. \qed \end{proof} \begin{lemme}\label{join_node4} Let $S$ be a solution for $I$ with $v \notin S$. Then there exists $I_1 \in \mathcal{F}_1$ such that $S$ is a solution of $I_1$. \end{lemme} \begin{proof} Let $I_1$ be the instance defined by $S_{I_1}=S_I$, $\int{I_1}= (\cup_{\mathbf{r} \in \int{I}}\mathbf{r^-})$, $\ext{I_1}=(\cup_{\mathbf{r} \in \ext{I}}\mathbf{r^-})$ and $\paire{I_1}= \cup_{(\mathbf{r},\mathbf{t}) \in \paire{I}} (\mathbf{r^-},\mathbf{t^-}))$. One can easily remark that $I_1$ is compatible with $I$ of type $1$. We prove that $S$ is a solution of $I_1$. \noindent \textbf{(S1)} Let $(x,y)$ be a pair of vertices of $T(X_j)$. As $S$ is a solution for $I$, either there exists $s \in S$ that resolves the pair $(x,y)$, or there is a vector $\mathbf{r} \in \ext I$ that resolves $(x,y)$. In the second case, by construction of $I_1$, the vector $\mathbf{r^-}$ belongs to $\ext {I_1}$ and resolves $(x,y)$ by Lemma~\ref{resolved_outside}. So the pair $(x,y)$ is resolved in both cases. \noindent \textbf{(S2)} Let $\mathbf{r} \in \int {I_1}$. By construction, there is $\mathbf{r'} \in \int I$ such that $\mathbf{r'^-}=\mathbf{r}$. Let $s \in S$ such that $\mathbf{Tr_{X_i}}(s)=\mathbf{r'}$, then $\mathbf{Tr_{X_j}}(s)=\mathbf{r}$. 
\noindent \textbf{(S3)} Let $(\mathbf{r},\mathbf{t}) \in \paire {I_1}$, $x \in \T {X_i}$ with $\distvec{X_j}{x}= \mathbf{r}$ and $y \notin \T {X_j}$ with $\distvec{X_j}{y}= \mathbf{t}$ with $d(x,X_j) \leq 2$ and $d(y,X_j) \leq 2$. Let $(\mathbf{r'},\mathbf{t'}) \in \paire{I}$ such that $(\mathbf{r},\mathbf{t})=(\mathbf{r'^-},\mathbf{t'^-})$. First $d(x,v)=d(x,X_j)+1$ because $X_j$ separates $x$ and $v$. This is true for any vertex of $X_j$ so the last component of $\mathbf{r'}$ is $d(x,X_j)+1$. So $\distvec{X_i}{x}= \mathbf{r'}$. If $\distvec{X_i}{y}= \mathbf{t'}$, then $(x,y)$ is resolved by a vertex of $S$. Otherwise, as $(\mathbf{r'},\mathbf{t'}) \in \paire{I}$, there exists a vertex $z \notin T(X_{j})$ such that $\distvec{X_i}{z}= \mathbf{t'}$ and $s$ in $S$ that resolves the pair $(x,z)$. Then $s$ resolves the pair $(x,y)$ because $d(s,y)=d(s,z)$. \noindent \textbf{(S4)} We have $S_I=S_{I_1}$ and $v \notin S_I $ so $S \cap X_{j} =S_{I_1}$. Finally, $S$ is a solution of $I_1$ so $\dim(I_1) \leq |S| \leq \dim(I)$. In particular $\dim(I) \geq \min_{I_1 \in \mathcal{F}_1}\;\{\dim(I_1)\}$. \qed \end{proof} \begin{lemme}\label{join_node5} Let $S$ be a solution for $I$ of minimal size with $v \in S$. Then there exists $I_2 \in \mathcal{F}_2$ such that $S \setminus \{v\}$ is a solution of $I_2$. \end{lemme} \begin{proof} Let $I_2$ be the instance where $S_{I_2}=S_I \setminus \{v\}$, $\int{I_2}= (\cup_{\mathbf{r} \in \int{I}}\mathbf{r^-}) $, $\ext{I_2}=\{\cup_{\mathbf{r} \in \ext{I}}\mathbf{r^-}\} \cup \{(0,\ldots,0)\}$ and $\paire{I_2}= \cup_{(\mathbf{r},\mathbf{t}) \in \paire{I}} (\mathbf{r^-},\mathbf{t^-})$. One can easily remark that $I_2$ is compatible with $I$ of type $2$. We prove that $S \setminus \{v\}$ is a solution of $I_2$. \noindent \textbf{(S1)} Let $(x,y)$ be a pair of vertices of $T(X_j)$. As $S$ is a solution for $I$, either there exists $s \in S$ that resolves the pair $(x,y)$ or there is a vector $\mathbf{r} \in \ext I$ that resolves $(x,y)$.
If $(x,y)$ is resolved by a vertex of $S \setminus \{v\}$, then the pair $(x,y)$ is resolved in $I_2$. If $(x,y)$ is resolved by $v$, then the vector $(0,\ldots,0)$ of $\ext{I_2}$ resolves the pair. If $(x,y)$ is resolved by a vector $\mathbf{r}$ of $\ext I$, then, by Lemma~\ref{resolved_outside}, $\mathbf{r^-}$ resolves the pair $(x,y)$ and $\mathbf{r^-} \in \ext {I_2}$ by construction. \noindent \textbf{(S2)} Let $\mathbf{r} \in \int {I_2}$. By construction, there is $\mathbf{r'} \in \int I$ such that $\mathbf{r'^-}=\mathbf{r}$. Let $s \in S$ such that $\mathbf{Tr_{X_i}}(s)=\mathbf{r'}$, then $\mathbf{Tr_{X_j}}(s)=\mathbf{r}$. \noindent \textbf{(S3)} Let $(\mathbf{r},\mathbf{t}) \in \paire {I_2}$, $x \in \T {X_i}$ with $\distvec{X_j}{x}= \mathbf{r}$ and $y \notin \T {X_j}$ with $\distvec{X_j}{y}= \mathbf{t}$ with $d(x,X_j) \leq 2$ and $d(y,X_j) \leq 2$. Let $(\mathbf{r'},\mathbf{t'}) \in \paire{I}$ such that $(\mathbf{r},\mathbf{t})=(\mathbf{r'^-},\mathbf{t'^-})$. First $d(x,v)=d(x,X_j)+1$ because $X_j$ separates $x$ and $v$. This is true for any vertex of $X_j$ so the last component of $\mathbf{r'}$ is $d(x,X_j)+1$. So $\distvec{X_i}{x}= \mathbf{r'}$. If $\distvec{X_i}{y}= \mathbf{t'}$, then $(x,y)$ is resolved by a vertex of $S$. Otherwise, as $(\mathbf{r'},\mathbf{t'}) \in \paire{I}$, there exists a vertex $z \notin T(X_j)$ such that $\distvec{X_i}{z}= \mathbf{t'}$ and $s$ in $S$ that resolves the pair $(x,z)$. Then $s$ resolves the pair $(x,y)$ because $d(s,y)=d(s,z)$. \noindent \textbf{(S4)} We have $S_{I_2}=S_I \setminus \{v\}$ so $(S \setminus \{v\}) \cap X_{j} =S_{I_2}$. Finally, $S \setminus \{v\}$ is a solution of $I_2$, thus $\dim(I_2) \leq |S|-1 \leq \dim(I)$. In particular $\dim(I) \geq \min_{I_2 \in \mathcal{F}_2}\;\{\dim(I_2)+1\}$.\qed \end{proof} Lemma~\ref{node_introduce_main} is a consequence of Lemmas~\ref{join_node3},~\ref{join_node4} and~\ref{join_node5}. \subsubsection{Forget node} We now consider an instance $I$ for a forget node $i$.
Let $j$ be the child of $i$ and $v \in V$ be such that $X_j=X_i \cup \{v\}$. Let $X_j=\{v_1,\ldots,v_k\}$ with $v=v_k$. The trees $T(X_i)$ and $T(X_j)$ contain the same vertices; the definition of compatibility gives conditions to have the same solution for $I$ and for an instance on the child node. We introduce three functions on vectors representing how the trace of a vertex can be modified when one considers two separators that differ by one vertex. \begin{defn} Let $\mathbf{r}$ be any binary vector. We define the functions $f$, $f^-$ and $f^+$ which return a vector with one more component. The function $f^-$ is defined as $\mathbf{f^-(r)}= \mathbf{r | \min(r_i)}$ if $\mathbf{r}$ is not constant and $\mathbf{f^-(r)}= \mathbf{ r| (r_1-1)}$ if $\mathbf{r}$ is constant. We define $f^+$ as $\mathbf{f^+(r)}= \mathbf{r| \max(r_i)}$ if $\mathbf{r}$ is not constant and $\mathbf{f^+(r)}= \mathbf{ r| (r_1+1)}$ if $\mathbf{r}$ is constant. We define $f$ as $\mathbf{f(r)}= \mathbf{r| \min(r_i)}$ if $\mathbf{r}$ is not constant and $\mathbf{f(r)}= \mathbf{ r| r_1}$ if $\mathbf{r}$ is constant. \end{defn} The function $f$ is introduced only to deal with the case of constant vector. These functions are defined to deal with the following case. Let $X_i$ and $X_j$ be two bags such that $X_i=X_j \setminus \{v\}$ for some vertex $v$. Let $x$ be any vertex, then $\distvec{X_j}{x}$ is equal to $\mathbf{f(\distvec{X_i}{x})}$, $\mathbf{f^+(\distvec{X_i}{x})}$ or $\mathbf{f^-(\distvec{X_i}{x})}$. Moreover, if $X_i$ separates $x$ and $v$, $\distvec{X_j}{x}=\mathbf{f^+(\distvec{X_i}{x})}$. \begin{defn}\label{compatible_forget} Let $I$ be an instance for a forget node $i$ and let $j$ be the child of $i$ and $v \in V$ such that $X_i=X_j \setminus \{v\}$. Let $X_j=\{v_1,\ldots,v_k\}$ with $v=v_k$. An instance $I_1$ for $j$ is compatible with $I$ if \begin{itemize} \item \textbf{(F1)} $S_I=S_{I_1} \setminus \{v\}$.
\item \textbf{(F2)} For all $ \mathbf{r} \in \ext{I}$, $\mathbf{r|1} \in \ext{I_1}$. \item \textbf{(F3)} For all $ \mathbf{r} \in \int{I}$, $\mathbf{r|0}\in \int{I_1}$ or $\mathbf{r|1}\in \int{I_1}$. \item \textbf{(F4)} $\forall (\mathbf{r},\mathbf{t}) \in \paire {I}$, if there exist two vertices $x \in \T {X_i}$ with $\distvec{X_j}{x}= \mathbf{f^-(r)}$ (resp. $\mathbf{f(r)}$, $\mathbf{f^+(r)}$) and $y \notin \T {X_i}$ with $\distvec{X_j}{y}= \mathbf{f^+(t)}$ with $d(x,X_j) \leq 2$ and $d(y,X_j) \leq 2$ then $(\mathbf{f^-(r)},\mathbf{f^+(t)})$ (resp. $(\mathbf{f(r)},\mathbf{f^+(t)})$, $(\mathbf{f^+(r)},\mathbf{f^+(t)})$) belongs to $ \paire {I_1}$. \end{itemize} \end{defn} \begin{lemme}\label{node_forget_main} Let $I$ be an instance for a forget node $i$. Let $\mathcal{F}_{F}(I)$ be the set of instances $I_1$ for $j$ compatible with $I$. Then, \[\dim(I)= \min_{I_1 \in \mathcal{F}_{F}(I)}\;\{\dim(I_1)\}.\] \end{lemme} The end of this section is devoted to proving Lemma~\ref{node_forget_main} by proving both inequalities in a similar way as for join and introduce nodes. We prove a technical lemma similar to Lemma~\ref{resolved_outside} with a similar proof. \begin{lemme}\label{resolved_outside2} Let $i$ be a forget node, $j$ be the child of $i$ and $v \in V$ such that $X_i=X_j \setminus \{v\}$. Let $(x,y)$ be a pair of vertices of $ \T{X_j}$. Let $\mathbf{r}$ be a binary vector of size $k$ with $\mathbf{r}_k=1$. Then $\mathbf{r}$ resolves $(x,y)$ if and only if $\mathbf{r^-}$ resolves $(x,y)$. \end{lemme} \begin{proof} Let $\mathbf{t_1}=\mathbf{Tr_{X_i}}(x)$ and $\mathbf{t_2}=\mathbf{Tr_{X_i}}(y)$. Assume $\mathbf{r}$ resolves $(x,y)$ and by contradiction that $\mathbf{r^-}$ does not resolve $(x,y)$. As $\mathbf{r}$ resolves $(x,y)$ and $\mathbf{r^-}$ does not, we have by Definition~\ref{def_resolve_vec}, $\min_{1 \leq l \leq k} \mathbf{(t_1+r)}_l \neq \min_{1 \leq l \leq k} \mathbf{(t_2+r)}_l$ and $\min_{1 \leq l \leq k-1} \mathbf{(t_1+r)}_l = \min_{1 \leq l \leq k-1} \mathbf{(t_2+r)}_l$.
So the minimum changes in at least one case, assume by symmetry that $\min_{1 \leq l \leq k} \mathbf{(t_1+r)}_l \neq \min_{1 \leq l \leq k-1} \mathbf{(t_1+r)}_l$. Thus, for $l<k$, we have $\mathbf{(t_1+r)}_l > \mathbf{(t_1+r)}_k$. We know $\mathbf{r}_k=1$, so, for $l<k$, we have $\mathbf{(t_1+r)}_l > 1$. That gives $\mathbf{r}=(1,\ldots,1)$ which contradicts the fact that ${X_j}$ separates $v$ from $x$: one vertex of $X_j$ is strictly closer to $x$ than $v$. Assume $\mathbf{r^-}$ resolves $(x,y)$ and by contradiction that $\mathbf{r}$ does not resolve $(x,y)$. Then, by symmetry we can assume that $\min_{1 \leq l \leq k} \mathbf{(t_1+r)}_l \neq \min_{1 \leq l \leq k-1} \mathbf{(t_1+r)}_l$, meaning $ \mathbf{(t_1+r)}_k < \min_{1 \leq l \leq k-1} \mathbf{(t_1+r)}_l$. We know $\mathbf{r}_k=1$ so $\mathbf{(t_1+r)}_k \geq 1$ and $\mathbf{(t_1+r)}_l =2$ for $1 \leq l \leq k-1$. So $\mathbf{r}=(1,\ldots,1)$ which contradicts the fact that ${X_j}$ separates $v$ from $x$.\qed \end{proof} \begin{lemme}\label{calcul_forget1} Let $I_1 \in \mathcal{F}_{F}(I)$ and $S$ be a solution for $I_1$, then $S$ is a solution for~$I$. \end{lemme} \begin{proof} Let us prove that the conditions of Definition~\ref{def_instance} are satisfied. \noindent \textbf{(S1)} Let $(x,y)$ be a pair of vertices of $T(X_i)$. As $T(X_i)=T(X_j)$, the pair is resolved by a vertex of $S$ or by a vector $\mathbf{r}$ of $\ext{I_1}$. If $(x,y)$ is resolved by $\mathbf{r} \in \ext{I_1}$ then by Lemma~\ref{resolved_outside2}, $\mathbf{r^-}$ resolves the pair $(x,y)$ and $\mathbf{r^-} \in \ext {I}$ by compatibility. \noindent \textbf{(S2)} Let $\mathbf{r} \in \int{I}$. By compatibility, $\mathbf{r|0} \in \int{I_1}$ or $\mathbf{r|1} \in \int{I_1}$. Let $s \in S$ such that $\mathbf{Tr_{X_i}}(s) \in \{ \mathbf{r|0},\mathbf{r|1} \}$, then $\mathbf{Tr_{X_j}}(s) =\mathbf{r}$.
\noindent \textbf{(S3)} Let $(\mathbf{r},\mathbf{t}) \in \paire{I}$, $x \in V(T(X_{i}))$ and $y\notin T(X_{i})$ such that $\distvec{X_{i}}{x}=\mathbf{r}$ and $\distvec{X_{i}}{y}=\mathbf{t}$. Assume also $d(x,X_i) \leq 2$ and $d(y,X_i) \leq 2$. The set $X_j$ separates $v$ and $y$ so $\distvec{X_j}{y}= f^+(t)$. As $\distvec{X_j}{x}$ is equal either to $\mathbf{f^-(r)}$, $\mathbf{f(r)}$ or to $\mathbf{f^+(r)}$, the pair $(x,y)$ is resolved by a vertex of $S$. \noindent \textbf{(S4)} is clear.\qed \end{proof} \begin{lemme}\label{calcul_forget2} Let $S$ be a solution for $I$ of minimal size. Then there exists $I_1$ compatible with $I$ such that $S$ is a solution of $I_1$. \end{lemme} \begin{proof} Let $S$ be a solution for $I$ of minimal size. Let $I_1$ be the following instance: $S_{I_1} = S \cap X_j$, $\int {I_1}= \{ \mathbf{Tr_{X_j}}(s),s \in S\}, \ext{I_1} = \{\mathbf{r}|1, \mathbf{r} \in \ext{I} \}, \paire {I_1} =\{ (\mathbf{f^-(r)},\mathbf{f^+(t)}), (\mathbf{r},\mathbf{t}) \in \paire{I} \} \cup \{ (\mathbf{f(r)},\mathbf{f^+(t)}), (\mathbf{r},\mathbf{t}) \in \paire{I} \} \cup \{ (\mathbf{f^+(r)},\mathbf{f^+(t)}), (\mathbf{r},\mathbf{t}) \in \paire{I} \}$. We first check the compatibility. \noindent \textbf{(F1)}, \textbf{(F2)} and \textbf{(F4)} are straightforward. \noindent \textbf{(F3)} Let $\mathbf{r} \in \int{I}$ and $s\in S$ such that $\mathbf{Tr_{X_i}}(s)=\mathbf{r}$. By construction, $\mathbf{r'}=\mathbf{Tr_{X_j}}(s)$ belongs to $\int {I_1}$ and $\mathbf{r'^-}=\mathbf{r}$ so $\mathbf{r|0} \in \int{I_1}$ or $\mathbf{r|1} \in \int{I_1}$. We now prove that $S$ is a solution for $I_1$. \noindent \textbf{(S1)} Let $(x,y)$ be a pair of vertices of $T(X_j)$. As $T(X_i)=T(X_j)$, the pair is resolved by a vertex of $S$ or by a vector $\mathbf{r}$ of $\ext{I}$. If $(x,y)$ is resolved by $\mathbf{r} \in \ext{I}$ then by Lemma~\ref{resolved_outside2}, $\mathbf{r|1}$ resolves the pair $(x,y)$ and $\mathbf{r|1} \in \ext {I_1}$ by construction.
\noindent \textbf{(S2)} Let $\mathbf{r} \in \int{I_1}$. By construction there is $s \in S$ such that $\mathbf{Tr_{X_j}}(s)=\mathbf{r}$. \noindent \textbf{(S3)} Let $(\mathbf{r},\mathbf{t}) \in \paire{I_1}$, $x \in V(T(X_j))$ and $y\notin T(X_j)$ such that $\distvec{X_j}{x}=\mathbf{r}$ and $\distvec{X_j}{y}=\mathbf{t}$. Assume also $d(x,X_i) \leq 2$ and $d(y,X_i) \leq 2$. Then $\mathbf{Tr_{X_j}}(y)=f^+(\mathbf{t})$ and $\mathbf{Tr_{X_j}}(x) \in \{ \mathbf{f^-(r)}, \mathbf{f(r)}, \mathbf{f^+(r)} \}$. Since $S$ is a solution of $I$, the pair $(x,y)$ is resolved by a vertex of $S$. \noindent \textbf{(S4)} is clear.\qed \end{proof} Lemma~\ref{node_forget_main} is a consequence of Lemmas~\ref{calcul_forget1} and~\ref{calcul_forget2}. \subsection{Algorithm} Given as input a nice clique tree, the algorithm computes the extended metric dimension of all the possible instances bottom up from the leaves. The algorithm computes the values for leaves using Lemma~\ref{calcul_leaf}, for join nodes using Lemma~\ref{node_pair_main}, for introduce nodes using Lemma~\ref{node_introduce_main} and for forget nodes using Lemma~\ref{node_forget_main}. The correctness of the algorithm follows directly from these lemmas. We denote this algorithm by $IMD$ in the following, which takes as input a nice clique tree $T$ and outputs the minimal size of a resolving set of $G$ containing the root of $T$. \section{Proof of Theorem~\ref{thm:main}}\label{sec:complexity} Let us finally explain how we can compute the metric dimension of $G$. \begin{lemme} \label{multi} The metric dimension of $G$ is $ \min_{v \in G} \{IMD(T(v))\}$ where $T(v)$ is a nice clique tree of $G$ rooted in $v$. \end{lemme} \begin{proof} For any input, $IMD(T(v))$ outputs the size of a resolving set of $G$. So, $ \min_{v \in G} \{IMD(T(v))\} \geq \dim(G)$. Let $S$ be a minimum resolving set of $G$ and let $v \in S$.
By Lemma~\ref{correct}, $IMD(T(v))$ outputs the minimum size of a resolving set containing $v$ so $ \min_{v \in G} \{IMD(T(v))\} \leq \dim(G)$ which completes the proof. \qed \end{proof} In particular, $n$ executions of the $IMD$ algorithm with different inputs are enough to compute the metric dimension. Lemma~\ref{tree_dec} ensures that we can find for any vertex $v$ of $G$ a nice clique tree in linear time; the last part is to compute the complexity of the $IMD$ algorithm. To get the announced complexity, we add a first step to the $IMD$ algorithm: for each bag $X$, we compute $\ddeux{X} \cap T(X)$ and $\ddeux{X} \cap (G \setminus T(X))$. This computation can be done in $O(n^2)$ time (recall that $T$ has a linear number of bags by Lemma~\ref{tree_dec}). Note also that the size of $\ddeux{X} \cap T(X)$ and $\ddeux{X} \cap (G \setminus T(X))$ depends only on $|X_i|$. To compute the complexity, we need to compute the number of instances and the time to solve an instance. For simplicity we let $\alpha(k):= 2^k\cdot 2^{2^k}\cdot 2^{2^k}\cdot 2^{4^{2k}}$. \begin{lemme}\label{complexity_instance} Let $I $ be any instance for a node $i$ and assume $\dim(I')$ is known for every instance $I'$ compatible with $I$ for every child of $i$. Then $\dim (I)$ can be computed in time $O(f(|X_i|))$ for a computable function $f$. \end{lemme} \begin{proof} If $i$ is a leaf node then $\dim (I)$ can be computed in constant time by Lemma~\ref{calcul_leaf}. Otherwise, let us prove that one can compute for all the instances on the child nodes (at most two child nodes) all the compatible instances. Given a $5$-uplet $(X_i,S_I, \int I,\ext I,\paire{I})$, checking whether it is an instance according to Definition~\ref{def_instance} can be done in time $O(|I|\cdot g(|X_i|))$ where $g$ gives the size of $\ddeux{X_i} \cap T(X_i)$ plus the size of $\ddeux{X_i} \cap (G \setminus T(X_i))$. The number of such $5$-uplets $(X_i,S_I, \int I,\ext I,\paire{I})$ is bounded by $\alpha(|X_i|)$.
Thus, identifying the instances among all the $5$-uplets can be done in time depending only on $|X_i|$. Checking the compatibility can be done in a time that only depends on $|X_i|$. Condition \textbf{(J5)} can be checked in time $O(|X_i|^2 \cdot |I|)$ to check for each pair of vectors if a vector of $\ext{I}$ resolves it. Condition \textbf{(I5)} can be checked in time $O(|X_i|)$ and condition \textbf{(F4)} in time $O(|X_i|^2)$. The other compatibility conditions can be checked in time $O(|I|)$ and by Definition~\ref{def_instance}, $|I|$ is bounded by a function of $|X_i|$. Then, computing the minimum using the formulas of Lemmas~\ref{node_pair_main},~\ref{node_introduce_main} and~\ref{node_forget_main} can be done in time $O(\alpha(|X_i|))$. Ultimately, the computation of $\dim(I)$ is done in time bounded by a function of $|X_i|$. \qed \end{proof} \begin{cor}\label{complexiteIMD} The algorithm for $IMD$ runs in time $O(n(T)^2 + n(T) \cdot f(\omega))$ where $n(T)$ is the number of vertices of the input tree $T$ and $f=O(k^2 \cdot 2^{O(4^{2^k})})$ is a function that only depends on the size of a maximum clique $\omega$. \end{cor} \begin{proof} By definition of the treewidth, for any bag $X$ of $T$, $|X| \leq \omega$. The first step of computation to get $\ddeux{X_i} \cap T(X_i)$ and $\ddeux{X_i} \cap (G \setminus T(X_i))$ takes time $O(n^2)$. Then, the number of instances to compute for each vertex of $T$ is bounded by $\alpha(\omega)$ and each instance $I$ can be computed in time bounded by $O(\omega^2 \cdot |I|)$ by Lemma~\ref{complexity_instance}. \qed \end{proof} We now have all the ingredients to prove Theorem~\ref{thm:main}: \begin{proof} For each vertex $v$ of $G$, one can compute a nice clique tree of size at most $7n$ according to Lemma~\ref{tree_dec}.
Given this clique tree, the $IMD$ algorithm outputs the size of a smallest resolving set containing $v$ by Lemma~\ref{correct} in time $O(n(T)^2 + n(T) \cdot f(\omega))$ for a computable function $f$ according to Corollary~\ref{complexiteIMD}. Repeating this for all vertices of $G$ allows us to compute the metric dimension of $G$ by Lemma~\ref{multi} in time $O(n^3+n^2 \cdot f(\omega))$.\qed \end{proof} \end{document}
\begin{document} \title{Grothendieck duality for non-proper morphisms} \begin{abstract} \noindent We generalize the adjunction between the functors $Rf_*$ and $f^!$ of derived categories of quasi-coherent sheaves for proper morphisms $f\colon X \to Y$ of Noetherian schemes to the following situation: Let $f$ be a finite type morphism and let $Z' \subseteq X$ and $Z \subseteq Y$ be closed subsets such that $f$ restricts to a proper morphism $f'\colon Z'\to Z$. Then the functor $Rf_*$ is left adjoint to $R\Gamma_{Z'}f^!$ when considered as functors between complexes supported on $Z'$ or $Z$. \end{abstract} \section*{Introduction} Grothendieck's generalization of Serre duality is formulated in terms of adjoint functors. For a \emph{proper} morphism $f\colon X \to Y$ of Noetherian schemes of finite dimension it consists of the following quasi-isomorphism in the derived category of quasi-coherent sheaves on $Y$: \begin{align} \label{GDuality} Rf_*\underline{\operatorname{RHom}}_{\mathcal{O}_X}^{\bullet}(\mathcal{F}^{\bullet},f^!\mathcal{G}^{\bullet}) \overset{\sim}{\longrightarrow} \underline{\operatorname{RHom}}_{\mathcal{O}_Y}^{\bullet}(Rf_*\mathcal{F}^{\bullet},\mathcal{G}^{\bullet}) \end{align} for a bounded above complex $\mathcal{F}^{\bullet}$ and a bounded below complex $\mathcal{G}^{\bullet}$, both having coherent cohomology sheaves (\cite[VII, 3.4(c)]{HartshorneRD}). This isomorphism is known as \emph{coherent duality}. Taking $0$-th cohomology of the global sections functor, this implies that $Rf_*$ is left adjoint to the twisted inverse image functor $f^!$. The classical Serre duality for a coherent sheaf $\mathcal{F}$ on a projective Cohen-Macaulay variety $X$ can be written in the form \[ \operatorname{Hom}_k(H^i(X,\mathcal{F}),k) \cong H^{n-i}(X,\underline{\operatorname{Hom}}_{\mathcal{O}_X}(\mathcal{F},\omega)), \] for instance.
It is obtained by applying the above quasi-isomorphism \autoref{GDuality} to the structure morphism $X \to \operatorname{Spec} k$. In this paper we are concerned with morphisms $f$ which are proper only over closed subsets $Z'$ and $Z$ of $X$ and $Y$, which appear as the supports of the considered objects. Moreover, we just require that the cohomology sheaves of these objects are quasi-coherent. The main result is the following generalization of Grothendieck-Serre duality involving the functors $Rf_*$ and $R\Gamma_{Z'}f^!$, where $R\Gamma_{Z'}$ denotes the derived local cohomology functor: \begin{theorem*} Let $f\colon X \to Y$ be a separated and finite type morphism of Noetherian schemes and let $i\colon Z \to Y$ and $i'\colon Z' \to X$ be closed immersions with a proper morphism $f'\colon Z' \to Z$ such that the diagram \[ \xymatrix{ Z' \ar[r]^-{i'} \ar[d]^-{f'} & X \ar[d]^-f \\ Z \ar[r]^-i & Y } \] commutes. Then there is a natural transformation $\operatorname{tr}_f\colon Rf_*R\Gamma_{Z'}f^! \to \operatorname{id}$ such that, for all $\mathcal{F}^{\bullet} \in D_{\text{qc}}^-(\mathcal{O}_X)_{Z'}$ and $\mathcal{G}^{\bullet} \in D_{\text{qc}}^+(\mathcal{O}_Y)_Z$, the composition \begin{align*} \xymatrix{ Rf_* \underline{\operatorname{RHom}}_{\mathcal{O}_X}^{\bullet}(\mathcal{F}^{\bullet},R\Gamma_{Z'}f^!\mathcal{G}^{\bullet}) \ar[r] & \underline{\operatorname{RHom}}_{\mathcal{O}_Y}^{\bullet}(Rf_* \mathcal{F}^{\bullet},Rf_*R\Gamma_{Z'}f^! \mathcal{G}^{\bullet}) \ar[d]^{\operatorname{tr}_f} \\ & \underline{\operatorname{RHom}}_{\mathcal{O}_Y}^{\bullet}(Rf_* \mathcal{F}^{\bullet}, \mathcal{G}^{\bullet}),} \end{align*} where the first arrow is the natural map, is an isomorphism. Here $D_{\text{qc}}^-(\mathcal{O}_X)_{Z'}$ (and $D_{\text{qc}}^+(\mathcal{O}_Y)_Z$) denote the full subcategories of bounded above (and bounded below) complexes of the derived category of quasi-coherent sheaves on $X$ (and on $Y$) whose cohomology sheaves are supported on $Z'$ (and on $Z$). 
As a consequence, taking global sections, the functor $Rf_*$ is left adjoint to the functor $R\Gamma_{Z'}f^!$ between these categories. \end{theorem*} For the proof we employ Nagata compactification for the morphism $f$, which yields a factorization $X \overset{j}{\longrightarrow} \overline{X} \overset{\overline{f}}{\longrightarrow} Y$ into an open immersion $j$ and a proper morphism $\overline{f}$. The key step is then to define the map $\operatorname{tr}_f$ in this more general situation by using the trace for the proper morphism $\overline{f}$. This idea has its origin in \cite{Ruelling}, where Chatzistamatiou and R\"ulling consider morphisms which are proper along a family of supports. In particular, when working with residual complexes, $\operatorname{tr}_f$ is even a morphism of complexes. \section*{Acknowledgments} I cordially thank the supervisor of my PhD thesis, Manuel Blickle, for his excellent guidance and various inspiring conversations. I also thank Kay R\"ulling, who explained to us how the trace map underlying our adjunction could be constructed. Moreover, I thank Gebhard B\"ockle and Axel St\"abler for many useful comments. The author was partially supported by SFB / Transregio 45 Bonn-Essen-Mainz financed by Deutsche Forschungsgemeinschaft. \section*{Notation and conventions} All schemes we consider are assumed to be Noetherian. In particular, all schemes and all scheme morphisms are \emph{concentrated}, i.e.\ quasi-compact and quasi-separated. For a scheme $X$, we let $D_{\text{qc}}^*(X)$ or $D_{\operatorname{coh}}^*(X)$ with $* \in \{+,-,\text{b} \}$ denote the derived category of $\mathcal{O}_X$-modules with quasi-coherent or coherent cohomology. Here $*=+$ or $*=-$ or $*=b$ means that we require that the cohomology sheaves are bounded below or bounded above or bounded in both directions. 
\section{Local cohomology} First, let us provide some basic facts about local cohomology which will be needed due to working with objects supported on closed subsets. Unless otherwise stated, let $i\colon Z \to X$ be a closed immersion of Noetherian schemes. \begin{definition} \label{deflocalcohomology} The \emph{local cohomology functor} $R\Gamma_Z\colon D_{\text{qc}}(X) \to D_{\text{qc}}(X)$ is the derived functor of the left exact functor \[ \Gamma_Z:= \underset{n \in \mathbb N}{\varinjlim} \underline{\operatorname{Hom}}_{\mathcal{O}_X}(\mathcal{O}_X/\mathcal{I}^n,\usc), \] where $\mathcal{I}$ is any sheaf of ideals defining $Z$. \end{definition} A reference for local cohomology in this context is \cite{LocalHomology}. For example, \autoref{deflocalcohomology} is equation (0.1) of ibid. \begin{definition} \label{supportdefine} We say that a complex $\mathcal{F}^{\bullet}$ of $\mathcal{O}_X$-modules has \emph{support in} or \emph{on $Z$} or that $\mathcal{F}^{\bullet}$ \emph{is supported in} or \emph{on $Z$} if $j^*\mathcal{F}^{\bullet} = 0$ in $D(X)$. We write $D(X)_Z$, $D_{\text{qc}}(X)_Z$ etc. for the subcategory of objects of $D(X)$, $D_{\text{qc}}(X)$ etc. whose cohomology is supported on $Z$. \end{definition} The natural inclusion $\Gamma_Z \to \operatorname{id}$ induces a transformation $R\Gamma_Z \to \operatorname{id}$. As pointed out in the proof of \cite[Lemma (0.4.2)]{LocalHomology}, one has the following triangle: \begin{proposition} For every $\mathcal{F}^{\bullet} \in D_{\text{qc}}(X)$, there is a fundamental distinguished triangle \[ R\Gamma_Z \mathcal{F}^{\bullet} \longrightarrow \mathcal{F}^{\bullet} \longrightarrow Rj_*j^*\mathcal{F}^{\bullet} \longrightarrow R\Gamma_Z \mathcal{F}^{\bullet}[1], \] where the second map is the natural one from the adjunction of $Rj_*$ and $j^*$. 
This triangle restricts to the subcategories $D_{\text{qc}}^+(X)$ and $D_{\text{qc}}^b(X)$ because $j^*$ is exact and $Rj_*\colon D_{\text{qc}}^+(U) \to D_{\text{qc}}^+(X)$ has finite cohomological amplitude. \end{proposition} In particular, $R\Gamma_Z$ only depends on the closed subset $i(Z)$ and not on the scheme structure of $Z$. The fundamental triangle allows another characterization of $D_{\text{qc}}(X)_Z$: \begin{corollary} The subcategory $D_{\text{qc}}(X)_Z$ consists of all complexes $\mathcal{F}^{\bullet} \in D_{\text{qc}}(X)$ such that the natural map $R\Gamma_Z\mathcal{F}^{\bullet} \to \mathcal{F}^{\bullet}$ is an isomorphism. \end{corollary} \begin{lemma} If $\mathcal{I}$ is an injective quasi-coherent sheaf, then also the quasi-coherent sheaf $\Gamma_Z(\mathcal{I})$ is injective. \end{lemma} \begin{proof} It suffices to check the injectivity of $\Gamma_Z(\mathcal{I})$ locally. Thus the assertion follows from (\cite[Proposition 2.1.4]{Brodmann.LocCoh}). \end{proof} As $R\Gamma_Z \circ R\Gamma_Z \cong R\Gamma_Z$, the image of $R\Gamma_Z$ is exactly the subcategory $D_{\text{qc}}(X)_Z$. Furthermore, the functor $R\Gamma_Z$ is right adjoint to the inclusion $D_{\text{qc}}(X)_Z \into D_{\text{qc}}(X)$. This is a consequence of the following proposition, see the proof of \autoref{qcohadjunction}. \begin{proposition} \label{Gammaadj} Let $\mathcal{G}^{\bullet}$ be a complex in $D_{\text{qc}}(X)_Z$. Then there is a functorial isomorphism \[ \underline{\operatorname{RHom}}_{\mathcal{O}_X}^{\bullet}(\mathcal{G}^{\bullet},R\Gamma_Z\mathcal{F}^{\bullet}) \cong \underline{\operatorname{RHom}}_{\mathcal{O}_X}^{\bullet}(\mathcal{G}^{\bullet},\mathcal{F}^{\bullet}) \] for every $\mathcal{F}^{\bullet} \in D_{\text{qc}}(X)$. \end{proposition} \begin{proof} This is \cite[Lemma (0.4.2)]{LocalHomology}. We even do not have to assume that the cohomology sheaves of $\mathcal{F}^{\bullet}$ and $\mathcal{G}^{\bullet}$ are quasi-coherent. 
\end{proof} Next we verify the compatibility of $R\Gamma_Z$ with the derived functors $Rf_*$, $f^!$ and $\derotimes$. Let us fix a notation for base change: \begin{definition} \label{bchange} Let $f\colon X \to Y$ be a separated morphism of finite type and let $u\colon Y' \to Y$ be a morphism. Consider the cartesian square \[ \xymatrix{ X \times_Y Y' \ar[r]^-{v} \ar[d]^-{f'} & X \ar[d]^-f \\ Y' \ar[r]^-u & Y, } \] where $v$ and $u$ are the projections. The \emph{base change morphism} $\operatorname{bc}\colon u^*Rf_* \longrightarrow Rf'_*v^*$ is the adjoint of the composition \[ Rf_* \xrightarrow{Rf_*\operatorname{ad}_{v}} Rf_*Rv_*v^* \overset{\sim}{\longrightarrow} Ru_*Rf'_*v^*. \] Here $\operatorname{ad}_{v}$ is the unit of the adjunction between $Rv_*$ and $v^*$. \end{definition} The map $\operatorname{bc}$ is an isomorphism in several cases. For our purposes, we will need the case of flat base change: \begin{lemma}[\protect{\cite[Proposition 3.9.5]{LipmanGrothDual}}] With the notation of the preceding definition, the map $\operatorname{bc}$ is an isomorphism if $u$ is flat. \end{lemma} \begin{lemma} \label{Gammacomm} Let $f\colon X \to Y$ be a morphism of finite type and $i\colon Z \to Y$ a closed immersion. Let $Z'$ denote the fiber product $Z \times_Y X$ regarded as a closed subset of $X$ via the projection $Z \times_Y X \to X$. \begin{enumerate} \item There is a natural isomorphism of functors \[ Rf_*R\Gamma_{Z'} \cong R\Gamma_ZRf_*. \] \item If $f$ is flat, then there is a natural isomorphism of functors \[ f^*R\Gamma_Z \cong R\Gamma_{Z'}f^*. \] \end{enumerate} \end{lemma} \begin{proof} First we show that $Rf_*R\Gamma_{Z'}$ is supported on $Z$. Let $u\colon U \into Y$ and $v\colon V \into X$ be the open immersions of the complements of $Z$ and $Z'$ in $Y$ and $X$. Let $f'$ denote the restriction of $f$ to $V$. We obtain a cartesian square \[ \xymatrix{ V \ar[r]^-{v} \ar[d]^-{f'} & X \ar[d]^-f \\ U \ar[r]^-u & Y.
} \] Hence $u^*Rf_*R\Gamma_{Z'} \cong Rf'_*v^*R\Gamma_{Z'} = 0$. From \autoref{Gammaadj} we know that the canonical morphism $Rf_*R\Gamma_{Z'} \to Rf_*$ factors through $R\Gamma_ZRf_*$. Let $\alpha$ denote the corresponding morphism $Rf_*R\Gamma_{Z'} \to R\Gamma_ZRf_*$. The square \[ \xymatrix@C40pt{ Ru_*u^*Rf_* \ar[r]^-{Ru_*\operatorname{bc}} & Ru_*Rf'_*v^* \\ Rf_* \ar[u]^-{\operatorname{ad}_uRf_*} \ar[r]^-{Rf_*\operatorname{ad}_v} & Rf_*Rv_*v^* \ar[u]^-{\sim} } \] commutes because $Ru_*\operatorname{bc} \circ \operatorname{ad}_uRf_*$ is the adjoint of $\operatorname{bc}$ and hence equals the original morphism $Rf_* \to Ru_*Rf'_*v^*$. Let $\beta$ be the composition of the natural isomorphism $Rf_*Rv_*v^* \simeq Ru_*Rf'_*v^*$ with the inverse of $Ru_*\operatorname{bc}$. We have just seen that the right square of the diagram \[ \xymatrix{ Rf_*R\Gamma_{Z'} \ar[d]^{\alpha} \ar[r] & Rf_* \ar@{=}[d] \ar[r] & Rf_*Rv_*v^* \ar[d]_{\sim}^{\beta} \\ R\Gamma_ZRf_* \ar[r] & Rf_* \ar[r] & Ru_*u^*Rf_* } \] commutes. The left square commutes by construction. As the lines are distinguished triangles, $\alpha$ is an isomorphism. This shows (a). For (b) we proceed similarly. The cartesian square above gives rise to the isomorphism \[ \xymatrix{ f^*R\Gamma_Z \ar[d]^{\sim} \ar[r] & f^* \ar@{=}[d] \ar[r] & f^*Ru_*u^* \ar[d]_{\sim} \\ R\Gamma_{Z'}f^* \ar[r] & f^* \ar[r] & Rv_*v^*f^* } \] of distinguished triangles. \end{proof} \begin{remark} \label{underGammaPullback} With the notation of part (b) of the preceding lemma, for every quasi-coherent sheaf $\mathcal{F}$, we even have a natural isomorphism \[ f^*\Gamma_Z\mathcal{F} \cong \Gamma_{Z'}f^*\mathcal{F}, \] see \cite[Lemma 4.3.1]{Brodmann.LocCoh}. \end{remark} \begin{lemma} \label{RGammaTensor} Let $\mathcal{F}^{\bullet}$ and $\mathcal{G}^{\bullet}$ be complexes in $D_{\text{qc}}^b(X)$. 
There are natural isomorphisms \[ (R\Gamma_Z\mathcal{F}^{\bullet}) \derotimes_{\mathcal{O}_X} \mathcal{G}^{\bullet} \cong \mathcal{F}^{\bullet} \derotimes_{\mathcal{O}_X} (R\Gamma_Z\mathcal{G}^{\bullet}) \cong R\Gamma_Z(\mathcal{F}^{\bullet} \derotimes_{\mathcal{O}_X} \mathcal{G}^{\bullet}) \] in $D_{\text{qc}}(X)$. \end{lemma} \begin{proof} The natural map $\mathcal{F}^{\bullet} \derotimes R\Gamma_Z\mathcal{G}^{\bullet} \to \mathcal{F}^{\bullet} \derotimes \mathcal{G}^{\bullet}$ factors through $R\Gamma_Z(\mathcal{F}^{\bullet} \derotimes R\Gamma_Z\mathcal{G}^{\bullet})$ because \[ j^*(\mathcal{F}^{\bullet} \derotimes R\Gamma_Z\mathcal{G}^{\bullet}) \cong j^*\mathcal{F}^{\bullet} \derotimes j^*R\Gamma_Z\mathcal{G}^{\bullet} \cong 0. \] Let $\rho$ denote the composition of the natural isomorphism \[ Rj_*j^*(\mathcal{F}^{\bullet} \derotimes_{\mathcal{O}_X} \mathcal{G}^{\bullet}) \cong Rj_*(j^*\mathcal{F}^{\bullet} \derotimes_{\mathcal{O}_X} j^*\mathcal{G}^{\bullet}) \] and the isomorphism from the projection formula \[ Rj_*(j^*\mathcal{F}^{\bullet} \derotimes_{\mathcal{O}_X} j^*\mathcal{G}^{\bullet}) \cong \mathcal{F}^{\bullet} \derotimes_{\mathcal{O}_X} Rj_*j^*\mathcal{G}^{\bullet}. \] We obtain a morphism of distinguished triangles \[ \xymatrix{ \mathcal{F}^{\bullet} \derotimes R\Gamma_Z\mathcal{G}^{\bullet} \ar[d] \ar[r] & \mathcal{F}^{\bullet} \derotimes \mathcal{G}^{\bullet} \ar@{=}[d] \ar[r] & \mathcal{F}^{\bullet} \derotimes Rj_*j^*\mathcal{G}^{\bullet} \ar[d]_-{\sim}^-{\rho} \\ R\Gamma_Z(\mathcal{F}^{\bullet} \derotimes \mathcal{G}^{\bullet}) \ar[r] & \mathcal{F}^{\bullet} \derotimes \mathcal{G}^{\bullet} \ar[r] & Rj_*j^*(\mathcal{F}^{\bullet} \derotimes \mathcal{G}^{\bullet}). } \] Therefore, the left vertical arrow is an isomorphism. Analogously, one shows that \[ R\Gamma_Z\mathcal{F}^{\bullet} \derotimes \mathcal{G}^{\bullet} \cong R\Gamma_Z(\mathcal{F}^{\bullet} \derotimes \mathcal{G}^{\bullet}). 
\] \end{proof} Finally, for an open immersion $j\colon X \to \overline{X}$, we study the connection between $R\Gamma_Z$ and $R\Gamma_{\overline{Z}}$, where $\overline{Z}$ is the closure of $Z$ in $\overline{X}$. \begin{definition} Let $Z$ and $Z'$ be closed subsets of a scheme $X$. We let $D_{\text{qc}}(X)_Z^{Z'}$ denote the full subcategory of $D_{\text{qc}}(X)_Z$ of complexes $\mathcal{F}^{\bullet}$ with $R\Gamma_{Z'} \cong 0$. \end{definition} \begin{proposition} \label{opensupport} Let $j\colon X \to \overline{X}$ be an open immersion of schemes, $Z \subseteq X$ a closed subset and $\overline{Z}$ the closure of $Z$ in $\overline{X}$. The functors $Rj_*$ and $j^*$ restrict to inverse equivalences \[ \xymatrix{ D_{\text{qc}}(X)_Z \ar@<.5ex>[r]^-{Rj_*} & D_{\text{qc}}(\overline{X})_{\overline{Z}}^{\overline{Z} \backslash X}. \ar@<.5ex>[l]^-{j^*} } \] \end{proposition} \begin{proof} Let $u\colon U \to X$ and $u'\colon U' \to \overline{X}$ denote the open immersions of the complements $U$ of $Z$ in $X$ and $U'$ of $\overline{Z}$ in $\overline{X}$. Let $j'$ be the restriction of $j$ to $U$. We obtain a cartesian square \[ \xymatrix{ U \ar[r]^-u \ar[d]^-{j'} & X \ar[d]^-j \\ U' \ar[r]^-{u'} & \overline{X}. } \] The natural isomorphism $u'^*Rj_* \overset{\operatorname{bc}}{\longrightarrow} Rj'_*u^*$ shows that the essential image of $D_{\text{qc}}(X)_Z$ under $Rj_*$ is a subcategory of $D_{\text{qc}}(\overline{X})_{\overline{Z}}$. The inclusion $j$ factors through the open immersions $\sigma\colon X \to U' \cup X$ and $\tau\colon U' \cup X \to \overline{X}$. In particular, we have a natural isomorphism $Rj_* \cong R\tau_* R\sigma_*$. Since the composition \[ R\tau_* R\sigma_* \xrightarrow{\operatorname{id} \to R\tau_* \tau^*} R\tau_* \tau^* R\tau_* R\sigma_* \xrightarrow{\tau^* R\tau_* \to \operatorname{id}} R\tau_* R\sigma_* \] is the identity and the second morphism is an isomorphism, the first map is an isomorphism too. 
As $\tau$ is the open immersion of the complement of $\overline{Z} \backslash X$ into $\overline{X}$, it follows from the distinguished triangle \[ R\Gamma_{\overline{Z} \backslash X} \longrightarrow \operatorname{id} \longrightarrow R\tau_* \tau^* \longrightarrow R\Gamma_{\overline{Z} \backslash X}[1] \] that $R\Gamma_{\overline{Z} \backslash X} Rj_* j^* = 0$. The adjunction morphism $j^*Rj_* \to \operatorname{id}$ is always an isomorphism. It remains to show that the natural map $\operatorname{id} \to Rj_*j^*$ is an isomorphism. For every $\mathcal{F}^{\bullet}$ in $D_{\text{qc}}(\overline{X})_{\overline{Z}}^{\overline{Z} \backslash X}$, we have $\mathcal{F}^{\bullet} \cong R\Gamma_{\overline{Z}} \mathcal{F}^{\bullet}$ and $R\Gamma_{\overline{Z} \backslash X} \mathcal{F}^{\bullet} \cong 0$. It follows that \begin{align*} R\Gamma_{\overline{X} \backslash X}\mathcal{F}^{\bullet} &\cong R\Gamma_{\overline{X} \backslash X}R\Gamma_{\overline{Z}}\mathcal{F}^{\bullet} \\ &\cong R\Gamma_{\overline{Z} \backslash X}\mathcal{F}^{\bullet} \\ &\cong 0. \end{align*} Thus the second morphism in the fundamental triangle \[ R\Gamma_{\overline{X} \backslash X}\mathcal{F}^{\bullet} \longrightarrow \mathcal{F}^{\bullet} \longrightarrow Rj_* j^*\mathcal{F}^{\bullet} \longrightarrow R\Gamma_{\overline{X} \backslash X}\mathcal{F}^{\bullet}[1] \] is an isomorphism. \end{proof} \begin{remark} In the standard reference \cite[Corollary II.5.11]{HartshorneRD}, Hartshorne proves that for a morphism $f\colon X \to Y$ of schemes, the functor $Lf^*$ from $D_c^-(Y)$ to $D_c^-(X)$ is left adjoint to the functor $Rf_*$ from $D^+(X)$ to $D^+(Y)$. One the one hand, we can relax the coherence assumption because in the case of an open immersion, which is a flat morphism, $f^*$ is exact. 
On the other hand, Proposition (3.2.1) of the more recent reference \cite{LipmanGrothDual} shows this adjunction generally for ringed spaces and without any boundedness or (quasi-)coherence assumptions on the complexes. \end{remark} \begin{corollary} \label{Gammajung} If $Z$ is a closed subset of a scheme $X$ and $j\colon X \to \overline{X}$ is an open immersion such that the image of $Z$ in $\overline{X}$ is closed, then there is a natural isomorphism of functors \[ \varepsilon\colon R\Gamma_Z \overset{\sim}{\longrightarrow} Rj_*R\Gamma_Zj^*. \] \end{corollary} \begin{proof} We define $\varepsilon$ as the composition of the natural map $R\Gamma_Z \to Rj_*j^*R\Gamma_Z$, which is an isomorphism by \autoref{opensupport}, and the natural isomorphism $Rj_*j^*R\Gamma_Z \overset{\sim}{\longrightarrow} Rj_*R\Gamma_Zj^*$. \end{proof} For example, the condition that $Z$ is also closed in $\overline{X}$ is satisfied if $j\colon X \to \overline{X}$ is an open immersion of $Y$-schemes and $i\colon Z \to X$ is a closed immersion of $Y$-schemes over some base scheme $Y$ such that the structural morphisms $Z \to Y$ and $\overline{X} \to Y$ are proper, see \autoref{ExerciseHartshorne}. When constructing the generalized trace map, we will be exactly in this situation. \begin{lemma} \label{opennattrans} Let $j\colon X \to \overline{X}$ be an open immersion. Let $Z \subset X$ be a closed subset such that $j(Z)$ is closed in $\overline{Z}$. Then for $\mathcal{F}^{\bullet} \in D_{\text{qc}}^-(X)_Z$ and $\mathcal{G}^{\bullet} \in D_{\text{qc}}^+(X)$, the natural transformation \[ \tau\colon Rj_* \underline{\operatorname{RHom}}_{\mathcal{O}_X}^{\bullet}(\mathcal{F}^{\bullet},\mathcal{G}^{\bullet}) \to \underline{\operatorname{RHom}}_{\mathcal{O}_{\overline{X}}}^{\bullet}(Rj_*\mathcal{F}^{\bullet},Rj_*\mathcal{G}^{\bullet}) \] is a functorial isomorphism. 
\end{lemma} \begin{proof} Consider the following diagram \[ \xymatrix{ Rj_*\underline{\operatorname{RHom}}_{\mathcal{O}_X}^{\bullet}(\mathcal{F}^{\bullet},\mathcal{G}^{\bullet}) \ar[r]^-{\sim} \ar[d]^{\tau} & Rj_*\underline{\operatorname{RHom}}_{\mathcal{O}_X}^{\bullet}(j^*Rj_*\mathcal{F}^{\bullet},\mathcal{G}^{\bullet}) \ar[d]^{\tau} \\ \underline{\operatorname{RHom}}_{\mathcal{O}_{\overline{X}}}^{\bullet}(Rj_*\mathcal{F}^{\bullet},Rj_*\mathcal{G}^{\bullet}) \ar[r]^-{\sim} \ar@{=}[dr] & \underline{\operatorname{RHom}}_{\mathcal{O}_{\overline{X}}}^{\bullet}(Rj_*j^*Rj_*\mathcal{F}^{\bullet},Rj_*\mathcal{G}^{\bullet}) \ar[d] \\ & \underline{\operatorname{RHom}}_{\mathcal{O}_{\overline{X}}}^{\bullet}(Rj_*\mathcal{F}^{\bullet},Rj_*\mathcal{G}^{\bullet}), } \] where the horizontal arrows are induced by the counit $j^*Rj_* \to \operatorname{id}$ of adjunction -- these maps are isomorphisms by \autoref{Gammajung} since $\mathcal{F}^{\bullet}$ is supported in $Z$ -- and the arrow to the bottom right corner stems from the unit $\operatorname{id} \to Rj_*j^*$ of adjunction. The upper square commutes because of the functoriality of $\tau$. The triangle on the bottom commutes because the composition of the unit and counit of an adjunction in the manner of the diagram is canonically isomorphic to the identity. Hence the whole diagram is commutative. Finally, the composition of the two vertical arrows on the right is an isomorphism (\cite[Proposition (3.2.3)]{LipmanGrothDual}. It follows that the vertical arrow on the left is an isomorphism. \end{proof} \section{Generalization of the trace map} The adjunction between $Rf_*$ and $f^!$ for a proper morphism $f\colon X \to Y$ is based on the \emph{trace map}, which is a natural transformation of functors \[ \operatorname{tr}_f\colon Rf_*f^! \to \operatorname{id}. \] The first step of the classical way to construct the trace is to define it for residual complexes. 
If $Y$ is regular, the structure sheaf $\mathcal{O}_Y$ is a dualizing sheaf, and hence in particular a pointwise dualizing complex. Its Cousin complex $K^{\bullet} := E^{\bullet}(\mathcal{O}_Y)$, see \cite[IV.2]{HartshorneRD}, is an injective resolution of $\mathcal{O}_Y$ and an example for a \emph{residual complex}. We recall the basic facts from chapter 3.2 of \cite{conrad_grothendieck_2000}. For every morphism $g\colon X \to Y$ of finite type, one can construct a functor $g^{\Delta}$ mapping residual complexes on $Y$ to residual complexes on $X$ by gluing the functors $g^{\flat}$ for finite $g$ and $g^{\sharp}$ for separated and smooth $g$. This gives rise to the \emph{twisted} or \emph{exceptional inverse image functor}: \begin{definition} Let $g\colon X \to Y$ be a morphism of finite type. We define the functor $g^!\colon D_{\operatorname{coh}}^+(Y) \to D_{\operatorname{coh}}^+(X)$ by \[ g^! = D_{g^{\Delta}K^{\bullet}} \circ Lg^* \circ D_{K^{\bullet}}, \] where $D$ is the duality. \end{definition} For proper $f$, we can define a map of complexes $\operatorname{tr}_f(K^{\bullet})\colon f_*f^{\Delta}K^{\bullet} \to K^{\bullet}$ (\cite[VII, Theorem 2.1]{HartshorneRD}), where $f^{\Delta}$ is the functor $f^!$ for residual complexes, see \cite[VI.3.]{HartshorneRD}. With this map in hand one defines the natural transformation $\operatorname{tr}_f\colon Rf_*f^! \to \operatorname{id}$ in the category $D_{\operatorname{coh}}^+(Y)$ as the unique map making the diagram \begin{align*} \xymatrix{ Rf_*f^! 
\ar@{=}[r] \ar@{.>}[dd]^{\operatorname{tr}_f}& Rf_*\underline{\operatorname{RHom}}_{\mathcal{O}_X}^{\bullet}(Lf^* \circ D_{K^{\bullet}}(\usc),f^{\Delta}K^{\bullet}) \ar[d]^{\sim}\\ & \underline{\operatorname{RHom}}_{\mathcal{O}_Y}^{\bullet}(D_{K^{\bullet}}(\usc),f_*f^{\Delta}K^{\bullet}) \ar[d]^{\operatorname{tr}_f(K^{\bullet})} \\ \operatorname{id} \ar[r]^-{\sim} & \underline{\operatorname{RHom}}_{\mathcal{O}_Y}^{\bullet}(D_{K^{\bullet}}(\usc),K^{\bullet}) } \end{align*} commutative. Here the first vertical isomorphism on the right is the natural isomorphism from the adjunction of $Rf_*$ and $Lf^*$. Note that $f^{\Delta}(K^{\bullet})$ is injective, hence $f_*f^{\Delta}(K^{\bullet})$ computes $Rf_*f^{\Delta}(K^{\bullet})$. Instead of constructing the twisted inverse image functor $f^!$ by pasting it from special situations such as smooth and proper maps, Lipman uses a more abstract method, the Special Adjoint Functor Theorem, to obtain a right adjoint of $Rf_*$ under weak assumptions on the morphism $f$. Then he extends this result to a ``sheafified duality'', i.e.\ for $\mathcal{F}^{\bullet} \in D_{\text{qc}}(X)$, $\mathcal{G}^{\bullet} \in D_{\text{qc}}^+(Y)$ and quasi-proper $f$, a natural isomorphism \[ Rf_*\underline{\operatorname{RHom}}_{\mathcal{O}_X}^{\bullet}(\mathcal{F}^{\bullet},f^!\mathcal{G}^{\bullet}) \to \underline{\operatorname{RHom}}_{\mathcal{O}_Y}^{\bullet}(Rf_*\mathcal{F}^{\bullet},\mathcal{G}^{\bullet}). \] The compatibility of the approaches of \cite{HartshorneRD} and \cite{LipmanGrothDual} is involved, as pointed out in the introduction of \cite{LipmanGrothDual}. Let us recall some results of the trace for proper morphisms. \begin{definition}[\operatorname{pr}otect{{\cite[VI. 5.]{HartshorneRD}}}] A morphism $f\colon X \to Y$ of schemes is called \emph{residually stable} if it is flat, integral and the fibers of $f$ are Gorenstein. 
\end{definition} \begin{lemma}[\operatorname{pr}otect{{\cite[Corollary 4.4.3]{LipmanGrothDual}}}] \label{flatcommute} Let $f\colon X \to Y$ be proper and let $g\colon Y' \to Y$ be flat. Let $f'$ and $g'$ be the projections of $X \times_Y Y'$ such that the square \[ \xymatrix{ Y' \times_Y X \ar[d]^-{f'} \ar[r]^-{g'} & X \ar[d]^-f \\ Y' \ar[r]^-g & Y } \] is cartesian. The morphism $\beta\colon g'^*f^! \overset{\sim}{\longrightarrow} f'^!g^*$, defined as the adjoint of the composition \[ Rf'_*g'^*f^! \xrightarrow{\operatorname{bc}^{-1} f^!} g^*Rf_*f^! \xrightarrow{g^*\operatorname{tr}_f} g^*, \] is an isomorphism. Here $\operatorname{bc}$ denotes the base change isomorphism (\autoref{bchange}). \end{lemma} Let us recall two compatibilities of the trace, which usually are known as ``TRA 1'' and ``TRA 4''. \begin{lemma} \label{proptrace} Let $f\colon X \to Y$ be a proper morphism of schemes. \begin{enumerate} \item (TRA 1) If $g\colon Y \to Z$ is another proper morphism, then there is a commutative diagram \[ \xymatrix{ R(gf)_*(gf)^! \ar[r]^-{\operatorname{tr}_{g \circ f}} \ar[d]^{\sim} & \operatorname{id} \\ Rg_*Rf_*f^!g^! \ar[r]^-{\operatorname{tr}_f} & Rg_*g^! \ar[u]_{\operatorname{tr}_g} } \] where the first vertical arrow is the natural isomorphism. \item (TRA 4) For a flat morphism $g\colon Y' \to Y$, there is a commutative diagram \[ \xymatrix@C45pt{ g^*Rf_*f^! \ar[r]^-{g^* \operatorname{tr}_f} \ar[d]^{\operatorname{bc}}_{\sim} & g^* \\ Rf'_*g'^*f^! \ar[r]^-{Rf'_*\beta}_-{\sim} & Rf'_*f'^!g^*, \ar[u] _{\operatorname{tr}_{f'}g^*} } \] where $g'$ and $f'$ are the two projections of $X \times_Y Y'$. \end{enumerate} \end{lemma} \begin{proof} (a) is \cite[Corollary VII.3.4]{HartshorneRD}. The diagram in (b) commutes by construction of $\beta$: The composition $\operatorname{tr}_{f'}g^* \circ Rf'_*\beta$ is the adjoint of the adjoint of the composition $u^* \operatorname{tr}_f \circ \operatorname{bc}^{-1}$, see \autoref{flatcommute}. 
\end{proof} \begin{remark} Part (b) of the preceding lemma holds under the milder assumption that $f$ is of finite Tor-dimension, see \cite[Corollary 4.4.3]{LipmanGrothDual}. In this more general case one considers the left derived functors $Lf^*$ and $Lf'^*$. However, we will need the compatibility of the trace with pullback only for flat morphisms. \end{remark} From now on we do not assume that $f$ is proper. We are interested in the case where $f\colon X \to Y$ is a separated morphism of finite type of Noetherian schemes and $i\colon Z \to Y$ and $i'\colon Z' \to X$ are closed immersions with a proper morphism $f'\colon Z' \to Z$ such that $f \circ i' = i \circ f'$. The compactification theorem of Nagata (\cite{Nagata}, see also \cite{Luet} for a more recent proof) states that there exists a factorization of $f$ into an open immersion $j\colon X \to \overline{X}$ and a proper morphism $\overline{f} \colon \overline{X} \to Y$. \begin{lemma} \label{ExerciseHartshorne} Let $f\colon X \to Y$ be a morphism of schemes that factors through an open immersion $j\colon X \to \overline{X}$ followed by a proper morphism $\overline{f}\colon \overline{X} \to Y$. Then for every closed immersion $i\colon Z \to X$ such that $f \circ i$ is proper, the composition $j \circ i$ is also a closed immersion. \end{lemma} \begin{proof} We have to show that $j(i(Z))$ is closed in $\overline{X}$ (which is a special case of the first part of exercise II.4.4 of \cite{Hartshorne}). By assumption the composition $\overline{f} \circ j \circ i = f \circ i$ is proper and $\overline{f}$ is proper, in particular $\overline{f}$ is separated. Hence by \cite[Corollary II.4.8]{Hartshorne}, $j \circ i$ is proper, which implies that the image $j(i(Z))$ is closed. \end{proof} The following generalization of the trace map stems from \cite{Ruelling}, where Chatzistamatiou and R\"ulling define a trace for morphisms which are proper not only over a single closed subset but along a family of supports. 
Our construction is similar to the morphism $\operatorname{Tr}_f$ from Corollary 1.7.6 of ibid. \begin{definition} \label{tracedef} For a morphism $f\colon X \to Y$ of finite type and closed immersions $i\colon Z \to Y$ and $i'\colon Z' \to X$ with a proper morphism $f'\colon Z' \to Z$ such that $f \circ i' = i\circ f'$, choose a compactification, i.e.\ an open immersion $j\colon X \to \overline{X}$ and a proper morphism $\overline{f}\colon \overline{X} \to X$ with $\overline{f} \circ j = f$. We obtain the following commutative diagram: \[ \xymatrix{ Z' \ar@^{(->}[r]^-{i'} \ar[d]^-{f'} & X \ar[r]^-j \ar[d]^-f & \overline{X} \ar[dl]^{\overline{f}} \\ Z \ar@^{(->}[r]^-i & Y & } \] We define the trace of $f$ as the morphism of functors \[ \operatorname{tr}_{f,Z} = \operatorname{tr}_f\colon Rf_*R\Gamma_{Z'}f^! \to \operatorname{id} \] on $D_{\text{qc}}^+(\mathcal{O}_Y)$ given by the composition \begin{align*} \label{trace} Rf_*R\Gamma_{Z'}f^! \overset{\sim}{\longrightarrow} R\overline{f}_*Rj_*R\Gamma_{Z'}j^*\overline{f}^! \xrightarrow{R\overline{f}_*\varepsilon^{-1}\overline{f}^!} R\overline{f}_*R\Gamma_{Z'}\overline{f}^! \xrightarrow{R\Gamma_{Z'} \to \operatorname{id}}R\overline{f}_*\overline{f}^! \xrightarrow{tr_{\overline{f}}} \operatorname{id}, \end{align*} where $\varepsilon$ is the isomorphism of \autoref{Gammajung} and the last morphism is the classical Grothendieck-Serre trace for the proper map $\overline{f}$. \end{definition} Because $Rf_*R\Gamma_{Z'}f^! \cong R\Gamma_ZRf_*f^!$ (\autoref{Gammacomm}), the complex $Rf_*R\Gamma_{Z'}f^!$ is supported on $Z$. By \autoref{Gammaadj}, $\operatorname{tr}_f$ factors through $R\Gamma_Z$, i.e.\ there is a commutative diagram \[ \xymatrix{ Rf_*R\Gamma_{Z'}f^! 
\ar[rr]^-{\operatorname{tr}_f} \ar[dr]_-{\widetilde{\operatorname{tr}}_f} & & \operatorname{id}, \\ & R\Gamma_Z \ar[ur] & } \] where $\widetilde{\operatorname{tr}}_f$ is induced by $\operatorname{tr}_f$ and the map $R\Gamma_Z \to \operatorname{id}$ is the natural one. We will not distinguish between $\widetilde{\operatorname{tr}}_f$ and $\operatorname{tr}_f$. For a residual complex $E^{\bullet}$, the trace defined above is a \emph{morphism of complexes} because $f^{\Delta}E^{\bullet}$ and $\overline{f}^{\Delta}E^{\bullet}$ are residual complexes and $\Gamma_Z$ preserves injectives. Of course we have to show that $\operatorname{tr}_f$ is well-defined, i.e.\ it does not depend on the choice of a compactification. The next lemma prepares the proof of this independence. \begin{lemma} \label{opentrace} Let $f\colon X \to Y$ be an open immersion. Let $Z \subseteq X$ be a closed subset such that $f(Z)$ is closed in $Y$. Then for every compactification $X \xrightarrow{j} \overline{X} \xrightarrow{\overline{f}} Y$, the map $\operatorname{tr}_f$ equals the inverse $Rf_*R\Gamma_Zf^* \cong R\Gamma_Z$ of the isomorphism of \autoref{Gammajung} followed by the natural morphism $R\Gamma_Z \to \operatorname{id}$. \end{lemma} \begin{proof} Let $\alpha\colon R\Gamma_Z \to \operatorname{id}$ denote the canonical morphism of functors. The claim of the lemma is the commutativity of the diagram \[ \xymatrix{ Rf_* R\Gamma_Z f^* \ar[r]^-{\sim} \ar[d]_{\sim}^{\varepsilon^{-1}} & R\overline{f}_* Rj_* R\Gamma_Z j^* \overline{f}^! \ar[d]_{\sim}^{R\overline{f}_* \varepsilon^{-1} \overline{f}^!} \\ R\Gamma_Z \ar[d]^{\alpha} & R\overline{f}_* R\Gamma_Z \overline{f}^! \ar[d]^{R\overline{f}_* \alpha \overline{f}^*} \ar[l]_-{\varphi}\\ \operatorname{id} & R\overline{f}_* \overline{f}^!, \ar[l]_-{\operatorname{tr}_{\overline{f}}} } \] where $\varepsilon$ is the isomorphism of \autoref{Gammajung}. 
The map $\varphi$ can be constructed in the following way: Let $Z'$ be the closed subset $\overline{f}^{-1}(Z)$, which contains $Z$. Then define $\varphi$ as the composition \[ R\overline{f}_* R\Gamma_Z \overline{f}^! \to R\overline{f}_*R\Gamma_{Z'}\overline{f}^!\overset{\sim}{\longrightarrow} R\Gamma_Z R\overline{f}_*\overline{f}^! \xrightarrow{\operatorname{tr}_{\overline{f}}} R\Gamma_Z \] of canonical transformations obtained from the natural transformation $R\Gamma_Z \to R\Gamma_{Z'}$, the isomorphism of \autoref{Gammacomm} and the trace. The commutativity of the bottom square is easy to see. The upper square can be extracted from the following bigger diagram: \[ \xymatrix{ Rf_* R\Gamma_Z f^* \ar[r]^-{\sim} \ar[d]_{\sim} & R\overline{f}_*Rj_* R\Gamma_Z j^*\overline{f}^! \ar[d]_{\sim} \ar[dr]^{\sim} & \\ R\Gamma_Z Rf_* f^* \ar[r]^-{\sim} & R\Gamma_Z R\overline{f}_* Rj_* j^* \overline{f}^! & R\overline{f}_* R\Gamma_{Z'} Rj_*j^*\overline{f}^! \ar[l]_-{\sim} \\ R\Gamma_Z \ar[u]_{\operatorname{ad}_f}^{\sim} & R\Gamma_Z R\overline{f}_* \overline{f}^! \ar[u]_{\operatorname{ad}_j}^{\sim} \ar[l]_-{\operatorname{tr}} & R\overline{f}_* R\Gamma_{Z'} \overline{f}^!. \ar[u]_{\operatorname{ad}_j}^{\sim} \ar[l]_-{\sim} } \] Here $\operatorname{ad}_f$ and $\operatorname{ad}_j$ denote the units of adjunction as in the proof of \autoref{Gammacomm}. The only part whose commutativity is not obvious is the bottom left square. For this it is enough to show that the diagram \begin{align} \label{opendiagram} \xymatrix@C50pt{ & R\overline{f}_* \overline{f}^! \ar[r]^-{\operatorname{tr}} \ar[d]^{\operatorname{ad}_f} \ar[ddl]_{\operatorname{ad}_j} & \operatorname{id} \ar[d]^{\operatorname{ad}_f} \\ & Rf_* f^* R\overline{f}_* \overline{f}^! \ar[r]^-{Rf_*f^*\operatorname{tr}_{\overline{f}}} \ar[d]^{\operatorname{bc}} & Rf_* f^* \ar@{=}[dd] \\ R\overline{f}_* Rj_* j^* \overline{f}^! \ar[d]^-{\sim} & Rf_* Rf'_* j^* \overline{f}^! 
\ar[l]_-{\sim} \ar[d]^{\sim} & \\ R\overline{f}_* Rj_* f'^* f^* & Rf_* Rf'_* f'^* f^* \ar[l]_-{\sim} \ar[r]^-{Rf_*\operatorname{tr}_{f'}f^*} & Rf_*f^* } \end{align} commutes. Here the morphism $\operatorname{bc}$ is the base change morphism with respect to the cartesian square \[ \xymatrix{ X' \ar[r]^j \ar[d]^-{f'} & \overline{X} \ar[d]^-{\overline{f}} \\ X \ar[r]^f & Y. } \] The commutativity of the upper left triangle of the diagram \autoref{opendiagram} was part of the proof of \autoref{Gammacomm}. The upper square of this diagram commutes by naturality of $\operatorname{ad}_f$ and the commutativity of the square below follows from \autoref{flatcommute}. Finally, the bottom left square commutes by naturality of the transformation $Rf_* Rf'_* \to R\overline{f}_* Rj_*$. \end{proof} \begin{lemma} The map $\operatorname{tr}_f$ is well-defined, i.e.\ it is independent of the choice of the compactification $j\colon X \into \overline{X}$. \end{lemma} \begin{proof} Let $j_1\colon X \to \overline{X}_1$ and $j_2\colon X \to \overline{X}_2$ be two open immersions with proper morphisms $f_1\colon \overline{X}_1 \to Y$ and $f_2\colon \overline{X}_2 \to Y$ such that $f = f_1 \circ j_1 = f_2 \circ j_2$. By considering $\overline{X}_1 \times_Y \overline{X}_2$ we can reduce to the case that there is a proper morphism $g\colon \overline{X}_1 \to \overline{X}_2$ such that $g \circ j_1 = j_2$, i.e.\ the diagram \[ \xymatrix{ & X \ar[dl]_{j_1} \ar[dr]^{j_2} \ar[dd]_/-15pt/f & \\ \overline{X}_1\ \ar[dr]_{f_1} \ar[rr]_/15pt/g & & \overline{X}_2 \ar[dl]^{f_2} \\ & Y & } \] commutes. That $\operatorname{tr}_f$ is well-defined means exactly that the following diagram of functors is commutative: \[ \xymatrix{ & Rf_*R\Gamma_Zf^! \ar[dl]_-{\sim} \ar[d]_-{\sim} \ar[dr]^-{\sim} & \\ R{f_1}_*R{j_1}_*R\Gamma_Zj_1^!f_1^! \ar[d] & R{f_2}_*Rg_*R{j_1}_*R\Gamma_Zj_1^!g^!f_2^! \ar[l]_-{\sim} \ar[r]^-{\sim} \ar[d] & R{f_2}_*{j_2}_* R\Gamma_Z j_2^!f_2^! \ar[d] \\ R{f_1}_*R\Gamma_Zf_1^! 
\ar[d] & R{f_2}_*Rg_*R\Gamma_Zg^!f_2^! \ar[l]_-{\sim} \ar[d] & R{f_2}_*R\Gamma_Zf_2^! \ar[d] \\ R{f_1}_*f_1^! \ar[dr]_-{\operatorname{tr}_{f_1}} & R{f_2}_*Rg_*g^!f_2^! \ar[l]_-{\sim} \ar[r]^-{{Rf_2}_* \operatorname{tr}_g f_2^!} \ar[d]^{\operatorname{tr}_{f_2 \circ g}} & R{f_2}_*Rf_2^! \ar[dl]^-{\operatorname{tr}_{f_2}} \\ & \operatorname{id}. & \\ } \] Here the six vertical arrows in the middle are the natural maps occurring in the definition of $\operatorname{tr}_f$. The only part of which the commutativity is not obvious is the bigger rectangle on the right hand side, which follows from \autoref{opentrace} after canceling $R{f_2}_*$ and $f_2^!$ from the edges of the terms. \end{proof} Note that the independence of $tr_f$ of the chosen compactification implies that $\operatorname{tr}_f$ equals the classical trace map whenever $f$ is proper. \begin{proposition} \label{traceres} The map $\operatorname{tr}_f$ is compatible with residually stable base change: For a residually stable morphism $g\colon S \to Y$, let $f'$ and $g'$ be the projections of $S \times_Y X$. Furthermore, let $Z_S$ and $Z_S'$ be the preimages of $Z$ and $Z'$ in $S$ and $S \times_Y X$. Then the diagram \[ \xymatrix@C55pt{ g^*Rf_*R\Gamma_{Z'}f^! \ar[d]_-{\operatorname{bc}}^-{\sim} \ar[r]^-{g^* \operatorname{tr}_f} & g^* \\ Rf'_*g'^*R\Gamma_{Z'}f^! \ar[d]^-{\sim} & \\ Rf'_*R\Gamma_{Z_S'}g'^*f^! \ar[r]^-{Rf'_*R\Gamma_{Z_S'}\beta} & Rf'_*R\Gamma_{Z_S'}f'^!g^* \ar[uu]_-{\operatorname{tr}_{f'}g^*} } \] commutes. Here $\beta$ is the isomorphism of \autoref{flatcommute}. \end{proposition} \begin{proof} First we treat the case of an open immersion $u\colon U \to Y$. Let $h\colon S \to Y$ be a residually stable morphism and let $u'$ and $h'$ denote the projections of $S \times_Y U$. Again, we let $\operatorname{ad}_u$, $\operatorname{ad}_{u'}$, $\operatorname{ad}_{h'}$ and $\operatorname{ad}_{h \circ u'}$ denote the units of adjunction. 
The natural isomorphisms $Ru_*Rh'_* \cong Rh_*Ru'_*$ and $h'^*u^* \cong u'^*h^*$ are compatible with the adjunction of $(u \circ h')^*$ and $R(u \circ h')_*$, i.e.\ the diagram \[ \xymatrix{ \operatorname{id} \ar[r]^-{\operatorname{ad}_{h \circ u'}} \ar[d]_-{\operatorname{ad}_u} & Rh_*Ru'_*u'^*h^* \ar[r]^-{\sim} & Rh_*Ru'_*h'^*u^* \ar@{=}[d] \\ Ru_*u^* \ar[r]^-{\operatorname{ad}_{h'}} & Ru_*Rh'_*h'^*u^* \ar[r]^-{\sim} & Rh_*Ru'_*h'^*u^* } \] of natural maps commutes. Passing to the adjoint maps we see that the square \[ \xymatrix{ h^* \ar[r]^-{\operatorname{ad}_{u'}} \ar[d]_-{\operatorname{ad}_u} & Ru'_*u'^*h^* \\ h^*Ru_*u^* \ar[r]^-{\operatorname{bc}} & Ru'_*h'^*u^* \ar[u]_-{\sim} } \] is commutative. Applying the derived local cohomology functor and taking the inverse of the now invertible units of adjunction (\autoref{Gammajung}), we obtain the commutative diagram \[ \xymatrix{ h^*Ru_*R\Gamma_{Z'}u^* \ar[r]^-{h^* \operatorname{tr}_u} \ar[d]_-{\operatorname{bc}} \ar@{.>}[dr]^-{d}& h^* \\ Ru'_*h'^*R\Gamma_{Z'}u^* \ar[r]^-{\sim} & Ru'_*R\Gamma_{Z_U'}u'^*h^*, \ar[u]_-{\operatorname{tr}_{u'} h^*} } \] where $d$ denote the composition $h^*Ru_*R\Gamma_{Z'}u^* \overset{\operatorname{bc}}{\longrightarrow} Ru'_*h'^*R\Gamma_{Z'}u^* \overset{\sim}{\longrightarrow} Ru'_*R\Gamma_{Z_U'}u'^*h^*$. Now we choose a compactification $X \xrightarrow{j} \overline{X} \xrightarrow{\overline{f}} Y$ of $f$. Then $S \times_Y X \xrightarrow{j'} S \times_Y \overline{X} \xrightarrow{\overline{f'}} S$ is a compactification of $f'$ where $j' := \operatorname{id} \times j$ and $\overline{f'}$ is the projection. Let $\overline{g'}\colon S \times_Y \overline{X} \to \overline{X}$ denote the projection onto $\overline{X}$. 
The three squares in the commutative diagram \[ \xymatrix{ & S \times_Y \overline{X} \ar[rr]^-{\overline{g'}} \ar@/^4mm/[dddl]^{\overline{f'}} & & \overline{X} \ar@/^4mm/[dddl]^{\overline{f}} \\ S \times_Y X \ar[ur]^{j'} \ar[rr]^/5mm/{g'} \ar[dd]_{f'} & & X \ar[ur]^j \ar[dd]_f & \\ & & & \\ S \ar[rr]^-g & & Y & } \] are cartesian. It suffices to show the commutativity of \[ \xymatrix{ g^*\overline{f}_*j_*\Gamma_{Z'}j^*\overline{f}^! \ar[r]^-{\operatorname{bc}} \ar[d]_{\sim}^{\operatorname{tr}_j} & \overline{f'}_*\overline{g'}^*j_*\Gamma_{Z'}j^*\overline{f}^! \ar[r]^-{d} \ar[d]_{\sim}^{\operatorname{tr}_j} & \overline{f'}_*j'_*\Gamma_{Z_S'}j'^*\overline{g'}^*\overline{f}^! \ar[r]^-{\beta} \ar[d]_{\sim}^{\operatorname{tr}_{j'}} & \overline{f'}_*j'_*\Gamma_{Z_S'}j'^*\overline{f'}^!g^* \ar[d]_{\sim}^{\operatorname{tr}_{j'}} \\ g^*\overline{f}_*\Gamma_{Z'}\overline{f}^! \ar[r]^-{\operatorname{bc}} \ar[d]^{\operatorname{tr}_{\overline{f}}} & \overline{f'}_*\overline{g'}^*\Gamma_{Z'} \overline{f}^! \ar[r]^-{\sim} & \overline{f'}_*\Gamma_{Z_S'}\overline{g'}^*\overline{f}^! \ar[r]^-{\beta} & \overline{f'}_* \Gamma_{Z_S'} \overline{f'}^! g^* \ar[d]^{\operatorname{tr}_{\overline{f'}}} \\ g^* \ar[rrr]^-{\operatorname{id}} & & & g^*, } \] where we left out the $R$ indicating derived functors to streamline the notation. The first and the third upper square commute because of the naturality of $\operatorname{tr}_j$ and $\operatorname{tr}_{j'}$. The commutativity of the upper square in the middle is the case of an open immersion, which we have already seen. Finally, the commutativity of the bottom rectangle is the case of a proper morphism (\autoref{proptrace}). \end{proof} \begin{proposition} \label{tracecomp} Let $f\colon X \to Y$ and $g\colon Y \to S$ be separated and finite type morphisms of schemes. 
Assume that $i\colon Z \to S$, $i'\colon Z' \to Y$ and $i''\colon Z'' \to X$ are closed immersions with proper morphisms $f'\colon Z'' \to Z'$ and $g'\colon Z' \to Z$ making the diagram \[ \xymatrix{ Z'' \ar[r]^-{i''} \ar[d]^-{f'} & X \ar[d]^-f \\ Z' \ar[r]^-{i'} \ar[d]^-{g'} & Y \ar[d]^-g \\ Z \ar[r]^-i & S } \] commutative. Then there is a commutative diagram: \[ \xymatrix@C40pt@R30pt{ R(g \circ f)_*R\Gamma_{Z''}(g \circ f)^! \ar[d]^{\sim} \ar[drr]^-{\operatorname{tr}_{g \circ f}} & & \\ Rg_*Rf_*R\Gamma_{Z''}f^!g^! \ar[r]_-{Rg_* \operatorname{tr}_f g^!} & Rg_*R\Gamma_{Z'}g^! \ar[r]_-{\operatorname{tr}_g} & \operatorname{id}. } \] \end{proposition} \begin{proof} Choose a compactification $Y \xrightarrow{j_Y} \overline{Y} \xrightarrow{\overline{g}} S$ of $g$, then choose a compactification $X \xrightarrow{j_X} \overline{X} \xrightarrow{f'} \overline{Y}$ of the composition $j_Y \circ f$. The morphisms $f$ and $j_X$ induce a morphism $h\colon X \to Y \times_{\overline{Y}}\overline{X}$. The projection $\operatorname{pr}_Y\colon Y \times_{\overline{Y}} \overline{X} \to Y$ is proper because it is the base change of the proper morphism $f'$. The projection $\operatorname{pr}_{\overline{X}}\colon X \times_{\overline{Y}}\overline{X} \to \overline{X}$ is a base change of $j_Y$ and hence an open immersion. We obtain the following commutative diagram: \[ \xymatrix{ X \ar[d]_-f \ar[r]^-{h} & Y \times_{\overline Y} \overline{X} \ar[dl]^-{\operatorname{pr}_{Y}} \ar[r]^-{\operatorname{pr}_{\overline{X}}} & \overline{X} \ar[dl]^-{f'} \\ Y \ar[d]_-g \ar[r]_-{j_Y} & \overline{Y} \ar[dl]^-{\overline{g}} & \\ S. } \] Because $\operatorname{pr}_{\overline{X}} \circ h$ equals the open immersion $j_X$, it follows that $h$ is an open immersion too. The asserted compatibility of the trace map now follows from the compatibility of the classical trace with compositions applied to the proper morphisms $f'$ and $\overline{g}$ and using \autoref{traceres} for $f'$ and the open immersion $j_Y$. 
The details of the calculation are left to the reader. \end{proof} \section{Adjunction for morphisms with proper support} With his approach to the functor $f^!$ mentioned in the preceding section, Lipman proved the following version of Grothendieck duality (\cite[Corollary 4.4.2]{LipmanGrothDual}). \begin{theorem} \label{adjunctionclassic} Let $f\colon X \to Y$ be a proper morphism between Noetherian schemes. For $\mathcal{F}^{\bullet} \in D_{\text{qc}}(X)$ and $\mathcal{G}^{\bullet} \in D_{\text{qc}}^+(Y)$, the composition \begin{align*} \xymatrix{ Rf_* \underline{\operatorname{RHom}}_{\mathcal{O}_X}^{\bullet}(\mathcal{F}^{\bullet},f^!\mathcal{G}^{\bullet}) \ar[r] & \underline{\operatorname{RHom}}_{\mathcal{O}_Y}^{\bullet}(Rf_* \mathcal{F}^{\bullet},Rf_*f^! \mathcal{G}^{\bullet}) \ar[d]^{\operatorname{tr}_f} \\ & \underline{\operatorname{RHom}}_{\mathcal{O}_Y}^{\bullet}(Rf_* \mathcal{F}^{\bullet}, \mathcal{G}^{\bullet})} \end{align*} is an isomorphism. Here the first morphism is the canonical one and the second is the trace map. \end{theorem} This generalizes the classical coherent duality (\cite[VII, 3.4(c)]{HartshorneRD}), where $\mathcal{F}^{\bullet} \in D_{\operatorname{coh}}^-(X)$ and $\mathcal{G}^{\bullet} \in D_{\operatorname{coh}}^+(Y)$ and $Y$ is assumed to have a dualizing complex. In this paper we relax the properness assumption and show the following: \begin{theorem} \label{qcohadjunction} Let $f\colon X \to Y$ be a separated and finite type morphism of Noetherian schemes and let $i\colon Z \to Y$ and $i'\colon Z' \to X$ be closed immersions with a proper morphism $f'\colon Z' \to Z$ such that the diagram \[ \xymatrix{ Z' \ar[r]^-{i'} \ar[d]^-{f'} & X \ar[d]^-f \\ Z \ar[r]^-i & Y } \] commutes. 
Then for all $\mathcal{F}^{\bullet} \in D_{\text{qc}}^-(\mathcal{O}_X)_Z$ and $\mathcal{G}^{\bullet} \in D_{\text{qc}}^+(\mathcal{O}_Y)_Z$ (see \autoref{supportdefine}), the composition \begin{align*} \xymatrix{ Rf_* \underline{\operatorname{RHom}}_{\mathcal{O}_X}^{\bullet}(\mathcal{F}^{\bullet},R\Gamma_{Z'}f^!\mathcal{G}^{\bullet}) \ar[r] & \underline{\operatorname{RHom}}_{\mathcal{O}_Y}^{\bullet}(Rf_* \mathcal{F}^{\bullet},Rf_*R\Gamma_{Z'}f^! \mathcal{G}^{\bullet}) \ar[d]^{\operatorname{tr}_f} \\ & \underline{\operatorname{RHom}}_{\mathcal{O}_Y}^{\bullet}(Rf_* \mathcal{F}^{\bullet}, \mathcal{G}^{\bullet})} \end{align*} is an isomorphism. Here $\operatorname{tr}_f\colon Rf_*R\Gamma_{Z'}f^! \to \operatorname{id}$ is the natural transformation of \autoref{tracedef}. In particular, taking global sections, the functor $Rf_*$ is left adjoint to the functor $R\Gamma_{Z'}f^!$. \end{theorem} Note that the properness of $f'$ is equivalent to the properness of $i \circ f'$. \begin{proof} Consider the commutative diagram \[ \xymatrix{ Rf_*\underline{\operatorname{RHom}}_{\mathcal{O}_X}^{\bullet}(\mathcal{F}^{\bullet},R\Gamma_{Z'}f^!\mathcal{G}^{\bullet}) \ar[d] \ar[dr] & \\ R\overline{f}_*\underline{\operatorname{RHom}}_{\mathcal{O}_{\overline{X}}}^{\bullet}(Rj_*\mathcal{F}^{\bullet},Rj_*R\Gamma_{Z'}j^*\overline{f}^!\mathcal{G}^{\bullet}) \ar[r] \ar[d]^{\varepsilon^{-1}} & \underline{\operatorname{RHom}}_{\mathcal{O}_Y}^{\bullet}(Rf_*\mathcal{F}^{\bullet},R\overline{f}_*Rj_*R\Gamma_{Z'}j^*\overline{f}^!\mathcal{G}^{\bullet}) \ar[d] \\ R\overline{f}_*\underline{\operatorname{RHom}}_{\mathcal{O}_{\overline{X}}}^{\bullet}(Rj_*\mathcal{F}^{\bullet},R\Gamma_{Z'}\overline{f}^!\mathcal{G}^{\bullet}) \ar[r] \ar[d] & \underline{\operatorname{RHom}}_{\mathcal{O}_Y}^{\bullet}(Rf_*\mathcal{F}^{\bullet},R\overline{f}_*R\Gamma_{Z'}\overline{f}^!\mathcal{G}^{\bullet}) \ar[d] \\ 
R\overline{f}_*\underline{\operatorname{RHom}}_{\mathcal{O}_{\overline{X}}}^{\bullet}(Rj_*\mathcal{F}^{\bullet},\overline{f}^!\mathcal{G}^{\bullet}) \ar[dr] \ar[r] & \underline{\operatorname{RHom}}_{\mathcal{O}_Y}^{\bullet}(Rf_*\mathcal{F}^{\bullet},R\overline{f}_*\overline{f}^!\mathcal{G}^{\bullet}) \ar[d] \\ & \underline{\operatorname{RHom}}_{\mathcal{O}_Y}^{\bullet}(Rf_*\mathcal{F}^{\bullet},\mathcal{G}^{\bullet}) } \] of natural morphisms. The vertical arrows on the left are isomorphisms by \autoref{opennattrans}, \autoref{Gammajung} and \autoref{Gammaadj}. The diagonal map to the lower right corner is the isomorphism from the duality of \autoref{adjunctionclassic} for the proper morphism $\overline{f}$. Hence the composition of the first diagonal morphism and the vertical morphisms on the right is an isomorphism. Finally, for the adjunction of $Rf_*$ and $R\Gamma_{Z'}f^!$, we apply the degree zero cohomology of the right derived functor of global sections $H^0R\Gamma$ to both sides of the just proven isomorphism \[ \underline{\operatorname{RHom}}_{\mathcal{O}_Y}^{\bullet}(Rf_*\mathcal{F}^{\bullet},\mathcal{G}^{\bullet}) \overset{\sim}{\longrightarrow} Rf_*\underline{\operatorname{RHom}}_{\mathcal{O}_X}^{\bullet}(\mathcal{F}^{\bullet},R\Gamma_{Z'}f^!\mathcal{G}^{\bullet}). 
\] Then we use the natural isomorphisms \begin{align*} H^0R\Gamma\underline{\operatorname{RHom}}_{\mathcal{O}_Y}^{\bullet}(Rf_*\mathcal{F}^{\bullet},\mathcal{G}^{\bullet}) &\overset{\sim}{\longrightarrow} H^0\operatorname{RHom}_{\mathcal{O}_Y}^{\bullet}(Rf_*\mathcal{F}^{\bullet},\mathcal{G}^{\bullet}) \\ &\overset{\sim}{\longrightarrow} \operatorname{Hom}_{D(\mathcal{O}_Y)}(Rf_*\mathcal{F}^{\bullet},\mathcal{G}^{\bullet}) \end{align*} of Proposition II.5.3 and Theorem I.6.4 of \cite{HartshorneRD} and similarly for \[ H^0R\Gamma Rf_*\underline{\operatorname{RHom}}_{\mathcal{O}_X}^{\bullet}(\mathcal{F}^{\bullet},R\Gamma_{Z'}f^!\mathcal{G}^{\bullet}), \] where we additionally use the isomorphism $R\Gamma(X,\usc) \overset{\sim}{\longrightarrow} R\Gamma(Y,Rf_*(\usc))$ of Proposition II.5.2 of ibid. \end{proof} We conclude with a statement which, under certain hypothesis, allows us to recover the trace $\operatorname{tr}_f$ by its application to the structure sheaf $\mathcal{O}_Y$. \begin{definition} \label{EssentiallyPerfect} For a separated morphism $f\colon X \to Y$ of finite type, a compactification $X \xrightarrow{j} \overline{X} \xrightarrow{\overline{f}} Y$ and $\mathcal{F}^{\bullet} \in D_{\text{qc}}^+(Y)$, let \[ \chi_{\mathcal{F}^{\bullet}}^f\colon f^!\mathcal{O}_Y \derotimes_{\mathcal{O}_X} Lf^*\mathcal{F}^{\bullet} \to f^!\mathcal{F}^{\bullet} \] be the morphism $j^*\varphi$, where $\varphi\colon \overline{f}^!\mathcal{O}_Y \derotimes_{\mathcal{O}_X} L\overline{f}^*\mathcal{F}^{\bullet} \to \overline{f}^!\mathcal{F}^{\bullet}$ is the adjoint of the composition \[ R\overline{f}_*(\overline{f}^!\mathcal{O}_Y \derotimes_{\mathcal{O}_X} L\overline{f}^*\mathcal{F}^{\bullet}) \xrightarrow{\rho} R\overline{f}_*(\overline{f}^!\mathcal{O}_Y) \derotimes_{\mathcal{O}_Y} \mathcal{F}^{\bullet} \xrightarrow{\operatorname{tr}_{\overline{f}}} \mathcal{O}_Y \otimes_{\mathcal{O}_Y} \mathcal{F}^{\bullet}. \] Here $\rho$ denotes the isomorphism of the projection formula. 
The morphism $\chi_{\mathcal{F}^{\bullet}}^f$ is independent of the choice of the compactification \cite[Proposition 5.8]{Na.CompEFT}. If $\chi^f$ is an isomorphism of functors, then the morphism $f$ is called \emph{essentially perfect}. \end{definition} In Theorem 5.9 of \cite{Na.CompEFT}, Nayak gives various characterizations of essentially perfect morphisms. For example, smooth morphisms are essentially perfect. \begin{proposition} \label{Nayak} Let $f$ be an essentially perfect map fulfilling the assumptions and with the notation of \autoref{qcohadjunction}. For every $\mathcal{F}^{\bullet} \in D_{\text{qc}}^+(Y)$, there is a commutative diagram \[ \xymatrix{ Rf_*R\Gamma_{Z'}(f^!\mathcal{O}_Y \derotimes_{\mathcal{O}_X} Lf^*\mathcal{F}^{\bullet}) \ar[r]^-{\chi_{\mathcal{F}^{\bullet}}^f} \ar[d]^-{\rho} & Rf_*R\Gamma_{Z'}f^!\mathcal{F}^{\bullet} \ar[d]^-{\operatorname{tr}_f} \\ Rf_*R\Gamma_{Z'}f^!\mathcal{O}_Y \derotimes_{\mathcal{O}_Y} \mathcal{F}^{\bullet} \ar[r]^-{\operatorname{tr}_f \otimes \operatorname{id}} & \mathcal{F}^{\bullet}. } \] Here $\rho$ denotes the isomorphism of the projection formula and \autoref{RGammaTensor}. 
\end{proposition} \begin{proof} We have to verify that the diagram \begin{align} \label{NayakDiagram} \xymatrix{ Rf_*R\Gamma_{Z'}(f^!\mathcal{O}_Y \derotimes_{\mathcal{O}_X} Lf^*\mathcal{F}^{\bullet}) \ar[r]^-{\chi_{\mathcal{F}^{\bullet}}^f} \ar[d]^-{\sim} & Rf_*R\Gamma_{Z'}f^!\mathcal{F}^{\bullet} \ar[d]_-{\sim} \\ R\overline{f}_*Rj_*R\Gamma_{Z'}(j^*\overline{f}^!\mathcal{O}_Y \derotimes j^*L\overline{f}^*\mathcal{F}^{\bullet}) \ar[d]^-{\rho} & R\overline{f}_*Rj_*R\Gamma_{Z'}j^*\overline{f}^!\mathcal{F}^{\bullet} \ar[dd]_-{\sim}^{\varepsilon^{-1}} \\ R\overline{f}_*(Rj_*R\Gamma_{Z'}j^*\overline{f}^!\mathcal{O}_Y \derotimes L\overline{f}^*\mathcal{F}^{\bullet}) \ar[d]_-{\varepsilon^{-1}}^-{\sim} & \\ R\overline{f}_*(R\Gamma_{Z'}\overline{f}^!\mathcal{O}_Y \derotimes L\overline{f}^*\mathcal{F}^{\bullet}) \ar[d]^-{\rho} \ar[r]^-{\varphi} & R\overline{f}_*R\Gamma_{Z'}\overline{f}^!\mathcal{F}^{\bullet} \ar[dd]^-{\operatorname{tr}_{\overline{f}}} \\ Rf_*R\Gamma_{Z'}f^!\mathcal{O}_Y \derotimes_{\mathcal{O}_Y} \mathcal{F}^{\bullet} \ar[d]^{\operatorname{tr}_f \otimes \operatorname{id}} & \\ \mathcal{O}_Y \otimes \mathcal{F}^{\bullet} \ar[r]^-{\sim} & \mathcal{F}^{\bullet} } \end{align} commutes. The upper rectangle commutes because the projection formula is compatible with the unit $\operatorname{id} \to Rj_*j^*$ of adjunction, which we denote by $\operatorname{ad}_j$. 
More precisely, it follows from the commutativity of the diagram \[ \xymatrix@C40pt{ \overline{f}^!\mathcal{O}_Y \derotimes L\overline{f}^* \mathcal{F}^{\bullet} \ar[r] \ar[d] & Rj_*j^*(\overline{f}^!\mathcal{O}_Y \derotimes L\overline{f}^* \mathcal{F}^{\bullet}) \ar[d] \\ (Rj_*j^*\overline{f}^!\mathcal{O}_Y) \derotimes L\overline{f}^* \mathcal{F}^{\bullet} \ar[r] \ar[d]^-{\operatorname{pr}oj} & Rj_*j^*((Rj_*j^*\overline{f}^!\mathcal{O}_Y) \derotimes L\overline{f}^* \mathcal{F}^{\bullet}) \ar[d]_-{\sim} \\ Rj_*(j^*\overline{f}^!\mathcal{O}_Y \derotimes j^*L\overline{f}^* \mathcal{F}^{\bullet}) & Rj_*(j^*(Rj_*j^*\overline{f}^!\mathcal{O}_Y) \derotimes j^*L\overline{f}^* \mathcal{F}^{\bullet}), \ar[l]_-{\widetilde{\operatorname{ad}}_j} } \] where the maps of the upper square stem from $\operatorname{ad}_j$ (this square commutes by the naturality of the unit of adjunction), where $\operatorname{pr}oj$ is the isomorphism from the projection formula and where the lower horizontal arrow is obtained from the counit of adjunction $\widetilde{\operatorname{ad}}_j\colon j^*Rj_* \to \operatorname{id}$. The lower rectangle commutes by construction of $\operatorname{pr}oj$. The composition \[ j^* \xrightarrow{j^* \operatorname{ad}_j} j^*Rj_*j^* \xrightarrow{\widetilde{\operatorname{ad}}_j j^*} j^* \] is the identity. Therefore, the composition of the vertical arrows on the right hand side and the lower horizontal arrow equals $Rj_*$ applied to the natural isomorphism \[ j^*(\overline{f}^!\mathcal{O}_Y \derotimes L\overline{f}^* \mathcal{F}^{\bullet}) \to j^*\overline{f}^!\mathcal{O}_Y \derotimes j^*L\overline{f}^* \mathcal{F}^{\bullet}. \] The bottom rectangle of the diagram \autoref{NayakDiagram} commutes by construction of $\chi_{\mathcal{F}^{\bullet}}^{\overline{f}}$. \end{proof} \end{document}
\begin{document} \title{On the Fiber Characters of $\mathbb F^*_{p^m}$ and \\ related Polynomial Algebras } \author{Michele Elia \thanks{Politecnico di Torino Corso Duca degli Abruzzi 24, I - 10129 Torino -- Italy; ~~ e-mail: [email protected] }} \maketitle \thispagestyle{empty} \begin{abstract} \noindent Let $p$ be a prime, $m$ be a positive integer ( $m \geq 1$, and $m \geq 2$ if $p=2$), and $\chi_n$ be a multiplicative complex character on $\mathbb F^*_{p^m}$ with order $n| (p^m-1)$. We show that a partition $\mathcal A_1 \cup \mathcal A_2 \cup \cdots \cup \mathcal A_n$ of $\mathbb F^{*}_{p^m}$ is the partition by fibers of $\chi_n$ if and only if these fibers satisfy certain additive properties. This is equivalent to showing that the set of multivariate characteristic polynomials of these fibers, completed with the constant polynomial $1$, is the basis of an $(n+1)$-dimensional commutative algebra with identity in the ring $\mathbb Q[x_1,\ldots,x_m]/\langle x_1^p-1, \ldots, x_m^p-1 \rangle$. \end{abstract} \vspace{1mm} \noindent {\bf Mathematics Subject Classification (2000): 11A15, 11N69, 11R32} \vspace{1mm} \noindent {\bf Key words: } {\em nth power residue, cyclotomic coset, character, polynomial ring}. \section{Introduction} In 1952, Perron gave some additive properties of the fibers of the quadratic character on $\mathbb F_p$. Specifically in \cite{perron}, he showed that if $\mathfrak A, ~\mathfrak{B} \subset \mathbb F_p$ are the subsets of quadratic residues and non-residues, respectively, and letting $ ~ d_p = \frac{p-1}{4}~~\mbox{if}~~ p =1\bmod 4, ~~ \mbox{and}~~ d_p = \frac{p+1}{4} ~~\mbox{if}~~ p=3 \bmod 4 ~$, then \begin{enumerate} \item Every element of $\mathfrak A$ [respectively $\mathfrak B$] can be written as the sum of two elements of $\mathfrak A$ [respectively $\mathfrak B$] in exactly $d_p - 1$ ways. 
\item Every element of $\mathfrak A$ [respectively $\mathfrak B$] can be written as the sum of two elements of $\mathfrak B$ [respectively $\mathfrak A$] in exactly $d_p $ ways. \end{enumerate} \noindent It was natural to inquire just how strong this result is, and to what extent it may hold for any character $\chi_n$, other than $\chi_2$. In \cite{monico} it is shown that these additive properties uniquely characterize the even partition of $\mathbb F_p$ into quadratic residues and non-residues. In \cite{monico1}, the even restriction is removed, and the result is generalized to fibers of arbitrary multiplicative character $\chi_n$ on $\mathbb F_p$ ($n$ being a divisor of $(p-1)$), with suitable cyclotomic numbers in place of the constants $d_p$ above. Lastly, in \cite{elia}, the generalization of the even partition (i.e. by the quadratic character $\chi_2$) to every finite field of odd characteristic, that is, the partition of $\mathbb F_{p^m}$ into squares and non-squares, is discussed and settled. Perron's view is attractive, but the formulation of the problem purely in terms of characteristic polynomials and their algebras permits a full description and proof of facts that occur in every finite field. The purpose of this paper is to prove this definitive result. \section{Preliminary results} Let $\mathbb F_{p^m}$ be a finite field with $p^m$ elements generated by a root $\gamma$ of a primitive irreducible polynomial $p(x)=x^m+a_{m-1}x^{m-1}+\ldots+a_0$ over $\mathbb F_p$. Let $\mathcal B=\{1, \gamma, \ldots, \gamma^{m-1} \}$ be a basis of $\mathbb F_{p^m}$; any non-zero element $\beta \in \mathbb F_{p^m}$ is represented either as a power $\gamma^h$ or in the basis $\mathcal B$ as $ \sum_{i=1}^m b_i \gamma^{i-1}$ with $b_i \in \mathbb F_p$. In the following, $\beta$ will be interchangeably indicated with the $m$-dimensional vector $\mathbf b= [ b_1,b_2, \ldots, b_m] \in \mathbb F^m_p$, whenever necessary. 
\\ A multiplicative complex character is an isomorphism $\chi: \mathbb F^*_{p^m} \rightarrow \mathcal C_{p^m-1}$ between the multiplicative cyclic group $\mathbb F^*_{p^m}$ and the complex multiplicative group $\mathcal C_{p^m-1}$ of the units of order $p^m-1$ in the complex field $\mathbb C$. Let $n>1$ be a non-trivial positive divisor of $p^m-1$, that is $n \cdot s=p^m-1$ (if $p=2$ then $m$ must be greater than $1$), then the subset consisting of the powers of $\rho=\gamma^n$ is a cyclic subgroup of order $s$ of $\mathbb F^*_{p^m}$. \\ Let $\zeta_n$ be a primitive $n$th complex root of unity, i.e. $\zeta_n$ satisfies the $n$th cyclotomic polynomial. A character of order $n$ is explicitly defined as the mapping $\chi_n: \gamma \rightarrow e^{\frac{2\pi i}{n}} =\zeta_n$, that is $$ \chi_n(\gamma^{n~t+h}) = e^{ \frac{2\pi i(n~t+h)}{n}} = e^{\frac{2\pi i }{n}h} = \zeta^h_n ~~\forall t \in \mathbb Z ~~,~~ \mbox{and}~~ h \in \{0,1, \ldots, n-1 \}~~. $$ \noindent For each integer $1 \leq k \leq n$ let $\mathcal A_{k}$ be the fiber $\chi^{-1}( \zeta_n^{k-1})$, then the fiber $\mathcal A_1=\chi^{-1}(1)$ is the subgroup of $\mathbb F^*_{p^m}$ consisting of the $n$-th powers of $\gamma$, and the fiber $\mathcal A_k=\chi^{-1}( \zeta_n^{k-1})$, with $k>1$, is clearly the coset $\gamma^{k-1}\mathcal A_1$. We have $|\mathcal A_k| = \frac{p^m-1}{n} = s$, and for each $1 \leq k \leq n$, the corresponding multivariate characteristic polynomial is $$ q_k(\mathbf x)=q_k(x_1,\ldots,x_m) = \sum_{\beta \in \mathcal A_k} \prod_{i=1}^m x^{b_i}_i \in \mathbb Z[x_1,\ldots,x_m] ~~. $$ The fibers $\mathcal A_1,\ldots ,\mathcal A_n$ form a partition of $\mathbb F^*_{p^m}=\{1, \gamma, \gamma^2,\ldots , \gamma^{p^m-2}\}$, thus, defining the polynomial $q_0(\mathbf x)=1$ which is the characteristic polynomial of the set $\{ 0 \}$, we have $$ \sum_{k=0}^n q_k(\mathbf x) = \prod_{i=1}^{m} \frac{x_i^p-1}{x_i-1} ~~. 
$$ The following lemmas and theorem show that the set of these $n+1$ multivariate polynomials is the basis of an algebra of dimension $n+1$ in the polynomial ring $\mathfrak R_n[\mathbf x]=\mathbb Q[\mathbf x]/\langle x_1^p-1, \ldots, x_m^p-1 \rangle$, where $\langle x_1^p-1, \ldots, x_m^p-1 \rangle$ denotes the ideal generated by the polynomials included in brackets. \noindent Since the fiber $\mathcal A_1$ is a sub-group of order $s=\frac{p^m-1}{n}$ of $\mathbb F^*_{p^m}$, and the remaining fibers are its cosets, which form a partition of $\mathbb F^*_{p^m}$, the following proposition easily follows \begin{proposition} \label{pro1} The set $\{q_0(\mathbf x), q_1(\mathbf x), \ldots, q_n(\mathbf x)\}$ of $n+1$ multivariate polynomials is a basis of a $\mathbb Q$-subspace $\mathbf V_{n+1}$ of dimension $n+1$ in the $p^m$-dimensional vector space $\mathbb Q[\mathbf x]/\langle x_1^p-1, \ldots, x_m^p-1 \rangle$ of multivariate polynomials of degree at most $p - 1$ in each variable $x_i$. \end{proposition} \noindent The elements of $ \mathcal A_1$ have the following properties: \begin{lemma} \label{lemma1} Let $p$ be an odd prime, and assuming the above hypotheses, we have \begin{enumerate} \item If $s$ is even, for any $\beta \in \mathcal A_1$ there exists an $\alpha \in \mathcal A_1$ such that $\beta+\alpha=0$. \item If $s$ is odd, there exists a coset $\mathcal E = \eta \mathcal A_1$ such that for any $\beta \in \mathcal A_1$ there is an $\alpha \in \mathcal E$ such that $\beta+\alpha=0$. \end{enumerate} \noindent Let $p=2$, then \begin{enumerate} \item[3.] In $\mathbb F_{2^m}$, any element $\beta$ is the opposite of itself, i.e. $\beta+\beta=0$. \end{enumerate} \end{lemma} \begin{proof} Consider the primitive element $\gamma$ of $\mathbb F_{p^m}$, then \begin{enumerate} \item If $s$ is even, the elements of $\mathcal A_1$ are all the roots of $X^s-1$, which splits as $(X^{s/2}-1)(X^{s/2}+1)$. 
Let $\eta=\gamma^n$ denote a root of $X^{s/2}+1$, and $\beta= \gamma^{2n t}$ be any root of $X^{s/2}-1$. Since $\eta^{s/2}=-1$, we have $$ \beta \eta^{s/2} =-\beta= \gamma^{2n t} \gamma^{n s/2} = \gamma^{(2t+s/2)n} \in \mathcal A_1 ~~,$$ therefore $ \beta+ \gamma^{(2t+s/2)n} =0$, i.e. $\alpha= \gamma^{(2t+s/2)n}$. \item If $s$ is odd, no power of any element in $\mathcal A_1$ is equal to $-1$. However, let $\theta=\gamma^n$ be a generator of the cyclic group $\mathcal A_1$, then an $\eta=\gamma^t \in \mathbb F_{p^m}$ certainly exists such that $\theta+\eta=0$. Consider the coset $\eta \mathcal A_1$, therefore for any $\beta=\theta^u \in \mathcal A_1$, the element $\zeta=\eta\theta^{u-1}$ is such that $\beta+\zeta=0$ because we have $$ \beta+\zeta=\theta^u+\eta\theta^{u-1} =\theta^{u-1} (\theta+\eta )= 0 ~, $$ i.e. $\alpha=\gamma^t \theta^{u-1} =\gamma^t \gamma^{n(u-1)}=\gamma^{t+n(u-1)}$. \item If $p=2$, then we trivially have $\beta+\beta=0$, thus in any fiber $\mathcal A_k$ in $\mathbb F_{2^m}$, the sum of every element with itself is $0$, and the sum of two elements that are not in the same fiber is always different from zero. \end{enumerate} \end{proof} \noindent The immediate goal is to show that $\mathbf V_{n+1}$ is actually a $\mathbb Q$-sub-algebra of $\mathbb Q[\mathbf x]/ \langle x_1^{p} - 1, x_2^{p} - 1, \ldots , x_m^{p} - 1\rangle$. \begin{lemma} \label{lemma2} The following properties hold for the sums of elements of cosets in $\mathbb F^*_{p^m}$ with odd $p$: \begin{enumerate} \item If a fixed $u \in \mathbb F_{p^m}$ can be expressed as the sum $\alpha_1+\alpha_2=u$, with $\alpha_1 \in \mathcal A_i$ and $\alpha_2 \in \mathcal A_j$, then every element of the coset $\mathcal A_{k(u)}=u \mathcal A_1$ can be expressed as the sum of two elements, one from $\mathcal A_i$, and one from $\mathcal A_j$. 
\item As a direct consequence of the previous point, the product $q_i(\mathbf x)q_j(\mathbf x)$ is a linear combination of the basis elements of $\mathbf V_{n+1}$. \end{enumerate} \end{lemma} \begin{proof} The proof of claim 1 is immediate, assuming $\alpha_1+\alpha_2=u$, we have $$ \alpha (\alpha_1+\alpha_2) =\alpha \alpha_1+ \alpha \alpha_2 = \alpha u ~~\forall ~ \alpha \in \mathcal A_1 ~~, $$ and the conclusion follows from the definition of the coset $\mathcal A_{k(u)}=u\mathcal A_1$, and group closure. pace{2mm} \noindent The proof of claim 2 is a little more elaborate. Due to the definition of the monomials $m(\mathbf x)$ that form part of the definition of the polynomials $q_k(\mathbf x)$, and the correspondence $m(\mathbf x)\leftrightarrow \eta \in \mathbb F_{p^m}$, the product of two monomials in the ring $\mathbb Q[\mathbf x]/ \langle x_1^{p} - 1, x_2^{p} - 1, \ldots , x_m^{p} - 1\rangle$ corresponds to the sum of the corresponding elements in $\mathbb F_{p^m}$. Now the product $q_i(\mathbf x)q_j(\mathbf x)$ consists of $s^2$ distinct monomials, which can be partitioned into groups of $s$ monomials, each group corresponding to some polynomial $q_{k(i,j)}(\mathbf x)$ by the previous claim 1; the conclusion follows, by linearity. \end{proof} \begin{theorem} \label{lemma3} Let $2 \leq n|(p^m-1)$, $p$ prime, $m$ positive integer ($m\geq 2$ if $p=2$), and $s=\frac{p^m-1}{n}$. The $\mathbb Q$-vector space $\mathbf V_{n+1}$ of Proposition \ref{pro1} is a $\mathbb Q$-sub-algebra of the residue ring $\mathbb Q[\mathbf x]/ \langle x_1^{p} - 1, x_2^{p} - 1, \ldots , x_m^{p} - 1\rangle$. In particular, for every $1 \leq i, j \leq n$ there exist integers $c_{ijk}$ such that \begin{equation} \label{eqsubalg} q_i(\mathbf x)q_j(\mathbf x) \bmod \langle x_1^{p} - 1, x_2^{p} - 1, \ldots , x_m^{p} - 1\rangle=c_{ij0}+ \sum_{k=1}^n c_{ijk} q_k(\mathbf x) ~~. 
\end{equation} The coefficients $c_{ij0}$ can be explicitly expressed considering $p$ odd and $p=2$ separately: \begin{itemize} \item[a)] $p$ odd \begin{enumerate} \item $c_{ii0} = s$ and $c_{ij0} = 0$ for every $j \neq i$ if $s$ is even; \item $c_{ii0} = 0$ and $c_{ij0} = s$ for a suitable pair $j \neq i$, if $s$ is odd. \end{enumerate} \item[b)] $p=2$, in this case $s$ is always odd, and we have \begin{enumerate} \item $c_{ii0} = s$, and $c_{ij0} = 0$ for every $j \neq i$. \end{enumerate} \end{itemize} \end{theorem} \begin{proof} The $\mathbb Q$-vector space $\mathbf V_{n+1}$ is a sub-algebra of $\mathbb Q[\mathbf x]/ \langle x_1^{p} - 1, x_2^{p} - 1, \ldots , x_m^{p} - 1\rangle$ by Lemma \ref{lemma2}. pace{3mm} \noindent In general it does not seem possible to obtain a closed form for all constants $c_{ijk}$ holding for every $p$ and every $m$, except for the following exceptions. \\ Let $\mathbf 1$ be the all-ones $m$-dimensional vector, then we have $$ q_i(\mathbf 1)q_j(\mathbf 1)=c_{ij0}+\sum_{k=1}^n c_{ijk} q_k(\mathbf 1) ~~, $$ since $q_i(\mathbf 1)=s$, we have $s^2 = c_{ij0}+ s \sum_{k=1}^n c_{ijk}$; this equation implies that $s|c_{ij0}$; since the integer $c_{ij0} \leq s$, it follows that $c_{ij0}$ is either $0$ or $s$. If $p$ is odd, by Lemma \ref{lemma1} it follows that \begin{enumerate} \item $c_{ii0} = s$ and $c_{ij0} = 0$ for every $j \neq i$ if $s$ is even; \item $c_{ii0} = 0$ and $c_{ij0} = s$ for a suitable pair $j \neq i$, if $s$ is odd. \end{enumerate} \noindent If $p=2$, then $s$ is necessarily odd, however in $\mathbb F_{p^m}$ every element is the opposite of itself, then letting $m_\beta(\mathbf x)$ be the monomial associated to $\beta$, it follows that $m_\beta(\mathbf x)^2 \bmod \langle x_1^{2} - 1, x_2^{2} - 1, \ldots , x_m^{2} - 1\rangle $ is the monomial associated to $\beta+\beta=0$, that is the monomial $1$; it follows that \begin{enumerate} \item $c_{ii0} = s$ and $c_{ij0} = 0$ for every $j \neq i$. 
\end{enumerate} \end{proof} \noindent Theorem \ref{lemma3} shows that the vector space $\mathbf V_{n+1}$ is a commutative sub-algebra with identity of the ring of residue polynomials $\mathbb Q[\mathbf x]/ \langle x_1^{p} - 1, x_2^{p} - 1, \ldots , x_m^{p} - 1\rangle$. As observed in the proof of Theorem \ref{lemma3}, in general it seems that the structure constants cannot be given in closed form for every prime $p$, extension degree $m$, and power residue exponent $n$, thus the computational aspects for obtaining numerical values of every $c_{ijk}$ may be of interest. \section{Computation of the structure constants} The structure constants $c_{ijk}$ are easily found in closed form for $n=2$, $m=1$, and any odd $p$; however, for every $m\geq 2$ and $n >2$, in general these constants must be numerically computed by means of convenient algorithms. We briefly describe two different computational methods. \subsection{Direct method} For fixed $i,j$, equation (\ref{eqsubalg}) can be directly used to compute the structure constants. A consistent linear system of $n$ equations in the $n$ unknowns $c_{ijk}$, $k=1, \ldots,n$, can be obtained by comparing the coefficients of equal multivariate monomials on the two sides of (\ref{eqsubalg}); actually, we would obtain a consistent linear system of $n^2$ linear equations in $n$ unknowns. The search for the solution could present some difficulty because the product $$ q_1(x_1, \ldots, x_m)q_1(x_1, \ldots, x_m) \bmod \langle x_1^p-1, \ldots ,x_m^p-1 \rangle$$ consists of $n^2$ monomials in some order that, a priori, we do not know. They must all be computed, but only $n$ are used. When $n$ is small, as in the following examples, the method is very efficient, but when $n$ is large, $n^2$ multivariate monomials must be sorted according to some ordering criterion: this computational issue is left as an open problem. 
\begin{example} Let $p=3$ and $m=2$, thus $p^m-1=8$ and $n$ may be $2$ or $4$, which are the only proper divisors of $8$. Let $m(z)=z^2+z+2$ be a primitive quadratic polynomial over $\mathbb F_3$. Let $\alpha$ be a root of $m(z)$, the $9$ elements of $\mathbb F_9$ are $$ \begin{array}{|c|cr|} \hline 0 &= & 0 \\ 1 &= & 1 \\ \alpha & = & 0+ \alpha \\ \alpha^2 & = & 1+2\alpha \\ \alpha^3 & = & 2+2\alpha \\ \alpha^4 & = & 2 \\ \alpha^5 & = & 0+2\alpha \\ \alpha^6 & = & 2+\alpha \\ \alpha^7 & = & 1+ \alpha \\ \hline \end{array} $$ \paragraph{Case 1: $ \mathbf n=2, ~s=4$}; we have two fibers (cosets) $$ \mathcal A_1=\{ 1,1+2\alpha, 2, 2+\alpha \} ~~,~~ \mathcal A_2=\{ \alpha, 2+2\alpha, 2\alpha,1+\alpha \} ~, $$ and the corresponding characteristic multivariate polynomials are $$ q_1(x_1,x_2) = x_1+x_1x_2^2+x_1^2+x_1^2x_2 ~~~~,~~~~ q_2(x_1,x_2) = x_2+x_1^2x_2^2+x_2^2+x_1x_2 ~~. $$ The structure constants of the polynomial algebra of $\mathbf V_3$, with basis $\{1, q_1(x_1,x_2), q_2(x_1,x_2) \}$, are identified by the system $$ \left\{ \begin{array}{lcl} q_1(x_1,x_2)q_1(x_1,x_2) \bmod \langle x_1^3-1,x_2^3-1 \rangle&=& c_{110}+c_{111}q_1(x_1,x_2)+c_{112}q_2(x_1,x_2) \\ q_1(x_1,x_2)q_2(x_1,x_2) \bmod \langle x_1^3-1,x_2^3-1 \rangle&=& c_{120}+c_{121}q_1(x_1,x_2)+c_{122}q_2(x_1,x_2) \\ q_2(x_1,x_2)q_2(x_1,x_2) \bmod \langle x_1^3-1,x_2^3-1 \rangle&=& c_{220}+c_{221}q_1(x_1,x_2)+c_{222}q_2(x_1,x_2) \\ \end{array} \right. $$ where the constants with the third index equal to $0$ are known by Theorem \ref{lemma3} $c_{110}=4$, $c_{120}=0$, and $c_{220}=4$. 
\\ To find the remaining $6$ constants with the direct method we compute $q_i(x_1,x_2)q_j(x_1,x_2) \bmod \langle x_1^3-1,x_2^3-1 \rangle $ and subtract $(c_{ij1}q_1(x_1,x_2)+c_{ij2}q_2(x_1,x_2))+c_{ij0} $, obtaining three multivariate polynomials which must be identically zero $$ \left\{ \begin{array}{lcl} 4+(2-c_{112})x_1^2x_2^2+(1-c_{111})x_1^2x_2+(1-c_{111})x_1^2+(1-c_{111})x_1x_2^2+(2-c_{112})x_1x_2 & & \\ ~~~+(1-c_{111})x_1+(2-c_{112})x_2^2+(2-c_{112})x_2-4&=& 0 \\ (2-c_{122})x_1^2x_2^2+(2-c_{121})x_1^2x_2+(2-c_{121})x_1^2+(2-c_{121})x_1x_2^2+(2-c_{122})x_1x_2 & & \\ ~~~~~+(2-c_{121})x_1+(2-c_{122})x_2^2+(2-c_{122})x_2&=& 0 \\ 4+(1-c_{222})x_1^2x_2^2+(2-c_{221})x_1^2x_2+(2-c_{221})x_1^2+(2-c_{221})x_1x_2^2+(1-c_{222})x_1x_2 & & \\ ~~~~~ +(2-c_{221})x_1+(1-c_{222})x_2^2+(1-c_{222})x_2-4&=& 0 \\ \end{array} \right. $$ From the first equation we obtain $c_{111}=1,~c_{112}=2$, from the second equation we obtain $c_{121}=2,~c_{122}=2$, and from the third equation $c_{221}=2,~c_{222}=1$, which allows us to write the multiplication table with the coefficients of the linear combinations (the trivial multiplications by $q_0(x_1,x_2)=1$ are not reported) $$ \begin{array}{l|ccc} & q_0(x_1,x_2) & q_1(x_1,x_2) &q_2(x_1,x_2) \\ \hline q_1(x_1,x_2)q_1(x_1,x_2) & 4 & 1 & 2 \\ q_1(x_1,x_2)q_2(x_1,x_2) & 0 & 2 & 2 \\ q_2(x_1,x_2)q_2(x_1,x_2) & 4 & 2 & 1 \\ \end{array} $$ pace{3mm} \paragraph{Case 2: $\mathbf n=4, ~s=2$}; we have four cosets $$ \mathcal A_1=\{ 1, 2 \} ~~,~~\mathcal A_2=\{\alpha , 2\alpha \} ~~,~~ \mathcal A_3=\{2+ \alpha, 1+2\alpha\} ~~,~~\mathcal A_4=\{ 2+2\alpha,1+\alpha \} ~, $$ and, correspondingly, the characteristic multivariate polynomials are $$ q_1(x_1,x_2) = x_1+x_1^2 ~~,~~ q_2(x_1,x_2) = x_2+x_2^2 ~~,~~ q_3(x_1,x_2)=x_1x_2^2+x_1^2x_2 ~~,~~ q_4(x_1,x_2) = x_1^2x_2^2+x_1x_2 ~~. 
$$ The multiplication table can be conveniently written as a $4\times 4$ table, where rows and columns are orderly indexed by the polynomials $q_i(\mathbf x)$, and the entries are five-tuples of integers which are the five coefficients of the linear combinations $$ \begin{array}{|c||c|c|c|c|} \hline & q_1 & q_2 & q_3 & q_4 \\ \hline \hline q_1 & [2,1,0,0,0] & [0,0,0,1,1] & [0,0,1,0,1] & [0,0,1,1,0] \\ \hline q_2 & [0,0,0,1,1] & [2,0,1,0,0] & [0,1,0,0,1] & [0,1,0,1,0] \\ \hline q_3 & [0,0,1,0,1] & [0,1,0,0,1] & [2,0,0,1,0] & [0,1,1,0,0] \\ \hline q_4 & [0,0,1,1,0] & [0,1,0,1,0] & [0,1,1,0,0] & [2,0,0,0,1] \\ \hline \end{array} $$ For instance we have $$ \left\{ \begin{array}{lcl} q_1(x_1,x_2)q_1(x_1,x_2) \bmod \langle x_1^3-1,x_2^3-1 \rangle&=&2 +q_1(x_1,x_2) \Rightarrow [2,1,0,0,0] \\ q_1(x_1,x_2)q_2(x_1,x_2) \bmod \langle x_1^3-1,x_2^3-1 \rangle&=&q_3(x_1,x_2)+q_4(x_1,x_2) \Rightarrow [0,0,0,1,1] \end{array} \right. $$ \end{example} \vspace{3mm} \begin{example} Let $m(z)=z^4+z+1$ be a $4$-degree primitive polynomial over $\mathbb F_2$. Let $\alpha$ be a root of $m(z)$, the $16$ elements of $\mathbb F_{16}$ are $$ \begin{array}{|c|cr|} \hline 0 &= & 0 \\ 1 &= & 1 \\ \alpha & = & \alpha \\ \alpha^2 & = & \alpha^2 \\ \alpha^3 & = & \alpha^3 \\ \alpha^4 & = & 1+\alpha \\ \alpha^5 & = & \alpha+\alpha^2 \\ \alpha^6 & = & \alpha^2+\alpha^3 \\ \alpha^7 & = & 1+ \alpha+\alpha^3 \\ \alpha^8 & = & 1+\alpha^2 \\ \alpha^9 & = &\alpha+ \alpha^3 \\ \alpha^{10} & = & 1+\alpha+ \alpha^2 \\ \alpha^{11} & = & \alpha+\alpha^2+\alpha^3 \\ \alpha^{12} & = & 1+\alpha+\alpha^2+\alpha^3 \\ \alpha^{13} & = & 1+ \alpha^2+\alpha^3 \\ \alpha^{14} & = & 1+\alpha^3 \\ \hline \end{array} $$ \noindent In this case $n$ may be $3$ or $5$; only $n=3$ is considered, being fully illustrative. 
\paragraph{Case: $n=3$, $s=5$}; we have three cosets $$ \left\{ \begin{array}{lcl} \mathcal A_1 &= &\{ 1,\alpha^3, \alpha^2+\alpha^3, \alpha+ \alpha^3, 1+\alpha+\alpha^2+\alpha^3 \} \\ \mathcal A_2 &=& \{ \alpha,1+\alpha , 1+ \alpha+\alpha^3, 1+\alpha+\alpha^2,1+ \alpha^2+\alpha^3\} \\ \mathcal A_3 &=& \{ \alpha^2, \alpha+\alpha^2, 1+\alpha^2,\alpha+\alpha^2+\alpha^3, 1+\alpha^3 \} \end{array} \right. $$ and correspondingly three characteristic multivariate polynomials $$ \left\{ \begin{array}{lcl} q_1(x_1,x_2,x_3,x_4) &= & x_1+ x_4 + x_3 x_4+x_2x_4+x_1x_2x_3 x_4 \\ q_2(x_1,x_2,x_3,x_4) &=& x_2+x_1x_2+x_1x_2x_4+x_1x_2x_3+x_1x_3x_4 \\ q_3(x_1,x_2,x_3,x_4) &=& x_3+ x_2x_3+ x_1x_3+ x_2x_3x_4+ x_1x_4 \end{array} \right.~~. $$ Let $\mathbf x=[x_1,x_2,x_3,x_4]$, a basis of $\mathbf V_4$ is $\{1, q_1(\mathbf x), q_2(\mathbf x),q_3(\mathbf x) \}$, and the structure constants of the polynomial algebra can be computed from the following system of six equations $$ \left\{ \begin{array}{lcl} q_1(\mathbf x)q_1(\mathbf x)&=& c_{110}+c_{111}q_1(\mathbf x)+c_{112}q_2(\mathbf x) +c_{113}q_3(\mathbf x) \\ q_1(\mathbf x)q_2(\mathbf x)&=& c_{120}+c_{121}q_1(\mathbf x)+c_{122}q_2(\mathbf x) +c_{123}q_3(\mathbf x) \\ q_1(\mathbf x)q_3(\mathbf x)&=& c_{130}+c_{131}q_1(\mathbf x)+c_{132}q_2(\mathbf x) +c_{133}q_3(\mathbf x) \\ q_2(\mathbf x)q_2(\mathbf x)&=& c_{220}+c_{221}q_1(\mathbf x)+c_{222}q_2(\mathbf x) +c_{223}q_3(\mathbf x) \\ q_2(\mathbf x)q_3(\mathbf x)&=& c_{230}+c_{231}q_1(\mathbf x)+c_{232}q_2(\mathbf x) +c_{233}q_3(\mathbf x) \\ q_3(\mathbf x)q_3(\mathbf x)&=& c_{330}+c_{331}q_1(\mathbf x)+c_{332}q_2(\mathbf x) +c_{333}q_3(\mathbf x) \\ \end{array} \right. $$ Now $c_{110}=c_{220}=c_{330}=5$, and $c_{120}=c_{130}=c_{230}=0$, then we have to compute only $18$ constants instead of $24$. 
Proceeding as in the previous example we obtain all structure constants $c_{ijk}$ and write the multiplication table where the coefficients of the linear combinations for $ q_i(\mathbf x)q_j(\mathbf x)$ are reported in the corresponding row (the trivial multiplications by $q_0(\mathbf x)=1$ are not reported) $$ \begin{array}{l|cccc} & q_0(\mathbf x) & q_1(\mathbf x) &q_2(\mathbf x) &q_3(\mathbf x) \\ \hline q_1(\mathbf x)q_1(\mathbf x) & 5 & 0 & 2 & 2 \\ q_1(\mathbf x)q_2(\mathbf x) & 0 & 2 & 2 & 1 \\ q_1(\mathbf x)q_3(\mathbf x) & 0 & 2 & 1 & 2 \\ q_2(\mathbf x)q_2(\mathbf x) & 5 & 2 & 0 & 2 \\ q_2(\mathbf x)q_3(\mathbf x) & 0 & 1 & 2 & 2 \\ q_3(\mathbf x)q_3(\mathbf x) & 5 & 2 & 2 & 0 \\ \end{array} $$ \end{example} \subsection{A numerical method based on cyclotomic fields} Let $\mathbb Q(\zeta_{p})$ be the cyclotomic field of $p$-th roots of unity, with $\zeta_{p}$ denoting a primitive root of unity, that is a root of the cyclotomic polynomial of degree $p-1$. Thus $\mathbb Q(\zeta_{p})$ is an extension of degree $p-1$ of $\mathbb Q$. Let $\mathfrak G_p$ denote the multiplicative cyclic group generated by $\zeta_{p}$. Let $\mathbf u =(\zeta^{i_1}_p,\zeta^{i_2}_p, \ldots ,\zeta^{i_m}_p)$ denote an $m$-tuple of elements of $\mathfrak G_p$, thus from the evaluation of equation (\ref{eqsubalg}) for $\mathbf x =\mathbf u $, we get a polynomial in $\zeta_p$ that is equal to $0$ \begin{equation} \label{nummeq} c_{ij0}+ \sum_{k=1}^n c_{ijk} q_k(\mathbf u) - q_i(\mathbf u)q_j(\mathbf u) =0~~. \end{equation} We thus obtain a system of $p$ linear equations with integral coefficients in $n$ unknowns. If $n\leq p$ a solution is easily obtained, since it certainly exists by Theorem \ref{lemma3}. If $n>p$ we need more linear equations, then we consider the equations obtained using $\ell$ different vectors $\mathbf u$, with the aim of getting $n$ linearly independent equations. \begin{example} Reconsider the problem of example 1. 
Its solutions by this second method are obtained working in $\mathbb Q(\zeta_{3})$ with $\zeta_3$ a primitive complex cubic root of unity. \\ Take $\mathbf u_0=(1,1)$, and $\mathbf u_1=(1,\zeta_3)$; in this case we obtain two equations using (\ref{nummeq}), considering that $c_{110}=4$, $c_{120}=0$, and $c_{220}=4$, $ q_1(1,1)=q_2(1,1)=4$, $ q_1(1,\zeta_3)=1$, and $q_2(1,\zeta_3)=-2$. Thus we can write the system $$ \left\{ \begin{array}{lcl} 4+4c_{111}+4c_{112} &=& 16 \\ 4+c_{111}-2c_{112} &=& 1 \end{array} \right. $$ Solving for $ c_{111}, c_{112}$ we obtain $ c_{111}=1, c_{112}=2$. \\ Similarly, we obtain all structure constants summarized in the following table $$ \begin{array}{l|ccc} & 1 & q_1(x_1,x_2) &q_2(x_1,x_2) \\ \hline q_1(x_1,x_2)q_1(x_1,x_2) & 4 & 1 & 2 \\ q_1(x_1,x_2)q_2(x_1,x_2) & 0 & 2 & 2 \\ q_2(x_1,x_2)q_2(x_1,x_2) & 4 & 2 & 1 \\ \end{array} $$ \end{example} \vspace{5mm} \subsection{A new proof of Perron's original observations} The history of the $\mathbb F^*_{p^m}$ partition by the fibers of a given character began with Perron's characterization of the sets of quadratic residues and non-residues in prime fields, and several independent proofs have since been given. A ``new'' proof is obtained by specializing the general results given above, and holds for every finite field of odd characteristic. \\ Consider the finite field $\mathbb F_{p^m}$, $p$ odd, and the character $\chi_2$ of order $2$ defined over $\mathbb F^*_{p^m}$. Let $\mathcal R$ and $\mathcal N$ be the subsets of $\mathbb F^*_{p^m}$ of squares and non-squares, respectively, that is $\mathcal R=\chi^{-1}(1)$ and $\mathcal N=\chi^{-1}(-1)$. 
The corresponding characteristic polynomials are $$ q_{\mathcal R}(\mathbf x) = \sum_{\beta\in \mathbb F^*_{p^m}} \frac{1+\chi_2(\beta)}{2} \prod_{i=1}^m x_i^{b_i} ~~,~~ q_{\mathcal N}(\mathbf x) = \sum_{\beta\in \mathbb F^*_{p^m}} \frac{1-\chi_2(\beta)}{2} \prod_{i=1}^m x_i^{b_i} ~~. $$ Depending on whether $\frac{p^m-1}{2}$ is odd or even, we have \begin{equation} \label{eqmain} \begin{array}{l} \frac{p^m-1}{2} ~~ \mbox{odd}~~~~ \left\{ \begin{array}{lcl} q_{\mathcal R}(\mathbf x)q_{\mathcal R}(\mathbf x) \bmod \langle x_1^p-1, \ldots,x_m^p-1\rangle&=&~~~~~0+a_{11} q_{\mathcal R}(\mathbf x)+b_{11} q_{\mathcal N}(\mathbf x) \\ q_{\mathcal R}(\mathbf x)q_{\mathcal N}(\mathbf x) \bmod \langle x_1^p-1, \ldots,x_m^p-1\rangle&=&\frac{p-1}{2}+a_{12} q_{\mathcal R}(\mathbf x)+b_{12} q_{\mathcal N}(\mathbf x) \\ q_{\mathcal N}(\mathbf x)q_{\mathcal N}(\mathbf x) \bmod \langle x_1^p-1, \ldots,x_m^p-1\rangle&=&~~~~~0+a_{22} q_{\mathcal R}(\mathbf x)+b_{22} q_{\mathcal N}(\mathbf x) \\ \end{array} \right. \\ \\ \frac{p^m-1}{2} ~~ \mbox{even}~~~~ \left\{ \begin{array}{lcl} q_{\mathcal R}(\mathbf x)q_{\mathcal R}(\mathbf x) \bmod \langle x_1^p-1, \ldots,x_m^p-1\rangle&=&\frac{p-1}{2}+a_{11} q_{\mathcal R}(\mathbf x)+b_{11} q_{\mathcal N}(\mathbf x) \\ q_{\mathcal R}(\mathbf x)q_{\mathcal N}(\mathbf x) \bmod \langle x_1^p-1, \ldots,x_m^p-1\rangle&=&~~~~~0+a_{12} q_{\mathcal R}(\mathbf x)+b_{12} q_{\mathcal N}(\mathbf x) \\ q_{\mathcal N}(\mathbf x)q_{\mathcal N}(\mathbf x) \bmod \langle x_1^p-1, \ldots,x_m^p-1\rangle&=&\frac{p-1}{2}+a_{22} q_{\mathcal R}(\mathbf x)+b_{22} q_{\mathcal N}(\mathbf x) \\ \end{array} \right. 
\end{array} \end{equation} \noindent Let $\mathbf u_o$ be the vector of all ones; then we have $$ q_{\mathcal R}(\mathbf u_o) = \sum_{\beta\in \mathbb F^*_{p^m}} \frac{1+\chi_2(\beta)}{2} = \frac{p^m-1}{2} ~~,~~q_{\mathcal N}(\mathbf u_o) = \sum_{\beta\in \mathbb F^*_{p^m}} \frac{1-\chi_2(\beta)}{2} = \frac{p^m-1}{2} $$ $$ q_{\mathcal R}(-\mathbf u_o) = \sum_{\beta\in \mathbb F^*_{p^m}} \frac{1+\chi_2(\beta)}{2} (-1)^{\sum_{i=1}^m b_i} = t ~,~q_{\mathcal N}(-\mathbf u_o) = \sum_{\beta\in \mathbb F^*_{p^m}} \frac{1-\chi_2(\beta)}{2} (-1)^{\sum_{i=1}^m b_i} = -t $$ If $q_{\mathcal R}(-\mathbf u_o)=0$, it is necessary to use a vector $\mathbf u$ different from $-\mathbf u_o$: there are $2^m-2$ possible choices for $\mathbf u \neq -\mathbf u_o$, and one of them certainly works because of Theorem \ref{lemma3}. \vspace{5mm} \end{document}
\begin{document} \title{ Peak reduction and finite presentations for automorphism groups of right-angled Artin groups } \begin{abstract} We generalize the peak-reduction algorithm (Whitehead's theorem) for free groups to a theorem about a general right-angled Artin group $A_\Gamma$. As an application, we find a finite presentation for the automorphism group $\mathrm{Aut}\,A_\Gamma$ that generalizes McCool's presentation for the automorphism group of a finite rank free group. We also consider a stronger generalization of peak-reduction, giving a counterexample and proving a special case. \end{abstract} \section{Introduction} \subsection{Background} Let $\Gamma$ be a graph on $n$ vertices, with vertex set $X$ and adjacency relation denoted by $\adj{-}{-}$. Let $A_\Gamma$ be the \emph{right-angled Artin group of $\Gamma$}, defined by \[A_\Gamma:= \langle X |R_\Gamma \rangle\] where the relations are $R_\Gamma=\{[x,y]|\text{ $x,y\in X$ and $\adj{x}{y}$}\}$ (we use the convention that $[x,y]=xyx^{-1}y^{-1}$). If $\Gamma$ is the edgeless graph ($n$ vertices and no edges), then $A_\Gamma$ is the free group $F_n$ on $n$ generators. If $\Gamma$ is the complete graph, then $A_\Gamma$ is the free abelian group $\Z^n$. So in a sense, the group $A_\Gamma$ interpolates between free groups and free abelian groups as we vary $\Gamma$. Similarly, the automorphism group $\AAG$ interpolates between $\Aut F_n$ and the integral general linear group $\GL(n,\Z)$. In this paper, we develop a framework for understanding $\AAG$ in which ideas from the study of linear groups and ideas from the study of $\Aut F_n$ can both be applied. We also give a finite presentation for $\AAG$. Whitehead's 1936 theorem~(\cite{wh}, Theorem 2) is a result about automorphism groups of free groups with important applications; peak reduction is an algorithmic approach used by Rapaport~\cite{rap}, Higgins--Lyndon~\cite{hl} and others to reprove and extend this theorem. 
Whitehead's theorem states that there is a finite generating set for $\Aut F_n$ that has a special property concerning factorizations of elements of $\Aut F_n$ and the lengths of elements of $F_n$. One corollary of this theorem is that for any $k$--tuple $W$ of elements of $F_n$, the stabilizer $(\Aut F_n)_{W}$ is finitely generated; in fact, McCool~\cite{mccool} used peak-reduction methods to prove that $(\Aut F_n)_{W}$ is finitely presented. Another corollary is that there is an algorithm that determines whether two $k$--tuples of elements of $F_n$ are in the same orbit under the action of $\Aut F_n$ (see Lyndon--Schupp~\cite{ls}, Chapter~I, Proposition~4.19). Peak-reduction methods were also used by Culler--Vogtmann~\cite{cullervogtmann} to study the structure of the outer space of $F_n$. We proceed to define the notion of a peak-reduced factorization. Define the length of a conjugacy class of $A_\Gamma$ to be the minimum of the lengths of its representative elements (with respect to $X$), and define the length of a $k$--tuple of conjugacy classes of $A_\Gamma$ to be the sum of the lengths of its elements (for any $k\geq 1$). For $W$ a $k$--tuple of conjugacy classes in $A_\Gamma$, we say that a string $\alpha_m\cdots\alpha_1$ of elements of $\Aut A_\Gamma$ is \emph{peak-reduced} with respect to $W$ if for each $i=1,\ldots,m-1$, we do not have both \begin{align*} |(\alpha_{i+1}\cdots\alpha_1)\cdot W |\leq|(\alpha_i\cdots\alpha_1)\cdot W|\\ \tag*{\text{and}}|(\alpha_i\cdots\alpha_1)\cdot W|\leq|(\alpha_{i-1}\cdots\alpha_1)\cdot W| \end{align*} unless all three lengths are equal. Suppose $G<\AAG$, $S$ is a finite generating set for $G$, and $W$ is a $k$--tuple of conjugacy classes in $A_\Gamma$. We say that $G$ has \emph{peak reduction} with respect to $W$ by elements of $S$ if every $\alpha\in G$ has a factorization by elements of $S$ that is peak-reduced with respect to $W$. 
The peak-reduction theorem for a free group $F_n$ states that there is a finite generating set $\Omega$ for $\Aut F_n$ (called the \emph{Whitehead automorphisms}) such that $\Aut F_n$ has peak reduction with respect to any $k$--tuple of conjugacy classes $W$ in $F_n$ by elements of $\Omega$. See Lyndon and Schupp~\cite{ls}, Chapter~I, Proposition~4.17 for a proof. Whitehead's theorem is an important special case of this theorem. The following definition of a Whitehead automorphism in $\AAG$ generalizes the definition of a Whitehead automorphism of a free group (see chapter I.4 of Lyndon and Schupp~\cite{ls}). \pagebreak[3] \begin{definition}\label{de:whauts} A \emph{Whitehead automorphism} is an element $\alpha\in\AAG$ of one of the following two types: \begin{description} \item[Type~(1):] $\alpha$ restricted to $X\cup X^{-1}$ is a permutation of $X\cup X^{-1}$, or \item[Type~(2):] there is an element $a\in X\cup X^{-1}$, called the \emph{multiplier} of $\alpha$, such that for each $x\in X$, the element $\alpha(x)$ is in $\{x,xa,a^{-1}x,a^{-1}xa\}$. \end{description} Let $\Omega$ be the set of all Whitehead automorphisms of $A_\Gamma$. \end{definition} For our purposes, it is more natural to single out the following two subsets of $\Omega$: \begin{definition} A Whitehead automorphism $\alpha\in\Omega$ is \emph{long-range} if $\alpha$ is of type~(1) or if $\alpha$ is of type~(2) with multiplier $a\in X\cup X^{-1}$ and $\alpha$ fixes the elements of $X$ adjacent to $a$ in $\Gamma$. Let $\Omega_\ell$ be the set of long-range elements of $\Omega$. A Whitehead automorphism $\alpha\in\Omega$ is \emph{short-range} if $\alpha$ is of type~(2) with multiplier $a\in X\cup X^{-1}$ and $\alpha$ fixes the elements of $X$ not adjacent to $a$ in $\Gamma$. Let $\Omega_s$ be the set of short-range elements of $\Omega$. 
\end{definition} It is easy to see that $\Omega$ is finite, and it is a consequence of the work of Laurence~\cite{la} (see Section~\ref{ss:Laurencesgenerators}) that $\OLS$ generates $\AAG$. The following theorem is the main result of the current paper: \begin{maintheorem}\label{mt:threeparts} The finite generating set $\OLS$ for $\AAG$ has the following properties: \begin{enumerate} \item \label{it:complements} each $\alpha\in\AAG$ can be written as $\alpha=\beta\gamma$ for some $\beta\in \langle\Omega_s\rangle$ and some $\gamma\in\langle\Omega_\ell\rangle$; \item \label{it:srinj} the usual representation $\AAG\to \Aut H_1(A_\Gamma)$ to the automorphism group of the abelianization $H_1(A_\Gamma)$ of $A_\Gamma$ restricts to an embedding $\langle\Omega_s\rangle\into \Aut H_1(A_\Gamma)$; and \item \label{it:lrwhalg} the subgroup $\langle\Omega_\ell\rangle$ has peak reduction by elements of $\Omega_\ell$ with respect to any $k$--tuple $W$ of conjugacy classes in $A_\Gamma$. \end{enumerate} \end{maintheorem} \newcommand\partref[1]{part~(\ref{#1}) of Theorem~\ref{mt:threeparts}} The proof of Theorem~\ref{mt:threeparts} is effective: there is an algorithm that splits an automorphism into its $\langle \Omega_\ell\rangle$ and $\langle \Omega_s\rangle$ parts and an algorithm that peak-reduces an element of $\langle \Omega_\ell\rangle$. Further, the theorem implies that we can analyze an element of $\langle \Omega_s\rangle$ by using row-reduction methods in $\Aut H_1(A_\Gamma)\cong \GL(n,\Z)$. Note that if $A_\Gamma$ is a free group, $\Omega_\ell$ is Whitehead's generating set $\Omega$, our $\Omega_s$ contains only the identity, and Theorem~\ref{mt:threeparts} restricts to the peak-reduction theorem. For most right-angled Artin groups, $\langle \Omega_\ell\rangle$ is a proper subgroup of $\AAG$, and \partref{it:lrwhalg} is seemingly weaker than the peak-reduction theorem for free groups. 
In fact, we cannot hope for a straightforward generalization of peak reduction that applies to all of $\AAG$ for arbitrary $\Gamma$. We show the following in Section~\ref{se:nowhalg}: \begin{mainproposition}\label{mp:nowhalg} There is a graph $\Gamma$ such that for every finite generating set $S$ of $\Aut A_\Gamma$, there is a conjugacy class $w$ in $A_\Gamma$ and an automorphism $\alpha\in\Aut A_\Gamma$ such that $\alpha$ cannot be peak-reduced with respect to $w$ by members of $S$. \end{mainproposition} In spite of this disappointing fact, there are still special cases where peak-reduction works. As an example of such a special case, we prove the following: \begin{mainproposition}\label{mp:specialwhalg} Let $W=(w_1,\ldots,w_k)$ be a $k$--tuple of conjugacy classes such that for each $i$, $|w_i|=1$. If $\alpha\in\AAG$ and $|\alpha\cdot W|=|W|$, then $\alpha$ can be peak-reduced with respect to $W$ by elements of $\Omega_\ell\cup\Omega_s$. \end{mainproposition} As an application of Theorem~\ref{mt:threeparts}, we give a presentation for $\AAG$. In Section~\ref{ss:whgens}, we define a finite set $R$ of relations among the Whitehead automorphisms $\Omega$. These relations tell us when one element of $\Omega$ is the inverse of another, when one is a product of two others, when two elements of $\Omega$ commute, when an element of $\Omega$ is the commutator of two other elements of $\Omega$, and how type (1) Whitehead automorphisms interact with type~(2) Whitehead automorphisms. These relations are based on the relations McCool gives for the automorphism group of the free group in~\cite{mcpres}. \begin{maintheorem}\label{mt:pres} For any graph $\Gamma$, the group $\AAG$ is finitely presented. Specifically, we have $\AAG=\langle \Omega | R\rangle$. \end{maintheorem} Although Bux--Charney--Vogtmann~\cite{bcv} have shown that $\AAG$ is finitely presented when $\Gamma$ is a tree, our result is more explicit and holds for arbitrary $\Gamma$. 
The idea behind Theorem~\ref{mt:pres} is that we can use the methods of Theorem~\ref{mt:threeparts} to put any word in $\Omega$ representing the identity in $\AAG$ into a standard form. We also use the fact that the inner automorphism group of a right-angled Artin group is also a right-angled Artin group. \section{Generating sets for right-angled Artin groups}\label{se:background} \subsection{Combinatorial group theory of $A_\Gamma$} Let the set of letters $L$ be $X\cup X^{-1}$. For $x\in L$, let $\pg{x}\in X$, the \emph{vertex of $x$}, be the unique element of $X\cap \{x,x^{-1}\}$. We will use $\adj{x}{y}$ as a shorthand for $\adj{\pg{x}}{\pg{y}}$ and we will use $\stl{x}$ and $\lkl{x}$ as notation for $\st(\pg{x})\cup\st(\pg{x})^{-1}$ and $\lk(\pg{x})\cup\lk(\pg{x})^{-1}$ respectively. As usual, a word in $L$ represents an element in $A_\Gamma$. A \emph{cyclic word} is a string of elements of $L$ indexed cyclically (or alternatively, an equivalence class of words under cyclic permutation of the indices). Any two non-cyclic indexings of a cyclic word $w$ represent group elements that are conjugate to each other, so $w$ represents a well-defined conjugacy class. If $w$ is a cyclic word, we will use $[w]$ to denote the conjugacy class it represents. If $w$ is a non-cyclic word, we will sometimes use $[w]$ to denote the cyclic word or conjugacy class it represents. We will denote the length of a word or cyclic word $w$ by $|w|$. The length of a group element or conjugacy class is the minimum length of any of its representative words or cyclic words, respectively. A word or cyclic word $w$ on $L$ is \emph{graphically reduced} if it contains no subsegments of the form $aua^{-1}$, where $a\in L$ and $u$ is a word in $\lkl{a}$. 
The \emph{support} $\supp w$ of a word or cyclic word $w$ is the subset of $X$ consisting of all generators that appear (or whose inverses appear) in $w$, and the support $\supp W$ of a $k$-tuple $W=(w_1,\ldots,w_k)$ of conjugacy classes is $\bigcup_{i=1}^k\supp w_i$. According to Servatius (see~\cite{se}, Section I) any graphically reduced word can be transformed into any other graphically reduced representative of the same element by repeated application of commutation moves (replacing a subsegment $ab$ with $ba$ when $\adj{a}{b}$). The same is true for cyclic words and conjugacy classes. Therefore, we take the support $\supp w$ of a group element or conjugacy class to be the support of any graphically reduced representative. The number of instances of a given generator in a group element or conjugacy class can be defined in the same way. Servatius's centralizer theorem from~\cite{se}, Section III, finds all the centralizers of elements in $A_\Gamma$. We restate a special case here: \begin{theorem}[Special case of Servatius's centralizer theorem]\label{th:centralizer} For $x\in X$, the centralizer of $x$ in $A_\Gamma$ is $\langle\stl{x}\rangle$. \end{theorem} \subsection{Laurence's generators for $\AAG$}\label{ss:Laurencesgenerators} There is a reflexive, transitive, binary relation on $X$ called the \emph{domination relation}: say $x\geq y$ ($x$ dominates $y$) if $\lk(y)\subset \st(x)$. Domination is clearly reflexive and transitive. For $x, y\in L$, say $x\geq y$ if $\pg{x}\geq \pg{y}$. Write $x\sim y$ when $x\geq y$ and $y\geq x$; the relation $\sim$ is called the \emph{domination equivalence} relation. We will also consider the \emph{adjacent domination} relation, which holds for $x$ and $y$ if $\adj{x}{y}$ and $x\leq y$, and the \emph{non-adjacent domination} relation, which holds if $x\leq y$ and not $\adj{x}{y}$. Each of these relations has a corresponding equivalence relation. 
We say that $x$ strictly dominates $y$ if $x\geq y$ and $x\not\sim y$ (other authors have used ``strict domination'' to refer to what we mean by ``non-adjacent domination''). The following classes of automorphisms were defined by Servatius in~\cite{se}, where he conjectured that they generate $\AAG$. \begin{definition} The Laurence--Servatius generators are the following four classes of automorphisms: {\bf Dominated Transvections:} For $x, y\in L$ with $x\geq y$ and $\pg{x}\neq \pg{y}$, the \emph{dominated transvection} (or simply \emph{transvection}) $\tau_{x,y}$ is the automorphism that sends \[y \mapsto yx\] and fixes all generators not equal to $\pg{y}$. {\bf Partial Conjugations:} For $x\in L$ and $Y$ a union of connected components of $\Gamma - \st(\pg{x})$, the \emph{partial conjugation} $c_{x,Y}$ is the automorphism that sends \[y \mapsto x^{-1}yx\quad\mbox{for $y\in Y$}\] and fixes all generators not in $Y$. {\bf Inversions:} For $x\in X$, the \emph{inversion} of $x$ is the automorphism that sends \[x \mapsto x^{-1}\] and fixes all other generators. {\bf Graphic Automorphisms:} For $\pi$ an automorphism of the graph $\Gamma$, the \emph{graphic automorphism} of $\pi$ is the automorphism that sends \[x \mapsto \pi(x)\] for each generator $x\in X$. \end{definition} It is a potential point of confusion that an ordinary conjugation automorphism is an example of a partial conjugation automorphism. The following is Theorem~6.9 of Laurence~\cite{la}. \begin{theorem}[Laurence]\label{th:lau} The group $\AAG$ is generated by the finite set consisting of all dominated transvections, partial conjugations, inversions and graphic automorphisms of $A_\Gamma$. \end{theorem} \subsection{Whitehead automorphisms for right-angled Artin groups}\label{ss:whgens} We start with some comments on the Whitehead automorphisms $\Omega$ defined in the introduction. There is a special notation for type~(2) Whitehead automorphisms. 
Let $A\subset L$ and $a\in L$, such that $a\in A$ and $a^{-1}\notin A$. If it exists, the symbol $(A,a)$ denotes the Whitehead automorphism satisfying \[(A,a)(a)=a\] and for $x\in X-\pg{a}$: \[(A,a)(x)=\left\{\begin{array}{ll} x & \mbox{ if $x\notin A$ and $x^{-1}\notin A$}\\ xa & \mbox{ if $x\in A$ and $x^{-1}\notin A$}\\ a^{-1}x & \mbox{ if $x\notin A$ and $x^{-1}\in A$}\\ a^{-1}xa & \mbox{ if $x\in A$ and $x^{-1}\in A$} \end{array}\right.\] Say that $(A,a)$ is \emph{well defined} if the formula given above defines an automorphism of $A_\Gamma$. For $\alpha\in\Omega$ of type~(2), one can always find a multiplier $a\in L$ and a subset $A\subset L$ such that $\alpha=(A,a)$. There is a little ambiguity in choosing such a representation that comes from the following fact: if $a,b\in L$ with $\adj{a}{b}$, then $(\{a,b,b^{-1}\},a)$ is the trivial automorphism. Note that the set of type~(1) Whitehead automorphisms is the finite subgroup of $\AAG$ generated by the graphic automorphisms and inversions. \begin{claim} The set $\Omega$ of Whitehead automorphisms is a finite generating set for $\AAG$. \end{claim} \begin{proof} The set $\Omega$ contains the Laurence-Servatius generators from Theorem~\ref{th:lau}. There are only finitely many permutations of $L$ and finitely many subsets of $L$, so $\Omega$ is finite. \end{proof} \begin{lemma}\label{le:whdef} For $A\subset L$ with $a\in A$ and $a^{-1}\notin A$, the automorphism $(A,a)$ is well defined if and only if both of the following hold: \begin{enumerate} \item The set $X\cap A\cap A^{-1}-\lk(\pg{a})$ is a union of connected components of $\Gamma - \st(\pg{a})$. \item For each $x\in (A-A^{-1})$, we have $a\geq x$. \end{enumerate} Alternatively, $(A,a)$ is well defined if and only if for each $x\in A-\stl{a}$ with $a\not\geq x$, $(A,a)$ acts on the entire component of $\pg{x}$ in $\Gamma-\st(\pg{a})$ by conjugation. \end{lemma} \begin{proof} The alternate statement follows immediately from the first one. 
For the ``if'' direction of the first statement, note that if both conditions hold, one can write $(A,a)$ as a product of the Laurence--Servatius generators. For the other direction, assume either condition fails and $(A,a)$ defines an automorphism. One can then find elements $x,y\in X$ such that $[x,y]=1$, but $[(A,a)(x),(A,a)(y)]\neq 1$ by Theorem~\ref{th:centralizer}. This is a contradiction. \end{proof} \subsection{Relations among Whitehead automorphisms} In this section we define the set of relations $R$ in Theorem~\ref{mt:pres}. Note that we use function composition order and automorphisms act on the left. With sets, we use the notation $A+B$ for $A\cup B$ when $A\cap B=\emptyset$. Note the shorthands $A-a$ for $A-\{a\}$ and $A+a$ for $A+\{a\}$. Let $\Phi$ be the free group generated by the set $\Omega$. We understand the relation ``$w_1=w_2$'' to correspond to $w_1w_2^{-1}\in\Phi$. Note that if $(A,a)\in\Omega$ with $B\subset \lk(\pg{a})$ and $(B\cup B^{-1})\cap A=\emptyset$, then $(A,a)$ and $(A+B+B^{-1},a)$ represent the same element of $\Omega$ and therefore the same element of $\Phi$. This is why we do not list ``$(A,a)=(A+B+B^{-1},a)$'' in the relations below. \begin{definition}\label{de:identities} The relations of type (R1) are \begin{equation}\label{eq:R1}\tag{R1} (A,a)^{-1}=(A-a+a^{-1},a^{-1}) \end{equation} for $(A,a)\in \Omega$. The relations of type (R2) are \begin{equation}\label{eq:R2}\tag{R2} (A,a)(B,a)=(A\cup B,a) \end{equation} for $(A,a)$ and $(B,a)\in\Omega$ with $A\cap B=\{a\}$. The relations of type (R3) are \begin{equation}\label{eq:R3}\tag{R3} (B,b)(A,a)(B,b)^{-1}=(A,a) \end{equation} for $(A,a)$ and $(B,b)\in\Omega$ such that $a\notin B$, $b\notin A$, $a^{-1}\notin B$, $b^{-1}\notin A$, and at least one of (a) $A\cap B=\emptyset$ or (b) $b\in\lkl{a}$ holds. We refer to this relation as (R3a) if condition (a) holds and (R3b) if condition (b) holds. 
The relations of type (R4) are \begin{equation}\label{eq:R4}\tag{R4} (B,b)(A,a)(B,b)^{-1}=(A,a)(B-b+a,a) \end{equation} for $(A,a)$ and $(B,b)\in\Omega$ such that $a\notin B$, $b\notin A$, $a^{-1}\notin B$, $b^{-1}\in A$, and at least one of (a) $A\cap B=\emptyset$ or (b) $b\in\lkl{a}$ holds. We refer to this relation as (R4a) if condition (a) holds and (R4b) if condition (b) holds. The relations of type (R5) are \begin{equation}\label{eq:R5}\tag{R5} (A-a+a^{-1},b)(A,a)=(A-b+b^{-1},a)\sigma_{a,b} \end{equation} for $(A,a)\in \Omega$ and $b\in A$ with $b^{-1}\notin A$, $b\neq a$, and $b\sim a$, where $\sigma_{a,b}$ is the type~(1) Whitehead automorphism with $\sigma_{a,b}(a)=b^{-1}$, $\sigma_{a,b}(b)= a$ and which fixes the other generators. The relations of type (R6) are \begin{equation}\label{eq:R6}\tag{R6} \sigma(A,a)\sigma^{-1}=(\sigma(A),\sigma(a)) \end{equation} for $(A,a)\in\Omega$ of type~(2) and $\sigma\in\Omega$ of type~(1). The relations of type (R7) are the entire multiplication table of the type~(1) Whitehead automorphisms, which form a finite subgroup of $\AAG$. The relations of type (R8) are \begin{equation}\label{eq:R8}\tag{R8} (A,a)=(L-a^{-1},a)(L-A,a^{-1}) \end{equation} for $(A,a)\in \Omega$. The relations of type (R9) are \begin{equation}\label{eq:R9}\tag{R9} (A,a)(L-b^{-1},b)(A,a)^{-1}=(L-b^{-1},b) \end{equation} for $(A,a)\in\Omega$ and $b\in L$ with $b,b^{-1}\notin A$. The relations of type (R10) are \begin{equation}\label{eq:R10}\tag{R10} (A,a)(L-b^{-1},b)(A,a)^{-1}=(L-a^{-1},a)(L-b^{-1},b) \end{equation} for $(A,a)\in\Omega$ and $b\in L$ with $b\in A$ and $b^{-1}\notin A$. Let $R$ be the set of elements of $\Phi$ corresponding to all relations of the forms (R1), (R2), (R3a), (R3b), (R4a), (R4b), (R5), (R6), (R7), (R9) and (R10). \end{definition} This is the same $R$ as in Theorem~\ref{mt:pres}, so we will show in Section~\ref{se:pres} that $\AAG=\langle \Omega | R\rangle$. Note that $R$ is a finite set. 
Relations (R1), (R2), (R3a), (R4a), and (R5)-(R10) appear for the automorphism group of the free group in McCool~\cite{mcpres} (McCool uses reverse composition order for his statements). We have renamed McCool's (R3) as (R3a) and (R4) as (R4a). Note that in Lyndon--Schupp~\cite{ls}, these relations also appear, but (R7) is unnamed, and (R8)-(R10) are relabeled as (R7)-(R9). The relations (R3b) and (R4b) are new here. Our statement of (R4a) varies from McCool's by an application of (R2); this allows us to give (R4b) as a relation of the same form. Our statement of (R10) varies from McCool's by applications of (R1) and (R2), and our statement of (R9) varies from McCool's by an application of (R1). This restatement should make it easier to see what relations (R9) and (R10) do. \begin{remark} For $a\in L$, the automorphism $(L-a^{-1},a)$ is the inner automorphism given by conjugating by $a$. Relation (R8) states that $(A,a)$ and $(L-A,a^{-1})$ represent the same element of $\OAG$. Relations (R9) and (R10) are cases of the following familiar fact about groups: if for $g$ in a group $G$, $C_g$ denotes conjugation by $g$, then for any $\phi\in\Aut G$, we have $\phi C_g\phi^{-1}=C_{\phi(g)}$. In the case of the free group, Relations~(R8), (R9), and (R10) follow from relations (R1)-(R7). However, in the case of a general right-angled Artin group, this is only true of Relation~(R8), which follows immediately from Relations~(R1) and (R2). This is why we leave relations of type (R8) out of $R$. However, we leave Relation~(R8) in the list of relations for convenience and to keep with McCool's numbering system. Relations~(R9) and~(R10) follow from the other relations only if conjugation automorphisms can be factored into products of dominated transvections; this is not possible for general $A_\Gamma$. 
\end{remark} \begin{proposition}\label{pr:identities} For each relation $w$ in any of the classes of relations (R1)-(R10), all the symbols appearing in $w$ denote well-defined Whitehead automorphisms. Furthermore, these relations are true identities in $\AAG$. \end{proposition} \begin{proof} If $w$ is a relation of type~(R3) or (R7), then it is vacuously true that all the terms appearing in $w$ are well defined (since the instances of these relations are indexed over well-defined terms). If $w$ is a relation of type~(R1), (R2), (R5) or (R6), then type~(2) Whitehead automorphisms in $w$ are clearly well defined by Lemma~\ref{le:whdef}. If $w$ is a relation of type~(R4), then since $b\notin A$ but $b^{-1}\in A$, we know $a\geq b$ (by Lemma~\ref{le:whdef}). Since $a\geq b$, every component of $\Gamma-\st(\pg{b})$ is a union of components of $\Gamma-\st(\pg{a})$ and elements of $\st(a)$. Then by Lemma~\ref{le:whdef}, $(B-b+a,a)$ is well defined. If $w$ is a relation of type~(R5), then we have $a\sim b$ with $a\neq b$, which implies that there is an automorphism $\pi$ of $\Gamma$ switching $\pg{a}$ and $\pg{b}$ but fixing the other vertices. Then $\sigma_{a,b}$ is the composition of the automorphism of $A_\Gamma$ induced from $\pi$ with the inversion of $b$. If $w$ is a relation of type~(R8), then $(L-A,a^{-1})$ is well defined by Lemma~\ref{le:whdef}. Since for any $c\in L$, $(L-c^{-1},c)$ represents conjugation by $c$, we know the terms in $w$ are well defined if $w$ is a relation of type~(R8), (R9), or (R10). Each identity can then be verified by computing actions on $X$. \end{proof} \begin{remark} At this point it is easy to see that $\Omega=\Omega^{-1}$. This is because of Equation~(\ref{eq:R1}) and the fact that the set of type~(1) Whitehead automorphisms is closed under taking inverses. 
\end{remark} \section{The structure of $\AAG$} \subsection{Sorting automorphisms by their scope}\label{ss:sorting} Using the special notation for type (2) Whitehead automorphisms, we can restate the definitions of $\Omega_s$ and $\Omega_\ell$ more succinctly. A Whitehead automorphism $\alpha$ is in $\Omega_\ell$ if it is of type~(1), or if $\alpha$ is of type (2) and we can write $\alpha=(A,a)$ for some $A$ with $A\cap\lkl{a}=\emptyset$. A Whitehead automorphism $\alpha$ is in $\Omega_s$ if $\alpha=(A,a)$ is of type (2) and $A\subset\stl{a}$. Whenever we declare an element $(A,a)\in\Omega_\ell$, we will assume that $A\cap\lkl{a}=\emptyset$. This is necessary since if $x\in\lkl{a}$, then $(A\cup\{x,x^{-1}\},a)=(A,a)$. The goal of this subsection is to prove \partref{it:complements}. We proceed by describing a series of identities that allow us to rewrite a product of a long-range and a short-range automorphism. We will then show that by a finite number of applications of these identities, we can express any automorphism as a product of a single element of $\langle \Omega_s\rangle$ and a single element of $\langle \Omega_\ell\rangle$. \begin{lemma}\label{le:l&s} Every Whitehead automorphism $\alpha\in\Omega$ has a unique decomposition as a product $\alpha=\alpha_s\alpha_\ell$, where $\alpha_s\in\Omega_s$ and $\alpha_\ell\in\Omega_\ell$. Furthermore, $\AAG$ is generated by $\OLS$. \end{lemma} \begin{proof} If $\alpha$ is of type~(1), then $\alpha_s=1$ and $\alpha_\ell=\alpha$. So assume $\alpha=(A,a)$. Set $A_1= A\cap\stl{a}$ and set $A_2=A-\lkl{a}$. By Lemma~\ref{le:whdef}, both $(A_1,a)$ and $(A_2,a)$ are well defined. So set $\alpha_s=(A_1,a)$ and set $\alpha_\ell=(A_2,a)$. By Equation~(\ref{eq:R2}), we have $\alpha=\alpha_s\alpha_\ell$. This decomposition is unique since $\Omega_s\cap\Omega_\ell=\{1\}$. Of course, this means that $\Omega\subset\langle\OLS\rangle$. Then since $\Omega$ generates $\AAG$, the set $\OLS$ also generates $\AAG$. 
\end{proof} We call $\alpha_s$ the \emph{short-range part} of $\alpha$ and $\alpha_\ell$ the \emph{long-range part} of $\alpha$. Let $\ell\colon\thinspace\Omega\to\Omega_\ell$ be given by $\ell(\alpha)=\alpha_\ell$ and let $s\colon\thinspace\Omega\to\Omega_s$ be given by $s(\alpha)=\alpha_s$. \begin{definition}\label{de:sort} Suppose $\alpha\in\Omega_\ell$ and $\beta=(B,b)\in\Omega_s$. Of course, we may assume that for $x\in\lkl{b}$, not both $x$ and $x^{-1}$ are in $B$. Let the \emph{sorting substitution} of $\alpha\beta$ be the word in $\Omega$ defined as follows. If $\alpha$ is given by a permutation of $L$, then $(\alpha(B),\alpha(b))\in \Omega_s$, and the substitution is: \begin{equation}\label{eq:permsubs} \alpha\beta\mapsto (\alpha(B),\alpha(b))\alpha \end{equation} Now suppose $\alpha=(A,a)$. If $\pg{a}=\pg{b}$, then the substitution is given by: \begin{equation}\label{eq:vertsubs} \alpha\beta\mapsto \beta\alpha \end{equation} If $a\in\lkl{b}$, then we know $b,b^{-1}\notin A$. As we assumed earlier, not both $a\in B$ and $a^{-1}\in B$. The substitution is given by: \begin{equation}\label{eq:adjsubs} \alpha\beta\mapsto\left\{\begin{array}{ll} \beta\alpha & \mbox{if $a\notin B$, $a^{-1}\notin B$}\\ \beta s((A-a+b,b))\ell((A-a+b,b))\alpha & \mbox{if $a\notin B$, $a^{-1}\in B$}\\ \beta s((A-a+b^{-1},b^{-1}))\ell((A-a+b^{-1},b^{-1}))\alpha & \mbox{if $a\in B$, $a^{-1}\notin B$} \end{array}\right. \end{equation} If $a\notin\stl{b}$, the substitution is given by: \begin{equation}\label{eq:farsubs} \alpha\beta\mapsto \beta\alpha \end{equation} \end{definition} \begin{sublemma}\label{sl:domadjdom} Suppose $b\in L$, $c\in\lkl{b}$ and $b\geq c$. If $a\in L$ and $a\geq b$, then $b\in\stl{a}$. \end{sublemma} \begin{proof} By transitivity, $a\geq c$. Then by the definition of domination, $b\in\stl{a}$. \end{proof} \begin{lemma}\label{le:validsubs} All of the elements substituted for $\alpha\beta$ in Definition~\ref{de:sort} are equal to $\alpha\beta$ in $\AAG$. 
\end{lemma} \begin{proof} Substitution~(\ref{eq:permsubs}) is valid by Equation~(\ref{eq:R6}). Substitution~(\ref{eq:vertsubs}) is valid by Equation~(\ref{eq:R2}) (if $a=b$, then we know $A\cap B=\{a\}$ and therefore $\alpha\beta=(A\cup B,a)=\beta\alpha$) and Equation~(\ref{eq:R1}) (if $a^{-1}=b$, then $\alpha^{-1}=(L-A-\lkl{a},a^{-1})$ and Equation~(\ref{eq:R2}) implies $\alpha^{-1}$ and $\beta$ commute). The first substitution in Equation~(\ref{eq:adjsubs}) is valid by Equation~(\ref{eq:R3}b). The second substitution in Equation~(\ref{eq:adjsubs}) is valid by Equation~(\ref{eq:R4}b) (and Equation~(\ref{eq:R2}) to split $(A-a+b,b)$ into long-range and short-range parts). To get the third substitution in Equation~(\ref{eq:adjsubs}), note that since $\beta\in\Omega_s$, we have $\beta=(\stl{b}-B,b^{-1})$. Then the third substitution is simply the second substitution, after representing $\beta$ in a different way. Now suppose $a\notin\stl{b}$. By Sublemma~\ref{sl:domadjdom} (assuming $\beta$ is nontrivial), we know that $a\not\geq b$. Then if $b\notin A$, each element of the component of $\pg{b}$ in $\Gamma-\st(\pg{a})$ is fixed by $\alpha$. Then $A\cap B=\emptyset$ since $\beta\in\Omega_s$. So if $b\notin A$, then Equation~(\ref{eq:farsubs}) is valid by Equation~(\ref{eq:R3}). If $b\in A$, we apply Equation~(\ref{eq:R8}) to replace $(A,a)$ by $(L-a^{-1},a)(L-A,a^{-1})$. Then $b\notin L-A$, so $(L-A,a^{-1})$ commutes with $\beta$ by Equation~(\ref{eq:R3}), and $(L-a^{-1},a)$ commutes with $\beta$ by Equation~(\ref{eq:R9}) ($a,a^{-1}\notin B$ since $\beta\in\Omega_s$). After commuting both automorphisms past $\beta$, we recombine them by Equation~(\ref{eq:R8}). \end{proof} \begin{remark} Equation~(\ref{eq:adjsubs}) indicates that there are many examples of graphs $\Gamma$ such that neither $\langle\Omega_\ell\rangle$ nor $\langle\Omega_s\rangle$ is a normal subgroup of $\AAG$. \end{remark} \begin{lemma}\label{le:1sr} Suppose we have $(A,a)\in \Omega_\ell$. 
Suppose $w$ is a product (in any order) of long-range automorphisms of the form $\ell((A-a+x,x))$ for $x\in\stl{a}$ with $x\geq a$, together with short-range automorphisms with multiplier $b^{\pm1}$. Then we can apply sorting substitutions to rewrite $w$ as a word $w'$ satisfying the same hypotheses as $w$ (for the same $(A,a)$), and such that all the short-range automorphisms in $w'$ appear to the left of any long-range automorphisms in $w'$. \end{lemma} \begin{proof} We argue by induction on the number $k$ of long-range automorphisms in $w$ with multipliers other than $b^{\pm1}$. It is clear that by applying Substitution~(\ref{eq:vertsubs}), we can move a short-range automorphism appearing in the word to the left across a long-range automorphism with multiplier $b^{\pm1}$. In the base case $k=0$, we only need to move short-range automorphisms across long-range automorphisms with multiplier $b^{\pm1}$, so we are done. Now suppose $k>0$. We break up $w$ as $w_1w_2$, where there is only one long-range automorphism with multiplier not equal to $b^{\pm1}$ in $w_2$, say $\ell((A-a+x,x))$. If $x\notin\stl{b}$, then by applying Substitution~(\ref{eq:farsubs}) (and possibly Substitution~(\ref{eq:vertsubs})), we can move all the short-range automorphisms in $w_2$ to the left across $\ell((A-a+x,x))$. If $x\in\lkl{b}$, then by applying Substitution~(\ref{eq:adjsubs}), we can move any short-range automorphism to the left across $\ell((A-a+x,x))$. In doing so, we may introduce a new short-range automorphism with multiplier $b^{\pm1}$ to the left of $\ell((A-a+x,x))$, as well as a new long-range automorphism $\ell((A-a+y,y))$ where $y=b^{\pm1}$. It is then clear that by applying Substitutions~(\ref{eq:adjsubs}) and~(\ref{eq:vertsubs}), we can move all the short-range automorphisms to the left across $\ell((A-a+x,x))$. 
In either case, we can rewrite $w_2$ as a word $w_3v$ satisfying the hypotheses of the lemma, where $w_3$ contains no long-range elements with multipliers other than $b^{\pm1}$ and $v$ contains no short-range elements. Then by induction, $w_1w_3$ can be rewritten as a word $u$ satisfying the conclusions of the lemma, and we have rewritten $w$ as a word $uv$ satisfying the conclusions of the lemma. \end{proof} \begin{lemma}\label{le:1lr} Suppose $\alpha=(A,a)\in\Omega_\ell$ and $\beta_1,\ldots,\beta_k\in \Omega_s$. Then we can apply finitely many sorting substitutions to the word $w_0=\alpha\beta_k\cdots\beta_1$ to get a word where all of the long-range elements are of the form $\ell((A-a+x,x))$ for various $x\in \stl{a}$ with $x\geq a$, and all the short-range elements are to the left of any long-range elements. \end{lemma} \begin{proof} We prove the lemma by induction on $k$. If $k=0$, it is true. Now assume the lemma holds for $\alpha\beta_k\cdots\beta_2$. Then we rewrite $w_0$ as $u_1\delta_m\cdots\delta_1\beta_1$, where $u_1$ is a word in $\Omega_s$ and $\delta_i=\ell((A-a+x_i,x_i))$ for some $x_1,\ldots,x_m\in \stl{a}$ with $x_i\geq a$. We apply Lemma~\ref{le:1sr} to the subsequence $\delta_m\cdots\delta_1\beta_1$ and rewrite it as $u_2v$, where $u_2$ is a word in $\Omega_s$ and $v$ is a product of automorphisms of the form $\ell((A-a+x,x))$ for various $x\geq a$. Then we have rewritten $w_0$ as $u_1u_2v$, which is in the desired form. \end{proof} \begin{proof}[Proof of \partref{it:complements}] We induct on the length of the word $w$ in $\OLS$ that we wish to rewrite. If $|w|\leq1$, we are done. Now assume the theorem is true for words of length $|w|-1$. Let $w'$ be a word in $\OLS$ and let $\alpha\in\OLS$ such that $w=\alpha w'$ is a reduced factorization. Then the theorem applies to $w'$, so $w'=w_sw_\ell$ where $w_s$ is a word in $\Omega_s$ and $w_\ell$ is a word in $\Omega_\ell$. If $\alpha\in\Omega_s$, we are done, so assume $\alpha\in\Omega_\ell$. 
If $\alpha$ is induced by a permutation of $L$, we can move it across $w_s$ by $|w_s|$ applications of Substitution~(\ref{eq:permsubs}) and we are done. Otherwise, $\alpha=(A,a)$, and we apply Lemma~\ref{le:1lr} to rewrite $\alpha w_s$ as $w'_sw'_\ell$, with $w'_s$ a word in $\Omega_s$ and $w'_\ell$ a word in $\Omega_\ell$. Then $w=w'_sw'_\ell w_\ell$ and we are done. \end{proof} \subsection{The homology representation and short-range automorphisms} Let $H_\Gamma$ denote the abelianization $H_1(A_\Gamma)\cong \Z^n$ of $A_\Gamma$. Since the commutator subgroup of $A_\Gamma$ is a characteristic subgroup of $A_\Gamma$, every automorphism of $A_\Gamma$ induces an automorphism of $H_\Gamma$. This defines a map $\AAG\to\Aut H_\Gamma\cong \GL(n,\Z)$, which we call the \emph{homology representation}. For $\alpha\in\AAG$, we denote its image under the homology representation by $\alpha_*\in\Aut H_\Gamma$. In this section, we prove \partref{it:srinj}, and we examine the structure of $\langle \Omega_s\rangle$. \begin{lemma}\label{le:srsupp} Let $\beta\in\langle\Omega_s\rangle$ and let $c\in X$. Then $\supp \beta(c)$ is a clique contained in $\st(c)$. \end{lemma} \begin{proof} We proceed by induction on the length of $\beta$ as a product of members of $\Omega_s$. If $|\beta|=0$, the lemma holds. Now suppose that $w$ is a word such that $\supp w$ is a clique contained in $\st(c)$ and that $(B,b)\in\Omega_s$. Of course, we may assume that $B\cap B^{-1}=\emptyset$. If $(B-\{b\})\cap (\supp w)^{\pm1}=\emptyset$, then $\beta(w)=w$. So suppose we have $a\in (B-\{b\})\cap (\supp w)^{\pm1}$. Then $a\in\stl{c}$ and by Lemma~\ref{le:whdef}, $b\geq a$. Since $\beta\in\Omega_s$, $b$ is adjacent to $a$, and since $\supp w$ is a clique, it follows from the definition of domination that $b$ is adjacent to every other member of $\supp w$. Since $\supp (B,b)(w)\subset \{\pg{b}\}\cup\supp w$, this proves the lemma. 
\end{proof} \begin{proof}[Proof of \partref{it:srinj}] Suppose $\beta\in\langle\Omega_s\rangle$ and $\beta_*\in \Aut H_\Gamma$ is trivial. Then for any $a\in X$, it follows from Lemma~\ref{le:srsupp} that we can commute the elements of $\supp\beta(a)$ past each other. But since $\beta_*$ is trivial, the sum exponent of $a$ in $\beta(a)$ is $1$, and the sum exponent of any other $x$ in $\beta(a)$ is $0$. So $\beta(a)=a$ for any $a$, and $\beta$ is trivial. \end{proof} Now we examine the structure of $\langle \Omega_s\rangle$. The argument below tells us the structure of the images of many subgroups of $\AAG$ under the homology representation, so we phrase it in greater generality. In particular, we prove an intermediate result that is quoted in the sequel to the current paper~\cite{ssraag}. Let $\leq'$ be a transitive, reflexive relation on $X$, such that $a\leq'b$ implies $a\leq b$ for $a,b\in X$ (for example, the adjacent domination relation). Write $a\sim' b$ when $a\leq' b$ and $b\leq' a$; then $\sim'$ is an equivalence relation on $X$. Let $G<\Aut H_\Gamma$ be generated by $\{(\tau_{a,b})_*|\text{$a\geq'b$}\}$. Let $C_1\cup \cdots\cup C_m = X$ be the $\sim'$--classes of $X$. Let $N=\langle\{(\tau_{a,b})_*|\text{$a,b\in X$, $a\geq' b$ and $a\not\sim' b$}\}\rangle$ and for each $i=1,\ldots,m$, let $G_i=\langle\{(\tau_{a,b})_*|\text{$a,b\in C_i$}\}\rangle$. \begin{proposition} \label{pr:homrepimstruct} The group $N$ is nilpotent, each $G_i\cong \SL(|C_i|,\Z)$, and the inclusion maps of $N$ and the $G_i$ into $G$ give the decomposition: \begin{equation}\label{eq:transstruct}G\cong \left(G_1\times \cdots\times G_m\right)\ltimes N\end{equation} \end{proposition} \begin{proof} We can pick an indexing $X=\{x_1,\ldots,x_n\}$ such that if $x_i\geq' x_j$ with $x_i\not\sim' x_j$, then $j> i$. 
Taking the image of $X$ in $H_\Gamma$ to be an ordered basis under this indexing, the homology representation takes the transvection $\tau_{x_i,x_j}$ to the elementary matrix $E_{i,j}$. The group $N$ is then a group of upper-triangular unipotent matrices and is therefore nilpotent. We make a further demand on our indexing of $X$: if $i<j<k$ and $x_i\sim' x_k$, then $x_j\sim' x_i$. Under such an indexing, the elements of $C_i$ are an unbroken string of elements of $X$, so say $C_i=\{x_{r_i},\ldots,x_{s_i}\}$. So $G_i$ is generated by the elementary matrices $E_{j,k}$ with $r_i\leq j,k\leq s_i$; in particular, it is an embedded copy of $\SL(|C_i|,\Z)$. The generators of $G_i$ and $G_j$ commute for $i\neq j$ by Equation~(\ref{eq:R3}), so the subgroup generated by the $\{G_i\}_i$ is a direct product. It is obvious that $N$ and $G_1,\ldots,G_m$ generate $G$. Now suppose that $(\tau_{a,b})_*$ is a generator of $N$ and $(\tau_{c,d})_*$ is a generator of one of the $G_i$. Then $(\tau_{c,d})_*^{-1}(\tau_{a,b})_*(\tau_{c,d})_*$ is an element of $N$; if $d\neq b$, this follows from Equations~(\ref{eq:R3}) and~(\ref{eq:R4}), and if $d=b$, then this element is $(\tau_{a,b})_*$ since we are working in $H_\Gamma$. Since $N$ is normal, we get the decomposition of $G$ in Equation~(\ref{eq:transstruct}). \end{proof} \begin{proposition}\label{pr:linpres} The group $G$ has a presentation in which the generators $S_G$ are the row operations $E_{a,b}=(\tau_{a,b})_*$ for $a, b\in X$ with $a\geq'b$, and with the relations $R_G$ being all relations among the $S_G$ of the following forms: \begin{enumerate} \item\label{it:r1} $[E_{a,b},E_{c,d}]=1$ if $b\neq c$ and $a\neq d$, \item\label{it:r2} $[E_{a,b},E_{b,d}]E_{a,d}^{-1}=1$ if $a\neq d$, \item\label{it:r3} $(E_{a,b}E_{b,a}^{-1}E_{a,b})^4=1$, if $a\sim' b$ and $a\neq b$, \item\label{it:r4} $(E_{a,b}E_{b,a}^{-1}E_{a,b})^2(E_{a,b}E_{b,a}^{-1}E_{a,b}E_{b,a})^{-3}=1$, if $a,b\in C_i$, $a\neq b$, for some $i$ with $|C_i|=2$. 
\end{enumerate} \end{proposition} \begin{proof} By Proposition~\ref{pr:homrepimstruct}, each $G_i\cong\SL(|C_i|,\Z)$ and $N$ is a nilpotent group. For each $i$, it follows from classical presentations for $\SL(n,\Z)$ that $G_i$ has a presentation with generators $S_G\cap G_i$ and whose relations are those elements of $R_G$ only involving the generators in $S_G\cap G_i$ (see Corollary 10.3 of Milnor~\cite{mil} for $n\geq 3$ and Example~4.2(c) of Section~I.4 of Serre~\cite{sertree} for $n=2$). Since $N$ is a unipotent matrix group and $S_G\cap N$ is a generating set for $N$ that is closed under taking commutators, we know that $N$ has a presentation with generators $S_G\cap N$ and whose relations are those elements of $R_G$ only involving generators in $S_G\cap N$. Relation~(\ref{it:r1}) implies that the group generated by $S_G\cap (G_1\cup\cdots\cup G_m)$ subject to these relations is isomorphic to the product $G_1\times\cdots \times G_m$, and Relation~(\ref{it:r1}) and Relation~(\ref{it:r2}) encode the semi-direct product action of $G_1\times\cdots\times G_m$ on $N$, so that the group $\langle S_G|R_G\rangle\cong G$. \end{proof} \begin{corollary}\label{co:tvgenstruct} Suppose $\tilde G$ is a subgroup of $\AAG$ generated by a set of dominated transvections. Then there is a relation $\leq'$ such that the image of $\tilde G$ under the homology representation is the group $G$ generated by $\{(\tau_{a,b})_*|\text{$a\geq'b$}\}$ and the conclusions of Proposition~\ref{pr:homrepimstruct} and Proposition~\ref{pr:linpres} hold for $G$. In particular, the group $\langle \Omega_s\rangle$ has a decomposition of the form in Equation~(\ref{eq:transstruct}) and a presentation of the form given in Proposition~\ref{pr:linpres}. \end{corollary} \begin{proof} Let $S$ be a set of dominated transvections such that $\tilde G=\langle S\rangle$. 
Let $\leq'$ be the reflexive relation that is the transitive closure of $\leq''$, where $a\geq''b$ whenever $\tau_{a,b}\in S$. Then $\tilde G$ is generated by $\{\tau_{a,b}|\text{$a\geq'b$}\}$, and its image under the homology representation is the group $G$ generated by $\{(\tau_{a,b})_*|\text{$a\geq'b$}\}$. Then we can apply Proposition~\ref{pr:homrepimstruct} and Proposition~\ref{pr:linpres}. If $S=\Omega_s$, then by \partref{it:srinj}, the homology representation restricted to $\langle \Omega_s\rangle=\tilde G$ maps isomorphically to $G$. So the conclusions of Proposition~\ref{pr:homrepimstruct} and Proposition~\ref{pr:linpres} apply to $\langle \Omega_s\rangle$, where $\leq'$ is adjacent domination. \end{proof} \subsection{Peak-reducing products of long-range automorphisms} The goal of this subsection is to prove \partref{it:lrwhalg}. Our proof is similar to the proof of the peak reduction theorem for free groups given in Higgins--Lyndon~\cite{hl}. Let $k\geq 1$. For a $k$--tuple $W=(w_1,\ldots,w_k)$ of cyclic words, we denote the $k$--tuple of conjugacy classes by $[W]=([w_1],\ldots,[w_k])$. We proceed by putting the definition of peak reduction from the introduction in context. \begin{definition} Suppose $\alpha,\beta \in\Omega$ and $[W]$ is a $k$--tuple of conjugacy classes in $A_\Gamma$. Then $\beta\alpha$, the word of length $2$, is called a \emph{peak} with respect to $[W]$ if \[|\alpha\cdot [W]|\geq |[W]|\] \[|\alpha\cdot [W]|\geq |\beta\alpha\cdot [W]|\] and at least one of these inequalities is strict. Suppose $\gamma\in\AAG$ and we have a factorization $\gamma=\alpha_k\cdots\alpha_1$ with $\alpha_1,\ldots,\alpha_k$ in $\Omega$. We say $\alpha_i$ is a \emph{peak} of this factorization, with respect to $[W]$, if $1<i<k$ and $\alpha_{i+1}\alpha_i$ is a peak with respect to $(\alpha_{i-1}\cdots\alpha_1)\cdot [W]$. The \emph{height} of a peak $\alpha_i$ is simply $|(\alpha_i\cdots\alpha_1)\cdot [W]|$. 
\end{definition} Then the factorization $\gamma=\alpha_k\cdots\alpha_1$ is peak-reduced with respect to $[W]$ (as defined in the introduction) if and only if it has no peaks with respect to $[W]$. It is important to note that for general right-angled Artin groups, the automorphism group $\AAG$ does not act on the set of graphically reduced words; rather it only acts on the set of group elements. This means we need to take care to distinguish words from the elements they represent. These measures were unnecessary in the original proof for free groups, since for a free group the set of reduced words is the set of group elements. \begin{definition}\label{de:nicerep} If $(A,a)\in\Omega_\ell$ and $w$ is a graphically reduced cyclic word, define the \emph{obvious representative} of $(A,a)([w])$ based on $w$ to be the cyclic word $w'$ gotten from $w$ by the following replacements: \begin{itemize} \item for every subsegment of $w$ of the form $buc^{-1}$ or $cub^{-1}$, with $u$ any word in $\lkl{a}$, $b\in A-a$ and $c\in L-A-\lkl{a}$, replace this subsegment with $bauc^{-1}$ or $cua^{-1}b^{-1}$ respectively in $w'$, and \item for every subsegment of $w$ of the form $bua^{-1}$ or $aub^{-1}$, with $u$ any word in $\lkl{a}$ and $b\in A-a$, replace this subsegment with $bu$ or $ub^{-1}$ respectively in $w'$. \end{itemize} The \emph{obvious representative} of $(A,a)\cdot [W]$ based on $W$ is the $k$--tuple $(w'_1,\ldots,w'_k)$, where each $w'_i$ is the obvious representative of $(A,a)([w_i])$ based on $w_i$. \end{definition} \begin{claim}The obvious representative $w'$ of $(A,a)([w])$ based on $w$ is a graphically reduced representative of $(A,a)([w])$. \end{claim} \begin{proof} First we show that $w'$ is graphically reduced. Note that we have only added or removed instances of $a^{\pm1}$. Since $w$ is graphically reduced, $w'$ can only fail to be graphically reduced on subsegments where we added or removed instances of $a^{\pm1}$. 
Those replacements that introduce an instance of $a^{\pm1}$ introduce it in a way where it cannot cancel ($a$ does not commute with $b\in A-a$ or with $c\in L-A-\lkl{a}$). Suppose a replacement that removes an instance of $a^{\pm1}$ results in $w'$ not being graphically reduced. Then we have a subsegment $dvd^{-1}$ of $w$ being replaced by $dv'd^{-1}$ in $w'$, where $d\in L-\stl{a}$, $v'$ is a word in $\lkl{d}$, and $v$ is $v'$ with some instances $a$ or $a^{-1}$ inserted. Then $dvd^{-1}$ contains an instance of some $b\in (A-a)^{\pm1}$. If $a\not\geq b$, then $\pg{b}$ is in a component of $\Gamma-\st(\pg{a})$ that is conjugated by $(A,a)$. It follows that for each $x$ in $\supp dv'd^{-1}$, either $x\in\stl{a}$ or $\pg{x}$ is in the same component of $\Gamma-\st(\pg{a})$ as $\pg{b}$. Then $v=v'$, a contradiction. So suppose $a\geq b$. If $\pg{b}\neq\pg{d}$, then $d$ commutes with $a$, a contradiction. If $\pg{b}=\pg{d}$, then $\supp v\subset\st(\pg{a})$, and our substitutions never would have removed instances of $a^{\pm1}$ from $v$. So $w'$ is graphically reduced. Observe that $w'$ represents $(A,a)([w])$: because $(A,a)$ is long-range, it has no effect on the subsegments of $w$ that are words in $\lkl{a}$, and the substitutions in the definition of $w'$ capture all those changes that $(A,a)$ makes to $w$ that do not cancel each other out. \end{proof} \begin{definition}\label{de:adjco} Let $w$ be a graphically reduced cyclic word and let $a\in L$. Then for $b,c\in L-\lkl{a}$, we define the \emph{adjacency counter} of $w$ relative to $a$, written as $\aco{b}{c}{w,a}$, to be the number of subsegments of $w$ of the form $(buc^{-1})^{\pm1}$, where $u$ is any (possibly empty) word in $\lkl{a}$. 
For a $k$--tuple of graphically reduced cyclic words $W=(w_1,\ldots,w_k)$, define the adjacency counter of $W$ relative to $a$ as: \[\aco{b}{c}{W,a}=\sum_{i=1}^k\aco{b}{c}{w_i,a}\] For $B,C\subset L$, we define: \[\aco{B}{C}{W,a}=\sum_{b\in (B-\lkl{a})}\sum_{c\in (C-\lkl{a})}\aco{b}{c}{W,a}\] For $\alpha\in\AAG$, we define: \[D_{[W]}(\alpha)=|\alpha\cdot [W]|-|[W]|\] When $W$ is clear, we leave it out, writing $\aco{B}{C}{a}$ and $D(\alpha)$. \end{definition} With $W$ and $a$ as above, note that for any $B,C\subset L$, the number $\aco{B}{C}{a}\geq 0$. Further, we have $\aco{B}{C}{a}=\aco{C}{B}{a}$. If $D\subset L$ with $D\cap C=\emptyset$, then we have: \[\aco{B}{C+D}{a}=\aco{B}{C}{a}+\aco{B}{D}{a}\] Also note that $\aco{a}{a}{a}=0$ (since each $w_i$ is graphically reduced). \begin{lemma}\label{le:obvadj} If $W$ is a $k$--tuple of graphically reduced cyclic words, $(A,a)\in\Omega_\ell$, and $W'$ is the obvious representative of $(A,a)\cdot [W]$, then: \[D_{[W]}((A,a))=|W'|-|W|=\aco{A-a}{L-A}{W,a}-\aco{a}{A-a}{W,a}\] \end{lemma} \begin{proof} This is immediate from counting the letters removed and added in the definition of $W'$. \end{proof} Note that if $W'$ and $W$ are different $k$-tuples of graphically reduced cyclic words representing the same $k$-tuple of conjugacy classes, we may have different adjacency counters with respect to $W$ and $W'$. However, the function $D_{[W]}$ depends only on $[W]$. \begin{lemma}\label{le:dico} Let $W$ be a $k$--tuple of graphically reduced cyclic words. If $(A,a)\in\Omega_\ell$, then \[D_{[W]}((A,a))=\aco{A}{L-A}{W,a}-\aco{a}{L}{W,a}\] \end{lemma} \begin{proof} From Lemma~\ref{le:obvadj}: \[ \begin{split} D((A,a))&=\aco{A-a}{L-A}{a}-\aco{a}{A-a}{a}\\ &=\aco{A}{L-A}{a} -(\aco{a}{L-A}{a}+\aco{a}{A-a}{a}+\aco{a}{a}{a})\\ & =\aco{A}{L-A}{a}-\aco{a}{L}{a} \end{split} \] \end{proof} The following lemma is the machine that makes peak reduction possible. 
This is an extension of a parallel lemma for free groups that appears in Higgins--Lyndon~\cite{hl}. \begin{lemma}\label{le:machine} Suppose $\alpha,\beta\in \Omega_\ell$ and $[W]$ is a $k$--tuple of conjugacy classes of $A_\Gamma$. If $\beta\alpha^{-1}$ forms a peak with respect to $[W]$, there exist $\delta_1,\ldots,\delta_k\in \Omega_\ell$ such that $\beta\alpha^{-1}=\delta_k\cdots\delta_1$ and for each $i,1\leq i< k$, we have: \[|(\delta_i\cdots\delta_1)\cdot[W]|<|\alpha^{-1}\cdot [W]|\] \end{lemma} A factorization of $\beta\alpha^{-1}$ is \emph{peak-lowering} if it satisfies the conclusions of the lemma, so Lemma~\ref{le:machine} states that every peak has a peak-lowering factorization. Such a factorization might not be peak-reduced, but the height of its highest peak is lower than the height of the peak in $\beta\alpha^{-1}$. We postpone the proof of Lemma~\ref{le:machine} to show how it implies \partref{it:lrwhalg}. \begin{proof}[Proof of \partref{it:lrwhalg}] Let $\gamma\in\langle\Omega_\ell\rangle$ and write $\gamma=\alpha_k\cdots\alpha_1$ with $\alpha_1,\ldots,\alpha_k\in\Omega_\ell$. Let $h$ be \[h=\sup \left\{|(\alpha_i\cdots\alpha_1)\cdot [W]|\big|\mbox{$\alpha_i$ is a peak} \right\}\] which is the height of the highest peak in the factorization, and let $m$ be the number of maximal-height steps between peaks: \[m=\Big|\left\{i\big|h=|(\alpha_i\cdots\alpha_1)\cdot [W]|\mbox{ and $\alpha_i$ is a peak or between two peaks}\right\}\Big|\] If the factorization is not peak-reduced, then there is a peak $\alpha_i$ of maximal height. Apply Lemma~\ref{le:machine} to the peak $\alpha_{i+1}\alpha_i$ with respect to $(\alpha_{i-1}\cdots\alpha_1)\cdot [W]$ to get \[\alpha_{i+1}\alpha_i=\delta_j\cdots\delta_1\] satisfying the conclusions of the lemma, and therefore a new factorization of $\gamma$: \[\alpha_k\cdots\alpha_{i+2}\delta_j\cdots\delta_1\alpha_{i-1}\cdots\alpha_1\] If $m$ of the old factorization was not 1, then $m$ of the new factorization is one less. 
If $m$ of the old factorization was 1, then $h$ of the new factorization is strictly lower than $h$ of the old factorization. By repeating this process, we eventually obtain a factorization with $h<1$. This can only mean that there are no peaks, so we have a factorization which is peak-reduced. \end{proof} \begin{sublemma} Let $\alpha$, $\beta$, and $[W]$ be as in Lemma~\ref{le:machine}. Then we have: \begin{equation}\label{eq:pkineq} 2|\alpha^{-1}\cdot [W]|> |[W]|+|\beta\alpha^{-1}\cdot[W]| \end{equation} \end{sublemma} \begin{proof} Since $\beta\alpha^{-1}$ is a peak with respect to $[W]$, we can sum the two inequalities in the definition of a peak; by the fact that one of them is strict, we obtain this new inequality. \end{proof} \begin{sublemma}\label{sl:notnear} Suppose we have $(A,a),(B,b)\in\Omega_\ell$ with $a\notin B$ and $\pg{a}$ not adjacent to $\pg{b}$ in $\Gamma$ (possibly $a=b^{-1}$). Then $\lkl{a}\cap B=\emptyset$. \end{sublemma} \begin{proof} If $x\in\lkl{a}\cap B$, then $x\in B$ and by Lemma~\ref{le:whdef}, either $b\geq x$ or $(B,b)$ acts on the connected component of $\pg{x}$ in $\Gamma-\st(\pg{b})$ by conjugation. If the latter were true, since $\pg{a}$ is adjacent to $\pg{x}$ and not $\pg{b}$, we would have that $a\in B$, a contradiction. So $b\geq x$, in which case $\pg{a}$ is adjacent to $\pg{b}$, a contradiction. \end{proof} \begin{sublemma}\label{sl:commshort} Suppose $\alpha$, $\beta$, and $[W]$ are as in Lemma~\ref{le:machine}, and also that $\alpha=(A,a)$, $\beta=(B,b)$, and that either $\adj{a}{b}$ or that $A\cap B=\emptyset$ with $a^{-1}\notin B$. Then $ |\beta\cdot [W]|<|\alpha^{-1}\cdot [W]| $. \end{sublemma} \begin{proof} Take $W'$ to be a representative of $\alpha^{-1}\cdot [W]$ and take $W$ to be the obvious representative of $\alpha\cdot [W']$ based on $W'$ (this doesn't change our original $[W]$). We will show the sublemma by analyzing adjacency counters. 
First we claim that: \begin{equation}\label{eq:twocounters1}\aco{B}{L-B}{W,b}\geq\aco{B}{L-B}{W',b}\end{equation} We will show this by showing that every subsegment of $W'$ (meaning a subsegment of an element of $W'$) that is counted by the adjacency counter on the right above is also counted by the one on the left. So suppose $(cud^{-1})^{\pm1}$ is counted on the right in Equation~(\ref{eq:twocounters1}), \emph{i.e.} $(cud^{-1})^{\pm1}$ is a subsegment of $W'$ with $c\in B$, $d\in L-B-\lkl{b}$, and with $u$ a word in $\lkl{b}$. If $\adj{a}{b}$, then since $\beta\in\Omega_\ell$, we know $a^{\pm1}\notin B$. This means that $\pg{c}\neq\pg{a}$. Since $a\in\lkl{b}$, we also have $\pg{d}\neq\pg{a}$. This means that the corresponding subsegment of $W$ is $(cu'd^{-1})^{\pm1}$, where $u'$ is $u$, possibly with instances of $a^{\pm1}$ added or removed. This subsegment is then counted by the counter on the right. If $A\cap B=\emptyset$, then $a\notin B$ and $b\notin A$. Since $a^{\pm1}\notin B$, we know that $\pg{c}\neq\pg{a}$. By Sublemma~\ref{sl:notnear}, we know $\lkl{b}\cap A=\emptyset$. Note that it is possible that in passing to $W$, this letter $d$ could be deleted by a copy of $a^{-1}$ added to its left if $d=a^{-1}$. We consider this case separately. First suppose $d$ is not deleted. Then the subsegment of $W$ corresponding to $(cud^{-1})^{\pm1}$ is either $(cud^{-1})^{\pm1}$ or $(cua^{-1}d^{-1})^{\pm1}$, depending on whether $d\in A$ or not. In either case, this subsegment is counted once by the counter on the left, in the second case because $a\in L-B-\lkl{b}$. If $d$ is deleted, then $d=a^{-1}$, and our original $(cud^{-1})^{\pm1}=(cua)^{\pm1}$ was part of a subsegment $(cuavf^{-1})^{\pm1}$, where $f\in (A-a)$ and $v$ is a word in $\lkl{a}$. Note that by Sublemma~\ref{sl:notnear}, we know $\lkl{a}\cap B=\emptyset$, so the counter on the right counts this segment only once. 
The subsegment of $W$ corresponding to this $(cuavf^{-1})^{\pm1}$ is then $(cuvf^{-1})^{\pm1}$. Write $v=v'v''$ where $v'$ is the maximal initial segment of $v$ that is a word in $\lkl{b}\cap\lkl{a}$ and let $f'$ be the unique letter such that $cuv'(f')^{-1}$ is an initial segment of $cuvf^{-1}$. Either $f'=f$ or $f'\in(\lkl{a}-\lkl{b})$. In either case, $f'\in L-B-\lkl{b}$ (since $A\cap B=\emptyset$ and $B\cap\lkl{a}=\emptyset$), so the corresponding subsegment of $W$ is counted once by the counter on the left. This shows our Equation~(\ref{eq:twocounters1}). Now we will show: \begin{equation}\label{eq:twocounters2}\aco{b}{L}{W',b}\geq\aco{b}{L}{W,b}\end{equation} Suppose $(bud^{-1})^{\pm1}$ is a subsegment of $W$ counted by the counter on the right above, so $d\in L-\lkl{b}$ and $u$ is a word in $\lkl{b}$. If $\adj{a}{b}$, then $W$ came from a subsegment $(bu'd^{-1})^{\pm1}$ of $W'$, where $u'$ is $u$, possibly with an instance of $a^{\pm1}$ added or removed; this subsegment is counted by the counter on the left. If $A\cap B=\emptyset$, then either $d$ originally appeared in $W$ or it was added in passing to $W'$. If it originally appeared in $W$, then $(bud^{-1})$ came from either a $(bud^{-1})^{\pm1}$ or a $(bu'au''d^{-1})^{\pm1}$, where in the second case $u''$ is the maximal terminal segment of $u$ that is a word in $\lkl{a}\cap\lkl{b}$; this subsegment of $W'$ is counted by the counter on the left. If it was added, our $(bud^{-1})^{\pm1}$ in $W$ is part of a $(buavf^{-1})^{\pm1}$, with $v$ a word in $\lkl{a}$ and $f\in A$. This subsegment is counted only once and came from a subsegment $(buvf^{-1})^{\pm1}$ that is counted once (for similar reasons as above). This shows Equation~(\ref{eq:twocounters2}). 
From Lemma~\ref{le:dico}, Equation~(\ref{eq:twocounters1}) and Equation~(\ref{eq:twocounters2}), we see that: \[D_{[W]}(\beta)\geq D_{\alpha^{-1}\cdot[W]}(\beta)\] By the definition of $D$, this means that $ |[W]|+|\beta\alpha^{-1}\cdot [W]|\geq |\alpha^{-1}\cdot [W]|+|\beta\cdot[W]| $. Combining this with Equation~(\ref{eq:pkineq}), we obtain \[2|\alpha^{-1}\cdot [W]|>|\alpha^{-1}\cdot[W]|+|\beta\cdot[W]|,\] which immediately implies the sublemma. \end{proof} \begin{proof}[Proof of Lemma~\ref{le:machine}] If we have $\alpha=(A,a)$ and $\beta=(B,b)$, we will set $A'=L-A-\lkl{a}$ and $B'=L-B-\lkl{b}$. Let $\overline\alpha=(A', a^{-1})$ and $\overline\beta=(B',b^{-1})$. By Equation~(\ref{eq:R8}) and the fact that $\alpha$ and $\beta$ are long-range, these automorphisms describe the same elements of $\OAG$, and therefore $\alpha^{-1}\cdot [W]=\overline\alpha^{-1}\cdot [W]$ and $\beta\alpha^{-1}\cdot [W]=\overline\beta\alpha^{-1}\cdot [W]$. We claim that if the lemma holds with $\alpha$ or $\beta$ replaced with $\overline\alpha=(A', a^{-1})$ or $\overline\beta=(B',b^{-1})$, respectively, then it holds as originally stated. Suppose $\delta_k\cdots\delta_1$ is a peak-lowering factorization of $\overline\beta\alpha^{-1}$ (for example). By Equation~(\ref{eq:R8}), the element $\beta\alpha^{-1}(\overline\beta\alpha^{-1})^{-1}$ is the conjugation $(L-b^{-1},b)$ (which is in $\Omega_\ell$). If $|\beta\alpha^{-1}\cdot [W]|<|\alpha^{-1}\cdot [W]|$ then \[\beta\alpha^{-1}=(L-b^{-1},b)\delta_k\cdots\delta_1\] is a peak-lowering factorization of $\beta\alpha^{-1}$, since $(L-b^{-1},b)$ does not change the length of any conjugacy class. Otherwise $|[W]|<|\alpha^{-1}\cdot [W]|$. Again by Equation~(\ref{eq:R8}), $\overline\beta\beta$ is the conjugation $(L-b,b^{-1})$. So $(\overline\beta\alpha^{-1})^{-1}\beta\alpha^{-1}$ is $\alpha(L-b,b^{-1})\alpha^{-1}$. 
If $b\notin A$, then by Equations~(\ref{eq:R9}) and~(\ref{eq:R10}), we know $(\overline\beta\alpha^{-1})^{-1}\beta\alpha^{-1}$ is a product of conjugations. If $b\in A$, then by Equation~(\ref{eq:R8}), we know $(\overline\beta\alpha^{-1})^{-1}\beta\alpha^{-1}$ is $(L-a^{-1},a)\overline\alpha(L-b,b^{-1})\overline\alpha^{-1}(L-a,a^{-1})$, which is then a product of conjugations by Equations~(\ref{eq:R9}) and~(\ref{eq:R10}). In any case, we have a product of conjugations $\gamma_j'\cdots\gamma_1'$ equal to $(\overline\beta\alpha^{-1})^{-1}\beta\alpha^{-1}$; then \[\beta\alpha^{-1}=\delta_k\cdots\delta_1\gamma_j'\cdots\gamma_1'\] is a peak-lowering factorization of $\beta\alpha^{-1}$, since conjugations do not change the length of conjugacy classes. So we may swap out $\overline \alpha$ for $\alpha$ and $\overline\beta$ for $\beta$ as needed in the proof of this lemma. Also, by the symmetry in the definition of a peak, we may switch $\alpha$ and $\beta$ if needed. We fix a $k$--tuple of graphically reduced cyclic words $W$ representing the $k$--tuple of conjugacy classes $[W]$. Throughout this proof, $W'$ will denote the obvious representative of $\alpha^{-1}\cdot[W]$ based on $W$. We break this proof down into several cases. \noindent {\bf Case 1:} $\alpha$ is induced by a permutation of $L$. Then $|\alpha\cdot [W]|=|[W]|$. Since $\beta\alpha^{-1}$ is a peak, $\beta$ must shorten $\alpha\cdot[W]$, so $\beta=(B,b)$ for some $(B,b)$. Then the automorphism $(\alpha^{-1}(B),\alpha^{-1}(b))\in \Omega_\ell$ is well defined, and by Equation~(\ref{eq:R6}) the following factorization is peak-lowering: \[\beta\alpha^{-1}=\alpha^{-1}(\alpha^{-1}(B),\alpha^{-1}(b))\] In the remaining cases, we assume that $\alpha=(A,a)$ and $\beta=(B,b)$. We will implicitly use Equation~(\ref{eq:R1}) to write $\alpha^{-1}$ as $(A-a+a^{-1},a^{-1})$ in the following. \noindent {\bf Case 2:} $a\in\lkl{b}$. Of course, this implies that $\pg{a}\neq\pg{b}$. 
Since $\alpha$ and $\beta$ are long-range, we know that $a,a^{-1}\notin B$ and $b, b^{-1}\notin A$. Then by Equation~(\ref{eq:R3}b), we have: \[\beta\alpha^{-1}=(B,b)(A-a+a^{-1},a^{-1})=(A-a+a^{-1},a^{-1})(B,b)=\alpha^{-1}\beta\] By Sublemma~\ref{sl:commshort}, we know $|\beta\cdot [W]|<|\alpha^{-1}\cdot[W]|$, so this factorization is peak-lowering. \noindent {\bf Case 3:} $A\cap B=\emptyset$ and $a\notin\lkl{b}$. We will break into sub-cases according to the configuration of $a^{-1}$ and $b^{-1}$. \noindent {\bf Sub-case 3a:} $\pg{a}=\pg{b}$. Since $A\cap B=\emptyset$, this implies that $a=b^{-1}$. By Equation~(\ref{eq:R2}), the following factorization is peak-lowering: \[\beta\alpha^{-1}=(B,b)(A-a+b,b)=(A+B+b,b)\] \noindent {\bf Sub-case 3b:} $a^{-1}\notin B$. If $b^{-1}\notin A$, then \[\beta\alpha^{-1}=(B,b)(A-a+a^{-1},a^{-1})=(A-a+a^{-1},a^{-1})(B,b)\] by Equation~(\ref{eq:R3}a). If $b^{-1}\in A$, then by Equations~(\ref{eq:R2}) and~(\ref{eq:R4}a), we have: \[\beta\alpha^{-1}=(B,b)(A-a+a^{-1},a^{-1})=(A+B-b-a+a^{-1},a^{-1})(B,b)\] In either case, by Sublemma~\ref{sl:commshort}, $|\beta\cdot[W]|<|\alpha^{-1}\cdot[W]|$, so these factorizations are peak-lowering. \noindent {\bf Sub-case 3c:} $\pg{a}\neq \pg{b}$, $a^{-1}\in B$, and $b^{-1}\in A$. Note that since we are allowed to switch $\alpha$ and $\beta$, if $\pg{a}\neq\pg{b}$ and either $a^{-1}\notin B$ or $b^{-1}\notin A$, we are in sub-case 3b. Therefore this sub-case finishes case 3. Since $a^{-1}\in B$ and $a\notin B$, we see from Lemma~\ref{le:whdef} that $b\geq a$. Similarly, $a\geq b$. So $a\sim b$ and by Lemma~\ref{le:whdef}, the automorphisms $\alpha'=(A,b^{-1})$ and $\beta'=(B,a^{-1})$ are well defined. In the rest of this case, all adjacency counting is done with respect to $W'$. Since $a\sim b$ and $\pg{a}$ is not adjacent to $\pg{b}$ in $\Gamma$, note that $\lkl{a}=\lkl{b}$ and therefore the adjacency counters with respect to $a$ and $b$ are the same functions. 
Then by Lemma~\ref{le:dico}: \[D(\alpha)+D(\beta)=D(\alpha')+D(\beta')\] Also, by the definition of $D$ and Equation~(\ref{eq:pkineq}): \[D(\alpha)+D(\beta)=-(2|\alpha^{-1}\cdot[W]|-|[W]|-|\beta\alpha^{-1}\cdot[W]|)<0\] So either $D(\alpha')<0$ or $D(\beta')<0$. Since we may swap $\alpha$ and $\beta$ (which swaps $\alpha'$ and $\beta'$), we assume $D(\beta')<0$. Now we will find our peak-lowering factorization. Let $\sigma_{a,b}$ be the type~(1) Whitehead automorphism from Equation~(\ref{eq:R5}). By Equation~(\ref{eq:R5}), we have: \[\beta(\beta')^{-1}=(B,b)(B-a^{-1}+a,a)=(B-a^{-1}+a-b+b^{-1},a)\sigma_{a,b}\] By Equation~(\ref{eq:R2}): \[\beta'\alpha^{-1}=(B,a^{-1})(A-a+a^{-1},a^{-1})=(A+ B-a,a^{-1})\] Then using $\beta\alpha^{-1}=\beta(\beta')^{-1}\beta'\alpha^{-1}$, we have the factorization: \[\beta\alpha^{-1}=(B-a^{-1}+a-b+b^{-1},a)\sigma_{a,b}(A+ B-a,a^{-1})\] To show this factorization is peak-lowering, note the following: \[ |(A+ B-a,a^{-1})\cdot[W]| =|\beta'\alpha^{-1}\cdot[W]| =D(\beta')+|\alpha^{-1}\cdot[W]| <|\alpha^{-1}\cdot [W]| \] This is because $D(\beta')=|\beta'\alpha^{-1}\cdot[W]|-|\alpha^{-1}\cdot [W]|$. Then since $\sigma_{a,b}$ does not change the length of a conjugacy class, this factorization is peak-lowering and we are done with this case. \noindent {\bf Case 4:} $A\cap B\neq\emptyset$ and $a\notin\lkl{b}$. All adjacency counting in this case is done with respect to $W'$. First we show we can assume that we are in one of two sub-cases: either $a\notin B$ and $b\notin A$, or else $a\notin B$, $a^{-1}\in B$, $b\in A$ and $b^{-1}\notin A$. Possibly by replacing $\beta$ with $\overline\beta$, we may assume that $a\notin B$. Then if $b\notin A$, then we are in the first sub-case, so suppose $b\in A$. First suppose $a^{-1}\in B$; if $b^{-1}\notin A$, then we are in the second sub-case, and if $b^{-1}\in A$, we can get to the first sub-case by swapping both $\alpha$ with $\overline\alpha$ and $\beta$ with $\overline\beta$. 
Otherwise $a^{-1}\notin B$, and swapping $\alpha$ with $\overline\alpha$ puts us in the first sub-case. In both of these sub-cases we will find that $\alpha^{-1}\cdot[W]$ is shortened by a well-defined Whitehead automorphism $(C\cap D,c)$, where $C$ is $A$ or $A'$, $D$ is $B$ or $B'$, and $c$ is an element of $\{a,a^{-1},b^{-1},b\}\cap A\cap C$. By swapping $\alpha$ with $\overline\alpha$ if necessary, we assume $C=A$; similarly, we assume that $D=B'$. Then $c$ is $a$ or $b^{-1}$, if it is $b^{-1}$, we swap $\alpha$ with $\beta$, $\alpha$ with $\overline\alpha$, and $\beta$ with $\overline\beta$ to get $(C\cap D,c)=(A\cap B',a)$. Then in any event, we may assume that $(A\cap B',a)$ shortens $\alpha^{-1}\cdot[W]$. We deduce from Lemma~\ref{le:whdef} that $(A-B'+a,a)$ is a well defined Whitehead automorphism. From Equation~(\ref{eq:R2}) we have: \[\alpha=(A- B'+a,a)(A\cap B',a)\] Then we factor: \[\beta\alpha^{-1}=\beta(A\cap B',a)^{-1}(A- B'+ a,a)^{-1}\] Since $(A\cap B',a)$ shortens $\alpha^{-1}\cdot[W]$, we know that \[|(A-B'+a,a)^{-1}\cdot[W]|<|\alpha^{-1}\cdot[W]|\] and that $\beta(A\cap B',a)^{-1}$ is a peak with respect to $(A- B'+ a,a)^{-1}\cdot[W]$. Then we can apply case 3 of this lemma to the peak $\beta(A\cap B' ,a)^{-1}$, and obtain a peak-lowering factorization of our original peak. \noindent {\bf Sub-case 4a:} $a\notin B$, $a^{-1}\in B$, $b\in A$ and $b^{-1}\notin A$. Then $a\sim b$ by Lemma~\ref{le:whdef}, and since $\pg{a}$ is not adjacent to $\pg{b}$, we have $\lkl{a}=\lkl{b}$. Then adjacency counters taken with respect to $a$ and $b$ are the same. Let $\gamma_1=(A\cap B,b)$, $\gamma_2=(A\cap B',a)$, $\gamma_3=(A'\cap B,a^{-1})$, and $\gamma_4=(A'\cap B', b^{-1})$. Since $a\sim b$, these $\gamma_1,\gamma_2,\gamma_3$, and $\gamma_4$ are all well defined by Lemma~\ref{le:whdef}. Now we will show that one of these automorphisms shortens $\alpha^{-1}\cdot[W]$. 
Apply Lemma~\ref{le:dico} twice to get: \[ D(\alpha)+D(\beta)=\aco{A}{A'}{a}-\aco{a}{L}{a}+\aco{B}{B'}{b}-\aco{b}{L}{b}\] Then by further applications of Lemma~\ref{le:dico}, we obtain: \[\begin{split} \sum_{i=1}^4D(\gamma_i&)=\\ &\aco{A\cap B}{A'\cup B'}{b}-\aco{b}{L}{b} +\aco{A'\cap B}{A\cup B'}{a^{-1}}-\aco{a^{-1}}{L}{a^{-1}} \\& +\aco{A\cap B'}{A'\cup B}{a}-\aco{a}{L}{a} +\aco{A'\cap B'}{A\cup B}{b^{-1}}-\aco{b^{-1}}{L}{b^{-1}}\\ \end{split}\] Putting these together, it follows from the additivity of adjacency counters that: \[ 2(D(\alpha)+D(\beta))= \sum_{i=1}^4D(\gamma_i)+2(\aco{A\cap B}{A'\cap B'}{a}+\aco{A'\cap B}{A\cap B'}{a}) \] Then by Equation~(\ref{eq:pkineq}): \[0>2(D(\alpha)+D(\beta))\geq\sum_{i=1}^4D(\gamma_i)\] This shows that for some $i$, $D(\gamma_i)<0$, so one of $\gamma_1$, $\gamma_2$, $\gamma_3$ or $\gamma_4$ shortens $[W']$. We have found an automorphism shortening $[W']$ as described above, so we are done with this sub-case. \noindent{\bf Sub-case 4b:} $a\notin B$ and $b\notin A$. We claim that $(A\cap B',a)$ is well defined. By Lemma~\ref{le:whdef}, $(A\cap B',a)$ is well defined if for every $x\in A\cap B'$ with $a\not\geq x$, $(A\cap B',a)$ acts on component of $\pg{x}$ in $\Gamma-\st(\pg{a})$ by conjugation. So suppose $x\in A\cap B'$ with $a\not\geq x$ and let $Y_1$ denote the component of $\pg{x}$ in $\Gamma-\st(\pg{a})$. Then $(A\cap B',a)$ is well defined if for every $y\in Y_1$, we have $y,y^{-1}\in A$ and $y,y^{-1}\in B'$. This first condition is true since $(A,a)$ acts on $Y_1$ by conjugation (since $a\not\geq x$). So suppose for contradiction that $y\in B\cup \lkl{b}$ (meaning $y\notin B'$) with $\pg{y}\in Y_1$. Then $y\in A$ and $a\not\geq y$. By Sublemma~\ref{sl:notnear}, we know $A\cap\lkl{b}=\emptyset$. This forces $y$ to be in $B$. Let $Y_2$ be the component of $\pg{y}$ in $\Gamma-\st(\pg{b})$. Since $y\in B$, either $b\geq y$ or $(B,b)$ conjugates $Y_2$. 
If $b\geq y$, the fact that $\st(\pg{a})$ separates $\pg{b}$ from $\pg{y}$ means that $a\geq y$, a contradiction. Then $(B,b)$ conjugates $Y_2$. Since $a\notin B$, this means $a\notin Y_2$, which implies $\st(\pg{a})\cap Y_2=\emptyset$. So since $\st(\pg{a})$ separates $\pg{b}$ from $\pg{y}$ in $\Gamma$, this means that $Y_2$ is also a component of $\Gamma-\st(\pg{a})$. In that case, however, $Y_1=Y_2$, which implies $x\in B$, a contradiction. So $(A\cap B',a)$ is well defined. Note that $(B\cap A', b)$ is well defined by the same argument. Next we will show that either $(A\cap B',a)$ or $(B\cap A', b)$ shortens $\alpha^{-1}\cdot[W]$. By Equation~(\ref{eq:pkineq}), we know that $0>D(\alpha)+D(\beta)$. By Lemma~\ref{le:dico}, we know that \begin{align*} D(\alpha)&=\aco{A}{A'}{a}-\aco{a}{L}{a} =\aco{A\cap B'}{A'}{a}+\aco{A\cap B}{A'}{a}-\aco{a}{L}{a} \end{align*} and that: \begin{align*} D(\beta)&=\aco{B}{B'}{b}-\aco{b}{L}{b} =\aco{B\cap A'}{B'}{b}+\aco{B\cap A}{B'}{b}-\aco{b}{L}{b} \end{align*} Also from Lemma~\ref{le:dico}, we know that \begin{align*} D((A\cap B',a))&=\aco{A\cap B'}{A'\cup B}{a}-\aco{a}{L}{a}\\ &=\aco{A\cap B'}{A'}{a}+\aco{B\cap A}{B'\cap A}{a}-\aco{a}{L}{a} \end{align*} and that: \begin{align*} D((B\cap A',b))&=\aco{B\cap A'}{B'\cup A}{b}-\aco{b}{L}{b}\\ &=\aco{B\cap A'}{B'}{b}+\aco{A\cap B}{A'\cap B}{b}-\aco{b}{L}{b} \end{align*} We claim that $\aco{A\cap B}{A'}{a}\geq \aco{A\cap B}{A'\cap B}{b}$. Since $b\notin A$, Sublemma~\ref{sl:notnear} says that $\lkl{b}\cap A=\emptyset$. If $(cud^{-1})^{\pm1}$ is a subsegment of $W'$ with $c\in A\cap B$, $d\in A'\cap B$, and $u$ a word in $\lkl{b}$, then either $u$ is a word in $\lkl{b}\cap\lkl{a}$, or $u=u'u_1u''$ where $u'$ is a word in $\lkl{b}\cap\lkl{a}$ and $u_1\in \lkl{b}-\lkl{a}$. If the former is true, $cud^{-1}$ is counted by $\aco{A\cap B}{A'}{a}$; if the latter holds, then instead $cu'u_1$ is counted by $\aco{A\cap B}{A'}{a}$ (since $\lkl{b}\cap A=\emptyset$).
Either way, each subsegment of $W'$ counted by one counter is also counted by the other, showing the inequality. Similarly, we know $\aco{B\cap A}{B'}{b}\geq \aco{B\cap A}{B'\cap A}{a}$. Putting this all together, we have that: \[0>D(\alpha)+D(\beta) >D((A\cap B',a))+D((B\cap A',b))\] So one of $(A\cap B',a)$ and $(B\cap A',b)$ shortens $[W']$. \end{proof} \begin{remark}\label{re:purewhalg} The \emph{pure automorphism group} $\AAGo$ of $A_\Gamma$ is the subgroup of $\AAG$ generated by dominated transvections, partial conjugations, and inversions. It contains those graphic automorphisms which can be expressed as products of transvections and inversions; depending on $\Gamma$, $\AAGo$ may or may not be all of $\AAG$. In any case, $\AAGo$ is a finite-index normal subgroup of $\AAG$. Define the \emph{pure long-range Whitehead automorphisms} $\Omega_\ell^0$ to be $\Omega_\ell\cap \AAGo$. If $\alpha\in\langle\Omega_\ell^0\rangle$, then in fact, we can peak reduce $\alpha$ with respect to any $k$-tuple of conjugacy classes $W$ by elements of $\Omega_\ell^0$. To see this, consider the proof of Lemma~\ref{le:machine}: when we lower peaks in factorizations of $\alpha$, we move around type~(1) Whitehead automorphisms in case~1, and we introduce a type~(1) Whitehead automorphism in sub-case~3c that is in $\Omega_\ell^0$, and in no other case do we introduce a type~(1) Whitehead automorphism. So if we start with a factorization of $\alpha$ by elements of $\Omega_\ell^0$ and peak-reduce it, we will end up with a peak-reduced factorization of $\alpha$ by elements of $\Omega_\ell^0$. This technical detail is important for the application in Day~\cite{ssraag}. \end{remark} \section{Attempting to extend peak reduction to $\AAG$} \subsection{A failure of peak-reduction}\label{se:nowhalg} In this section we prove Proposition~\ref{mp:nowhalg}. 
\begin{example}[Outer automorphisms of the four-vertex path]\label{ex:orbit} Let $\Gamma$ be the four-vertex path, with labels as in Figure~\ref{fig:countex2}. Let $P$ denote the subgroup of $\Out A_\Gamma$ generated by the images of the inversions and the single graphic automorphism (which swaps $a$ with $d$ and $b$ with $c$). Then $P\cong (\Z/2\Z)\ltimes(\Z/2\Z)^4$. \begin{figure} \caption{A graph $\Gamma$ such that peak-reduction fails on $A_\Gamma$.} \label{fig:countex2} \end{figure} Let $N$ denote the subgroup of $\Out A_\Gamma$ generated by the images of the dominated transvections. We have adjacent dominations $b\geq a$ and $c\geq d$, and non-adjacent dominations $c\geq a$ and $b\geq d$. These are the only examples of domination in $\Gamma$. This gives us six infinite cyclic subgroups of $\Aut A_\Gamma$ generated by dominated transvections: two for each example of non-adjacent domination (multiplying on the right and on the left) and one for each example of adjacent domination. Since $\adj{b}{c}$, these transvections commute and generate a copy of $\Z^6<\Aut A_\Gamma$. Each of our pairs of non-adjacent transvections differ by an inner automorphism, so $N\cong\Z^4$. From Equation~(\ref{eq:R6}), we know that $P$ normalizes $N$. No vertex of $\Gamma$ has a star that separates $\Gamma$, so each partial conjugation is a full conjugation. Then by Laurence's Theorem (Theorem~\ref{th:lau}), we have: \[\Out A_\Gamma\cong P\ltimes N\] Let $\phi\colon\thinspace\Z^4\to N$ be given by $\phi(p,q,r,s)(a)=ab^pc^q$ and $\phi(p,q,r,s)(d)=b^rc^sd$ (note $\phi(p,q,r,s)$ fixes the conjugacy classes $b$ and $c$). Let $k\geq 2$ and let $w$ be the conjugacy class of the cyclic word $ad^k$. For any $(p,q,r,s)\in \Z^4$, we have: \[\phi(p,q,r,s)(w)=ab^{p+r}c^{q+ks}d(b^rd)^{k-1}\] Note that the word on the right side is a graphically reduced cyclic word. Then $\phi(p,q,r,s)$ fixes $w$ if and only if $p=0$, $r=0$, and $q=-ks$.
So the stabilizer $N_w$ is $\langle\phi(0,-k,0,1)\rangle$. Further, the only classes in $(\Out A_\Gamma)\cdot w$ with length less than or equal to $|w|$ are the $8$ classes in $P\cdot w$. Also note that if $w'=\sigma(w)$ with $\sigma\in P$, then the stabilizer $N_{w'}$ is $\langle \sigma\phi(0,-k,0,1)\sigma^{-1}\rangle$. \end{example} \begin{proof}[Proof of Proposition~\ref{mp:nowhalg}] Let $\Gamma$ be as in Example~\ref{ex:orbit}. For $\alpha\in\Aut A_\Gamma$, let $|\alpha|$ denote the length of the class of $\alpha$ in $\Out A_\Gamma$ with respect to Laurence's generators. Pick a natural number $k$ such that: \[k > 1+\sup_{\alpha\in S}|\alpha|\] Let $w$ be the conjugacy class of the cyclic word $ad^k$. Let $\beta\in\Aut A_\Gamma$ represent the class of $\phi(0,-k,0,1)\in\Out A_\Gamma$, with $\phi$ as in Example~\ref{ex:orbit}. Note that $\beta$ fixes $w$ and $\beta$ does not represent an element of $P$. Suppose that $\beta$ can be peak reduced with respect to $w$ by elements of $S$. Since $w$ is a minimal-length element of $(\Out A_\Gamma)\cdot w$, this means we can factor $\beta$ as $\gamma_m\cdots\gamma_1$ for some $\gamma_1,\ldots,\gamma_m\in S$, such that for each $j$, $1\leq j\leq m$, we have $|\gamma_j\cdots\gamma_1(w)|=|w|$. Each $\gamma_{j}\cdots\gamma_1(w)$ is the same length as $w$ and in the same orbit, so by Example~\ref{ex:orbit}, it is in $P\cdot w$. Fix a $j$, $1\leq j\leq m$. Let $w'=\gamma_{j-1}\cdots\gamma_1(w)$. There is $\sigma$ representing an element of $P$ such that $\sigma\gamma_j$ represents an element of $N$. Since $|\sigma\gamma_j(w')|=|w'|$, we know from Example~\ref{ex:orbit} that $\sigma\gamma_j(w')=w'$ (since $w'\in P\cdot w$). Also by Example~(\ref{ex:orbit}), $\sigma\gamma_j$ represents $\rho\phi(0,-ks,0,s)\rho^{-1}$ for some $s\in\Z$ and some $\rho\in P$. Then $|\sigma\gamma_j|=s(k+1)$; but since $\gamma_j\in S$ and therefore $|\sigma\gamma_j|\leq k$, this $s$ must be $0$. Therefore each $\gamma_j$ represents an element of $P$. 
Then $\beta$ represents an element of $P$, a contradiction. \end{proof} \subsection{Automorphisms fixing a set of basis elements} In this section we prove Proposition~\ref{mp:specialwhalg}. \begin{lemma}\label{le:shortensrgen} Suppose $x$ is the conjugacy class of an element of $X$ and $\alpha\in\langle\Omega_s\rangle$. Then $\alpha(x)$ cannot be shortened by a member of $\Omega_\ell$. \end{lemma} \begin{proof} If an element of $\Omega_\ell$ shortens $\alpha(x)$, then it must be a type (2) automorphism $(A,a)$. Further, we must have $a\in(\supp \alpha(x))^{\pm1}$ and $(A-a)\cap (\supp \alpha(x))^{\pm1}\neq\emptyset$ by Lemma~\ref{le:obvadj}. We know $\supp \alpha(x)$ is a clique in $\Gamma$ by Lemma~\ref{le:srsupp}, so this contradicts the fact that $(A,a)$ is long-range. \end{proof} The image of $X$ in $H_\Gamma$ is a basis for $H_\Gamma$. By declaring this basis to be orthonormal, we get an inner product $\langle-,-\rangle$ on $H_\Gamma$. \begin{lemma}\label{le:knowdoms} If $\alpha\in \langle\Omega_s\rangle$, then for any $a,b\in X$, we have $\langle \alpha_*b,a\rangle\neq 0$ implies $a=b$, or $a\geq b$ with $\adj{a}{b}$. \end{lemma} \begin{proof} We induct on the length of $\alpha$ with respect to the subset of transvections $\tau_{a,b}\in\Omega_s$. The assertion is obvious if the length is zero. Assume it is true for $\beta$ and that $\alpha=\beta\gamma$ where $\gamma=\tau_{c,d}\in\Omega_s$. Suppose $\gamma=\tau_{c,d}$ for some $c,d\in L$ with $c\geq d$. Then $\langle \alpha_*b,a\rangle\neq 0$ implies either that $\langle \beta_*b,a\rangle\neq 0$ or that $\pg{a}=\pg{c}$ and $\langle \beta_*b,d\rangle\neq 0$. In the first case, the lemma follows. In the second case, we have $a\geq d$ and also $d\geq b$ with $\adj{d}{b}$ by inductive assumption. \end{proof} \begin{lemma}\label{le:fixgens} Let $S\subset X$. The pointwise stabilizer of $S$ in $\langle\Omega_s\rangle$ is generated by transvections $\tau_{a,b}\in\Omega_s$ with $\pg{b}\notin S$. 
\end{lemma} \begin{proof} Suppose $\alpha\in\langle \Omega_s\rangle$ and $\alpha$ fixes $S$ pointwise. Let $A=\alpha_*\in\Aut H_\Gamma$. Since $\alpha$ fixes $S$ pointwise, for any $x\in S$, $\langle Ax,y\rangle=0$ for all $y\in X-x$ and $\langle Ax,x\rangle=1$. Let $X=C_1\cup\cdots\cup C_m$ be the decomposition of $X$ into adjacent domination equivalence classes. First of all, for each $i$, we can row-reduce $A$ such that for any $a,b\in C_i$, $\langle Ab,a\rangle$ is $0$ if $a\neq b$ and $1$ if $a=b$. In fact, we can do this by multiplying $\alpha$ by transvections $\tau_{a,b}$ for various $a,b\in C_i$ with $\pg{b}\notin S$ (each $\tau_{a,b}$ corresponds to a row operation). Now suppose some $\langle A b,a\rangle\neq 0$ with $a\not\sim b$. Then $a\geq b$ and $\adj{a}{b}$ by Lemma~\ref{le:knowdoms}, and $\pg{b}\notin S$. Since we have already reduced the diagonal, applying some power of $\tau_{a,b}$ to $\alpha$ will change this entry to zero. Of course, by doing this in appropriate order to the nonzero entries with $a\not\sim b$, we can row-reduce the rest of $A$. So we can reduce $\alpha$ to the identity by applying elements $\tau_{a,b}\in\Omega_s$ with $\pg{b}\notin S$. \end{proof} \begin{proof}[Proof of Proposition~\ref{mp:specialwhalg}] Suppose $W=(w_1,\ldots,w_k)$ is a $k$--tuple of conjugacy classes with each $|w_i|=1$ and suppose $\alpha\in\AAG$ with $|\alpha\cdot W|=|W|$. By Theorem~\ref{mt:threeparts}, we write $\alpha=\beta\gamma$ where $\beta\in\langle\Omega_\ell\rangle$ and $\gamma\in\langle\Omega_s\rangle$. Also by Theorem~\ref{mt:threeparts}, we have a factorization $\beta=\delta_m\cdots\delta_1$ by elements of $\Omega_\ell$ that is peak-reduced with respect to $\gamma\cdot W$. By Lemma~\ref{le:shortensrgen}, this $\delta_1$ cannot shorten $\gamma\cdot W$. So since $\delta_m\cdots\delta_1$ is peak-reduced, we have $|\gamma\cdot W|=|\alpha\cdot W|=|W|$. 
Since each $w_i$ is a minimal-length representative of its $\AAG$--orbit, it follows that each $\gamma(w_i)$ is the conjugacy class of an element of $L$. If $x,y\in L$ with $y$ conjugate to $\gamma(x)$, then $x\sim y$ by Corollary~\ref{co:tvgenstruct} and $e(x,y)$ by Lemma~\ref{le:srsupp}. In general, if $S$ is a basis for $\Z^j$ for some $j$, $S'\subset S$ and $A\in\SL(j,\Z)$ sends $S'$ to a subset of $S^{\pm1}$, then there is $B\in\SL(j,\Z)$ such that $B|_{S'}=A|_{S'}$ and $B$ restricts to a permutation on $S\cup(-S)$ (this can be proven by a row reduction argument). Then from Corollary~\ref{co:tvgenstruct}, we deduce that there is a type~(1) Whitehead automorphism $\sigma$ such that $\sigma\gamma\cdot W=W$ and $\sigma\in\langle\Omega_s\rangle$. Then by Lemma~\ref{le:fixgens}, we can write $\sigma\gamma$ as a product $\phi_r\cdots\phi_1$ of elements $\phi_1,\ldots,\phi_r$ of $\Omega_s$ that fix $\supp W$ pointwise. Then the following is a peak-reduced factorization of $\alpha$ by elements of $\Omega_\ell\cup\Omega_s$: \[\alpha=\delta_m\cdots\delta_1\sigma^{-1}\phi_r\cdots\phi_1\] \end{proof} \begin{corollary}\label{co:fixvertsgenset} Suppose $W=(w_1,\ldots,w_k)$ is a $k$--tuple of conjugacy classes of $A_\Gamma$ with each $|w_i|=1$. Then the stabilizer $(\AAG)_W$ of $W$ in $\AAG$ is generated by $(\Omega_\ell\cup\Omega_s)\cap((\AAG)_W)$. \end{corollary} \begin{proof} Let $\overline\Delta$ be the directed multi-graph whose vertices are $k$--tuples of conjugacy classes $W'$ with $|W'|=|W|$, and an edge from $W_1$ to $W_2$, labeled by $\alpha$, if $\alpha\in\Omega_\ell\cup\Omega_s$ with $\alpha(W_1)=W_2$. Let $\Delta$ be the (undirected) connected component of $W$ in $\overline \Delta$. This is called the \emph{Whitehead graph} of $W$. 
We map the paths of $\Delta$ to $\AAG$ by composing their edge labels; a path from a vertex $W_1$ to a vertex $W_2$ will map to an automorphism $\alpha$ with $\alpha(W_1)=W_2$ (this is true for paths of length $1$ and remains true under concatenations). In particular, this induces a homomorphism $\pi_1(\Delta,W)\to(\AAG)_W$. By Proposition~\ref{mp:specialwhalg}, if $\alpha\in(\AAG)_W$, we can write $\alpha=\beta_m\cdots\beta_1$ where each $\beta_i\in\Omega_\ell\cup\Omega_s$ and for each $i$, $0\leq i\leq m$, we have $|\beta_i\cdots\beta_1\cdot W|=|W|$. Then $\beta_m\cdots\beta_1$ describes a path in $\Delta$ mapping to $\alpha$, and the map $\pi_1(\Delta,W)\to(\AAG)_W$ is surjective. To get generators for $\pi_1(\Delta,W)$, we pick a maximal tree for $\Delta$. Since each $|w_i|=1$, we know each vertex of $\Delta$ is the image of $W$ under some permutation of $L$. Then we can pick our maximal tree $T$ to be a union of edges labeled by type~(1) Whitehead automorphisms originating at $W$. There is a unique loop in $\pi_1(\Delta,W)$ for each (directed) edge in $\Delta-T$ (the loop leaving $T$ only to cross this edge once); these loops generate $\pi_1(\Delta,W)$, and the images of these loops generate $(\AAG)_W$. If $\alpha$ is a type (2) Whitehead automorphism labeling an edge in $\Delta-T$, then $\alpha$ labels a loop from a vertex $W'$ to itself (if a type (2) Whitehead automorphism changes a vertex $W'$ of $\Delta$, then it lengthens it). So if $W\neq W'$ there is a type~(1) Whitehead automorphism $\sigma$ labeling the edge in $T$ from $W$ to $W'$, and by Equation~(\ref{eq:R6}), the automorphism $\sigma \alpha\sigma^{-1}$ is a Whitehead automorphism labeling an edge from $W$ to itself. If $\alpha$ is a type~(1) Whitehead automorphism labeling an edge in $\Delta-T$ that is not a loop at $W$, then by relations of type (R7), the loop based at $W$ through edges in $T$ and $\alpha$ is redundant with a type~(1) Whitehead automorphism labeling an edge from $W$ to itself.
So in fact, the loops in $\pi_1(\Delta,W)$ of length $1$ map to a generating set for $(\AAG)_W$. By definition, they map to $(\Omega_\ell\cup\Omega_s)\cap((\AAG)_W)$. \end{proof} \begin{remark}\label{re:pred} There is another case where a peak-reduction theorem holds for $\AAG$: the author has shown in~\cite{ssraag} that if $w=[a_1,b_1]\cdots[a_k,b_k]$ for distinct $a_1,\cdots a_k$, $b_1,\cdots b_k\in X$, and $\alpha\in\AAG$ with $\alpha(w)=w$, then $\alpha$ can be peak reduced with respect to $w$ by elements of $\Omega$. \end{remark} \section{A presentation for $\AAG$}\label{se:pres} The goal of this section is to prove Theorem~\ref{mt:pres}. Recall that $\Phi$ is the free group on $\Omega$. Let $\Phi_\ell<\Phi$ be the subgroup generated by $\Omega_\ell$. Let $R_\ell=R\cap \Phi_\ell$. Denote the normal closure of $\langle R_\ell\rangle$ in $\Phi_\ell$ by $\overline{\langle R_\ell\rangle}$. Say that $w_1$ and $w_2$ in $\Phi_\ell$ are congruent modulo $R_\ell$ if $w_1w_2^{-1}\in \overline {\langle R_\ell\rangle}$. Similarly, we denote the normal closure of $\langle R\rangle$ in $\Phi$ by $\overline{\langle R\rangle}$ and say that $w_1$ and $w_2$ in $\Phi$ are congruent modulo $R$ if $w_1w_2^{-1}\in\overline{\langle R\rangle}$. \begin{lemma}\label{le:presmach} Suppose $\alpha,\beta\in \Omega_\ell$ and $[W]$ is a $k$--tuple of conjugacy classes of $A_\Gamma$. Suppose $\beta\alpha^{-1}$ forms a peak with respect to $[W]$. 
Then there exist $\delta_1,\ldots,\delta_k\in \Omega_\ell$ such that, when multiplied in $\Phi_\ell$, $\beta\alpha^{-1}$ is congruent to $\delta_k\cdots\delta_1$ modulo $R_\ell$ and for each $i,1\leq i< k$, we have: \[|(\delta_i\cdots\delta_1)\cdot[W]|<|\alpha^{-1}\cdot [W]|\] \end{lemma} \begin{proof} This lemma is a refinement of Lemma~\ref{le:machine}, so to prove it, it is enough to review the proof of Lemma~\ref{le:machine}, noting in each case that the peak-lowering factorization $\delta_k\cdots\delta_1$ is congruent to $\beta\alpha^{-1}$ modulo $R_\ell$. This will be true if in each case, the only manipulations we apply to elements of $\AAG$ are applications of relations in $R$. At the start of the proof, we established that if $\alpha=(A,a)$ and $\beta=(B,b)$, we may switch $\alpha$ and $\beta$ or swap $\beta$ with $(L-B-\lkl{b},b^{-1})$. By the symmetry in the statement, it is again apparent that we may still switch $\alpha$ and $\beta$ if necessary. In showing we could swap $\beta$ with $(L-B-\lkl{b},b^{-1})$, we used Relations~(\ref{eq:R8})--(\ref{eq:R10}). In case 1, we used Relation~(\ref{eq:R6}). We used Relation~(\ref{eq:R1}) in cases 2, 3 and 4. In case 2, we used Relation~(\ref{eq:R3}b). In case 3, we used Relation~(\ref{eq:R2}) in sub-case 3a; Relations~(\ref{eq:R2}),~(\ref{eq:R3}a) and~(\ref{eq:R4}a) in sub-case 3b; and Relations~(\ref{eq:R2}) and~(\ref{eq:R5}) in sub-case 3c. In case 4, we used Relation~(\ref{eq:R2}) and invoked case 3. These were the only manipulations done to elements of $\AAG$ in that proof, so we are done. \end{proof} The following lemma is similar to Proposition~6.2.5 of Culler-Vogtmann. \begin{lemma}\label{le:len2tuple} Let $V$ be a $k$--tuple of conjugacy classes whose elements are all the conjugacy classes in $A_\Gamma$ of length $2$, each appearing once. If $(A,a)\in\Omega_\ell$ and $|(A,a)\cdot V|\leq |V|$, then $(A,a)$ is trivial or is the conjugation $(L-a^{-1},a)$. 
\end{lemma} \begin{proof} We partition $L$ into the following seven sets: \begin{align*} L=&(A\cap A^{-1})+ (A-A^{-1}-a)+ (A^{-1}-A-a^{-1})\\ &\quad + (L-\lkl{a}-A\cup A^{-1})+ \lkl{a}+ \{a\} + \{a^{-1}\} \end{align*} If $bc$ is a cyclic word of length $2$ (not necessarily with $b\neq c$), then we can use Lemma~\ref{le:obvadj} to compute $D_{[bc]}((A,a))$ according to the sets in the partition of $L$ that $b$ and $c$ are members of. Note that since $bc$ is a cyclic word, we may switch $b$ with $c$ in our enumeration of cases. Also note that if both $b,c\in (L-\lkl{a}-A\cup A^{-1})+ \lkl{a}+\{a\}+\{a^{-1}\}$, then $D_{[bc]}((A,a))=0$. We list the remaining cases in Table~\ref{ta:DV}. \begin{table}[ht!] \begin{tabular}{|r|ccc|} \hline \backslashbox{$b$}{$c$} & $A\cap A^{-1}$ & $A-A^{-1}-a$ & $A^{-1}-A-a^{-1}$ \\ \hline $A\cap A^{-1}$ & 0 & & \\ $A-A^{-1} -a$ & 1 & 2 & \\ $A^{-1}-A-a^{-1}$ & 1 & 0 & 2 \\ $L-\lkl{a}-A\cup A^{-1} $ & 2 & 1 & 1\\ $\lkl{a}$ & 0 & 1 & 1\\ $\{a\}$ & 0 & 1 & -1 \\ $\{a^{-1}\}$ & 0 & -1 & 1 \\ \hline \end{tabular} \caption{The value of $D_{[bc]}((A,a))$ as $b$ and $c$ are in different subsets of $L$.} \label{ta:DV} \end{table} As usual, $n=|X|$. Let $m=|X-\lk(\pg{a})|$, let $x=\frac{1}{2}|A\cap A^{-1}|$, and let $y=|A-A^{-1}-a|=|A^{-1}-A-a^{-1}|$. Then $|L-\lkl{a}-A\cup A^{-1}|=2(m-x-y)$. We list the number of conjugacy classes appearing in $V$ of the form $[bc]$ as $b$ and $c$ are in the different subsets of $L$ in Table~\ref{ta:Vsubs}, leaving out the cases in which $D_{[bc]}((A,a))=0$. \begin{table}[ht!] 
\begin{tabular}{|r|ccc|} \hline \backslashbox{$b$}{$c$} & $A\cap A^{-1}$ & $A-A^{-1}-a$ & $A^{-1}-A-a^{-1}$ \\ \hline $A\cap A^{-1}$ & - & & \\ $A-A^{-1}-a $ & $2xy$ & $\frac{y(y+1)}{2}$ & \\ $A^{-1}-A-a^{-1}$ & $2xy$ & - & $\frac{y(y+1)}{2}$ \\ $L-\lkl{a}-A\cup A^{-1} $ & $4x(m-x-y)$ & $2y(m-x-y)$ & $2y(m-x-y)$\\ $\lkl{a}$ & - & $y(n-m)$ & $y(n-m)$\\ $\{a\}$ & - & $y$ & $y$ \\ $\{a^{-1}\}$ & - & $y$ & $y$ \\ \hline \end{tabular} \caption{The number of conjugacy classes in $V$ of the form $[bc]$, as $b$ and $c$ are in different subsets of $L$.} \label{ta:Vsubs} \end{table} We compute $D_V((A,a))$ from the two tables by taking products and summing: \[D_V((A,a))=4xy+8x(m-x-y)+2y(y+1)+4y(m-x-y)+4y(n-m)\] Note that the contribution to $D_V((A,a))$ from the entries in $V$ containing a copy of $a$ or $a^{-1}$ cancel each other out. Since the numbers $x$, $y$, $(m-x-y)$, and $(n-m)$ are all nonnegative (they count the cardinalities of sets), we know that $D_V((A,a))$ cannot be negative; further, for $D_V((A,a))$ to be zero, we must have each of the terms equal to zero. This implies that $y=0$, and that $x(m-x)=0$, which means that $(A,a)$ is either the trivial automorphism $(\{a\},a)$ or the conjugation $(L-\lkl{a}-a^{-1},a)$. \end{proof} \begin{lemma}\label{le:innisraag} The group of inner automorphisms $\Inn A_\Gamma$ is a right-angled Artin group. Specifically, if $Z$ is the intersection of the stars in $\Gamma$ of the elements of $X$, and $\Gamma'$ is the full subgraph of $\Gamma$ on the vertices $X-Z$, then the map sending $x\in X-Z$ to conjugation by $x$ in $A_\Gamma$ is an isomorphism $A_{\Gamma'}\isomarrow \Inn A_\Gamma$. \end{lemma} \begin{proof} By the Servatius centralizer theorem (Theorem~\ref{th:centralizer}), we know that the center $Z(A_\Gamma)$ is $\langle Z\rangle <A_\Gamma$. 
The obvious inclusion $A_{\Gamma'}\into A_\Gamma$ induces an isomorphism $A_{\Gamma'}\isomarrow A_\Gamma/Z(A_\Gamma)$; composing this map with the usual isomorphism $A_\Gamma/Z(A_\Gamma)\isomarrow\Inn A_\Gamma$ gives the isomorphism in the statement. \end{proof} The proof of the following proposition is based on McCool's argument from~\cite{mcpres}. \begin{proposition}\label{pr:lrpres} The group $\langle \Omega_\ell\rangle<\AAG$ has the presentation $\langle\Omega_\ell|R_\ell\rangle$. \end{proposition} \begin{proof} We already know that every relation in $R_\ell$ is an identity of $\langle\Omega_\ell\rangle$, so it is enough to show that every word representing the trivial element in $\langle\Omega_\ell\rangle$ is a product of conjugates of elements of $R_\ell$. Suppose $w\in\Phi_\ell$ represents the trivial element in $\langle\Omega_\ell\rangle <\AAG$. We claim that there is $w'\in\overline{\langle R_\ell\rangle}$ such that $ww'$ is a product of type~(1) Whitehead automorphisms and conjugations. Let $V_0$ be a $k$--tuple containing each conjugacy class of $A_\Gamma$ of length $2$ once. We will prove the claim by induction on the peaks of $w$ with respect to $V_0$; specifically, inducting on the number of points between peaks of maximal height and also on the maximum height of peaks. Write $w=\alpha_j\cdots\alpha_1$ for $\alpha_j,\ldots,\alpha_1\in\Omega_\ell$. In our base case, we assume that $\alpha_j\cdots\alpha_1$ is a factorization of $w$ that is peak reduced with respect to $V_0$. By Lemma~\ref{le:len2tuple}, we know that $V_0$ is a minimal-length representative of its $\AAG$--orbit. So since our factorization of $w$ is peak reduced, for each $i$, we have $|(\alpha_i\cdots\alpha_1)\cdot V_0|=|V_0|$. We claim that for each $i$, $(\alpha_i\cdots\alpha_1)\cdot V_0$ is a $k$--tuple containing each conjugacy class of length 2 once. This is true if $i=0$ by assumption.
Now assume it for $i-1$; since $|(\alpha_i\cdots\alpha_1)\cdot V_0|=|(\alpha_{i-1}\cdots\alpha_1)\cdot V_0|$, we know by Lemma~\ref{le:len2tuple} that $\alpha_i$ is then either trivial, a conjugation, or a type~(1) Whitehead automorphism and the statement is then true for $i$. So in our base case, $w$ is already a product of type~(1) Whitehead automorphisms and conjugations. For the inductive step, suppose that $\alpha_j\cdots\alpha_1$ has peaks with respect to $V_0$. Let $\alpha_i$ be a peak of maximal height. Then by Lemma~\ref{le:presmach}, there are $\delta_1,\ldots,\delta_m\in\Omega_\ell$ such that $(\alpha_{i+1}\alpha_i)^{-1}\delta_m\cdots\delta_1\in \overline{\langle R_\ell\rangle}$ and such that we can lower the peak at $\alpha_i$ in $\alpha_j\cdots\alpha_1$ by substituting in $\delta_m\cdots\delta_1$ for $\alpha_{i+1}\alpha_i$. So we define: \[w_1=(\alpha_{i-1}\cdots\alpha_1)^{-1}(\alpha_{i+1}\alpha_i)^{-1}\delta_m\cdots\delta_1(\alpha_{i-1}\cdots\alpha_1)\in \overline{\langle R_\ell \rangle}\] Then $ww_1=\alpha_j\cdots\alpha_{i+2}\delta_m\cdots\delta_1\alpha_{i-1}\cdots\alpha_1$ has a smaller number of points between maximal-height peaks than $w$ with respect to $V_0$, or its maximal-height peak is shorter. So we have reduced the peaks of $ww_1$, and we invoke the inductive hypothesis for $ww_1$: we have a $w_2\in\overline{\langle R_\ell\rangle}$ such that $ww_1w_2$ is a product of type~(1) Whitehead automorphisms and conjugations. So $w_1w_2\in\overline{\langle R_\ell \rangle}$ satisfies the conclusions of our inductive claim. So we have that $w$ is congruent modulo $R_\ell$ to a product of type (1) Whitehead automorphisms and conjugations. Then by applying instances of Relation~(\ref{eq:R6}), we know that $w$ is congruent to a product $\beta\gamma$ where $\beta$ is a product of type~(1) Whitehead automorphisms and $\gamma$ is a product of conjugation automorphisms in $\Omega_\ell$.
The subgroup of $\AAG$ generated by type~(1) Whitehead automorphisms acts faithfully on $H_\Gamma$, so since $w$ maps to the trivial element of $\AAG$ and $\gamma$ is in the kernel of the homology representation, we deduce that $\beta$ represents the trivial automorphism. So by some instances of Relation~(R7), we know that $w$ is congruent modulo $R_\ell$ to $\gamma$, which represents the trivial automorphism in $\Inn A_\Gamma$. Let $Z$ and $\Gamma'$ be as in Lemma~\ref{le:innisraag}. Map the free group on $X-Z$ to $\Phi_\ell$ by sending $a\in X-Z$ to $(L-\lkl{a}-a^{-1},a)$. This sends the relations from the right-angled Artin group presentation of $A_{\Gamma'}$ to instances of Relation~(\ref{eq:R3}b). Of course, this map descends to the isomorphism $A_{\Gamma'}\isomarrow \Inn A_\Gamma$ in Lemma~\ref{le:innisraag}. Then since $\gamma$ represents the trivial element of $\Inn A_\Gamma$, it corresponds to an element $w_\gamma$ of the free group on $X-Z$ that maps to the trivial element of $A_{\Gamma'}$. This $w_\gamma$ is a product of conjugates of the relations from the presentation of $A_{\Gamma'}$, so $\gamma$ is a product of conjugates of instances of Relation~(\ref{eq:R3}b). So $\gamma$ is in $\overline{\langle R_\ell\rangle}$, and therefore $w$ is in $\overline{\langle R_\ell \rangle}$. \end{proof} If $a,b\in X$ with $a\in\lkl{b}$ and $a\sim b$, then the type~(1) Whitehead automorphism $\sigma_{a,b}$ of Relation~(\ref{eq:R5}) exists. According to that relation, we have $\sigma_{a,b}\in \langle\Omega_s\rangle$. Let $P_s\subset \Omega$ be the finite subgroup of $\AAG$ generated by such $\sigma_{a,b}$ as $a$ and $b$ range over all adjacent domination-equivalent pairs in $X$. Let $\Phi_s$ be the free subgroup of $\Phi$ generated by $\Omega_s\cup P_s$. Let $R_s=R\cap \Phi_s$. \begin{proposition}\label{pr:srwhpres} The group $\langle \Omega_s\rangle$ has the presentation $\langle\Omega_s\cup P_s|R_s\rangle$.
\end{proposition} \begin{proof} Let $G=\langle\Omega_s\rangle<\AAG$ and let $\tilde G=\langle \Omega_s\cup P_s|R_s\rangle$. We know that each of the relations in $R_s$ is an identity in $\tilde G$, so we have homomorphism $G\to \tilde G$ by sending each element of $\Omega_s$ to its own coset. We will show this map is an isomorphism by constructing an inverse. By Corollary~\ref{co:tvgenstruct}, the group $G$ has a presentation where the generators are $\{E_{a,b}|\text{$a,b\in X$, $a\in\lkl{b}$, and $a\geq b$}\}$ and the relations are all the relations of the forms~(1)--(4) from Proposition~\ref{pr:linpres}. This presentation identifies each $E_{a,b}$ with the corresponding $\tau_{a,b}$. By Relations~(\ref{eq:R1}), (\ref{eq:R2}), (\ref{eq:R5}) and~(R7), we know $\tilde G$ is generated by the transvections $(\{a,b\},a)$ with $a\in\lkl{b}$ and $a\geq b$. We map $\tilde G$ to $G$ by sending each $(\{a,b\},a)$ to the corresponding $\tau_{a,b}$. We will show that this is a homomorphism by checking the relations of our presentation for $G$ already hold in $\tilde G$. Relation~(\ref{it:r1}) follows from Relations~(\ref{eq:R2}) and~(\ref{eq:R3}b). Relation~(\ref{it:r2}) follows from Relation~(\ref{eq:R4}b). For any $a,b\in X$ with $a\in\lkl{b}$ and $a\sim b$, we know from Relation~(\ref{eq:R5}) that $\tau_{a,b}\tau_{b,a}^{-1}\tau_{a,b}$ is $\sigma_{a,b}$, which has order 4 by Relation~(R7) (here we are using that $\tau_{a^{-1},b^{-1}}=\tau_{a,b}$, which holds because $a\in\lkl{b}$, and that $\tau_{a^{-1},b}=\tau_{a,b}^{-1}$). This means that Relation~(\ref{it:r3}) already holds in $\tilde G$. 
By Relation~(\ref{eq:R5}), $(\tau_{a,b}\tau_{b,a}^{-1}\tau_{a,b}\tau_{b,a})^3$ is $(\sigma_{a,b}\tau_{b,a})^3$, which is $\tau_{a,b^{-1}}\tau_{b^{-1},a^{-1}}\tau_{a^{-1},b}\sigma_{a,b}^3$ by Relation~(\ref{eq:R6}), which is $\sigma_{a,b}^{-1}\sigma_{a,b}^3$ by Relation~(\ref{eq:R5}) (and using the facts that $\tau_{a,b^{-1}}=\tau_{a,b}^{-1}$, $\tau_{b^{-1},a^{-1}}=\tau_{a^{-1},b}^{-1}$ and $\tau_{a^{-1},b}=\tau_{a^{-1},b^{-1}}^{-1}$). Then Relation~(\ref{it:r4}) already holds in $\tilde G$. So we map $\tilde G$ to $G$ homomorphically by sending $(\{a,b\},a)\in\Omega_s$ to $E_{a,b}$. It is apparent (from looking at the action on generating sets) that this homomorphism is the inverse to the homomorphism $G$ to $\tilde G$ that sends each element of $\Omega_s\cup P_s$ to its own coset. So $\langle \Omega_s\rangle<\AAG$ has the presentation $\langle \Omega_s\cup P_s|R_s\rangle$. \end{proof} \begin{proposition}\label{pr:pressort} Every $w\in\Phi$ is congruent modulo $R$ to a product $uv$ for some $u\in\Phi_\ell$ and $v\in\Phi_s$. \end{proposition} \begin{proof} This proposition is a refinement of \partref{it:complements}. The only manipulations of elements of $\AAG$ done in that proof are through the sorting substitutions in Definition~\ref{de:sort}. Each of the sorting substitutions comes from applications of relations from $R$, as in Lemma~\ref{le:validsubs}. So the entire argument goes through for $\langle \Omega|R\rangle$. \end{proof} \begin{proof}[Proof of Theorem~\ref{mt:pres}] We have already shown that all the relations in $R$ are identities of $\AAG$ (Proposition~\ref{pr:identities}), so it is enough to show that any element of $\Phi$ representing the trivial element of $\AAG$ is in $\overline{\langle R\rangle}$. Let $w\in\Phi$ represent the trivial element of $\AAG$. By Proposition~\ref{pr:pressort}, $w$ is congruent modulo $R$ to a product $uv$ for $u\in\Phi_\ell$ and $v\in \Phi_s$. 
Let $[u]\in\langle\Omega_\ell\rangle$ and $[v]\in\langle\Omega_s\rangle$ denote the elements of $\AAG$ they represent. Let $W_0$ be the elements of $X$ as an $n$--tuple of conjugacy classes. Suppose that $[v]$ is not a type~(1) Whitehead automorphism; then $[v]$ sends $W_0$ to a strictly longer $n$--tuple. By \partref{it:lrwhalg}, we peak reduce $[u]$ with respect to $[v]\cdot W_0$. Since $[u][v]$ is trivial, $[u]$ sends $[v]\cdot W_0$ to $W_0$; since we have peak reduced $[u]$, the first automorphism $\alpha\in\Omega_\ell$ in our peak-reduced factorization of $[u]$ shortens $[v]\cdot W_0$. However, this contradicts Lemma~\ref{le:shortensrgen}. So $[v]$ is a type~(1) Whitehead automorphism, which we write as $\sigma$. Then $w$ is congruent to $u\sigma^{-1}\sigma v$ modulo $R$. From Proposition~\ref{pr:srwhpres}, we know that $u\sigma^{-1}$ is a product of conjugates of members of $R_s\subset R$, and from Proposition~\ref{pr:lrpres}, we know that $\sigma v$ is a product of conjugates of members of $R_\ell\subset R$. So $w\in\overline{\langle R\rangle}$. \end{proof} \section{Closing Remarks} The applications of peak reduction on $F_n$ mentioned in the introduction all suggest further applications of Theorem~\ref{mt:threeparts}. Firstly, peak reduction can be used to get finite generation and finite presentation results for stabilizers of $k$--tuples of conjugacy classes in $\Aut F_n$, as in McCool~\cite{mcpres}. Along these lines, the author has used Theorem~\ref{mt:threeparts} in~\cite{ssraag} to show that an analog of the mapping class group of a surface inside $\AAG$ is finitely generated. Generally, one could obtain further results similar to Corollary~\ref{co:fixvertsgenset} by proving propositions similar to Proposition~\ref{mp:specialwhalg}, \emph{i.e.} finding additional special cases where peak reduction works on all of $\AAG$. 
Peak reduction on the free group $F_n$ makes an algorithm possible that determines whether two $k$--tuples of conjugacy classes in $F_n$ are in the same $\Aut F_n$ orbit (and makes it possible to find an automorphism taking one to the other, if it exists). Please see Lyndon--Schupp~\cite{ls}, Chapter~1, Proposition~4.19 for a description of this algorithm. As for free abelian groups, row-reduction lets us transform $k$--tuples of elements of $\Z^n$ into standard representatives of their $GL(n,\Z)$--orbits (and more carefully, to find an automorphism taking one to another if it exists). So it seems natural to conjecture the existence of a similar algorithm for $\AAG$: \begin{conjecture}\label{con:autalg} There is an algorithm which, given $u,v\in A_\Gamma$, produces $\alpha\in\AAG$ with $\alpha(u)=v$, or determines in finite time that no such automorphism exists. \end{conjecture} Part~(\ref{it:lrwhalg}) of Theorem~\ref{mt:threeparts} easily implies such an algorithm if we are only considering $\alpha\in\langle\Omega_\ell\rangle$, and \partref{it:srinj} suggests a row-reduction approach if we are only considering $\alpha\in\langle\Omega_s\rangle$. However, it is not clear how these methods could be extended to apply to all of $\AAG$. Proposition~\ref{mp:nowhalg} indicates that it will not be possible to produce the algorithm in Conjecture~\ref{con:autalg} by a direct generalization of the approach for free groups. Finally, it may be possible to use these algorithmic techniques to improve our understanding of spaces that $\AAG$ acts on. As in Culler-Vogtmann~\cite{cullervogtmann}, it should be possible to use peak-reduction techniques to find paths in $\AAG$--spaces that behave nicely with respect to combinatorial Morse functions. In particular, this should help us to better understand outer space of right-angled Artin groups, as defined in Charney--Crisp--Vogtmann~\cite{ccv} for triangle-free $\Gamma$. 
For general $\Gamma$, certain spaces of isometric actions of $A_\Gamma$ on CAT(0) cubical complexes are $\AAG$--spaces. Hopefully our techniques could lead to a better understanding of these spaces as well. \noindent Dept. of Mathematics, California Institute of Technology\\ Pasadena, Ca 91125\\ E-mail: {\tt [email protected]} \end{document}
\begin{document} \title{The Burgess inequality and the least $k$-th power non-residue} \author{Enrique Trevi\~no } \address{Department of Mathematics and Computer Science, Lake Forest College, Lake Forest, Illinois 60045} \email{[email protected]} \thanks{Many results in this paper are in Chapter 6 of the author's Ph. D. Dissertation \cite{ET}.} \subjclass[2010]{Primary 11L40, 11Y60} \keywords{Character Sums, Burgess inequality, power residues and non-residues.} \begin{abstract} The Burgess inequality is the best upper bound we have for the character sum $S_{\chi}(M,N) = \sum_{M<n\le M+N} \chi(n).$ Until recently, no explicit estimates had been given for the inequality. In 2006, Booker gave an explicit estimate for quadratic characters which he used to calculate the class number of a 32-digit discriminant. McGown used an explicit estimate to show that there are no norm-Euclidean Galois cubic fields with discriminant greater than $10^{140}$. Both of their explicit estimates are on restricted ranges. In this paper we prove an explicit estimate that works for any $M$ and $N$. We also improve McGown's estimates in a slightly narrower range, getting explicit estimates for characters of any order. We apply the estimates to the question of how large must a prime $p$ be to ensure that there is a $k$-th power non-residue less than $p^{1/6}$. \end{abstract} \maketitle \section{introduction} Let $\chi$ be a character $\bmod{\,q}$ for some integer $q > 1$. Throughout the paper we will use the term character for Dirichlet character. Let $S_{\chi}(M,N)$ be defined as follows \begin{equation*} S_{\chi}(M,N) = \sum_{M<n\le M+N} \chi(n). \end{equation*} Historically, studying this sum has proven fruitful in analytic number theory to bound the least $k$-th power non-residue, to bound class numbers, to bound the least inert prime in a number field, and to bound the least primitive root, among other applications. 
The first non-trivial bound for this sum was proven independently by P\'olya and Vinogradov in 1918; namely, they showed that there exists an absolute constant $c>0$ such that $|S_{\chi}(M,N)|\le c\sqrt{q}\log{q}$. The P\'olya--Vinogradov inequality is very useful when $N$ is big compared to $\sqrt{q}$, but not very useful otherwise (since trivially $|S_{\chi}(M,N)| \le N$). What we want is an inequality that gives a nontrivial result even when $N$ is small compared to $\sqrt{q}$. The best theorem for short character sums is known as the Burgess inequality (\cite{Bur1957},\cite{Bur1962}, \cite{Bur1963}, \cite{Bur1986}) and allows us to take $N$ as small as $q^{\frac{1}{4}-o(1)}$.\footnote{To get $-o(1)$ instead of $+o(1)$, one needs a clever use of the large sieve as done by Hildebrand in \cite{Hil1986}.} We state the theorem below: \begin{theorema}[Burgess] Let $\chi$ be a primitive character $\bmod{\,q}$ with $q > 1$, and let $M$ and $N$ be non-negative reals with $N\ge 1$. Then $$|S_{\chi}(M,N)| \ll N^{1-\frac{1}{r}}q^{\frac{r+1}{4r^2} + \varepsilon}$$ for $r = 2,3$ and for any $r \geq 1$ if $q$ is cubefree, the implied constant depending only on $\varepsilon$ and $r$. \end{theorema} To prove the Burgess inequality, one of the keys is the following inequality which relies on a deep theorem of Weil \cite{Weil}: \begin{theore}\label{main inequality Burgess} For $p$ a prime number, $r$ a positive integer and $B$ a positive real number satisfying $r \le 9B$, let $\chi$ be a non-principal character to the modulus $p$. Then $$\sum_{x \bmod{\,p}}\left|\sum_{1\le b\le B} \chi(x + b)\right|^{2r} \le (2r-1)!! B^{r} p + (2r-1)B^{2r}\sqrt{p},$$ where $(2r-1)!! = (2r-1)(2r-3)\ldots(1).$ \end{theore} The above theorem was proven with weaker constants by Erd\H{o}s and Davenport in \cite{DE1952}, and Burgess improved it to better constants and used it to get the Burgess inequality. In \cite{Boo2006}, Booker proved it with these constants for quadratic characters. 
In \cite{ETk}, the author extended it to all characters. The reliance on the Weil estimate makes it difficult to improve the Burgess inequality asymptotically. Recently, some problems have required getting explicit estimates on the Burgess inequality. Booker in \cite{Boo2006} needed an explicit form of the inequality to compute a 32-digit discriminant. McGown in \cite{McG3} used an explicit form of the inequality to show that there are no norm-Euclidean Galois cubic fields of discriminant greater than $10^{140}$. The goal of this paper is to improve their explicit estimates in the ranges they work in and give an explicit estimate that works regardless of the range of $N$. We apply these estimates to a question about $k$-th power non-residues $\bmod{\,p}$. The work of Booker and McGown relies on the exposition of the Burgess inequality in \cite{IK2004}. In that book, Iwaniec and Kowalski sketch the proof of the following: \begin{theoremab}\label{theorem Iwaniec} Let $p$ be a large enough prime. Let $\chi$ be a non-principal character $\bmod{\,p}$. Let $r$ be a positive integer, and let $M$ and $N$ be non-negative integers with $N\ge 1$. Then $$|S_{\chi}(M,N)| \le 30 N^{1-\frac{1}{r}} p^{\frac{r+1}{4r^2}}(\log{p})^{\frac{1}{r}}.$$ \end{theoremab} In Section \ref{burgess section 1} we improve Theorem B to \begin{theorem} \label{burgess kiks 1} Let $p$ be a prime. Let $\chi$ be a non-principal character $\bmod{\,p}$. Let $M$ and $N$ be non-negative integers with $N\ge 1$, let $2\le r\le 10$ be a positive integer, and let $p_0$ be a positive real number. Then for $p \ge p_0$, there exists $c_1(r)$, a constant depending on $r$ and $p_0$ such that $$|S_{\chi}(M,N)| \le c_1(r) N^{1-\frac{1}{r}} p^{\frac{r+1}{4r^2}}(\log{p})^{\frac{1}{r}},$$ where $c_1(r)$ is given by Table \ref{super table burgess}. 
\begin{table}[h] \begin{center} \begin{tabular}{|c| c | c| c|} \hline $r$ & $p_0 = 10^7$ & $p_0= 10^{10}$ & $p_0= 10^{20}$ \\ \hline 2 & 2.7381 & 2.5173 & 2.3549 \\ \hline 3 & 2.0197 & 1.7385 & 1.3695 \\ \hline 4 & 1.7308 & 1.5151 & 1.3104 \\ \hline 5 & 1.6107 & 1.4572 & 1.2987 \\ \hline 6 & 1.5482 & 1.4274 & 1.2901 \\ \hline 7 & 1.5052 & 1.4042 & 1.2813 \\ \hline 8 & 1.4703 & 1.3846 & 1.2729 \\ \hline 9 & 1.4411 & 1.3662 & 1.2641 \\ \hline 10 & 1.4160 & 1.3495 & 1.2562 \\ \hline \end{tabular} \end{center} \caption{Values for the constant $c_1(r)$ in the Burgess inequality.}\label{super table burgess} \end{table} \end{theorem} In the spirit of Theorem B, where we have no restriction on $r$, we also prove the following corollary: \begin{corollary}\label{burgess corollary 1} Let $p$ be a prime such that $p \ge 10^7$. Let $\chi$ be a non-principal character $\bmod{\,p}$. Let $r$ be a positive integer, and let $M$ and $N$ be non-negative integers with $N\ge 1$. Then $$|S_{\chi}(M,N)| \le 2.74 N^{1-\frac{1}{r}} p^{\frac{r+1}{4r^2}}(\log{p})^{\frac{1}{r}}.$$ \end{corollary} Restricting $N$ to be less than $4 p^{\frac{1}{2} + \frac{1}{4r}}$, McGown in \cite{McG3} proved an explicit version of Burgess with worse constants but with a better exponent in $\log{p}$. Indeed, he proved: \begin{theoremac}\label{McGown's theorem burgess} Let $p\ge 2\cdot 10^{4}$ be a prime number. Let $M$ and $N$ be non-negative integers with $1\le N\le 4 p^{\frac{1}{2} + \frac{1}{4r}}$. Suppose $\chi$ is a non-principal character $\bmod{\,p}$. Then there exists a computable constant $C(r)$ such that \begin{equation*} |S_{\chi}(M,N)| < C(r) N^{1-\frac{1}{r}} p^{\frac{r+1}{4r^2}}(\log{p})^{\frac{1}{2r}}, \end{equation*} where $C(r)$ is given by Table \ref{table mcgown burgess}. 
\begin{table}[h] \begin{center} \begin{tabular}{|c| c | c| c|} \hline $r$ & $C(r)$ & $r$ & $C(r)$ \\ \hline 2 & 10.0366 & 9 & 2.1467 \\ \hline 3 & 4.9539 & 10 & 2.0492 \\ \hline 4 & 3.6493 & 11 & 1.9712 \\ \hline 5 & 3.0356 & 12 & 1.9073 \\ \hline 6 & 2.6765 & 13 & 1.8540 \\ \hline 7 & 2.4400 & 14 & 1.8088 \\ \hline 8 & 2.2721 & 15 & 1.7700\\ \hline \end{tabular} \end{center} \caption{Values for the constant $C(r)$ in the Burgess inequality.}\label{table mcgown burgess} \end{table} \end{theoremac} The restriction that $N \le 4p^{\frac{1}{2} + \frac{1}{4r}}$ is used to get the exponent $\frac{1}{2r}$ in the $\log{p}$ term of the inequality. In Section \ref{burgess section 2}, we improve McGown's Theorem to have better constants in a similar range. \begin{theorem}\label{burgess kiks 2} Let $p$ be a prime. Let $\chi$ be a non-principal character $\bmod{\,p}$. Let $M$ and $N$ be non-negative integers with $1\le N\le 2 p^{\frac{1}{2} + \frac{1}{4r}}$, let $r\le 10$ be a positive integer, and let $p_0$ be a positive real number. Then for $p \ge p_0$, there exists $c_2(r)$, a constant depending on $r$ and $p_0$ such that \begin{equation*} |S_{\chi}(M,N)| < c_2(r) N^{1-\frac{1}{r}} p^{\frac{r+1}{4r^2}}(\log{p})^{\frac{1}{2r}}, \end{equation*} where $c_2(r)$ is given by Table \ref{table kiks burgess}. 
\begin{table} \begin{center} \begin{tabular}{|c| c | c| c|} \hline $r$ & $p_0 = 10^{10}$ & $p_0 = 10^{15}$ & $p_0 = 10^{20}$ \\ \hline 2 & 3.6529 & 3.5851 & 3.5751 \\ \hline 3 & 2.5888 & 2.5144 & 2.4945 \\ \hline 4 & 2.1914 & 2.1258 & 2.1078 \\ \hline 5 & 1.9841 & 1.9231 & 1.9043 \\ \hline 6 & 1.8508 & 1.7959 & 1.7757 \\ \hline 7 & 1.7586 & 1.7066 & 1.6854 \\ \hline 8 & 1.6869 & 1.6384 & 1.6187 \\ \hline 9 & 1.6283 & 1.5857 & 1.5654 \\ \hline 10 & 1.5794 & 1.5410 & 1.5216 \\ \hline \end{tabular} \end{center}\caption{Values for the constant $c_2(r)$ in the Burgess inequality.}\label{table kiks burgess} \end{table} \end{theorem} Using an idea from \cite{MV2007}, we can get rid of the restriction on $N$ for $r\ge 3$. \begin{corollary}\label{burgess kiks corollary 2} Let $p\ge 10^{10}$ be a prime number. Let $M$ and $N$ be non-negative integers with $N\ge 1$. Suppose $\chi$ is a non-principal character $\bmod{\,p}$ and that $p \ge p_0$ for some positive real $p_0$. Then for $r\ge 3$, there exists a computable constant $c_2(r)$ depending on $r$ and $p_0$, such that \begin{equation*} |S_{\chi}(M,N)| < c_2(r) N^{1-\frac{1}{r}} p^{\frac{r+1}{4r^2}}(\log{p})^{\frac{1}{2r}}, \end{equation*} where $c_2(r)$ is the same as that of Table \ref{table kiks burgess} whenever $r\ge 3$. \end{corollary} Putting an extra restriction on $N$ (namely, $N \le 2\sqrt{p})$, Booker in \cite{Boo2006} gave better bounds in the special case of quadratic characters. \begin{comment} \begin{theorema} \label{Booker's thm} Let $p > 10^{20}$ be a prime number with $p \equiv 1 \pmod{4}$. Let $r\in \{2,3,4,\ldots,15\}$. Let $M$ and $N$ be real numbers such that $0 < M , N \le 2\sqrt{p}$. Let $\chi$ be a non-principal quadratic character $\bmod{\,p}$. Then $$|S_{\chi}(M,N)| \le \alpha(r) N^{1-\frac{1}{r}} p^{\frac{r+1}{4r^2}}\left(\log{p} + \beta(r)\right)^{\frac{1}{2r}},$$ where $\alpha(r)$ and $\beta(r)$ are given by Table \ref{table booker}. 
\begin{table} \begin{center} \begin{tabular}{c c c | c c c} $r$ & $\alpha(r)$ & $\beta(r)$ & $r$ & $\alpha(r)$ & $\beta(r)$ \\ \hline 2 & 1.8221 & 8.9077 & 9 & 1.4548 & 0.0085 \\ 3 & 1.8000 & 5.3948 & 10 & 1.4231 & -0.4106\\ 4 & 1.7263 & 3.6658 & 11 & 1.3958 & -0.7848\\ 5 & 1.6526 & 2.5405 & 12 & 1.3721 & -1.1232\\ 6 & 1.5892 & 1.7059 & 13 & 1.3512 & -1.4323\\ 7 & 1.5363 & 1.0405 & 14 & 1.3328 & -1.7169\\ 8 & 1.4921 & 0.4856 & 15 & 1.3164 & -1.9808 \end{tabular} \end{center} \caption{Explicit constants on the Burgess inequality for quadratic characters.}\label{table booker} \end{table} \end{theorema} \end{comment} \begin{remark} Using Theorem A one could extend Booker's theorem to all orders of $\chi$ (with slightly worse constants). The reason we would get slightly worse constants is that in the quadratic case, the inequality in Theorem A can be improved slightly to $(2r-1)!! B^{r} p + (2r-2)B^{2r}\sqrt{p}$. Every other part of Booker's proof extends naturally, but this part of the inequality fails when looking at higher orders. \end{remark} In Section \ref{section p^1/6}, we apply these estimates to a question about $k$-th power non-residues $\bmod{\,p}$. Indeed, let $p$ be a prime and let $k$ be an integer with $k \mid p-1$ and $k > 1$. Let $g(p,k)$ be the least $k$-th power non-residue $\bmod{\,p}$. The case $k = 2$, i.e., the question of how big the least quadratic non-residue is, has been studied extensively. A probabilistic heuristic using that a prime $q$ is a quadratic non-residue $\bmod{\,p}$ half of the time, suggests that $g(p,2) \ll \log{p}\log{\log{p}}$ and that $g(p,2) \gg \log{p}\log{\log{p}}$ for infinitely many $p$. 
Assuming the Generalized Riemann Hypothesis for Dirichlet L-functions (GRH), Ankeny \cite{Ank1952} showed that $g(p,k) \ll (\log{p})^2$ and Bach \cite{Bach1990} made this explicit by proving (under GRH) that $g(p,k) \le 2(\log{p})^2.$ The best unconditional results (for $g(p,k)$) are due to Burgess \cite{Bur1957}, who, building on work by Vinogradov \cite{Vinogradov}, showed that $$g(p,k) \ll_{\varepsilon} p^{\frac{1}{4\sqrt{e}}+\varepsilon}.$$ For $k \ge 3$, better estimates which depend upon $k$ have been proven by Wang Yuan (\cite{Yuan}) building on work of Vinogradov (\cite{Vinogradov}) and Buh{\v{s}}tab (\cite{Bu1949}). All of the work described so far has been of asymptotic nature. In terms of getting explicit bounds, Karl Norton \cite{Nor1971}, building on a technique of Burgess \cite{Bur19632}, was able to show that $g(p,k) \leq 3.9 p^{1/4}\log p$ unless $k =2$ and $p\equiv 3 \pmod 4$, in which case he showed $g(p,k) \leq 4.7 p^{1/4}\log p$. In \cite{ETk}, the author improved Norton's bounds to $0.9 p^{1/4}\log{p}$ and $1.1 p^{1/4}\log{p}$, respectively. These bounds are far from the asymptotic bound of $p^{\frac{1}{4\sqrt{e}}+\varepsilon}$. In this paper, as an application of the explicit Burgess inequality, we find an upper bound on how large $p$ has to be to ensure that there is a $k$-th power non-residue less than $p^{1/6}$. \begin{theorem}\label{theorem p^1.6} Let $p$ be a prime number and $k >1$ be a positive divisor of $p-1$. Then for $p \ge 10^{4732}$, the least $k$-th power non-residue $\bmod{\,p}$ is less than or equal to $p^{1/6}$. \end{theorem} \begin{remark} The techniques involved in the proof can be used to answer this question for $p^{\alpha}$ whenever $\alpha > \frac{1}{4\sqrt{e}}$. \end{remark} \section{Preliminary lemmas} Let $A$ and $N$ be positive integers. 
Let $v(x)$ be the number of representations of $x$ as $\bar{a}n \pmod{p}$, where $\bar{a}$ is the inverse of $a \pmod{p}$, $1\le a\le A$ and $M < n \le M+N$, that is, \begin{equation}\label{burgess v(x)} v(x) = \# \left\{(a,n)\in\mathbb{N}^2\mid 1\le a\le A, \mbox{ } M < n\le M+N \mbox{ and } n\equiv ax \bmod{\,p}\right\}. \end{equation} The main lemma in this section is the following: \begin{lemma}\label{burgess V2 lemma} Let $p$ be a prime and let $N < p$ be a positive integer. Let $A\ge 28$ be an integer satisfying $A< \frac{N}{12}$. Then \begin{equation}\label{burgess V2 claim} V_2 = \sum_{x\kern-3pt\mod{p}}v^2(x) \leq 2AN\left(\frac{AN}{p} + \log(1.85 A)\right). \end{equation} \end{lemma} To prove the lemma regarding $V_2$ we will need a couple of estimates involving the $\phi$ function (Lemmas \ref{burgess claim 2} and \ref{burgess claim 1}), an estimate on a sum of logarithms (Lemma \ref{burgess claim 3}) and a non-trivial combinatorial count (Lemma \ref{burgess counting lemma}). \begin{lemma}\label{burgess claim 2} For $x\ge 1$ a real number we have: \begin{equation}\label{burgess claim 2 eq} \sum_{n\leq x}\frac{\phi(n)}{n} \leq \frac{6}{\pi^2} x + \log{x} + 1. \end{equation} \end{lemma} \begin{proof} For $1\le x < 2$, the left hand side of \eqref{burgess claim 2 eq} is $1$, while the right hand side is at least $1$. We can manually check that for all integers $x$ satisfying $2\le x \le 41$ we have $$\sum_{n\le x} \frac{\phi(n)}{n} \le \frac{6}{\pi^2}(x-1) + \log{(x-1)} + 1,$$ implying that \eqref{burgess claim 2 eq} is true for $x< 41$. Therefore, we may assume that $x \ge 41$. Let's work with the sum: \begin{equation}\label{kakaro} \sum_{n\leq x}\frac{\phi(n)}{n} = \sum_{n \leq x}\frac{1}{n}\sum_{d | n}\mu(d)\frac{n}{d} = \sum_{d\leq x}\sum_{n\leq \frac{x}{d}}\frac{\mu(d)}{d} = \sum_{d\leq x}\left\lfloor\frac{x}{d}\right\rfloor\frac{\mu(d)}{d}. 
\end{equation} From \cite[Theorem $422$]{Hardy} it follows that for $x \ge 1$ \begin{equation}\label{Hardylin} \sum_{d\le x}\frac{1}{d} < \log{x} + \gamma + \frac{1}{x}. \end{equation} Using \eqref{Hardylin} in \eqref{kakaro} yields \begin{equation}\label{almost there 1} \sum_{n\le x}\frac{\phi(n)}{n} \le x\sum_{d\ge 1} \frac{\mu(d)}{d^2} -x\sum_{d>x}\frac{\mu(d)}{d^2}+ \sum_{d\leq x} \frac{1}{d} \leq\frac{6}{\pi^2}x + \log{x} + \gamma+\frac{1}{x} - x\sum_{d>x}\frac{\mu(d)}{d^2}. \end{equation} Moser and Macleod \cite{MM1966} gave a simple proof that for $x\ge 2$ we have \begin{equation}\label{moser} \left|\sum_{d>x}\frac{\mu(d)}{d^2}\right|\le \frac{1}{3x} + \frac{8}{3x^2}. \end{equation} Combining \eqref{moser} with \eqref{almost there 1} yields for $x\ge 41$ that $$\sum_{n\le x}\frac{\phi(n)}{n} \le \frac{6}{\pi^2}x + \log{x} + \gamma + \frac{1}{x}+\frac{1}{3} + \frac{8}{3x} \le \frac{6}{\pi^2}x + \log{x} + 1.$$ \end{proof} \begin{lemma}\label{burgess claim 1} For $x\ge 1$ a real number we have: \begin{equation}\label{burgess claim 1 eq} \sum_{n \leq x} n\phi(n) \leq \frac{2}{\pi^2}x^3 + \frac{1}{2}x^2\log{x} + x^2. \end{equation} \end{lemma} \begin{proof} For $1\le x<2$, the left hand side of \eqref{burgess claim 1 eq} is $1$, while the right hand side is at least $x^2 \ge 1$. Therefore it is true for $1\le x<2$. Now for $2\le x<3$, the left hand side is $3$, while the right hand side is at least $x^2 \ge 4$. Therefore \eqref{burgess claim 1 eq} is true for $1\le x < 3$. In the rest of the proof we will assume that $x\ge 3$. Let's work with the sum: \begin{multline*} \sum_{n\leq x} \phi(n)n = \sum_{n \leq x}\sum_{d | n} \frac{\mu(d)n^2}{d} = \sum_{d \leq x} \mu(d)d \sum_{dm \leq x}m^2\\ = \sum_{d\leq x}\frac{\mu(d)d}{6}\left\lfloor\frac{x}{d}\right\rfloor\left(\left\lfloor\frac{x}{d}\right\rfloor + 1\right)\left(2\left\lfloor\frac{x}{d}\right\rfloor + 1\right). 
\end{multline*} Now, let $\theta_d =\frac{x}{d}-\left\lfloor\frac{x}{d}\right\rfloor.$ Then we have \begin{multline}\label{choco2} \sum_{n\le x} \phi(n) n = \frac{x^3}{3}\sum_{d\leq x} \frac{\mu(d)}{d^2} + \frac{x^2}{6}\sum_{d\le x}\frac{(3-6\theta_d )\mu(d)}{d} \\+ \frac{x}{6}\sum_{d\le x}\left(6\theta_d^2 - 6\theta_d + 1\right)\mu(d) - \frac{1}{6}\sum_{d\le x} \theta_d(1-\theta_d )(1-2\theta_d )\mu(d)d . \end{multline} From \eqref{Hardylin} it follows that for $x\ge 3$ \begin{equation}\label{hardy no mames} \sum_{d\le x} \frac{1}{d} < \log{x} + \gamma + \frac{1}{x} < \log{x} + 1-\frac{1}{60}-\frac{1}{60x}. \end{equation} Using that $0\le\theta_d \leq 1$ we have that $|3-6\theta_d|\le 3$, that $|6\theta_d^2 - 6\theta_d+1| \le 1$ and $|(1-\theta_d)(1-2\theta_d)(-\theta_d)| \le \frac{1}{10}$. Therefore, using \eqref{choco2}, \eqref{hardy no mames}, that $\displaystyle\sum_{d\ge 1}\frac{\mu(d)}{d^2} = \frac{6}{\pi^2}$, and that $|\mu(d)| \leq 1$, we get \begin{multline}\label{burgess lemma 2 eq} \sum_{n\le x} \phi(n) n \leq \frac{x^3}{3}\sum_{d\leq x} \frac{\mu(d)}{d^2} + \frac{x^2}{2}\sum_{d\leq x}\frac{1}{d} + \frac{x}{6}\sum_{d\leq x} 1 + \frac{1}{60}\sum_{d \leq x} d \\ \leq \frac{2}{\pi^2}x^3 - \frac{x^3}{3}\sum_{d>x}\frac{\mu(d)}{d^2} + \frac{1}{2}x^2\log{x} + \frac{x^2}{2} -\frac{x^2}{120} -\frac{x}{120}+\frac{x^2}{6}+ \frac{1}{60}\left(\frac{x(x+1)}{2}\right). \end{multline} From \eqref{moser} we have (for $x \ge 2$) \begin{equation*}\label{burgess key to lemma 2} \sum_{d> x}\frac{\mu(d)}{d^2} \ge \frac{1}{3x} - \frac{8}{3x^2} \ge -\frac{1}{x}. \end{equation*} Combining this with \eqref{burgess lemma 2 eq} yields the lemma. \end{proof} \begin{lemma}\label{burgess claim 3} For $x \geq 1$ we have: $$\sum_{d\leq x} \log{\left(\frac{x}{d}\right)} \leq x -1$$ \end{lemma} \begin{proof} For $1\leq x <2$ we have $\displaystyle\sum_{d\leq x} \log{\left(\frac{x}{d}\right)} = \log{x} \leq x -1$. Therefore, we may assume $x \geq 2$. 
Now, \begin{equation}\label{logsum} \sum_{d\leq x} \log{\left(\frac{x}{d}\right)} = \left\lfloor x\right\rfloor \log{x} - \sum_{d\leq x} \log{d} \leq \left\lfloor x\right\rfloor \log{x} - \left\lfloor x\right\rfloor \log{\lfloor x\rfloor}+\lfloor x\rfloor -1. \end{equation} To get the second inequality we used that $$\sum_{d\leq x} \log{d} = \sum_{2\le d\le x}\log{d} \geq \int_{1}^{\lfloor x\rfloor}\log{t}\,dt = \lfloor x \rfloor\log{\lfloor x\rfloor} -\lfloor x\rfloor + 1.$$ Now, notice that $x = \lfloor x \rfloor + \{x\}$ and $\log{(1+y)}\leq y$. Therefore we have \begin{equation}\label{logito} \lfloor x\rfloor \log{x} = \lfloor x\rfloor\log{\lfloor x\rfloor} + \lfloor x\rfloor\log{(x/\lfloor x\rfloor)}\leq \lfloor x\rfloor\log{\lfloor x\rfloor} + \{x\}. \end{equation} Combining equations (\ref{logsum}) and (\ref{logito}) yields \begin{equation*} \sum_{d\leq x} \log{\left(\frac{x}{d}\right)}\leq \{x\} + \lfloor x\rfloor - 1 = x-1. \end{equation*} \end{proof} \begin{lemma}\label{burgess counting lemma} Let $A \ge 2$, $N \ge 1$, $a_1$, $a_2$ and $M$ be integers. Let $p > N$ be a prime number. Suppose, $1\le a_1,a_2 \le A$ with $a_1 \neq a_2$. Then the number of pairs of integers $(n_1,n_2)$ satisfying $M < n_1,n_2 \le N+M$ and $a_1 n_2 - a_2 n_1 = kp$ is bounded above by $$N \frac{\gcd{(a_1,a_2)}}{\max\{a_1,a_2\}} + 1.$$ \end{lemma} \begin{proof} Let $d = \gcd{(a_1,a_2)}$. Since $a_1n_2 -a_2n_1 = kp$, we have that $d | k$. Let $a_1=a_1^{'}d, a_2 = a_2^{'}d$ and $k = k^{'} d$. Now, we also have \begin{equation}\label{goku} n_2 = \frac{k p+a_2n_1}{a_1} = \frac{k^{'} p + a_2^{'}n_1}{a_1^{'}}. \end{equation} The right hand side of \eqref{goku} must be an integer. Therefore $k^{'}p + a_2^{'}n_1 \equiv 0 \bmod{a_1^{'}}$. Since this is a linear equation in terms of $n_1$, there is at most one solution $\bmod{\,a_1^{'}}$. Therefore, in the interval $(M, M+N]$ there are at most $\displaystyle\frac{N}{a_1^{'}} + 1$ choices of $n_1$. 
Since $n_2$ is uniquely determined from $n_1$, the number of pairs $(n_1, n_2)$ satisfying the conditions of the lemma is bounded by $$\frac{N}{a_1^{'}} + 1 = N\frac{\gcd{(a_1,a_2)}}{a_1} + 1.$$ Analogously, the number of pairs is bounded by $N\displaystyle\frac{\gcd{(a_1,a_2)}}{a_2} + 1$. The statement of the lemma is now an easy consequence. \end{proof} Now we are ready to prove Lemma \ref{burgess V2 lemma}. \begin{proof}[Proof of Lemma \ref{burgess V2 lemma}] We'll begin by noting that $V_2$ is the number of quadruples $(a_1,a_2,n_1,n_2)$ with $1\leq a_1,a_2 \leq A$ and $M < n_1,n_2 \leq M + N$ such that $a_1n_2 \equiv a_2n_1\pmod p$. If $a_1 = a_2$, since $N < p$, we have that $n_1 = n_2$ because $n_1 \equiv n_2 \pmod p$ while $|n_1 - n_2| \leq N < p$. Therefore, the number of quadruples in this case is $AN$. Fix $a_1$ and $a_2$ with $a_1 \neq a_2$. Let $k$ be an integer satisfying \begin{equation}\label{burgess the equation} a_1n_2 - a_2n_1 = kp, \end{equation} for some $n_1$ and $n_2$. We can put a bound on possible values for $k$. First of all, $k$ must be a multiple of $\gcd{(a_1,a_2)}$. Now, if we write $n_1 = n_1^{'} + M$ and $n_2 = n_2^{'} + M$, we have, using $kp - (a_1-a_2)M = a_1n_2^{'} - a_2n_1^{'}$, that $$-a_2 N \le - a_2n_1^{'} < kp - (a_1-a_2)M < a_1n_2^{'} \le a_1 N .$$ Therefore $k$ lies in an interval of length at most $\frac{(a_1 + a_2) N}{p}$. Since $k$ is a multiple of $\gcd{(a_1,a_2)}$ and $k$ lies in such an interval, then there are at most $$\frac{(a_1 + a_2) N}{\gcd{(a_1,a_2)} p} + 1,$$ choices for $k$. Given $a_1,a_2$ and $k$ we can count the number of pairs $(n_1,n_2)$ which would satisfy (\ref{burgess the equation}). By Lemma \ref{burgess counting lemma}, the number of pairs is bounded by $N \frac{\gcd{(a_1,a_2)}}{\max\{a_1,a_2\}} + 1$. 
Therefore we get \begin{multline}\label{bubu} V_2 \leq AN + 2\sum_{a_1 < a_2} \Big( \frac{(a_1 + a_2) N}{\gcd{(a_1,a_2)}p} + 1 \Big) \Big( \frac{\gcd{(a_1,a_2)} N}{\max\{a_1,a_2\}} + 1 \Big) \\ = AN + \frac{2N^2}{p} S_1 + \frac{2N}{p} S_2 + 2N S_3 + A^2 - A, \end{multline} where \begin{equation*}\label{S_1} S_1 = \sum_{a_1 < a_2} \frac{a_1 + a_2}{\max\{a_1,a_2\}} \mbox{ , } \end{equation*} \begin{equation*}\label{S_2} S_2 = \sum_{a_1 < a_2} \frac{a_1 + a_2}{\gcd{(a_1,a_2)}} \mbox{ , } \end{equation*} and \begin{equation}\label{S_3} S_3 = \sum_{a_1 < a_2} \frac{\gcd{(a_1,a_2)}}{\max\{a_1,a_2\}}. \end{equation} Dealing with $S_1$ is straightforward, in fact $S_1$ is \begin{equation}\label{burgess S1} \sum_{a_2 \leq A}\sum_{a_1 < a_2} \frac{a_1 + a_2}{a_2} = \sum_{a_2 \leq A} \left(a_2 -1 + \frac{a_2(a_2-1)}{2a_2}\right) = \frac{3}{2}\frac{A(A-1)}{2} = \frac{3}{4}A^2- \frac{3}{4}A. \end{equation} Now, let's estimate $S_2$: \begin{multline*} S_2 = \sum_{a_1< a_2 \leq A} \frac{a_1+a_2}{\gcd{(a_1,a_2)}} = \sum_{d \leq A}\sum_{b_2 \leq \frac{A}{d}}\sum_{b_1 < b_2, (b_1,b_2) = 1} \left(b_1 + b_2\right)\\ = \sum_{d\leq A}\sum_{2\leq b_2 \leq \frac{A}{d}} \left(\frac{\phi(b_2)}{2}b_2+ \phi(b_2)b_2 \right) = \frac{3}{2}\sum_{d\leq A}\sum_{2\leq b_2 \leq \frac{A}{d}} \phi(b_2)b_2. \end{multline*} Using Lemma \ref{burgess claim 1}, we get \begin{equation*} S_2 \leq \frac{3}{\pi^2}\sum_{d\leq A}\big(\frac{A}{d}\big)^3 + \frac{3}{4}\sum_{d\leq A} \big(\frac{A}{d}\big)^2\log{(\frac{A}{d})} + \frac{3}{2}\sum_{d\leq A} \big(\frac{A}{d}\big)^2. \end{equation*} Using that $\log{(\frac{A}{d})} = \log{A}-\log{d}$, and that $\displaystyle\sum_{d\ge 1} \frac{1}{d^s} = \zeta(s)$, yields \begin{equation*} S_2 \le \frac{3\zeta(3)}{\pi^2}A^3 + \frac{3\zeta(2)}{4}A^2\log{A} - \frac{3}{4}A^2\sum_{d\le A}\frac{\log{d}}{d^2} + \frac{3}{2}A^2\zeta(2). 
\end{equation*} Using that for $A\ge 11$ we have $\frac{3\zeta(2)}{2} - \frac{3}{4}\sum_{d\le A}\frac{\log{d}}{d^2} < 2$ yields \begin{equation}\label{burgess S2} S_2 \leq \frac{3\zeta(3)}{\pi^2}A^3 + \frac{3\zeta(2)}{4}A^2\log{(A)} + 2 A^2. \end{equation} Let's estimate $S_3$. We have $$S_3 = \sum_{a_1< a_2 \leq A} \frac{\gcd{(a_1,a_2)}}{\max(a_1,a_2)} = \sum_{d \leq A}\sum_{b_2 \leq \frac{A}{d}}\sum_{b_1 < b_2, (b_1,b_2) = 1} \frac{1}{b_2} = \sum_{d \leq A} \sum_{2\leq b_2 \leq \frac{A}{d}} \frac{\phi(b_2)}{b_2}.$$ Using Lemma \ref{burgess claim 2} yields $$S_3 \leq \sum_{d\leq A} \left(\frac{A}{d}\frac{1}{\zeta(2)} + \log{(\frac{A}{d})}\right) = \frac{6}{\pi^2} A\sum_{d\le A}\frac{1}{d} + \sum_{d\le A} \log{(\frac{A}{d})}. $$ From \eqref{Hardylin} it follows that for $A\ge 27$ \begin{equation*} \sum_{d\le A} \frac{1}{d} < \log{A} + \gamma + \frac{1}{A} < \log{(1.85 A)}. \end{equation*} Using this and Lemma \ref{burgess claim 3} yields \begin{equation}\label{burgess S3} S_3 \le \frac{6}{\pi^2} A\log{(1.85A)} + A -1. \end{equation} Using (\ref{burgess S1}), (\ref{burgess S2}) and (\ref{burgess S3}) in (\ref{bubu}) yields the following upper bound for $V_2$: \begin{equation}\label{*} 2AN\Big(\frac{3}{2} + \frac{A-1}{2N} + \frac{3AN}{4p} - \frac{3N}{4p} + \frac{3\zeta(3)A^2}{\pi^2 p} + \frac{3\zeta(2)A\log{A}}{4p} + \frac{6}{\pi^2}\log{(1.85A)} -\frac{1}{A} + \frac{2A}{p}\Big) \end{equation} For $A\ge 4$, we have \begin{equation}\label{burgess *1} \frac{3\zeta(3)A^2}{\pi^2 p} + \frac{3\zeta(2)A\log{A}}{4p} < \frac{3}{4}\frac{A^2}{p}. \end{equation} Since $N > 3A$ we have the following two inequalities: \begin{equation}\label{1}\frac{AN}{4p} > \frac{3}{4}\frac{A^2}{p} \mbox{ and } \frac{3N}{4p} > \frac{2A}{p}. \end{equation} Combining \eqref{burgess *1} and \eqref{1} yields \begin{equation}\label{burgess *2} \frac{3AN}{4p} +\left(\frac{3\zeta(3)A^2}{\pi^2 p} + \frac{3\zeta(2) A\log{A}}{4p}\right)+\left(\frac{2A}{p} - \frac{3N}{4p}\right) < \frac{AN}{p}. 
\end{equation} Finally, using that $A\ge 28$ and that $N > 12A$, yields \begin{equation}\label{3}\Big(1-\frac{6}{\pi^2}\Big)\log{(1.85 A)} \geq \Big(1-\frac{6}{\pi^2}\Big)\log{(51.8)} \geq 1.54766 > \frac{3}{2} + \frac{1}{24} \geq \frac{3}{2} + \frac{A}{2N} .\end{equation} Combining \eqref{burgess *2} and \eqref{3} in \eqref{*} yields \eqref{burgess V2 claim}. \end{proof} \begin{remark} The main term will come from the $\log{(1.85A)}$ term and the $1.85$ can be changed to a smaller number (the limit being $e^{\gamma}$), forcing $A$ to be slightly larger to make the inequalities work. Also, the coefficient on $\log{(1.85A)}$ can be changed to be as close to $\frac{6}{\pi^2}$ as we want as long as $A$ is big enough. It is important to note that big $A$'s will mean forcing $p$ to be much bigger in the estimates for the Burgess inequality. \end{remark} \begin{remark} The constraint $A\ge 28$ is used to get the main term to be $\log{(1.85 A)}$; however, we can relax the condition on $A$ and get a slightly worse main term. We chose our values this way to get the constants in Tables \ref{super table burgess} and \ref{table kiks burgess} as low as possible for small values of $r$. Relaxing the $A\ge 28$ condition would make these constants worse, but improve the constants for larger values of $r$. Since the small values of $r$ seem to be the most useful in applications, we decided to focus on minimizing these cases. \end{remark} \section{Explicit Burgess inequality}\label{burgess section 1} \begin{proof}[Proof of Theorem \ref{burgess kiks 1}] Let $M$ and $N\ge 1$ be non-negative integers. Let $r \ge 2$ be a positive integer. Fix a constant $c_1(r) \ge 1$ (which we will name later). We will prove the Theorem by induction. 
Assume that for all positive integers $h< N$, we have $$|S_{\chi}(M,h)|\le c_1(r) h^{1-\frac{1}{r}} p^{\frac{r+1}{4r^2}}(\log{p})^{\frac{1}{r}}.$$ The idea is to estimate $S_{\chi}(M,N)$ by shifting by $h$ ($n \mapsto n + h$) and getting an error that we can deal with by induction. Note that, for all $h < N$, \begin{equation*} S_{\chi}(M,N) = \sum_{M < n \leq M+N} \chi(n + h) + \sum_{M<n\leq M+h} \chi(n) - \sum_{M+N < n \leq M+N + h} \chi(n). \end{equation*} Therefore \begin{equation*} S_{\chi}(M,N) = \sum_{M < n \leq N+M} \chi(n + h)+ 2\theta E(h), \end{equation*} where $|\theta|\le 1$ which depends upon $h$, and $E(h) = \displaystyle\max_{K}|S_{\chi}(K,h)|$. Let $A$ and $B$ be positive reals and let $H = \lfloor A\rfloor\lfloor B\rfloor$. We will use shifts of length $h = ab$ where $a$ and $b$ are positive integers satisfying $a \leq A$ and $b \leq B$. After averaging over all the pairs $(a,b)$ we get \begin{equation}\label{bur eq 1} S_{\chi}(M,N) = \frac{1}{H}\sum_{a,b} \sum_{M < n \leq M + N} \chi(n + ab) + \frac{1}{H}\sum_{a,b}2\theta E(ab). \end{equation} Let $v(x)$ be defined as in (\ref{burgess v(x)}), then \begin{equation}\label{bur eq 2} \left|\sum_{a,b}\sum_{M < n \leq M + N} \chi(n + ab)\right| \le \sum_{x\kern-3pt\mod{p}} v(x)\left|\sum_{b\le B} \chi(x+b)\right|. \end{equation} Let $$V:= \sum_{x\kern-3pt\mod{p}} v(x)\left|\sum_{b\le B}\chi(x+b)\right|.$$ Then, combining (\ref{bur eq 1}) with (\ref{bur eq 2}), we get \begin{equation}\label{burgess ineq V/H + 2/H} |S_{\chi}(M,N)| \leq \frac{V}{H} + \frac{2}{H}\sum_{a,b} E(ab). \end{equation} We can now focus on estimating $V$. Now define $V_1 := \displaystyle \sum_{x \pmod p} v(x)$,\\ $V_2 := \displaystyle \sum_{x \pmod p} v^2(x)$ and $W := \displaystyle \sum_{x \pmod p} \Big|\displaystyle \sum_{1 \leq b \leq B} \chi(x + b)\Big|^{2r}$. Using H\"older's Inequality we get \begin{equation}\label{burgess V} V \leq V_1^{1- \frac{1}{r}}V_2^{\frac{1}{2r}}W^{\frac{1}{2r}}. 
\end{equation} First note that \begin{equation*}\label{burgess V1} V_1 = \lfloor A\rfloor N\le AN. \end{equation*} From Lemma \ref{burgess V2 lemma}, for $\lfloor A\rfloor\ge 28$ and $\lfloor A\rfloor<\frac{N}{12}$, we have \begin{equation}\label{burgess V2} V_2 \leq 2AN\left(\frac{AN}{p} + \log(1.85 A)\right). \end{equation} We can also bound $W$, since by Theorem A, we have (for $r\le 9B$): \begin{equation}\label{burgess W} W \le \frac{(2r)!}{2^r r!}B^r p + (2r-1)B^{2r}\sqrt{p} = (2r-1)!! B^r p + (2r-1) B^{2r}\sqrt{p}. \end{equation} Let's head back to proving the Burgess bound. We will let $AB = kN$ for $k$ a real number to be chosen later. Using the inequalities for $V_1,V_2$ and $W$ together with \eqref{burgess V} yields the following upper bound for $\frac{V}{H}$: \begin{multline}\label{Opti} \frac{V}{H} \le \frac{1}{\lfloor A\rfloor \lfloor B\rfloor}V_1^{1-\frac{1}{r}}V_2^{\frac{1}{2r}}W^{\frac{1}{2r}} \leq \frac{\frac{AB}{\lfloor A\rfloor\lfloor B\rfloor} }{(AB)^{\frac{1}{2r}}}\cdot\frac{(2WB)^{\frac{1}{2r}}}{B}\left(\frac{AN}{p} + \log{(1.85A)}\right)^{\frac{1}{2r}}N^{1-\frac{1}{2r}} \\\le \frac{A}{A-1}\cdot\frac{B}{B-1}\cdot\frac{1}{k^{\frac{1}{2r}}}\cdot\frac{(2WB)^{\frac{1}{2r}}}{B}\left(\frac{AN}{p} + \log{(1.85A)}\right)^{\frac{1}{2r}}N^{1-\frac{1}{r}}. \end{multline} Because of \eqref{Opti} we can see that a good choice for $B$ is the one that minimizes $\frac{WB}{B^{2r}}$. Using \eqref{burgess W}, we seek to minimize the expression $(2r-1)!!\frac{p}{B^{r-1}} + (2r-1)Bp^{\frac{1}{2}}$. We take the derivative with respect to $B$ and set it equal to zero. After this process we get that a good $B$ is \begin{equation}\label{burgess B} B = \big((2r-3)!!(r-1)\big)^{\frac{1}{r}}p^{\frac{1}{2r}}. \end{equation} Using this value of $B$ we get \begin{equation}\label{WB} \frac{(2WB)^{\frac{1}{2r}}}{B} \leq \left(\frac{2r(2r-1)}{r-1}\right)^{\frac{1}{2r}}(r-1)^{\frac{1}{2r^2}}\big((2r-3)!!\big)^{\frac{1}{2r^2}}p^{\frac{r+1}{4r^2}}. 
\end{equation} Now we must try to bound $\frac{AN}{p} + \log{(1.85A)}$. To do this, we can use the P\'olya--Vinogradov inequality to give an upper bound for $N$, since for $N$ large, the P\'olya--Vinogradov inequality would be a better bound than the Burgess inequality. Indeed, if \begin{equation}\label{range} N \geq p^{\frac{1}{2} + \frac{1}{4r}}\log{p}, \end{equation} then, since $c_1(r)\ge 1$, we have $$c_1(r)N^{1-\frac{1}{r}}p^{\frac{r+1}{4r^2}}(\log{p})^{\frac{1}{r}} \ge \sqrt{p}\log{p}.$$ Therefore, from the P\'olya--Vinogradov inequality (see Section $9.4$ in \cite{MV2007}) we can conclude that $|S_{\chi}(M,N)|\le c_1(r)N^{1-\frac{1}{r}}p^{\frac{r+1}{4r^2}}\left(\log{p}\right)^{\frac{1}{r}},$ whenever we have \eqref{range}. If we have $r\ge 3$, then we can use the Burgess inequality with $r-1$ instead of the P\'olya--Vinogradov inequality, to get a better upper bound on $N$. Indeed, if we let $s$ be a real number that satisfies \begin{equation}\label{s condition} c_1(r-1) \le s^{\frac{1}{r(r-1)}} c_1(r), \end{equation} then if $$N\ge s\, p^{\frac{1}{4} + \frac{1}{2r} + \frac{1}{4r(r-1)}}\log{p},$$ then $$c_1(r)N^{1-\frac{1}{r}}p^{\frac{r+1}{4r^2}}(\log{p})^{\frac{1}{r}}\ge c_1(r-1)N^{1-\frac{1}{r-1}}p^{\frac{r}{4(r-1)^2}}(\log{p})^{\frac{1}{r-1}}.$$ Similarly, we can put a lower bound on $N$, by noting that $|S_{\chi}(M,N)| \le N$. Indeed, $$c_1(r)N^{1-\frac{1}{r}}p^{\frac{r+1}{4r^2}}(\log{p})^{\frac{1}{r}} \ge N,$$ whenever \begin{equation*}\label{range low} N\le c_1(r)^r p^{\frac{1}{4} + \frac{1}{4r}}\log{p}. \end{equation*} Therefore, we may assume that \begin{equation}\label{range for N} c_1(2)^2 p^{\frac{3}{8} }\log{p} < N < p^{\frac{5}{8}}\log{p}, \end{equation} for $r=2$, and that \begin{equation}\label{range2 for N} c_1(r)^r p^{\frac{1}{4} + \frac{1}{4r}}\log{p} < N < s\, p^{\frac{1}{4} + \frac{1}{2r} + \frac{1}{4r(r-1)}}\log{p}, \end{equation} for $r\ge 3$. 
Using that $A = \frac{kN}{B}$, the upper bound for $N$ in \eqref{range for N}, and \eqref{burgess B}, we get \begin{equation}\label{log} \frac{AN}{p} = \frac{kN^2}{pB} \le \frac{k p^{\frac{5}{4}}\log^2{p}}{p B} \le k\log^2{p} , \end{equation} for $r=2$, and for $r\ge 3$, we get \begin{equation}\label{log2} \frac{AN}{p} = \frac{kN^2}{pB} \le \frac{s^2 k p^{\frac{1}{2} +\frac{1}{r}+\frac{1}{2r(r-1)}}\log^2{p}}{p B} = \frac{s^2 k}{\left((2r-3)!!(r-1)\right)^{\frac{1}{r}}p^{\frac{1}{2}-\frac{1}{2r}-\frac{1}{2r(r-1)}}}\log^2{p} , \end{equation} Now we consider what happens to $\log{(1.85A)}$. \begin{equation}\label{la otra vez} \log{(1.85A)} = \log{\left(\frac{1.85kN}{B}\right)} \leq \log{\left(1.85k\log{p}\right)}+\frac{3\log{p}}{8}, \end{equation} for $r=2$, and for $r\ge 3$, we get \begin{equation}\label{la otra vez2} \log{(1.85A)} = \log{\left(\frac{1.85kN}{B}\right)} \leq \log{\left(\frac{1.85 s\,k\log{p}}{((2r-3)!!(r-1))^{\frac{1}{r}}}\right)}+\frac{\log p}{4} + \frac{\log{p}}{4r(r-1)} . \end{equation} Now, let's bound the error term, the part we have labeled as $E(h)$. For any $a,b$ such that $ab = h < N$, we have by induction hypothesis $E(h) \leq c_1(r)(ab)^{1-\frac{1}{r}}p^{\frac{r+1}{4r^2}}(\log p)^{\frac{1}{r}}$. Therefore, \begin{multline}\label{error} \frac{1}{c_1(r)p^{\frac{r+1}{4r^2}}(\log{p})^{\frac{1}{r}}}\cdot\frac{2}{H}\sum_{a,b} E(ab) \le\frac{2}{\lfloor A\rfloor \lfloor B\rfloor}\sum_{1\leq a\leq A}\sum_{1 \leq b \leq B} (ab)^{1-\frac{1}{r}}\\ \le 2\frac{1}{AB}\left(\int_1^{A+1} t^{1-\frac{1}{r}}\,dt\right)\left(\int_1^{B+1} t^{1-\frac{1}{r}}\,dt\right)\frac{AB}{(A-1)(B-1)}\\\le (AB)^{1-\frac{1}{r}}\frac{2}{(2-\frac{1}{r})^2} \left(\frac{(A+1)(B+1)}{AB}\right)^{2-\frac{1}{r}} \frac{AB}{(A-1)(B-1)} \\ = \frac{2r^2}{(2r-1)^2}k^{1-\frac{1}{r}} N^{1-\frac{1}{r}}\left(\frac{(A+1)(B+1)}{AB}\right)^{2-\frac{1}{r}} \frac{AB}{(A-1)(B-1)}. 
\end{multline} Combining equations \eqref{Opti}, \eqref{WB}, \eqref{log}, \eqref{la otra vez} and \eqref{error} with \eqref{burgess ineq V/H + 2/H} yields (for $r=2$) \begin{multline}\label{burgess super messy} \frac{|S_{\chi}(M,N)|}{N^{\frac{1}{2}}p^{\frac{3}{16}}(\log{p})^{\frac{1}{2}}}\leq \frac{AB}{(A-1)(B-1)}\left(12\right)^{\frac{1}{4}}\left(1 + \frac{3}{8k\log{p}} + \frac{\log{\left(1.85k\log{p}\right)}}{k \log^2{p}}\right)^{\frac{1}{4}} \\ + \frac{8}{9}k^{\frac{1}{2}}c_1(2) \left(\frac{(A+1)(B+1)}{AB}\right)^{\frac{3}{2}}\frac{AB}{(A-1)(B-1)}. \end{multline} Similarly, for $r\ge 3$, combining equations \eqref{Opti}, \eqref{WB}, \eqref{log2}, \eqref{la otra vez2} and \eqref{error} with \eqref{burgess ineq V/H + 2/H} yields \begin{multline}\label{burgess super messy2} \frac{|S_{\chi}(M,N)|}{N^{1-\frac{1}{r}}p^{\frac{r+1}{4r^2}}(\log{p})^{\frac{1}{r}}}\leq \left(\frac{2r(2r-1)}{r-1}\right)^{\frac{1}{2r}}\big((2r-3)!!(r-1)\big)^{\frac{1}{2r^2}}\frac{AB}{(A-1)(B-1)} \\ \left(\frac{s^2}{\left((2r-3)!!(r-1)\right)^{\frac{1}{r}}p^{\frac{r-2}{2(r-1)}}} + \frac{1}{4k\log{p}} + \frac{1}{4r(r-1) k \log{p}} + \frac{\log{\left(\frac{1.85s\,k\log{p}}{((2r-3)!!(r-1))^{\frac{1}{r}}}\right)}}{k \log^2{p}}\right)^{\frac{1}{2r}} \\ + \frac{2r^2}{(2r-1)^2}k^{1-\frac{1}{r}}c_1(r) \left(\frac{(A+1)(B+1)}{AB}\right)^{2-\frac{1}{r}}\frac{AB}{(A-1)(B-1)}. \end{multline} Now, if we let $c_1(r)$ be defined as follows \begin{equation}\label{c} c_1(2) = \frac{AB}{(A-1)(B-1)}\left(12\right)^{\frac{1}{4}}\frac{\left(1 + \frac{3}{8k\log p} + \frac{\log{\left(1.85k\log{p}\right)}}{k \log^2{p}}\right)^{\frac{1}{4}}}{1 - \frac{8}{9}k^{\frac{1}{2}} \left(\frac{(A+1)(B+1)}{AB}\right)^{\frac{3}{2}}\left(\frac{AB}{(A-1)(B-1)}\right)}, \end{equation} for $r =2$, and \begin{multline}\label{c2} c_1(r) = \frac{AB}{(A-1)(B-1)}\left(\frac{2r(2r-1)\left((2r-3)!! 
(r-1)\right)^{\frac{1}{r}}}{r-1}\right)^{\frac{1}{2r}}\\ \cdot\frac{\left(\frac{s^2}{\left((2r-3)!!(r-1)\right)^{\frac{1}{r}}p^{\frac{1}{2}-\frac{1}{2r}-\frac{1}{2r(r-1)}}} + \frac{1}{4k\log{p}} + \frac{1}{4r(r-1) k \log{p}} + \frac{\log{\left(\frac{1.85s\,k\log{p}}{((2r-3)!!(r-1))^{\frac{1}{r}}}\right)}}{k \log^2{p}}\right)^{\frac{1}{2r}}}{1 - \frac{2r^2}{(2r-1)^2}k^{1-\frac{1}{r}}\left(\frac{(A+1)(B+1)}{AB}\right)^{2-\frac{1}{r}}\left(\frac{AB}{(A-1)(B-1)}\right)}, \end{multline} for $r\ge 3$. Therefore from \eqref{burgess super messy} and \eqref{burgess super messy2}, we get that $$|S_{\chi}(M,N)| \le c_1(r) N^{1-\frac{1}{r}}p^{\frac{r+1}{4r^2}}(\log{p})^{\frac{1}{r}}.$$ All we have to do is pick $k$ to minimize $c_1(r)$ in such a way that $\lfloor A\rfloor\ge 28$, and that $N\ge 12 A$. First, we'll start by showing that \begin{equation*}\label{burgess B>12} B \ge 15. \end{equation*} Since $B = ((2r-3)!!(r-1))^{\frac{1}{r}}p^{\frac{1}{2r}}$, we can just manually check for $ 2\le r\le 20$ that the inequality is satisfied. To show that it works for $r\ge 21$, we can show that \begin{equation}\label{machin32} ((2r-3)!!(r-1))^{\frac{1}{r}} \ge 15, \end{equation} by noticing that it works for $r=21$ and that the left hand side of \eqref{machin32} is increasing. Indeed, the left hand side is increasing; by noticing that $(2r-3)(r-1) < (2r-1)(r+1)$, we get $$(2r-3)!!(r-1) < \frac{(2r-1)^{r-1}(r+1)^{r-1}}{(r-1)^{r-2}} < \frac{(2r-1)^{r}(r+1)^{r}}{(r-1)^{r}},$$ implying that $$\frac{1}{r}\log{((2r-3)!!(r-1))} < \log{((2r-1)(r+1))} - \log{(r-1)},$$ which implies $$\frac{r+1}{r}\log{((2r-3)!!(r-1))} < \log{((2r-1)!!)} + \log{(r+1)},$$ and hence $$\log{\left(((2r-3)!!(r-1))^{\frac{1}{r}}\right)} < \log{\left(((2r-1)!!)(r+1))^{\frac{1}{r+1}}\right)}.$$ \begin{comment}We also have to make sure that $\log{(1.85A)} < \frac{\log{p}}{2} -\frac{\log{p}}{4r} + \log{(1.9)}.$ Then, we have to pick such a $k$ in terms of $r$ to make $c$ as small as possible. 
\end{comment} Using that $B \ge 15$, since $A = \frac{kN}{B}$, then \begin{equation*}\label{burgess A} A = \frac{kN}{B} < \frac{kN}{12} < \frac{N}{12}, \end{equation*} whenever $k < 1$. \begin{comment} by remembering that $k = \frac{AB}{N}$. Let's assume that $p \ge 10^7$. It is not hard to check that $N\ge 12A$, since $\log{(10^7)} \ge 16$ and hence for $N \ge 48$, we have $A \le \frac{N}{16}+1 \le \frac{N}{12}$. Now for $p \ge 10^4$, we have $N \ge 48$ because $N \ge c_1(r)^r p^{\frac{1}{4} + \frac{1}{4r}}\log{p} \ge p^{\frac{1}{4}}\log{p} > 48$. \end{comment} Let $k \ge \frac{1}{30}$. To check that $\lfloor A\rfloor \ge 28$, we use \eqref{range2 for N} and \eqref{burgess B} and we note that \begin{equation*} \lfloor A\rfloor \ge A-1 \ge\frac{N}{30B}-1 \ge \frac{c_1(r)^r p^{\frac{1}{4}-\frac{1}{4r}}\log{p}}{30((2r-3)!!(r-1))^{\frac{1}{r}}}-1. \end{equation*} Table \ref{burgess table lower bounds for c} shows the lower bound $c_1(r)$ must satisfy to have $\lfloor A\rfloor\ge 28$ in different situations. \begin{table} \begin{center} \begin{tabular}{|c| c | c| c|} \hline $r$ & $p\ge 10^7$ & $p\ge 10^{10}$ & $p\ge 10^{20}$ \\ \hline $2$ & $2.68289$ & $1.45765$ & $0.24442$ \\ \hline $3$ & $1.88354$ & $1.13939$ & $0.251637$ \\ \hline $4$ & $1.6153$ & $1.06881$ & $0.305418$ \\ \hline $5$ & $1.48379$ & $1.04807$ & $0.363232$ \\ \hline $6$ & $1.40512$ & $1.04167$ & $0.417191$ \\ \hline $7$ & $1.35216$ & $1.04007$ & $0.465518$ \\ \hline $8$ & $1.31369$ & $1.04016$ & $0.508197$ \\ \hline $9$ & $1.28422$ & $1.04077$ & $0.545749$ \\ \hline $10$ & $1.26077$ & $1.04147$ & $0.578819$ \\ \hline \end{tabular} \end{center} \caption{Lower bounds for the constant $c_1(r)$ in the Burgess inequality to satisfy $\lfloor A\rfloor\ge 28$.}\label{burgess table lower bounds for c} \end{table} \begin{comment}The last thing to check is that $\log{(1.85A)} < \frac{\log{p}}{2} - \frac{\log{p}}{4r} + \log{(1.9)}$. 
Using \eqref{range for N} and \eqref{burgess A} we get \begin{multline*} \log{(1.85A)} \le \log{\left(\frac{1.85N}{p^{\frac{1}{2r}}\log{p}} + 1.85\right)} \le \log{\left(1.85 p^{\frac{1}{2}-\frac{1}{4r}} + 1.85\right)} \\= \left(\frac{1}{2} - \frac{1}{4r}\right)\log{p} + \log{(1.85)} + \log{\left(1+\frac{1}{p^{\frac{1}{2}-\frac{1}{4r}}}\right)} < \frac{\log{p}}{2} - \frac{\log{p}}{4r} + \log{(1.9)}, \end{multline*} the last inequality being true for $p\ge 10^5$ and $r\ge 2$. Now, using \eqref{burgess B}, \eqref{burgess A} and \eqref{range for N}, we can place the following bounds on $k$: \begin{equation*}\label{burgess range of k} k\ge \frac{\left((2r-3)!! (r-1)\right)^{\frac{1}{r}}}{\log{p}} - \frac{1}{p^{\frac{1}{2r}}\log{p}} , \end{equation*} and \begin{equation*} k \le \frac{\left((2r-3)!! (r-1)\right)^{\frac{1}{r}}}{\log{p}} + \frac{\left((2r-3)!! (r-1)\right)^{\frac{1}{r}}}{p^{\frac{1}{4}-\frac{1}{4r}}\log{p}}. \end{equation*} \end{comment} Now, $B$ is defined in terms of $r$ and $p$. By fixing an $r$ and a $p_0$ (a fixed lower bound for $p$), we can calculate $B$ in terms of $r$ and $p_0$. Let $c_1'(r)$ be a parameter satisfying $c_1'(r) < c_1(r)$. $A$ is written in terms of $k$ and $N$ and from \eqref{range for N} and \eqref{range2 for N} we have a range for $N$ in terms of $c_1(r)$, $p$, $r$, $k$ and $s$. From this we can find a lower bound for $A$ in terms of $c_1'(r)$, $k$, $r$, $s$ and $p$. The parameter $s$ is optimal when it is as small as possible so we fix $s$ (in terms of $r$, $c_1'(r)$ and $c_1(r-1)$) to be the smallest real satisfying \eqref{s condition}. After plugging in $A$, $B$, $r$, $s$, $k$, $p_0$ and $c_1'(r)$ to the equations \eqref{c} and \eqref{c2}, we can find a good value of $k \in [\frac{1}{30},1)$, and a good value of $c_1'(r)$ for each $r$ and $p_0$ to find the smallest $c_1(r)$ we can. 
After making the choices of $k$ and $c_1'(r)$ described in Table \ref{last table 1}, we conclude that $c_1(r)$ has the values listed in Table \ref{super table burgess} as upper bounds. \begin{table}[h] \begin{center} \begin{tabular}{c| c | c| c| c | c | c |} \cline{2-7} & \multicolumn{2}{|c|}{$p_0 = 10^7$} & \multicolumn{2}{|c|}{$p_0 = 10^{10}$} & \multicolumn{2}{|c|}{$p_0 = 10^{20}$ }\\ \hline \multicolumn{1}{|c|}{$r$} & $k$ & $c_1'(r)$ & $k$ & $c_1'(r)$ & $k$ & $c_1'(r)$ \\ \hline \multicolumn{1}{|c|}{$2$} & 2/45 & 2.738 & 1/30 & 2.517 & 1/30 & 2.354\\ \hline \multicolumn{1}{|c|}{$3$} & 1/16 & 2.019 & 11/150 & 1.737 & 2/15 & 1.369\\ \hline \multicolumn{1}{|c|}{$4$} & 1/12 & 1.729 & 31/300 & 1.515 & 37/300 & 1.310\\ \hline \multicolumn{1}{|c|}{$5$} & 1/12 & 1.610 & 7/75 & 1.456 & 31/300 & 1.298\\ \hline \multicolumn{1}{|c|}{$6$} & 1/12 & 1.548 & 1/12 & 1.426 & 7/75 & 1.289\\ \hline \multicolumn{1}{|c|}{$7$} & 11/150 & 1.504 &11/150 & 1.404 & 1/12 & 1.281\\ \hline \multicolumn{1}{|c|}{$8$} & 19/300 & 1.470 & 19/300 & 1.383 & 1/12 & 1.272\\ \hline \multicolumn{1}{|c|}{$9$} & 19/300 & 1.441 & 19/300 & 1.366 & 11/150 & 1.264\\ \hline \multicolumn{1}{|c|}{$10$} & 4/75 & 1.415 & 4/75 & 1.349 & $11/150$ & 1.256\\ \hline \end{tabular} \end{center} \caption{Values chosen for $k$ and $c_1'(r)$ to build Table \ref{super table burgess}.}\label{last table 1} \end{table} \end{proof} \begin{proof}[Proof of Corollary \ref{burgess corollary 1}] We begin by pointing out that Theorem \ref{burgess kiks 1} proves this for $2\le r\le 10$ and $p \ge 10^{7}$. We also know that it is true for the $r=1$ case by the P\'olya--Vinogradov inequality (Vinogradov proved it with the constant 1 in \cite{Vinogradov}). Following the proof of Theorem \ref{burgess kiks 1}, we also have that $B \ge 15$ for all $r$ and hence, for any $k < 1$, we have $A < \frac{N}{12}$. 
It is also worth pointing out that we can use $s = 1$, since now the constant $2.74$ is fixed as the constant in our upper bound, instead of a constant depending on $r$. We need to show that you can pick a $k$ such that $\lfloor A\rfloor \ge 28$. First, let's prove that $2.74^r \ge ((2r-3)!!(r-1))^{\frac{1}{r}}$. Indeed, for all $r \ge 1$ we have $$2.74^r > 2r \ge ((2r -3)!!(r-1))^{\frac{1}{r}}.$$ Now we have $$A = \frac{kN}{B} \ge \frac{k (2.74)^r p^{\frac{1}{4}-\frac{1}{4r}}\log{p}}{((2r-3)!!(r-1))^{\frac{1}{r}}} \ge k p^{\frac{1}{4}-\frac{1}{4r}}\log{p} > 29,$$ whenever $k > \frac{29}{p^{\frac{1}{4}-\frac{1}{4r}}\log{p}}$. We replace $B$ by 15 in \eqref{c2} (since $B \ge 15$), and we can see that the only factors that don't decrease with $r$ are the $k^{1-\frac{1}{r}}$ term which appears in the denominator, and the $\left(2-\frac{1}{r}\right)$ exponent in the denominator. With this in mind, let $c(r)$ be defined as follows for $r \ge 4$: \begin{multline}\label{cmatad} c(r) = \frac{15A}{14(A-1)}\left(\frac{2r(2r-1)\left((2r-3)!! (r-1)\right)^{\frac{1}{r}}}{r-1}\right)^{\frac{1}{2r}}\\ \cdot\frac{\left(\frac{1}{\left((2r-3)!!(r-1)\right)^{\frac{1}{r}}p^{\frac{1}{2}-\frac{1}{2r}-\frac{1}{2r(r-1)}}} + \frac{1}{4k\log{p}} + \frac{1}{4r(r-1) k \log{p}} + \frac{\log{\left(\frac{1.85\,k\log{p}}{((2r-3)!!(r-1))^{\frac{1}{r}}}\right)}}{k \log^2{p}}\right)^{\frac{1}{2r}}}{1 - \frac{2r^2}{(2r-1)^2}k\left(\frac{16(A+1)}{15A}\right)^2\left(\frac{15A}{14(A-1)}\right)}. \end{multline} Letting $k = \frac{11}{64}$, $A \ge kp^{\frac{1}{4}-\frac{1}{4r}}$ and $p \ge 10^7 $ we confirm that $c(r) \le 2.74$ whenever $r\ge 3$. Since it is also true for $r\le3$, we conclude our corollary. \end{proof} \section{Improving McGown's theorem}\label{burgess section 2} The main obstacle in improving the $(\log{p})^{\frac{1}{r}}$ factor in the Burgess inequality is the bound on $V_2$. 
However, if we put a bound on $N$, we can make the proof cleaner while also improving the exponent in $\log{p}$ to $\frac{1}{2r}$. First we prove a lemma regarding $V_2$ and then we will be able to prove Theorem \ref{burgess kiks 2}. \begin{lemma}\label{burgess V2 improved} Let $p$ be a prime, and $N$ be a positive integer. Let $A \ge 30$ be an integer such that $N>7A$ and $2AN< p$. Let $v(x)$ be defined as in \eqref{burgess v(x)}. Then \begin{equation*}\label{burgess V2 eq} V_2 = \sum_{x\kern-3pt\mod{p}}v^2(x) \leq 2AN \log(1.85 A). \end{equation*} \end{lemma} \begin{proof} The proof is essentially the same as that of Lemma \ref{burgess V2 lemma}. Recall that $V_2$ is the number of quadruples $(a_1,a_2,n_1,n_2)$ with $1\leq a_1,a_2 \leq A$ and $M < n_1,n_2 \leq M + N$ such that $a_1n_2 \equiv a_2n_1\pmod p$. If $a_1 = a_2$, since $N < p$, we have that $n_1 = n_2$ because $n_1 \equiv n_2 \pmod p$ while $|n_1 - n_2| \leq N < p$. Therefore, the number of quadruples in this case is $AN$. Fixing $a_1\ne a_2$ and writing $$a_1n_2-a_2n_1 = kp,$$ we can put a bound on possible values for $k$. As shown in the proof of Lemma \ref{burgess V2 lemma}, there are at most $\frac{(a_1+a_2)N}{\gcd{(a_1,a_2)}p} + 1$ values of $k$. Since $2AN < p$, then we have that $k$ is uniquely determined. In the proof of Lemma \ref{burgess V2 lemma}, we showed that given $a_1,a_2$ and $k$, the number of pairs $(n_1,n_2)$ is bounded by $N\frac{\gcd{(a_1,a_2)}}{\max\{a_1,a_2\}} + 1$. Now, for $A\ge 30$ and $N > 7A$ we have \begin{equation}\label{orales} \left(1-\frac{6}{\pi^2}\right)\log{(1.85 A)} \ge \left(1-\frac{6}{\pi^2}\right)\log{(55.5)} = 1.57471 > \frac{3}{2} + \frac{1}{14} > \frac{3}{2} + \frac{A}{2N}. 
\end{equation} Using the definition of $S_3$ as in \eqref{S_3}, using the inequalities \eqref{burgess S3} and \eqref{orales}, for $A\ge 30$ and $N>7A$, we have \begin{multline*} V_2 \le AN + 2\sum_{a_1 < a_2}\left(\frac{\gcd{(a_1,a_2)}N}{\max\{a_1,a_2\}} + 1\right)\\ = AN + 2NS_3 + A^2-A \le 2AN\log{(1.85A)}. \end{multline*} \end{proof} Now we are ready to prove Theorem \ref{burgess kiks 2}. \begin{proof}[Proof of Theorem \ref{burgess kiks 2}] The proof is very similar to the proof of Theorem \ref{burgess kiks 1}. We proceed by induction, assuming that for all $h < N$ we have $|S_{\chi}(M,h)|\le c_2(r) h^{1-\frac{1}{r}} p^{\frac{r+1}{4r^2}}(\log{p})^{\frac{1}{2r}}.$ Most of the work in the proof of Theorem \ref{burgess kiks 1} can be replicated, so we'll just point out the things that change. The first change is that by employing Lemma \ref{burgess V2 improved}, \eqref{burgess V2} becomes \begin{equation*}\label{burgess V2 2} V_2 \le 2AN\log{(1.85A)}. \end{equation*} This change affects \eqref{Opti}, by deleting $\frac{AN}{p}$ inside the parentheses. Now it looks as follows: \begin{equation}\label{burgess V/H3} \frac{V}{H} \le \frac{AB}{(A-1)(B-1)}\frac{1}{k^{\frac{1}{2r}}}\frac{(2WB)^{\frac{1}{2r}}}{B}(\log{(1.85A)})^{\frac{1}{2r}} N^{1-\frac{1}{r}}. \end{equation} The next change is the range for $N$, which we deduced by using the P\'olya--Vinogradov inequality, the trivial bound, and the case for $r-1$. Instead of \eqref{range for N}, using our hypothesis and the trivial bound, we now have \begin{equation}\label{new range for N} c_2(2)^r p^{\frac{3}{8}} \sqrt{\log{p}} < N < 2 p^{\frac{5}{8}}, \end{equation} for $r=2$. 
Assuming $c_2(r-1) \le s^{\frac{1}{r(r-1)}}c_2(r)$ for a real number $s$, and using the Burgess inequality for $r-1$ we have, for $r\ge 3$, the following range for $N$ \begin{equation}\label{new range for N2} c_2(r)^r p^{\frac{1}{4} + \frac{1}{4r}} \sqrt{\log{p}} < N < \min\{2 p^{\frac{1}{2} + \frac{1}{4r}},s\,p^{\frac{1}{4}+\frac{1}{2r}+\frac{1}{4r(r-1)}}\sqrt{\log{p}}\}. \end{equation} Using that $A=\frac{kN}{B}$ and \eqref{new range for N}, we get \begin{equation}\label{la otra vez 2} \log{(1.85A)}=\log{\left(\frac{1.85kN}{B}\right)}\le \log{\left(3.7k\right)}+\frac{3\log{p}}{8} \le \frac{3\log{p}}{8}, \end{equation} for $r=2$ (we're assuming $k < 1/4$, which implies $\log{(3.7k)} < 0$). Using \eqref{new range for N2}, yields \begin{equation}\label{la otra vez 22} \log{(1.85A)}=\log{\left(\frac{1.85kN}{B}\right)}\le \log{\left(\frac{1.85s\,k\sqrt{\log{p}}}{((2r-3)!!(r-1))^{\frac{1}{r}}}\right)}+\frac{\log{p}}{4} + \frac{\log{p}}{4r(r-1)}, \end{equation} for $r\ge 3$. The bound for $E(h)$ is almost the same as in \eqref{error}, the only difference being the exponent of $\log{p}$, which is now $\frac{1}{2r}$ instead of $\frac{1}{r}$. Making this change and using both \eqref{WB} and \eqref{la otra vez 2} with \eqref{burgess V/H3} yields (for $r=2$) \begin{multline}\label{burgess super messy 2} \frac{|S_{\chi}(M,N)|}{N^{\frac{1}{2}}p^{\frac{3}{16}}(\log{p})^{\frac{1}{4}} }\le \frac{AB}{(A-1)(B-1)}\left(12\right)^{\frac{1}{4}} \left(\frac{3}{8k} \right)^{\frac{1}{4}} \\ + \frac{AB}{(A-1)(B-1)}\frac{8}{9}k^{\frac{1}{2}}c_2(2) \left(\frac{(A+1)(B+1)}{AB}\right)^{\frac{3}{2}}. 
\end{multline} For $r\ge 3$, using \eqref{WB} and \eqref{la otra vez 22} with \eqref{burgess V/H3} yields \begin{multline}\label{burgess super messy 22} \frac{|S_{\chi}(M,N)|}{N^{1-\frac{1}{r}}p^{\frac{r+1}{4r^2}}(\log{p})^{\frac{1}{2r}} }\le \frac{AB}{(A-1)(B-1)}\left(\frac{2r(2r-1)\left((2r-3)!!(r-1)\right)^{\frac{1}{r}}}{r-1}\right)^{\frac{1}{2r}} \\ \cdot \left(\frac{\log{\left(\frac{1.85s\,k\sqrt{\log{p}}}{((2r-3)!!(r-1))^{\frac{1}{r}}}\right)}}{k\log{p}}+\frac{1}{4k} +\frac{1}{4r (r-1)k}\right)^{\frac{1}{2r}} \\ + \frac{AB}{(A-1)(B-1)}\frac{2r^2}{(2r-1)^2}k^{1-\frac{1}{r}}c_2(r) \left(\frac{(A+1)(B+1)}{AB}\right)^{2-\frac{1}{r}}. \end{multline} Now, if we let $c_2(r)$ be defined as follows \begin{equation}\label{c21} c_2(2) = \frac{A}{A-1}\frac{B}{B-1}\frac{\left(12\right)^{\frac{1}{4}}\left(\frac{3}{8k}\right)^{\frac{1}{4}}}{1 - \frac{8}{9}k^{\frac{1}{2}}\left(\frac{(A+1)(B+1)}{AB}\right)^{\frac{3}{2}}\left(\frac{AB}{(A-1)(B-1)}\right)}, \end{equation} and, for $r\ge 3$, \begin{equation}\label{c22} c_2(r) = \frac{A}{A-1}\frac{B}{B-1}\frac{\left(\frac{2r(2r-1)\left((2r-3)!! (r-1)\right)^{\frac{1}{r}}}{r-1}\left(\frac{\log{\left(\frac{1.85s\,k\sqrt{\log{p}}}{((2r-3)!!(r-1))^{\frac{1}{r}}}\right)}}{k\log{p}}+\frac{1}{4k} +\frac{1}{4r (r-1)k}\right)\right)^{\frac{1}{2r}}}{1 - \frac{2r^2}{(2r-1)^2}k^{1-\frac{1}{r}}\left(\frac{(A+1)(B+1)}{AB}\right)^{2-\frac{1}{r}}\left(\frac{AB}{(A-1)(B-1)}\right)}, \end{equation} for $r\ge 3$. Then, from \eqref{burgess super messy 2} and \eqref{burgess super messy 22}, we get that $$|S_{\chi}(M,N)| \le c_2(r) N^{1-\frac{1}{r}}p^{\frac{r+1}{4r^2}}(\log{p})^{\frac{1}{2r}}.$$ All we have to do is pick $k$ to minimize $c_2(r)$ in such a way that $\lfloor A\rfloor\ge 30$, that $N> 7 A$ and $2AN < p$. \begin{comment}All we have to do is pick $k$ in such a way that $A$ is an integer, i.e., $\frac{kN}{B}$ is an integer. Make sure that $A\ge 30$, that $N > 7 A$ and that $2AN < p$. 
We also have to make sure that $\log{(1.85A)} < \frac{\log{p}}{2} -\frac{\log{p}}{4r}.$ Then, we have to pick such a $k$ in terms of $r$ to make $c$ as small as possible. \end{comment} \begin{comment} when \begin{equation}\label{burgess A2} A = \left\lfloor\frac{N}{9 p^{\frac{1}{2r}}}\right\rfloor + 1, \end{equation} \end{comment} Using that $B \ge 15$, it is not hard to check that $N\ge 7A$. Indeed, since $A = \frac{kN}{B}$, we have $A \le \frac{kN}{15} < \frac{N}{7}$. To check that $\lfloor A\rfloor \ge 30$ for $k\ge \frac{3}{64}$, we do the following: \begin{equation*} \lfloor A\rfloor\ge A-1 \ge \frac{3N}{64B} -1 \ge \frac{3c_2(r)^r p^{\frac{1}{4}-\frac{1}{4r}}\sqrt{\log{p}}}{64((2r-3)!!(r-1))^{\frac{1}{r}}}-1. \end{equation*} Table \ref{burgess table lower bounds for c 2} shows the lower bound $c$ must satisfy to have $\lfloor A\rfloor\ge 30$ in different situations. \begin{table}[h] \begin{center} \begin{tabular}{|c| c | c| c|} \hline $r$ & $p\ge 10^{10}$ & $p\ge 10^{15}$ & $p\ge 10^{20}$ \\ \hline $2$ & $2.78392$ & $1.22500$ & $0.55514$ \\ \hline $3$ & $1.75393$ & $0.86474$ & $0.43480$ \\ \hline $4$ & $1.47708$ & $0.81850$ & $0.46029$ \\ \hline $5$ & $1.35767$ & $0.82260$ & $0.50431$ \\ \hline $6$ & $1.29240$ & $0.83775$ & $0.54839$ \\ \hline $7$ & $1.25127$ & $0.85450$ & $0.58848$ \\ \hline $8$ & $1.22279$ & $0.87022$ & $0.62388$ \\ \hline $9$ & $1.20171$ & $0.88422$ & $0.65489$ \\ \hline $10$ & $1.18536$ & $0.89649$ & $0.68202$ \\ \hline \end{tabular} \end{center} \caption{Lower bounds for the constant $c_2(r)$ in the Burgess inequality to satisfy $\lfloor A\rfloor\ge 30$.}\label{burgess table lower bounds for c 2} \end{table} Let's now verify that $2AN < p$. 
Indeed, from the fact that $A=\frac{kN}{B}$ and from \eqref{new range for N}, we have \begin{equation*} 2AN = \frac{2kN^2}{B} \le \frac{8kp}{((2r-3)!!(r-1))^{\frac{1}{r}}} < p, \end{equation*} whenever $k < \min\left\{\frac{((2r-3)!!(r-1))^{\frac{1}{r}}}{8},1\right\}.$ \begin{comment} the last inequality being true for $p \ge 15000$ and $r\ge 2$. The last thing to check is that $\log{(1.85A)} < \frac{\log{p}}{2} - \frac{\log{p}}{4r}$. Using \eqref{new range for N} and \eqref{burgess A2} we get \begin{multline*} \log{(1.85A)} \le \log{\left(\frac{1.85N}{9 p^{\frac{1}{2r}}} + 1.85\right)} \le \log{\left(\frac{3.7 p^{\frac{1}{2}-\frac{1}{4r}}}{9} + 1.85\right)} \\= \left(\frac{1}{2} - \frac{1}{4r}\right)\log{p} + \log{\left(\frac{3.7}{9}+\frac{1.85}{p^{\frac{1}{2}-\frac{1}{4r}}}\right)} < \frac{\log{p}}{2} - \frac{\log{p}}{4r}, \end{multline*} the last inequality being true for $p\ge 23$ and $r\ge 2$. Now, using \eqref{burgess B}, \eqref{burgess A2} and \eqref{new range for N}, we can place the following bounds on $k$: \begin{equation*} k \ge \frac{\left((2r-3)!! (r-1)\right)^{\frac{1}{r}}}{9} - \frac{1}{9 p^{\frac{1}{2r}}}, \end{equation*} and \begin{equation*} k \le \frac{\left((2r-3)!! (r-1)\right)^{\frac{1}{r}}}{9} + \frac{\left((2r-3)!! (r-1)\right)^{\frac{1}{r}}}{p^{\frac{1}{4}-\frac{1}{4r}}\sqrt{\log{p}}} . \end{equation*} \end{comment} As in the proof of Theorem \ref{burgess kiks 1}, we can find bounds for $c_2(r)$ by controlling the parameters $p_0$, $c_2'(r)$ and $k$. We find a good value of $k \in [\frac{3}{64},\frac{((2r-3)!!(r-1))^{\frac{1}{r}}}{8})$ and a good value of $c_2'(r)$ for each $r$ and $p_0$, and plug in the values of $B$, $k$, and a lower bound for $A$ on \eqref{c21} to find $c_2(2)$ and on \eqref{c22} to find $c_2(r)$ for $r\ge 3$ in Table \ref{table kiks burgess} and conclude the theorem. The values of $k$ and $c_2'(r)$ we chose can be found in Table \ref{last table 2}. 
\begin{table}[h] \begin{center} \begin{tabular}{c| c | c| c| c | c | c |} \cline{2-7} & \multicolumn{2}{|c|}{$p_0 = 10^{10}$} & \multicolumn{2}{|c|}{$p_0 = 10^{15}$} & \multicolumn{2}{|c|}{$p_0 = 10^{20}$ }\\ \hline \multicolumn{1}{|c|}{$r$} & $k$ & $c_2'(r)$ & $k$ & $c_2'(r)$ & $k$ & $c_2'(r)$ \\ \hline \multicolumn{1}{|c|}{$2$} & 0.124 & 3.65 & 0.124 & 3.58 & 0.124 & 3.57\\ \hline \multicolumn{1}{|c|}{$3$} & 0.126 & 2.58 & 0.131 & 2.51 & 0.135 & 2.49\\ \hline \multicolumn{1}{|c|}{$4$} & 0.106 & 2.19 & 0.116 & 2.12 & 0.120 & 2.10\\ \hline \multicolumn{1}{|c|}{$5$} & 0.091 & 1.98 & 0.101 & 1.92 & 0.107 & 1.90\\ \hline \multicolumn{1}{|c|}{$6$} & 0.080 & 1.85 & 0.090 & 1.79 & 0.095 & 1.77\\ \hline \multicolumn{1}{|c|}{$7$} & 0.072 & 1.75 & 0.079 & 1.70 & 0.084 & 1.68\\ \hline \multicolumn{1}{|c|}{$8$} & 0.064 & 1.68 & 0.071 & 1.635 & 0.077 & 1.61\\ \hline \multicolumn{1}{|c|}{$9$} & 0.058 & 1.625 & 0.065 & 1.58 & 0.070 & 1.56\\ \hline \multicolumn{1}{|c|}{$10$} & 0.054 & 1.579 & 0.060 & 1.54 & 0.064 & 1.52\\ \hline \end{tabular} \end{center} \caption{Values chosen for $k$ and $c_2'(r)$ to build Table \ref{table kiks burgess}.}\label{last table 2} \end{table} \end{proof} \begin{proof}[Proof of Corollary \ref{burgess kiks corollary 2}] By Theorem \ref{burgess kiks 2}, we have our desired result whenever $N < 2 p^{\frac{1}{2} + \frac{1}{4r}}$. Therefore, the only thing we need to prove is that for $p\ge 10^{10}$ and $r\ge 3$, $N < 2 p^{\frac{1}{2}+\frac{1}{4r}}$. Since the induction in the proof of Theorem \ref{burgess kiks 2} relied on the upper bound for $N$, we can't use the Burgess inequalities in Theorem \ref{burgess kiks 2} to give an upper bound for $N$ in this corollary. However, we can use the Burgess inequalities from Theorem \ref{burgess kiks 1} to improve the upper bound for $N$. 
Indeed, for $p\ge 10^{10}$, we have $$|S_{\chi}(M,N)| \le 2.6 N^{1-\frac{1}{2}}p^{\frac{3}{16}}(\log{p})^{\frac{1}{2}}.$$ If $$N \ge (2.6)^{\frac{2r}{r-1}}p^{\frac{3}{8}-\frac{1}{8r} - \frac{3}{8r(r-1)}}\sqrt{\log{p}},$$ then $$N^{1-\frac{1}{r}}p^{\frac{r+1}{4r^2}}(\log{p})^{\frac{1}{2r}} \ge 2.6 N^{1-\frac{1}{2}}p^{\frac{3}{16}}\sqrt{\log{p}} \ge |S_{\chi}(M,N)|.$$ Therefore, we may assume that \begin{equation}\label{burgess corollary 2 eq} N \le (2.6)^{\frac{2r}{r-1}}p^{\frac{3}{8}-\frac{1}{8r} - \frac{3}{8r(r-1)}}\sqrt{\log{p}}. \end{equation} Now, all we need to conclude is to show that the right hand side of \eqref{burgess corollary 2 eq} is less than $2p^{\frac{1}{2} +\frac{1}{4r}}$. Using that $p \ge 10^{10}$, we can verify this manually for $r\in\{3,4,\ldots,21\}$. Now, for $r\ge 22$ we have $$N \le (2.6)^{\frac{2r}{r-1}}p^{\frac{3}{8} -\frac{1}{8r} - \frac{3}{8r(r-1)}}\sqrt{\log{p}} \le (2.6)^{\frac{44}{21}}p^{\frac{3}{8}}\sqrt{\log{p}} < 2p^{\frac{1}{2}}.$$ The last inequality is true whenever $p\ge 10^{10}$. \end{proof} \begin{remark} Booker and McGown in their proofs have $A$ range through only prime numbers. This idea makes the constants converge quicker. For large enough $p$, it doesn't improve the numbers, but it does for smaller $p$. To save space, we omitted using that technique here, instead focusing on other techniques that made an impact on the ``asymptotic'' constant. One of the nice ideas not used by McGown or Booker is the idea of using Burgess for smaller $r$ to help out with the larger $r$. This allows the theorems to extend to the whole range when $r \ge 3$. \end{remark} \begin{remark} Theorem A is a little stronger in \cite{ETk} when the order of the character is bigger. Therefore, one could use that theorem to get better constants for cubic characters, quartic characters and so on. 
\end{remark} \section{Least $k$-th power non-residue}\label{section p^1/6} To prove our results on the least $k$-th power non-residues, we will need the following estimates from \cite{RS1962}: \begin{lemma}\label{Rosser and Scho} Let $B = \displaystyle\lim_{m\rightarrow\infty} \sum_{p\le m}\frac{1}{p} - \log{\log{m}}$, and let $\pi(x)$ be the number of primes up to $x$. Then the following estimates are true: \begin{align*} \log{\log{x}} + B - \frac{1}{2\log^2{x}} < \sum_{p\le x} \frac{1}{p} & \mbox{ for } x > 1,\\ \sum_{p\le x} \frac{1}{p} < \log{\log{x}} + B + \frac{1}{2\log^2{x}} & \mbox{ for } x \ge 286,\\ \pi(x) < \frac{x}{\log{x}}\left(1 + \frac{3}{2\log{x}}\right) & \mbox{ for } x > 1. \end{align*} \end{lemma} From it we derive the following immediate corollary: \begin{corollary}\label{Vino corollary} For real numbers $x,y$ satisfying $x > y > 1$ and $x \ge 286$, the following estimate is true: \begin{equation*} \sum_{y < p\le x} \frac{1}{p} < \log{\log{x}}-\log{\log{y}} + \frac{1}{2\log^2{x}} + \frac{1}{2\log^2{y}}. \end{equation*} \end{corollary} Now we are ready to prove the key lemma (a lower bound on a character sum), which is the essence of Vinogradov's trick. \begin{lemma}\label{lower bound non-residue} Let $x\ge 286$ be a real number, and let $ y = x^{\frac{1}{\sqrt{e}} + \delta}$ for some $\delta > 0$. Let $\chi$ be a non-principal character $\bmod{\,p}$ for some prime $p$. If $\chi(n) = 1$ for all $n\le y$, then \begin{equation*} \left|\sum_{n\le x} \chi(n)\right| \ge x\left(2\log{(\delta\sqrt{e}+1)} - \frac{1}{\log^2{x}} - \frac{1}{\log^2{y}} - \frac{1}{x}\right). 
\end{equation*} \end{lemma} \begin{proof} Since $\chi(n)$ is totally multiplicative, $\chi(n) = 1$ for all $n \le y$, and $x < p$, then \begin{align*}\label{vinogradov trick step 1} \left|\sum_{n\le x} \chi(n)\right| &= \left|\sum_{\substack{n\le x\\ \chi(n)=1}}\chi(n) + \sum_{\substack{n\le x\\ \chi(n)\ne 1}}\chi(n)\right|\\ &= \left|\sum_{n\le x} 1 -\sum_{\substack{n\le x\\ \chi(n)\ne 1}}1 + \sum_{\substack{n\le x\\ \chi(n)\ne 1}}\chi(n)\right|\\ &\ge \sum_{n\le x} 1 - 2\sum_{\substack{n \le x\\ \chi(n) \ne 1}}1\\ &\ge \sum_{n\le x} 1 - 2\sum_{\substack{y < q \le x\\ \chi(q) \ne 1}}\sum_{n\le \frac{x}{q}}1, \end{align*} where the sum ranges over $q$ prime. Therefore we have \begin{equation*} \left|\sum_{n\le x} \chi(n)\right| \ge \left\lfloor x\right\rfloor - 2\sum_{y < q \le x} \left\lfloor\frac{x}{q}\right\rfloor \ge x - 1 - 2x\sum_{y < q \le x} \frac{1}{q} . \end{equation*} Using Corollary \ref{Vino corollary} to estimate the sum of the reciprocals of primes we get the desired inequality. \end{proof} We can now prove Theorem 3. We will use the explicit Burgess inequality proved as Corollary \ref{burgess corollary 1} because it works for all $r$. \begin{proof}[Proof of Theorem \ref{theorem p^1.6}] Let $\chi$ be a character $\bmod{\,p}$. Then if $n < p$ and $\chi(n) \neq 1$, $n$ is a $k$-th power non-residue. Let $r$ be an integer. Let $x\ge 286$ be a real number and let $y = x^{\frac{1}{\sqrt{e}}+\delta} = p^{1/6}$ for some $\delta>0$. Assume that $\chi(n) = 1$ for all $n\le y$. Now by Corollary \ref{burgess corollary 1} and Lemma \ref{lower bound non-residue} we have \begin{equation*} 2.74x^{1-\frac{1}{r}} p^{\frac{r+1}{4r^2}}(\log{p})^{\frac{1}{r}} \ge x\left(2\log{(\delta\sqrt{e}+1)} - \frac{1}{\log^2{x}} - \frac{1}{\log^2{y}} - \frac{1}{x}\right). 
\end{equation*} Now, letting $x = p^{\frac{1}{4} + \frac{1}{2r}}$ we get \begin{equation}\label{p^1/6} 2.74 p^{\frac{\log{\log{p}}}{r \log{p}} - \frac{1}{4r^2}} \ge 2\log{(\delta\sqrt{e}+1)} - \frac{1}{\log^2{x}} - \frac{1}{\log^2{y}} - \frac{1}{x}. \end{equation} Picking $r = 22$, one finds that $\delta = 0.00458\ldots$. For $p \ge 10^{4732}$, the right hand side of \eqref{p^1/6} is bigger than the left hand side, showing that $\chi(n)$ is not always 1 for $n \le y = p^{1/6}$, and hence the theorem is true. \end{proof} \begin{remark} To be able to use Theorem \ref{burgess kiks 2} to improve Theorem \ref{theorem p^1.6}, we would need to calculate what happens for $r > 20$ since the restriction $y = p^{1/6}$ implies $r > 20$. Since we know that $p$ will be large, we can also pick a large $p_0$ and then find a good constant for the Burgess inequality when $p$ is very large and $r > 20$. After doing all of this work, one could show that Theorem \ref{theorem p^1.6} works for $p \ge 10^{3850}$. \end{remark} \nocite{Fri1987} \nocite{PPP} \end{document}
\begin{document} \author{Karol Gietka} \email[]{[email protected]} \author{Christoph Hotter} \author{Helmut Ritsch} \affiliation{Institut f\"ur Theoretische Physik, Universit\"at Innsbruck, A-6020 Innsbruck, Austria} \title{Unique Steady-State Squeezing in a Driven Quantum Rabi Model} \begin{abstract} Squeezing is essential to many quantum technologies and our understanding of quantum physics. Here we develop a theory of steady-state squeezing that can be generated in the closed and open quantum Rabi as well as Dicke model. To this end, we eliminate the spin dynamics which effectively leads to an abstract harmonic oscillator whose eigenstates are squeezed with respect to the physical harmonic oscillator. The generated form of squeezing has the unique property of time-independent uncertainties and squeezed dynamics, a novel type of quantum behavior. Such squeezing might find applications in continuous back-action evading measurements and should already be observable in optomechanical systems and Coulomb crystals. \end{abstract} \date{\today} \maketitle \section{Introduction} Squeezing~\cite{gardiner2004quantum,Zubairy_2005_squeezing} relies on redistributing quantum uncertainties between two non-commuting observables. The primary example is the squeezing of light~\cite{squeezing1983walls}, where the uncertainties are redistributed between the strength of electric and magnetic fields with respect to a coherent state where the uncertainties are equal. Squeezing is a precious quantum resource as it is rather robust to decoherence and dissipation. 
For this reason, it finds applications in many quantum technologies with the most prominent ones being high-precision measurements~\cite{laporta1987squeezedpolariztion,Kimble1987precision,polzik1992squeezing,geo6002011squeezing,grote2013firstsqueezing,SCHNABEL2017squeezing,ligo2019enhanced,virgo2019increasingsqueezeed,ligovirgo2021new,pan2023robustbeyondNOON, pedrozo2020entanglement, colombo2022timereversal} and entanglement-based quantum key distribution~\cite{schnabel2015CVkeydist,yongmin2019cvsqueezedqkd,Derkach_2020squeezingQKD,timothy2022squeezedlaser}. On the other hand, squeezing is a form of quantum correlation, important in the context of quantum phase transitions~\cite{plenio2015QRMphasetrans} and is used to study the fundamental aspects of quantum physics~\cite{koy2018squeezingbelltest}. The quantum Rabi model~\cite{Braak_2016qrmcelebration80,Xie_2017rabimodelsolutionanddynamics} is a paradigmatic model in physics that describes a quantized harmonic oscillator coupled to a two-level system and its Hamiltonian reads ($\hbar=1$) \begin{align} \hat H = \omega \hat a^\dagger \hat a + \frac{\Omega}{2}\hat \sigma_z + \frac{g}{2}(\hat a + \hat a^\dagger)\hat \sigma_x, \label{eq:H_rabi} \end{align} where $\hat a$ and $\hat a^\dagger$ are the annihilation and creation operators for a harmonic oscillator with frequency $\omega$. The Pauli matrices $\hat \sigma_z$ and $\hat \sigma_x$ describe the two-level system (here interchangeably referred to as spin) with frequency $\Omega$ and $g$ is the interaction strength between the two sub-systems. The interaction term can be rewritten with the help of spin raising and lowering operators, $\hat \sigma_x =(\hat \sigma_+ + \hat \sigma_-)$, into two terms \begin{align} (\hat a + \hat a^\dagger)\hat \sigma_x = (\hat a \hat \sigma_- + \hat a^\dagger \hat \sigma_+) + (\hat a \hat \sigma_+ + \hat a^\dagger \hat \sigma_-). 
\end{align} The first one is typically referred to as the counter-rotating term and the second one is the rotating term. Neglecting the fast oscillating counter-rotating term leads to the Jaynes-Cummings model~\cite{knight1993JCmodel} which is the backbone of modern quantum optics. This (rotating wave) approximation is valid for $g\ll \omega,\Omega$ and {$|\Omega-\omega|\ll|\Omega+\omega|$}, however, it is not able to capture the {rich and intriguing physics close to the critical point of the quantum Rabi model ($g\sim g_c \equiv \sqrt{\omega \Omega}$)~\cite{plenio2015QRMphasetrans}.} In order to see why the vicinity of the critical point is interesting, we eliminate the dynamics of the spin using the Schrieffer-Wolff transformation~\cite{2011schriefferwolff}. Under the assumption of $1 -{g^2}/{g_c^2} \gg \left(\omega/\Omega\right)^{2/3}$~\cite{gietka2022comqm}, this leads to \begin{align}\label{eq:effRabi} \hat H_a = \omega \hat a^\dagger \hat a - \frac{g^2}{4 \Omega}\left(\hat a + \hat a^\dagger\right)^2 \end{align} which is a squeezing Hamiltonian with eigenstates \begin{align} |\psi_n \rangle = \exp\bigg\{\frac{1}{2}\left(\xi^*\hat a^2-\xi\hat a^{\dagger2}\right)\bigg\}|n\rangle, \end{align} where $\xi = \frac{1}{4} \ln\{1-g^2/g_c^2\}$ is the squeezing parameter and $|n\rangle$ are the Fock states. In other words, the spin can be thought of as a mediator of interactions between harmonic oscillator excitations in the limit of $\omega\ll\Omega $. {Here we see why the Jaynes-Cummings model cannot be applied close to the critical point: after the elimination of the spin the counter-rotating terms become squeezing terms (see Appendix).} \begin{figure*} \caption{Time evolution of the squeezed state for the kicked quantum Rabi model. The top panel shows the Husimi Q function at various times including the phase-space trajectory. 
The bottom left panel depicts the mean values $\langle\hat X \rangle$ and $\langle \hat P \rangle$ (subscript $a$ indicates the approximated abstract oscillator Hamiltonian) and the bottom right panel the squeezing of $\hat X$ and $\hat P$. The orbit (white line) is equally squeezed as the time-independent squeezed uncertainties. The dynamics described by the full (dashed lines) and the effective (solid lines) Hamiltonian agree very well, the visible wiggles for the squeezing appear because the simulation parameters are on the verge of the approximation breakdown. The parameters are $\Omega/\omega = 10^5$, $g/g_c = 0.9$, and $\alpha = 3$.} \label{fig:fig1} \end{figure*} To interpret this squeezing we introduce the operators $\hat x = (\hat a + \hat a^\dagger)/\sqrt{2\omega}$ and $\hat p = \sqrt{\omega}(\hat a - \hat a^\dagger)/\sqrt{-2}$ {such that} \begin{align} \hat H_a = \frac{\hat p^2}{2} + \frac{\omega^2}{2}\left(1-\frac{g^2}{g_c^2}\right)\hat x^2. \end{align} This Hamiltonian describes an abstract harmonic oscillator with a modified frequency $ \omega\sqrt{1-g^2/g_c^2}$ and a unit mass. Approaching then the critical point amounts to opening the abstract harmonic oscillator which leads to squeezing with respect to the physical harmonic oscillator~\cite{gietka2022squeezing} (the ground state of a given harmonic oscillator is a squeezed ground state of a harmonic oscillator with a different frequency). This means that if the measurement is performed in the basis of the physical harmonic oscillator described by $\hat x$ and $\hat p$, increasing the coupling will lead to redistribution of uncertainties between $\hat x$ and $\hat p$. Therefore, the lower the effective frequency, the larger the spread $\Delta \hat x$. In particular, for $g=g_c$ the spread $\Delta \hat x$ is infinite and $\Delta \hat p$ becomes $0$ as the Hamiltonian describes an abstract free particle whose eigenstates are that of the abstract momentum. 
In this manuscript, we present a method that allows to treat a driven (and dissipative) quantum Rabi model close to the critical point and show how it can be used to generate (steady-state) squeezing of the harmonic oscillator excitations and its dynamics. Such squeezing has the unique property of time-independent uncertainties which might be crucial for several quantum technologies. We start with the closed quantum Rabi model, subsequently, we consider an open and driven system, and finally, we show how increasing the number of spins (Dicke model) leads to enhanced squeezing. We conclude by identifying potential applications of the presented squeezing mechanism and discussing possible platforms for the implementation of the protocol. \section{Kicked Quantum Rabi model} \begin{figure*} \caption{Time evolution of the squeezed state for the driven-dissipative quantum Rabi model. The top panel shows the Husimi Q function at various times including the phase-space trajectory. The bottom left panel depicts the mean values $\langle\hat X \rangle$ and $\langle \hat P \rangle$ (subscript $e$ indicates the adiabatically eliminated abstract oscillator Hamiltonian) and the bottom right panel the squeezing of $\hat X$ and $\hat P$. The uncertainties reach a steady state (no need for the adiabatic time evolution) with only minor time dependence, the Heisenberg uncertainty principle is no longer saturated. The dynamics described by the full (dashed lines) and the effective (solid lines) Hamiltonian agree very well. The parameters are $\Omega/\omega = 2\cdot10^3$, $g/g_c = 0.8$, $\omega_d/\omega = \sqrt{1-g^2/g_c^2} \label{fig:fig2} \end{figure*} In order to excite the harmonic oscillator, we include a drive term to the quantum Rabi Hamiltonian \eqref{eq:H_rabi} \begin{align} \hat H_d = \eta (\hat a e^{i \omega_d t} + \hat a^\dagger e^{-i \omega_d t}), \end{align} where $\eta$ is the strength of the drive and $\omega_d$ is its frequency. 
Although such a driving term is characteristic of laser-pumped cavities~\cite{Helmut2013rmpcavity}, it can also describe driving of other harmonic oscillators. In an isolated system, the drive will excite the system indefinitely; therefore, we assume a strong short pulse (kick) in this case. Such a drive acts as a displacement operator \begin{align} \hat D(\alpha) = \exp(\alpha \hat a^\dagger - \alpha^* \hat a) \end{align} which displaces an initial vacuum state by $\alpha$, creating a coherent state $|\alpha \rangle$. If the coupling strength $g$ is equal to $0$, the coherent state will rotate around the origin of the phase space with frequency $\omega$ at a fixed radius $|\alpha|$. Adiabatically increasing the coupling strength towards the critical point will then change the frequency of the abstract harmonic oscillator, leading to the change of the orbit from circular to elliptical and will redistribute the uncertainties between the quadrature operators $\hat X = (\hat a + \hat a^\dagger)/2$ and $\hat P = (\hat a - \hat a^\dagger)/2i$ (see figure~\ref{fig:fig1}). The final state can be easily found by constructing a coherent state out of squeezed Fock states of the harmonic mode $\hat a$. The time evolution of such a squeezed coherent state becomes then \begin{align} |\psi (t) \rangle = \sum_{n=0}^\infty e^{-i n \omega \sqrt{1-\frac{g^2}{g_c^2}} t}\frac{\alpha^n}{\sqrt{n!}}e^{\frac{1}{2}\left(\xi^* \hat a^2-\xi \hat a^{\dagger2} \right)}|n \rangle. \end{align} Note that the same state can be obtained by first (adiabatically) squeezing the harmonic oscillator and then displacing the state. The equation of the new (squeezed) phase-space orbit can be found by calculating the average value of $\hat X$ and~$\hat P$ \begin{align}\label{eq:squeezedorb} \begin{split} \langle \hat X \rangle =& \alpha\cos\left(\omega \sqrt{1-\frac{g^2}{g_c^2}} t\right)\exp(\xi), \\ \langle \hat P \rangle =& -\alpha\sin\left(\omega \sqrt{1-\frac{g^2}{g_c^2}} t\right)\exp(-\xi). 
\end{split} \end{align} The uncertainties in $\hat X$ and $\hat P$ can be calculated in the same way \begin{align}\label{eq:squeezedunc} \begin{split} \Delta^2\hat X &= \frac{1}{4}\exp(2\xi), \\ \Delta^2\hat P &= \frac{1}{4}\exp(-2\xi), \end{split} \end{align} and turn out to be squeezed and time-independent. In contrast, creating squeezing in other ways, for example with a position measurement, squeezes the state but not the harmonic oscillator. Therefore, the squeezing experiences additional rotation which leads, for instance, to measurement back-action~\cite{lopez1995qndbackation,sillanpaa2016qbem,nunnenkamp2019squeezingbackation,mitchell2021backationevaging} and consequently to the standard quantum limit of measurement precision~\cite{caves1980rmpbackation}. In our approach, we adiabatically squeeze the abstract harmonic oscillator by changing its frequency which also squeezes the state. As a result, the state does not experience any rotations around its own axis at the \emph{expense} of exhibiting equally squeezed orbit and uncertainties [see figure~\ref{fig:fig1}, Eqs~\eqref{eq:squeezedorb} and \eqref{eq:squeezedunc}]. This squeezing of dynamics is a novel quantum effect and it corresponds to a harmonic oscillator where increasing the maximal displacement reduces the maximal momentum. Classically such a movement would violate energy conservation, however here the energy mismatch is compensated by the squeezing of the uncertainties. In practice, obtaining large and detectable squeezing of a macroscopic coherent state ($\alpha \gg 1$) might be difficult with a single two-level system, as it would require extremely large $\Omega/\omega$ to prevent the spin from getting excited (the effective Hamiltonian will no longer be valid). 
One way to artificially increase the atomic frequency in the quantum Rabi model is by enlarging the number of spins (Dicke model) which can be naively understood as changing the frequency from $\Omega$ to $N \Omega$~\cite{gietka2021invertedoscDicke}, with $N$ being the number of two-level systems (see figure \ref{fig:fig3}). In the limiting case $N\rightarrow \infty$, the large spin can be replaced by another harmonic oscillator by means of the Holstein-Primakoff transformation~\cite{1940HPtransf}. \begin{figure*} \caption{Steady state squeezing for large number of spins. The left panel shows the squeezing of $\hat X$ (solid lines) and $\hat P$ (dashed lines) and the right side shows the excitation of one spin. For large number of spins $N$ the excitation per spin $\langle \hat{\sigma} \label{fig:fig3} \end{figure*} \section{Driven-Dissipative Quantum Rabi model} In an open quantum system, the dissipation will eventually bring the state of the system to a (squeezed) vacuum. In order to prevent this, we continuously drive the system. Since the effective Hamiltonian is quadratic the system is described by a Gaussian state, and we expect that the dynamics of the full Hamiltonian is also well approximated by Gaussian physics. Therefore, we use a second-order mean-field description \cite{Plankensteiner2022quantumcumulantsjl}, which leads to a closed set of equations~\cite{[{See Supplemental Material at }][{ for the details.}]supp}. The effect of spin and harmonic oscillator dissipation is taken into account by means of the Lindblad master equation \begin{align} \begin{split} \mathcal{L}[\hat \rho] = & -i\left[\hat H + \hat H_d, \hat \rho \right] +\kappa\left(\hat a \hat \rho \hat a^\dagger -\frac{1}{2}\big\{\hat a^\dagger\hat a, \hat \rho \big\}\right) \\ & + \gamma\left( \hat \sigma_- \hat \rho \hat \sigma_+ -\frac{1}{2}\big\{\hat \sigma_+ \hat\sigma_-, \hat \rho \big\}\right). 
\end{split} \end{align} Here $\kappa$ is the harmonic excitation loss rate and $\gamma$ is related to the spontaneous emission rate, which helps to keep the spin in its ground state. After adiabatic elimination of the spin dynamics (equivalent to the Schrieffer-Wolff transformation for the closed system) and assuming $\Omega \gg \gamma$, we can find the effective Hamiltonian \begin{align} \hat H_{e} = \omega \hat a^\dagger \hat a - \frac{g^2}{4 \Omega}\left(\hat a + \hat a^\dagger\right)^2 + \eta (\hat a e^{i \omega_d t} + \hat a^\dagger e^{-i \omega_d t}), \end{align} which describes the abstract harmonic oscillator with an additional drive term. Figure \ref{fig:fig2} depicts the time evolution of the squeezing for the driven-dissipative case. At the top and lower left panel we see that the introduction of drive and dissipation rotates the squeezing axis and the phase-space trajectory. The lower right panel shows the almost time-independent squeezing after some settling time. Note that the squeezing no longer saturates the Heisenberg uncertainty principle (dotted line). The time evolution described by the adiabatically eliminated Hamiltonian (dashed lines) agrees very well with that of the full Hamiltonian (solid lines). For non-negligible spin excitation this approximation breaks down; furthermore, the uncertainties become strongly time-dependent. This can be suppressed by increasing the number of spins $N$. In figure \ref{fig:fig3}, we show the time dependence of the squeezing (left) and spin excitation (right) for different $N$. For a sufficiently large number of spins the uncertainties become time-independent. The equations of motion for the effective Hamiltonian can be integrated, and the tilt can be calculated analytically (see Appendix). 
We would like to point out that the direction of squeezing (tilt) depends on the type of coupling $g(\hat a e^{i \theta} + \hat a^\dagger e^{-i \theta})\hat \sigma_x$ and it is related to $\theta$ (this also holds for the closed system). For instance, choosing $\theta$ to be $\pi/2$ would result in a harmonic oscillator where the $\hat X$ quadrature is squeezed and not anti-squeezed as in this work. In principle, by adjusting $\theta$ to the tilt angle $\phi$, one could obtain squeezing in an arbitrary quadrature direction even in the driven-dissipative case. This in turn means that it should be possible to observe motion of a harmonic oscillator where at the point of maximal displacement the oscillator has the maximal momentum, for example, by tilting the axis by $\pi/4$. {Such behavior is unfathomable for a classical harmonic oscillator.} \section{Squeezing detection} So far the description was general, and we did not specify the harmonic oscillator and the underlying physical system. In this section, we discuss whether it is possible to observe steady-state squeezing and how to do it. In the case of a mechanical oscillator whose internal degree of freedom is coupled to the center-of-mass motion (phonon mode), the squeezing could be simply observed by measuring the position or momentum of the center of mass. In this case, the squeezing manifests itself in decreased or increased uncertainty of position and momentum. Such squeezing should already be realizable in optomechanical systems~\cite{uros2020,uros2022} and ions interacting with a common phonon mode~\cite{2015ionCoulombCrystal,2019CrystalDickesym}. The crucial element that allows to observe squeezing in these systems is the well-defined observables $\hat x$ and $\hat p$ for the uncoupled mechanical oscillator. For the electromagnetic resonators, the measurement of squeezing is different. In this case, the squeezing manifests itself in a changed resonance frequency of the resonator. 
This means that the definition of $\hat x$ and $\hat p$ depends on the frequency of the resonator. Therefore it is impossible to perform a measurement in the basis of the physical harmonic oscillator (see Appendix). The cavity in which atoms are strongly coupled to a single mode of radiation cannot generate squeezed light simply by driving it close to the critical point of the phase transition. From another perspective, light-matter interactions change the index of refraction (resonance frequency) which can be understood as squeezing of the electromagnetic field (changing its frequency). \section{Conclusions} We have presented a comprehensive theory of steady-state squeezing in the closed and open quantum Rabi model. In both cases, we obtain steady-state squeezing with the unique property of time-independent variances and a squeezed trajectory in the phase-space picture defined by the physical harmonic oscillator. Such squeezing is a novel phenomenon and it can find applications in many quantum technologies, in particular, in quantum back-action-free continuous measurements~\cite{caves1980rmpbackation,schwab2010nanoQBFmeasure,polzik2017baemeasure} and driven-dissipative~\cite{porras2023ddmetrologycollectivespin,ilias2023criticalityenhanced,tobe2023} critical~\cite{2020criticalwitkowska,Gietka2021adiabaticcritical,2021liuexperiment,2021criticalwitkowska,2022PRXQuantumcontinus,2022_Garbe_heisenbegkible,Gietka2022understanding,2022entropycritical,2022dingnature,Aybar2022criticalquantum,2022garbeexponetialsqueezing,gietka2023squeezingHeisenberg} metrology. In order to understand squeezing, we introduced an effective Hamiltonian describing an abstract harmonic oscillator which we obtain by eliminating the dynamics of the two-level system. To this end, we employed the Schrieffer-Wolff transformation in the closed system and adiabatic elimination in the open system. 
A promising extension of this proposal is the possibility of driving the spin directly and eliminating subsequently its dynamics in the dispersive regime to see steady-state squeezing. Since a quantum harmonic oscillator coupled to a two-level (or multiple-level) system can be used to describe many systems, the proposed method could be tested in a variety of physical platforms including mechanical resonators~\cite{aspelmeyer2014cavityoptom,uros2020,uros2022}, spin-orbit coupled quantum gases~\cite{busch2016socreview,gietka2022comqm}, ions coupled to phonons~\cite{duan2021qrbphonon}, Coulomb crystals~\cite{2015ionCoulombCrystal,2019CrystalDickesym}, and even electrons trapped on a surface of liquid helium~\cite{konstantinov2019qrmhelium}. We predict, that the most promising system for the implementation of the described steady-state squeezing would be linear optomechanical systems in the far red-detuned and ultrastrong coupling regime~\cite{kustura2022mechanicalsqueezing}. Also, systems composed of $N$ trapped ions interacting with a single phononic mode (realizing the Dicke model) might be a perfect platform to create steady-state squeezing as it should be relatively easy to enter the regime where the effective Hamiltonian is valid. 
\section{Derivation of the effective Hamiltonian and its dynamics} In order to derive the effective Hamiltonian, we will use the first-order cumulant expansion (all the averages are time-dependent) \begin{align} \frac{\mathrm{d}}{\mathrm{d}t} \langle \hat a\rangle =& g \left( - i \langle \hat \sigma^{12}\rangle - i \langle \hat \sigma^{{21}}\rangle \right) -\frac{\kappa}2 \langle \hat a\rangle - i \omega \langle \hat a\rangle - i \eta e^{- i \omega_d t}, \\ \frac{\mathrm{d}}{\mathrm{d}t} \langle \hat \sigma^{12}\rangle =& g \left( - i \langle \hat a^\dagger\rangle - i \langle \hat a\rangle + 2 i \langle \hat a^\dagger\rangle \langle \hat \sigma^{{22}}\rangle + 2 i \langle \hat a\rangle \langle \hat \sigma^{{22}}\rangle \right) - i \Omega \langle \hat \sigma^{12}\rangle -\frac{\gamma}2 \langle \hat \sigma^{12}\rangle, \\ \frac{\mathrm{d}}{\mathrm{d}t} \langle \hat \sigma^{{22}}\rangle =& g \left( i \langle \hat a^\dagger\rangle \langle \hat \sigma^{12}\rangle - i \langle \hat a^\dagger\rangle \langle \hat \sigma^{{21}}\rangle + i \langle \hat a\rangle \langle \hat \sigma^{12}\rangle - i \langle \hat a\rangle \langle \hat \sigma^{{21}}\rangle \right) - \gamma \langle \hat \sigma^{{22}}\rangle, \end{align} where $\hat \sigma^{ij} \equiv |i \rangle \langle j|$ (note that we used a different notation in the main text $\hat \sigma^{12} \equiv \hat \sigma_-$ and $\hat \sigma^{21} \equiv \hat \sigma_+$). 
Assuming the population of the excited state is negligible $\langle \hat \sigma^{22} \rangle \ll1$, we can find an expression for $\langle \hat \sigma^{12} \rangle$ in the steady state \begin{align} \langle \hat \sigma^{12} \rangle = \frac{-\frac{i k}{2}\left(\langle \hat a\rangle + \langle \hat a^\dagger \rangle\right) \sin(\omega_d t)}{\omega_d \cos(\omega_d t) + \sin(\omega_d t)\left(i\Omega +\frac{\gamma}{2}\right)} \approx \frac{-\frac{ k}{2}\left(\langle \hat a\rangle + \langle \hat a^\dagger \rangle\right) }{ \Omega}, \end{align} where we assume $\Omega\gg \omega_d,\gamma$ for the approximation. Eliminating $ \langle \hat \sigma^{12} \rangle$ from the mean-field equations, leads to an effective Hamiltonian \begin{align} \hat H_{e} = \omega \hat a^\dagger \hat a - \frac{g^2}{4 \Omega}\left(\hat a + \hat a^\dagger\right)^2 + \eta (\hat a e^{i \omega_d t} + \hat a^\dagger e^{-i \omega_d t}), \end{align} which can be rewritten using the quadrature operators $\hat X = (\hat a + \hat a^\dagger)/2$ and $\hat P = (\hat a - \hat a^\dagger)/2i$ \begin{align} \hat H_{e} = \omega \hat P^2 + \omega\left(1-\frac{g^2}{ g_c^2}\right)\hat X^2 + 2\eta\left(\hat X \cos(\omega_d t) + \hat P \sin(\omega_d t) \right). \end{align} Note that the above derivation is heuristic in nature and a more consistent derivation can be performed on the level of the second-order cumulant expansion. Although in the above equation $\gamma$ is absent, its presence in the full Hamiltonian allows to extend the regime in which the above Hamiltonian is valid. 
The mean-field Heisenberg equations of motion for $\hat X$ and $\hat P$ become \begin{align} \frac{\mathrm{d}}{\mathrm{d} t} \langle \hat X \rangle = & \omega \langle \hat P \rangle - \frac{\kappa}2 \langle \hat X \rangle -\eta \sin \left( \omega_d t \right) , \\ \frac{\mathrm{d}}{\mathrm{d} t} \langle \hat P \rangle = & - \omega\left(1- \frac{g^2}{g_c^2}\right) \langle \hat X \rangle - \frac{\kappa}{2} \langle \hat P \rangle-\eta \cos \left( \omega_d t \right), \end{align} with a steady-state solution given by ($\xi = 1-{g^2}/{g_c^2}$) \begin{align} \langle \hat X \rangle = &-\frac{2 \eta \left(2 \cos ( \omega_d t) \left(\kappa ^2 (\omega -\omega_d)+4 (\omega +\omega_d) \left(\xi \omega ^2-\omega_d^2\right)\right)+\kappa \sin ( \omega_d t) \left(\kappa ^2+4 \xi \omega ^2+4 \omega_d (2 \omega +\omega_d)\right)\right)}{\kappa ^4+8 \kappa ^2 \left(\xi \omega ^2+\omega_d^2\right)+16 \left(\omega_d^2-\xi \omega ^2\right)^2}, \\ \langle \hat P \rangle = & \frac{4 \eta \sin (\omega_d t) \left(\kappa ^2 (\xi \omega -\omega_d)+4 (\xi \omega +\omega_d) \left(\xi \omega ^2-\omega_d^2\right)\right)-2 \eta \kappa \cos ( \omega_d t) \left(\kappa ^2+4 \left(\xi \omega (\omega +2 \omega_d)+\omega_d^2\right)\right)}{\kappa ^4+8 \kappa ^2 \left(\xi \omega ^2+\omega_d^2\right)+16 \left(\omega_d^2-\xi \omega ^2\right)^2}, \end{align} which parametrizes an ellipse tilted by an angle defined by \begin{align} \tan (2 \phi )=\frac{\kappa \left(\kappa ^4+8 \kappa ^2 (\xi \omega (\omega +\omega_d)+\omega_d (\omega -\omega_d))+16 \left(\xi \omega ^2-\omega_d^2\right) (\xi \omega (\omega +2 \omega_d)+\omega_d (2 \omega +3 \omega_d))\right)}{\kappa ^4 (\xi \omega +\omega -6 \omega_d)+8 \kappa ^2 \left(\xi (\xi +1) \omega ^3-2 \xi \omega ^2 \omega_d-3 (\xi +1) \omega \omega_d^2-2 \omega_d^3\right)+16 (\xi \omega +\omega +2 \omega_d) \left(\omega_d^2-\xi \omega ^2\right)^2}. 
\end{align} \section{Relation between squeezing and cavity resonance frequency} The Hamiltonian of an empty cavity is given by \begin{align} \hat H = \omega \hat a^\dagger \hat a, \end{align} where $\hat a$ and $\hat a^\dagger$ describe a field with frequency $\omega$. Adding a single quantum electric dipole to the system leads to the quantum Rabi model \begin{align} \hat H = \omega \hat a^\dagger \hat a + \frac{\Omega}{2}\hat \sigma_z + \frac{g}{2}(\hat a + \hat a^\dagger)\hat \sigma_x. \end{align} In the optical domain, the coupling strength is typically very small compared to the critical coupling strength $g_c = \sqrt{\omega \Omega}$. In this case the system is well approximated by the Jaynes-Cummings model \begin{align} \hat H = \omega \hat a^\dagger \hat a + \frac{\Omega}{2}\hat \sigma_z + \frac{g}{2}(\hat a \hat \sigma_+ + \hat a^\dagger \hat \sigma_-). \end{align} Assuming the dynamics of the spin is negligible, it can be eliminated from the Hamiltonian (adiabatic elimination) leading to an effective description of the field \begin{align} \hat H = \left(\omega -\frac{g^2}{2 \Omega} \right) \hat a^\dagger \hat a, \end{align} where $\omega -\frac{g^2}{2 \Omega}$ is the resonance frequency of the field in the cavity with an atom inside. Note that the same operators $\hat a$ and $\hat a^\dagger$ describe a field with frequency $\omega$ and a field with frequency $\omega -\frac{g^2}{2 \Omega}$. The Jaynes-Cummings Hamiltonian is an approximation of the quantum Rabi model and these two Hamiltonians should lead to the same results in the appropriate regime. Eliminating the spin dynamics from the quantum Rabi model leads to an effective Hamiltonian for the field \begin{align} \label{eq:H_Rabi_adiab} \hat H = \omega \hat a^\dagger \hat a - \frac{g^2}{4\Omega}(\hat a + \hat a^\dagger)^2. 
\end{align} Note that the Hamiltonian \eqref{eq:H_Rabi_adiab} is not diagonal and can be diagonalized with a Bogoliubov transformation \begin{align} \hat b = \hat a \cosh{\xi} - \hat a^\dagger \sinh{\xi} , \end{align} which is implemented by the squeeze operator \begin{align} \hat U = \exp\left(\frac{\xi}{2}\left(\hat a^2 -\hat a^{\dagger 2} \right) \right), \end{align} where $\xi =\frac{1}{4}\log(1-g^2/g_c^2)$ is the squeezing parameter. The diagonalization leads to \begin{align} \hat H = \omega \sqrt{1-\frac{g^2}{\omega \Omega}} \hat b^\dagger \hat b, \end{align} where $\omega \sqrt{1-\frac{g^2}{\omega \Omega}}$ is the resonance frequency of the cavity. In this sense, the resonance frequency of the cavity with an atom inside is squeezed with respect to the resonance frequency of the empty cavity. If $g\ll g_c$, the resonance frequency can be approximated as $\omega \sqrt{1-\frac{g^2}{\omega \Omega}}\approx \omega - \frac{g^2}{2 \Omega}$ and $\hat b \approx \hat a$ (Jaynes-Cummings approximation). If, however, this is not the case, analyzing the cavity mode of a strongly interacting light-matter system with the operators of the empty cavity $\hat a$ and $\hat a^\dagger$ can lead to unphysical results. A naive calculation of the average number of photons leads to \begin{align} \langle \hat a^\dagger \hat a \rangle = \sinh^2 \xi \approx \frac{1}{\sqrt{1-g^2/g_c^2}}, \end{align} which diverges at the critical point. In fact, at the critical point of the superradiant phase transition, the frequency of the field approaches 0. This means that the ground state of such a harmonic oscillator is an extremely squeezed ground state (squeezed vacuum) of any other harmonic oscillator and in particular the harmonic oscillator describing the empty cavity. Since a squeezed vacuum has a non-zero number of photons, it appears as if these photons are real. 
However, they appear only because the field with a very low frequency (described by $\hat b$) is analyzed with the wrong operators ($\hat a$) describing the field with a much larger frequency. \twocolumngrid \begin{thebibliography}{64} \makeatletter \providecommand \@ifxundefined [1]{ \@ifx{#1\undefined} } \providecommand \@ifnum [1]{ \ifnum #1\expandafter \@firstoftwo \else \expandafter \@secondoftwo \fi } \providecommand \@ifx [1]{ \ifx #1\expandafter \@firstoftwo \else \expandafter \@secondoftwo \fi } \providecommand \natexlab [1]{#1} \providecommand \enquote [1]{``#1''} \providecommand \bibnamefont [1]{#1} \providecommand \bibfnamefont [1]{#1} \providecommand \citenamefont [1]{#1} \providecommand \href@noop [0]{\@secondoftwo} \providecommand \href [0]{\begingroup \@sanitize@url \@href} \providecommand \@href[1]{\@@startlink{#1}\@@href} \providecommand \@@href[1]{\endgroup#1\@@endlink} \providecommand \@sanitize@url [0]{\catcode `\\12\catcode `\$12\catcode `\&12\catcode `\#12\catcode `\^12\catcode `\_12\catcode `\%12\relax} \providecommand \@@startlink[1]{} \providecommand \@@endlink[0]{} \providecommand \url [0]{\begingroup\@sanitize@url \@url } \providecommand \@url [1]{\endgroup\@href {#1}{\urlprefix }} \providecommand \urlprefix [0]{URL } \providecommand \Eprint [0]{\href } \providecommand \doibase [0]{https://doi.org/} \providecommand \selectlanguage [0]{\@gobble} \providecommand \bibinfo [0]{\@secondoftwo} \providecommand \bibfield [0]{\@secondoftwo} \providecommand \translation [1]{[#1]} \providecommand \BibitemOpen [0]{} \providecommand \bibitemStop [0]{} \providecommand \bibitemNoStop [0]{.\EOS\space} \providecommand \EOS [0]{\spacefactor3000\relax} \providecommand \BibitemShut [1]{\csname bibitem#1\endcsname} \let\auto@bib@innerbib\@empty \bibitem [{\citenamefont {Gardiner}\ and\ \citenamefont {Zoller}(2004)}]{gardiner2004quantum} \BibitemOpen \bibfield {author} {\bibinfo {author} {\bibfnamefont {C.}~\bibnamefont {Gardiner}}\ and\ \bibinfo {author} 
{\bibfnamefont {P.}~\bibnamefont {Zoller}},\ }\href@noop {} {\emph {\bibinfo {title} {Quantum noise: a handbook of Markovian and non-Markovian quantum stochastic methods with applications to quantum optics}}}\ (\bibinfo {publisher} {Springer Science \& Business Media},\ \bibinfo {year} {2004})\BibitemShut {NoStop} \bibitem [{\citenamefont {Zubairy}(2005)}]{Zubairy_2005_squeezing} \BibitemOpen \bibfield {author} {\bibinfo {author} {\bibfnamefont {S.}~\bibnamefont {Zubairy}},\ }\bibfield {title} {\bibinfo {title} {Quantum squeezing},\ }\href {https://doi.org/10.1088/1464-4266/7/5/B01} {\bibfield {journal} {\bibinfo {journal} {Journal of Optics B: Quantum and Semiclassical Optics}\ }\textbf {\bibinfo {volume} {7}},\ \bibinfo {pages} {156} (\bibinfo {year} {2005})}\BibitemShut {NoStop} \bibitem [{\citenamefont {Walls}(1983)}]{squeezing1983walls} \BibitemOpen \bibfield {author} {\bibinfo {author} {\bibfnamefont {D.~F.}\ \bibnamefont {Walls}},\ }\bibfield {title} {\bibinfo {title} {Squeezed states of light},\ }\href {https://doi.org/10.1038/306141a0} {\bibfield {journal} {\bibinfo {journal} {Nature}\ }\textbf {\bibinfo {volume} {306}},\ \bibinfo {pages} {141} (\bibinfo {year} {1983})}\BibitemShut {NoStop} \bibitem [{\citenamefont {Grangier}\ \emph {et~al.}(1987)\citenamefont {Grangier}, \citenamefont {Slusher}, \citenamefont {Yurke},\ and\ \citenamefont {LaPorta}}]{laporta1987squeezedpolariztion} \BibitemOpen \bibfield {author} {\bibinfo {author} {\bibfnamefont {P.}~\bibnamefont {Grangier}}, \bibinfo {author} {\bibfnamefont {R.~E.}\ \bibnamefont {Slusher}}, \bibinfo {author} {\bibfnamefont {B.}~\bibnamefont {Yurke}},\ and\ \bibinfo {author} {\bibfnamefont {A.}~\bibnamefont {LaPorta}},\ }\bibfield {title} {\bibinfo {title} {Squeezed-light--enhanced polarization interferometer},\ }\href {https://doi.org/10.1103/PhysRevLett.59.2153} {\bibfield {journal} {\bibinfo {journal} {Phys. Rev. 
Lett.}\ }\textbf {\bibinfo {volume} {59}},\ \bibinfo {pages} {2153} (\bibinfo {year} {1987})}\BibitemShut {NoStop} \bibitem [{\citenamefont {Xiao}\ \emph {et~al.}(1987)\citenamefont {Xiao}, \citenamefont {Wu},\ and\ \citenamefont {Kimble}}]{Kimble1987precision} \BibitemOpen \bibfield {author} {\bibinfo {author} {\bibfnamefont {M.}~\bibnamefont {Xiao}}, \bibinfo {author} {\bibfnamefont {L.-A.}\ \bibnamefont {Wu}},\ and\ \bibinfo {author} {\bibfnamefont {H.~J.}\ \bibnamefont {Kimble}},\ }\bibfield {title} {\bibinfo {title} {Precision measurement beyond the shot-noise limit},\ }\href {https://doi.org/10.1103/PhysRevLett.59.278} {\bibfield {journal} {\bibinfo {journal} {Phys. Rev. Lett.}\ }\textbf {\bibinfo {volume} {59}},\ \bibinfo {pages} {278} (\bibinfo {year} {1987})}\BibitemShut {NoStop} \bibitem [{\citenamefont {Polzik}\ \emph {et~al.}(1992)\citenamefont {Polzik}, \citenamefont {Carri},\ and\ \citenamefont {Kimble}}]{polzik1992squeezing} \BibitemOpen \bibfield {author} {\bibinfo {author} {\bibfnamefont {E.~S.}\ \bibnamefont {Polzik}}, \bibinfo {author} {\bibfnamefont {J.}~\bibnamefont {Carri}},\ and\ \bibinfo {author} {\bibfnamefont {H.~J.}\ \bibnamefont {Kimble}},\ }\bibfield {title} {\bibinfo {title} {Spectroscopy with squeezed light},\ }\href {https://doi.org/10.1103/PhysRevLett.68.3020} {\bibfield {journal} {\bibinfo {journal} {Phys. Rev. 
Lett.}\ }\textbf {\bibinfo {volume} {68}},\ \bibinfo {pages} {3020} (\bibinfo {year} {1992})}\BibitemShut {NoStop} \bibitem [{\citenamefont {{LIGO Scientific Collaboration}}(2011)}]{geo6002011squeezing} \BibitemOpen \bibfield {author} {\bibinfo {author} {\bibnamefont {{LIGO Scientific Collaboration}}},\ }\bibfield {title} {\bibinfo {title} {A gravitational wave observatory operating beyond the quantum shot-noise limit},\ }\href {https://doi.org/10.1038/nphys2083} {\bibfield {journal} {\bibinfo {journal} {Nature Physics}\ }\textbf {\bibinfo {volume} {7}},\ \bibinfo {pages} {962} (\bibinfo {year} {2011})}\BibitemShut {NoStop} \bibitem [{\citenamefont {Grote}\ \emph {et~al.}(2013)\citenamefont {Grote}, \citenamefont {Danzmann}, \citenamefont {Dooley}, \citenamefont {Schnabel}, \citenamefont {Slutsky},\ and\ \citenamefont {Vahlbruch}}]{grote2013firstsqueezing} \BibitemOpen \bibfield {author} {\bibinfo {author} {\bibfnamefont {H.}~\bibnamefont {Grote}}, \bibinfo {author} {\bibfnamefont {K.}~\bibnamefont {Danzmann}}, \bibinfo {author} {\bibfnamefont {K.~L.}\ \bibnamefont {Dooley}}, \bibinfo {author} {\bibfnamefont {R.}~\bibnamefont {Schnabel}}, \bibinfo {author} {\bibfnamefont {J.}~\bibnamefont {Slutsky}},\ and\ \bibinfo {author} {\bibfnamefont {H.}~\bibnamefont {Vahlbruch}},\ }\bibfield {title} {\bibinfo {title} {First long-term application of squeezed states of light in a gravitational-wave observatory},\ }\href {https://doi.org/10.1103/PhysRevLett.110.181101} {\bibfield {journal} {\bibinfo {journal} {Phys. Rev. 
Lett.}\ }\textbf {\bibinfo {volume} {110}},\ \bibinfo {pages} {181101} (\bibinfo {year} {2013})}\BibitemShut {NoStop} \bibitem [{\citenamefont {Schnabel}(2017)}]{SCHNABEL2017squeezing} \BibitemOpen \bibfield {author} {\bibinfo {author} {\bibfnamefont {R.}~\bibnamefont {Schnabel}},\ }\bibfield {title} {\bibinfo {title} {Squeezed states of light and their applications in laser interferometers},\ }\href {https://doi.org/https://doi.org/10.1016/j.physrep.2017.04.001} {\bibfield {journal} {\bibinfo {journal} {Physics Reports}\ }\textbf {\bibinfo {volume} {684}},\ \bibinfo {pages} {1} (\bibinfo {year} {2017})},\ \bibinfo {note} {squeezed states of light and their applications in laser interferometers}\BibitemShut {NoStop} \bibitem [{\citenamefont {{LIGO Scientific Collaboration}}(2019)}]{ligo2019enhanced} \BibitemOpen \bibfield {author} {\bibinfo {author} {\bibnamefont {{LIGO Scientific Collaboration}}},\ }\bibfield {title} {\bibinfo {title} {Quantum-enhanced advanced ligo detectors in the era of gravitational-wave astronomy},\ }\href {https://doi.org/10.1103/PhysRevLett.123.231107} {\bibfield {journal} {\bibinfo {journal} {Phys. Rev. Lett.}\ }\textbf {\bibinfo {volume} {123}},\ \bibinfo {pages} {231107} (\bibinfo {year} {2019})}\BibitemShut {NoStop} \bibitem [{\citenamefont {{Virgo Collaboration}}(2019)}]{virgo2019increasingsqueezeed} \BibitemOpen \bibfield {author} {\bibinfo {author} {\bibnamefont {{Virgo Collaboration}}},\ }\bibfield {title} {\bibinfo {title} {Increasing the astrophysical reach of the advanced virgo detector via the application of squeezed vacuum states of light},\ }\href {https://doi.org/10.1103/PhysRevLett.123.231108} {\bibfield {journal} {\bibinfo {journal} {Phys. Rev. 
Lett.}\ }\textbf {\bibinfo {volume} {123}},\ \bibinfo {pages} {231108} (\bibinfo {year} {2019})}\BibitemShut {NoStop} \bibitem [{\citenamefont {{LIGO Scientific Collaboration and Virgo Collaboration}}(2021)}]{ligovirgo2021new} \BibitemOpen \bibfield {author} {\bibinfo {author} {\bibnamefont {{LIGO Scientific Collaboration and Virgo Collaboration}}},\ }\bibfield {title} {\bibinfo {title} {Gwtc-2: Compact binary coalescences observed by ligo and virgo during the first half of the third observing run},\ }\href {https://doi.org/10.1103/PhysRevX.11.021053} {\bibfield {journal} {\bibinfo {journal} {Phys. Rev. X}\ }\textbf {\bibinfo {volume} {11}},\ \bibinfo {pages} {021053} (\bibinfo {year} {2021})}\BibitemShut {NoStop} \bibitem [{\citenamefont {Qin}\ \emph {et~al.}(2023)\citenamefont {Qin}, \citenamefont {Deng}, \citenamefont {Zhong}, \citenamefont {Peng}, \citenamefont {Su}, \citenamefont {Luo}, \citenamefont {Xu}, \citenamefont {Wu}, \citenamefont {Gong}, \citenamefont {Liu}, \citenamefont {Wang}, \citenamefont {Chen}, \citenamefont {Li}, \citenamefont {Liu}, \citenamefont {Lu},\ and\ \citenamefont {Pan}}]{pan2023robustbeyondNOON} \BibitemOpen \bibfield {author} {\bibinfo {author} {\bibfnamefont {J.}~\bibnamefont {Qin}}, \bibinfo {author} {\bibfnamefont {Y.-H.}\ \bibnamefont {Deng}}, \bibinfo {author} {\bibfnamefont {H.-S.}\ \bibnamefont {Zhong}}, \bibinfo {author} {\bibfnamefont {L.-C.}\ \bibnamefont {Peng}}, \bibinfo {author} {\bibfnamefont {H.}~\bibnamefont {Su}}, \bibinfo {author} {\bibfnamefont {Y.-H.}\ \bibnamefont {Luo}}, \bibinfo {author} {\bibfnamefont {J.-M.}\ \bibnamefont {Xu}}, \bibinfo {author} {\bibfnamefont {D.}~\bibnamefont {Wu}}, \bibinfo {author} {\bibfnamefont {S.-Q.}\ \bibnamefont {Gong}}, \bibinfo {author} {\bibfnamefont {H.-L.}\ \bibnamefont {Liu}}, \bibinfo {author} {\bibfnamefont {H.}~\bibnamefont {Wang}}, \bibinfo {author} {\bibfnamefont {M.-C.}\ \bibnamefont {Chen}}, \bibinfo {author} {\bibfnamefont {L.}~\bibnamefont {Li}}, \bibinfo {author} 
{\bibfnamefont {N.-L.}\ \bibnamefont {Liu}}, \bibinfo {author} {\bibfnamefont {C.-Y.}\ \bibnamefont {Lu}},\ and\ \bibinfo {author} {\bibfnamefont {J.-W.}\ \bibnamefont {Pan}},\ }\bibfield {title} {\bibinfo {title} {Unconditional and robust quantum metrological advantage beyond n00n states},\ }\href {https://doi.org/10.1103/PhysRevLett.130.070801} {\bibfield {journal} {\bibinfo {journal} {Phys. Rev. Lett.}\ }\textbf {\bibinfo {volume} {130}},\ \bibinfo {pages} {070801} (\bibinfo {year} {2023})}\BibitemShut {NoStop} \bibitem [{\citenamefont {Pedrozo-Peñafiel}\ \emph {et~al.}(2020)\citenamefont {Pedrozo-Peñafiel}, \citenamefont {Colombo}, \citenamefont {Shu}, \citenamefont {Adiyatullin}, \citenamefont {Li}, \citenamefont {Mendez}, \citenamefont {Braverman}, \citenamefont {Kawasaki}, \citenamefont {Akamatsu}, \citenamefont {Xiao},\ and\ \citenamefont {Vuletić}}]{pedrozo2020entanglement} \BibitemOpen \bibfield {author} {\bibinfo {author} {\bibfnamefont {E.}~\bibnamefont {Pedrozo-Peñafiel}}, \bibinfo {author} {\bibfnamefont {S.}~\bibnamefont {Colombo}}, \bibinfo {author} {\bibfnamefont {C.}~\bibnamefont {Shu}}, \bibinfo {author} {\bibfnamefont {A.~F.}\ \bibnamefont {Adiyatullin}}, \bibinfo {author} {\bibfnamefont {Z.}~\bibnamefont {Li}}, \bibinfo {author} {\bibfnamefont {E.}~\bibnamefont {Mendez}}, \bibinfo {author} {\bibfnamefont {B.}~\bibnamefont {Braverman}}, \bibinfo {author} {\bibfnamefont {A.}~\bibnamefont {Kawasaki}}, \bibinfo {author} {\bibfnamefont {D.}~\bibnamefont {Akamatsu}}, \bibinfo {author} {\bibfnamefont {Y.}~\bibnamefont {Xiao}},\ and\ \bibinfo {author} {\bibfnamefont {V.}~\bibnamefont {Vuletić}},\ }\bibfield {title} {\bibinfo {title} {Entanglement on an optical atomic-clock transition},\ }\bibfield {journal} {\bibinfo {journal} {Nature}\ }\textbf {\bibinfo {volume} {588}},\ \href {https://doi.org/10.1038/s41586-020-3006-1} {10.1038/s41586-020-3006-1} (\bibinfo {year} {2020})\BibitemShut {NoStop} \bibitem [{\citenamefont {Colombo}\ \emph 
{et~al.}(2022)\citenamefont {Colombo}, \citenamefont {Pedrozo-Peñafiel}, \citenamefont {Adiyatullin}, \citenamefont {Li}, \citenamefont {Mendez}, \citenamefont {Shu},\ and\ \citenamefont {Vuletić}}]{colombo2022timereversal} \BibitemOpen \bibfield {author} {\bibinfo {author} {\bibfnamefont {S.}~\bibnamefont {Colombo}}, \bibinfo {author} {\bibfnamefont {E.}~\bibnamefont {Pedrozo-Peñafiel}}, \bibinfo {author} {\bibfnamefont {A.~F.}\ \bibnamefont {Adiyatullin}}, \bibinfo {author} {\bibfnamefont {Z.}~\bibnamefont {Li}}, \bibinfo {author} {\bibfnamefont {E.}~\bibnamefont {Mendez}}, \bibinfo {author} {\bibfnamefont {C.}~\bibnamefont {Shu}},\ and\ \bibinfo {author} {\bibfnamefont {V.}~\bibnamefont {Vuletić}},\ }\bibfield {title} {\bibinfo {title} {Time-reversal-based quantum metrology with many-body entangled states},\ }\href {https://doi.org/10.1038/s41567-022-01653-5} {\bibfield {journal} {\bibinfo {journal} {Nature Physics}\ }\textbf {\bibinfo {volume} {18}},\ \bibinfo {pages} {925–930} (\bibinfo {year} {2022})}\BibitemShut {NoStop} \bibitem [{\citenamefont {Gehring}\ \emph {et~al.}(2015)\citenamefont {Gehring}, \citenamefont {H{\"a}ndchen}, \citenamefont {Duhme}, \citenamefont {Furrer}, \citenamefont {Franz}, \citenamefont {Pacher}, \citenamefont {Werner},\ and\ \citenamefont {Schnabel}}]{schnabel2015CVkeydist} \BibitemOpen \bibfield {author} {\bibinfo {author} {\bibfnamefont {T.}~\bibnamefont {Gehring}}, \bibinfo {author} {\bibfnamefont {V.}~\bibnamefont {H{\"a}ndchen}}, \bibinfo {author} {\bibfnamefont {J.}~\bibnamefont {Duhme}}, \bibinfo {author} {\bibfnamefont {F.}~\bibnamefont {Furrer}}, \bibinfo {author} {\bibfnamefont {T.}~\bibnamefont {Franz}}, \bibinfo {author} {\bibfnamefont {C.}~\bibnamefont {Pacher}}, \bibinfo {author} {\bibfnamefont {R.~F.}\ \bibnamefont {Werner}},\ and\ \bibinfo {author} {\bibfnamefont {R.}~\bibnamefont {Schnabel}},\ }\bibfield {title} {\bibinfo {title} {Implementation of continuous-variable quantum key distribution with composable and 
one-sided-device-independent security against coherent attacks},\ }\href {https://doi.org/10.1038/ncomms9795} {\bibfield {journal} {\bibinfo {journal} {Nature Communications}\ }\textbf {\bibinfo {volume} {6}},\ \bibinfo {pages} {8795} (\bibinfo {year} {2015})}\BibitemShut {NoStop} \bibitem [{\citenamefont {Wang}\ \emph {et~al.}(2019)\citenamefont {Wang}, \citenamefont {Wang},\ and\ \citenamefont {Li}}]{yongmin2019cvsqueezedqkd} \BibitemOpen \bibfield {author} {\bibinfo {author} {\bibfnamefont {P.}~\bibnamefont {Wang}}, \bibinfo {author} {\bibfnamefont {X.}~\bibnamefont {Wang}},\ and\ \bibinfo {author} {\bibfnamefont {Y.}~\bibnamefont {Li}},\ }\bibfield {title} {\bibinfo {title} {Continuous-variable measurement-device-independent quantum key distribution using modulated squeezed states and optical amplifiers},\ }\href {https://doi.org/10.1103/PhysRevA.99.042309} {\bibfield {journal} {\bibinfo {journal} {Phys. Rev. A}\ }\textbf {\bibinfo {volume} {99}},\ \bibinfo {pages} {042309} (\bibinfo {year} {2019})}\BibitemShut {NoStop} \bibitem [{\citenamefont {Derkach}\ \emph {et~al.}(2020)\citenamefont {Derkach}, \citenamefont {Usenko},\ and\ \citenamefont {Filip}}]{Derkach_2020squeezingQKD} \BibitemOpen \bibfield {author} {\bibinfo {author} {\bibfnamefont {I.}~\bibnamefont {Derkach}}, \bibinfo {author} {\bibfnamefont {V.~C.}\ \bibnamefont {Usenko}},\ and\ \bibinfo {author} {\bibfnamefont {R.}~\bibnamefont {Filip}},\ }\bibfield {title} {\bibinfo {title} {Squeezing-enhanced quantum key distribution over atmospheric channels},\ }\href {https://doi.org/10.1088/1367-2630/ab7f8f} {\bibfield {journal} {\bibinfo {journal} {New Journal of Physics}\ }\textbf {\bibinfo {volume} {22}},\ \bibinfo {pages} {053006} (\bibinfo {year} {2020})}\BibitemShut {NoStop} \bibitem [{\citenamefont {Hosseinidehaj}\ \emph {et~al.}(2022)\citenamefont {Hosseinidehaj}, \citenamefont {Winnel},\ and\ \citenamefont {Ralph}}]{timothy2022squeezedlaser} \BibitemOpen \bibfield {author} {\bibinfo {author} 
{\bibfnamefont {N.}~\bibnamefont {Hosseinidehaj}}, \bibinfo {author} {\bibfnamefont {M.~S.}\ \bibnamefont {Winnel}},\ and\ \bibinfo {author} {\bibfnamefont {T.~C.}\ \bibnamefont {Ralph}},\ }\bibfield {title} {\bibinfo {title} {Simple and loss-tolerant free-space quantum key distribution using a squeezed laser},\ }\href {https://doi.org/10.1103/PhysRevA.105.032602} {\bibfield {journal} {\bibinfo {journal} {Phys. Rev. A}\ }\textbf {\bibinfo {volume} {105}},\ \bibinfo {pages} {032602} (\bibinfo {year} {2022})}\BibitemShut {NoStop} \bibitem [{\citenamefont {Hwang}\ \emph {et~al.}(2015)\citenamefont {Hwang}, \citenamefont {Puebla},\ and\ \citenamefont {Plenio}}]{plenio2015QRMphasetrans} \BibitemOpen \bibfield {author} {\bibinfo {author} {\bibfnamefont {M.-J.}\ \bibnamefont {Hwang}}, \bibinfo {author} {\bibfnamefont {R.}~\bibnamefont {Puebla}},\ and\ \bibinfo {author} {\bibfnamefont {M.~B.}\ \bibnamefont {Plenio}},\ }\bibfield {title} {\bibinfo {title} {Quantum phase transition and universal dynamics in the rabi model},\ }\href {https://doi.org/10.1103/PhysRevLett.115.180404} {\bibfield {journal} {\bibinfo {journal} {Phys. Rev. 
Lett.}\ }\textbf {\bibinfo {volume} {115}},\ \bibinfo {pages} {180404} (\bibinfo {year} {2015})}\BibitemShut {NoStop} \bibitem [{\citenamefont {Thearle}\ \emph {et~al.}(2018)\citenamefont {Thearle}, \citenamefont {Janousek}, \citenamefont {Armstrong}, \citenamefont {Hosseini}, \citenamefont {Sch\"unemann~(Mraz)}, \citenamefont {Assad}, \citenamefont {Symul}, \citenamefont {James}, \citenamefont {Huntington}, \citenamefont {Ralph},\ and\ \citenamefont {Lam}}]{koy2018squeezingbelltest} \BibitemOpen \bibfield {author} {\bibinfo {author} {\bibfnamefont {O.}~\bibnamefont {Thearle}}, \bibinfo {author} {\bibfnamefont {J.}~\bibnamefont {Janousek}}, \bibinfo {author} {\bibfnamefont {S.}~\bibnamefont {Armstrong}}, \bibinfo {author} {\bibfnamefont {S.}~\bibnamefont {Hosseini}}, \bibinfo {author} {\bibfnamefont {M.}~\bibnamefont {Sch\"unemann~(Mraz)}}, \bibinfo {author} {\bibfnamefont {S.}~\bibnamefont {Assad}}, \bibinfo {author} {\bibfnamefont {T.}~\bibnamefont {Symul}}, \bibinfo {author} {\bibfnamefont {M.~R.}\ \bibnamefont {James}}, \bibinfo {author} {\bibfnamefont {E.}~\bibnamefont {Huntington}}, \bibinfo {author} {\bibfnamefont {T.~C.}\ \bibnamefont {Ralph}},\ and\ \bibinfo {author} {\bibfnamefont {P.~K.}\ \bibnamefont {Lam}},\ }\bibfield {title} {\bibinfo {title} {Violation of bell's inequality using continuous variable measurements},\ }\href {https://doi.org/10.1103/PhysRevLett.120.040406} {\bibfield {journal} {\bibinfo {journal} {Phys. Rev. 
Lett.}\ }\textbf {\bibinfo {volume} {120}},\ \bibinfo {pages} {040406} (\bibinfo {year} {2018})}\BibitemShut {NoStop} \bibitem [{\citenamefont {Braak}\ \emph {et~al.}(2016)\citenamefont {Braak}, \citenamefont {Chen}, \citenamefont {Batchelor},\ and\ \citenamefont {Solano}}]{Braak_2016qrmcelebration80} \BibitemOpen \bibfield {author} {\bibinfo {author} {\bibfnamefont {D.}~\bibnamefont {Braak}}, \bibinfo {author} {\bibfnamefont {Q.-H.}\ \bibnamefont {Chen}}, \bibinfo {author} {\bibfnamefont {M.~T.}\ \bibnamefont {Batchelor}},\ and\ \bibinfo {author} {\bibfnamefont {E.}~\bibnamefont {Solano}},\ }\bibfield {title} {\bibinfo {title} {Semi-classical and quantum rabi models: in celebration of 80 years},\ }\href {https://doi.org/10.1088/1751-8113/49/30/300301} {\bibfield {journal} {\bibinfo {journal} {Journal of Physics A: Mathematical and Theoretical}\ }\textbf {\bibinfo {volume} {49}},\ \bibinfo {pages} {300301} (\bibinfo {year} {2016})}\BibitemShut {NoStop} \bibitem [{\citenamefont {Xie}\ \emph {et~al.}(2017)\citenamefont {Xie}, \citenamefont {Zhong}, \citenamefont {Batchelor},\ and\ \citenamefont {Lee}}]{Xie_2017rabimodelsolutionanddynamics} \BibitemOpen \bibfield {author} {\bibinfo {author} {\bibfnamefont {Q.}~\bibnamefont {Xie}}, \bibinfo {author} {\bibfnamefont {H.}~\bibnamefont {Zhong}}, \bibinfo {author} {\bibfnamefont {M.~T.}\ \bibnamefont {Batchelor}},\ and\ \bibinfo {author} {\bibfnamefont {C.}~\bibnamefont {Lee}},\ }\bibfield {title} {\bibinfo {title} {The quantum rabi model: solution and dynamics},\ }\href {https://doi.org/10.1088/1751-8121/aa5a65} {\bibfield {journal} {\bibinfo {journal} {Journal of Physics A: Mathematical and Theoretical}\ }\textbf {\bibinfo {volume} {50}},\ \bibinfo {pages} {113001} (\bibinfo {year} {2017})}\BibitemShut {NoStop} \bibitem [{\citenamefont {Shore}\ and\ \citenamefont {Knight}(1993)}]{knight1993JCmodel} \BibitemOpen \bibfield {author} {\bibinfo {author} {\bibfnamefont {B.~W.}\ \bibnamefont {Shore}}\ and\ \bibinfo {author} 
{\bibfnamefont {P.~L.}\ \bibnamefont {Knight}},\ }\bibfield {title} {\bibinfo {title} {The jaynes-cummings model},\ }\href {https://doi.org/10.1080/09500349314551321} {\bibfield {journal} {\bibinfo {journal} {Journal of Modern Optics}\ }\textbf {\bibinfo {volume} {40}},\ \bibinfo {pages} {1195} (\bibinfo {year} {1993})}\BibitemShut {NoStop} \bibitem [{\citenamefont {Bravyi}\ \emph {et~al.}(2011)\citenamefont {Bravyi}, \citenamefont {DiVincenzo},\ and\ \citenamefont {Loss}}]{2011schriefferwolff} \BibitemOpen \bibfield {author} {\bibinfo {author} {\bibfnamefont {S.}~\bibnamefont {Bravyi}}, \bibinfo {author} {\bibfnamefont {D.~P.}\ \bibnamefont {DiVincenzo}},\ and\ \bibinfo {author} {\bibfnamefont {D.}~\bibnamefont {Loss}},\ }\bibfield {title} {\bibinfo {title} {Schrieffer–wolff transformation for quantum many-body systems},\ }\href {https://doi.org/https://doi.org/10.1016/j.aop.2011.06.004} {\bibfield {journal} {\bibinfo {journal} {Annals of Physics}\ }\textbf {\bibinfo {volume} {326}},\ \bibinfo {pages} {2793} (\bibinfo {year} {2011})}\BibitemShut {NoStop} \bibitem [{\citenamefont {Gietka}(2022{\natexlab{a}})}]{gietka2022comqm} \BibitemOpen \bibfield {author} {\bibinfo {author} {\bibfnamefont {K.}~\bibnamefont {Gietka}},\ }\bibfield {title} {\bibinfo {title} {Harnessing center-of-mass excitations in quantum metrology},\ }\href {https://doi.org/10.1103/PhysRevResearch.4.043074} {\bibfield {journal} {\bibinfo {journal} {Phys. Rev. Res.}\ }\textbf {\bibinfo {volume} {4}},\ \bibinfo {pages} {043074} (\bibinfo {year} {2022}{\natexlab{a}})}\BibitemShut {NoStop} \bibitem [{\citenamefont {Gietka}(2022{\natexlab{b}})}]{gietka2022squeezing} \BibitemOpen \bibfield {author} {\bibinfo {author} {\bibfnamefont {K.}~\bibnamefont {Gietka}},\ }\bibfield {title} {\bibinfo {title} {Squeezing by critical speeding up: Applications in quantum metrology},\ }\href {https://doi.org/10.1103/PhysRevA.105.042620} {\bibfield {journal} {\bibinfo {journal} {Phys. Rev. 
A}\ }\textbf {\bibinfo {volume} {105}},\ \bibinfo {pages} {042620} (\bibinfo {year} {2022}{\natexlab{b}})}\BibitemShut {NoStop} \bibitem [{\citenamefont {Ritsch}\ \emph {et~al.}(2013)\citenamefont {Ritsch}, \citenamefont {Domokos}, \citenamefont {Brennecke},\ and\ \citenamefont {Esslinger}}]{Helmut2013rmpcavity} \BibitemOpen \bibfield {author} {\bibinfo {author} {\bibfnamefont {H.}~\bibnamefont {Ritsch}}, \bibinfo {author} {\bibfnamefont {P.}~\bibnamefont {Domokos}}, \bibinfo {author} {\bibfnamefont {F.}~\bibnamefont {Brennecke}},\ and\ \bibinfo {author} {\bibfnamefont {T.}~\bibnamefont {Esslinger}},\ }\bibfield {title} {\bibinfo {title} {Cold atoms in cavity-generated dynamical optical potentials},\ }\href {https://doi.org/10.1103/RevModPhys.85.553} {\bibfield {journal} {\bibinfo {journal} {Rev. Mod. Phys.}\ }\textbf {\bibinfo {volume} {85}},\ \bibinfo {pages} {553} (\bibinfo {year} {2013})}\BibitemShut {NoStop} \bibitem [{\citenamefont {Bencheikh}\ \emph {et~al.}(1995)\citenamefont {Bencheikh}, \citenamefont {Levenson}, \citenamefont {Grangier},\ and\ \citenamefont {Lopez}}]{lopez1995qndbackation} \BibitemOpen \bibfield {author} {\bibinfo {author} {\bibfnamefont {K.}~\bibnamefont {Bencheikh}}, \bibinfo {author} {\bibfnamefont {J.~A.}\ \bibnamefont {Levenson}}, \bibinfo {author} {\bibfnamefont {P.}~\bibnamefont {Grangier}},\ and\ \bibinfo {author} {\bibfnamefont {O.}~\bibnamefont {Lopez}},\ }\bibfield {title} {\bibinfo {title} {Quantum nondemolition demonstration via repeated backaction evading measurements},\ }\href {https://doi.org/10.1103/PhysRevLett.75.3422} {\bibfield {journal} {\bibinfo {journal} {Phys. Rev. 
Lett.}\ }\textbf {\bibinfo {volume} {75}},\ \bibinfo {pages} {3422} (\bibinfo {year} {1995})}\BibitemShut {NoStop} \bibitem [{\citenamefont {Ockeloen-Korppi}\ \emph {et~al.}(2016)\citenamefont {Ockeloen-Korppi}, \citenamefont {Damsk\"agg}, \citenamefont {Pirkkalainen}, \citenamefont {Clerk}, \citenamefont {Woolley},\ and\ \citenamefont {Sillanp\"a\"a}}]{sillanpaa2016qbem} \BibitemOpen \bibfield {author} {\bibinfo {author} {\bibfnamefont {C.~F.}\ \bibnamefont {Ockeloen-Korppi}}, \bibinfo {author} {\bibfnamefont {E.}~\bibnamefont {Damsk\"agg}}, \bibinfo {author} {\bibfnamefont {J.-M.}\ \bibnamefont {Pirkkalainen}}, \bibinfo {author} {\bibfnamefont {A.~A.}\ \bibnamefont {Clerk}}, \bibinfo {author} {\bibfnamefont {M.~J.}\ \bibnamefont {Woolley}},\ and\ \bibinfo {author} {\bibfnamefont {M.~A.}\ \bibnamefont {Sillanp\"a\"a}},\ }\bibfield {title} {\bibinfo {title} {Quantum backaction evading measurement of collective mechanical modes},\ }\href {https://doi.org/10.1103/PhysRevLett.117.140401} {\bibfield {journal} {\bibinfo {journal} {Phys. Rev. Lett.}\ }\textbf {\bibinfo {volume} {117}},\ \bibinfo {pages} {140401} (\bibinfo {year} {2016})}\BibitemShut {NoStop} \bibitem [{\citenamefont {Brunelli}\ \emph {et~al.}(2019)\citenamefont {Brunelli}, \citenamefont {Malz},\ and\ \citenamefont {Nunnenkamp}}]{nunnenkamp2019squeezingbackation} \BibitemOpen \bibfield {author} {\bibinfo {author} {\bibfnamefont {M.}~\bibnamefont {Brunelli}}, \bibinfo {author} {\bibfnamefont {D.}~\bibnamefont {Malz}},\ and\ \bibinfo {author} {\bibfnamefont {A.}~\bibnamefont {Nunnenkamp}},\ }\bibfield {title} {\bibinfo {title} {Conditional dynamics of optomechanical two-tone backaction-evading measurements},\ }\href {https://doi.org/10.1103/PhysRevLett.123.093602} {\bibfield {journal} {\bibinfo {journal} {Phys. Rev. 
Lett.}\ }\textbf {\bibinfo {volume} {123}},\ \bibinfo {pages} {093602} (\bibinfo {year} {2019})}\BibitemShut {NoStop} \bibitem [{\citenamefont {Troullinou}\ \emph {et~al.}(2021)\citenamefont {Troullinou}, \citenamefont {Jim\'enez-Mart\'{\i}nez}, \citenamefont {Kong}, \citenamefont {Lucivero},\ and\ \citenamefont {Mitchell}}]{mitchell2021backationevaging} \BibitemOpen \bibfield {author} {\bibinfo {author} {\bibfnamefont {C.}~\bibnamefont {Troullinou}}, \bibinfo {author} {\bibfnamefont {R.}~\bibnamefont {Jim\'enez-Mart\'{\i}nez}}, \bibinfo {author} {\bibfnamefont {J.}~\bibnamefont {Kong}}, \bibinfo {author} {\bibfnamefont {V.~G.}\ \bibnamefont {Lucivero}},\ and\ \bibinfo {author} {\bibfnamefont {M.~W.}\ \bibnamefont {Mitchell}},\ }\bibfield {title} {\bibinfo {title} {Squeezed-light enhancement and backaction evasion in a high sensitivity optically pumped magnetometer},\ }\href {https://doi.org/10.1103/PhysRevLett.127.193601} {\bibfield {journal} {\bibinfo {journal} {Phys. Rev. Lett.}\ }\textbf {\bibinfo {volume} {127}},\ \bibinfo {pages} {193601} (\bibinfo {year} {2021})}\BibitemShut {NoStop} \bibitem [{\citenamefont {Caves}\ \emph {et~al.}(1980)\citenamefont {Caves}, \citenamefont {Thorne}, \citenamefont {Drever}, \citenamefont {Sandberg},\ and\ \citenamefont {Zimmermann}}]{caves1980rmpbackation} \BibitemOpen \bibfield {author} {\bibinfo {author} {\bibfnamefont {C.~M.}\ \bibnamefont {Caves}}, \bibinfo {author} {\bibfnamefont {K.~S.}\ \bibnamefont {Thorne}}, \bibinfo {author} {\bibfnamefont {R.~W.~P.}\ \bibnamefont {Drever}}, \bibinfo {author} {\bibfnamefont {V.~D.}\ \bibnamefont {Sandberg}},\ and\ \bibinfo {author} {\bibfnamefont {M.}~\bibnamefont {Zimmermann}},\ }\bibfield {title} {\bibinfo {title} {On the measurement of a weak classical force coupled to a quantum-mechanical oscillator. i. issues of principle},\ }\href {https://doi.org/10.1103/RevModPhys.52.341} {\bibfield {journal} {\bibinfo {journal} {Rev. Mod. 
Phys.}\ }\textbf {\bibinfo {volume} {52}},\ \bibinfo {pages} {341} (\bibinfo {year} {1980})}\BibitemShut {NoStop} \bibitem [{\citenamefont {Gietka}\ and\ \citenamefont {Busch}(2021)}]{gietka2021invertedoscDicke} \BibitemOpen \bibfield {author} {\bibinfo {author} {\bibfnamefont {K.}~\bibnamefont {Gietka}}\ and\ \bibinfo {author} {\bibfnamefont {T.}~\bibnamefont {Busch}},\ }\bibfield {title} {\bibinfo {title} {Inverted harmonic oscillator dynamics of the nonequilibrium phase transition in the dicke model},\ }\href {https://doi.org/10.1103/PhysRevE.104.034132} {\bibfield {journal} {\bibinfo {journal} {Phys. Rev. E}\ }\textbf {\bibinfo {volume} {104}},\ \bibinfo {pages} {034132} (\bibinfo {year} {2021})}\BibitemShut {NoStop} \bibitem [{\citenamefont {Holstein}\ and\ \citenamefont {Primakoff}(1940)}]{1940HPtransf} \BibitemOpen \bibfield {author} {\bibinfo {author} {\bibfnamefont {T.}~\bibnamefont {Holstein}}\ and\ \bibinfo {author} {\bibfnamefont {H.}~\bibnamefont {Primakoff}},\ }\bibfield {title} {\bibinfo {title} {Field dependence of the intrinsic domain magnetization of a ferromagnet},\ }\href {https://doi.org/10.1103/PhysRev.58.1098} {\bibfield {journal} {\bibinfo {journal} {Phys. 
Rev.}\ }\textbf {\bibinfo {volume} {58}},\ \bibinfo {pages} {1098} (\bibinfo {year} {1940})}\BibitemShut {NoStop} \bibitem [{\citenamefont {Plankensteiner}\ \emph {et~al.}(2022)\citenamefont {Plankensteiner}, \citenamefont {Hotter},\ and\ \citenamefont {Ritsch}}]{Plankensteiner2022quantumcumulantsjl} \BibitemOpen \bibfield {author} {\bibinfo {author} {\bibfnamefont {D.}~\bibnamefont {Plankensteiner}}, \bibinfo {author} {\bibfnamefont {C.}~\bibnamefont {Hotter}},\ and\ \bibinfo {author} {\bibfnamefont {H.}~\bibnamefont {Ritsch}},\ }\bibfield {title} {\bibinfo {title} {Quantum{C}umulants.jl: {A} {J}ulia framework for generalized mean-field equations in open quantum systems},\ }\href {https://dx.doi.org/10.22331/q-2022-01-04-617} {\bibfield {journal} {\bibinfo {journal} {{Quantum}}\ }\textbf {\bibinfo {volume} {6}} (\bibinfo {year} {2022})}\BibitemShut {NoStop} \bibitem [{sup()}]{supp} \BibitemOpen \href@noop {} {}\bibinfo {howpublished} {\url{https://qojulia.github.io/QuantumCumulants.jl/stable/examples/unique_squeezing/}}\BibitemShut {NoStop} \bibitem [{\citenamefont {Rakhubovsky}\ \emph {et~al.}(2020)\citenamefont {Rakhubovsky}, \citenamefont {Moore}, \citenamefont {Deli\ifmmode~\acute{c}\else \'{c}\fi{}}, \citenamefont {Kiesel}, \citenamefont {Aspelmeyer},\ and\ \citenamefont {Filip}}]{uros2020} \BibitemOpen \bibfield {author} {\bibinfo {author} {\bibfnamefont {A.~A.}\ \bibnamefont {Rakhubovsky}}, \bibinfo {author} {\bibfnamefont {D.~W.}\ \bibnamefont {Moore}}, \bibinfo {author} {\bibfnamefont {U.~c.~v.}\ \bibnamefont {Deli\ifmmode~\acute{c}\else \'{c}\fi{}}}, \bibinfo {author} {\bibfnamefont {N.}~\bibnamefont {Kiesel}}, \bibinfo {author} {\bibfnamefont {M.}~\bibnamefont {Aspelmeyer}},\ and\ \bibinfo {author} {\bibfnamefont {R.}~\bibnamefont {Filip}},\ }\bibfield {title} {\bibinfo {title} {Detecting nonclassical correlations in levitated cavity optomechanics},\ }\href {https://doi.org/10.1103/PhysRevApplied.14.054052} {\bibfield {journal} {\bibinfo {journal} 
{Phys. Rev. Appl.}\ }\textbf {\bibinfo {volume} {14}},\ \bibinfo {pages} {054052} (\bibinfo {year} {2020})}\BibitemShut {NoStop} \bibitem [{\citenamefont {Rudolph}\ \emph {et~al.}(2022)\citenamefont {Rudolph}, \citenamefont {Deli\ifmmode~\acute{c}\else \'{c}\fi{}}, \citenamefont {Aspelmeyer}, \citenamefont {Hornberger},\ and\ \citenamefont {Stickler}}]{uros2022} \BibitemOpen \bibfield {author} {\bibinfo {author} {\bibfnamefont {H.}~\bibnamefont {Rudolph}}, \bibinfo {author} {\bibfnamefont {U.~c.~v.}\ \bibnamefont {Deli\ifmmode~\acute{c}\else \'{c}\fi{}}}, \bibinfo {author} {\bibfnamefont {M.}~\bibnamefont {Aspelmeyer}}, \bibinfo {author} {\bibfnamefont {K.}~\bibnamefont {Hornberger}},\ and\ \bibinfo {author} {\bibfnamefont {B.~A.}\ \bibnamefont {Stickler}},\ }\bibfield {title} {\bibinfo {title} {Force-gradient sensing and entanglement via feedback cooling of interacting nanoparticles},\ }\href {https://doi.org/10.1103/PhysRevLett.129.193602} {\bibfield {journal} {\bibinfo {journal} {Phys. Rev. Lett.}\ }\textbf {\bibinfo {volume} {129}},\ \bibinfo {pages} {193602} (\bibinfo {year} {2022})}\BibitemShut {NoStop} \bibitem [{\citenamefont {Thompson}(2015)}]{2015ionCoulombCrystal} \BibitemOpen \bibfield {author} {\bibinfo {author} {\bibfnamefont {R.~C.}\ \bibnamefont {Thompson}},\ }\bibfield {title} {\bibinfo {title} {Ion coulomb crystals},\ }\href {https://doi.org/10.1080/00107514.2014.989715} {\bibfield {journal} {\bibinfo {journal} {Contemp. 
Phys.}\ }\textbf {\bibinfo {volume} {56}},\ \bibinfo {pages} {63} (\bibinfo {year} {2015})}\BibitemShut {NoStop} \bibitem [{\citenamefont {Safavi-Naini}\ \emph {et~al.}(2018)\citenamefont {Safavi-Naini}, \citenamefont {Lewis-Swan}, \citenamefont {Bohnet}, \citenamefont {G\"arttner}, \citenamefont {Gilmore}, \citenamefont {Jordan}, \citenamefont {Cohn}, \citenamefont {Freericks}, \citenamefont {Rey},\ and\ \citenamefont {Bollinger}}]{2019CrystalDickesym} \BibitemOpen \bibfield {author} {\bibinfo {author} {\bibfnamefont {A.}~\bibnamefont {Safavi-Naini}}, \bibinfo {author} {\bibfnamefont {R.~J.}\ \bibnamefont {Lewis-Swan}}, \bibinfo {author} {\bibfnamefont {J.~G.}\ \bibnamefont {Bohnet}}, \bibinfo {author} {\bibfnamefont {M.}~\bibnamefont {G\"arttner}}, \bibinfo {author} {\bibfnamefont {K.~A.}\ \bibnamefont {Gilmore}}, \bibinfo {author} {\bibfnamefont {J.~E.}\ \bibnamefont {Jordan}}, \bibinfo {author} {\bibfnamefont {J.}~\bibnamefont {Cohn}}, \bibinfo {author} {\bibfnamefont {J.~K.}\ \bibnamefont {Freericks}}, \bibinfo {author} {\bibfnamefont {A.~M.}\ \bibnamefont {Rey}},\ and\ \bibinfo {author} {\bibfnamefont {J.~J.}\ \bibnamefont {Bollinger}},\ }\bibfield {title} {\bibinfo {title} {Verification of a many-ion simulator of the dicke model through slow quenches across a phase transition},\ }\href {https://doi.org/10.1103/PhysRevLett.121.040503} {\bibfield {journal} {\bibinfo {journal} {Phys. Rev. 
Lett.}\ }\textbf {\bibinfo {volume} {121}},\ \bibinfo {pages} {040503} (\bibinfo {year} {2018})}\BibitemShut {NoStop} \bibitem [{\citenamefont {Hertzberg}\ \emph {et~al.}(2010)\citenamefont {Hertzberg}, \citenamefont {Rocheleau}, \citenamefont {Ndukum}, \citenamefont {Savva}, \citenamefont {Clerk},\ and\ \citenamefont {Schwab}}]{schwab2010nanoQBFmeasure} \BibitemOpen \bibfield {author} {\bibinfo {author} {\bibfnamefont {J.~B.}\ \bibnamefont {Hertzberg}}, \bibinfo {author} {\bibfnamefont {T.}~\bibnamefont {Rocheleau}}, \bibinfo {author} {\bibfnamefont {T.}~\bibnamefont {Ndukum}}, \bibinfo {author} {\bibfnamefont {M.}~\bibnamefont {Savva}}, \bibinfo {author} {\bibfnamefont {A.~A.}\ \bibnamefont {Clerk}},\ and\ \bibinfo {author} {\bibfnamefont {K.~C.}\ \bibnamefont {Schwab}},\ }\bibfield {title} {\bibinfo {title} {Back-action-evading measurements of nanomechanical motion},\ }\href {https://doi.org/10.1038/nphys1479} {\bibfield {journal} {\bibinfo {journal} {Nature Physics}\ }\textbf {\bibinfo {volume} {6}},\ \bibinfo {pages} {213} (\bibinfo {year} {2010})}\BibitemShut {NoStop} \bibitem [{\citenamefont {M{\o}ller}\ \emph {et~al.}(2017)\citenamefont {M{\o}ller}, \citenamefont {Thomas}, \citenamefont {Vasilakis}, \citenamefont {Zeuthen}, \citenamefont {Tsaturyan}, \citenamefont {Balabas}, \citenamefont {Jensen}, \citenamefont {Schliesser}, \citenamefont {Hammerer},\ and\ \citenamefont {Polzik}}]{polzik2017baemeasure} \BibitemOpen \bibfield {author} {\bibinfo {author} {\bibfnamefont {C.~B.}\ \bibnamefont {M{\o}ller}}, \bibinfo {author} {\bibfnamefont {R.~A.}\ \bibnamefont {Thomas}}, \bibinfo {author} {\bibfnamefont {G.}~\bibnamefont {Vasilakis}}, \bibinfo {author} {\bibfnamefont {E.}~\bibnamefont {Zeuthen}}, \bibinfo {author} {\bibfnamefont {Y.}~\bibnamefont {Tsaturyan}}, \bibinfo {author} {\bibfnamefont {M.}~\bibnamefont {Balabas}}, \bibinfo {author} {\bibfnamefont {K.}~\bibnamefont {Jensen}}, \bibinfo {author} {\bibfnamefont {A.}~\bibnamefont {Schliesser}}, \bibinfo 
{author} {\bibfnamefont {K.}~\bibnamefont {Hammerer}},\ and\ \bibinfo {author} {\bibfnamefont {E.~S.}\ \bibnamefont {Polzik}},\ }\bibfield {title} {\bibinfo {title} {Quantum back-action-evading measurement of motion in a negative mass reference frame},\ }\href {https://doi.org/10.1038/nature22980} {\bibfield {journal} {\bibinfo {journal} {Nature}\ }\textbf {\bibinfo {volume} {547}},\ \bibinfo {pages} {191} (\bibinfo {year} {2017})}\BibitemShut {NoStop} \bibitem [{\citenamefont {Pavlov}\ \emph {et~al.}(2023)\citenamefont {Pavlov}, \citenamefont {Porras},\ and\ \citenamefont {Ivanov}}]{porras2023ddmetrologycollectivespin} \BibitemOpen \bibfield {author} {\bibinfo {author} {\bibfnamefont {V.~P.}\ \bibnamefont {Pavlov}}, \bibinfo {author} {\bibfnamefont {D.}~\bibnamefont {Porras}},\ and\ \bibinfo {author} {\bibfnamefont {P.~A.}\ \bibnamefont {Ivanov}},\ }\href {https://doi.org/10.48550/ARXIV.2302.05216} {\bibinfo {title} {Quantum metrology with critical driven-dissipative collective spin system}} (\bibinfo {year} {2023})\BibitemShut {NoStop} \bibitem [{\citenamefont {Ilias}\ \emph {et~al.}(2023)\citenamefont {Ilias}, \citenamefont {Yang}, \citenamefont {Huelga},\ and\ \citenamefont {Plenio}}]{ilias2023criticalityenhanced} \BibitemOpen \bibfield {author} {\bibinfo {author} {\bibfnamefont {T.}~\bibnamefont {Ilias}}, \bibinfo {author} {\bibfnamefont {D.}~\bibnamefont {Yang}}, \bibinfo {author} {\bibfnamefont {S.~F.}\ \bibnamefont {Huelga}},\ and\ \bibinfo {author} {\bibfnamefont {M.~B.}\ \bibnamefont {Plenio}},\ }\href@noop {} {\bibinfo {title} {Criticality-enhanced electromagnetic field sensor with single trapped ions}} (\bibinfo {year} {2023}),\ \Eprint {https://arxiv.org/abs/2304.02050} {arXiv:2304.02050 [quant-ph]} \BibitemShut {NoStop} \bibitem [{tob()}]{tobe2023} \BibitemOpen \href@noop {} {}\bibinfo {note} {K. Gietka, C. Hotter, and H. 
Ritsch (unpublished)}\BibitemShut {NoStop} \bibitem [{\citenamefont {Mirkhalaf}\ \emph {et~al.}(2020)\citenamefont {Mirkhalaf}, \citenamefont {Witkowska},\ and\ \citenamefont {Lepori}}]{2020criticalwitkowska} \BibitemOpen \bibfield {author} {\bibinfo {author} {\bibfnamefont {S.~S.}\ \bibnamefont {Mirkhalaf}}, \bibinfo {author} {\bibfnamefont {E.}~\bibnamefont {Witkowska}},\ and\ \bibinfo {author} {\bibfnamefont {L.}~\bibnamefont {Lepori}},\ }\bibfield {title} {\bibinfo {title} {Supersensitive quantum sensor based on criticality in an antiferromagnetic spinor condensate},\ }\href {https://doi.org/10.1103/PhysRevA.101.043609} {\bibfield {journal} {\bibinfo {journal} {Phys. Rev. A}\ }\textbf {\bibinfo {volume} {101}},\ \bibinfo {pages} {043609} (\bibinfo {year} {2020})}\BibitemShut {NoStop} \bibitem [{\citenamefont {Gietka}\ \emph {et~al.}(2021)\citenamefont {Gietka}, \citenamefont {Metz}, \citenamefont {Keller},\ and\ \citenamefont {Li}}]{Gietka2021adiabaticcritical} \BibitemOpen \bibfield {author} {\bibinfo {author} {\bibfnamefont {K.}~\bibnamefont {Gietka}}, \bibinfo {author} {\bibfnamefont {F.}~\bibnamefont {Metz}}, \bibinfo {author} {\bibfnamefont {T.}~\bibnamefont {Keller}},\ and\ \bibinfo {author} {\bibfnamefont {J.}~\bibnamefont {Li}},\ }\bibfield {title} {\bibinfo {title} {Adiabatic critical quantum metrology cannot reach the {H}eisenberg limit even when shortcuts to adiabaticity are applied},\ }\href {https://doi.org/10.22331/q-2021-07-01-489} {\bibfield {journal} {\bibinfo {journal} {{Quantum}}\ }\textbf {\bibinfo {volume} {5}},\ \bibinfo {pages} {489} (\bibinfo {year} {2021})}\BibitemShut {NoStop} \bibitem [{\citenamefont {Liu}\ \emph {et~al.}(2021)\citenamefont {Liu}, \citenamefont {Chen}, \citenamefont {Jiang}, \citenamefont {Yang}, \citenamefont {Wu}, \citenamefont {Li}, \citenamefont {Yuan}, \citenamefont {Peng},\ and\ \citenamefont {Du}}]{2021liuexperiment} \BibitemOpen \bibfield {author} {\bibinfo {author} {\bibfnamefont {R.}~\bibnamefont {Liu}}, 
\bibinfo {author} {\bibfnamefont {Y.}~\bibnamefont {Chen}}, \bibinfo {author} {\bibfnamefont {M.}~\bibnamefont {Jiang}}, \bibinfo {author} {\bibfnamefont {X.}~\bibnamefont {Yang}}, \bibinfo {author} {\bibfnamefont {Z.}~\bibnamefont {Wu}}, \bibinfo {author} {\bibfnamefont {Y.}~\bibnamefont {Li}}, \bibinfo {author} {\bibfnamefont {H.}~\bibnamefont {Yuan}}, \bibinfo {author} {\bibfnamefont {X.}~\bibnamefont {Peng}},\ and\ \bibinfo {author} {\bibfnamefont {J.}~\bibnamefont {Du}},\ }\bibfield {title} {\bibinfo {title} {Experimental critical quantum metrology with the heisenberg scaling},\ }\href {https://doi.org/10.1038/s41534-021-00507-x} {\bibfield {journal} {\bibinfo {journal} {npj Quantum Information}\ }\textbf {\bibinfo {volume} {7}},\ \bibinfo {pages} {170} (\bibinfo {year} {2021})}\BibitemShut {NoStop} \bibitem [{\citenamefont {Mirkhalaf}\ \emph {et~al.}(2021)\citenamefont {Mirkhalaf}, \citenamefont {Benedicto~Orenes}, \citenamefont {Mitchell},\ and\ \citenamefont {Witkowska}}]{2021criticalwitkowska} \BibitemOpen \bibfield {author} {\bibinfo {author} {\bibfnamefont {S.~S.}\ \bibnamefont {Mirkhalaf}}, \bibinfo {author} {\bibfnamefont {D.}~\bibnamefont {Benedicto~Orenes}}, \bibinfo {author} {\bibfnamefont {M.~W.}\ \bibnamefont {Mitchell}},\ and\ \bibinfo {author} {\bibfnamefont {E.}~\bibnamefont {Witkowska}},\ }\bibfield {title} {\bibinfo {title} {Criticality-enhanced quantum sensing in ferromagnetic bose-einstein condensates: Role of readout measurement and detection noise},\ }\href {https://doi.org/10.1103/PhysRevA.103.023317} {\bibfield {journal} {\bibinfo {journal} {Phys. Rev. 
A}\ }\textbf {\bibinfo {volume} {103}},\ \bibinfo {pages} {023317} (\bibinfo {year} {2021})}\BibitemShut {NoStop} \bibitem [{\citenamefont {Ilias}\ \emph {et~al.}(2022)\citenamefont {Ilias}, \citenamefont {Yang}, \citenamefont {Huelga},\ and\ \citenamefont {Plenio}}]{2022PRXQuantumcontinus} \BibitemOpen \bibfield {author} {\bibinfo {author} {\bibfnamefont {T.}~\bibnamefont {Ilias}}, \bibinfo {author} {\bibfnamefont {D.}~\bibnamefont {Yang}}, \bibinfo {author} {\bibfnamefont {S.~F.}\ \bibnamefont {Huelga}},\ and\ \bibinfo {author} {\bibfnamefont {M.~B.}\ \bibnamefont {Plenio}},\ }\bibfield {title} {\bibinfo {title} {Criticality-enhanced quantum sensing via continuous measurement},\ }\href {https://doi.org/10.1103/PRXQuantum.3.010354} {\bibfield {journal} {\bibinfo {journal} {PRX Quantum}\ }\textbf {\bibinfo {volume} {3}},\ \bibinfo {pages} {010354} (\bibinfo {year} {2022})}\BibitemShut {NoStop} \bibitem [{\citenamefont {Garbe}\ \emph {et~al.}(2022{\natexlab{a}})\citenamefont {Garbe}, \citenamefont {Abah}, \citenamefont {Felicetti},\ and\ \citenamefont {Puebla}}]{2022_Garbe_heisenbegkible} \BibitemOpen \bibfield {author} {\bibinfo {author} {\bibfnamefont {L.}~\bibnamefont {Garbe}}, \bibinfo {author} {\bibfnamefont {O.}~\bibnamefont {Abah}}, \bibinfo {author} {\bibfnamefont {S.}~\bibnamefont {Felicetti}},\ and\ \bibinfo {author} {\bibfnamefont {R.}~\bibnamefont {Puebla}},\ }\bibfield {title} {\bibinfo {title} {Critical quantum metrology with fully-connected models: from heisenberg to kibble–zurek scaling},\ }\href {https://doi.org/10.1088/2058-9565/ac6ca5} {\bibfield {journal} {\bibinfo {journal} {Quantum Science and Technology}\ }\textbf {\bibinfo {volume} {7}},\ \bibinfo {pages} {035010} (\bibinfo {year} {2022}{\natexlab{a}})}\BibitemShut {NoStop} \bibitem [{\citenamefont {Gietka}\ \emph {et~al.}(2022)\citenamefont {Gietka}, \citenamefont {Ruks},\ and\ \citenamefont {Busch}}]{Gietka2022understanding} \BibitemOpen \bibfield {author} {\bibinfo {author} {\bibfnamefont 
{K.}~\bibnamefont {Gietka}}, \bibinfo {author} {\bibfnamefont {L.}~\bibnamefont {Ruks}},\ and\ \bibinfo {author} {\bibfnamefont {T.}~\bibnamefont {Busch}},\ }\bibfield {title} {\bibinfo {title} {Understanding and {I}mproving {C}ritical {M}etrology. {Q}uenching {S}uperradiant {L}ight-{M}atter {S}ystems {B}eyond the {C}ritical {P}oint},\ }\href {https://doi.org/10.22331/q-2022-04-27-700} {\bibfield {journal} {\bibinfo {journal} {{Quantum}}\ }\textbf {\bibinfo {volume} {6}},\ \bibinfo {pages} {700} (\bibinfo {year} {2022})}\BibitemShut {NoStop} \bibitem [{\citenamefont {Ying}\ \emph {et~al.}(2022)\citenamefont {Ying}, \citenamefont {Felicetti}, \citenamefont {Liu},\ and\ \citenamefont {Braak}}]{2022entropycritical} \BibitemOpen \bibfield {author} {\bibinfo {author} {\bibfnamefont {Z.-J.}\ \bibnamefont {Ying}}, \bibinfo {author} {\bibfnamefont {S.}~\bibnamefont {Felicetti}}, \bibinfo {author} {\bibfnamefont {G.}~\bibnamefont {Liu}},\ and\ \bibinfo {author} {\bibfnamefont {D.}~\bibnamefont {Braak}},\ }\bibfield {title} {\bibinfo {title} {Critical quantum metrology in the non-linear quantum rabi model},\ }\href {https://doi.org/10.3390/e24081015} {\bibfield {journal} {\bibinfo {journal} {Entropy}\ }\textbf {\bibinfo {volume} {24}} (\bibinfo {year} {2022})}\BibitemShut {NoStop} \bibitem [{\citenamefont {Ding}\ \emph {et~al.}(2022)\citenamefont {Ding}, \citenamefont {Liu}, \citenamefont {Shi}, \citenamefont {Guo}, \citenamefont {M{\o}lmer},\ and\ \citenamefont {Adams}}]{2022dingnature} \BibitemOpen \bibfield {author} {\bibinfo {author} {\bibfnamefont {D.-S.}\ \bibnamefont {Ding}}, \bibinfo {author} {\bibfnamefont {Z.-K.}\ \bibnamefont {Liu}}, \bibinfo {author} {\bibfnamefont {B.-S.}\ \bibnamefont {Shi}}, \bibinfo {author} {\bibfnamefont {G.-C.}\ \bibnamefont {Guo}}, \bibinfo {author} {\bibfnamefont {K.}~\bibnamefont {M{\o}lmer}},\ and\ \bibinfo {author} {\bibfnamefont {C.~S.}\ \bibnamefont {Adams}},\ }\bibfield {title} {\bibinfo {title} {Enhanced metrology at the critical 
point of a many-body rydberg atomic system},\ }\href {https://doi.org/10.1038/s41567-022-01777-8} {\bibfield {journal} {\bibinfo {journal} {Nature Physics}\ } (\bibinfo {year} {2022})}\BibitemShut {NoStop} \bibitem [{\citenamefont {Aybar}\ \emph {et~al.}(2022)\citenamefont {Aybar}, \citenamefont {Niezgoda}, \citenamefont {Mirkhalaf}, \citenamefont {Mitchell}, \citenamefont {Benedicto~Orenes},\ and\ \citenamefont {Witkowska}}]{Aybar2022criticalquantum} \BibitemOpen \bibfield {author} {\bibinfo {author} {\bibfnamefont {E.}~\bibnamefont {Aybar}}, \bibinfo {author} {\bibfnamefont {A.}~\bibnamefont {Niezgoda}}, \bibinfo {author} {\bibfnamefont {S.~S.}\ \bibnamefont {Mirkhalaf}}, \bibinfo {author} {\bibfnamefont {M.~W.}\ \bibnamefont {Mitchell}}, \bibinfo {author} {\bibfnamefont {D.}~\bibnamefont {Benedicto~Orenes}},\ and\ \bibinfo {author} {\bibfnamefont {E.}~\bibnamefont {Witkowska}},\ }\bibfield {title} {\bibinfo {title} {Critical quantum thermometry and its feasibility in spin systems},\ }\href {https://doi.org/10.22331/q-2022-09-19-808} {\bibfield {journal} {\bibinfo {journal} {{Quantum}}\ }\textbf {\bibinfo {volume} {6}},\ \bibinfo {pages} {808} (\bibinfo {year} {2022})}\BibitemShut {NoStop} \bibitem [{\citenamefont {Garbe}\ \emph {et~al.}(2022{\natexlab{b}})\citenamefont {Garbe}, \citenamefont {Abah}, \citenamefont {Felicetti},\ and\ \citenamefont {Puebla}}]{2022garbeexponetialsqueezing} \BibitemOpen \bibfield {author} {\bibinfo {author} {\bibfnamefont {L.}~\bibnamefont {Garbe}}, \bibinfo {author} {\bibfnamefont {O.}~\bibnamefont {Abah}}, \bibinfo {author} {\bibfnamefont {S.}~\bibnamefont {Felicetti}},\ and\ \bibinfo {author} {\bibfnamefont {R.}~\bibnamefont {Puebla}},\ }\bibfield {title} {\bibinfo {title} {Exponential time-scaling of estimation precision by reaching a quantum critical point},\ }\href {https://doi.org/10.1103/PhysRevResearch.4.043061} {\bibfield {journal} {\bibinfo {journal} {Phys. Rev. 
Research}\ }\textbf {\bibinfo {volume} {4}},\ \bibinfo {pages} {043061} (\bibinfo {year} {2022}{\natexlab{b}})}\BibitemShut {NoStop} \bibitem [{\citenamefont {Gietka}\ and\ \citenamefont {Ritsch}(2023)}]{gietka2023squeezingHeisenberg} \BibitemOpen \bibfield {author} {\bibinfo {author} {\bibfnamefont {K.}~\bibnamefont {Gietka}}\ and\ \bibinfo {author} {\bibfnamefont {H.}~\bibnamefont {Ritsch}},\ }\bibfield {title} {\bibinfo {title} {Squeezing and overcoming the heisenberg scaling with spin-orbit coupled quantum gases},\ }\href {https://doi.org/10.1103/PhysRevLett.130.090802} {\bibfield {journal} {\bibinfo {journal} {Phys. Rev. Lett.}\ }\textbf {\bibinfo {volume} {130}},\ \bibinfo {pages} {090802} (\bibinfo {year} {2023})}\BibitemShut {NoStop} \bibitem [{\citenamefont {Aspelmeyer}\ \emph {et~al.}(2014)\citenamefont {Aspelmeyer}, \citenamefont {Kippenberg},\ and\ \citenamefont {Marquardt}}]{aspelmeyer2014cavityoptom} \BibitemOpen \bibfield {author} {\bibinfo {author} {\bibfnamefont {M.}~\bibnamefont {Aspelmeyer}}, \bibinfo {author} {\bibfnamefont {T.~J.}\ \bibnamefont {Kippenberg}},\ and\ \bibinfo {author} {\bibfnamefont {F.}~\bibnamefont {Marquardt}},\ }\bibfield {title} {\bibinfo {title} {Cavity optomechanics},\ }\href {https://doi.org/10.1103/RevModPhys.86.1391} {\bibfield {journal} {\bibinfo {journal} {Rev. Mod. 
Phys.}\ }\textbf {\bibinfo {volume} {86}},\ \bibinfo {pages} {1391} (\bibinfo {year} {2014})}\BibitemShut {NoStop} \bibitem [{\citenamefont {Zhang}\ \emph {et~al.}(2016)\citenamefont {Zhang}, \citenamefont {Mossman}, \citenamefont {Busch}, \citenamefont {Engels},\ and\ \citenamefont {Zhang}}]{busch2016socreview} \BibitemOpen \bibfield {author} {\bibinfo {author} {\bibfnamefont {Y.}~\bibnamefont {Zhang}}, \bibinfo {author} {\bibfnamefont {M.~E.}\ \bibnamefont {Mossman}}, \bibinfo {author} {\bibfnamefont {T.}~\bibnamefont {Busch}}, \bibinfo {author} {\bibfnamefont {P.}~\bibnamefont {Engels}},\ and\ \bibinfo {author} {\bibfnamefont {C.}~\bibnamefont {Zhang}},\ }\bibfield {title} {\bibinfo {title} {Properties of spin--orbit-coupled bose--einstein condensates},\ }\href {https://doi.org/10.1007/s11467-016-0560-y} {\bibfield {journal} {\bibinfo {journal} {Frontiers of Physics}\ }\textbf {\bibinfo {volume} {11}},\ \bibinfo {pages} {118103} (\bibinfo {year} {2016})}\BibitemShut {NoStop} \bibitem [{\citenamefont {Cai}\ \emph {et~al.}(2021)\citenamefont {Cai}, \citenamefont {Liu}, \citenamefont {Zhao}, \citenamefont {Wu}, \citenamefont {Mei}, \citenamefont {Jiang}, \citenamefont {He}, \citenamefont {Zhang}, \citenamefont {Zhou},\ and\ \citenamefont {Duan}}]{duan2021qrbphonon} \BibitemOpen \bibfield {author} {\bibinfo {author} {\bibfnamefont {M.~L.}\ \bibnamefont {Cai}}, \bibinfo {author} {\bibfnamefont {Z.~D.}\ \bibnamefont {Liu}}, \bibinfo {author} {\bibfnamefont {W.~D.}\ \bibnamefont {Zhao}}, \bibinfo {author} {\bibfnamefont {Y.~K.}\ \bibnamefont {Wu}}, \bibinfo {author} {\bibfnamefont {Q.~X.}\ \bibnamefont {Mei}}, \bibinfo {author} {\bibfnamefont {Y.}~\bibnamefont {Jiang}}, \bibinfo {author} {\bibfnamefont {L.}~\bibnamefont {He}}, \bibinfo {author} {\bibfnamefont {X.}~\bibnamefont {Zhang}}, \bibinfo {author} {\bibfnamefont {Z.~C.}\ \bibnamefont {Zhou}},\ and\ \bibinfo {author} {\bibfnamefont {L.~M.}\ \bibnamefont {Duan}},\ }\bibfield {title} {\bibinfo {title} {Observation 
of a quantum phase transition in the quantum rabi model with a single trapped ion},\ }\href {https://doi.org/10.1038/s41467-021-21425-8} {\bibfield {journal} {\bibinfo {journal} {Nature Communications}\ }\textbf {\bibinfo {volume} {12}},\ \bibinfo {pages} {1126} (\bibinfo {year} {2021})}\BibitemShut {NoStop} \bibitem [{\citenamefont {Yunusova}\ \emph {et~al.}(2019)\citenamefont {Yunusova}, \citenamefont {Konstantinov}, \citenamefont {Bouchiat},\ and\ \citenamefont {Chepelianskii}}]{konstantinov2019qrmhelium} \BibitemOpen \bibfield {author} {\bibinfo {author} {\bibfnamefont {K.~M.}\ \bibnamefont {Yunusova}}, \bibinfo {author} {\bibfnamefont {D.}~\bibnamefont {Konstantinov}}, \bibinfo {author} {\bibfnamefont {H.}~\bibnamefont {Bouchiat}},\ and\ \bibinfo {author} {\bibfnamefont {A.~D.}\ \bibnamefont {Chepelianskii}},\ }\bibfield {title} {\bibinfo {title} {Coupling between rydberg states and landau levels of electrons trapped on liquid helium},\ }\href {https://doi.org/10.1103/PhysRevLett.122.176802} {\bibfield {journal} {\bibinfo {journal} {Phys. Rev. Lett.}\ }\textbf {\bibinfo {volume} {122}},\ \bibinfo {pages} {176802} (\bibinfo {year} {2019})}\BibitemShut {NoStop} \bibitem [{\citenamefont {Kustura}\ \emph {et~al.}(2022)\citenamefont {Kustura}, \citenamefont {Gonzalez-Ballestero}, \citenamefont {Sommer}, \citenamefont {Meyer}, \citenamefont {Quidant},\ and\ \citenamefont {Romero-Isart}}]{kustura2022mechanicalsqueezing} \BibitemOpen \bibfield {author} {\bibinfo {author} {\bibfnamefont {K.}~\bibnamefont {Kustura}}, \bibinfo {author} {\bibfnamefont {C.}~\bibnamefont {Gonzalez-Ballestero}}, \bibinfo {author} {\bibfnamefont {A.~d. 
l.~R.}\ \bibnamefont {Sommer}}, \bibinfo {author} {\bibfnamefont {N.}~\bibnamefont {Meyer}}, \bibinfo {author} {\bibfnamefont {R.}~\bibnamefont {Quidant}},\ and\ \bibinfo {author} {\bibfnamefont {O.}~\bibnamefont {Romero-Isart}},\ }\bibfield {title} {\bibinfo {title} {Mechanical squeezing via unstable dynamics in a microcavity},\ }\href {https://doi.org/10.1103/PhysRevLett.128.143601} {\bibfield {journal} {\bibinfo {journal} {Phys. Rev. Lett.}\ }\textbf {\bibinfo {volume} {128}},\ \bibinfo {pages} {143601} (\bibinfo {year} {2022})}\BibitemShut {NoStop} \bibitem [{\citenamefont {Kr\"{a}mer}\ \emph {et~al.}(2018)\citenamefont {Kr\"{a}mer}, \citenamefont {Plankensteiner}, \citenamefont {Ostermann},\ and\ \citenamefont {Ritsch}}]{kramer2018quantumoptics} \BibitemOpen \bibfield {author} {\bibinfo {author} {\bibfnamefont {S.}~\bibnamefont {Kr\"{a}mer}}, \bibinfo {author} {\bibfnamefont {D.}~\bibnamefont {Plankensteiner}}, \bibinfo {author} {\bibfnamefont {L.}~\bibnamefont {Ostermann}},\ and\ \bibinfo {author} {\bibfnamefont {H.}~\bibnamefont {Ritsch}},\ }\bibfield {title} {\bibinfo {title} {{QuantumOptics}.jl: A {J}ulia framework for simulating open quantum systems},\ }\href {https://dx.doi.org/10.1016/j.cpc.2018.02.004} {\bibfield {journal} {\bibinfo {journal} {Comput. Phys. Commun}\ }\textbf {\bibinfo {volume} {227}} (\bibinfo {year} {2018})}\BibitemShut {NoStop} \end{thebibliography} \end{document}
\begin{document} \centerline{\Large\bf TRANSIENT NEAREST NEIGHBOR RANDOM WALK} \centerline{\Large\bf AND BESSEL PROCESS} \renewcommand{\thefootnote}{1} \noindent \textbf{Endre Cs\'{a}ki}\footnote{Research supported by the Hungarian National Foundation for Scientif\/ic Research, Grant No. K 61052 and K 67961.}\newline Alfr\'ed R\'enyi Institute of Mathematics, Hungarian Academy of Sciences, Budapest, P.O.B. 127, H-1364, Hungary. E-mail address: [email protected] \renewcommand{\thefootnote}{2} \noindent \textbf{Ant\'{o}nia F\"{o}ldes}\footnote{Research supported by a PSC CUNY Grant, No. 69020-0038.}\newline Department of Mathematics, College of Staten Island, CUNY, 2800 Victory Blvd., Staten Island, New York 10314, U.S.A. E-mail address: [email protected] \noindent \textbf{P\'al R\'ev\'esz}$^1$ \newline Institut f\"ur Statistik und Wahrscheinlichkeitstheorie, Technische Universit\"at Wien, Wiedner Hauptstrasse 8-10/107 A-1040 Vienna, Austria. E-mail address: [email protected] \noindent \textit{Abstract:} We prove a strong invariance principle between a transient Bessel process and a certain nearest neighbor (NN) random walk that is constructed from the former by using stopping times. It is also shown that their local times are close enough to share the same strong limit theorems. It is shown furthermore, that if the difference between the distributions of two NN random walks is small, then the walks themselves can be constructed so that they are close enough. Finally, some consequences concerning strong limit theorems are discussed. \noindent AMS 2000 Subject Classification: Primary 60F17; Secondary 60F15, 60J10, 60J55, 60J60. \noindent Keywords: transient random walk, Bessel process, strong invariance principle, local time, strong theorems. \noindent Running head: NN random walk and Bessel process. 
\renewcommand{\thesection}{\arabic{section}.} \section{Introduction} \renewcommand{\thesection}{\arabic{section}} \setcounter{equation}{0} \setcounter{theorem}{0} \setcounter{lemma}{0} In this paper we consider a nearest neighbor (NN) random walk, defined as follows: let $X_0=0,\ X_1,X_2,\ldots$ be a Markov chain with \begin{eqnarray}\label{defff} E_i&:=&{\bf P}(X_{n+1}=i+1\mid X_n=i)=1-{\bf P}(X_{n+1}=i-1\mid X_n=i)\\ &=&\left\{\begin{array}{ll} 1\quad & {\rm if}\quad i=0\\ \nonumber 1/2+p_i\quad & {\rm if}\quad i=1,2,\ldots, \end{array}\right. \end{eqnarray} where $-1/2\leq p_i\leq 1/2,\ i=1,2,\ldots$ In case $0< p_i\leq 1/2$ the sequence $\{X_i\}$ describes the motion of a particle which starts at zero, moves over the nonnegative integers and going away from 0 with a larger probability than to the direction of 0. We will be interested in the case when $p_i\sim B/4i$ with $B>0$ as $i\to\infty$. We want to show that in a certain sense, this Markov chain is a discrete analogue of the continuous Bessel process and establish a strong invariance principle between these two processes. The properties of the discrete model, often called birth and death chain, connections with orthogonal polynomials in particular, have been treated extensively in the literature. See e.g. the classical paper by Karlin and McGregor \cite{KMG}, or more recent papers by Coolen-Schrijner and Van Doorn \cite{C-SD} and Dette \cite{DE01}. In an earlier paper \cite{CSFR} we investigated the local time of this Markov chain in the transient case. There is a well-known result in the literature (cf. e.g. Chung \cite{CH}) characterizing those sequences $\{p_i\}$ for which $\{X_i\}$ is transient (resp. recurrent). 
\noindent {\bf Theorem A:} (\cite{CH}, page 74) {\it Let $X_n$ be a Markov chain with transition probabilities given in {\rm (\ref{defff})} with $-1/2<p_i<1/2$, $i=1,2,\ldots$ Define \begin{equation} U_i:={\frac{1-E_i}{E_i}}={\frac{1/2-p_i}{1/2+p_i}} \label{uif} \end{equation} Then $X_n$ is transient if and only if} $$\sum_{k=1}^{\infty} \prod_{i=1}^k U_i < \infty.$$ As a consequence, the Markov chain $(X_n)$ with $p_R\sim B/4R,\, R\to\infty$ is transient if $B>1$ and recurrent if $B<1$. The Bessel process of order $\nu$, denoted by $Y_\nu(t),\, t\geq 0$ is a diffusion process on the line with generator $$ \frac{1}{2}\frac{d^2}{dx^2}+\frac{2\nu+1}{2x}\frac{d}{dx}. $$ $d=2\nu+2$ is the dimension of the Bessel process. If $d$ is a positive integer, then $Y_\nu(\cdot)$ is the absolute value of a $d$-dimensional Brownian motion. The Bessel process $Y_\nu(t)$ is transient if and only if $\nu>0$. The properties of the Bessel process were extensively studied in the literature. Cf. Borodin and Salminen \cite{BS}, Revuz and Yor \cite{RY}, Knight \cite{KN81}. Lamperti \cite{LA63} determined the limiting distribution of $X_n$ and also proved a weak convergence theorem in a more general setting. His result in our case reads as follows. \noindent {\bf Theorem B:} (\cite{LA63}) {\it Let $X_n$ be a Markov chain with transition probabilities given in {\rm (\ref{defff})} with $-1/2<p_i<1/2$, $i=1,2,\ldots$ If $\,\,\lim_{R\to\infty}\,Rp_R=B/4>-1/4$,} {\it then the following weak convergence holds}: $$ \frac{X_{[nt]}}{\sqrt{n}}\Longrightarrow Y_{(B-1)/2}(t) $$ { \it in the space D}[0,1]. {\it In particular,} $$ \lim_{n\to \infty} {\bf P}\left(\frac{X_n}{\sqrt{n}}<x\right)= \frac{1}{2^{B/2-1/2}\Gamma(B/2+1/2)}\int_0^x u^Be^{-u^2/2}\, du. $$ In Theorems A and B values of $p_i$ can be negative. In the sequel however we deal only with the case when $p_i$ are non-negative, and the chain is transient, which will be assumed throughout without mentioning it. 
Let \begin{equation} D(R,\infty):=1+\sum_{j=1}^{\infty}\prod_{i=1}^j U_{R+i}, \end{equation} and define \begin{equation} p_R^*:=\frac{\frac{1}{2}+p_R}{D(R, \infty)}=1-q^*_R \end{equation} Now let $\xi(R,\infty)$, $R=0,1,2,\ldots$ be the total local time at $R$ of the Markov chain $\{X_n\}$, i.e. \begin{equation} \xi(R,\infty):=\#\{n\geq 0: X_n=R\}. \label{loct} \end{equation} \noindent {\bf Theorem C:} (\cite{CSFR}) {\it For a transient NN random walk} \begin{equation} {\bf P}(\xi(R,\infty)=k)=p_R^*(q_R ^*)^{k-1}, \qquad k=1,2,\ldots \label{locelo} \end{equation} Moreover, $\eta(R,t)$, $R>0$ will denote the local time of the Bessel process, i.e. $$ \eta(R,t):=\lim_{\varepsilon\to 0}\frac1{2\varepsilon}\int_0^t I\{Y_\nu(s)\in (R-\varepsilon,R+\varepsilon)\}\, ds,\qquad \eta(R,\infty):=\lim_{t \to \infty }\eta(R,t). $$ It is well-known that $\eta(R,\infty)$ has exponential distribution (see e.g. \cite{BS}). \begin{equation} {\bf P}(\eta(R,\infty)< x)= 1-\exp\left(-\frac{\nu }{R}\,x\right). \end{equation} \newline For $0<a<b$ let \begin{equation} \tau:=\tau(a,b)=\min\{t\geq 0:\, Y_\nu(t)\notin (a,b)\}. \label{stopping} \end{equation} Then we have (cf. Borodin and Salminen \cite{BS}, Section 6, 3.0.1 and 3.0.4).
\noindent {\bf Theorem D:} {\it For} $0<a<x<b$ {\it we have} \begin{equation} {\bf P}_x(Y_\nu(\tau)=a)=1-{\bf P}_x(Y_\nu(\tau)=b)= \frac{x^{-2\nu}-b^{-2\nu}}{a^{-2\nu}-b^{-2\nu}}, \label{hit} \end{equation} \begin{equation} {\bf E}_xe^{-\alpha\tau}=\frac{S_\nu(b\sqrt{2\alpha},x\sqrt{2\alpha})+ S_\nu(x\sqrt{2\alpha},a\sqrt{2\alpha})} {S_\nu(b\sqrt{2\alpha},a\sqrt{2\alpha})}, \label{lap} \end{equation} \noindent {\it where} \begin{equation} S_\nu(u,v)=(uv)^{-\nu}(I_\nu(u)K_\nu(v)-K_\nu(u)I_\nu(v)), \label{snu} \end{equation} \noindent $I_\nu$ {\it and} $K_\nu$ {\it being the modified Bessel functions of the first and second kind, resp.} Here and in what follows ${\bf P}_x$ and ${\bf E}_x$ denote conditional probability, resp. expectation under $Y_{\nu}(0)=x.$ For simplicity we will use ${\bf P}_0={\bf P},$ and ${\bf E}_0={\bf E}.$ Now consider $Y_\nu(t),\, t\geq 0$, a Bessel process of order $\nu$, $Y_\nu(0)=0$, and let $X_n,\, n=0,1,2,\ldots$ be an NN random walk with $p_0=p_1=1/2$, \begin{equation} p_R=\frac{(R-1)^{-2\nu}-R^{-2\nu}}{(R-1)^{-2\nu}-(R+1)^{-2\nu}}-\frac12,\qquad R=2,3,\ldots \label{pr} \end{equation} Our main results are strong invariance principles concerning Bessel process, NN random walk and their local times. \begin{theorem} On a suitable probability space we can construct a Bessel process $\{Y_\nu(t),\, t\geq 0\},$ $\nu>0$ and an NN random walk $\{X_n,\, n=0,1,2,\ldots\}$ with $p_R$ as in {\rm (\ref{pr})} such that for any $\varepsilon>0$, as $n\to\infty$ we have \begin{equation} Y_\nu(n)-X_n=O(n^{1/4+\varepsilon})\qquad {\rm a.s.} \label{inv} \end{equation} \end{theorem} Our strong invariance principle for local times reads as follows. \begin{theorem} Let $Y_\nu(t)$ and $X_n$ be as in {\rm Theorem 1.1} and let $\eta$ and $\xi$ be their respective local times.
As $R\to\infty$, we have \begin{equation} \xi(R,\infty)-\eta(R,\infty)=O(R^{1/2}\log R)\quad {\rm a.s.} \end{equation} \end{theorem} We prove the following strong invariance principle between two NN random walks. \begin{theorem} Let $\{X_n^{(1)}\}_{n=0}^\infty$ and $\{X_n^{(2)}\}_{n=0}^\infty$ be two NN random walks with $p_j^{(1)}$ and $p_j^{(2)}$, resp. Assume that \begin{equation} \left|p_j^{(1)}-\frac{B}{4j}\right|\leq \frac{C}{j^\gamma} \label{pj1} \end{equation} and \begin{equation} \left|p_j^{(2)}-\frac{B}{4j}\right|\leq \frac{C}{j^\gamma} \label{pj2} \end{equation} $j=1,2,\ldots$ with $B>1$, $1<\gamma\leq 2$ and some non-negative constant $C$. Then on a suitable probability space one can construct $\{X_n^{(1)}\}$ and $\{X_n^{(2)}\}$ such that as $n\to \infty$ $$ |X_n^{(1)}-X_n^{(2)}|=O((X_n^{(1)}+X_n^{(2)})^{2-\gamma}) =O((n\log\log n)^{1-\gamma/2})\quad{\rm a.s.} $$ \end{theorem} The organization of the paper is as follows. In Section 2 we will present some well-known facts and prove some preliminary results. Sections 3-5 contain the proofs of Theorems 1.1-1.3, respectively. In Section 6 we prove strong theorems (most of them are integral tests) which easily follow from Theorems 1.1 and 1.2 and the corresponding results for Bessel process. In Section 7, using our Theorem 1.3 in both directions, we prove an integral test for the local time of the NN-walk, and a strong theorem for the speed of escape of the Bessel process. \renewcommand{\thesection}{\arabic{section}.} \section{Preliminaries} \renewcommand{\thesection}{\arabic{section}} \setcounter{equation}{0} \setcounter{theorem}{0} \setcounter{lemma}{0} \begin{lemma} Let $Y_\nu(\cdot)$ be a Bessel process starting from $x=R$ and let $\tau$ be the stopping time defined by {\rm (\ref{stopping})} with $a=R-1$ and $b=R+1$. Let $p_R$ be defined by {\rm (\ref{pr})}.
Then as $R\to\infty$ \begin{equation} p_R=\frac{2\nu+1}{4R}+O\left(\frac1{R^2}\right), \label{pr2} \end{equation} \begin{equation} {\bf E}_R(\tau)=1+O\left(\frac{1}{R}\right), \label{ertau} \end{equation} \begin{equation} Var_R(\tau)=O(1). \label{vartau} \end{equation} \end{lemma} \noindent {\bf Proof:} For $\nu=1/2$, i.e. for $d=3$-dimensional Bessel process, in case $x=R$, $a=R-1$, $b=R+1$ we have $$ {\bf E}_R(e^{\lambda\tau})=\frac1{\cos(\sqrt{2\lambda})} $$ which does not depend on $R$. We prove that this holds asymptotically in general, when $\nu>0$. Using the identity (cf. \cite{BS}, page 449 and \cite{WA}, page 78) \begin{eqnarray}\label{deef} K_\nu(x)&=& \left\{\begin{array}{ll}\displaystyle{\frac{\pi}{2\sin(\nu\pi)} (I_{-\nu}(x)-I_\nu(x))}\quad {\rm if} \,\, \nu \,\,{\rm is\,\, not\,\, an\,\, integer\,\,}\\ & \\ \nonumber \lim_{\mu\to \nu}K_{\mu}(x) \quad{\rm if\,\,} \nu\,\, {\rm is\,\, an\,\, integer} \end{array}\right. \end{eqnarray} \noindent and the series expansion $$ I_\nu(x)=\sum_{k=0}^\infty\frac{(x/2)^{\nu+2k}}{k!\Gamma(\nu+k+1)}, $$ one can see that the coefficient of $-\alpha$ in the Taylor series expansion of the Laplace transform (\ref{lap}) is $$ {\bf E}_x(\tau)=\frac1{2(\nu+1)}\frac{(b^2-x^2)a^{-2\nu}+(x^2-a^2)b^{-2\nu} -(b^2-a^2)x^{-2\nu}}{a^{-2\nu}-b^{-2\nu}} $$ from which by putting $x=R$, $a=R-1$, $b=R+1$, we obtain $$ {\bf E}_R(\tau)=\frac1{2(\nu+1)}\frac{(2R+1)(R-1)^{-2\nu}+(2R-1)(R+1)^{-2\nu} -4R^{1-2\nu}}{(R-1)^{-2\nu}-(R+1)^{-2\nu}} $$ giving (\ref{ertau}) after some calculations. (\ref{vartau}) can also be obtained similarly, but it seems quite complicated. A simpler argument is to use moment generating function and expansion of the Bessel functions for imaginary arguments near infinity.
Put $\alpha=-\lambda$ into (\ref{lap}) to obtain \begin{equation} {\bf E}_x(e^{\lambda\tau})=\frac{S_\nu(ib\sqrt{2\lambda},ix\sqrt{2\lambda})+ S_\nu(ix\sqrt{2\lambda},ia\sqrt{2\lambda})} {S_\nu(ib\sqrt{2\lambda},ia\sqrt{2\lambda})}, \label{mgf} \end{equation} where $i=\sqrt{-1}$. We use the following asymptotic expansions (cf. Erd\'elyi et al.\,\cite{EA}, page 86, or Watson \cite{WA}, pages 202, 219) $$ I_\nu(z)=(2\pi z)^{-1/2}\left(e^z+ie^{-z+i\nu\pi}+O(|z|^{-1})\right), $$ $$ K_\nu(z)=\left(\frac{\pi}{2z}\right)^{1/2}\left(e^{-z}+O(|z|^{-1})\right). $$ Hence one obtains for $\lambda>0$ fixed, and $x<b,\,$ $$ S_\nu(ib\sqrt{2\lambda},ix\sqrt{2\lambda})= (-2\lambda b x)^{-\nu} (I_\nu(ib\sqrt{2\lambda})K_{\nu}(ix\sqrt{2\lambda}) -I_\nu(ix\sqrt{2\lambda})K_{\nu}(ib\sqrt{2\lambda})) $$ $$ =\frac12 (-2\lambda b x)^{-\nu-1/2} \left(e^{i(b-x)\sqrt{2\lambda}}-e^{-i(b-x)\sqrt{2\lambda}} +O\left(\frac1x\right)\right), \quad x\to\infty. $$ One can obtain asymptotic expansions similarly for $S_\nu(ix\sqrt{2\lambda},ia\sqrt{2\lambda})$, $S_\nu(ib\sqrt{2\lambda},ia\sqrt{2\lambda})$. Putting these into (\ref{mgf}), with $x=R$, $a=R-1$, $b=R+1$, we get as $R \to \infty$ $$ {\bf E}_R(e^{\lambda\tau})=\frac{(R^2+R)^{-\nu-1/2}+(R^2-R)^{-\nu-1/2}} {(R^2-1)^{-\nu-1/2}}\, \, \frac{e^{i\sqrt{2\lambda}}-e^{-i\sqrt{2\lambda}} +O\left(\frac1R\right)} {e^{2i\sqrt{2\lambda}}-e^{-2i\sqrt{2\lambda}} +O\left(\frac1R\right)} $$ $$ =\frac1{\cos(\sqrt{2\lambda})}+O\left(\frac1R\right).
$$ Hence putting $\lambda=1$, there exists a constant $C$ such that ${\bf E}_R(e^{\tau})\leq C$ for all $R=1,2,\ldots$ By Markov's inequality we have $$ {\bf P}_R(\tau>t)={\bf P}_R(e^\tau>e^t)\leq Ce^{-t}, $$ from which ${\bf E}_R(\tau^2)\leq 2C$, implying (\ref{vartau}). $\Box$ Here and throughout $C,C_1,C_2,\ldots$ denotes unimportant positive (possibly random) constants whose values may change from line to line. Recall the definition of the upper and lower classes for a stochastic process $Z(t),\, t\geq 0$ defined on a probability space $(\Omega,{\cal F}, P)$ (cf. R\'ev\'esz \cite{R05}, p. 33). The function $a_1(t)$ belongs to the {\it upper-upper class} of $Z(t)$ ($a_1(t)\in {\rm UUC}(Z(t))$) if for almost all $\omega \in \Omega $ there exists a $t_0(\omega)>0 $ such that $Z(t)<a_1(t)$ if $t>t_0(\omega).$ The function $a_2(t)$ belongs to the {\it upper-lower class} of $Z(t)$ ($a_2(t)\in {\rm ULC}(Z(t))$) if for almost all $\omega \in \Omega $ there exists a sequence of positive numbers $0<t_1=t_1(\omega)<t_2=t_2(\omega)< \ldots $ with $\lim_{i\to\infty}t_i=\infty$ such that $Z(t_i)\geq a_2(t_i)$, $(i=1,2,\ldots).$ The function $a_3(t)$ belongs to the {\it lower-upper class } of $Z(t)$ ($a_3(t)\in {\rm LUC}(Z(t))$) if for almost all $\omega \in \Omega $ there exists a sequence of positive numbers $0<t_1=t_1(\omega)<t_2=t_2(\omega)< \ldots $ with $\lim_{i\to\infty}t_i=\infty$ such that $Z(t_i)\leq a_3(t_i)$, $(i=1,2,\ldots).$ The function $a_4(t)$ belongs to the {\it lower-lower class} of $Z(t)$ ($a_4(t)\in {\rm LLC}(Z(t))$) if for almost all $\omega \in \Omega $ there exists a $t_0(\omega)>0 $ such that $Z(t)> a_4(t)$ if $t>t_0(\omega).$ The following lower class results are due to Dvoretzky and Erd\H os \cite{DE} for integer $d=2\nu+2$. In the general case when $\nu>0$, the proof is similar (cf.
also Knight \cite{KN81} and Chaumont and Pardo \cite{CP} in the case of positive self-similar Markov processes). \noindent{\bf Theorem E:} {\it Let $\nu>0$ and let $b(t)$ be a non-increasing, non-negative function.} \begin{itemize} \item $t^{1/2}b(t) \in {\rm LLC}(Y_\nu(t)) $ \qquad {\it if and only if} \qquad $\displaystyle{ \int_1^\infty (b(2^t))^{2\nu}\, dt< \infty.}$ \label{at} \end{itemize} It follows e.g. that in case $\nu>0$, for any $\varepsilon>0$ we have \begin{equation} Y_\nu(t)\geq t^{1/2-\varepsilon} \label{ylower} \end{equation} almost surely for all sufficiently large $t$. In fact, from our invariance principle it will follow that the integral test in Theorem E holds also for our Markov chain $(X_n)$. In the proof however we need an analogue of (\ref{ylower}) for $X_n$. One can easily calculate the exact distribution of $\xi(R,\infty),$ the total local time of $X_n$ of Theorem 1.1 according to Theorem C. \newline {\bf Lemma A:} {\it If $p_R$ is given by {\rm (\ref{pr})}, then} $\xi(R,\infty)$ {\it has geometric distribution {\rm (\ref{locelo})} with} \begin{equation} p_R^*=\frac{\frac{1}{2}+p_R}{D(R,\infty)}= \frac{(\frac{1}{2}+p_R)((R+1)^{2\nu}-R^{2\nu})}{(R+1)^{2\nu}}= \frac{\nu}{R}+O\left(\frac{1}{R}\right). \end{equation} \begin{lemma} For any $\delta>0$ we have $$ X_n\geq n^{1/2-\delta} $$ almost surely for all large enough $n$. \end{lemma} \noindent{\bf Proof:} From Lemma A it is easy to conclude that almost surely for some $R_0>0$ $$\xi(R,\infty)\leq CR\log R$$ if $R\geq R_0$, with some random positive constant $C$. Hence the time $\displaystyle{\sum_{R=1}^S}\xi(R,\infty)$ which the particle spent up to $\infty$ in $[1,S]$ is less than $$\sum_{R=1}^{R_0-1}\xi(R,\infty)+C\sum_{R=R_0}^S R\log R\leq C_1S^{2+\delta}$$ with some (random) $C_1>0$. Consequently, after $C_1S^{2+\delta}$ steps the particle will be farther away from the origin than $S$.
Let $$n=[C_1S^{2+\delta}],$$ then $$S\geq\left(\frac{n}{C_1}\right)^{1/(2+\delta)}$$ and hence $$ X_n\geq \left(\frac{n}{C_1}\right)^{1/(2+\delta)}\geq n^{1/2-\delta} $$ for $n$ large enough. This proves the Lemma. $\Box$ \renewcommand{\thesection}{\arabic{section}.} \section{Proof of Theorem 1.1} \renewcommand{\thesection}{\arabic{section}} \setcounter{equation}{0} \setcounter{theorem}{0} \setcounter{lemma}{0} Define the sequences $(\tau_n)$, $t_0=0,$ $t_n:=\tau_1+\ldots+\tau_n$ as follows: \begin{eqnarray*} \tau_1&:=&\min\{t:\ t>0,\ Y_\nu(t)=1\},\\ \tau_2&:=&\min\{t:\ t>0,\ Y_\nu(t+t_1)=2\},\\ \tau_n&:=&\min\{t:\ t>0,\ |Y_\nu(t+t_{n-1})-Y_\nu(t_{n-1})|=1\}\quad {\rm for} \quad n=3,4,\ldots \end{eqnarray*} Let $X_n=Y_\nu(t_n)$. Then (cf. (\ref{pr})) it is an NN random walk with $p_0=p_1=1/2$, $$ p_R=\frac{(R-1)^{-2\nu}-R^{-2\nu}}{(R-1)^{-2\nu}-(R+1)^{-2\nu}}-\frac12, \qquad R=2,3,\ldots $$ Let ${\cal F}_n$ be the $\sigma$-algebra generated by $(\tau_k,\ Y_\nu(\tau_k))_{k=1}^n$ and consider $$ M_n:=\sum_{i=1}^n(\tau_i-{\bf E}(\tau_i\mid {\cal F}_{i-1})). $$ Then the sequence $(M_n)_{n\geq 1}$ is a martingale with respect to $({\cal F}_n)_{n\geq 1}$. It follows from (\ref{ertau}) of Lemma 2.1 that for $i=2,3,\ldots$ we have $$ {\bf E}(\tau_i\mid {\cal F}_{i-1})={\bf E}(\tau_i\mid Y_\nu(t_{i-1})) =1+O\left(\frac{1}{Y_\nu(t_{i-1})}\right). $$ Hence $$ |t_n-n|\leq |M_n| +|\tau_1-1|+C_1\sum_{i=2}^n\frac{1}{Y_\nu(t_{i-1})}= |M_n| +|\tau_1-1|+C_1\sum_{i=2}^n\frac{1}{X_{i-1}} $$ with some (random) constant $C_1$. By (\ref{vartau}) of Lemma 2.1 we have ${\bf E} M_n^2\leq Cn$. Let $\varepsilon>0$ be arbitrary and define $n_k=[k^{1/\varepsilon}]$.
From the martingale inequality we get $$ {\bf P}\left(\max_{n_{k-1}\leq n\leq n_k}|M_n|\geq C_1n_{k-1}^{1/2+\varepsilon}\right) \leq \frac{C_2}{n_k^{2\varepsilon}}, $$ hence we obtain by Borel-Cantelli lemma $$ \max_{n_{k-1}\leq n\leq n_k}|M_n|\leq C_1 n_{k-1}^{1/2+\varepsilon} $$ almost surely for large $k$. Hence we also have $$ |M_n|=O(n^{1/2+\varepsilon})\qquad {\rm a.s.} $$ By Lemma 2.2 $$ \sum_{i=2}^n\frac1{X_{i-1}}=O(n^{1/2+\varepsilon})\qquad {\rm a.s.}, $$ consequently \begin{equation} |t_n-n|=O(n^{1/2+\varepsilon})\qquad{\rm a.s.} \label{tminusn} \end{equation} It is well-known (cf. \cite{BS}, p. 69) that $Y_\nu(t)$ satisfies the stochastic differential equation \begin{equation} dY_\nu(t)=dW(t)+\frac{2\nu+1}{2Y_\nu(t)}dt, \label{dife} \end{equation} where $W(t)$ is a standard Wiener process. Hence $$ X_n-Y_\nu(n)=Y_\nu(t_n)-Y_\nu(n)= W(t_n)-W(n)+\int_{t_n}^n\frac{2\nu+1}{2Y_\nu(s)}\, ds, $$ consequently, $$ |X_n-Y_\nu(n)|\leq |W(t_n)-W(n)|+\frac{(2\nu+1)|t_n-n|}{2} \max_{\min(n,t_n)\leq t\leq \max(n,t_n)}\frac1{Y_\nu(t)}. $$ Now by (\ref{tminusn}) and (\ref{ylower}) the last term is $O(n^{2\varepsilon})$ almost surely and since for the increments of the Wiener process (cf. \cite{CsR}, page 30) $$ |W(t_n)-W(n)|=O(n^{1/4+\varepsilon})\qquad {\rm a.s.} $$ as $n\to\infty$, we have (\ref{inv}) of Theorem 1.1.
$\Box$ \renewcommand{\thesection}{\arabic{section}.} \section{Proof of Theorem 1.2} \renewcommand{\thesection}{\arabic{section}} \setcounter{equation}{0} \setcounter{theorem}{0} \setcounter{lemma}{0} For $R>0$ integer define \begin{eqnarray*} \kappa_1&:=&\min\{t\geq 0:\, Y_\nu(t)=R\},\\ \delta_1&:=&\min\{t\geq \kappa_1:\, Y_\nu(t)\notin (R-1,R+1)\},\\ \kappa_i&:=&\min\{t\geq \delta_{i-1}:\, Y_\nu(t)=R\},\\ \delta_i&:=&\min\{t\geq \kappa_i:\, Y_\nu(t)\notin (R-1,R+1)\},\\ \kappa^*&:=&\max\{t\geq 0\, :Y_{\nu}(t)=R\}, \end{eqnarray*} $i=2,3,\ldots$ Consider the local times at $R$ of the Bessel process during excursions around $R$, i.e. let $$ \zeta_i:=\eta(R,\delta_i)-\eta(R,\kappa_i),\quad i=1,2,\ldots, $$ $$\tilde\zeta:=\eta(R,\infty)-\eta(R,\kappa^*).$$ We have $$ \eta(R,\infty)=\sum_{i=1}^{\xi(R,\infty)-1}\zeta_i+\tilde\zeta. $$ \begin{lemma} \begin{equation} {\bf E}\left(e^{\lambda \eta(R,\infty)} \right)=\frac{p^*_R\, \varphi(\lambda)}{1-q_R^*\,\varphi(\lambda)}, \end{equation} where \begin{equation} p_R^*=\frac{A_R}{A_R+B_R}\,\frac{(R+1)^{2\nu}-R^{2\nu}}{(R+1)^{2\nu}},\quad q_R^*=1-p_R^*, \end{equation} \begin{equation} \varphi(\lambda)=\frac{\nu (A_R+B_R)}{\nu (A_R+B_R)-\lambda R^{2\nu+1}A_R B_R}, \label{filam} \end{equation} and \begin{equation} A_R=(R-1) ^{-2\nu}-R ^{-2\nu},\qquad B_R=R^{-2\nu}-(R+1) ^{-2\nu}. \end{equation} \end{lemma} \noindent {\bf Proof:} By (\cite{BS}, p. 395, 3.3.2) $\zeta_i$ are i.i.d. random variables having exponential distribution with moment generating function $\varphi(\lambda)$ given in (\ref{filam}).
Moreover, it is obvious that $\tilde\zeta$ is independent from $\sum_{i=1}^{\xi(R,\infty)-1}\zeta_i.$ Furthermore, $\tilde\zeta$ is the local time of $R$ under the condition that starting from $R, $ $Y_\nu(t)$ will reach $R+1$ before $R-1.$ Hence its distribution can be calculated from formula 3.3.5(b) of \cite{BS}, and its moment generating function happens to be equal to $\varphi(\lambda)$ of (\ref{filam}). $\Box$ We can see $$ \theta:={\bf E}(\zeta_i)={\bf E}(\tilde\zeta)=\frac{\nu(A_R+B_R)} {R^{2\nu+1}A_R B_R}=1+O\left(\frac1R\right),\quad R\to\infty. $$ $${\bf P}(|\eta(R,\infty)-\xi(R,\infty)|\geq u)= {\bf P}\left(\left|\sum_{i=1}^{\xi(R,\infty)-1}(\zeta_i-\theta)+ \tilde\zeta-\theta\right|\geq u\right)$$ $$\leq {\bf P}(\xi(R,\infty)>N)+{\bf P} \left(\max_{k\leq N}\left|\sum_{i=1}^k(\zeta_i-\theta)\right|\geq u\right) $$ $$ \leq (q^*_R)^N+ e^{-\lambda u}\left(\left(\frac{e^{\lambda \theta}}{1+\lambda \theta}\right)^N+ \left(\frac{e^{-\lambda \theta}}{1-\lambda \theta}\right)^N\right). $$ In the above calculation we used the common moment generating function (\ref{filam}) of $\zeta_i $ and $\tilde\zeta$, the exact distribution of $\xi(R,\infty)$ (see (\ref{locelo})) and the exponential Kolmogorov inequality. Estimating the above expression with standard methods and selecting $$N=CR\log R, \quad u=CR^{1/2} \log R,\quad \lambda=\frac{u}{\theta^2 N}$$ we conclude that $${\bf P}(|\eta(R,\infty)-\xi(R,\infty)|\geq CR^{1/2} \log R ) \leq C_1 \exp{\left(-\frac{C\log R}{2 \theta}\right)}.$$ With a big enough $C$ the right hand side of the above inequality is summable in $R,$ hence Theorem 1.2 follows by the Borel-Cantelli lemma.
$\Box$ \renewcommand{\thesection}{\arabic{section}.} \section{Proof of Theorem 1.3} \renewcommand{\thesection}{\arabic{section}} \setcounter{equation}{0} \setcounter{theorem}{0} \setcounter{lemma}{0} Let $p_j^{(1)}$ and $p_j^{(2)}$ be as in Theorem 1.3. Define the two-dimensional Markov chain $(X_n^{(1)},X_n^{(2)})$ as follows. If $p_j^{(1)}\geq p_k^{(2)}$, then let \begin{eqnarray*} {\bf P}\left((X_{n+1}^{(1)},X_{n+1}^{(2)})=(j+1,k+1)\mid (X_n^{(1)},X_n^{(2)})=(j,k)\right)&=&\frac12+p_k^{(2)}\\ {\bf P}\left((X_{n+1}^{(1)},X_{n+1}^{(2)})=(j+1,k-1)\mid (X_n^{(1)},X_n^{(2)})=(j,k)\right)&=&p_j^{(1)}-p_k^{(2)}\\ {\bf P}\left((X_{n+1}^{(1)},X_{n+1}^{(2)})=(j-1,k-1)\mid (X_n^{(1)},X_n^{(2)})=(j,k)\right)&=&\frac12-p_j^{(1)}. \end{eqnarray*} If, however $p_j^{(1)}\leq p_k^{(2)}$, then let \begin{eqnarray*} {\bf P}\left((X_{n+1}^{(1)},X_{n+1}^{(2)})=(j+1,k+1)\mid (X_n^{(1)},X_n^{(2)})=(j,k)\right)&=&\frac12+p_j^{(1)}\\ {\bf P}\left((X_{n+1}^{(1)},X_{n+1}^{(2)})=(j-1,k+1)\mid (X_n^{(1)},X_n^{(2)})=(j,k)\right)&=&p_k^{(2)}-p_j^{(1)}\\ {\bf P}\left((X_{n+1}^{(1)},X_{n+1}^{(2)})=(j-1,k-1)\mid (X_n^{(1)},X_n^{(2)})=(j,k)\right)&=&\frac12-p_k^{(2)}. \end{eqnarray*} Then it can be easily seen that $X_n^{(1)}$ and $X_n^{(2)}$ are two NN random walks as desired. Consider the following 4 cases. \begin{itemize} \item{(i)} $p_j^{(1)}\leq p_k^{(2)}$, $j\leq k$, \item{(ii)} $p_j^{(1)}\leq p_k^{(2)}$, $j\geq k$, \item{(iii)} $p_j^{(1)}\geq p_k^{(2)}$, $j\leq k$, \item{(iv)} $p_j^{(1)}\geq p_k^{(2)}$, $j\geq k$. \end{itemize} In case (i) from (\ref{pj1}) and (\ref{pj2}) we obtain $$ \frac{B}{4j}-\frac{C}{j^\gamma}\leq \frac{B}{4k}+\frac{C}{k^\gamma} \leq \frac{B}{4k}+\frac{C}{kj^{\gamma-1}}, $$ implying $$ k-j\leq \frac{2Cj^{2-\gamma}}{B/4-Cj^{1-\gamma}}= O(j^{2-\gamma}) $$ if $j\to\infty$.
So in this case if $X_n^{(1)}=j$ and $X_n^{(2)}=k$, then we have $$ 0\leq X_n^{(2)}-X_n^{(1)}=O((X_n^{(1)})^{2-\gamma}) $$ if $n\to\infty$. In case (ii) either $X_{n+1}^{(1)}-X_{n+1}^{(2)}=X_{n}^{(1)}-X_{n}^{(2)}$, or $X_{n+1}^{(1)}-X_{n+1}^{(2)}=X_{n}^{(1)}-X_{n}^{(2)}-2$, so that we have $$ -2\leq X_{n+1}^{(1)}-X_{n+1}^{(2)}\leq X_{n}^{(1)}-X_{n}^{(2)}. $$ Similar procedure shows that in case (iii) $$ -2\leq X_{n+1}^{(2)}-X_{n+1}^{(1)}\leq X_{n}^{(2)}-X_{n}^{(1)} $$ and in case (iv) $$ 0\leq X_n^{(1)}-X_n^{(2)}=O((X_n^{(2)})^{2-\gamma}). $$ Hence Theorem 1.3 follows from the law of the iterated logarithm for $X_n^{(i)}$ (cf. \cite{BRS}). $\Box$ \renewcommand{\thesection}{\arabic{section}.} \section{Strong theorems} \renewcommand{\thesection}{\arabic{section}} \setcounter{equation}{0} \setcounter{theorem}{0} \setcounter{lemma}{0} As usual, applying Theorem 1.1 and Theorem 1.3, we can give limit results valid for one of the processes to the other process involved. In this section we denote $Y_\nu(t)$ by $Y(t)$ and define the following related processes. $$ M(t):=\max_{0\leq s\leq t}Y(s), \qquad Q_n:=\max_{1\leq k\leq n}X_k. $$ The future infimums are defined as $$ I(t):=\inf_{s\geq t} Y(s), \qquad J_n:=\inf_{k\geq n}X_k. $$ Escape processes are defined by $$ A(t):=\sup\{s:\, Y(s)\leq t\}, \qquad G_n:=\sup\{k:\, X_k\leq n\}. $$ Laws of the iterated logarithm are known for Bessel processes (cf. \cite{BS}) and NN random walks (cf. \cite{BRS}) as well. Upper class results for Bessel process read as follows (cf. Orey and Pruitt \cite{OP} for integral $d$, and Pardo \cite{PA} for the case of positive self-similar Markov processes). \noindent {\bf Theorem F}: {\it Let $a(t)$ be a non-decreasing non-negative continuous function.
Then for $\nu\geq 0$ $$ \displaystyle{ t^{1/2}a(t)\in {\rm UUC}(Y(t))\qquad if \,\, and \,\, only \,\, if \qquad\int_1^\infty\frac{(a(x))^{2\nu+2}}{x}e^{-a^2(x)/2}\, dx<\infty.} $$} Now Theorems 1.1, 1.3 and Theorems E and F together imply the following result. \begin{theorem} Let $\{X_n\}$ be an NN random walk with $p_R$ satisfying $$ p_R=\frac{B}{4R}+O\left(\frac1{R^{1+\delta}}\right),\quad R\to\infty $$ with $B>1$ and for some $\delta>0$. Let furthermore $a(t)$ be a non-decreasing non-negative function. Then $$ \displaystyle{n^{1/2}a(n)\in {\rm UUC}(X_n) \qquad if \,\, and \,\, only \,\, if \qquad \sum_{k=1}^\infty\frac{(a(k))^{B+1}}{k}e^{-a^2(k)/2}<\infty.} $$ If $b(t)$ is a non-increasing non-negative function, then $$ \displaystyle{n^{1/2}b(n)\in {\rm LLC}(X_n) \qquad if \,\, and \,\, only \,\, if \qquad \sum_{k=1}^\infty (b(2^k))^{B-1}<\infty.} $$ \end{theorem} Next we prove the following invariance principles for the processes defined above. \begin{theorem} Let $Y(t)$ and $X_n$ be as in {\rm Theorem 1.1.} Then for any $\varepsilon>0$ we have \begin{equation} |M(n)-Q_n|=O(n^{1/4+\varepsilon})\quad {\rm a.s.} \label{invmq} \end{equation} and \begin{equation} |I(n)-J_n|=O(n^{1/4+\varepsilon})\quad {\rm a.s.} \label{invij} \end{equation} \end{theorem} \noindent {\bf Proof:} Define $\tilde s, s^*, \tilde k, k^*$ by $$ Y(\tilde s)=M(n),\quad Y(s^*)=I(n), \quad X_{\tilde k}=Q_n, \quad X_{k^*}=J_n.
$$ Then as $n\to\infty$, we have almost surely $$ Q_n-M(n)=X_{\tilde k}-Y(\tilde s)\leq X_{\tilde k}-Y(\tilde k) =O(n^{1/4+\varepsilon}) $$ and $$ M(n)-Q_n=Y(\tilde s)-X_{\tilde k}=Y(\tilde s)-Y([\tilde s]) -(X_{[\tilde s]}-Y([\tilde s]))+X_{[\tilde s]}-X_{\tilde k} $$ $$ \leq Y(\tilde s)-Y([\tilde s]) -(X_{[\tilde s]}-Y([\tilde s]))=Y(\tilde s)-Y([\tilde s]) +O(n^{1/4+\varepsilon}) $$ By (\ref{dife}) and recalling the results on the increments of the Wiener process (see \cite{CsR} page 30) we get $$ Y(\tilde s)-Y([\tilde s])=W(\tilde s)-W([\tilde s])+ \int_{[\tilde s]}^{\tilde s}\frac{2\nu+1}{2Y(s)}\, ds $$ $$ \leq \sup_{0\leq t\leq n}\sup_{0\leq s\leq 1}|W(t+s)-W(t)| +\frac{2\nu+1}{2}\max_{[\tilde s]\leq t\leq \tilde s}\frac{1}{Y(t)} =O(\log n) \quad{\rm a.s.}, $$ since $Y(t)$ in the interval $([\tilde s],\tilde s)$ is bounded away from zero. Hence (\ref{invmq}) follows. To show (\ref{invij}), note that $n\leq s^*\leq n^{1+\alpha}$ and $n\leq k^*\leq n^{1+\alpha}$ for any $\alpha>0$ almost surely for all large $n$. Then as $n\to\infty$ $$ I(n)-J_n\leq Y(k^*)-X_{k^*}=O((k^*)^{1/4+\varepsilon})= O(n^{(1+\alpha)(1/4+\varepsilon)})\quad {\rm a.s.} $$ On the other hand, $$ J_n-I(n)\leq X_{k^*}-Y([s^*])+Y([s^*])-Y(s^*)= O(n^{(1+\alpha)(1/4+\varepsilon)})+Y([s^*])-Y(s^*). $$ By (\ref{dife}), taking into account that when applying this formula the integral contribution is negative, and recalling again the results on the increments of the Wiener process, we get $$ Y([s^*])-Y(s^*)\leq W([s^*])-W(s^*)\leq \sup_{0\leq t\leq n^{1+\alpha}}\sup_{0\leq s\leq 1}|W(t+s)-W(t)|=O(\log n)\quad {\rm a.s.} $$ as $n\to\infty$. Hence $$ |I(n)-J_n|=O(n^{(1+\alpha)(1/4+\varepsilon)})\quad {\rm a.s.} $$ Since $\alpha>0$ and $\varepsilon>0$ are arbitrary, (\ref{invij}) follows. This completes the proof of Theorem 6.2.
$\Box$ \begin{theorem} Let $X_n^{(1)}$ and $X_n^{(2)}$ be as in {\rm Theorem 1.3} and let $Q_n^{(1)}$ and $Q_n^{(2)}$ be the corresponding maximums, while let $J_n^{(1)}$ and $J_n^{(2)}$ be the corresponding future infimum processes. Then for any $\varepsilon>0$, as $n\to\infty$ we have \begin{equation} |Q_n^{(1)}-Q_n^{(2)}|=O(n^{1-\gamma/2+\varepsilon})\quad {\rm a.s.} \label{invq1q2} \end{equation} and \begin{equation} |J_n^{(1)}-J_n^{(2)}|=O(n^{1-\gamma/2+\varepsilon})\quad {\rm a.s.} \label{invj1j2} \end{equation} \end{theorem} \noindent {\bf Proof:} Define $\tilde k_i, k_i^*,\, i=1,2$ by $$ X^{(i)}_{\tilde k_i}=Q_n^{(i)}, \quad X_{k_i^*}^{(i)}=J_n^{(i)}. $$ Then $$ |Q_n^{(1)}-Q_n^{(2)}|\leq \max(X_{\tilde k_1}^{(1)}-X_{\tilde k_1}^{(2)}, X_{\tilde k_2}^{(1)}-X_{\tilde k_2}^{(2)}) =O((n\log\log n)^{1-\gamma/2})\quad {\rm a.s.}, $$ proving (\ref{invq1q2}). Moreover, for any $\alpha>0$, $n\leq k_i^*\leq n^{1+\alpha}$ almost surely for large $n$, hence we have $$ |J_n^{(1)}-J_n^{(2)}|\leq \max(X_{k_1^*}^{(1)}-X_{k_1^*}^{(2)}, X_{k_2^*}^{(1)}-X_{k_2^*}^{(2)}) =O((n\log\log n)^{(1+\alpha)(1-\gamma/2)})\quad {\rm a.s.} $$ Since $\alpha$ is arbitrary, (\ref{invj1j2}) follows. This completes the proof of Theorem 6.3. $\Box$ Khoshnevisan et al. \cite{KLS} (for $I(t)$ and $A(t)$), Adelman and Shi \cite{AS}, and Shi \cite{Shi} (for $Y(t)-I(t)$) proved the following upper and lower class results. \noindent {\bf Theorem G}: {\it Let $\varphi(t)$ be a non-increasing, and $\psi(t)$ be a non-decreasing function, both non-negative. Then for} $\nu>0$ \begin{itemize} \item $\displaystyle{t^{1/2}\psi(t)\in {\rm UUC}(I(t))}$ \qquad {\it if and only if} \qquad $\displaystyle{\int_1^\infty\frac{(\psi(x))^{2\nu}}{x}e^{-\psi^2(x)/2}\, dx<\infty,}$ \item $\displaystyle{t^2\varphi(t)\in {\rm LLC}(A(t))} $ \qquad {\it if and only if} \qquad $ \displaystyle{\int_1^\infty\frac1{x\varphi^{\nu}(x)}e^{-1/2\varphi(x)}\, dx<\infty.
}$ \item $ \displaystyle{t^{1/2}\psi(t)\in {\rm UUC}(Y(t)-I(t))} $ \qquad {\it if and only if} \qquad $ \displaystyle{\int_1^\infty\frac1{x\psi^{2\nu-2}(x)}e^{-\psi^2(x)/2}\, dx<\infty,} $ \end{itemize} \noindent {\bf Theorem H}: { \it Let $\rho(t)>0$ be such that $(\log\rho(t))/\log t$ is non-decreasing.} Then \begin{itemize} \item $\displaystyle{1/\rho(t)\in {\rm LLC}(M(t)-I(t)) }$ \qquad {\it if and only if} \qquad $ \displaystyle{\int_1^\infty \frac{dx}{x\log\rho(x)}<\infty.}$ \end{itemize} Taking into account that $J_n$ and $G_n$ are inverses of each other, immediate consequences of Theorems F,\, G,\, H, Theorems 6.2 and 6.3 are the following upper and lower class results. \begin{theorem} Let $X_n$ be as in {\rm Theorem 6.1} and let $\varphi(t)$ be a non-increasing and $\psi(t)$ be a non-decreasing function, both non-negative. Then \begin{itemize} \item $ \displaystyle{n^{1/2}\psi(n)\in {\rm UUC}(J_n)}$ \qquad if and only if \qquad $\displaystyle{\sum_{k=1}^\infty\frac{(\psi(k))^{B-1}}{k} e^{-\psi^2(k)/2}<\infty,}$ \item $\displaystyle{n^2\varphi(n)\in {\rm LLC}(G_n) }$ \qquad if and only if \qquad $\displaystyle{\sum_{k=1}^\infty\frac1{k\varphi^{(B-1)/2}(k)}e^{-1/2\varphi(k)} <\infty.} $ \item $\displaystyle{ n^{1/2}\psi(n)\in {\rm UUC}(X_n-J_n)} $ \qquad if and only if \qquad $ \displaystyle{\sum_{k=1}^\infty\frac1{k\psi^{B-3}(k)}e^{-\psi^2(k)/2}<\infty,} $ \end{itemize} \end{theorem} \begin{theorem} Let $\rho(t)>0$ be such that $(\log\rho(t))/\log t$ is non-decreasing. \begin{itemize} \item $\displaystyle{ 1/\rho(n)\in {\rm LLC}(Q_n-J_n) }$ \qquad if and only if \qquad $ \displaystyle{\sum_{k=2}^\infty \frac1{k\log \rho(k)}<\infty.} $ \end{itemize} \end{theorem} \renewcommand{\thesection}{\arabic{section}.} \section{Local time} \renewcommand{\thesection}{\arabic{section}} \setcounter{equation}{0} \setcounter{theorem}{0} \setcounter{lemma}{0} We will need the following result from Yor \cite{Yor}, page 52.
\noindent {\bf Theorem J:} {\it For the local time of a Bessel process of order $\nu$ we have} $$\eta(R,\infty)\stackrel{{\cal D}}{=} (2\nu)^{-1}R^{1-2\nu}Y_0^2(R^{\,2\nu}), $$ {\it where $Y_0$ is a two-dimensional Bessel process and $\stackrel{{\cal D}}{=}$ means equality in distribution.} Hence applying Theorem F for $\nu=0$, we get \noindent {\bf Theorem K}: {\it If $f(x)$ is a non-decreasing, non-negative function, then} \begin{itemize} \item $\displaystyle{ Rf(R)\in {\rm UUC}(\eta(R,\infty))}$ \qquad {\it if and only if} \qquad $\displaystyle{\int_1^\infty \frac{f(x)}{x}e^{-\nu f(x)}\, dx<\infty}. $ \end{itemize} From this and Theorem 1.2 we get the following result. \begin{theorem}{\it If $f(x)$ is a non-decreasing, non-negative function, then} \begin{itemize} \item $\displaystyle{ Rf(R)\in {\rm UUC}(\xi(R,\infty))}$ \qquad if and only if\qquad $\displaystyle{ \sum_{k=1}^\infty \frac{f(k)}{ k} e^{-\nu f(k)}<\infty.} $ \end{itemize} \end{theorem} In \cite{CSFR} we proved the following result. \noindent {\bf Theorem L:} {\it Let $\displaystyle{ p_R=\frac{B}{4R}+O\left(\frac{1}{R^{\gamma}}\right)}$ with $ B>1$, and $\gamma>1$. Then with probability $1$ there exist infinitely many $R$ for which $$\xi(R+j,\infty)=1$$ for each $j=0,1,2,\ldots,[\log\log R/ \log 2]$. Moreover, with probability $1$ for each $R$ large enough and $\varepsilon>0$ there exists an $$R\leq S\leq R+{\frac{(1+\varepsilon)\log\log R}{\log 2}}$$ such that $$\xi(S,\infty)>1.$$ } \noindent {\bf Remark 1:} In fact in \cite{CSFR} we proved this result in the case when $p_R= B/{4R}$ but the same proof works also in the case of Theorem L. This theorem applies e.g. for the case when $p_R$ is given by (\ref{pr}), which in turn, gives the following result for the Bessel process.
Let {\bf E}gin{itemize} \item[(i)] $\kappa(R):=\inf\{t:\ Y_\nu(t)=R\}$, \item[(ii)] $\kappa^*(R):=\sup\{t:\ Y_\nu(t)=R\}$, \item[(iii)] $\Psi(R)$ be the largest integer for which the event $$A(R)=\bigcap_{j=-1}^{\Psi(R)}\{\kappa^*(R+j)<\kappa(R+j+1)\}$$ occurs. \end{itemize} $A(R)$ means that $Y_\nu(t)$ moves from $R$ to $R+1$ before returning to $R-1$, it goes from $R+1$ to $R+2$ before returning to $R$, ${{\lambda}mbda_d}ots$ and also from $R+\Psi(R)$ to $R+\Psi(R)+1$ and it never returns to $R+\Psi(R)-1$. We say that the process $Y_\nu(t)$ escapes through $(R,R+\Psi(R))$ with large velocity. {\bf E}gin{theorem} $$\limsup_{R\to\infty}{\frac{\Psi(R)}{\log\log R}}= {\frac{1}{\log 2}}\quad {\rm a.s.}$$ \end{theorem} \noindent {\bf Remark 2:} The statement of Theorem 7.2 (for integral $d=2\nu+2$) was formulated in \cite{R05}, p. 291 as a Conjecture. {\bf E}gin{thebibliography}{99} \bibitem{AS} ADELMAN, O. and SHI, Z.: The measure of the overlap of past and future under a transient Bessel process. {\em Stochastics Stochastics Rep.} {\bf 57} (1996), 169--183. \bibitem{BS} BORODIN, A.N. and SALMINEN, P.: {\em Handbook of Brownian Motion -- Facts and Formulae}. Birkh\"auser, Basel, (1996). \bibitem{BRS} BR\'EZIS, H., ROSENKRANTZ, W. and SINGER, B.: An extension of Khintchine's estimate for large deviations to a class of Markov chains converging to a singular diffusion. {\em Comm. Pure Appl. Math.} {\bf 24} (1971), 705--726. \bibitem{CP} CHAUMONT, L. and PARDO, J.C.: The lower envelope of positive self-similar Markov Processes. {\em Electr. J. Probab.} {\bf 11} (2006), 1321--1341. \bibitem{CH} CHUNG, K.L.: {\em Markov Chains with Stationary Transition Probabilities}. 2nd ed. Springer-Verlag, New York, 1967. \bibitem{C-SD} COOLIN-SCHRIJNER, P. and VAN DOORN, E.A.: Analysis of random walks using orthogonal polynomials. {\em J. Comput. Appl. Math.} {\bf 99} (1998), 387--399. \bibitem{CSFR} CS\'AKI, E., F\"OLDES, A. 
and R\'EV\'ESZ, P.: Transient nearest neighbor random walk on the line. {\em J.Theor. Probab.}, to appear. \bibitem{CsR} CS\"ORG\H O, M. and R\'EV\'ESZ, P.: {\em Strong Approximations in Probability and Statistics}. Academic Press, New York, 1981. \bibitem{DE01} DETTE, H.: First return probabilities of birth and death chains and associated orthogonal polynomials. {\em Proc. Amer. Math. Soc.} {\bf 129} (2001), 1805--1815. \bibitem{DE} DVORETZKY, A. and ERD\H OS, P.: Some problems on random walk in space. {\em Proc. Second Berkeley Symposium on Mathematical Statistics and Probability}, 1950. pp. 353--367. University of California Press, Berkeley and Los Angeles, 1951. \bibitem{EA} ERD\'ELYI, A. MAGNUS, W. OBERHETTIGER, F. and TRICOMI, F.G.: {\em Higher Transcendental Functions.} Vol. 2, McGraw - Hill, New York, 1953. \bibitem{KMG} KARLIN, S. and McGREGOR, J.: Random walks. {\em Illinois J. Math.} {\bf 3} (1959), 66--81. \bibitem{KLS} KHOSHNEVISAN, D., LEWIS, M.L. and SHI, Z.: On a problem of Erd\H os and Taylor. {\em Ann. Probab.} {\bf 24} (1996), 761--787. \bibitem {KN81} KNIGHT, F.B.: {\em Essentials of Brownian Motion and Diffusion}. Am. Math. Soc., Providence, R.I., 1981. \bibitem{LA63} LAMPERTI, J.: A new class of probability limit theorems. {\em J. Math. Mech.} {\bf 11} (1962), 749--772. \bibitem{OP} OREY, S. and PRUITT, W.E.: Sample functions of the $N$-parameter Wiener process. {\em Ann. Probab.} {\bf 1} (1973),\ 138--163. \bibitem{PA} PARDO, J.C.: The upper envelope of positive self-similar Markov processes. {\em arXiv:math.PR/0703071} \bibitem{R05} R\'{E}V\'{E}SZ, P.: {\em Random Walk in Random and Non-Random Environments}. 2nd ed. World Scientific, Singapore, 2005. \bibitem{RY} REVUZ, D. and YOR, M.: {\em Continuous Martingales and Brownian Motion}. 3rd ed. Springer-Verlag, Berlin, 1999. \bibitem{Shi} SHI, Z.: How long does it take a transient Bessel process to reach its future infimum? 
{\em S\'eminaire de Probabilit\'es,} {\bf XXX}, {\em Lecture Notes in Math.,} {\bf 1626}, Springer, Berlin, 1996, 207--217, \bibitem{WA} WATSON, G.N.: {\em A Treatise on the Theory of Bessel Functions}. 2nd ed. Cambridge University Press, Cambridge, 1944. \bibitem{Yor} YOR, M.: {\em Some Aspects of Brownian Motion. Part I: Some Special Functionals}. Birkh\"auser, Basel, 1992. \end{thebibliography} \end{document}
\begin{document} \baselineskip=22pt \thispagestyle{empty} \begin{center}{\Large\bf The Cauchy problem for the Ostrovsky equation }\\[1ex] {\Large\bf with negative dispersion at the critical regularity}\\[4ex] {Yongsheng LI$^\dag$ \quad\quad Jianhua HUANG$^\ddag$\footnote{Corresponding Author: J.Huang,\quad Email: [email protected]} \quad and \quad Wei YAN$^*$}\\[2ex] {$^\dag$Department of Mathematics, South China University of Technology,}\\ {Guangzhou, Guangdong 510640, P. R. China}\\[2ex] {$^\ddag$ College of Science, National University of Defense and Technology,}\\ { Changsha, P. R. China\quad 410073}\\[2ex] {$^*$College of Mathematics and Information Science, Henan Normal University,}\\ {Xinxiang, Henan 453007, P. R. China} \end{center} \noindent{\bf Abstract.} In this paper, we investigate the Cauchy problem for the Ostrovsky equation \begin{eqnarray*} \partial_{x}\left(u_{t}-\beta \partial_{x}^{3}u +\frac{1}{2}\partial_{x}(u^{2})\right) -\gamma u=0, \end{eqnarray*} in the Sobolev space $H^{-3/4}(\R)$. Here $\beta>0(<0)$ corresponds to the positive (negative) dispersion of the media, respectively. P. Isaza and J. Mej\'{\i}a (J. Diff. Eqns. 230(2006), 601-681; Nonli. Anal. 70(2009), 2306-2316), K. Tsugawa (J. Diff. Eqns. 247(2009), 3163-3180) proved that the problem is locally well-posed in $H^s(\R)$ when $s>-3/4$ and ill-posed when $s<-3/4$. By using some modified Bourgain spaces, we prove that the problem is locally well-posed in $H^{-3/4}(\R)$ with $\beta <0$ and $\gamma>0.$ The new ingredient that we introduce in this paper is Lemmas 2.1-2.6. \noindent {\bf Keywords}: Ostrovsky equation; Cauchy problem; Critical regularity; Dyadic bilinear estimates \noindent {\bf AMS Subject Classification}: 35G25 \noindent {\bf Short Title:} The Cauchy problem for the Ostrovsky Equation {\large\bf 1. 
Introduction} \setcounter{Theorem}{0} \setcounter{Lemma}{0} \setcounter{section}{1} In this paper, we consider the Ostrovsky equation $$ \partial_{x}\left(u_{t}-\beta \partial_{x}^{3}u +\frac{1}{2}\partial_{x}(u^{2})\right) -\gamma u=0. $$ This equation is a mathematical model of the propagation of weakly nonlinear long waves in a rotating liquid. It was introduced by Ostrovsky in \cite{O} as a model for weakly nonlinear long waves, by taking into account of the Coriolis force, to describe the propagation of surface waves in the ocean in a rotating frame of reference. The parameter $\gamma$ is a positive number and measures the effect of rotation, and the parameter $\beta$ is a nozero real number of both signs and reflects the type of dispersion of the media. When $\beta<0$, the equation has negative dispersion and describes surface and internal waves in the ocean and surface waves in a shallow channel with an uneven bottom. When $\beta>0$, the equation has positive dispersion and describes capillary waves on the surface of liquid or for oblique magneto-acoustic waves (see \cite{Be,GaSt,GiGrSt}). In the absence of rotation (that is, $\gamma = 0$), it becomes the Korteweg-de Vries equation. By changing variables the above Ostrovsky equation can be written in the form \begin{equation} u_{t}-\beta\partial_{x}^{3}u +\frac{1}{2}\partial_{x}(u^{2})- \gamma \partial_{x}^{-1} u =0.\label{1.01} \end{equation} The Ostrovsky equation has many important properties, such as solitary waves or soliton solutions, etc., and it has closed relation to the KdV equation (see \cite{LL,LV,ZL,Tsu}). It draws much attention of physists and mathematician. Many people have investigated the Cauchy problem for (\ref{1.01}), for instance, see \cite{GL, GH,HJ,HJ0, I,LL, IM1,IM2,IM3,IM4,IM5,LM,LV,VL,Z,ZL}. 
By using the Fourier restriction norm method
Obviously, if $u=u(x,t)$ is the solution to (\ref{1.01}), then $v(x,t)=\beta^{-1} u(x,-\beta^{-1} t)$ is the solution to the following equation \begin{eqnarray} v_{t}+ v_{xxx}+\frac{1}{2}(v^{2})_{x}+\beta^{-1} \gamma\partial_{x}^{-1}v=0\label{1.03}. \end{eqnarray} Without loss of generality, throughout this paper, we can assume that $\beta=-1,\gamma=1$. For the Ostrovsky equation in this paper, the identities that we can utilize are $$ a^{3}-\frac{1}{a}+ b^{3}-\frac{1}{b} -\frac{(a+b)^{3}}{4}+\frac{4}{a+b} = \frac{3}{4}(a+b)(a-b)^{2}\left[1-\frac{4}{3 ab(a+b)^{2}}\right] $$ and $$ (a+b)^{3}-\frac{1}{a+b}- a^{3}+\frac{1}{a}-b^{3}+\frac{1}{b} = 3ab(a+b)+ \frac{a^{2}+ab+b^{2}}{ab(a+b)}. $$ These identities enable us to construct reasonable splitting of the spectral domains so as to establish the crucial bilinear estimates for the local wellposedness of the problem. As in \cite{BT,IK,IKT,T,Kis,G}, we may apply appropriate Besov-type spaces to establish the dyadic bilinear estimates and finally we are able to show that the Cauchy problem for (\ref{1.01}) is locally well-posed in $H^{-3/4}(\R)$ with $\beta <0,\gamma>0.$ We give some notations before stating the main results. Throughout this paper, $0<\epsilon<{10^{-4}}$. $C$ is a positive constant which may vary from line to line. $A\sim B$ means that $|B|\leq |A|\leq 4|B|$. $A\gg B$ means that $|A|\geq 4|B|.$ $\psi(t)$ is a smooth function supported in $[-1,2]$ and equals to $1$ in $[0,1]$. $\mathscr{F}$ denotes the Fourier transformation with respect to both space and time variables and $\mathscr{F}^{-1}$ denotes the inverse transformation of $\mathscr{F}$, while $\mathscr{F}_{x}$ denotes the Fourier transformation with respect to the space variable and $\mathscr{F}^{-1}_{x}$ denotes the inverse transformation of $\mathscr{F}^{-1}_{x}$. 
Denote \begin{eqnarray*} &&D:=\left\{(\tau,\xi)\in \R^{2}:|\xi|\leq1/8, |\tau|\geq |\xi|^{-3}\right\},\\ &&A_{j}=\left\{(\tau,\xi)\in \R^{2}: 2^{j}\leq \langle\xi\rangle<2^{j+1}\right\},\\ &&B_{k}=\left\{(\tau,\xi)\in \R^{2}: 2^{k}\leq \left\langle\tau-\xi^{3}+\frac{1}{\xi} \right\rangle<2^{k+1}\right\}, \end{eqnarray*} where $j,k$ are nonnegative integers. The restriction $|\xi|\leq \frac{1}{8}$ in the spectral domain $D$ is chosen according to the structure of the phase function $-\xi^{3}+\frac{1}{\xi}$ of the Ostrovsky equation. The Bourgain space $ X^{s, \>b}= \left\{u\in \mathscr{S}^{'}(\R^{2})\, :\, \|u\|_{X^{s,\,b}}<\infty\right\} $ is defined by the norm $$ \|u\|_{X^{s, \>b}} = \left\|\langle\xi\rangle^{s} \left\langle\tau-\xi^{3} + \frac{1}{\xi}\right\rangle^{b}\mathscr{F}u\right\|_{L_{\tau\xi}^{2}(\SR^{2})}. $$ The space $ X^{s,\,b,\> 1} = \left\{u\in \mathscr{S}^{'}(\R^{2})\, :\, \|u\|^{X_{s,\>b,\>1}}<\infty\right\} $ is defined by where \begin{eqnarray*} \|u\|_{X^{s,b,1}} &=& \biggl\|\biggl(\left\|\langle\xi\rangle^{s} \left\langle\tau-\xi^{3}+\frac{1}{\xi}\right\rangle ^{b} \mathscr{F}u \right\|_{L_{\tau\xi}^{2}(A_{j}\cap B_{k})}\biggr)_{j,\>k\geq 0} \biggr\|_{\ell_{j}^{2}(\ell_{k}^{1})} \nonumber\\ &\sim& \biggl[\sum_{j}2^{js}\biggl(\sum_{k}2^{bk}\|\mathscr{F}u\|_{L_{\tau\xi}^{2} (A_{j}\cap B_{k})}\biggr)^{2}\biggr]^{1/2}. 
\end{eqnarray*} We shall also use the norms $\|u\|_{X}$ and $\|u\|_{Y}$ of the spaces $$ X = \left\{u\in \mathscr{S}^{'}(\R^{2})\, :\, \|u\|_{X}<\infty\right\}, \,$$$$ Y = \left\{u\in \mathscr{S}^{'}(\R^{2})\, :\, \|u\|_{Y}<\infty\right\}, $$ where \begin{eqnarray*} \|u\|_{X} &=& \left\|\mathscr{F}^{-1}[\chi_{D^{c}}\mathscr{F}u]\right\|_{X^{-\frac{3}{4}, \frac{1}{2}, 1}} +\|\mathscr{F}^{-1}[\chi_{ D}\mathscr{F}u]\|_{X^{-\frac{3}{4}, \frac{1}{2}}},\\ \|u\|_{Y} &=& \left\|\langle\xi\rangle^{-3/4}\mathscr{F}u\right\|_{L_{\xi}^{2}L_{\tau}^{1}}, \end{eqnarray*} where $D^{c}=\R^2_{\tau\xi}\setminus D$ is the complementary set of $D$ given above. The spaces $\hat{X}, \hat{X}^{s,\>b,\>1}$ and $\hat{X}^{s,\>b}$ are defined corresponding to the following norms \begin{eqnarray*} \|f\|_{\hat{X}}=\|\mathscr{F}^{-1}f\|_{X},\quad \|f\|_{\hat{X}^{s,b,1}}= \|\mathscr{F}^{-1}f\|_{X^{s,b,1}}, \quad \|f\|_{\hat{X}^{s,b}}=\|\mathscr{F}^{-1}f\|_{X^{s,b}}. \end{eqnarray*} The space $ X_{T}$ is the restriction of $X$ onto the finite time interval $[-T,T]$ and is defined according to the norm \begin{equation} \|u\|_{X_{T}} =\inf \left\{\|v\|_{X}:v\in X, u(t)=v(t) \>\> {\rm for} \> -T\leq t\leq T\right\}.\label{1.04} \end{equation} The main result of this paper is as follow. \begin{Theorem}\label{Thm1} The Cauchy problem (\ref{1.01})(\ref{1.02}) is locally well-posed in $H^{-3/4}(\R)$ with $\beta <0,\gamma >0$. That is, for $u_{0} \in H^{-3/4}(\R)$, there exist a $T>0$ and a solution $u\in C([-T, T]; H^{-3/4}(\R))$, and the solution map $u_0\mapsto u(t)$ is locally Lipschitz continuous from $ H^{-3/4}(\R)$ into $C([-T, T]; H^{-3/4}(\R))$. \end{Theorem} The rest of the paper is arranged as follows. In Section 2, we give some preliminaries. In Section 3, we show two crucial dyadic bilinear estimates and then apply them to establish bilinear estimates. In Section 4, we prove the Theorem 1.1. 
Finally in Section 5, we give an appendix and show two examples of the bilinear estimates in standard Bourgain spaces. \noindent {\bf Remark:} Local well-posedness of the Cauchy problem for the Ostrovsky equation with positive dispersion at the critical regularity and the global well-posedness of the Cauchy problem for the Ostrovsky equation in $H^{s}(\R)$ with $s\geq -3/4$ has not been established up to now, we will be devoted to the problem later. \noindent{\large\bf 2. Preliminaries } \setcounter{equation}{0} \setcounter{Theorem}{0} \setcounter{Lemma}{0} \setcounter{section}{2} In this section, we make some preparations. These includes the estimates for some convolutions and basic inequality about the the phase functions which are used to get the dyadic bilinear estimates in the Section 3. We also give some elementary estimates for the unitary group corresponding to the Ostrovky equation. \begin{Lemma}\label{Lemma2.1} Assume that $f,g\in \mathscr{S}'(\R^{2})$, $\supp f\subset A_{j_{1}},\, \supp g \subset A_{j_{2}}$ and \begin{eqnarray*} K:=\inf\left\{|\xi_{1}-\xi_{2}|:\exists\> \tau_{1},\tau_{2},\> s.t.\>(\xi_{1},\tau_{1})\in \supp f ,\, (\xi_{2},\tau_{2})\in \supp g \right\}>0, \end{eqnarray*} If \begin{eqnarray} (\xi_{1},\tau_{1})\in \supp f ,\, (\xi_{2},\tau_{2})\in \supp g,\>\xi_{1}\xi_{2}<0\label{2.01} \end{eqnarray} or \begin{eqnarray} (\xi_{1},\tau_{1})\in \supp f ,\, (\xi_{2},\tau_{2})\in \supp g,\>\xi_{1}\xi_{2}\geq 0,\quad \left|1-\frac{4}{3\xi^{2}\xi_{1}\xi_{2}}\right| > \frac{1}{2},\label{2.02} \end{eqnarray} then \begin{eqnarray} &&\||\xi|^{1/4}f\ast g\|_{L^{2}(\SR^{2})}\leq C\|f\|_{\hat{X}^{0,\frac{1}{2},1}} \|g\|_{\hat{X}^{0,\frac{1}{2},1}},\label{2.03}\\ &&\||\xi|^{1/2}f\ast g\|_{L^{2}(\SR^{2})}\leq CK^{-1/2} \|f\|_{\hat{X}^{0,\frac{1}{2},1}}\|g\|_{\hat{X}^{0,\frac{1}{2},1}}.\label{2.04} \end{eqnarray} \end{Lemma} {\bf Proof.} First we prove \begin{eqnarray} &&\left|\int_{\SR^{2}}\int_{\!\!\!\mbox{\scriptsize $ \begin{array}{l} 
\xi=\xi_{1}+\xi_{2}\\ \tau=\tau_{1}+\tau_{2} \end{array} $}}(\chi_{B_{k_{1}}}f)(\xi_{1},\tau_{1})\> (\chi_{B_{k_{2}}}g)(\xi_{2},\tau_{2})\> |\xi|^{1/4} h(\tau,\xi) d\xi_{1}d\tau_{1}d\xi d\tau \right|\nonumber\\ &&\leq C2^{\frac{k_{1}+k_{2}}{2}}\|f\|_{L_{\xi\tau}^{2}(B_{k_{1}})}\|g\|_{L_{\xi\tau}^{2}(B_{k_{2}})} \|h\|_{L_{\xi\tau}^{2}}.\label{2.05} \end{eqnarray} and \begin{eqnarray} && \quad \left|\int_{\SR^{2}}\int_{\!\!\!\mbox{\scriptsize $ \begin{array}{l} \xi=\xi_{1}+\xi_{2}\\ \tau=\tau_{1}+\tau_{2} \end{array} $}} (\chi_{B_{k_{1}}}f)(\xi_{1},\tau_{1})\> (\chi_{B_{k_{2}}}g)(\xi_{2},\tau_{2})\> |\xi|^{1/2}\, h(\xi,\tau) d\xi_{1}d\tau_{1}d\xi d\tau \right|\nonumber\\ && \leq CK^{-1/2}2^{\frac{k_{1}+k_{2}}{2}}\> \|f\|_{L_{\xi\tau}^{2}(B_{k_{1}})}\> \|g\|_{L_{\xi\tau}^{2}(B_{k_{2}})}\> \|h\|_{L_{\xi\tau}^{2}} \label{2.06} \end{eqnarray} if (\ref{2.01}) or (\ref{2.02}) is valid. By using the Cauchy-Schwartz inequality and the Fubini theorem, we obtain \begin{eqnarray} && \quad \left|\int_{\SR^{2}}\int_{\!\!\!\mbox{\scriptsize $ \begin{array}{l} \xi=\xi_{1}+\xi_{2}\\ \tau=\tau_{1}+\tau_{2} \end{array} $}} (\chi_{B_{k_{1}}}f)(\xi_{1},\tau_{1})\> (\chi_{B_{k_{2}}}g)(\xi_{2},\tau_{2})\> h(\xi,\tau) d\xi_{1}d\tau_{1}d\xi d\tau \right|\nonumber\\ && \leq C\sup\limits_{(\xi,\>\tau)\in \SR^{2}}m_{1}(\xi,\tau)^{1/2}\> \|f\|_{L_{\xi\tau}^{2}(B_{k_{1}})}\> \|g\|_{L_{\xi\tau}^{2}(B_{k_{2}})}\> \|h\|_{L_{\xi\tau}^{2}}, \label{2.07} \end{eqnarray} where \begin{eqnarray*} &\displaystyle m_{1}(\tau,\xi)=\int\chi_{\Lambda_{1}}(\xi_{1},\tau_{1},\xi,\tau)d\xi_{1}d\tau_{1}, &\\ &\displaystyle \Lambda_{1}:=\left\{(\xi_{1},\tau_{1},\xi,\tau)\in \R^{4}\,:\, (\xi_{1},\tau_{1})\in \supp f,\> (\xi_{2},\tau_{2})\in \supp g\right\}, & \end{eqnarray*} in which $\tau=\tau_1+\tau_2,\xi=\xi_1+\xi_2$ and (\ref{2.01}) or (\ref{2.02}) is valid. 
Thus, the proofs of (\ref{2.05}) and (\ref{2.06}) are reduced to \begin{eqnarray} m_{1}(\tau,\xi)\leq C{\rm min}\left\{|\xi|^{-1/2}2^{k_{1}+k_{2}},K^{-1}|\xi|^{-1} 2^{k_{1}+k_{2}}\right\}.\label{2.08} \end{eqnarray} For fixed $\tau,\xi\neq 0$, let $E_1$ and $E_2$ be the projections of $\Lambda_{1}$ onto the $\xi_1$-axis and $\tau_1$-axis respectively. We show \begin{eqnarray} &&\mes E_1 \le C \min\left\{ |\xi|^{-1/2}(2^{k_{1}/2}+2^{k_{2}/2} ), K^{-1} |\xi|^{-1} (2^{k_{1}}+2^{k_{2}}) \right\}, \label{2.09}\\ &&\mes E_2 \le C \min \left\{2^{k_{1}}, 2^{k_{2}}\right\}, \label{2.010} \end{eqnarray} then (\ref{2.08}) follows. As in the introduction, it is easily checked that \begin{eqnarray} && \tau-\frac{\xi^{3}}{4}+\frac{4}{\xi}-\left(\tau_{1}-\xi_{1}^{3} + \frac{1}{\xi_{1}}\right) -\left(\tau_{2}-\xi_{2}^{3}+ \frac{1}{\xi_{2}}\right)\nonumber\\&& = \frac{3}{4}\xi(\xi_{1}-\xi_{2})^{2}\left[1-\frac{4}{3\xi^{2}\xi_{1}\xi_{2}}\right]. \label{2.011} \end{eqnarray} From (\ref{2.011}), we have that \begin{equation} \max \Biggl\{\frac{4|M-C(2^{k_{1}}+2^{k_{2}})|} {3|\xi|\left|1-\frac{4}{3\xi^{2}\xi_{1}\xi_{2}}\right|}, K^{2}\Biggr\} \leq |2\xi_{1}-\xi|^{2} \leq \frac{4|M+C(2^{k_{1}}+2^{k_{2}})|} {3|\xi|\left|1-\frac{4}{3\xi^{2}\xi_{1}\xi_{2}}\right|}, \label{2.012} \end{equation} where $ M=\tau-\frac{\xi^{3}}{4}+\frac{4}{\xi} $ and $C$ is some generic positive constant. \noindent Case (\ref{2.01}) holds: in this case, $\xi_{1}\xi_{2}<0$. 
\noindent When $K\geq \Biggl\{ \dfrac{4|M-C(2^{k_{1}}+2^{k_{2}})|} {3|\xi|\Bigl||1-\dfrac{4}{3\xi^{2}\xi_{1}\xi_{2}}\Bigr|} \Biggr\}^{1/2}$, the length of the interval that $|2\xi_{1}-\xi|$ lies in is bounded by \begin{eqnarray} && \Biggl\{ \frac{4|M+C(2^{k_{1}}+2^{k_{2}})|} {3|\xi|\left|1-\frac{4}{3\xi^{2}\xi_{1}\xi_{2}}\right|} \Biggr\}^{1/2}-K\nonumber\\ && \qquad = \displaystyle \dfrac{\dfrac{4|M+C(2^{k_{1}}+2^{k_{2}})|} {3|\xi|\left|1-\frac{4}{3\xi^{2}\xi_{1}\xi_{2}}\right|} -K^{2}} {\Biggl\{\dfrac{4|M+C(2^{k_{1}}+2^{k_{2}})|} {3|\xi|\left|1-\frac{4}{3\xi^{2}\xi_{1}\xi_{2}}\right|} \Biggr\}^{1/2} +K}\nonumber\\ &&\qquad \leq \frac{ \dfrac{4|M+C(2^{k_{1}}+2^{k_{2}})|} {3|\xi|\left|1-\frac{4}{3\xi^{2}\xi_{1}\xi_{2}}\right|} - \dfrac{4|M-C(2^{k_{1}}+2^{k_{2}})|} {3|\xi|\left|1-\frac{4}{3\xi^{2}\xi_{1}\xi_{2}}\right|} } {\Biggl\{\dfrac{4|M+C(2^{k_{1}}+2^{k_{2}})|} {3|\xi|\left|1-\frac{4}{3\xi^{2}\xi_{1}\xi_{2}}\right|} \Biggr\}^{1/2}+K}\nonumber\\ &&\qquad\leq \dfrac{C(2^{k_{1}}+2^{k_{2}})} {|\xi|K\left|1-\frac{4}{3\xi^{2}\xi_{1}\xi_{2}}\right|} \leq \dfrac{C(2^{k_{1}}+2^{k_{2}})}{|\xi|K}. \label{2.013} \end{eqnarray} From the first {\it inequality} of the above, such length of the interval of $|2\xi_{1}-\xi|$ is also bounded by \begin{eqnarray} \frac{C(2^{k_{1}/2}+2^{k_{2}/2})}{|\xi|^{1/2}\left|1-\frac{4} {3\xi^{2}\xi_{1}\xi_{2}}\right|^{1/2}}\leq \frac{C(2^{k_{1}/2}+2^{k_{2}/2})}{|\xi|^{1/2}}.\label{2.014} \end{eqnarray} By (\ref{2.013}) and (\ref{2.014}), we obtain that the measure of $E_1$ in this part is bounded by \begin{equation} C \min\left\{ |\xi|^{-1/2}(2^{k_{1}/2}+2^{k_{2}/2} ), K^{-1} |\xi|^{-1} (2^{k_{1}}+2^{k_{2}}) \right\}. 
\label{2.015} \end{equation} When $K\leq \Biggl\{ \dfrac{4|M-C(2^{k_{1}}+2^{k_{2}})|} {3|\xi|\left|1-\frac{4}{3\xi^{2}\xi_{1}\xi_{2}}\right|} \Biggr\}^{1/2}$, the length of the interval of $|2\xi_{1}-\xi|$ is bounded by \begin{eqnarray*} &&\Biggl\{ \frac{4|M+C(2^{k_{1}}+2^{k_{2}})|} {3|\xi|\left|1-\frac{4}{3\xi^{2}\xi_{1}\xi_{2}}\right|} \Biggr\}^{1/2} - \Biggl\{ \frac{4|M-C(2^{k_{1}}+2^{k_{2}})|} {3|\xi|\left|1-\frac{4}{3\xi^{2}\xi_{1}\xi_{2}}\right|} \Biggr\}^{1/2},\nonumber\\&&\leq C \frac{ \dfrac{4|M+C(2^{k_{1}}+2^{k_{2}})|} {3|\xi|\left|1-\frac{4}{3\xi^{2}\xi_{1}\xi_{2}}\right|} - \dfrac{4|M-C(2^{k_{1}}+2^{k_{2}})|} {3|\xi|\left|1-\frac{4}{3\xi^{2}\xi_{1}\xi_{2}}\right|} } {\Biggl\{\dfrac{4|M+C(2^{k_{1}}+2^{k_{2}})|} {3|\xi|\left|1-\frac{4}{3\xi^{2}\xi_{1}\xi_{2}}\right|} \Biggr\}^{1/2}+K},\nonumber\\ \end{eqnarray*} similar to (\ref{2.013}) and (\ref{2.014}), the measure of $E_1$ in this part is bounded by (\ref{2.015}). \noindent Case (\ref{2.02}) holds: in this case, $\xi_{1}\xi_{2}\geq0$ and $\left|1-\frac{4} {3\xi^{2}\xi_{1}\xi_{2}}\right| > \frac{1}{2}$, it can be proved similarly that the measure of $E_1$ in this part is also bounded by (\ref{2.015}). Recall that $(\xi_1,\tau_1)\in B_{k_1}$ and $(\xi_2,\tau_2)\in B_{k_2}$, \begin{eqnarray} \left|\tau_{1}-\xi_{1}^{3}+\frac{1}{\xi_{1}}\right| \leq C2^{k_{1}}, \quad\left|\tau-\tau_{1}-(\xi-\xi_{1})^{3}+\frac{1}{\xi-\xi_{1}}\right| \leq C2^{k_{2}},\label{2.016} \end{eqnarray} thus we get (\ref{2.010}). Consequently, we have (\ref{2.06}). 
By using the Cauchy-Schwartz inequality and the triangle inequality, we have that \begin{eqnarray} &&\hspace{-1cm}\left|\int_{\SR^{2}}\int_{\!\!\!\mbox{\scriptsize $ \begin{array}{l} \xi=\xi_{1}+\xi_{2}\\ \tau=\tau_{1}+\tau_{2} \end{array} $}}f(\xi_{1},\tau_{1})g(\xi_{2},\tau_{2})h(\xi,\tau)d\xi_{1}d\tau_{1} d\xi d\tau \right|\nonumber\\ &&\hspace{-1cm}\leq C\sum_{k_{1}}\sum_{k_{2}}\left|\int_{\SR^{2}} \int_{\!\!\!\mbox{\scriptsize $ \begin{array}{l} \xi=\xi_{1}+\xi_{2}\\ \tau=\tau_{1}+\tau_{2} \end{array} $}}(\chi_{B_{k_{1}}}f)(\xi_{1},\tau_{1})(\chi_{B_{k_{2}}}g)(\xi_{2},\tau_{2}) h(\xi,\tau)d\xi_{1}d\tau_{1}d\xi d\tau \right|.\label{2.017} \end{eqnarray} Combining (\ref{2.07}), (\ref{2.08}) with (\ref{2.017}), we have (\ref{2.03})-(\ref{2.04}). We have completed the proof of Lemma 2.1. \begin{Lemma}\label{Lemma2.2} Assume that $f,g\in \mathscr{S}'(\R)$, $\supp f\subset A_{j_{1}},\, \supp g \subset A_{j_{2}}$.If \begin{eqnarray} K_{1}:=\inf\left\{|\xi_{1}-\xi_{2}|:\exists\> \tau_{1},\tau_{2},\> s.t.\>(\xi_{1},\tau_{1})\in \supp f ,\, (\xi_{2},\tau_{2})\in \supp g\right\}\geq2,\label{2.018} \end{eqnarray} and \begin{eqnarray} (\xi_{1},\tau_{1})\in \supp f,\, (\xi_{2},\tau_{2})\in \supp g,\>\xi_{1}\xi_{2}\geq 0,\quad\left|1-\frac{4}{3\xi^{2}\xi_{1}\xi_{2}}\right| \leq \frac{1}{2},\label{2.019} \end{eqnarray} then \begin{eqnarray} &&\||\xi|^{1/4}f\ast g\|_{L^{2}(\SR^{2})} \leq C\|f\|_{\hat{X}^{0,\frac{1}{2},1}}\|g\|_{\hat{X}^{0,\frac{1}{2},1}},\label{2.020}\\ && \||\xi|^{1/2}f\ast g\|_{L^{2}(\SR^{2})}\leq CK_{1}^{-1/2} \|f\|_{\hat{X}^{0,\frac{1}{2},1}}\|g\|_{\hat{X}^{0,\frac{1}{2},1}}.\label{2.021} \end{eqnarray} \end{Lemma} {\bf Proof.} First we prove \begin{eqnarray} &&\left|\int_{\SR^{2}}\int_{\!\!\!\mbox{\scriptsize $ \begin{array}{l} \xi=\xi_{1}+\xi_{2}\\ \tau=\tau_{1}+\tau_{2} \end{array} $}}(\chi_{B_{k_{1}}}f)(\xi_{1},\tau_{1})\> (\chi_{B_{k_{2}}}g)(\xi_{2},\tau_{2})\> |\xi|^{1/4} h(\xi,\tau) d\xi_{1}d\tau_{1}d\xi d\tau \right|\nonumber\\ &&\leq 
C2^{\frac{k_{1}+k_{2}}{2}}\|f\|_{L_{\xi\tau}^{2}(B_{k_{1}})}\|g\|_{L_{\xi\tau}^{2}(B_{k_{2}})} \|h\|_{L_{\xi\tau}^{2}}.\label{2.022} \end{eqnarray} and \begin{eqnarray} && \quad \left|\int_{\SR^{2}}\int_{\!\!\!\mbox{\scriptsize $ \begin{array}{l} \xi=\xi_{1}+\xi_{2}\\ \tau=\tau_{1}+\tau_{2} \end{array} $}} (\chi_{B_{k_{1}}}f)(\xi_{1},\tau_{1})\> (\chi_{B_{k_{2}}}g)(\xi_{2},\tau_{2})\> |\xi|^{1/2}\, h(\xi,\tau) d\xi_{1}d\tau_{1}d\xi d\tau \right|\nonumber\\ && \leq CK_{1}^{-1/2}\> 2^{\frac{k_{1}+k_{2}}{2}}\|f\|_{L_{\xi\tau}^{2}(B_{k_{1}})}\> \|g\|_{L_{\xi\tau}^{2}(B_{k_{2}})}\> \|h\|_{L_{\xi\tau}^{2}} \label{2.023} \end{eqnarray} if (\ref{2.019}) is valid. By using the Cauchy-Schwartz inequality and the Fubini theorem, we obtain \begin{eqnarray} && \quad \left|\int_{\SR^{2}}\int_{\!\!\!\mbox{\scriptsize $ \begin{array}{l} \xi=\xi_{1}+\xi_{2}\\ \tau=\tau_{1}+\tau_{2} \end{array} $}} (\chi_{B_{k_{1}}}f)(\xi_{1},\tau_{1})\> (\chi_{B_{k_{2}}}g)(\xi_{2},\tau_{2})\> h(\xi,\tau) d\xi_{1}d\tau_{1}d\xi d\tau \right|\nonumber\\ && \leq C\sup\limits_{(\xi,\tau\>)\in \SR^{2}}m_{2}(\xi,\tau)^{1/2}\> \|f\|_{L_{\xi\tau}^{2}(B_{k_{1}})}\> \|g\|_{L_{\xi\tau}^{2}(B_{k_{2}})}\> \|h\|_{L_{\xi\tau}^{2}}, \label{2.024} \end{eqnarray} where \begin{eqnarray*} &\displaystyle m_{2}(\xi,\tau)=\int\chi_{\Lambda_{2}}(\xi_{1},\tau_{1},\xi,\tau)d\xi_{1} d\tau_{1}, &\\ &\displaystyle \Lambda_{2}:=\left\{(\xi_{1},\tau_{1},\xi,\tau)\in \R^{4}\,:\, (\xi_{1},\tau_{1})\in \supp f,\> (\xi_{2},\tau_{2})\in \supp g\right\}, & \end{eqnarray*} in which $\tau=\tau_1+\tau_2,\xi=\xi_1+\xi_2$ and (\ref{2.019}) is valid. Thus, the proofs of (\ref{2.022}) and (\ref{2.023}) are reduced to \begin{eqnarray} m_{2}(\tau,\xi)\leq C{\rm min}\left\{|\xi|^{-1/2}2^{k_{1}+k_{2}},K_{1}^{-1}|\xi|^{-1} 2^{k_{1}+k_{2}}\right\}.\label{2.025} \end{eqnarray} For fixed $\tau,\xi\neq 0$, let $E_3$ and $E_4$ be the projections of $\Lambda_{2}$ onto the $\xi_1$-axis and $\tau_1$-axis respectively. 
We show \begin{eqnarray} &&\mes E_3 \le C \min\left\{ |\xi|^{-1/2}(2^{k_{1}/2}+2^{k_{2}/2} ), K_{1}^{-1} |\xi|^{-1} (2^{k_{1}}+2^{k_{2}}) \right\}, \label{2.026}\\ &&\mes E_4 \leq C \min \left\{2^{k_{1}}, 2^{k_{2}}\right\}, \label{2.027} \end{eqnarray} then (\ref{2.025}) follows. Since $\xi_{1}\xi_{2}\geq0$ and $\left|1-\frac{4}{3 \xi^{2}\xi_{1}\xi_{2}}\right|\leq \frac{1}{2}$, we have \begin{eqnarray*} \frac{1}{2}\leq \frac{4}{3\xi^{2}\xi_{1}\xi_{2}}\leq\frac{3}{2}. \end{eqnarray*} The above inequality is equivalent to \begin{eqnarray} \frac{8}{9\xi^{2}}\leq \xi_{1}(\xi-\xi_{1})\leq \frac{8}{3\xi^{2}},\label{2.028} \end{eqnarray} from which we have \begin{eqnarray} \frac12 \left({\xi-\sqrt{\xi^{2}-\frac{32}{9\xi^{2}}}}\,\right) \leq \xi_{1} \leq \frac12\left({\xi-\sqrt{\xi^{2}-\frac{32}{3\xi^{2}}}}\,\right) \label{2.029} \end{eqnarray} or \begin{eqnarray} \frac12\left({\xi+\sqrt{\xi^{2}-\frac{32}{3\xi^{2}}}}\,\right) \leq \xi_{1} \leq \frac12\left({\xi+\sqrt{\xi^{2}-\frac{32}{9\xi^{2}}}}\,\right) \label{2.030}. \end{eqnarray} From (\ref{2.029}) and (\ref{2.030}), we see that the measure of $E_1$ in this part is bounded by \begin{eqnarray} C|\xi|^{-2}\leq C|\xi|^{-1}K_{1}^{-1}\leq C{\rm min}\left\{ \frac{(2^{k_{1}}+2^{k_{2}})} {|\xi|K_{1}},\frac{(2^{k_{1}/2}+2^{k_{2}/2})}{|\xi|^{1/2}}\right\}\label{2.031} \end{eqnarray} since $2\leq K_{1}\leq |\xi_{1}-\xi_{2}|\leq |\xi_{1}+\xi_{2}|=|\xi|.$ Thus, we have (\ref{2.026}). Since $(\xi_1,\tau_1)\in B_{k_1}$ and $(\xi_2,\tau_2)\in B_{k_2}$, \begin{eqnarray} \left|\tau_{1}-\xi_{1}^{3}+\frac{1}{\xi_{1}}\right| \leq C2^{k_{1}}, \quad\left|\tau-\tau_{1}-(\xi-\xi_{1})^{3}+\frac{1}{\xi-\xi_{1}}\right| \leq C2^{k_{2}},\label{2.032} \end{eqnarray} we get (\ref{2.027}). Consequently, we have (\ref{2.025}). 
By using the Cauchy-Schwartz inequality and the triangle inequality, we have that \begin{eqnarray} &&\hspace{-1cm}\left|\int_{\SR^{2}}\int_{\!\!\!\mbox{\scriptsize $ \begin{array}{l} \xi=\xi_{1}+\xi_{2}\\ \tau=\tau_{1}+\tau_{2} \end{array} $}}f(\xi_{1},\tau_{1})g(\xi_{2},\tau_{2})h(\xi,\tau)d\xi_{1}d\tau_{1} d\xi d\tau \right|\nonumber\\ &&\hspace{-1cm}\leq C\sum_{k_{1}}\sum_{k_{2}}\left|\int_{\SR^{2}} \int_{\!\!\!\mbox{\scriptsize $ \begin{array}{l} \xi=\xi_{1}+\xi_{2}\\ \tau=\tau_{1}+\tau_{2} \end{array} $}}(\chi_{B_{k_{1}}}f)(\xi_{1},\tau_{1})(\chi_{B_{k_{2}}}g)(\xi_{2},\tau_{2}) h(\xi,\tau)d\xi_{1}d\tau_{1}d\xi d\tau \right|.\label{2.033} \end{eqnarray} Combining (\ref{2.022}), (\ref{2.023}) with (\ref{2.033}), we have (\ref{2.020})-(\ref{2.021}). We have completed the proof of Lemma 2.2. \noindent {\bf Remark 1:} From the proof process of (\ref{2.021}), to obtain (\ref{2.021}), it is sufficient to require that $K_{1}>0.$ \begin{Lemma}\label{Lemma2.3} Assume that $f,g\in \mathscr{S}'(\R^{2})$, $\supp f\subset A_{j_{1}},\, \supp g \subset A_{j_{2}}$. \begin{eqnarray*} K_{2}:=\inf\left\{|\xi_{1}-\xi_{2}|:\exists\> \tau_{1},\tau_{2},\> s.t.\>(\xi_{1},\tau_{1})\in \supp f ,\, (\xi_{2},\tau_{2})\in \supp g\right\}>0, \end{eqnarray*} then \begin{eqnarray*} \||\xi|^{1/2}f\ast g\|_{L^{2}(\SR^{2})}\leq CK_{2}^{-1/2} \|f\|_{\hat{X}^{0,\frac{1}{2},1}}\|g\|_{\hat{X}^{0,\frac{1}{2},1}}. \end{eqnarray*} \end{Lemma} {\bf Proof.} Combining Lemma 2.1 with Lemma 2.2 and Remark 1, we have that Lemma 2.3. \begin{Lemma}\label{Lemma2.4} Assume that $f\in \mathscr{S}^{'}(\R^{2})$ , $g\in \mathscr{S}(\R^{2})$ with $\mathop{\rm supp} f\subset A_{j}$ for some $j\geq0$ and $\,\Omega\subset \R^{2}$ has positive measure. Let \begin{eqnarray*} K_{3}:=\inf\left\{|\xi_{1}+\xi|:\exists\, \tau,\tau_{1}\> s.t.\> (\xi,\tau)\in \Omega,(\xi_{1},\tau_{1})\in \supp f \right\}>0. 
\end{eqnarray*} If \begin{eqnarray} (\xi,\tau)\in \Omega,(\xi_{1},\tau_{1})\in \supp f,\>\xi\xi_{1}>0\label{2.034} \end{eqnarray} or \begin{eqnarray} (\xi,\tau)\in \Omega ,\, (\xi_{1},\tau_{1})\in \supp f,\>\xi\xi_{1}\geq 0,\quad \left|1+\frac{4}{3\xi\xi_{1}\xi_{2}^{2}}\right| > \frac{1}{2}.\label{2.035} \end{eqnarray} Then, for any $k\geq0$, we have \begin{eqnarray} &&\|f\ast g\|_{L^{2}(B_{k})} \leq C2^{k/4}\|f\|_{\hat{X}^{0,\frac{1}{2},1}}\||\xi|^{-1/4}g\|_{L^{2}(\SR^{2})} \label{2.036},\\ &&\|f\ast g\|_{L^{2}(\Omega\cap B_{k})} \leq C2^{k/2}K_{3}^{-1/2} \|f\|_{\hat{X}_{0,\frac{1}{2},1}}\> \|\,|\xi|^{-1/2}g\|_{L^{2}(\SR^{2})}\> . \label{2.037} \end{eqnarray} \end{Lemma} {\bf Proof.} First we prove \begin{eqnarray} &&\quad \left|\int_{\SR^{2}}\int_{\!\!\!\mbox{\scriptsize $ \begin{array}{l} \xi=\xi_{1}+\xi_{2}\\ \tau=\tau_{1}+\tau_{2} \end{array} $}} (\chi_{B_{k_{1}}}f(\xi_{1},\tau_{1}))g(\xi_{2},\tau_{2}) h(\xi,\tau)d\xi_{1}d\tau_{1}d\xi d\tau \right|\nonumber\\ && \leq C2^{k_{1}/2}2^{k/4}\|f\|_{L_{\xi\tau}^{2}(B_{k_{1}})}\,\|\,|\xi|^{-1/4}g\|_{L_{\xi\tau}^{2}} \|h\|_{L_{\xi\tau}^{2}}\label{2.038} \end{eqnarray} for any $h\in L^{2}(\R^2)$ with $\mathop{\rm supp}h\subset B_{k}$ and \begin{eqnarray} &&\quad \left|\int_{\SR^{2}}\int_{\!\!\!\mbox{\scriptsize $ \begin{array}{l} \xi=\xi_{1}+\xi_{2}\\ \tau=\tau_{1}+\tau_{2} \end{array} $}}(\chi_{B_{k_{1}}}f(\xi_{1},\tau_{1}))g(\xi_{2},\tau_{2})h(\xi,\tau) d\xi_{1}d\tau_{1}d\xi d\tau \right|\nonumber\\ &&\leq CK_{3}^{-1/2}2^{\frac{k+k_{1}}{2}}\|f\|_{L_{\xi\tau}^{2}(B_{k_{1}})}\, \|\,|\xi|^{-1/2}g\|_{L_{\xi\tau}^{2}} \|h\|_{L_{\xi\tau}^{2}}\label{2.039} \end{eqnarray} for any $h\in L^{2}(\R^2)$ with $\mathop{\rm supp}h\subset B_{k}\cap \Omega.$ By using the Cauchy-Schwartz inequality and the Fubini theorem, we obtain \begin{eqnarray} &&\quad \left|\int_{\SR^{2}}\int_{\!\!\!\mbox{\scriptsize $ \begin{array}{l} \xi=\xi_{1}+\xi_{2}\\ \tau=\tau_{1}+\tau_{2} \end{array} 
$}}(\chi_{B_{k_{1}}}f(\xi_{1},\tau_{1}))g(\xi_{2},\tau_{2})h(\xi,\tau) d\xi_{1}d\tau_{1}d\xi d\tau \right| \nonumber\\ && \leq C\sup\limits_{(\xi_{2}, \tau_{2})\in \SR^{2}}m_{3}(\xi_{2},\tau_{2})^{1/2} \|f\|_{L_{\xi\tau}^{2}}\|g\|_{L_{\xi\tau}^{2}} \|h\|_{L_{\xi\tau}^{2}},\label{2.040} \end{eqnarray} where \begin{eqnarray*} &\displaystyle m_{3}(\xi_{2},\tau_{2})=\int\chi_{\Lambda_{3}}(\xi_{2},\tau_{2},\xi,\tau)d\xi d\tau, &\\ &\displaystyle \Lambda_{3}:=\left\{(\xi_{2},\tau_{2},\xi,\tau)\in \R^{4}\, :\, (\xi_{1},\tau_{1})\in \supp f,\quad (\xi,\tau)\in \supp h\right\}, & \end{eqnarray*} in which $\tau=\tau_1+\tau_2,\xi=\xi_1+\xi_2$. Hence, the proofs of (\ref{2.038}) and (\ref{2.039}) are reduced to \begin{eqnarray} m_{3}(\xi_{2},\tau_{2})\leq C{\rm min}\left\{|\xi_{2}|^{-1/2} 2^{k/2+k_{1}},K_3^{-1}|\xi_{2}|^{-1}2^{k+k_{1}}\right\}.\label{2.041} \end{eqnarray} For fixed $\tau,\xi\neq 0$, $(\xi,\tau)\in B_{k}$, we let $F_1$ and $F_2$ be the projections of $\Lambda_3$ onto the $\xi$-axis and $\tau$-axis, respectively. We shall show \begin{eqnarray} &&\mes F_1\le C\min \left\{|\xi_{2}|^{-1/2}(2^{k/2}+2^{k_{1}/2}), K_{3}^{-1}|\xi_{2}|^{-1}(2^{k}+2^{k_{1}}) \right\}, \label{2.042}\\ &&\mes F_2\le C\min \left\{2^{k},2^{k_{1}} \right\}, \label{2.043} \end{eqnarray} then (\ref{2.041}) follows.
Similar to (\ref{2.011}), we have \begin{eqnarray} &&\tau_{2}-\frac{\xi_{2}^{3}}{4}+\frac{4}{\xi_{2}} -\left(\tau-\xi^{3}+\frac{1}{\xi}\right) +\left(\tau_{1}-\xi_{1}^{3}+\frac{1}{\xi_{1}}\right) \nonumber\\&&= \frac{3}{4}\xi_{2}(2\xi-\xi_{2})^{2}\left[1+\frac{4} {3\xi\xi_{1}\xi_{2}^{2}}\right].\quad \label{2.044} \end{eqnarray} From (\ref{2.044}), we get \begin{eqnarray} {\max}\Biggl\{ \frac{4|M_{1}-C(2^{k}+2^{k_{1}})|} {3|\xi_{2}|\left|1+\frac{4}{3\xi\xi_{1}\xi_{2}^{2}} \right|},\> K_{3}^{2}\Biggr\} \leq |2\xi-\xi_{2}|^{2} \leq \frac{4|M_{1}+C(2^{k}+2^{k_{1}})|} {3|\xi_{2}|\left|1+\frac{4}{3\xi\xi_{1}\xi_{2}^{2}}\right|}, \label{2.045} \end{eqnarray} where $ M_{1}=\tau_{2}-\frac{\xi_{2}^{3}}{4}+\frac{4}{\xi_{2}}.$ \noindent When (\ref{2.034}) holds: in this case $\xi\xi_{1}>0$, thus, we have that \begin{eqnarray*} \left|1+\frac{4}{3\xi\xi_{1}\xi_{2}^{2}}\right|>\frac{1}{2}. \end{eqnarray*} We consider the following two cases: \begin{eqnarray*} &&K_{3}\geq \Biggl\{ \dfrac{4|M_{1}-C(2^{k}+2^{k_{1}})|} {3|\xi_{2}|\left|1+\frac{4}{3\xi\xi_{1}\xi_{2}^{2}}\right|} \Biggr\}^{1/2},\\ &&K_{3}\leq \Biggl\{\dfrac{4|M_{1}-C(2^{k}+2^{k_{1}})|} {3|\xi_{2}|\left|1+\frac{4}{3\xi\xi_{1}\xi_{2}^{2}}\right|}\Biggr\}^{1/2}.
\end{eqnarray*} When $K_{3}\geq \Biggl\{ \dfrac{4|M_{1}-C(2^{k}+2^{k_{1}})|} {3|\xi_{2}|\left|1+\frac{4}{3\xi\xi_{1}\xi_{2}^{2}}\right|} \Biggr\}^{1/2}$, from (\ref{2.045}), we have that the length of the interval that $|2\xi-\xi_{2}|$ lies in is bounded by \begin{eqnarray} && \Biggl\{\frac{4|M_{1}+C(2^{k}+2^{k_{1}})|} {3|\xi_{2}|\left|1+\frac{4}{3\xi\xi_{1}\xi_{2}^{2}}\right|} \Biggr\}^{1/2}-K_{3}\nonumber\\ &&\qquad = \dfrac{\dfrac{4|M_{1}+C (2^{k}+2^{k_{1}})|} {3|\xi_{2}|\left|1+\frac{4}{3\xi\xi_{1}\xi_{2}^{2}}\right|} -K_{3}^{2}} {\Biggl\{\dfrac{4|M_{1}+C(2^{k}+2^{k_{1}})|} {3|\xi_{2}|\left|1+\frac{4}{3\xi\xi_{1}\xi_{2}^{2}}\right|} \Biggr\}^{1/2} + K_{3}}\nonumber\\ &&\qquad \leq \frac { \dfrac{4|M_{1}+C(2^{k}+2^{k_{1}})|} {3|\xi_{2}|\left|1+\frac{4}{3\xi\xi_{1}\xi_{2}^{2}}\right|} - \dfrac{4|M_{1}-C(2^{k}+2^{k_{1}})|} {3|\xi_{2}|\left|1+\frac{4}{3\xi\xi_{1}\xi_{2}^{2}}\right|} } { \Biggl\{ \dfrac{4|M_{1}+C(2^{k}+2^{k_{1}})|} {3|\xi_{2}|\left|1+\frac{4}{3\xi\xi_{1}\xi_{2}^{2}}\right|} \Biggr\}^{1/2} +K_{3} } \nonumber\\ &&\qquad \leq \dfrac{C(2^{k}+2^{k_{1}})} {|\xi_{2}|K_{3}\left|1+\frac{4}{3\xi\xi_{1}\xi_{2}^{2}}\right|} \leq \frac{C(2^{k}+2^{k_{1}})}{|\xi_{2}|K_{3}}. \label{2.046} \end{eqnarray} Moreover, from the first \emph{inequality} of the above, such length of the interval of $|2\xi-\xi_2|$ is also bounded by \begin{equation} \frac{C(2^{k/2}+2^{k_{1}/2})} {|\xi_{2}|^{1/2}\left|1+\frac{4}{3\xi\xi_{1}\xi_{2}^{2}} \right|^{1/2}} \leq \frac{C(2^{k/2}+2^{k_{1}/2})}{|\xi_{2}|^{1/2}}. \label{2.047} \end{equation} From (\ref{2.046}) and (\ref{2.047}), we infer that the measure of $F_1$ in this part is bounded by \begin{equation} C\min \left\{|\xi_{2}|^{-1/2}(2^{k/2}+2^{k_{1}/2}), K_{3}^{-1}|\xi_{2}|^{-1}(2^{k}+2^{k_{1}}) \right\}.
\label{2.048} \end{equation} When $K_{3}\leq \Biggl\{\dfrac{4|M_{1}-C(2^{k}+2^{k_{1}})|} {3|\xi_{2}|\left|1+\frac{4}{3\xi\xi_{1}\xi_{2}^{2}}\right|}\Biggr\}^{1/2}$, the length of the interval of $|2\xi-\xi_{2}|$ is bounded by \begin{eqnarray*} &&\Biggl\{\frac{4|M_{1}+C(2^{k}+2^{k_{1}})|} {3|\xi_{2}|\left|1+\frac{4}{3\xi\xi_{1}\xi_{2}^{2}}\right|} \Biggr\} ^{1/2} - \Biggl\{ \frac{4|M_{1}-C(2^{k}+2^{k_{1}})|} {3|\xi_{2}|\left|1+\frac{4}{3\xi\xi_{1}\xi_{2}^{2}} \right|} \Biggr\} ^{1/2}\nonumber\\&&\leq C\frac { \dfrac{4|M_{1}+C(2^{k}+2^{k_{1}})|} {3|\xi_{2}|\left|1+\frac{4}{3\xi\xi_{1}\xi_{2}^{2}}\right|} - \dfrac{4|M_{1}-C(2^{k}+2^{k_{1}})|} {3|\xi_{2}|\left|1+\frac{4}{3\xi\xi_{1}\xi_{2}^{2}}\right|} } { \Biggl\{ \dfrac{4|M_{1}+C(2^{k}+2^{k_{1}})|} {3|\xi_{2}|\left|1+\frac{4}{3\xi\xi_{1}\xi_{2}^{2}}\right|} \Biggr\}^{1/2} +K_{3}}. \end{eqnarray*} Similar to (\ref{2.046}) and (\ref{2.047}), the measure of $F_1$ in this part is also bounded by (\ref{2.048}). \noindent When (\ref{2.035}) holds: $\xi\xi_{1}\leq0$ and $\left|1+\frac{4}{3\xi\xi_{1} \xi_{2}^{2}}\right| > \frac{1}{2}$, it can be proved similarly that the measure of $F_1$ in this part is also bounded by (\ref{2.048}). Recall that $(\xi,\tau)\in B_{k}$, $(\xi_1,\tau_1)\in B_{k_1}$, \begin{eqnarray} \left|\tau-\xi^{3}+ \frac{1}{\xi}\right|\leq C2^{k}, \quad\left|\tau_{1}-\xi_{1}^{3}+ \frac{1}{\xi_{1}}\right| \leq C2^{k_{1}},\label{2.049} \end{eqnarray} we get the estimate (\ref{2.043}) for $F_2$. Consequently, we have (\ref{2.041}). 
By using the Cauchy-Schwartz inequality and the triangle inequality, we have that \begin{eqnarray} &&\hspace{-1cm}\left|\int_{\SR^{2}}\int_{\!\!\!\mbox{\scriptsize $ \begin{array}{l} \xi=\xi_{1}+\xi_{2}\\ \tau=\tau_{1}+\tau_{2} \end{array} $}}f(\xi_{1},\tau_{1})g(\xi_{2},\tau_{2})h(\xi,\tau) d\xi_{1}d\tau_{1}d\xi d\tau \right|\nonumber\\ &&\hspace{-1cm}\leq C\sum_{k_{1}}\left|\int_{\SR^{2}} \int_{\!\!\!\mbox{\scriptsize $ \begin{array}{l} \xi=\xi_{1}+\xi_{2}\\ \tau=\tau_{1}+\tau_{2} \end{array} $}}(\chi_{B_{k_{1}}}f(\xi_{1},\tau_{1}))g(\xi_{2},\tau_{2}) h(\xi,\tau)d\xi_{1}d\tau_{1}d\xi d\tau \right|.\label{2.050} \end{eqnarray} Combining (\ref{2.038}), (\ref{2.039}) with (\ref{2.050}), we have (\ref{2.036}) and (\ref{2.037}). We have completed the proof of Lemma \ref{Lemma2.4}. \begin{Lemma}\label{Lemma2.5} Assume that $f\in \mathscr{S}^{'}(\R^{2})$ , $g\in \mathscr{S}(\R^{2})$ with $\mathop{\rm supp} f\subset A_{j}$ for some $j\geq0$ and $\,\Omega\subset \R^{2}$ has positive measure. Let \begin{eqnarray*} K_{4}:=\inf\left\{|\xi_{1}+\xi|:\exists\, \tau,\tau_{1}\> s.t.\> (\xi,\tau)\in \Omega,(\xi_{1},\tau_{1})\in \supp f \right\}\geq 2. \end{eqnarray*} If \begin{eqnarray} (\xi,\tau)\in \Omega ,\, (\xi_{1},\tau_{1})\in \supp f,\>\xi\xi_{1}\leq 0,\quad \left|1+\frac{4}{3\xi\xi_{1}\xi_{2}^{2}}\right| \leq\frac{1}{2},\label{2.051} \end{eqnarray} then, for any $k\geq0$, we have \begin{eqnarray} &&\|f\ast g\|_{L^{2}(B_{k})} \leq C2^{k/4}\|f\|_{\hat{X}^{0,\frac{1}{2},1}}\||\xi|^{-1/4}g\|_{L^{2}(\SR^{2})} \label{2.052},\\ &&\|f\ast g\|_{L^{2}(\Omega\cap B_{k})} \leq C2^{k/2}K_{4}^{-1/2} \|f\|_{\hat{X}_{0,\frac{1}{2},1}}\> \|\,|\xi|^{-1/2}g\|_{L^{2}(\SR^{2})}\> . 
\label{2.053} \end{eqnarray} \end{Lemma} {\bf Proof.} First we prove \begin{eqnarray} &&\quad \left|\int_{\SR^{2}}\int_{\!\!\!\mbox{\scriptsize $ \begin{array}{l} \xi=\xi_{1}+\xi_{2}\\ \tau=\tau_{1}+\tau_{2} \end{array} $}} (\chi_{B_{k_{1}}}f(\xi_{1},\tau_{1}))g(\xi_{2},\tau_{2}) h(\xi,\tau)d\xi_{1}d\tau_{1}d\xi d\tau \right|\nonumber\\ && \leq C2^{k_{1}/2}2^{k/4} \|f\|_{L_{\xi\tau}^{2}(B_{k_{1}})}\,\|\,|\xi|^{-1/4}g\|_{L_{\xi\tau}^{2}} \|h\|_{L_{\xi\tau}^{2}}\label{2.054} \end{eqnarray} for any $h\in L^{2}(\R^2)$ with $\mathop{\rm supp}h\subset B_{k}$ and \begin{eqnarray} &&\quad \left|\int_{\SR^{2}}\int_{\!\!\!\mbox{\scriptsize $ \begin{array}{l} \xi=\xi_{1}+\xi_{2}\\ \tau=\tau_{1}+\tau_{2} \end{array} $}}(\chi_{B_{k_{1}}}f(\xi_{1},\tau_{1}))g(\xi_{2},\tau_{2})h(\xi,\tau) d\xi_{1}d\tau_{1}d\xi d\tau \right|\nonumber\\ &&\leq CK_{4}^{-1/2}2^{k/2+k_{1}/2}\|f\|_{L_{\xi\tau}^{2}(B_{k_{1}})}\, \|\,|\xi|^{-1/2}g\|_{L_{\xi\tau}^{2}} \|h\|_{L_{\xi\tau}^{2}}\label{2.055} \end{eqnarray} for any $h\in L^{2}(\R^2)$ with $\mathop{\rm supp}h\subset B_{k}\cap \Omega.$ By using the Cauchy-Schwartz inequality and the Fubini theorem, we obtain \begin{eqnarray} &&\quad \left|\int_{\SR^{2}}\int_{\!\!\!\mbox{\scriptsize $ \begin{array}{l} \xi=\xi_{1}+\xi_{2}\\ \tau=\tau_{1}+\tau_{2} \end{array} $}}(\chi_{B_{k_{1}}}f(\xi_{1},\tau_{1}))g(\xi_{2},\tau_{2})h(\xi,\tau) d\xi_{1}d\tau_{1}d\xi d\tau \right| \nonumber\\ && \leq C\sup\limits_{( \xi_{2}, \tau_{2})\in \SR^{2}}m_{4}(\xi_{2},\tau_{2})^{1/2} \|f\|_{L_{\xi\tau}^{2}}\|g\|_{L_{\xi\tau}^{2}} \|h\|_{L_{\xi\tau}^{2}},\label{2.056} \end{eqnarray} where \begin{eqnarray*} &\displaystyle m_{4}(\xi_{2},\tau_{2})=\int\chi_{\Lambda_{4}}(\xi_{2},\tau_{2},\xi,\tau)d\xi d\tau, &\\ &\displaystyle \Lambda_{4}:=\left\{(\xi_{2},\tau_{2},\xi,\tau)\in \R^{4}\, :\, (\xi_{1},\tau_{1})\in\supp f,(\xi,\tau)\in \supp h\right\}, & \end{eqnarray*} in which $\tau=\tau_1+\tau_2,\xi=\xi_1+\xi_2$.
Hence, the proofs of (\ref{2.054}) and (\ref{2.055}) are reduced to \begin{eqnarray} m_{4}(\xi_{2},\tau_{2})\leq C{\rm min}\left\{|\xi_{2}|^{-1/2} 2^{k/2+k_{1}/2},K_4^{-1}|\xi_{2}|^{-1}2^{k+k_{1}}\right\}.\label{2.057} \end{eqnarray} For fixed $\tau,\xi\neq 0$, $(\xi,\tau)\in B_{k}$, we let $F_3$ and $F_4$ be the projections of $\Lambda_4$ onto the $\xi_1$-axis and $\tau_1$-axis, respectively. We shall show \begin{eqnarray} &&\mes F_3\le C\min \left\{|\xi_{2}|^{-1/2}(2^{k/2}+2^{k_{1}/2}), K_{4}^{-1}|\xi_{2}|^{-1}(2^{k}+2^{k_{1}}) \right\}, \label{2.058}\\ &&\mes F_4\le C\min \left\{2^{k},2^{k_{1}} \right\}, \label{2.059} \end{eqnarray} then (\ref{2.057}) follows. When $\xi\xi_{1}\leq 0$ and $\left|1+\frac{4}{3\xi\xi_{1}\xi_{2}^{2}}\right|\leq \frac{1}{2}$, we have \begin{eqnarray} \frac{8}{9\xi_{2}^{2}}\leq \xi(\xi_{2}-\xi)\leq \frac{8}{3\xi_{2}^{2}}.\label{2.060} \end{eqnarray} From (\ref{2.060}), we have that \begin{eqnarray} \frac12\left({\xi_{2}+\sqrt{\xi_{2}^{2}-\frac{32}{3\xi_{2}^{2}}}}\,\right) \leq \xi\leq \frac12\left({\xi_{2}+\sqrt{\xi_{2}^{2}-\frac{32}{9\xi_{2}^{2}}}}\,\right) \label{2.061} \end{eqnarray} or \begin{eqnarray} \frac12\left({\xi_{2}-\sqrt{\xi_{2}^{2}-\frac{32}{9\xi_{2}^{2}}}}\right) \leq \xi \leq \frac12\left({\xi_{2}-\sqrt{\xi_{2}^{2}-\frac{32}{3\xi_{2}^{2}}}}\,\right) \label{2.062}. \end{eqnarray} From (\ref{2.061}) and (\ref{2.062}), we see that the measure of $F_3$ is bounded by \begin{eqnarray*} C|\xi_{2}|^{-2}\leq C{\rm min}\left\{|\xi_{2}|^{-1/2} 2^{k/2+k_{1}/2},K_4^{-1}|\xi_{2}|^{-1}2^{k+k_{1}}\right\}. \end{eqnarray*} Recall that $(\xi,\tau)\in B_{k}$, $(\xi_1,\tau_1)\in B_{k_1}$, \begin{eqnarray} \left|\tau-\xi^{3}+ \frac{1}{\xi}\right|\leq C2^{k}, \quad\left|\tau_{1}-\xi_{1}^{3}+ \frac{1}{\xi_{1}}\right| \leq C2^{k_{1}},\label{2.063} \end{eqnarray} we get the estimate (\ref{2.059}) for $F_4$. Consequently, we have (\ref{2.057}).
By using the Cauchy-Schwartz inequality and the triangle inequality, we have that \begin{eqnarray} &&\hspace{-1cm}\left|\int_{\SR^{2}}\int_{\!\!\!\mbox{\scriptsize $ \begin{array}{l} \xi=\xi_{1}+\xi_{2}\\ \tau=\tau_{1}+\tau_{2} \end{array} $}}f(\xi_{1},\tau_{1})g(\xi_{2},\tau_{2})h(\xi,\tau) d\xi_{1}d\tau_{1}d\xi d\tau \right|\nonumber\\ &&\hspace{-1cm}\leq C\sum_{k_{1}}\left|\int_{\SR^{2}} \int_{\!\!\!\mbox{\scriptsize $ \begin{array}{l} \xi=\xi_{1}+\xi_{2}\\ \tau=\tau_{1}+\tau_{2} \end{array} $}}(\chi_{B_{k_{1}}}f(\xi_{1},\tau_{1}))g(\xi_{2},\tau_{2}) h(\xi,\tau)d\xi_{1}d\tau_{1}d\xi d\tau \right|.\label{2.064} \end{eqnarray} Combining (\ref{2.054}), (\ref{2.055}) with (\ref{2.064}), we have (\ref{2.052}) and (\ref{2.053}). We have completed the proof of Lemma 2.5. \noindent {\bf Remark 2:} From the proof process of (\ref{2.053}), to obtain (\ref{2.053}), it is sufficient to require that $K_{4}>0.$ \begin{Lemma}\label{Lemma2.6} Assume that $f\in \mathscr{S}^{'}(\R^{2})$ , $g\in \mathscr{S}(\R^{2})$ with $\mathop{\rm supp} f\subset A_{j}$ for some $j\geq0$ and $\,\Omega\subset \R^{2}$ has positive measure. If \begin{eqnarray*} K_{5}:=\inf\left\{|\xi_{1}+\xi|:\exists\, \tau,\tau_{1}\> s.t.\> (\xi,\tau)\in \Omega,(\xi_{1},\tau_{1})\in \supp f \right\}>0. \end{eqnarray*} Then \begin{eqnarray*} \|f\ast g\|_{L^{2}(\Omega\cap B_{k})} \leq C2^{k/2}K_{5}^{-1/2} \|f\|_{\hat{X}_{0,\frac{1}{2},1}}\> \|\,|\xi|^{-1/2}g\|_{L^{2}(\SR^{2})}\> \end{eqnarray*} \end{Lemma} {\bf Proof.} Combining Lemma 2.4 with Lemma 2.5, Remark 2, we have that Lemma 2.6. \begin{Lemma}\label{Lemma2.7} Let $j,N\in \N$, $\gamma_{0}=\frac 12 j\geq 2^{N+2}, \gamma_{n+1}=2\log_{2} {\gamma_{n}}$, $6\le \gamma_{_N}<8$. 
Then \begin{eqnarray} \sum_{n=0}^{N-1}\frac{1}{\sqrt{\gamma_{n}}}\label{2.065} \end{eqnarray} is bounded uniformly in $j$ and $N.$ \end{Lemma} {\bf Proof.} We claim that \begin{eqnarray} \gamma_{n}\geq2^{N+2-n},\quad0\leq n\leq N-1.\label{2.066} \end{eqnarray} Let $ a_{n}=\gamma_{N-n}, $ then (\ref{2.066}) is equivalent to \begin{eqnarray} a_{n}=\gamma_{_{N-n}}\geq 2^{n+2}.\label{2.067} \end{eqnarray} We prove (\ref{2.067}) by induction. When $n=1$, $a_{1}=\gamma_{_{N-1}}=2^{\frac 12 \gamma_{_N}}\geq 2^{3}=8.$ We assume that for $n=k$, $a_{k}=\gamma_{_{N-k}}\geq 2^{k+2}.$ Then for $n=k+1,$ we have that $ a_{k+1}= \gamma_{_{N-k-1}}=2^{\frac12{\gamma_{_{N-k}}}} \geq 2^{2^{k+1}}\geq 2^{k+3}. $ Thus we have (\ref{2.067}). Consequently, we have \begin{eqnarray} \sum_{n=0}^{N-1}\frac{1}{\sqrt{\gamma_{n}}}\leq \sum_{n=0}^{N-1}\frac{1}{2^{(N+2-n)/2}} =\frac{\sqrt{2}+1}{2}(1-2^{-N/2})\leq \frac{\sqrt{2}+1}{2}.\label{2.068} \end{eqnarray} We have completed the proof of Lemma \ref{Lemma2.7}. \noindent{\bf Remark:} The conclusion of Lemma 2.7 can be found in page 460 of \cite{Kis}, however, the proof is not given. \begin{Lemma}\label{Lemma2.8} Let $\tau=\tau_1+\tau_{2}, \xi=\xi_1+\xi_{2}.$ Then \begin{eqnarray*} &&\quad \max\left\{3|\xi\xi_{1}\xi_{2}|, \frac{\xi_{1}^{2}+\xi_{1}\xi_{2}+\xi_{2}^{2}}{|\xi\xi_{1}\xi_{2}|}\right\}\\ && \leq\left|\left(\tau-\xi^{3}+\frac{1}{\xi}\right)- \left(\tau_{1}-\xi_{1}^{3}+\frac{1}{\xi_{1}}\right) -\left(\tau_{2}-\xi_{2}^{3}+\frac{1}{\xi_{2}}\right)\right|\nonumber\\ &&\leq 2{\rm max}\left\{3|\xi\xi_{1}\xi_{2}|, \frac{\xi_{1}^{2}+\xi_{1}\xi_{2}+\xi_{2}^{2}} {|\xi\xi_{1}\xi_{2}|}\right\}. 
\end{eqnarray*} \end{Lemma} {\bf Proof.} By a direct computation, since $3\xi\xi_{1}\xi_{2}\times \frac{\xi_{1}^{2}+\xi_{1}\xi_{2}+\xi_{2}^{2}}{\xi\xi_{1}\xi_{2}} >0,$ we have that \begin{eqnarray} &&\quad \max\left\{3|\xi\xi_{1}\xi_{2}|, \frac{\xi_{1}^{2}+\xi_{1}\xi_{2}+\xi_{2}^{2}} {|\xi\xi_{1}\xi_{2}|}\right\} \nonumber\\ &&\leq\left|\left(\tau-\xi^{3}+\frac{1}{\xi}\right)- \left(\tau_{1}-\xi_{1}^{3}+\frac{1}{\xi_{1}}\right) -\left(\tau_{2}-\xi_{2}^{3}+\frac{1}{\xi_{2}}\right)\right| \nonumber\\ && =\left|3\xi\xi_{1}\xi_{2}+\frac{\xi_{1}^{2} +\xi_{1}\xi_{2}+\xi_{2}^{2}}{\xi\xi_{1}\xi_{2}}\right| \nonumber\\ &&\leq 2\max\left\{3|\xi\xi_{1}\xi_{2}|, \frac{\xi_{1}^{2}+\xi_{1}\xi_{2}+\xi_{2}^{2}} {|\xi\xi_{1}\xi_{2}|}\right\} \label{2.069}. \end{eqnarray} We have completed the proof of Lemma \ref{Lemma2.8}. \begin{Lemma}\label{Lemma2.9} The space $\hat{X}$ has the following properties. (i) For any $b>1/2$, there exists $C>0$ such that \begin{eqnarray} \|f\|_{\hat{X}}&\leq& C\|f\|_{\hat{X}^{-3/4,\>b}}.\label{2.070} \end{eqnarray} (ii) For $1< p\leq 2$, there exists $C>0$ such that \begin{eqnarray} &&\|\langle\xi \rangle^{-3/4} f\|_{L_{\xi}^{2}L_{\tau}^{p}}\leq C\|f\|_{\hat{X}},\label{2.071}\\ &&\|\langle\xi \rangle^{-3/4} f\|_{L_{\xi}^{2}L_{\tau}^{1}}\leq C\|f\|_{\hat{X}^{-3/4,\frac{1}{2},1}}\label{2.072}. \end{eqnarray} \end{Lemma} {\bf Proof.} (i) can be proved similarly to (i) of \cite{Kis}. (\ref{2.071}) can be proved similarly to $1<p\leq 2$ of (ii) in \cite{Kis}. (\ref{2.072}) can be proved similarly to \cite{BT}. We have completed the proof of Lemma \ref{Lemma2.9}. \begin{Lemma}\label{Lemma2.10} Let $e^{-t(-\partial_{x}^{3}+\partial_{x}^{-1})}u_{0}$ be the solution to the linear equation (\ref{1.01}).
Then we have the following estimates \begin{eqnarray} &&\hspace{-1cm}\left\|e^{-t(-\partial_{x}^{3}+\partial_{x}^{-1})}u_{0}\right\|_{X_{T}}+\sup \limits_{-T\leq t\leq T}\|e^{-t(-\partial_{x}^{3}+\partial_{x}^{-1})}u_{0}\|_{H_{x}^{-3/4}(\SR)}\leq C\|u_{0}\|_{H_{x}^{-3/4}(\SR)}\label{2.073} \end{eqnarray} and \begin{eqnarray} &&\left\|\int_{0}^{t}e^{-(t-s)(-\partial_{x}^{3}+\partial_{x}^{-1})} F(s)ds\right\|_{X_{T}} +\sup\limits_{-T\leq t\leq T}\left\|\int_{0}^{t} e^{-(t-s)(-\partial_{x}^{3}+\partial_{x}^{-1})} F(s)ds\right\|_{H_{x}^{-3/4}(\SR)} \qquad \nonumber\\ &&\hspace{-5mm}\leq C\left\|\mathscr{F}^{-1} \Bigl(\left\langle\tau-\xi^{3} +\frac{1}{\xi}\right\rangle^{-1}\mathscr{F}F\Bigr)\right\|_{X} +\left\|\mathscr{F}^{-1}\Bigl(\left\langle\tau-\xi^{3} +\frac{1}{\xi}\right\rangle^{-1}\mathscr{F}F\Bigr)\right\|_{Y}, \label{2.074} \end{eqnarray} where $0\leq T\leq1.$ \end{Lemma} Lemma \ref{Lemma2.10} can be proved similarly to Lemma 4.1 of \cite{Kis}. \noindent{\large\bf 3. Bilinear estimates } \setcounter{equation}{0} \setcounter{Theorem}{0} \setcounter{Lemma}{0} \setcounter{section}{3} In this section, we give the proof of Lemmas 3.1-3.2 which is the core of this paper. \begin{Lemma}\label{Lemma3.1} Suppose $f, g \in \mathscr{S}'(\R^{2})$, $\supp f\subset A_{j_{1}}$ and $\supp g\subset A_{j_{2}}$. Then we have \begin{eqnarray} \left\|I_{A_{j}}\left\langle\tau-\xi^{3}+\frac {1}{\xi}\right\rangle^{-1} \xi f*g\right\|_{\hat{X}} &\leq& C(j,j_{1},j_{2})\|f\|_{\hat{X}}\|g\|_{\hat{X}}, \label{3.01}\\ \left\|I_{A_{j}}\langle\xi\rangle^{-3/4} \left\langle\tau-\xi^{3}+\frac{1}{\xi}\right\rangle^{-1} \xi f*g\right\|_{L_{\xi}^{2}L_{\tau}^{1}} &\leq& C(j,j_{1},j_{2})\|f\|_{\hat{X}}\|g\|_{\hat{X}} \label{3.02} \end{eqnarray} for $j\geq0$ in the following cases. (i) At least two of $j,j_{1},j_{2}$ are less than 30 and $C(j,j_{1},j_{2})\sim 1$. (ii) $j_{1},j_{2}\geq30$, $|j_{1}-j_{2}|\leq 10$, $0<j<j_{1}-9$ and $C(j,j_{1},j_{2})\sim 2^{-\frac{3}{8} j}$. 
(iii) $j,j_{1}\geq 30,$ $|j-j_{1}|\leq 10$, $0<j_{2}<j-10$ and $C(j,j_{1},j_{2})\sim2^{-\frac{1}{4}(j-j_{2})}.$ (iv) $j,j_{2}\geq 30,$ $|j-j_{2}|\leq 10$, $0<j_{1}<j-10$ and $C(j,j_{1},j_{2})\sim2^{-\frac{1}{4}(j-j_{1})}.$ (v) $j,j_{1},j_{2}\geq 30$, $|j-j_{1}|\leq 10$, $|j-j_{2}|\leq 10$ and $C(j,j_{1},j_{2})\sim 1.$ (vi) $j_{1},j_{2}\geq30$, $j=0$ and $C(j,j_{1},j_{2})\sim 1.$ (vii) $j,j_{1}\geq 30$, $j_{2}=0$ and $C(j,j_{1},j_{2})\sim 1.$ (viii) $j,j_{2}\geq 30$, $j_{1}=0,$ and $C(j,j_{1},j_{2})\sim 1.$ \end{Lemma} {\bf Proof.} (i) In this case we may assume that $j,j_{1},j_{2}$ are all less than 40. By using the Young inequality and (\ref{2.071})-(\ref{2.072}), we have \begin{eqnarray} &&\left\|I_{A_{j}}\left\langle\tau-\xi^{3}+\frac{1}{\xi}\right\rangle^{-1}\xi f*g\right\|_{\hat{X}}\leq C \left\|I_{A_{j}}\left\langle\tau-\xi^{3}+\frac{1}{\xi}\right\rangle^{-1}\xi f*g\right\|_{\hat{X}^{-3/4,1/2,1}} \nonumber\\&&\leq C\|f*g\|_{L^{2}}\leq C \|f\|_{L_{\xi}^{2}L_{\tau}^{4/3}}\|g\|_{L_{\xi}^{2}L_{\tau}^{4/3}}\leq C \|f\|_{\hat{X}}\|g\|_{\hat{X}}\label{3.03} \end{eqnarray} and \begin{eqnarray} &&\left\|I_{A_{j}}\left\langle\tau-\xi^{3}+\frac{1}{\xi}\right\rangle^{-1}\langle\xi\rangle^{-3/4}\xi f*g\right\|_{L_{\xi}^{2}L_{\tau}^{1}} \leq C\left\| f*g\right\|_{L_{\xi}^{2}L_{\tau}^{2}}\nonumber\\&&\leq C \|f\|_{L_{\xi}^{2}L_{\tau}^{4/3}}\|g\|_{L_{\xi}^{2}L_{\tau}^{4/3}}\leq C\|f\|_{\hat{X}} \|g\|_{\hat{X}}\label{3.04}. \end{eqnarray} (ii) In this case, we restrict $f$ to $B_{k_{1}}$ and $g$ to $B_{k_{2}}$, by using Lemma 2.8, we have that \begin{eqnarray} 2^{k_{max}}:=2^{{\rm max}\{k,\>k_{1},\>k_{2}\}}\geq C2^{j+2j_{1}}\label{3.05}. 
\end{eqnarray} When $2^{k}\geq C2^{j+2j_{1}}$ which yields that $2^{j/4}2^{-k/2}\leq C2^{-3j/4}2^{-j_{1}}2^{j/2},$ by using Lemma 2.3, we have that \begin{eqnarray} &&\left\|I_{A_{j}}\left\langle\tau-\xi^{3}+\frac{1}{\xi}\right\rangle^{-1}\xi f*g\right\|_{\hat{X}}\leq C2^{j/4} \sum_{k\geq0}2^{-k/2}\left\| f*g\right\|_{L^{2}(A_{j}\cap B_{k})}\nonumber\\ &&\leq C2^{-\frac{3}{4}j}2^{-j_{1}}\sum_{k=j+2j_{1}+O(1)}\left\||\xi|^{1/2} f*g\right\|_{L^{2}}\leq C2^{-\frac{3}{4}j}2^{-\frac{3}{2}j_{1}} \|f\|_{\hat{X}^{0,1/2,1}}\|g\|_{\hat{X}^{0,1/2,1}}\nonumber\\&& \leq C2^{-\frac{3}{4}j} \|f\|_{\hat{X}^{-\frac{3}{4},1/2,1}}\|g\|_{\hat{X}^{-\frac{3}{4},1/2,1}}\nonumber\\&&\leq C2^{-\frac{3}{8}j}\|f\|_{\hat{X}} \|g\|_{\hat{X}} \label{3.06}. \end{eqnarray} When $2^{k_{1}}\geq C2^{j+2j_{1}}$ which yields that $2^{-\frac{3}{8}(j+2j_{1})}2^{3k_{1}/8} 2^{\frac{1}{8}(k_{1}-k)}\geq C,$ by using Lemma 2.6, we have that \begin{eqnarray} &&\hspace{-0.5cm}\left\|I_{A_{j}}\left\langle\tau-\xi^{3}+ \frac{1}{\xi}\right\rangle^{-1}\xi f*g\right\|_{\hat{X}}\nonumber\\&&\leq C2^{-j/8} 2^{-3j_{1}/4}\sum_{k\geq0}2^{-5k/8}\left\|\left(\left\langle\tau-\xi^{3} +\frac{1}{\xi}\right\rangle^{1/2} f\right)*g \right\|_{L^{2}(A_{j}\cap B_{k})}\nonumber\\ &&\leq C2^{-\frac{1}{8}j}2^{-\frac{7}{4}j_{1}}\|f\|_{\hat{X}^{0,1/2,1}} \|g\|_{\hat{X}^{0,1/2,1}}\nonumber\\&& \leq C2^{-\frac{3}{8}j} \|f\|_{\hat{X}^{-\frac{3}{4},1/2,1}}\|g\|_{\hat{X}^{-\frac{3}{4},1/2,1}}\nonumber\\&&\leq C2^{-\frac{3}{8}j}\|f\|_{\hat{X}} \|g\|_{\hat{X}}\label{3.07}. \end{eqnarray} When $2^{k_{2}}\geq C2^{j+2j_{1}}$, this case can be treated similarly to case $2^{k_{1}}\geq C2^{j+2j_{1}}$. 
In this case, by using the Cauchy-Schwartz inequality with respect to $\tau$ and a proof similar to the above cases, we have that \begin{eqnarray} &&\left\|I_{A_{j}}\left\langle\tau-\xi^{3}+\frac{1}{\xi}\right\rangle^{-1} \xi \langle \xi\rangle ^{-3/4}f*g\right\|_{L_{\xi}^{2}L_{\tau}^{1}} \nonumber\\&&\leq C2^{j/4}\sum_{k\geq0}2^{-k/2}\left\| f*g\right\| _{L_{\xi}^{2}L_{\tau}^{2}}\nonumber\\&& \leq C\left[2^{-\frac{3}{4}j}2^{-\frac{3}{2}j_{1}} +2^{-\frac{1}{8}j}2^{-\frac{7}{4}j_{1}}\right]\|f\|_{\hat{X}^{0,1/2,1}} \|g\|_{\hat{X}^{0,1/2,1}}\nonumber\\&& \leq C2^{-\frac{3}{8}j} \|f\|_{\hat{X}^{-\frac{3}{4},1/2,1}}\|g\|_{\hat{X}^{-\frac{3}{4},1/2,1}}\nonumber\\&&\leq C2^{-\frac{3}{8}j}\|f\|_{\hat{X}} \|g\|_{\hat{X}}\label{3.08}. \end{eqnarray} (iii) In this case, from Lemma \ref{Lemma2.8}, we have that $2^{k_{\rm max}}\geq C|\xi \xi_{1}\xi_{2}|\geq C2^{2j+j_{2}}$ In this case, the left hand side of (\ref{3.01})-(\ref{3.02}) can be bounded by \begin{eqnarray} C2^{j/4}\sum_{k\geq0}2^{-k/2}\left\| f*g\right\|_{L_{\xi}^{2}L_{\tau}^{2}}\label{3.09}. \end{eqnarray} When $2^{k}\sim 2^{k_{\rm max}}\geq C2^{2j+j_{2}}$, with the aid of Lemma 2.3, (\ref{3.09}) can be bounded by \begin{eqnarray} &&C2^{-\frac{7}{4}j}2^{-j_{2}/2}\|f\|_{\hat{X}^{0,1/2,1}}\|g\|_{\hat{X}^{0,1/2,1}}\nonumber\\&&\leq C2^{-j+\frac{j_{2}}{4}}\|f\|_{\hat{X}^{-\frac{3}{4},1/2,1}}\|g\|_{\hat{X}^{-\frac{3}{4},1/2,1}}\nonumber\\ &&\leq C2^{-j+\frac{j_{2}}{4}}\|f\|_{\hat{X}} \|g\|_{\hat{X}}\label{3.010}. 
\end{eqnarray} When $2^{k_{1}}\sim 2^{k_{\rm max}}\geq C2^{2j+j_{2}}$, with the aid of (\ref{2.036}) and the fact that $2^{k_{1}/2}2^{-\frac{1}{2}(2j+j_{2})}\geq C,$ we have that (\ref{3.09}) can be bounded by \begin{eqnarray} &&C2^{-3j/4}2^{-j_{2}/2}2^{k_{1}/2}\sum_{k\geq0}2^{-k/2}\left\| f*g\right\|_{L_{\xi}^{2}L_{\tau}^{2}}\nonumber\\&& \leq C2^{-3j/4}2^{-j_{2}/2}\sum_{k\geq 0}2^{-k/4}2^{k_{1}/2}\||\xi|^{-1/4}f\|_{L^{2}}\|g\|_{\hat{X}^{0,1/2,1}}\nonumber\\&&\leq C2^{-\frac{1}{4}(j-j_{2})}\|f\|_{\hat{X}^{-\frac{3}{4},1/2,1}}\|g\|_{\hat{X}^{-\frac{3}{4},1/2,1}}\nonumber\\ &&\leq C2^{-\frac{1}{4}(j-j_{2})}\|f\|_{\hat{X}} \|g\|_{\hat{X}}\label{3.011}. \end{eqnarray} When $2^{k_{2}}\sim 2^{k_{\rm max}}\geq C2^{2j+j_{2}}$, by using the fact that $2^{k_{2}/2}2^{-\frac{1}{2}(2j+j_{2})}\geq C,$ in this case $2^{j_{2}}\ll 2^{j}$, we have that (\ref{3.09}) can be bounded by \begin{eqnarray} C2^{-3j/4}2^{-j_{2}/2}2^{k_{2}/2}\sum_{k\geq0}2^{-k/2}\left\| f*g\right\|_{L_{\xi}^{2}L_{\tau}^{2}}\label{3.012}, \end{eqnarray} if $k\leq 10j,$ by using (\ref{2.037}) and $2^{j/4}\geq j(j\geq 20)$, we have that (\ref{3.012}) can be bounded by \begin{eqnarray} &&C2^{-5j/4}2^{-j_{2}}\sum_{k\geq0}2^{-k/4}\|f\|_{\hat{X}^{0,1/2,1}}\|g\|_{\hat{X}^{0,1/2,1}}\nonumber\\&&\leq Cj2^{-\frac{1}{2}j-\frac{j_{2}}{4}}\|f\|_{\hat{X}^{-\frac{3}{4},1/2,1}}\|g\|_{\hat{X}^{-\frac{3}{4},1/2,1}}\nonumber\\&&\leq C2^{-\frac{1}{4}(j-j_{2})}\|f\|_{\hat{X}^{-\frac{3}{4},1/2,1}}\|g\|_{\hat{X}^{-\frac{3}{4},1/2,1}}\nonumber\\ &&\leq C2^{-\frac{1}{4}(j-j_{2})}\|f\|_{\hat{X}} \|g\|_{\hat{X}}\label{3.013}, \end{eqnarray} if $k\geq 10j,$ we have that (\ref{3.012}) can be bounded by \begin{eqnarray} &&C2^{-5j/4}2^{-j_{2}}\sum_{k\geq0}2^{-k/4}\|f\|_{\hat{X}^{0,1/2,1}} \|g\|_{\hat{X}^{0,1/2,1}}\nonumber\\&&\leq C2^{-15j/4}2^{-j_{2}}\|f\|_{\hat{X}^{0,1/2,1}}\|g\|_{\hat{X}^{0,1/2,1}} \nonumber\\&&\leq C2^{-3j-j_{2}/4} \|f\|_{\hat{X}^{-\frac{3}{4},1/2,1}}\|g\|_{\hat{X}^{-\frac{3}{4},1/2,1}} \nonumber\\ &&\leq 
C2^{-3j-j_{2}/4}\|f\|_{\hat{X}} \|g\|_{\hat{X}}\label{3.014}. \end{eqnarray} (iv) This case can be proved similarly to case (iii). \noindent(v) In this case, from Lemma \ref{Lemma2.8}, we have that $2^{k_{\rm max}}\geq C|\xi \xi_{1}\xi_{2}|\geq C2^{3j}\sim 2^{3j_{1}}\sim 2^{3j_{2}}.$ In this case, the left hand side of (\ref{3.01})-(\ref{3.02}) can be bounded by \begin{eqnarray} C2^{j/4}\sum_{k\geq0}2^{-k/2}\left\| f*g\right\|_{L_{\xi}^{2}L_{\tau}^{2}}\label{3.015}. \end{eqnarray} When $2^{k}\sim 2^{k_{\rm max}}\geq C2^{2j+j_{2}}$, with the aid of (\ref{2.03}), (\ref{3.015}) can be bounded by \begin{eqnarray} &&C2^{-3j/2}\|f\|_{\hat{X}^{0,1/2,1}}\|g\|_{\hat{X}^{0,1/2,1}} \leq C\|f\|_{\hat{X}^{-\frac{3}{4},1/2,1}}\|g\|_{\hat{X}^{-\frac{3}{4},1/2,1}}\nonumber\\ &&\leq C\|f\|_{\hat{X}} \|g\|_{\hat{X}}\label{3.016}. \end{eqnarray} When $2^{k_{1}}\sim 2^{k_{\rm max}}\geq C2^{3j}$, with the aid of (\ref{2.036}) and the fact that $2^{k_{1}/2}2^{-\frac{3}{2}j}\geq C,$ (\ref{3.015}) can be bounded by \begin{eqnarray} &&C2^{-5j/4}2^{k_{1}/2}\sum_{k\geq0}2^{-k/2}\left\| f*g\right\|_{L_{\xi}^{2} L_{\tau}^{2}}\nonumber\\&& \leq C2^{-\frac{5}{4}j}\|f\|_{\hat{X}^{0,1/2,1}}\|g\|_{\hat{X}^{0,1/2,1}}\nonumber\\&&\leq C \|f\|_{\hat{X}^{-\frac{3}{4},1/2,1}}\|g\|_{\hat{X}^{-\frac{3}{4},1/2,1}}\nonumber\\&& \leq C\|f\|_{\hat{X}} \|g\|_{\hat{X}}\label{3.017}. \end{eqnarray} When $2^{k_{2}}\sim 2^{k_{\rm max}}\geq C2^{2j+j_{2}}$, this case can be proved similarly to case $2^{k_{1}}\sim 2^{k_{\rm max}}\geq C2^{3j}$. \noindent (vi) (a) When $2^{k_{1}}\sim 2^{k_{\rm max}}\geq 2^{2j_{1}+j}$, this case can be proved similarly to $2^{k_{1}}\sim 2^{k_{\rm max}}$ of (ii). (b) When $2^{k_{2}}\sim 2^{k_{\rm max}} \geq 2^{2j_{1}+j}$, this case can be proved similarly to $2^{k_{2}}\sim 2^{k_{\rm max}}$ of (ii). 
(c) When $2^{k}\geq 2^{2j_{1}+j}.$ In this case, $|\xi|\left\langle\tau-\xi^{3}+\frac{1}{\xi} \right\rangle^{-1}\leq C 2^{-2j_{1}}$ or $|\xi|\leq 2^{k-2j_{1}}$ in $B_{k}.$ By using the H\"older inequality in $\xi$ and the Young inequality as well as (\ref{2.038}), we have that \begin{eqnarray} &&\left\||\xi|\left\langle\tau-\xi^{3}+\frac{1}{\xi}\right\rangle^{-1}\langle\xi\rangle^{-3/4}f*g\right\|_{L_{\xi}^{2} L_{\tau}^{1}(A_{0})}\nonumber\\&&\leq C2^{-2j_{1}}\|f*g\|_{L_{\xi}^{\infty}L_{\tau}^{1} (A_{0})}\nonumber\\&&\leq C2^{-j_{1}/2}\left\|\langle\xi\rangle^{-3/4}f\right\|_{L_{\xi}^{2}L_{\tau}^{1}} \left\|\langle\xi\rangle^{-3/4}g\right\|_{L_{\xi}^{2}L_{\tau}^{1}}\nonumber\\&& \leq C2^{-j_{1}/2}\|f\|_{\hat{X}} \|g\|_{\hat{X}}\label{3.018}. \end{eqnarray} When $(\xi,\tau)\in D$, since $|\xi|\left\langle\tau-\xi^{3}+\frac{1}{\xi}\right\rangle^{-1/2}\leq C |\xi|^{1/2}2^{-j_{1}}$, by using Lemma 2.3, we have that \begin{eqnarray} &&\left\|I_{A_{j}}\left\langle\tau-\xi^{3}+\frac{1}{\xi}\right\rangle^{-1}\xi f\ast g\right\|_{\hat{X}^{-3/4,1/2}}\nonumber\\&&\leq C\left\|\xi\left\langle\tau-\xi^{3}+\frac{1}{\xi}\right\rangle^{-1/2}f*g\right\|_{L^{2}}\nonumber \\&&\leq C2^{-j_{1}}\left\||\xi|^{1/2}f*g\right\|_{L^{2}}\nonumber\\ &&\leq C2^{-j_{1}}2^{-\frac{j_{1}}{2}}\|f\|_{\hat{X}^{0,1/2,1}} \|g\|_{\hat{X}^{0,1/2,1}}\nonumber\\&&\leq C\|f\|_{\hat{X}^{-3/4,1/2,1}}\|g\|_{\hat{X}^{-3/4,1/2,1}}\nonumber\\&& \leq C\|f\|_{\hat{X}} \|g\|_{\hat{X}}.\label{3.019} \end{eqnarray} When $(\xi,\tau)$ is outside of $D$ and $|\xi|\leq \frac{1}{8}$, we have that \begin{eqnarray} |\xi|^{-3}\geq |\tau|=\left|\tau-\xi^{3}+\frac{1}{\xi}\right|+1-|\xi|^{3} -|\xi|^{-1}-1\geq 2^{k}-|\xi|^{3}-|\xi|^{-1}-1.\label{3.020} \end{eqnarray} From (\ref{3.020}), we have that $|\xi|\leq C2^{-j_{1}/2}.$ We consider the following two cases: \begin{eqnarray} &&(1): 2^{-(3-\epsilon)j_{1}}\leq |\xi|\leq C2^{-j_{1}/2},\label{3.021}\\ &&(2):|\xi|\leq 2^{-(3-\epsilon)j_{1}}\label{3.022}. 
\end{eqnarray} Case (1) can be proved similarly to $f*g\subset \left\{(\xi,\tau)\in \R^{2}:|\xi|\leq 1, |\tau|\leq |\xi|^{-3}\right\}$ of (iv) in \cite{Kis}. Now we deal with case (2). In this case, we have that \begin{eqnarray} &&\left\|I_{A_{j}}\left\langle\tau-\xi^{3}+\frac{1}{\xi}\right \rangle^{-1}\xi f*g\right\|_{\hat{X}}\nonumber\\&&\leq C\left\|I_{A_{j}}\left\langle\tau-\xi^{3}+\frac{1}{\xi}\right \rangle^{-1}\xi f*g\right\|_{\hat{X}^{-3/4,1/2,1}}\nonumber\\ &&\leq C2^{j}\sum_{k\geq0}2^{-k/2}\|f*g\|_{L^{2}(A_{j}\cap B_{k})} \leq C(v_{1}+v_{2}),\label{3.023} \end{eqnarray} where \begin{eqnarray} &&v_{1}=\sum_{j=-\infty}^{-(3-\epsilon)j_{1}}2^{j}\sum_{k\geq2j_{1}+j}^{10j_{1}} 2^{-k/2}\left\|f*g\right\|_{L^{2}(B_{k})},\nonumber\\ &&v_{2}=\sum_{j=-\infty}^{-(3-\epsilon)j_{1}}2^{j}\sum_{k\geq10j_{1}} 2^{-k/2} \left\|f*g\right\|_{L^{2}(B_{k})}\nonumber. \end{eqnarray} By using the Cauchy-Schwartz inequality with respect to $\tau$ and $\frac{j_{1}}{2^{(1-\epsilon)j_{1}}}\leq 1(j\geq 10)$, we have that \begin{eqnarray} &&v_{1}\leq Cj_{1}\sum_{j=-\infty}^{-(3-\epsilon)j_{1}}2^{j}\|f*g\|_{L_{\xi}^{2}L_{\tau}^{\infty}} \leq Cj_{1}\sum_{j=-\infty}^{-(3-\epsilon)j_{1}}2^{j}\|f\|_{L_{\xi}^{2}L_{\tau}^{2}} \|g\|_{L_{\xi}^{1}L_{\tau}^{2}}\nonumber\\&&\leq C j_{1}2^{-(\frac{5}{2}-\epsilon)j_{1}}\|f\|_{L_{\xi}^{2}L_{\tau}^{2}}\|g\|_{L_{\xi}^{2} L_{\tau}^{2}}\leq Cj_{1}2^{-(1-\epsilon)j_{1}}\|f\|_{\hat{X}^{-3/4,1/2,1}} \|g\|_{\hat{X}^{-3/4,1/2,1}}\nonumber\\&&\leq C\|f\|_{\hat{X}^{-3/4,1/2,1}} \|g\|_{\hat{X}^{-3/4,1/2,1}}\nonumber\\&&\leq C\|f\|_{\hat{X}} \|g\|_{\hat{X}}\nonumber. 
\end{eqnarray} By using Lemma 2.3, we have that \begin{eqnarray} &&v_{2}\leq C2^{-11j_{1}/2}\sum_{j=-\infty}^{-(3-\epsilon)j_{1}}2^{j/2} \|f\|_{\hat{X}^{0,1/2,1}} \|g\|_{\hat{X}^{0,1/2,1}}\leq C2^{-\frac{14-\epsilon}{2}j_{1}} \|f\|_{\hat{X}^{0,1/2,1}} \|g\|_{\hat{X}^{0,1/2,1}}\nonumber\\&&\leq C2^{-\frac{11-\epsilon}{2}j_{1}} \|f\|_{\hat{X}^{-3/4,1/2,1}} \|g\|_{\hat{X}^{-3/4,1/2,1}}\leq C\|f\|_{\hat{X}^{-3/4,1/2,1}} \|g\|_{\hat{X}^{-3/4,1/2,1}}\nonumber\\&&\leq C\|f\|_{\hat{X}} \|g\|_{\hat{X}}\nonumber. \end{eqnarray} When $(\xi,\tau)$ is outside of $D$ and $|\xi|\geq \frac{1}{8}$, this case can be proved similarly to (ii). \noindent(vii) When $(\xi_{2},\tau_{2})\in A_{0}$ we consider $|\xi_{2}|\leq 2^{-2j},$ $2^{-2j}\leq|\xi_{2}|\leq \frac{1}{8} $ and $\frac{1}{8}<|\xi_{2}|\leq 1$, respectively. \noindent (a) Case $|\xi_{2}|\leq 2^{-2j}.$ By using the Young inequality and the H\"older inequality and (\ref{2.072}), we have that \begin{eqnarray} &&\left\|I_{A_{j}}\xi f*g\right\|_{\hat{X}^{-3/4,-1/2,1}}\leq C2^{j}\left\|\langle\xi\rangle^{-3/4}f \right\|_{L_{\xi}^{2}L_{\tau}^{1}}\left\|g\right\|_{L_{\xi}^{1}L_{\tau}^{2}}\nonumber\\&&\leq C\left\|\langle\xi\rangle^{-3/4}f\right\|_{L_{\xi}^{2}L_{\tau}^{1}} \left\|g\right\|_{L_{\xi}^{2}L_{\tau}^{2}}\nonumber\\&&\leq C\|f\|_{\hat{X}^{-3/4,1/2,1}} \left\|g\right\|_{\hat{X}^{-3/4,1/2}}\nonumber\\&&\leq C\|f\|_{\hat{X}} \|g\|_{\hat{X}}.\label{3.024} \end{eqnarray} (b) $2^{-2j}\leq |\xi_{2}|\leq\frac{1}{8} ,$ cases $2^{k}\sim 2^{k_{max}}$ and $2^{k_{1}}\sim 2^{k_{max}}$ can be proved similarly to cases $2^{k}\sim 2^{k_{max}}$ and $2^{k_{1}}\sim 2^{k_{max}}$ of (v) in \cite{Kis}. \noindent Case $2^{k_{2}}\sim 2^{k_{max}}$ and $(\tau_{2},\xi_{2})\in D.$ \noindent We only consider $2^{k_{2}}\sim 2^{k_{max}}>4{\rm max} \left\{2^{k},2^{k_{1}}\right\},$ otherwise, $2^{k_{2}}\sim 2^{k_{max}}\leq 4{\rm max}\left\{2^{k},2^{k_{1}}\right\}$ which boils down to cases $2^{k}\sim 2^{k_{max}}$ and $2^{k_{1}}\sim 2^{k_{max}}$. 
In this case, we claim that $3|\xi\xi_{1}\xi_{2}|\geq \frac{\xi_{1}^{2}+\xi_{1}\xi_{2} +\xi_{2}^{2}}{|\xi\xi_{1}\xi_{2}|}$. If $3|\xi\xi_{1}\xi_{2}|\leq \frac{\xi_{1}^{2}+\xi_{1}\xi_{2}+\xi_{2}^{2}}{|\xi\xi_{1}\xi_{2}|},$ since $|\tau_{2}|\geq |\xi_{2}|^{-3},$ then we have \begin{eqnarray} \frac{1}{2}|\xi_{2}|^{-3}\leq |\tau_{2}|-|\xi_{2}|^{3}-\frac{1}{|\xi_{2}|} \leq\left|\tau_{2}-\xi_{2}^{3}+\frac{1}{\xi_{2}}\right|\leq \frac{4(\xi_{1}^{2} +\xi_{1}\xi_{2}+\xi_{2}^{2})}{|\xi\xi_{1}\xi_{2}|}\label{3.025}, \end{eqnarray} from (\ref{3.025}), we have \begin{eqnarray} \frac{\xi_{1}^{2}+\xi_{1}\xi_{2}+\xi_{2}^{2}}{|\xi\xi_{1}|}\geq \frac{1}{8}|\xi_{2}|^{-2}\geq 8\label{3.026}, \end{eqnarray} from (\ref{3.026}), since $\xi\xi_{1}>0,$ we have that \begin{eqnarray} \xi_{1}^{2}+\xi_{1}\xi_{2}+\xi_{2}^{2}\geq 8|\xi\xi_{1}|=8\xi\xi_{1}=8\xi_{1}^{2} +8\xi_{1}\xi_{2},\label{3.027} \end{eqnarray} which yields \begin{eqnarray} 7\xi_{1}^{2}+7\xi_{1}\xi_{2}\leq \xi_{2}^{2}\leq \frac{1}{64}\label{3.028}. \end{eqnarray} (\ref{3.028}) is invalid. Thus, $3|\xi\xi_{1}\xi_{2}|\geq \frac{\xi_{1}^{2} +\xi_{1}\xi_{2}+\xi_{2}^{2}}{|\xi\xi_{1}\xi_{2}|}$. Thus, in this case $2^{-3j_{2}}\sim\frac{1}{2}|\xi_{2}|^{-3}\leq |\tau_{2}-\xi_{2}^{3} +\frac{1}{\xi_{2}}|\sim |\xi\xi_{1}\xi_{2}|\sim 2^{2j+j_{2}}$ which yields $|\xi_{2}|\sim 2^{j_{2}}\sim2^{-j/2}.$ Consequently, $C2^{3j/2}\leq |\tau_{2}|\sim \left|\tau_{2}-\xi_{2}^{3}+\frac{1}{\xi_{2}} \right|\leq C2^{2j}.$ Without loss of generality, we can assume that $g$ is supported on $A_{0}\cap B_{[3j/2,2j]}$. 
\noindent(1) When $g$ is supported on $B_{[\frac{3j}{2},\frac{3j}{2}+\gamma]}$ with $0\leq \gamma \leq \frac{j}{2}$, for any $\gamma^{'}\geq0,$ by using the Young inequality and $|\xi|\leq C|\xi_{2}|^{-1/2},$ we have that \begin{eqnarray} &&\left\|I_{B_{\geq \gamma^{'}}}\xi f*g\right\|_{\widehat{X}^{-3/4,-\frac{1}{2},1}} \nonumber\\&&\leq C\sum_{k\geq \gamma^{'}}2^{-k/2}\left\|\langle\xi\rangle^{-3/4}f\right\|_{L_{\xi}^{2} L_{\tau}^{1}}\left\||\xi|^{-1/2}\left\langle\tau-\xi^{3} +\frac{1}{\xi}\right\rangle ^{1/2}g\right\|_{L_{\xi}^{1}L_{\tau}^{2}(B_{[\frac{3j}{2},\frac{3j}{2}+\gamma]})}\nonumber\\ &&\leq C2^{-\gamma^{'}/2}\|f\|_{\widehat{X}^{-3/4,-\frac{1}{2},1}}\||\xi|^{-1/2}\|_{L_{\xi}^{2} (\{C2^{-j/2}\leq |\xi|\leq C2^{-\frac{j}{2}+\gamma}\})}\|g\|_{\widehat{X}^{0,\frac{1}{2}}}\nonumber\\ &&\leq C(\langle\gamma\rangle)^{1/2}2^{-\gamma^{'}/2}\|f\|_{\widehat{X}^{-3/4,\frac{1}{2},1}} \|g\|_{\widehat{X}^{-3/4,\frac{1}{2}}}\nonumber\\&&\leq C\|f\|_{\hat{X}} \|g\|_{\hat{X}}.\label{3.029} \end{eqnarray} (2) When $g$ is supported on $B_{[\frac{3j}{2}+\gamma^{'},2j]}$ with $0\leq \gamma^{'} \leq \frac{j}{2}$, for any $\gamma\geq0,$ by using the Young inequality, $|\xi|\leq C|\xi_{2}|^{-1/2}$ and Lemma 2.6, we have that \begin{eqnarray} &&\left\|I_{B_{\leq \gamma}}\xi f*g\right\|_{\widehat{X}^{-3/4,-\frac{1}{2},1}}\nonumber \\&&\leq C\sum_{k\leq \gamma}2^{-k/2}\left\|(\langle\xi\rangle^{-3/4}f)*(|\xi|^{-1/2}\left\langle\tau-\xi^{3} +\frac{1}{\xi}\right\rangle ^{1/2}g)\right\|_{L^{2}(B_{k})}\nonumber\\ &&\leq C\langle\gamma\rangle 2^{-j/2}\|f\|_{\widehat{X}^{-3/4,\frac{1}{2},1}}\left\||\xi|^{-1/2} \left\langle\tau-\xi^{3}+\frac{1}{\xi}\right\rangle^{1/2}g\right\|_{L_{\tau\xi}^{2}(|\xi|\geq C2^{-\frac{j}{2}+\gamma^{'}})}\nonumber\\ &&\leq C(\langle\gamma\rangle)2^{-\gamma^{'}/2}\|f\|_{\widehat{X}^{-3/4,\frac{1}{2},1}} \|g\|_{\widehat{X}^{-3/4,\frac{1}{2}}}\nonumber\\&&\leq C\|f\|_{\hat{X}} \|g\|_{\hat{X}}.\label{3.030} \end{eqnarray} Let
$\gamma_{0}=\frac{j}{2}(\geq 2^{N+2})$, $\gamma_{n+1}=2{\rm log}_{2}{\gamma_{n}},$ $6\leq \gamma_{N}<8.$ Firstly, we apply (1) with $\gamma=\gamma_{0}$ and $\gamma^{'}=\gamma_{1}$, then apply (2) with $\gamma=\gamma_{1}, \gamma^{'}=\gamma_{2}$. Repeating this procedure, at the end applying (1) with $\gamma=\gamma_{N-1}$ and $\gamma^{'}=0,$ combining (1) with (2), by using Lemma \ref{Lemma2.7}, we have that \begin{eqnarray} &&\left\|\xi f*g\right\|_{\widehat{X}^{-3/4,-\frac{1}{2},1}}\nonumber\\&&\leq C(1+\sum_{n=0}^{N-1} \frac{1}{\gamma_{n}^{1/2}})\|f\|_{\widehat{X}^{-3/4,\frac{1}{2},1}}\|g\|_{\widehat{X}^{-3/4,\frac{1}{2},1}}\nonumber\\ &&\leq C\|f\|_{\widehat{X}^{-3/4,\frac{1}{2},1}}\|g\|_{\widehat{X}^{-3/4,\frac{1}{2}}}\nonumber\\&&\leq C\|f\|_{\hat{X}} \|g\|_{\hat{X}}.\label{3.031} \end{eqnarray} Case $2^{k_{2}}\sim 2^{k_{max}}$ and $(\xi_{2},\tau_{2})$ is outside of $D.$ In this case, from Lemma \ref{Lemma2.8}, we have that $2^{k_{2}}\sim 2^{k_{\rm max}}\geq C 2^{2j}|\xi_{2}|$ which yields that $|\xi_{2}|\leq C2^{k_{2}-2j}$. By using the proof similar to (3.10) of \cite{Kis}, we have that \begin{eqnarray} \left\|\xi f*g\right\|_{\widehat{X}^{-3/4,-\frac{1}{2},1}} \leq C\|f\|_{\widehat{X}^{-3/4,\frac{1}{2},1}}\|g\|_{\widehat{X}^{-3/4,\frac{1}{2},1}}\leq C\|f\|_{\hat{X}} \|g\|_{\hat{X}}.\label{3.032} \end{eqnarray} By using the Cauchy-Schwartz inequality in $\tau$, we have that (\ref{3.02}) can be bounded by \begin{eqnarray*} \left\|\xi f*g\right\|_{\widehat{X}^{-3/4,-\frac{1}{2},1}} \end{eqnarray*} in this case, which can be proved similarly to (\ref{3.01}) in this case. \noindent (c) $\frac{1}{8}< |\xi_{2}|<1$. This case can be proved similarly to case (iii). \noindent(viii) This case can be proved similarly to case (vii). We have completed the proof of Lemma 3.1. 
\begin{Lemma}\label{Lem3.2} Let $u,v\in X$, then \begin{eqnarray} &&\left\|\mathscr{F}^{-1}\left[\left\langle\tau-\xi^{3}+ \frac {1}{\xi}\right\rangle^{-1}\mathscr{F}\left[\partial_{x}(uv)\right]\right] \right\|_{X}+ \left\|\mathscr{F}^{-1}\left[\left\langle\tau-\xi^{3}+ \frac{1}{\xi}\right\rangle^{-1}\mathscr{F}\left[\partial_{x}(uv)\right]\right] \right\|_{Y}\nonumber\\&&\leq C\|u\|_{X}\|v\|_{X}.\label{3.045} \end{eqnarray} \end{Lemma} {\bf Proof.} To prove (\ref{3.045}), it suffices to prove that \begin{eqnarray} &&\left\|\mathscr{F}^{-1}\left[\left\langle\tau-\xi^{3}+ \frac {1}{\xi}\right\rangle^{-1}\mathscr{F}\left[\partial_{x}(uv)\right]\right] \right\|_{X}\leq C\|u\|_{X}\|v\|_{X}.\label{3.046}\\ &&\left\|\mathscr{F}^{-1}\left[\left\langle\tau-\xi^{3}+ \frac{1}{\xi}\right\rangle^{-1}\mathscr{F}\left[\partial_{x}(uv)\right]\right] \right\|_{Y}\leq C\|u\|_{X}\|v\|_{X}.\label{3.047} \end{eqnarray} We first prove (\ref{3.046}). By using $\|f\|_{\hat{X}}^{2}=\sum\limits_{j\geq 0}\|I_{A_{j}}f\|_{\hat{X}}^{2}$, we have that \begin{eqnarray*} &&\left\|\mathscr{F}^{-1}\left[\left\langle\tau-\xi^{3}+ \frac {1}{\xi}\right\rangle^{-1}\mathscr{F}\left[\partial_{x}(uv)\right]\right] \right\|_{X}^{2}\nonumber\\&&=\sum_{j,j_{1},j_{2}\geq0}\left\|\xi\left\langle\tau-\xi^{3}+ \frac {1}{\xi}\right\rangle^{-1}I_{A_{j}}(I_{A_{j_{1}}}\mathscr{F}u)*(I_{A_{j_{2}}}\mathscr{F}v) \right\|_{\hat{X}}^{2}=\sum_{j=1}^{8}T_{j}, \end{eqnarray*} where \begin{eqnarray*} &&T_{1}=\sum_{j,j_{1},j_{2}\geq0,\>i}\left\|\xi\left\langle\tau-\xi^{3}+ \frac {1}{\xi}\right\rangle^{-1}I_{A_{j}}(I_{A_{j_{1}}}\mathscr{F}u)*(I_{A_{j_{2}}}\mathscr{F}v) \right\|_{\hat{X}}^{2},\nonumber\\ &&T_{2}=\sum_{j,j_{1},j_{2}\geq0,\>ii}\left\|\xi\left\langle\tau-\xi^{3}+ \frac {1}{\xi}\right\rangle^{-1}I_{A_{j}}(I_{A_{j_{1}}}\mathscr{F}u)*(I_{A_{j_{2}}}\mathscr{F}v) \right\|_{\hat{X}}^{2},\nonumber\\ &&T_{3}=\sum_{j,j_{1},j_{2}\geq0,\>iii}\left\|\xi\left\langle\tau-\xi^{3}+ \frac
{1}{\xi}\right\rangle^{-1}I_{A_{j}}(I_{A_{j_{1}}}\mathscr{F}u)*(I_{A_{j_{2}}}\mathscr{F}v) \right\|_{\hat{X}}^{2},\nonumber\\ &&T_{4}=\sum_{j,j_{1},j_{2}\geq0,\>iv}\left\|\xi\left\langle\tau-\xi^{3}+ \frac {1}{\xi}\right\rangle^{-1}I_{A_{j}}(I_{A_{j_{1}}}\mathscr{F}u)*(I_{A_{j_{2}}}\mathscr{F}v) \right\|_{\hat{X}}^{2},\nonumber\\ &&T_{5}=\sum_{j,j_{1},j_{2}\geq0,\>v}\left\|\xi\left\langle\tau-\xi^{3}+ \frac {1}{\xi}\right\rangle^{-1}I_{A_{j}}(I_{A_{j_{1}}}\mathscr{F}u)*(I_{A_{j_{2}}}\mathscr{F}v) \right\|_{\hat{X}}^{2},\nonumber\\ &&T_{6}=\sum_{j,j_{1},j_{2}\geq0,\>vi}\left\|\xi\left\langle\tau-\xi^{3}+ \frac {1}{\xi}\right\rangle^{-1}I_{A_{j}}(I_{A_{j_{1}}}\mathscr{F}u)*(I_{A_{j_{2}}}\mathscr{F}v) \right\|_{\hat{X}}^{2},\nonumber\\ &&T_{7}=\sum_{j,j_{1},j_{2}\geq0,\>vii}\left\|\xi\left\langle\tau-\xi^{3}+ \frac {1}{\xi}\right\rangle^{-1}I_{A_{j}}(I_{A_{j_{1}}}\mathscr{F}u)*(I_{A_{j_{2}}}\mathscr{F}v) \right\|_{\hat{X}}^{2},\nonumber\\ &&T_{8}=\sum_{j,j_{1},j_{2}\geq0,\>viii}\left\|\xi\left\langle\tau-\xi^{3}+ \frac {1}{\xi}\right\rangle^{-1}I_{A_{j}}(I_{A_{j_{1}}}\mathscr{F}u)*(I_{A_{j_{2}}}\mathscr{F}v) \right\|_{\hat{X}}^{2}. \end{eqnarray*} Here $(i),(ii),(iii),(iv),(v),(vi),(vii), (viii)$ are cases $(i),(ii),(iii),(iv),(v),(vi),(vii), (viii)$ of Lemmas 3.1, 3.2. Combining $T_{j}$ $(1\leq j\leq 8, j\in \mathbb{N})$, Lemmas 3.1, 3.2 with $\|f\|_{\hat{X}}^{2}=\sum_{j\geq 0}\|I_{A_{j}}f\|_{\hat{X}}^{2}$, we easily obtain (\ref{3.046}). By using a proof similar to (\ref{3.046}), we easily obtain (\ref{3.047}). We have completed the proof of Lemma 3.2. \noindent {\large\bf 4.
Proof of Theorem 1.1} \setcounter{equation}{0} \setcounter{Theorem}{0} \setcounter{Lemma}{0} \setcounter{section}{4} (\ref{1.01})-(\ref{1.02}) is equivalent to the following integral equation: \begin{eqnarray} u(t)=e^{-t(-\partial_{x}^{3}+\partial_{x}^{-1})}u_{0}+\frac{1}{2}\int_{0}^{t}e^{-(t-s) (-\partial_{x}^{3}+\partial_{x}^{-1})}\partial_{x}(u^{2})ds.\label{5.01} \end{eqnarray} We define \begin{eqnarray} \Phi(u)=e^{-t(-\partial_{x}^{3}+\partial_{x}^{-1})}u_{0}+\frac{1}{2}\int_{0}^{t}e^{-(t-s) (-\partial_{x}^{3}+\partial_{x}^{-1})}\partial_{x}(u^{2})ds.\label{5.02} \end{eqnarray} By using Lemma \ref{Lemma2.10} and Lemma \ref{Lem3.2}, we have that \begin{eqnarray} \|\Phi(u)\|_{X_{1}}+\sup\limits_{-1\leq t\leq1}\|\Phi(u)\|_{H^{-3/4}(\SR)} \leq C\|u_{0}\|_{H^{-3/4}(\SR)}+C\|u\|_{X_{1}}^{2}, \end{eqnarray} when $\|u_{0}\|_{H^{-3/4}}$ is sufficiently small, we have that $\Phi(u)$ is a contraction mapping on some closed ball in $X_{1}\cap C_{t}^{0}([-1,1]; H^{-3/4}(\R))$. Thus $\Phi$ has a fixed point $u$, which is the local solution of (\ref{5.01}) and thus of (\ref{1.01})-(\ref{1.02}). For large data, by taking $u_{\lambda 0}(x)=\lambda^{-2}u_{0}\left(\frac{x}{\lambda}\right),$ we have that $ \|u_{\lambda 0}\|_{H^{-3/4}(\SR)}\leq C\lambda^{-3/4}\|u_{0}\|_{H^{-3/4}(\SR)}. $ Taking $\lambda$ sufficiently large, then $\|u_{\lambda 0}\|_{H^{-3/4}(\SR)}$ is sufficiently small, then there is a solution to (\ref{1.01}) associated to the initial function $u_{\lambda 0}(x,0)$, and thus (\ref{1.01})-(\ref{1.02}) admit a solution. The Lipschitz dependence of solutions on the data and the uniqueness of the solutions can be found in \cite{MT,Kis}. We have completed the proof of Theorem 1.1. \section{\large\bf Appendix} \setcounter{equation}{0} \setcounter{Theorem}{0} \setcounter{Lemma}{0} \setcounter{section}{5} {\bf Example 1.} (high $\times$ high$\mapsto$ low interaction.)
Let $Rec$ be the region in $\R^{2}_{\tau\xi}$ inside the parallelogram with vertices \begin{eqnarray} &&(\tau,\xi)=(N^{3},N), (N^{3}+N^{\frac{3}{2}},N+\frac{1}{3}N^{-\frac{1}{2}}),\label{7.01}\\ &&\left((N+\frac{1}{3}N^{-\frac{1}{2}})^{3}, N+\frac{1}{3}N^{-\frac{1}{2}}\right), \left(N^{3}+\frac{1}{3}+\frac{1}{27}N^{-\frac{3}{2}},N\right),\label{6.02} \end{eqnarray} where $N$ is a sufficiently large positive number. It is easily checked that $Rec$ is included in the region $\left\{|\tau-\xi^{3}+\frac{1}{\xi}|<1\right\},$ has the longest side pointing in the direction $(3N^{2},1)$ and $|Rec|\sim N^{-1/2}.$ We put $R_{0}$ equal to the translation of $Rec$ centered at the origin. Let \begin{eqnarray} \mathscr{F}u(\tau,\xi):=I_{Rec}, \mathscr{F}v(\tau,\xi)=\mathscr{F}u(-\tau,-\xi), \end{eqnarray} where $I_{\Omega}$ denotes the characteristic function of a set $\Omega.$ By a direct computation, we have that \begin{eqnarray} \|u\|_{X^{-\frac{3}{4},b}}=\|v\|_{X^{-\frac{3}{4},b}}\sim N^{-1}, \mathscr{F}(uv)\geq CN^{-\frac{1}{2}}I_{R_{0}}, \|\partial_{x}(uv)\|_{X^{-\frac{3}{4},b-1}}\geq CN^{\frac{6b-11}{4}}.\label{6.03} \end{eqnarray} Then \begin{eqnarray} \|\partial_{x}(fg)\|_{X^{-\frac{3}{4},\>b-1}}\leq C\|f\|_{X^{-\frac{3}{4},\>b}}\|g\|_{X^{-\frac{3}{4},\>b}}\label{6.04} \end{eqnarray} is invalid for $b>1/2.$ {\bf Example 2.} (high $\times$ low $\mapsto$ high interaction.)
Let $\mathscr{F}u=I_{Rec},$ $\mathscr{F}v=I_{R_{0}}.$ By a direct computation, we have that \begin{eqnarray} &&\|u\|_{X^{-\frac{3}{4},b}}\sim N^{-1},\|v\|_{X^{-\frac{3}{4},b}}\sim N^{\frac{6b-1}{4}}, \nonumber\\&&\mathscr{F}(uv) \geq CN^{-\frac{1}{2}}I_{Rec},\|\partial_{x}(uv)\|_{X^{-\frac{3}{4},b-1}}\geq CN^{1/2},\label{6.05} \end{eqnarray} thus, \begin{eqnarray} \|\partial_{x}(fg)\|_{X^{-\frac{3}{4},\>b-1}}\leq C\|f\|_{X^{-\frac{3}{4},\>b}}\|g\|_{X^{-\frac{3}{4},\>b}}\label{6.06} \end{eqnarray} is invalid for $b<1/2.$ \leftline{\large \bf Acknowledgments} \noindent This work is supported by the Natural Science Foundation of China under grant numbers 11171116 and 11401180. The first author is also supported in part by the Fundamental Research Funds for the Central Universities of China under the grant number 2012ZZ0072. The second author is supported by the NSF of China (No.11371367) and Fundamental research program of NUDT (JC12-02-03). The third author is also supported by the Natural Science Foundation of China under grant number 14IRTSTHN023. \leftline{\large\bf References} \end{document}